From 07217a1eec17535f03a174b819bd35bb189ba551 Mon Sep 17 00:00:00 2001 From: Fergal Gribben Date: Mon, 22 Apr 2024 21:07:21 +0100 Subject: [PATCH 01/10] Dequeue upkeeps using the current dequeue iteration --- .changeset/modern-ghosts-hang.md | 4 + .../evmregistry/v21/logprovider/buffer_v1.go | 34 +- .../v21/logprovider/buffer_v1_test.go | 137 +++++- .../evmregistry/v21/logprovider/provider.go | 36 +- .../v21/logprovider/provider_test.go | 449 ++++++++++++++++++ .../v21/logprovider/recoverer_test.go | 5 + 6 files changed, 656 insertions(+), 9 deletions(-) create mode 100644 .changeset/modern-ghosts-hang.md diff --git a/.changeset/modern-ghosts-hang.md b/.changeset/modern-ghosts-hang.md new file mode 100644 index 00000000000..edd3f54f27e --- /dev/null +++ b/.changeset/modern-ghosts-hang.md @@ -0,0 +1,4 @@ +--- +"chainlink": patch +--- +Iterate over upkeeps using an upkeep selector #changed diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index fe15e962e53..617f1ce2358 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -26,7 +26,7 @@ type LogBuffer interface { // It also accepts a function to select upkeeps. // Returns logs (associated to upkeeps) and the number of remaining // logs in that window for the involved upkeeps. - Dequeue(block int64, blockRate, upkeepLimit, maxResults int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) + Dequeue(start, end int64, upkeepLimit, maxResults int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) // SetConfig sets the buffer size and the maximum number of logs to keep for each upkeep. SetConfig(lookback, blockRate, logLimit uint32) // NumOfUpkeeps returns the number of upkeeps that are being tracked by the buffer. @@ -81,6 +81,7 @@ type logBuffer struct { // map for then number of times we have enqueued logs for a block number enqueuedBlocks map[int64]map[string]int enqueuedBlockLock sync.RWMutex + queueIDs []string } func NewLogBuffer(lggr logger.Logger, lookback, blockRate, logLimit uint32) LogBuffer { @@ -89,6 +90,7 @@ func NewLogBuffer(lggr logger.Logger, lookback, blockRate, logLimit uint32) LogB opts: newLogBufferOptions(lookback, blockRate, logLimit), lastBlockSeen: new(atomic.Int64), enqueuedBlocks: map[int64]map[string]int{}, + queueIDs: []string{}, queues: make(map[string]*upkeepLogQueue), } } @@ -167,11 +169,10 @@ func (b *logBuffer) trackBlockNumbersForUpkeep(uid *big.Int, uniqueBlocks map[in // Dequeue greedly pulls logs from the buffers. // Returns logs and the number of remaining logs in the buffer. 
-func (b *logBuffer) Dequeue(block int64, blockRate, upkeepLimit, maxResults int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) { +func (b *logBuffer) Dequeue(start, end int64, upkeepLimit, maxResults int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) { b.lock.RLock() defer b.lock.RUnlock() - start, end := getBlockWindow(block, blockRate) return b.dequeue(start, end, upkeepLimit, maxResults, upkeepSelector) } @@ -183,11 +184,14 @@ func (b *logBuffer) Dequeue(block int64, blockRate, upkeepLimit, maxResults int, func (b *logBuffer) dequeue(start, end int64, upkeepLimit, capacity int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) { var result []BufferedLog var remainingLogs int - for _, q := range b.queues { + var selectedUpkeeps int + numLogs := 0 + for _, qid := range b.queueIDs { + q := b.queues[qid] if !upkeepSelector(q.id) { - // if the upkeep is not selected, skip it continue } + selectedUpkeeps++ logsInRange := q.sizeOfRange(start, end) if logsInRange == 0 { // if there are no logs in the range, skip the upkeep @@ -207,8 +211,10 @@ func (b *logBuffer) dequeue(start, end int64, upkeepLimit, capacity int, upkeepS result = append(result, BufferedLog{ID: q.id, Log: l}) capacity-- } + numLogs += len(logs) remainingLogs += remaining } + b.lggr.Debugw("dequeued logs for upkeeps", "selectedUpkeeps", selectedUpkeeps, "numLogs", numLogs) return result, remainingLogs } @@ -230,12 +236,18 @@ func (b *logBuffer) SyncFilters(filterStore UpkeepFilterStore) error { b.lock.Lock() defer b.lock.Unlock() - for upkeepID := range b.queues { + for _, upkeepID := range b.queueIDs { uid := new(big.Int) _, ok := uid.SetString(upkeepID, 10) if ok && !filterStore.Has(uid) { // remove upkeep that is not in the filter store delete(b.queues, upkeepID) + for i, v := range b.queueIDs { + if v == upkeepID { + b.queueIDs = append(b.queueIDs[:i], b.queueIDs[i+1:]...) + break + } + } } } @@ -254,6 +266,16 @@ func (b *logBuffer) setUpkeepQueue(uid *big.Int, buf *upkeepLogQueue) { b.lock.Lock() defer b.lock.Unlock() + found := false + for _, id := range b.queueIDs { + if id == uid.String() { + found = true + break + } + } + if !found { + b.queueIDs = append(b.queueIDs, uid.String()) + } b.queues[uid.String()] = buf } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go index c41dd3d9bcc..eba78947f1a 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go @@ -1,6 +1,8 @@ package logprovider import ( + "fmt" + "math" "math/big" "testing" @@ -232,13 +234,146 @@ func TestLogEventBufferV1_Dequeue(t *testing.T) { added, dropped := buf.Enqueue(id, logs...) 
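// Editor's aside (not part of this patch): the buffer hunks above pair the
// `queues` map with a `queueIDs` slice because ranging over a Go map is
// intentionally randomized, so two Dequeue passes could visit upkeeps in a
// different order. The standalone sketch below shows that ordered-keys pattern
// in isolation; the type and method names (orderedQueues, enqueue, remove,
// forEach) are hypothetical and do not appear in the patch.
package main

import "fmt"

// orderedQueues keeps an insertion-ordered key slice next to a map so that
// iteration order is deterministic.
type orderedQueues struct {
	ids    []string
	queues map[string][]int
}

func (o *orderedQueues) enqueue(id string, log int) {
	if _, ok := o.queues[id]; !ok {
		o.ids = append(o.ids, id) // record each key once, in arrival order
	}
	o.queues[id] = append(o.queues[id], log)
}

func (o *orderedQueues) remove(id string) {
	delete(o.queues, id)
	for i, v := range o.ids { // keep the slice in sync, as SyncFilters does above
		if v == id {
			o.ids = append(o.ids[:i], o.ids[i+1:]...)
			break
		}
	}
}

func (o *orderedQueues) forEach(fn func(id string, logs []int)) {
	for _, id := range o.ids { // deterministic, unlike `for id := range o.queues`
		fn(id, o.queues[id])
	}
}

func main() {
	o := &orderedQueues{queues: map[string][]int{}}
	o.enqueue("2", 20)
	o.enqueue("1", 10)
	o.forEach(func(id string, logs []int) { fmt.Println(id, logs) }) // always "2" then "1"
}
// --- end of editor's aside ---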
require.Equal(t, len(logs), added+dropped) } - results, remaining := buf.Dequeue(tc.args.block, tc.args.blockRate, tc.args.upkeepLimit, tc.args.maxResults, tc.args.upkeepSelector) + start, end := getBlockWindow(tc.args.block, tc.args.blockRate) + results, remaining := buf.Dequeue(start, end, tc.args.upkeepLimit, tc.args.maxResults, tc.args.upkeepSelector) require.Equal(t, len(tc.results), len(results)) require.Equal(t, tc.remaining, remaining) }) } } +func TestLogEventBufferV1_Dequeue_highLoad(t *testing.T) { + t.Run("Dequeue from a deterministic order of upkeeps", func(t *testing.T) { + lookback := uint32(20) + blockRate := uint32(1) + logLimit := uint32(1) + buf := NewLogBuffer(logger.TestLogger(t), lookback, blockRate, logLimit) + + upkeepIDs := []*big.Int{ + big.NewInt(1), + big.NewInt(2), + big.NewInt(3), + big.NewInt(4), + big.NewInt(5), + } + + numUpkeeps := len(upkeepIDs) + + blockNumbers := []int64{ + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + } + + // for each upkeep, enqueue 10 logs per block, for 10 blocks + for _, upkeepID := range upkeepIDs { + for _, blockNumber := range blockNumbers { + for i := 0; i < 10; i++ { + log := logpoller.Log{ + BlockNumber: blockNumber, + TxHash: common.HexToHash(fmt.Sprintf("0x%dff%dff%d", blockNumber, upkeepID.Int64(), i)), + } + buf.Enqueue(upkeepID, log) + } + } + } + + bufV1 := buf.(*logBuffer) + + assert.Equal(t, 5, len(bufV1.queues)) + + // each queue should have 100 logs + assert.Equal(t, 100, len(bufV1.queues["1"].logs)) + assert.Equal(t, 100, len(bufV1.queues["2"].logs)) + assert.Equal(t, 100, len(bufV1.queues["3"].logs)) + assert.Equal(t, 100, len(bufV1.queues["4"].logs)) + assert.Equal(t, 100, len(bufV1.queues["5"].logs)) + + maxResults := 5 + iterations := int(math.Ceil(float64(numUpkeeps*5) / float64(maxResults))) + + assert.Equal(t, 5, iterations) + + upkeepSelectorFn := func(id *big.Int) bool { + return id.Int64()%int64(iterations) == int64(0) // on this dequeue attempt, current iteration will be 0 + } + + logs, remaining := buf.Dequeue(100, 101, 5, maxResults, upkeepSelectorFn) + + // we should dequeue 5 logs, and the block window should have 15 logs remaining for this selector + assert.Equal(t, 5, len(logs)) + assert.Equal(t, 15, remaining) + + assert.Equal(t, 100, len(bufV1.queues["1"].logs)) + assert.Equal(t, 100, len(bufV1.queues["2"].logs)) + assert.Equal(t, 100, len(bufV1.queues["3"].logs)) + assert.Equal(t, 100, len(bufV1.queues["4"].logs)) + assert.Equal(t, 95, len(bufV1.queues["5"].logs)) + + upkeepSelectorFn = func(id *big.Int) bool { + return id.Int64()%int64(iterations) == int64(1) // on this dequeue attempt, current iteration will be 1 + } + + logs, remaining = buf.Dequeue(100, 101, 5, maxResults, upkeepSelectorFn) + + // we should dequeue 5 logs, and the block window should have 15 logs remaining for this selector + assert.Equal(t, 5, len(logs)) + assert.Equal(t, 15, remaining) + + assert.Equal(t, 95, len(bufV1.queues["1"].logs)) + assert.Equal(t, 100, len(bufV1.queues["2"].logs)) + assert.Equal(t, 100, len(bufV1.queues["3"].logs)) + assert.Equal(t, 100, len(bufV1.queues["4"].logs)) + assert.Equal(t, 95, len(bufV1.queues["5"].logs)) + + upkeepSelectorFn = func(id *big.Int) bool { + return id.Int64()%int64(iterations) == int64(2) // on this dequeue attempt, current iteration will be 2 + } + + logs, remaining = buf.Dequeue(100, 101, 5, maxResults, upkeepSelectorFn) + + // we should dequeue 5 logs, and the block window should have 15 logs remaining for this selector + assert.Equal(t, 5, len(logs)) + 
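// Editor's aside (not part of this patch): the tests above and below bucket
// logs by block window via getBlockWindow(block, blockRate), which is defined
// elsewhere in this package. The arithmetic implied by the assertions (with
// blockRate 4, blocks 100-103 share the window [100, 103]) is sketched below;
// treat it as an illustration of the expected behaviour, not the canonical
// implementation.
package main

import "fmt"

// blockWindow returns the inclusive [start, end] window containing block, where
// each window spans blockRate consecutive blocks aligned to a multiple of
// blockRate. A blockRate of 0 or 1 degenerates to a single-block window.
func blockWindow(block int64, blockRate int) (start, end int64) {
	size := int64(blockRate)
	if size <= 1 {
		return block, block
	}
	start = block - block%size
	end = start + size - 1
	return start, end
}

func main() {
	for _, b := range []int64{1, 100, 101, 102, 103} {
		s, e := blockWindow(b, 4)
		fmt.Printf("block %d -> window [%d, %d]\n", b, s, e) // block 1 -> [0, 3], blocks 100-103 -> [100, 103]
	}
}
// --- end of editor's aside ---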
assert.Equal(t, 15, remaining) + + assert.Equal(t, 95, len(bufV1.queues["1"].logs)) + assert.Equal(t, 95, len(bufV1.queues["2"].logs)) + assert.Equal(t, 100, len(bufV1.queues["3"].logs)) + assert.Equal(t, 100, len(bufV1.queues["4"].logs)) + assert.Equal(t, 95, len(bufV1.queues["5"].logs)) + + upkeepSelectorFn = func(id *big.Int) bool { + return id.Int64()%int64(iterations) == int64(3) // on this dequeue attempt, current iteration will be 3 + } + + logs, remaining = buf.Dequeue(100, 101, 5, maxResults, upkeepSelectorFn) + + // we should dequeue 5 logs, and the block window should have 15 logs remaining for this selector + assert.Equal(t, 5, len(logs)) + assert.Equal(t, 15, remaining) + + assert.Equal(t, 95, len(bufV1.queues["1"].logs)) + assert.Equal(t, 95, len(bufV1.queues["2"].logs)) + assert.Equal(t, 95, len(bufV1.queues["3"].logs)) + assert.Equal(t, 100, len(bufV1.queues["4"].logs)) + assert.Equal(t, 95, len(bufV1.queues["5"].logs)) + + upkeepSelectorFn = func(id *big.Int) bool { + return id.Int64()%int64(iterations) == int64(4) // on this dequeue attempt, current iteration will be 4 + } + + logs, remaining = buf.Dequeue(100, 101, 5, maxResults, upkeepSelectorFn) + + // we should dequeue 5 logs, and the block window should have 15 logs remaining for this selector + assert.Equal(t, 5, len(logs)) + assert.Equal(t, 15, remaining) + + assert.Equal(t, 95, len(bufV1.queues["1"].logs)) + assert.Equal(t, 95, len(bufV1.queues["2"].logs)) + assert.Equal(t, 95, len(bufV1.queues["3"].logs)) + assert.Equal(t, 95, len(bufV1.queues["4"].logs)) + assert.Equal(t, 95, len(bufV1.queues["5"].logs)) + }) +} + func TestLogEventBufferV1_Enqueue(t *testing.T) { tests := []struct { name string diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index 633188c8396..d2d82ee5aa7 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -7,6 +7,7 @@ import ( "fmt" "hash" "io" + "math" "math/big" "runtime" "sync" @@ -114,6 +115,9 @@ type logEventProvider struct { currentPartitionIdx uint64 chainID *big.Int + + currentIteration int + iterations int } func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, chainID *big.Int, packer LogDataPacker, filterStore UpkeepFilterStore, opts LogTriggersOptions) *logEventProvider { @@ -151,6 +155,7 @@ func (p *logEventProvider) SetConfig(cfg ocr2keepers.LogEventProviderConfig) { switch p.opts.BufferVersion { case BufferVersionV1: + p.lggr.With("where", "setConfig").Infow("setting buffer v1 config") p.bufferV1.SetConfig(uint32(p.opts.LookbackBlocks), blockRate, logLimit) default: } @@ -287,9 +292,33 @@ func (p *logEventProvider) getLogsFromBuffer(latestBlock int64) []ocr2keepers.Up switch p.opts.BufferVersion { case BufferVersionV1: // in v1, we use a greedy approach - we keep dequeuing logs until we reach the max results or cover the entire range. - blockRate, logLimitLow, maxResults, _ := p.getBufferDequeueArgs() + blockRate, logLimitLow, maxResults, numOfUpkeeps := p.getBufferDequeueArgs() + + // when numOfUpkeeps exceeds maxResults, it isn't possible to dequeue a log for every upkeep in a single round, + // even if logLimitLow is set to 1. For this reason, we can spread the dequeue process across multiple iterations, + // e.g. 
if we have 200 upkeeps, and maxResults is 100, a single dequeue could only dequeue logs for half + // of the upkeeps, whereas a dequeue process of two iterations (two dequeue calls) can dequeue logs for upkeeps. + if p.iterations == p.currentIteration { + p.currentIteration = 0 + p.iterations = int(math.Ceil(float64(numOfUpkeeps*logLimitLow) / float64(maxResults))) + if p.iterations == 0 { + p.iterations = 1 + } + } + + // upkeepSelectorFn is a function that accepts an upkeep ID, and performs a modulus against the number of + // iterations, and compares the result against the current iteration. When this comparison returns true, the + // upkeep is selected for the dequeuing. This means that, for a given set of upkeeps, a different subset of + // upkeeps will be dequeued for each iteration once only, and, across all iterations, all upkeeps will be + // dequeued once. + upkeepSelectorFn := func(id *big.Int) bool { + return id.Int64()%int64(p.iterations) == int64(p.currentIteration) + } + for len(payloads) < maxResults && start <= latestBlock { - logs, remaining := p.bufferV1.Dequeue(start, blockRate, logLimitLow, maxResults-len(payloads), DefaultUpkeepSelector) + startWindow, end := getBlockWindow(start, blockRate) + + logs, remaining := p.bufferV1.Dequeue(startWindow, end, logLimitLow, maxResults-len(payloads), upkeepSelectorFn) if len(logs) > 0 { p.lggr.Debugw("Dequeued logs", "start", start, "latestBlock", latestBlock, "logs", len(logs)) } @@ -299,13 +328,16 @@ func (p *logEventProvider) getLogsFromBuffer(latestBlock int64) []ocr2keepers.Up payloads = append(payloads, payload) } } + if remaining > 0 { p.lggr.Debugw("Remaining logs", "start", start, "latestBlock", latestBlock, "remaining", remaining) // TODO: handle remaining logs in a better way than consuming the entire window, e.g. 
do not repeat more than x times continue } + start += int64(blockRate) } + p.currentIteration++ default: logs := p.buffer.dequeueRange(start, latestBlock, AllowedLogsPerUpkeep, MaxPayloads) for _, l := range logs { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go index c5bc047e8f4..30f64aaa1fe 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go @@ -8,6 +8,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -315,6 +317,453 @@ func newEntry(p *logEventProvider, i int, args ...string) (LogTriggerConfig, upk return cfg, f } +func TestLogEventProvider_GetLatestPayloads(t *testing.T) { + t.Run("5 upkeeps, 100 logs per upkeep per block for 100 blocks", func(t *testing.T) { + upkeepIDs := []*big.Int{ + big.NewInt(1), + big.NewInt(2), + big.NewInt(3), + big.NewInt(4), + big.NewInt(5), + } + + filterStore := NewUpkeepFilterStore() + + logGenerator := func(start, end int64) []logpoller.Log { + var res []logpoller.Log + for i := start; i < end; i++ { + for j := 0; j < 100; j++ { + res = append(res, logpoller.Log{ + LogIndex: int64(j), + BlockHash: common.HexToHash(fmt.Sprintf("%d", i+1)), + BlockNumber: i + 1, + }) + } + } + return res + } + + // use a log poller that will create logs for the queried block range + logPoller := &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 100, nil + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + // prepare the filter store with upkeeps + for _, upkeepID := range upkeepIDs { + filterStore.AddActiveUpkeeps( + upkeepFilter{ + addr: []byte(upkeepID.String()), + upkeepID: upkeepID, + topics: []common.Hash{ + common.HexToHash(upkeepID.String()), + }, + }, + ) + } + + opts := NewOptions(200, big.NewInt(1)) + opts.BufferVersion = "v1" + + provider := NewLogProvider(logger.TestLogger(t), logPoller, big.NewInt(1), &mockedPacker{}, filterStore, opts) + + ctx := context.Background() + + _, err := provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + err = provider.ReadLogs(ctx, upkeepIDs...) 
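// Editor's aside (not part of this patch): the provider hunk above spreads a
// dequeue round across ceil(numUpkeeps*logLimitLow/maxResults) iterations and
// selects upkeeps with id % iterations == currentIteration. The standalone
// sketch below works through the 200-upkeep case exercised by the test further
// down; the helper names (iterationsNeeded, selectorFor) are illustrative only.
package main

import (
	"fmt"
	"math"
	"math/big"
)

// iterationsNeeded returns how many dequeue rounds are required so that every
// upkeep can contribute logLimit logs without exceeding maxResults per round.
func iterationsNeeded(numUpkeeps, logLimit, maxResults int) int {
	n := int(math.Ceil(float64(numUpkeeps*logLimit) / float64(maxResults)))
	if n == 0 {
		n = 1
	}
	return n
}

// selectorFor reproduces the modulus-based upkeep selector used above.
func selectorFor(iterations, currentIteration int) func(id *big.Int) bool {
	return func(id *big.Int) bool {
		return id.Int64()%int64(iterations) == int64(currentIteration)
	}
}

func main() {
	iterations := iterationsNeeded(200, 1, 100) // 2: half of the upkeeps per round
	for current := 0; current < iterations; current++ {
		selected := 0
		sel := selectorFor(iterations, current)
		for id := int64(1); id <= 200; id++ {
			if sel(big.NewInt(id)) {
				selected++
			}
		}
		// iteration 0 picks the 100 even IDs, iteration 1 the 100 odd IDs
		fmt.Printf("iteration %d selects %d upkeeps\n", current, selected)
	}
}
// --- end of editor's aside ---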
+ assert.NoError(t, err) + + assert.Equal(t, 5, provider.bufferV1.NumOfUpkeeps()) + + bufV1 := provider.bufferV1.(*logBuffer) + + // each upkeep should have 100 logs * 100 blocks = 10000 logs + assert.Equal(t, 10000, len(bufV1.queues["1"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["2"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["3"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["4"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["5"].logs)) + + payloads, err := provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 100 logs + assert.Equal(t, 100, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 9980, len(bufV1.queues["1"].logs)) + assert.Equal(t, 9980, len(bufV1.queues["2"].logs)) + assert.Equal(t, 9980, len(bufV1.queues["3"].logs)) + assert.Equal(t, 9980, len(bufV1.queues["4"].logs)) + assert.Equal(t, 9980, len(bufV1.queues["5"].logs)) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 100 logs + assert.Equal(t, 100, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 9960, len(bufV1.queues["1"].logs)) + assert.Equal(t, 9960, len(bufV1.queues["2"].logs)) + assert.Equal(t, 9960, len(bufV1.queues["3"].logs)) + assert.Equal(t, 9960, len(bufV1.queues["4"].logs)) + assert.Equal(t, 9960, len(bufV1.queues["5"].logs)) + }) + + t.Run("200 upkeeps", func(t *testing.T) { + var upkeepIDs []*big.Int + + for i := int64(1); i <= 200; i++ { + upkeepIDs = append(upkeepIDs, big.NewInt(i)) + } + + filterStore := NewUpkeepFilterStore() + + logGenerator := func(start, end int64) []logpoller.Log { + var res []logpoller.Log + for i := start; i < end; i++ { + for j := 0; j < 100; j++ { + res = append(res, logpoller.Log{ + LogIndex: int64(j), + BlockHash: common.HexToHash(fmt.Sprintf("%d", i+1)), + BlockNumber: i + 1, + }) + } + } + return res + } + + // use a log poller that will create logs for the queried block range + logPoller := &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 100, nil + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + // prepare the filter store with upkeeps + for _, upkeepID := range upkeepIDs { + filterStore.AddActiveUpkeeps( + upkeepFilter{ + addr: []byte(upkeepID.String()), + upkeepID: upkeepID, + topics: []common.Hash{ + common.HexToHash(upkeepID.String()), + }, + }, + ) + } + + opts := NewOptions(200, big.NewInt(1)) + opts.BufferVersion = "v1" + + provider := NewLogProvider(logger.TestLogger(t), logPoller, big.NewInt(1), &mockedPacker{}, filterStore, opts) + + ctx := context.Background() + + _, err := provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + err = provider.ReadLogs(ctx, upkeepIDs...) 
+ assert.NoError(t, err) + + assert.Equal(t, 200, provider.bufferV1.NumOfUpkeeps()) + + bufV1 := provider.bufferV1.(*logBuffer) + + // each upkeep should have 100 logs * 100 blocks = 10000 logs + assert.Equal(t, 10000, len(bufV1.queues["1"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["50"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["101"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["150"].logs)) + + payloads, err := provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 2, provider.iterations) + assert.Equal(t, 1, provider.currentIteration) + + // we dequeue a maximum of 100 logs + assert.Equal(t, 100, len(payloads)) + + // the dequeue is evenly distributed across selected upkeeps + assert.Equal(t, 10000, len(bufV1.queues["1"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["50"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["101"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["150"].logs)) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 2, provider.currentIteration) + + // we dequeue a maximum of 100 logs + assert.Equal(t, 100, len(payloads)) + + // the dequeue is evenly distributed across selected upkeeps + assert.Equal(t, 9999, len(bufV1.queues["1"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["50"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["101"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["150"].logs)) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 1, provider.currentIteration) + + // we dequeue a maximum of 100 logs + assert.Equal(t, 100, len(payloads)) + + // the dequeue is evenly distributed across selected upkeeps + assert.Equal(t, 9999, len(bufV1.queues["1"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["50"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["101"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["150"].logs)) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 2, provider.currentIteration) + + // we dequeue a maximum of 100 logs + assert.Equal(t, 100, len(payloads)) + + // the dequeue is evenly distributed across selected upkeeps + assert.Equal(t, 9998, len(bufV1.queues["1"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["50"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["101"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["150"].logs)) + }) + + t.Run("200 upkeeps, increasing to 300 upkeeps midway through the test", func(t *testing.T) { + var upkeepIDs []*big.Int + + for i := int64(1); i <= 200; i++ { + upkeepIDs = append(upkeepIDs, big.NewInt(i)) + } + + filterStore := NewUpkeepFilterStore() + + logGenerator := func(start, end int64) []logpoller.Log { + var res []logpoller.Log + for i := start; i < end; i++ { + for j := 0; j < 100; j++ { + res = append(res, logpoller.Log{ + LogIndex: int64(j), + BlockHash: common.HexToHash(fmt.Sprintf("%d", i+1)), + BlockNumber: i + 1, + }) + } + } + return res + } + + // use a log poller that will create logs for the queried block range + logPoller := &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 100, nil + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + // prepare the filter store with upkeeps + for _, upkeepID := range upkeepIDs { + filterStore.AddActiveUpkeeps( + upkeepFilter{ + addr: []byte(upkeepID.String()), + upkeepID: upkeepID, + topics: 
[]common.Hash{ + common.HexToHash(upkeepID.String()), + }, + }, + ) + } + + opts := NewOptions(200, big.NewInt(1)) + opts.BufferVersion = "v1" + + provider := NewLogProvider(logger.TestLogger(t), logPoller, big.NewInt(1), &mockedPacker{}, filterStore, opts) + + ctx := context.Background() + + _, err := provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + err = provider.ReadLogs(ctx, upkeepIDs...) + assert.NoError(t, err) + + assert.Equal(t, 200, provider.bufferV1.NumOfUpkeeps()) + + bufV1 := provider.bufferV1.(*logBuffer) + + // each upkeep should have 100 logs * 100 blocks = 10000 logs + assert.Equal(t, 10000, len(bufV1.queues["1"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["9"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["21"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["50"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["101"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["150"].logs)) + + payloads, err := provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 2, provider.iterations) + assert.Equal(t, 1, provider.currentIteration) + + // we dequeue a maximum of 100 logs + assert.Equal(t, 100, len(payloads)) + + // the dequeue is evenly distributed across selected upkeeps; with 2 iterations this means even upkeep IDs are dequeued first + assert.Equal(t, 10000, len(bufV1.queues["1"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["40"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["45"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["50"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["101"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["150"].logs)) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 2, provider.currentIteration) + + // we dequeue a maximum of 100 logs + assert.Equal(t, 100, len(payloads)) + + // the dequeue is evenly distributed across selected upkeeps; on the second iteration, odd upkeep IDs are dequeued + assert.Equal(t, 9999, len(bufV1.queues["1"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["50"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["99"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["100"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["101"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["150"].logs)) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 1, provider.currentIteration) + + // we dequeue a maximum of 100 logs + assert.Equal(t, 100, len(payloads)) + + // the dequeue is evenly distributed across selected upkeeps; on the third iteration, even upkeep IDs are dequeued once again + assert.Equal(t, 9999, len(bufV1.queues["1"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["50"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["101"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["150"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["160"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["170"].logs)) + + for i := int64(201); i <= 300; i++ { + upkeepIDs = append(upkeepIDs, big.NewInt(i)) + } + + for i := 200; i < len(upkeepIDs); i++ { + upkeepID := upkeepIDs[i] + filterStore.AddActiveUpkeeps( + upkeepFilter{ + addr: []byte(upkeepID.String()), + upkeepID: upkeepID, + topics: []common.Hash{ + common.HexToHash(upkeepID.String()), + }, + }, + ) + } + + err = provider.ReadLogs(ctx, upkeepIDs...) 
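// Editor's aside (not part of this patch): the test above checks that the
// iteration count is only recomputed once the current iteration wraps around,
// so upkeeps added mid-cycle do not change the schedule until the cycle ends.
// A compact trace of that guard, replaying the 200 -> 300 upkeep jump, is
// sketched below with illustrative names (scheduler, round).
package main

import (
	"fmt"
	"math"
)

type scheduler struct {
	iterations, current int
}

// round mirrors the guard in getLogsFromBuffer: recompute the cycle length only
// once the previous cycle has been fully consumed, then advance the iteration.
func (s *scheduler) round(numUpkeeps, logLimit, maxResults int) {
	if s.iterations == s.current {
		s.current = 0
		s.iterations = int(math.Ceil(float64(numUpkeeps*logLimit) / float64(maxResults)))
		if s.iterations == 0 {
			s.iterations = 1
		}
	}
	// ... dequeue here for upkeeps where id % iterations == current ...
	s.current++
}

func main() {
	s := &scheduler{}
	upkeeps := 200
	for i := 0; i < 6; i++ {
		if i == 3 {
			upkeeps = 300 // 100 upkeeps added mid-cycle
		}
		s.round(upkeeps, 1, 100)
		fmt.Printf("round %d: iterations=%d current=%d\n", i+1, s.iterations, s.current)
	}
	// rounds 1-4 run 2-iteration cycles; the jump to 300 upkeeps only takes
	// effect once the cycle wraps, after which iterations becomes 3 in round 5.
}
// --- end of editor's aside ---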
+ assert.NoError(t, err) + + assert.Equal(t, 300, provider.bufferV1.NumOfUpkeeps()) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 2, provider.iterations) + assert.Equal(t, 2, provider.currentIteration) + + // we dequeue a maximum of 100 logs + assert.Equal(t, 100, len(payloads)) + + // the dequeue is evenly distributed across selected upkeeps; the new iterations + // have not yet been recalculated despite the new logs being added; new iterations + // are only calculated when current iteration maxes out at the total number of iterations + assert.Equal(t, 9998, len(bufV1.queues["1"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["50"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["51"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["52"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["101"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["150"].logs)) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // with the newly added logs, iterations is recalculated + assert.Equal(t, 3, provider.iterations) + assert.Equal(t, 1, provider.currentIteration) + + // we dequeue a maximum of 100 logs + assert.Equal(t, 100, len(payloads)) + + // the dequeue is evenly distributed across selected upkeeps + assert.Equal(t, 9998, len(bufV1.queues["1"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["11"].logs)) + assert.Equal(t, 9997, len(bufV1.queues["111"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["50"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["101"].logs)) + assert.Equal(t, 9997, len(bufV1.queues["150"].logs)) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 3, provider.iterations) + assert.Equal(t, 2, provider.currentIteration) + + // we dequeue a maximum of 100 logs + assert.Equal(t, 100, len(payloads)) + + // the dequeue is evenly distributed across selected upkeeps + assert.Equal(t, 9997, len(bufV1.queues["1"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["2"].logs)) + assert.Equal(t, 9997, len(bufV1.queues["3"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["50"].logs)) + assert.Equal(t, 9998, len(bufV1.queues["101"].logs)) + assert.Equal(t, 9997, len(bufV1.queues["150"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["250"].logs)) + assert.Equal(t, 10000, len(bufV1.queues["299"].logs)) + assert.Equal(t, 9999, len(bufV1.queues["300"].logs)) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 3, provider.iterations) + assert.Equal(t, 3, provider.currentIteration) + + // we dequeue a maximum of 100 logs + assert.Equal(t, 100, len(payloads)) + + var remainingLogs int + // at this point, every queue should have had at least one log dequeued + for _, queue := range bufV1.queues { + assert.True(t, len(queue.logs) < 10000) + remainingLogs += len(queue.logs) + } + + // check that across all 300 upkeeps, we have only dequeued 700 of the 3000000 logs (7 dequeue calls of 100 logs) + assert.Equal(t, 2999300, remainingLogs) + }) +} + type mockedPacker struct { } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go index 65a05b2537e..944ded713e2 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go @@ -1187,12 +1187,17 @@ type mockFilterStore struct { UpkeepFilterStore 
HasFn func(id *big.Int) bool RangeFiltersByIDsFn func(iterator func(int, upkeepFilter), ids ...*big.Int) + UpdateFiltersFn func(updater func(upkeepFilter, upkeepFilter) upkeepFilter, filters ...upkeepFilter) } func (s *mockFilterStore) RangeFiltersByIDs(iterator func(int, upkeepFilter), ids ...*big.Int) { s.RangeFiltersByIDsFn(iterator, ids...) } +func (s *mockFilterStore) UpdateFilters(updater func(upkeepFilter, upkeepFilter) upkeepFilter, filters ...upkeepFilter) { + s.UpdateFiltersFn(updater, filters...) +} + func (s *mockFilterStore) Has(id *big.Int) bool { return s.HasFn(id) } From 2d6dc486957f60eec168c13da3451b83820cc16a Mon Sep 17 00:00:00 2001 From: Fergal Gribben Date: Fri, 7 Jun 2024 18:27:24 +0100 Subject: [PATCH 02/10] Dequeue minimum commitment of logs first --- .../v21/logprovider/dequeue_coordinator.go | 100 ++ .../evmregistry/v21/logprovider/provider.go | 68 +- .../v21/logprovider/provider_test.go | 1167 ++++++++++++++++- 3 files changed, 1292 insertions(+), 43 deletions(-) create mode 100644 core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go new file mode 100644 index 00000000000..e1f99a61649 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go @@ -0,0 +1,100 @@ +package logprovider + +import "math/big" + +type dequeueCoordinator struct { + dequeuedMinimum map[int64]bool + notReady map[int64]bool + remainingLogs map[int64]int + dequeuedLogs map[int64]int + completeWindows map[int64]bool + dequeuedUpkeeps map[int64]map[string]int +} + +func (c *dequeueCoordinator) dequeueBlockWindow(start int64, latestBlock int64, blockRate int) (int64, int64, bool) { + // check if minimum logs have been dequeued + for i := start; i <= latestBlock; i += int64(blockRate) { + startWindow, end := getBlockWindow(i, blockRate) + if latestBlock >= end { + c.completeWindows[startWindow] = true + } else if c.notReady[startWindow] { // the window is incomplete and has no logs to provide as of yet + return 0, 0, false + } + + if hasDequeued, ok := c.dequeuedMinimum[startWindow]; !ok || !hasDequeued { + return startWindow, end, true + } + } + + // check best effort dequeue + for i := start; i < latestBlock; i += int64(blockRate) { + startWindow, end := getBlockWindow(i, blockRate) + + if remainingLogs, ok := c.remainingLogs[startWindow]; ok { + if remainingLogs > 0 { + return startWindow, end, true + } + } + } + + return 0, 0, false +} + +// getUpkeepSelector returns a function that accepts an upkeep ID, and performs a modulus against the number of +// iterations, and compares the result against the current iteration. When this comparison returns true, the +// upkeep is selected for the dequeuing. This means that, for a given set of upkeeps, a different subset of +// upkeeps will be dequeued for each iteration once only, and, across all iterations, all upkeeps will be +// dequeued once. 
+func (c *dequeueCoordinator) getUpkeepSelector(startWindow int64, logLimitLow, iterations, currentIteration int) func(id *big.Int) bool { + bestEffort := false + + if hasDequeued, ok := c.dequeuedMinimum[startWindow]; ok { + if hasDequeued { + bestEffort = true + } + } + + return func(id *big.Int) bool { + // query the map of block number to upkeep ID for dequeued count here when the block window is incomplete + dequeueUpkeep := true + if !bestEffort { + if windowUpkeeps, ok := c.dequeuedUpkeeps[startWindow]; ok { + if windowUpkeeps[id.String()] >= logLimitLow { + dequeueUpkeep = false + } + } + } + return dequeueUpkeep && id.Int64()%int64(iterations) == int64(currentIteration) + } +} + +func (c *dequeueCoordinator) trackUpkeeps(startWindow int64, upkeepID *big.Int) { + if windowUpkeeps, ok := c.dequeuedUpkeeps[startWindow]; ok { + windowUpkeeps[upkeepID.String()] = windowUpkeeps[upkeepID.String()] + 1 + c.dequeuedUpkeeps[startWindow] = windowUpkeeps + } else { + c.dequeuedUpkeeps[startWindow] = map[string]int{ + upkeepID.String(): 1, + } + } +} + +func (c *dequeueCoordinator) updateBlockWindow(startWindow int64, logs, remaining, numberOfUpkeeps, logLimitLow int) { + c.remainingLogs[startWindow] = remaining + c.dequeuedLogs[startWindow] += logs + + if isComplete, ok := c.completeWindows[startWindow]; ok { + if isComplete { + // if the window is complete, and there are no more logs, then we have to consider this as min dequeued, even if no logs were dequeued + if c.remainingLogs[startWindow] == 0 || c.dequeuedLogs[startWindow] >= numberOfUpkeeps*logLimitLow { + c.dequeuedMinimum[startWindow] = true + } + } else if c.dequeuedLogs[startWindow] >= numberOfUpkeeps*logLimitLow { // this assumes we don't dequeue the same upkeeps more than logLimitLow in min commitment + c.dequeuedMinimum[startWindow] = true + } + } else if c.dequeuedLogs[startWindow] >= numberOfUpkeeps*logLimitLow { // this assumes we don't dequeue the same upkeeps more than logLimitLow in min commitment + c.dequeuedMinimum[startWindow] = true + } else if logs == 0 && remaining == 0 { + c.notReady[startWindow] = true + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index d2d82ee5aa7..6d23b89fd73 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -118,19 +118,33 @@ type logEventProvider struct { currentIteration int iterations int + + dequeueCoordinator *dequeueCoordinator +} + +func newDequeueCoordinator() *dequeueCoordinator { + return &dequeueCoordinator{ + dequeuedMinimum: map[int64]bool{}, + notReady: map[int64]bool{}, + remainingLogs: map[int64]int{}, + dequeuedLogs: map[int64]int{}, + completeWindows: map[int64]bool{}, + dequeuedUpkeeps: map[int64]map[string]int{}, + } } func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, chainID *big.Int, packer LogDataPacker, filterStore UpkeepFilterStore, opts LogTriggersOptions) *logEventProvider { return &logEventProvider{ - threadCtrl: utils.NewThreadControl(), - lggr: lggr.Named("KeepersRegistry.LogEventProvider"), - packer: packer, - buffer: newLogEventBuffer(lggr, int(opts.LookbackBlocks), defaultNumOfLogUpkeeps, defaultFastExecLogsHigh), - bufferV1: NewLogBuffer(lggr, uint32(opts.LookbackBlocks), opts.BlockRate, opts.LogLimit), - poller: poller, - opts: opts, - filterStore: filterStore, - chainID: chainID, + 
threadCtrl: utils.NewThreadControl(), + lggr: lggr.Named("KeepersRegistry.LogEventProvider"), + packer: packer, + buffer: newLogEventBuffer(lggr, int(opts.LookbackBlocks), defaultNumOfLogUpkeeps, defaultFastExecLogsHigh), + bufferV1: NewLogBuffer(lggr, uint32(opts.LookbackBlocks), opts.BlockRate, opts.LogLimit), + poller: poller, + opts: opts, + filterStore: filterStore, + chainID: chainID, + dequeueCoordinator: newDequeueCoordinator(), } } @@ -306,36 +320,28 @@ func (p *logEventProvider) getLogsFromBuffer(latestBlock int64) []ocr2keepers.Up } } - // upkeepSelectorFn is a function that accepts an upkeep ID, and performs a modulus against the number of - // iterations, and compares the result against the current iteration. When this comparison returns true, the - // upkeep is selected for the dequeuing. This means that, for a given set of upkeeps, a different subset of - // upkeeps will be dequeued for each iteration once only, and, across all iterations, all upkeeps will be - // dequeued once. - upkeepSelectorFn := func(id *big.Int) bool { - return id.Int64()%int64(p.iterations) == int64(p.currentIteration) - } + for len(payloads) < maxResults { + startWindow, end, canDequeue := p.dequeueCoordinator.dequeueBlockWindow(start, latestBlock, blockRate) + if !canDequeue { + p.lggr.Debugw("Nothing to dequeue", "start", start, "latestBlock", latestBlock) + break + } - for len(payloads) < maxResults && start <= latestBlock { - startWindow, end := getBlockWindow(start, blockRate) + upkeepSelectorFn := p.dequeueCoordinator.getUpkeepSelector(startWindow, logLimitLow, p.iterations, p.currentIteration) logs, remaining := p.bufferV1.Dequeue(startWindow, end, logLimitLow, maxResults-len(payloads), upkeepSelectorFn) if len(logs) > 0 { p.lggr.Debugw("Dequeued logs", "start", start, "latestBlock", latestBlock, "logs", len(logs)) - } - for _, l := range logs { - payload, err := p.createPayload(l.ID, l.Log) - if err == nil { - payloads = append(payloads, payload) + for _, l := range logs { + payload, err := p.createPayload(l.ID, l.Log) + if err == nil { + payloads = append(payloads, payload) + } + p.dequeueCoordinator.trackUpkeeps(startWindow, l.ID) } } - if remaining > 0 { - p.lggr.Debugw("Remaining logs", "start", start, "latestBlock", latestBlock, "remaining", remaining) - // TODO: handle remaining logs in a better way than consuming the entire window, e.g. do not repeat more than x times - continue - } - - start += int64(blockRate) + p.dequeueCoordinator.updateBlockWindow(startWindow, len(logs), remaining, numOfUpkeeps, logLimitLow) } p.currentIteration++ default: diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go index 30f64aaa1fe..bb607dc7b29 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go @@ -373,10 +373,7 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { ctx := context.Background() - _, err := provider.GetLatestPayloads(ctx) - assert.NoError(t, err) - - err = provider.ReadLogs(ctx, upkeepIDs...) + err := provider.ReadLogs(ctx, upkeepIDs...) 
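// Editor's aside (not part of this patch): dequeueBlockWindow above scans the
// block range twice: first for any window that has not yet met its minimum
// commitment (bailing out if it reaches an incomplete window marked notReady),
// then for any window that still has leftover logs to serve best effort. The
// toy below replays that two-phase choice over three windows; the windowState
// type and nextWindow function are invented for illustration.
package main

import "fmt"

type windowState struct {
	start, end      int64
	complete        bool
	dequeuedMinimum bool
	notReady        bool
	remaining       int
}

// nextWindow picks the window to dequeue from: minimum commitment first, best
// effort second, mirroring the coordinator's ordering.
func nextWindow(windows []windowState) (int64, int64, bool) {
	for _, w := range windows { // phase 1: windows still owed their minimum
		if !w.complete && w.notReady {
			return 0, 0, false // an incomplete window with nothing to give yet blocks the scan
		}
		if !w.dequeuedMinimum {
			return w.start, w.end, true
		}
	}
	for _, w := range windows { // phase 2: best effort on leftover logs
		if w.remaining > 0 {
			return w.start, w.end, true
		}
	}
	return 0, 0, false
}

func main() {
	windows := []windowState{
		{start: 0, end: 3, complete: true, dequeuedMinimum: true, remaining: 30},
		{start: 4, end: 7, complete: true, dequeuedMinimum: false, remaining: 40},
		{start: 8, end: 11, complete: true, dequeuedMinimum: true, remaining: 0},
	}
	s, e, ok := nextWindow(windows)
	fmt.Println(s, e, ok) // 4 7 true: the window still owed its minimum wins over best effort
}
// --- end of editor's aside ---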
assert.NoError(t, err) assert.Equal(t, 5, provider.bufferV1.NumOfUpkeeps()) @@ -470,10 +467,7 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { ctx := context.Background() - _, err := provider.GetLatestPayloads(ctx) - assert.NoError(t, err) - - err = provider.ReadLogs(ctx, upkeepIDs...) + err := provider.ReadLogs(ctx, upkeepIDs...) assert.NoError(t, err) assert.Equal(t, 200, provider.bufferV1.NumOfUpkeeps()) @@ -597,10 +591,7 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { ctx := context.Background() - _, err := provider.GetLatestPayloads(ctx) - assert.NoError(t, err) - - err = provider.ReadLogs(ctx, upkeepIDs...) + err := provider.ReadLogs(ctx, upkeepIDs...) assert.NoError(t, err) assert.Equal(t, 200, provider.bufferV1.NumOfUpkeeps()) @@ -762,6 +753,1158 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { // check that across all 300 upkeeps, we have only dequeued 700 of the 3000000 logs (7 dequeue calls of 100 logs) assert.Equal(t, 2999300, remainingLogs) }) + + t.Run("minimum guaranteed for all windows followed by best effort", func(t *testing.T) { + oldMaxPayloads := MaxPayloads + MaxPayloads = 10 + defer func() { + MaxPayloads = oldMaxPayloads + }() + + upkeepIDs := []*big.Int{ + big.NewInt(1), + big.NewInt(2), + big.NewInt(3), + big.NewInt(4), + big.NewInt(5), + } + + filterStore := NewUpkeepFilterStore() + + logGenerator := func(start, end int64) []logpoller.Log { + var res []logpoller.Log + for i := start; i < end; i++ { + for j := 0; j < 10; j++ { + res = append(res, logpoller.Log{ + LogIndex: int64(j), + BlockHash: common.HexToHash(fmt.Sprintf("%d", i+1)), + BlockNumber: i + 1, + }) + } + } + return res + } + + // use a log poller that will create logs for the queried block range + logPoller := &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 100, nil + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + // prepare the filter store with upkeeps + for _, upkeepID := range upkeepIDs { + filterStore.AddActiveUpkeeps( + upkeepFilter{ + addr: []byte(upkeepID.String()), + upkeepID: upkeepID, + topics: []common.Hash{ + common.HexToHash(upkeepID.String()), + }, + }, + ) + } + + opts := NewOptions(200, big.NewInt(1)) + opts.BufferVersion = "v1" + + provider := NewLogProvider(logger.TestLogger(t), logPoller, big.NewInt(1), &mockedPacker{}, filterStore, opts) + + ctx := context.Background() + + err := provider.ReadLogs(ctx, upkeepIDs...) 
+ assert.NoError(t, err) + + assert.Equal(t, 5, provider.bufferV1.NumOfUpkeeps()) + + bufV1 := provider.bufferV1.(*logBuffer) + + // each upkeep should have 10 logs * 100 blocks = 1000 logs + assert.Equal(t, 1000, len(bufV1.queues["1"].logs)) + assert.Equal(t, 1000, len(bufV1.queues["2"].logs)) + assert.Equal(t, 1000, len(bufV1.queues["3"].logs)) + assert.Equal(t, 1000, len(bufV1.queues["4"].logs)) + assert.Equal(t, 1000, len(bufV1.queues["5"].logs)) + + payloads, err := provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 998, len(bufV1.queues["1"].logs)) + assert.Equal(t, 998, len(bufV1.queues["2"].logs)) + assert.Equal(t, 998, len(bufV1.queues["3"].logs)) + assert.Equal(t, 998, len(bufV1.queues["4"].logs)) + assert.Equal(t, 998, len(bufV1.queues["5"].logs)) + + blockWindowCounts := map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + blockWindowCounts[l.BlockNumber]++ + } + } + + // all 10 logs should have been dequeued from the first block window + assert.Equal(t, 40, blockWindowCounts[1]) + assert.Equal(t, 50, blockWindowCounts[2]) + assert.Equal(t, 50, blockWindowCounts[3]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 996, len(bufV1.queues["1"].logs)) + assert.Equal(t, 996, len(bufV1.queues["2"].logs)) + assert.Equal(t, 996, len(bufV1.queues["3"].logs)) + assert.Equal(t, 996, len(bufV1.queues["4"].logs)) + assert.Equal(t, 996, len(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + blockWindowCounts[l.BlockNumber]++ + } + } + + // all 10 logs should have been dequeued from the second block window, since the first block window has met it's minimum commitment + assert.Equal(t, 40, blockWindowCounts[1]) + assert.Equal(t, 40, blockWindowCounts[2]) + assert.Equal(t, 50, blockWindowCounts[3]) + + for i := 0; i < 97; i++ { + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + } + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 802, len(bufV1.queues["1"].logs)) + assert.Equal(t, 802, len(bufV1.queues["2"].logs)) + assert.Equal(t, 802, len(bufV1.queues["3"].logs)) + assert.Equal(t, 802, len(bufV1.queues["4"].logs)) + assert.Equal(t, 802, len(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + blockWindowCounts[l.BlockNumber]++ + } + } + + // all 10 logs should have been dequeued from the second block window, since the first block window has met it's minimum commitment + assert.Equal(t, 40, blockWindowCounts[1]) + assert.Equal(t, 40, blockWindowCounts[2]) + assert.Equal(t, 40, blockWindowCounts[3]) + assert.Equal(t, 40, blockWindowCounts[99]) + assert.Equal(t, 50, blockWindowCounts[100]) + + // at this point, all block windows except for the latest block window will have been dequeued + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 800, 
len(bufV1.queues["1"].logs)) + assert.Equal(t, 800, len(bufV1.queues["2"].logs)) + assert.Equal(t, 800, len(bufV1.queues["3"].logs)) + assert.Equal(t, 800, len(bufV1.queues["4"].logs)) + assert.Equal(t, 800, len(bufV1.queues["5"].logs)) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 798, len(bufV1.queues["1"].logs)) + assert.Equal(t, 798, len(bufV1.queues["2"].logs)) + assert.Equal(t, 798, len(bufV1.queues["3"].logs)) + assert.Equal(t, 798, len(bufV1.queues["4"].logs)) + assert.Equal(t, 798, len(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + blockWindowCounts[l.BlockNumber]++ + } + } + + // all 10 logs should have been dequeued from the second block window, since the first block window has met it's minimum commitment + assert.Equal(t, 30, blockWindowCounts[1]) + assert.Equal(t, 40, blockWindowCounts[2]) + assert.Equal(t, 40, blockWindowCounts[3]) + assert.Equal(t, 40, blockWindowCounts[100]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 796, len(bufV1.queues["1"].logs)) + assert.Equal(t, 796, len(bufV1.queues["2"].logs)) + assert.Equal(t, 796, len(bufV1.queues["3"].logs)) + assert.Equal(t, 796, len(bufV1.queues["4"].logs)) + assert.Equal(t, 796, len(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + blockWindowCounts[l.BlockNumber]++ + } + } + + // all 10 logs should have been dequeued from the second block window, since the first block window has met it's minimum commitment + assert.Equal(t, 20, blockWindowCounts[1]) + assert.Equal(t, 40, blockWindowCounts[2]) + assert.Equal(t, 40, blockWindowCounts[3]) + assert.Equal(t, 40, blockWindowCounts[100]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 794, len(bufV1.queues["1"].logs)) + assert.Equal(t, 794, len(bufV1.queues["2"].logs)) + assert.Equal(t, 794, len(bufV1.queues["3"].logs)) + assert.Equal(t, 794, len(bufV1.queues["4"].logs)) + assert.Equal(t, 794, len(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + blockWindowCounts[l.BlockNumber]++ + } + } + + // all 10 logs should have been dequeued from the second block window, since the first block window has met it's minimum commitment + assert.Equal(t, 10, blockWindowCounts[1]) + assert.Equal(t, 40, blockWindowCounts[2]) + assert.Equal(t, 40, blockWindowCounts[3]) + assert.Equal(t, 40, blockWindowCounts[100]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 792, len(bufV1.queues["1"].logs)) + assert.Equal(t, 792, len(bufV1.queues["2"].logs)) + assert.Equal(t, 792, len(bufV1.queues["3"].logs)) + assert.Equal(t, 792, len(bufV1.queues["4"].logs)) + assert.Equal(t, 792, len(bufV1.queues["5"].logs)) + + blockWindowCounts 
= map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + blockWindowCounts[l.BlockNumber]++ + } + } + + // all 10 logs should have been dequeued from the second block window, since the first block window has met it's minimum commitment + assert.Equal(t, 0, blockWindowCounts[1]) + assert.Equal(t, 40, blockWindowCounts[2]) + assert.Equal(t, 40, blockWindowCounts[3]) + assert.Equal(t, 40, blockWindowCounts[100]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 790, len(bufV1.queues["1"].logs)) + assert.Equal(t, 790, len(bufV1.queues["2"].logs)) + assert.Equal(t, 790, len(bufV1.queues["3"].logs)) + assert.Equal(t, 790, len(bufV1.queues["4"].logs)) + assert.Equal(t, 790, len(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + blockWindowCounts[l.BlockNumber]++ + } + } + + // all 10 logs should have been dequeued from the second block window, since the first block window has met it's minimum commitment + assert.Equal(t, 0, blockWindowCounts[1]) + assert.Equal(t, 30, blockWindowCounts[2]) + assert.Equal(t, 40, blockWindowCounts[3]) + assert.Equal(t, 40, blockWindowCounts[100]) + }) + + t.Run("minimum guaranteed for all windows including an incomplete window followed by best effort", func(t *testing.T) { + oldMaxPayloads := MaxPayloads + MaxPayloads = 10 + defer func() { + MaxPayloads = oldMaxPayloads + }() + + upkeepIDs := []*big.Int{ + big.NewInt(1), + big.NewInt(2), + big.NewInt(3), + big.NewInt(4), + big.NewInt(5), + } + + filterStore := NewUpkeepFilterStore() + + logGenerator := func(start, end int64) []logpoller.Log { + var res []logpoller.Log + for i := start; i <= end; i++ { + logsToAdd := 10 + if i >= 100 { + logsToAdd = 1 + } + for j := 0; j < logsToAdd; j++ { + res = append(res, logpoller.Log{ + LogIndex: int64(j), + BlockHash: common.HexToHash(fmt.Sprintf("%d", i)), + BlockNumber: i, + }) + } + } + return res + } + + // use a log poller that will create logs for the queried block range + logPoller := &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 102, nil // make the latest window incomplete + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + // prepare the filter store with upkeeps + for _, upkeepID := range upkeepIDs { + filterStore.AddActiveUpkeeps( + upkeepFilter{ + addr: []byte(upkeepID.String()), + upkeepID: upkeepID, + topics: []common.Hash{ + common.HexToHash(upkeepID.String()), + }, + }, + ) + } + + opts := NewOptions(200, big.NewInt(1)) + opts.BufferVersion = "v1" + opts.BlockRate = 4 // block window will be 4 blocks big + + provider := NewLogProvider(logger.TestLogger(t), logPoller, big.NewInt(1), &mockedPacker{}, filterStore, opts) + + ctx := context.Background() + + err := provider.ReadLogs(ctx, upkeepIDs...) 
+ assert.NoError(t, err) + + assert.Equal(t, 5, provider.bufferV1.NumOfUpkeeps()) + + blockWindowCounts := map[int64]int{} + + bufV1 := provider.bufferV1.(*logBuffer) + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + // all 10 logs should have been dequeued from the first block window + assert.Equal(t, 150, blockWindowCounts[0]) // block 0 is outside the block threshold of 1 and is not enqueued + assert.Equal(t, 200, blockWindowCounts[4]) + assert.Equal(t, 200, blockWindowCounts[8]) + assert.Equal(t, 15, blockWindowCounts[100]) // the block window starting at block 100 is only 3/4 complete as of block 102 + + // each upkeep should have 10 logs * 102 blocks = 1020 logs + assert.Equal(t, 993, len(bufV1.queues["1"].logs)) + assert.Equal(t, 993, len(bufV1.queues["2"].logs)) + assert.Equal(t, 993, len(bufV1.queues["3"].logs)) + assert.Equal(t, 993, len(bufV1.queues["4"].logs)) + assert.Equal(t, 993, len(bufV1.queues["5"].logs)) + + payloads, err := provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 991, len(bufV1.queues["1"].logs)) + assert.Equal(t, 991, len(bufV1.queues["2"].logs)) + assert.Equal(t, 991, len(bufV1.queues["3"].logs)) + assert.Equal(t, 991, len(bufV1.queues["4"].logs)) + assert.Equal(t, 991, len(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + // all 10 logs should have been dequeued from the first block window + assert.Equal(t, 140, blockWindowCounts[0]) + assert.Equal(t, 200, blockWindowCounts[4]) + assert.Equal(t, 200, blockWindowCounts[8]) + assert.Equal(t, 15, blockWindowCounts[100]) // the block window starting at block 100 is only 3/4 complete as of block 102 + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 989, len(bufV1.queues["1"].logs)) + assert.Equal(t, 989, len(bufV1.queues["2"].logs)) + assert.Equal(t, 989, len(bufV1.queues["3"].logs)) + assert.Equal(t, 989, len(bufV1.queues["4"].logs)) + assert.Equal(t, 989, len(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + // all 10 logs should have been dequeued from the second block window, since the first block window has met it's minimum commitment + assert.Equal(t, 140, blockWindowCounts[0]) + assert.Equal(t, 190, blockWindowCounts[4]) + assert.Equal(t, 200, blockWindowCounts[8]) + + for i := 0; i < 23; i++ { + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + } + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 943, len(bufV1.queues["1"].logs)) + assert.Equal(t, 943, len(bufV1.queues["2"].logs)) + assert.Equal(t, 943, len(bufV1.queues["3"].logs)) + assert.Equal(t, 943, len(bufV1.queues["4"].logs)) + assert.Equal(t, 943, len(bufV1.queues["5"].logs)) + + blockWindowCounts = 
map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + assert.Equal(t, 140, blockWindowCounts[0]) + assert.Equal(t, 190, blockWindowCounts[4]) + assert.Equal(t, 190, blockWindowCounts[8]) + assert.Equal(t, 190, blockWindowCounts[96]) + assert.Equal(t, 15, blockWindowCounts[100]) // still not been dequeued at this point + + // at this point, all block windows except for the latest block window will have been dequeued + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 941, len(bufV1.queues["1"].logs)) + assert.Equal(t, 941, len(bufV1.queues["2"].logs)) + assert.Equal(t, 941, len(bufV1.queues["3"].logs)) + assert.Equal(t, 941, len(bufV1.queues["4"].logs)) + assert.Equal(t, 941, len(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + assert.Equal(t, 140, blockWindowCounts[0]) + assert.Equal(t, 190, blockWindowCounts[4]) + assert.Equal(t, 190, blockWindowCounts[8]) + assert.Equal(t, 190, blockWindowCounts[96]) + assert.Equal(t, 5, blockWindowCounts[100]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 939, len(bufV1.queues["1"].logs)) + assert.Equal(t, 939, len(bufV1.queues["2"].logs)) + assert.Equal(t, 939, len(bufV1.queues["3"].logs)) + assert.Equal(t, 939, len(bufV1.queues["4"].logs)) + assert.Equal(t, 939, len(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + assert.Equal(t, 130, blockWindowCounts[0]) + assert.Equal(t, 190, blockWindowCounts[4]) + assert.Equal(t, 190, blockWindowCounts[8]) + assert.Equal(t, 190, blockWindowCounts[96]) + assert.Equal(t, 5, blockWindowCounts[100]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 937, len(bufV1.queues["1"].logs)) + assert.Equal(t, 937, len(bufV1.queues["2"].logs)) + assert.Equal(t, 937, len(bufV1.queues["3"].logs)) + assert.Equal(t, 937, len(bufV1.queues["4"].logs)) + assert.Equal(t, 937, len(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + assert.Equal(t, 120, blockWindowCounts[0]) // first block window is repeatedly dequeued as best effort + assert.Equal(t, 190, blockWindowCounts[4]) + assert.Equal(t, 190, blockWindowCounts[8]) + assert.Equal(t, 190, blockWindowCounts[96]) + assert.Equal(t, 5, blockWindowCounts[100]) + + provider.poller = &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 103, nil // make the latest window incomplete + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, 
eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 935, len(bufV1.queues["1"].logs)) + assert.Equal(t, 935, len(bufV1.queues["2"].logs)) + assert.Equal(t, 935, len(bufV1.queues["3"].logs)) + assert.Equal(t, 935, len(bufV1.queues["4"].logs)) + assert.Equal(t, 935, len(bufV1.queues["5"].logs)) + + for i := 0; i < 467; i++ { + _, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + } + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + assert.Equal(t, 0, blockWindowCounts[0]) // first block window is repeatedly dequeued as best effort + assert.Equal(t, 0, blockWindowCounts[4]) + assert.Equal(t, 0, blockWindowCounts[8]) + assert.Equal(t, 0, blockWindowCounts[96]) + assert.Equal(t, 5, blockWindowCounts[100]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 5, len(payloads)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + assert.Equal(t, 0, blockWindowCounts[0]) + assert.Equal(t, 0, blockWindowCounts[4]) + assert.Equal(t, 0, blockWindowCounts[8]) + assert.Equal(t, 0, blockWindowCounts[96]) + assert.Equal(t, 0, blockWindowCounts[100]) + }) + + t.Run("a complete window with no logs present is immediately marked as having the min logs dequeued, logs are dequeued from the next window", func(t *testing.T) { + oldMaxPayloads := MaxPayloads + MaxPayloads = 10 + defer func() { + MaxPayloads = oldMaxPayloads + }() + + upkeepIDs := []*big.Int{ + big.NewInt(1), + big.NewInt(2), + big.NewInt(3), + big.NewInt(4), + big.NewInt(5), + } + + filterStore := NewUpkeepFilterStore() + + logGenerator := func(start, end int64) []logpoller.Log { + var res []logpoller.Log + for i := start + 4; i <= end; i++ { + logsToAdd := 10 + if i >= 100 { + logsToAdd = 1 + } + for j := 0; j < logsToAdd; j++ { + res = append(res, logpoller.Log{ + LogIndex: int64(j), + BlockHash: common.HexToHash(fmt.Sprintf("%d", i)), + BlockNumber: i, + }) + } + } + return res + } + + // use a log poller that will create logs for the queried block range + logPoller := &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 99, nil // make the latest window incomplete + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + // prepare the filter store with upkeeps + for _, upkeepID := range upkeepIDs { + filterStore.AddActiveUpkeeps( + upkeepFilter{ + addr: []byte(upkeepID.String()), + upkeepID: upkeepID, + topics: []common.Hash{ + common.HexToHash(upkeepID.String()), + }, + }, + ) + } + + opts := NewOptions(200, big.NewInt(1)) + opts.BufferVersion = "v1" + opts.BlockRate = 4 // block window will be 4 blocks big + + provider := NewLogProvider(logger.TestLogger(t), logPoller, big.NewInt(1), &mockedPacker{}, filterStore, opts) + + ctx := context.Background() + + err := provider.ReadLogs(ctx, upkeepIDs...) 
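+			// ReadLogs above pulls from the mocked poller: the generator skips the
+			// first four blocks (i starts at start+4), so the window [0,3] is
+			// complete but empty, while every later full window holds
+			// 4 blocks * 10 logs * 5 upkeeps = 200 logs (asserted below).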
+ assert.NoError(t, err) + + assert.Equal(t, 5, provider.bufferV1.NumOfUpkeeps()) + + bufV1 := provider.bufferV1.(*logBuffer) + + blockWindowCounts := map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + // all 10 logs should have been dequeued from the first block window + assert.Equal(t, 0, blockWindowCounts[0]) + assert.Equal(t, 200, blockWindowCounts[4]) + assert.Equal(t, 200, blockWindowCounts[8]) + assert.Equal(t, 200, blockWindowCounts[96]) + + payloads, err := provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 10, len(payloads)) + + // the first block window does not contain any logs, so it automatically gets marked as having the minimum dequeued + assert.True(t, true, provider.dequeueCoordinator.dequeuedMinimum[0]) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + // all 10 logs should have been dequeued from the second block window + assert.Equal(t, 0, blockWindowCounts[0]) + assert.Equal(t, 190, blockWindowCounts[4]) + assert.Equal(t, 200, blockWindowCounts[8]) + assert.Equal(t, 200, blockWindowCounts[96]) + }) + + t.Run("an incomplete window with no logs present is marked as not ready then min dequeued when the window is complete", func(t *testing.T) { + oldMaxPayloads := MaxPayloads + MaxPayloads = 10 + defer func() { + MaxPayloads = oldMaxPayloads + }() + + upkeepIDs := []*big.Int{ + big.NewInt(1), + big.NewInt(2), + big.NewInt(3), + big.NewInt(4), + big.NewInt(5), + } + + filterStore := NewUpkeepFilterStore() + + logGenerator := func(start, end int64) []logpoller.Log { + var res []logpoller.Log + for i := start + 4; i <= end; i++ { + logsToAdd := 10 + if i >= 100 { + logsToAdd = 1 + } + for j := 0; j < logsToAdd; j++ { + res = append(res, logpoller.Log{ + LogIndex: int64(j), + BlockHash: common.HexToHash(fmt.Sprintf("%d", i)), + BlockNumber: i, + }) + } + } + return res + } + + // use a log poller that will create logs for the queried block range + logPoller := &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 2, nil // make the latest window incomplete + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + // prepare the filter store with upkeeps + for _, upkeepID := range upkeepIDs { + filterStore.AddActiveUpkeeps( + upkeepFilter{ + addr: []byte(upkeepID.String()), + upkeepID: upkeepID, + topics: []common.Hash{ + common.HexToHash(upkeepID.String()), + }, + }, + ) + } + + opts := NewOptions(200, big.NewInt(1)) + opts.BufferVersion = "v1" + opts.BlockRate = 4 // block window will be 4 blocks big + + provider := NewLogProvider(logger.TestLogger(t), logPoller, big.NewInt(1), &mockedPacker{}, filterStore, opts) + + ctx := context.Background() + + err := provider.ReadLogs(ctx, upkeepIDs...) 
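+			// With a latest block of 2, the first window [0,3] is still incomplete
+			// and, because the generator only emits logs from block start+4 onwards,
+			// it is also empty. The coordinator is expected to keep it notReady (and
+			// not yet dequeuedMinimum) until block 3 completes the window, as the
+			// assertions below check.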
+ assert.NoError(t, err) + + assert.Equal(t, 5, provider.bufferV1.NumOfUpkeeps()) + + bufV1 := provider.bufferV1.(*logBuffer) + + blockWindowCounts := map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + assert.Equal(t, 0, blockWindowCounts[0]) + + payloads, err := provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 0, len(payloads)) + + assert.Equal(t, false, provider.dequeueCoordinator.dequeuedMinimum[0]) + assert.Equal(t, true, provider.dequeueCoordinator.notReady[0]) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + // all 10 logs should have been dequeued from the second block window + assert.Equal(t, 0, blockWindowCounts[0]) + + provider.poller = &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 3, nil // make the latest window incomplete + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 0, len(payloads)) + + assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[0]) // now that the window is complete, it should be marked as dequeued minimum + assert.Equal(t, true, provider.dequeueCoordinator.notReady[0]) + + provider.poller = &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 7, nil // make the latest window incomplete + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + err = provider.ReadLogs(ctx, upkeepIDs...) + assert.NoError(t, err) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 10, len(payloads)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + assert.Equal(t, 0, blockWindowCounts[0]) + assert.Equal(t, 190, blockWindowCounts[4]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 10, len(payloads)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + assert.Equal(t, 0, blockWindowCounts[0]) + assert.Equal(t, 180, blockWindowCounts[4]) + + provider.poller = &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 11, nil // make the latest window incomplete + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + err = provider.ReadLogs(ctx, upkeepIDs...) 
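+			// At a latest block of 11 the windows starting at blocks 0, 4 and 8 are
+			// all complete. The dequeue below takes 10 logs from the window starting
+			// at block 8 (leaving 190 of 200), after which all three windows should
+			// be marked as having had their minimum dequeued.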
+ assert.NoError(t, err) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 10, len(payloads)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + assert.Equal(t, 0, blockWindowCounts[0]) + assert.Equal(t, 180, blockWindowCounts[4]) + assert.Equal(t, 190, blockWindowCounts[8]) + + assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[0]) + assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[4]) + assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[8]) + }) + + t.Run("an incomplete window with minimum logs already present is marked as min dequeued", func(t *testing.T) { + oldMaxPayloads := MaxPayloads + MaxPayloads = 10 + defer func() { + MaxPayloads = oldMaxPayloads + }() + + upkeepIDs := []*big.Int{ + big.NewInt(1), + big.NewInt(2), + big.NewInt(3), + big.NewInt(4), + big.NewInt(5), + } + + filterStore := NewUpkeepFilterStore() + + logGenerator := func(start, end int64) []logpoller.Log { + var res []logpoller.Log + for i := start; i <= end; i++ { + logsToAdd := 10 + for j := 0; j < logsToAdd; j++ { + res = append(res, logpoller.Log{ + LogIndex: int64(j), + BlockHash: common.HexToHash(fmt.Sprintf("%d", i)), + BlockNumber: i, + }) + } + } + return res + } + + // use a log poller that will create logs for the queried block range + logPoller := &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 2, nil // make the latest window incomplete + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + // prepare the filter store with upkeeps + for _, upkeepID := range upkeepIDs { + filterStore.AddActiveUpkeeps( + upkeepFilter{ + addr: []byte(upkeepID.String()), + upkeepID: upkeepID, + topics: []common.Hash{ + common.HexToHash(upkeepID.String()), + }, + }, + ) + } + + opts := NewOptions(200, big.NewInt(1)) + opts.BufferVersion = "v1" + opts.BlockRate = 4 // block window will be 4 blocks big + + provider := NewLogProvider(logger.TestLogger(t), logPoller, big.NewInt(1), &mockedPacker{}, filterStore, opts) + + ctx := context.Background() + + err := provider.ReadLogs(ctx, upkeepIDs...) 
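+			// Here the generator emits logs from block start onwards, so even though
+			// the first window [0,3] is incomplete at block 2 it already holds
+			// 2 blocks * 10 logs * 5 upkeeps = 100 logs (block 0 is dropped on
+			// enqueue). The first dequeue below can therefore take the minimum right
+			// away and the window is marked as dequeuedMinimum while still incomplete.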
+ assert.NoError(t, err) + + assert.Equal(t, 5, provider.bufferV1.NumOfUpkeeps()) + + bufV1 := provider.bufferV1.(*logBuffer) + + blockWindowCounts := map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + assert.Equal(t, 100, blockWindowCounts[0]) // 100 logs because blocks 0, 1, 2 exist, 0 is omitted in enqueue, so blocks 1 and 2 have 10x5 logs each + + payloads, err := provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 10, len(payloads)) + + assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[0]) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + // all 10 logs should have been dequeued from the first block window + assert.Equal(t, 90, blockWindowCounts[0]) + + logGenerator = func(start, end int64) []logpoller.Log { + var res []logpoller.Log + for i := start + 4; i <= end; i++ { + logsToAdd := 10 + for j := 0; j < logsToAdd; j++ { + res = append(res, logpoller.Log{ + LogIndex: int64(j), + BlockHash: common.HexToHash(fmt.Sprintf("%d", i)), + BlockNumber: i, + }) + } + } + return res + } + + provider.poller = &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 7, nil // make the latest window incomplete + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + err = provider.ReadLogs(ctx, upkeepIDs...) + assert.NoError(t, err) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 10, len(payloads)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + assert.Equal(t, 90, blockWindowCounts[0]) + assert.Equal(t, 190, blockWindowCounts[4]) + + assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[0]) + assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[4]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 10, len(payloads)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + assert.Equal(t, 80, blockWindowCounts[0]) + assert.Equal(t, 190, blockWindowCounts[4]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 10, len(payloads)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for _, l := range q.logs { + startWindow, _ := getBlockWindow(l.BlockNumber, 4) + + blockWindowCounts[startWindow]++ + } + } + + assert.Equal(t, 70, blockWindowCounts[0]) + assert.Equal(t, 190, blockWindowCounts[4]) + }) } type mockedPacker struct { From 0a7bf2c733cafa74b419d66264955e59b61ed947 Mon Sep 17 00:00:00 2001 From: Fergal Gribben Date: Fri, 7 Jun 2024 18:27:24 +0100 Subject: [PATCH 03/10] Handle reorgs in the provider --- .../evmregistry/v21/logprovider/buffer_v1.go | 194 ++-- .../v21/logprovider/buffer_v1_test.go | 78 +- .../v21/logprovider/dequeue_coordinator.go | 27 +- .../evmregistry/v21/logprovider/log.go | 11 +- 
.../evmregistry/v21/logprovider/provider.go | 5 +- .../v21/logprovider/provider_test.go | 901 +++++++++++++----- 6 files changed, 835 insertions(+), 381 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index 617f1ce2358..b284be1d2eb 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -1,7 +1,6 @@ package logprovider import ( - "math" "math/big" "sort" "sync" @@ -65,10 +64,6 @@ func (o *logBufferOptions) override(lookback, blockRate, logLimit uint32) { o.blockRate.Store(blockRate) } -func (o *logBufferOptions) windows() int { - return int(math.Ceil(float64(o.lookback.Load()) / float64(o.blockRate.Load()))) -} - type logBuffer struct { lggr logger.Logger opts *logBufferOptions @@ -79,19 +74,23 @@ type logBuffer struct { lock sync.RWMutex // map for then number of times we have enqueued logs for a block number - enqueuedBlocks map[int64]map[string]int - enqueuedBlockLock sync.RWMutex - queueIDs []string + enqueuedBlocks map[int64]map[string]int + enqueuedBlockLock sync.RWMutex + queueIDs []string + blockHashes map[int64]string + dequeueCoordinator *dequeueCoordinator } -func NewLogBuffer(lggr logger.Logger, lookback, blockRate, logLimit uint32) LogBuffer { +func NewLogBuffer(lggr logger.Logger, lookback, blockRate, logLimit uint32, dequeueCoordinator *dequeueCoordinator) LogBuffer { return &logBuffer{ - lggr: lggr.Named("KeepersRegistry.LogEventBufferV1"), - opts: newLogBufferOptions(lookback, blockRate, logLimit), - lastBlockSeen: new(atomic.Int64), - enqueuedBlocks: map[int64]map[string]int{}, - queueIDs: []string{}, - queues: make(map[string]*upkeepLogQueue), + lggr: lggr.Named("KeepersRegistry.LogEventBufferV1"), + opts: newLogBufferOptions(lookback, blockRate, logLimit), + lastBlockSeen: new(atomic.Int64), + enqueuedBlocks: map[int64]map[string]int{}, + queueIDs: []string{}, + queues: make(map[string]*upkeepLogQueue), + blockHashes: map[int64]string{}, + dequeueCoordinator: dequeueCoordinator, } } @@ -107,7 +106,11 @@ func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { b.setUpkeepQueue(uid, buf) } - latestLogBlock, uniqueBlocks := blockStatistics(logs...) + latestLogBlock, uniqueBlocks, reorgBlocks := b.blockStatistics(logs...) + if len(reorgBlocks) > 0 { + b.evictReorgdLogs(reorgBlocks) + } + if lastBlockSeen := b.lastBlockSeen.Load(); lastBlockSeen < latestLogBlock { b.lastBlockSeen.Store(latestLogBlock) } else if latestLogBlock < lastBlockSeen { @@ -126,6 +129,17 @@ func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { return buf.enqueue(blockThreshold, logs...) 
} +func (b *logBuffer) evictReorgdLogs(reorgBlocks map[int64]bool) { + for _, queue := range b.queues { + for blockNumber := range reorgBlocks { + if _, ok := queue.logs[blockNumber]; ok { + queue.logs[blockNumber] = []logpoller.Log{} + b.dequeueCoordinator.markReorg(blockNumber, b.opts.blockRate.Load()) + } + } + } +} + func (b *logBuffer) cleanupEnqueuedBlocks(blockThreshold int64) { b.enqueuedBlockLock.Lock() defer b.enqueuedBlockLock.Unlock() @@ -309,21 +323,23 @@ type upkeepLogQueue struct { opts *logBufferOptions // logs is the buffer of logs for the upkeep - logs []logpoller.Log + logs map[int64][]logpoller.Log // states keeps track of the state of the logs that are known to the queue // and the block number they were seen at - states map[string]logTriggerStateEntry - lock sync.RWMutex + blockNumbers []int64 + states map[string]logTriggerStateEntry + lock sync.RWMutex } func newUpkeepLogQueue(lggr logger.Logger, id *big.Int, opts *logBufferOptions) *upkeepLogQueue { - maxLogs := int(opts.windowLimit.Load()) * opts.windows() // limit per window * windows + //maxLogs := int(opts.windowLimit.Load()) * opts.windows() // limit per window * windows return &upkeepLogQueue{ - lggr: lggr.With("upkeepID", id.String()), - id: id, - opts: opts, - logs: make([]logpoller.Log, 0, maxLogs), - states: make(map[string]logTriggerStateEntry), + lggr: lggr.With("upkeepID", id.String()), + id: id, + opts: opts, + logs: map[int64][]logpoller.Log{}, + blockNumbers: make([]int64, 0), + states: make(map[string]logTriggerStateEntry), } } @@ -333,9 +349,9 @@ func (q *upkeepLogQueue) sizeOfRange(start, end int64) int { defer q.lock.RUnlock() size := 0 - for _, l := range q.logs { - if l.BlockNumber >= start && l.BlockNumber <= end { - size++ + for blockNumber, logs := range q.logs { + if blockNumber >= start && blockNumber <= end { + size += len(logs) } } return size @@ -353,25 +369,30 @@ func (q *upkeepLogQueue) dequeue(start, end int64, limit int) ([]logpoller.Log, var results []logpoller.Log var remaining int - updatedLogs := make([]logpoller.Log, 0) - for _, l := range q.logs { - if l.BlockNumber >= start && l.BlockNumber <= end { - if len(results) < limit { - results = append(results, l) - lid := logID(l) - if s, ok := q.states[lid]; ok { - s.state = logTriggerStateDequeued - q.states[lid] = s + + for _, blockNumber := range q.blockNumbers { + if blockNumber >= start && blockNumber <= end { + updatedLogs := make([]logpoller.Log, 0) + + for _, l := range q.logs[blockNumber] { + if len(results) < limit { + results = append(results, l) + lid := logID(l) + if s, ok := q.states[lid]; ok { + s.state = logTriggerStateDequeued + q.states[lid] = s + } + } else { + remaining++ + updatedLogs = append(updatedLogs, l) } - continue } - remaining++ + + q.logs[blockNumber] = updatedLogs } - updatedLogs = append(updatedLogs, l) } if len(results) > 0 { - q.logs = updatedLogs q.lggr.Debugw("Dequeued logs", "start", start, "end", end, "limit", limit, "results", len(results), "remaining", remaining) } @@ -401,7 +422,13 @@ func (q *upkeepLogQueue) enqueue(blockThreshold int64, logsToAdd ...logpoller.Lo } q.states[lid] = logTriggerStateEntry{state: logTriggerStateEnqueued, block: log.BlockNumber} added++ - logs = append(logs, log) + if logList, ok := q.logs[log.BlockNumber]; ok { + logList = append(logList, log) + q.logs[log.BlockNumber] = logList + } else { + q.logs[log.BlockNumber] = []logpoller.Log{log} + q.blockNumbers = append(q.blockNumbers, log.BlockNumber) + } } q.logs = logs @@ -424,9 +451,13 @@ func (q *upkeepLogQueue) 
orderLogs() { // sort logs by block number, tx hash and log index // to keep the q sorted and to ensure that logs can be // grouped by block windows for the cleanup - sort.SliceStable(q.logs, func(i, j int) bool { - return LogSorter(q.logs[i], q.logs[j]) - }) + for _, blockNumber := range q.blockNumbers { + toSort := q.logs[blockNumber] + sort.SliceStable(toSort, func(i, j int) bool { + return LogSorter(toSort[i], toSort[j]) + }) + q.logs[blockNumber] = toSort + } } // clean removes logs that are older than blockThreshold and drops logs if the limit for the @@ -436,46 +467,49 @@ func (q *upkeepLogQueue) clean(blockThreshold int64) int { var dropped, expired int blockRate := int(q.opts.blockRate.Load()) windowLimit := int(q.opts.windowLimit.Load()) - updated := make([]logpoller.Log, 0) // helper variables to keep track of the current window capacity currentWindowCapacity, currentWindowStart := 0, int64(0) - for _, l := range q.logs { - if blockThreshold > l.BlockNumber { // old log, removed - prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionExpired).Inc() - // q.lggr.Debugw("Expiring old log", "blockNumber", l.BlockNumber, "blockThreshold", blockThreshold, "logIndex", l.LogIndex) - logid := logID(l) - delete(q.states, logid) - expired++ - continue - } - start, _ := getBlockWindow(l.BlockNumber, blockRate) - if start != currentWindowStart { - // new window, reset capacity - currentWindowStart = start - currentWindowCapacity = 0 - } - currentWindowCapacity++ - // if capacity has been reached, drop the log - if currentWindowCapacity > windowLimit { - lid := logID(l) - if s, ok := q.states[lid]; ok { - s.state = logTriggerStateDropped - q.states[lid] = s + for blockNumber, logs := range q.logs { + updated := make([]logpoller.Log, 0) + + for _, l := range logs { + if blockThreshold > l.BlockNumber { // old log, removed + prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionExpired).Inc() + // q.lggr.Debugw("Expiring old log", "blockNumber", l.BlockNumber, "blockThreshold", blockThreshold, "logIndex", l.LogIndex) + logid := logID(l) + delete(q.states, logid) + expired++ + continue } - dropped++ - prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionDropped).Inc() - q.lggr.Debugw("Reached log buffer limits, dropping log", "blockNumber", l.BlockNumber, - "blockHash", l.BlockHash, "txHash", l.TxHash, "logIndex", l.LogIndex, "len updated", len(updated), - "currentWindowStart", currentWindowStart, "currentWindowCapacity", currentWindowCapacity, - "maxLogsPerWindow", windowLimit, "blockRate", blockRate) - continue + start, _ := getBlockWindow(l.BlockNumber, blockRate) + if start != currentWindowStart { + // new window, reset capacity + currentWindowStart = start + currentWindowCapacity = 0 + } + currentWindowCapacity++ + // if capacity has been reached, drop the log + if currentWindowCapacity > windowLimit { + lid := logID(l) + if s, ok := q.states[lid]; ok { + s.state = logTriggerStateDropped + q.states[lid] = s + } + dropped++ + prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionDropped).Inc() + q.lggr.Debugw("Reached log buffer limits, dropping log", "blockNumber", l.BlockNumber, + "blockHash", l.BlockHash, "txHash", l.TxHash, "logIndex", l.LogIndex, "len updated", len(updated), + "currentWindowStart", currentWindowStart, "currentWindowCapacity", currentWindowCapacity, + "maxLogsPerWindow", windowLimit, "blockRate", blockRate) + continue + } + updated = 
append(updated, l) } - updated = append(updated, l) - } - if dropped > 0 || expired > 0 { - q.lggr.Debugw("Cleaned logs", "dropped", dropped, "expired", expired, "blockThreshold", blockThreshold, "len updated", len(updated), "len before", len(q.logs)) - q.logs = updated + if dropped > 0 || expired > 0 { + q.logs[blockNumber] = updated + q.lggr.Debugw("Cleaned logs", "dropped", dropped, "expired", expired, "blockThreshold", blockThreshold, "len updated", len(updated), "len before", len(q.logs)) + } } q.cleanStates(blockThreshold) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go index eba78947f1a..755f5328a48 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go @@ -15,7 +15,7 @@ import ( ) func TestLogEventBufferV1(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1) + buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1, newDequeueCoordinator()) buf.Enqueue(big.NewInt(1), logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, @@ -35,7 +35,7 @@ func TestLogEventBufferV1(t *testing.T) { } func TestLogEventBufferV1_SyncFilters(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1) + buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1, newDequeueCoordinator()) buf.Enqueue(big.NewInt(1), logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, @@ -87,7 +87,7 @@ func TestLogEventBufferV1_EnqueueViolations(t *testing.T) { }, } - logBufferV1 := NewLogBuffer(readableLogger, 10, 20, 1) + logBufferV1 := NewLogBuffer(readableLogger, 10, 20, 1, newDequeueCoordinator()) buf := logBufferV1.(*logBuffer) @@ -118,7 +118,7 @@ func TestLogEventBufferV1_EnqueueViolations(t *testing.T) { }, } - logBufferV1 := NewLogBuffer(readableLogger, 10, 20, 1) + logBufferV1 := NewLogBuffer(readableLogger, 10, 20, 1, newDequeueCoordinator()) buf := logBufferV1.(*logBuffer) @@ -229,7 +229,7 @@ func TestLogEventBufferV1_Dequeue(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), uint32(tc.lookback), uint32(tc.args.blockRate), uint32(tc.args.upkeepLimit)) + buf := NewLogBuffer(logger.TestLogger(t), uint32(tc.lookback), uint32(tc.args.blockRate), uint32(tc.args.upkeepLimit), newDequeueCoordinator()) for id, logs := range tc.logsInBuffer { added, dropped := buf.Enqueue(id, logs...) 
require.Equal(t, len(logs), added+dropped) @@ -247,7 +247,7 @@ func TestLogEventBufferV1_Dequeue_highLoad(t *testing.T) { lookback := uint32(20) blockRate := uint32(1) logLimit := uint32(1) - buf := NewLogBuffer(logger.TestLogger(t), lookback, blockRate, logLimit) + buf := NewLogBuffer(logger.TestLogger(t), lookback, blockRate, logLimit, newDequeueCoordinator()) upkeepIDs := []*big.Int{ big.NewInt(1), @@ -281,11 +281,11 @@ func TestLogEventBufferV1_Dequeue_highLoad(t *testing.T) { assert.Equal(t, 5, len(bufV1.queues)) // each queue should have 100 logs - assert.Equal(t, 100, len(bufV1.queues["1"].logs)) - assert.Equal(t, 100, len(bufV1.queues["2"].logs)) - assert.Equal(t, 100, len(bufV1.queues["3"].logs)) - assert.Equal(t, 100, len(bufV1.queues["4"].logs)) - assert.Equal(t, 100, len(bufV1.queues["5"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["5"].logs)) maxResults := 5 iterations := int(math.Ceil(float64(numUpkeeps*5) / float64(maxResults))) @@ -302,11 +302,11 @@ func TestLogEventBufferV1_Dequeue_highLoad(t *testing.T) { assert.Equal(t, 5, len(logs)) assert.Equal(t, 15, remaining) - assert.Equal(t, 100, len(bufV1.queues["1"].logs)) - assert.Equal(t, 100, len(bufV1.queues["2"].logs)) - assert.Equal(t, 100, len(bufV1.queues["3"].logs)) - assert.Equal(t, 100, len(bufV1.queues["4"].logs)) - assert.Equal(t, 95, len(bufV1.queues["5"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["5"].logs)) upkeepSelectorFn = func(id *big.Int) bool { return id.Int64()%int64(iterations) == int64(1) // on this dequeue attempt, current iteration will be 1 @@ -318,11 +318,11 @@ func TestLogEventBufferV1_Dequeue_highLoad(t *testing.T) { assert.Equal(t, 5, len(logs)) assert.Equal(t, 15, remaining) - assert.Equal(t, 95, len(bufV1.queues["1"].logs)) - assert.Equal(t, 100, len(bufV1.queues["2"].logs)) - assert.Equal(t, 100, len(bufV1.queues["3"].logs)) - assert.Equal(t, 100, len(bufV1.queues["4"].logs)) - assert.Equal(t, 95, len(bufV1.queues["5"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["5"].logs)) upkeepSelectorFn = func(id *big.Int) bool { return id.Int64()%int64(iterations) == int64(2) // on this dequeue attempt, current iteration will be 2 @@ -334,11 +334,11 @@ func TestLogEventBufferV1_Dequeue_highLoad(t *testing.T) { assert.Equal(t, 5, len(logs)) assert.Equal(t, 15, remaining) - assert.Equal(t, 95, len(bufV1.queues["1"].logs)) - assert.Equal(t, 95, len(bufV1.queues["2"].logs)) - assert.Equal(t, 100, len(bufV1.queues["3"].logs)) - assert.Equal(t, 100, len(bufV1.queues["4"].logs)) - assert.Equal(t, 95, len(bufV1.queues["5"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 95, 
countLogs(bufV1.queues["5"].logs)) upkeepSelectorFn = func(id *big.Int) bool { return id.Int64()%int64(iterations) == int64(3) // on this dequeue attempt, current iteration will be 3 @@ -350,11 +350,11 @@ func TestLogEventBufferV1_Dequeue_highLoad(t *testing.T) { assert.Equal(t, 5, len(logs)) assert.Equal(t, 15, remaining) - assert.Equal(t, 95, len(bufV1.queues["1"].logs)) - assert.Equal(t, 95, len(bufV1.queues["2"].logs)) - assert.Equal(t, 95, len(bufV1.queues["3"].logs)) - assert.Equal(t, 100, len(bufV1.queues["4"].logs)) - assert.Equal(t, 95, len(bufV1.queues["5"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["5"].logs)) upkeepSelectorFn = func(id *big.Int) bool { return id.Int64()%int64(iterations) == int64(4) // on this dequeue attempt, current iteration will be 4 @@ -366,11 +366,11 @@ func TestLogEventBufferV1_Dequeue_highLoad(t *testing.T) { assert.Equal(t, 5, len(logs)) assert.Equal(t, 15, remaining) - assert.Equal(t, 95, len(bufV1.queues["1"].logs)) - assert.Equal(t, 95, len(bufV1.queues["2"].logs)) - assert.Equal(t, 95, len(bufV1.queues["3"].logs)) - assert.Equal(t, 95, len(bufV1.queues["4"].logs)) - assert.Equal(t, 95, len(bufV1.queues["5"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["5"].logs)) }) } @@ -474,7 +474,7 @@ func TestLogEventBufferV1_Enqueue(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), tc.lookback, tc.blockRate, tc.upkeepLimit) + buf := NewLogBuffer(logger.TestLogger(t), tc.lookback, tc.blockRate, tc.upkeepLimit, newDequeueCoordinator()) for id, logs := range tc.logsToAdd { added, dropped := buf.Enqueue(id, logs...) 
sid := id.String() @@ -562,7 +562,7 @@ func TestLogEventBufferV1_UpkeepQueue_clean(t *testing.T) { }) t.Run("happy path", func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 5, 1) + buf := NewLogBuffer(logger.TestLogger(t), 10, 5, 1, newDequeueCoordinator()) buf.Enqueue(big.NewInt(1), logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, @@ -698,7 +698,7 @@ func createDummyLogSequence(n, startIndex int, block int64, tx common.Hash) []lo } func Test_trackBlockNumbersForUpkeep(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1) + buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1, newDequeueCoordinator()) logBuffer := buf.(*logBuffer) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go index e1f99a61649..9081a12846b 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go @@ -1,6 +1,9 @@ package logprovider -import "math/big" +import ( + "math/big" + "sync" +) type dequeueCoordinator struct { dequeuedMinimum map[int64]bool @@ -9,9 +12,13 @@ type dequeueCoordinator struct { dequeuedLogs map[int64]int completeWindows map[int64]bool dequeuedUpkeeps map[int64]map[string]int + mu sync.Mutex } func (c *dequeueCoordinator) dequeueBlockWindow(start int64, latestBlock int64, blockRate int) (int64, int64, bool) { + c.mu.Lock() + defer c.mu.Unlock() + // check if minimum logs have been dequeued for i := start; i <= latestBlock; i += int64(blockRate) { startWindow, end := getBlockWindow(i, blockRate) @@ -46,6 +53,9 @@ func (c *dequeueCoordinator) dequeueBlockWindow(start int64, latestBlock int64, // upkeeps will be dequeued for each iteration once only, and, across all iterations, all upkeeps will be // dequeued once. func (c *dequeueCoordinator) getUpkeepSelector(startWindow int64, logLimitLow, iterations, currentIteration int) func(id *big.Int) bool { + c.mu.Lock() + defer c.mu.Unlock() + bestEffort := false if hasDequeued, ok := c.dequeuedMinimum[startWindow]; ok { @@ -79,7 +89,22 @@ func (c *dequeueCoordinator) trackUpkeeps(startWindow int64, upkeepID *big.Int) } } +func (c *dequeueCoordinator) markReorg(block int64, blockRate uint32) { + c.mu.Lock() + defer c.mu.Unlock() + + startWindow, _ := getBlockWindow(block, int(blockRate)) + c.dequeuedMinimum[startWindow] = false + // TODO instead of wiping the count for all upkeeps, should we wipe for upkeeps only impacted by the reorg? 
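+	// Resetting dequeuedMinimum above and zeroing the per-upkeep counts below
+	// make the coordinator treat the reorged window as if nothing had been
+	// dequeued from it yet; e.g. a reorg at block 6 with a block rate of 4
+	// resets the window starting at block 4, so the re-enqueued logs are
+	// dequeued again as part of the minimum commitment.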
+ for upkeepID := range c.dequeuedUpkeeps[startWindow] { + c.dequeuedUpkeeps[startWindow][upkeepID] = 0 + } +} + func (c *dequeueCoordinator) updateBlockWindow(startWindow int64, logs, remaining, numberOfUpkeeps, logLimitLow int) { + c.mu.Lock() + defer c.mu.Unlock() + c.remainingLogs[startWindow] = remaining c.dequeuedLogs[startWindow] += logs diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go index 9603d6da5be..43a8ddc27dc 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go @@ -58,16 +58,23 @@ func logID(l logpoller.Log) string { } // blockStatistics returns the latest block number from the given logs, and a map of unique block numbers -func blockStatistics(logs ...logpoller.Log) (int64, map[int64]bool) { +func (b *logBuffer) blockStatistics(logs ...logpoller.Log) (int64, map[int64]bool, map[int64]bool) { var latest int64 uniqueBlocks := map[int64]bool{} + reorgBlocks := map[int64]bool{} for _, l := range logs { if l.BlockNumber > latest { latest = l.BlockNumber } uniqueBlocks[l.BlockNumber] = true + if hash, ok := b.blockHashes[l.BlockNumber]; ok { + if hash != l.BlockHash.String() { + reorgBlocks[l.BlockNumber] = true + } + } + b.blockHashes[l.BlockNumber] = l.BlockHash.String() } - return latest, uniqueBlocks + return latest, uniqueBlocks, reorgBlocks } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index 6d23b89fd73..e9672c007b8 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -134,17 +134,18 @@ func newDequeueCoordinator() *dequeueCoordinator { } func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, chainID *big.Int, packer LogDataPacker, filterStore UpkeepFilterStore, opts LogTriggersOptions) *logEventProvider { + dequeueCoordinator := newDequeueCoordinator() return &logEventProvider{ threadCtrl: utils.NewThreadControl(), lggr: lggr.Named("KeepersRegistry.LogEventProvider"), packer: packer, buffer: newLogEventBuffer(lggr, int(opts.LookbackBlocks), defaultNumOfLogUpkeeps, defaultFastExecLogsHigh), - bufferV1: NewLogBuffer(lggr, uint32(opts.LookbackBlocks), opts.BlockRate, opts.LogLimit), + bufferV1: NewLogBuffer(lggr, uint32(opts.LookbackBlocks), opts.BlockRate, opts.LogLimit, dequeueCoordinator), poller: poller, opts: opts, filterStore: filterStore, chainID: chainID, - dequeueCoordinator: newDequeueCoordinator(), + dequeueCoordinator: dequeueCoordinator, } } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go index bb607dc7b29..b93e27b0aba 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go @@ -317,6 +317,14 @@ func newEntry(p *logEventProvider, i int, args ...string) (LogTriggerConfig, upk return cfg, f } +func countLogs(logs map[int64][]logpoller.Log) int { + count := 0 + for _, logList := range logs { + count += len(logList) + } + return count +} + func TestLogEventProvider_GetLatestPayloads(t *testing.T) { t.Run("5 upkeeps, 100 logs per 
upkeep per block for 100 blocks", func(t *testing.T) { upkeepIDs := []*big.Int{ @@ -381,11 +389,11 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { bufV1 := provider.bufferV1.(*logBuffer) // each upkeep should have 100 logs * 100 blocks = 10000 logs - assert.Equal(t, 10000, len(bufV1.queues["1"].logs)) - assert.Equal(t, 10000, len(bufV1.queues["2"].logs)) - assert.Equal(t, 10000, len(bufV1.queues["3"].logs)) - assert.Equal(t, 10000, len(bufV1.queues["4"].logs)) - assert.Equal(t, 10000, len(bufV1.queues["5"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["5"].logs)) payloads, err := provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -394,11 +402,11 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 100, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 9980, len(bufV1.queues["1"].logs)) - assert.Equal(t, 9980, len(bufV1.queues["2"].logs)) - assert.Equal(t, 9980, len(bufV1.queues["3"].logs)) - assert.Equal(t, 9980, len(bufV1.queues["4"].logs)) - assert.Equal(t, 9980, len(bufV1.queues["5"].logs)) + assert.Equal(t, 9980, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 9980, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 9980, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 9980, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 9980, countLogs(bufV1.queues["5"].logs)) payloads, err = provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -407,11 +415,11 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 100, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 9960, len(bufV1.queues["1"].logs)) - assert.Equal(t, 9960, len(bufV1.queues["2"].logs)) - assert.Equal(t, 9960, len(bufV1.queues["3"].logs)) - assert.Equal(t, 9960, len(bufV1.queues["4"].logs)) - assert.Equal(t, 9960, len(bufV1.queues["5"].logs)) + assert.Equal(t, 9960, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 9960, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 9960, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 9960, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 9960, countLogs(bufV1.queues["5"].logs)) }) t.Run("200 upkeeps", func(t *testing.T) { @@ -475,10 +483,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { bufV1 := provider.bufferV1.(*logBuffer) // each upkeep should have 100 logs * 100 blocks = 10000 logs - assert.Equal(t, 10000, len(bufV1.queues["1"].logs)) - assert.Equal(t, 10000, len(bufV1.queues["50"].logs)) - assert.Equal(t, 10000, len(bufV1.queues["101"].logs)) - assert.Equal(t, 10000, len(bufV1.queues["150"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["50"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["101"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["150"].logs)) payloads, err := provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -490,10 +498,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 100, len(payloads)) // the dequeue is evenly distributed across selected upkeeps - assert.Equal(t, 10000, len(bufV1.queues["1"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["50"].logs)) - assert.Equal(t, 10000, 
len(bufV1.queues["101"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["150"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["50"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["101"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["150"].logs)) payloads, err = provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -504,10 +512,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 100, len(payloads)) // the dequeue is evenly distributed across selected upkeeps - assert.Equal(t, 9999, len(bufV1.queues["1"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["50"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["101"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["150"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["50"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["101"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["150"].logs)) payloads, err = provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -518,10 +526,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 100, len(payloads)) // the dequeue is evenly distributed across selected upkeeps - assert.Equal(t, 9999, len(bufV1.queues["1"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["50"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["101"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["150"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["50"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["101"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["150"].logs)) payloads, err = provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -532,10 +540,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 100, len(payloads)) // the dequeue is evenly distributed across selected upkeeps - assert.Equal(t, 9998, len(bufV1.queues["1"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["50"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["101"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["150"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["50"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["101"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["150"].logs)) }) t.Run("200 upkeeps, increasing to 300 upkeeps midway through the test", func(t *testing.T) { @@ -599,12 +607,12 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { bufV1 := provider.bufferV1.(*logBuffer) // each upkeep should have 100 logs * 100 blocks = 10000 logs - assert.Equal(t, 10000, len(bufV1.queues["1"].logs)) - assert.Equal(t, 10000, len(bufV1.queues["9"].logs)) - assert.Equal(t, 10000, len(bufV1.queues["21"].logs)) - assert.Equal(t, 10000, len(bufV1.queues["50"].logs)) - assert.Equal(t, 10000, len(bufV1.queues["101"].logs)) - assert.Equal(t, 10000, len(bufV1.queues["150"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["9"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["21"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["50"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["101"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["150"].logs)) payloads, err := provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -616,12 +624,12 @@ func 
TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 100, len(payloads)) // the dequeue is evenly distributed across selected upkeeps; with 2 iterations this means even upkeep IDs are dequeued first - assert.Equal(t, 10000, len(bufV1.queues["1"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["40"].logs)) - assert.Equal(t, 10000, len(bufV1.queues["45"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["50"].logs)) - assert.Equal(t, 10000, len(bufV1.queues["101"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["150"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["40"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["45"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["50"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["101"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["150"].logs)) payloads, err = provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -632,12 +640,12 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 100, len(payloads)) // the dequeue is evenly distributed across selected upkeeps; on the second iteration, odd upkeep IDs are dequeued - assert.Equal(t, 9999, len(bufV1.queues["1"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["50"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["99"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["100"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["101"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["150"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["50"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["99"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["100"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["101"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["150"].logs)) payloads, err = provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -648,12 +656,12 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 100, len(payloads)) // the dequeue is evenly distributed across selected upkeeps; on the third iteration, even upkeep IDs are dequeued once again - assert.Equal(t, 9999, len(bufV1.queues["1"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["50"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["101"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["150"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["160"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["170"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["50"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["101"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["150"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["160"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["170"].logs)) for i := int64(201); i <= 300; i++ { upkeepIDs = append(upkeepIDs, big.NewInt(i)) @@ -689,12 +697,12 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { // the dequeue is evenly distributed across selected upkeeps; the new iterations // have not yet been recalculated despite the new logs being added; new iterations // are only calculated when current iteration maxes out at the total number of iterations - assert.Equal(t, 9998, len(bufV1.queues["1"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["50"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["51"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["52"].logs)) - 
assert.Equal(t, 9998, len(bufV1.queues["101"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["150"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["50"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["51"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["52"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["101"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["150"].logs)) payloads, err = provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -707,12 +715,12 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 100, len(payloads)) // the dequeue is evenly distributed across selected upkeeps - assert.Equal(t, 9998, len(bufV1.queues["1"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["11"].logs)) - assert.Equal(t, 9997, len(bufV1.queues["111"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["50"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["101"].logs)) - assert.Equal(t, 9997, len(bufV1.queues["150"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["11"].logs)) + assert.Equal(t, 9997, countLogs(bufV1.queues["111"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["50"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["101"].logs)) + assert.Equal(t, 9997, countLogs(bufV1.queues["150"].logs)) payloads, err = provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -724,15 +732,15 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 100, len(payloads)) // the dequeue is evenly distributed across selected upkeeps - assert.Equal(t, 9997, len(bufV1.queues["1"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["2"].logs)) - assert.Equal(t, 9997, len(bufV1.queues["3"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["50"].logs)) - assert.Equal(t, 9998, len(bufV1.queues["101"].logs)) - assert.Equal(t, 9997, len(bufV1.queues["150"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["250"].logs)) - assert.Equal(t, 10000, len(bufV1.queues["299"].logs)) - assert.Equal(t, 9999, len(bufV1.queues["300"].logs)) + assert.Equal(t, 9997, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 9997, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["50"].logs)) + assert.Equal(t, 9998, countLogs(bufV1.queues["101"].logs)) + assert.Equal(t, 9997, countLogs(bufV1.queues["150"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["250"].logs)) + assert.Equal(t, 10000, countLogs(bufV1.queues["299"].logs)) + assert.Equal(t, 9999, countLogs(bufV1.queues["300"].logs)) payloads, err = provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -746,8 +754,8 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { var remainingLogs int // at this point, every queue should have had at least one log dequeued for _, queue := range bufV1.queues { - assert.True(t, len(queue.logs) < 10000) - remainingLogs += len(queue.logs) + assert.True(t, countLogs(queue.logs) < 10000) + remainingLogs += countLogs(queue.logs) } // check that across all 300 upkeeps, we have only dequeued 700 of the 3000000 logs (7 dequeue calls of 100 logs) @@ -823,11 +831,11 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { bufV1 := provider.bufferV1.(*logBuffer) // each upkeep should have 10 logs * 100 blocks = 1000 logs - assert.Equal(t, 1000, len(bufV1.queues["1"].logs)) - assert.Equal(t, 1000, len(bufV1.queues["2"].logs)) - 
assert.Equal(t, 1000, len(bufV1.queues["3"].logs)) - assert.Equal(t, 1000, len(bufV1.queues["4"].logs)) - assert.Equal(t, 1000, len(bufV1.queues["5"].logs)) + assert.Equal(t, 1000, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 1000, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 1000, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 1000, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 1000, countLogs(bufV1.queues["5"].logs)) payloads, err := provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -836,20 +844,19 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 998, len(bufV1.queues["1"].logs)) - assert.Equal(t, 998, len(bufV1.queues["2"].logs)) - assert.Equal(t, 998, len(bufV1.queues["3"].logs)) - assert.Equal(t, 998, len(bufV1.queues["4"].logs)) - assert.Equal(t, 998, len(bufV1.queues["5"].logs)) + assert.Equal(t, 998, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 998, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 998, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 998, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 998, countLogs(bufV1.queues["5"].logs)) blockWindowCounts := map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - blockWindowCounts[l.BlockNumber]++ + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) } } - // all 10 logs should have been dequeued from the first block window assert.Equal(t, 40, blockWindowCounts[1]) assert.Equal(t, 50, blockWindowCounts[2]) @@ -862,17 +869,17 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 996, len(bufV1.queues["1"].logs)) - assert.Equal(t, 996, len(bufV1.queues["2"].logs)) - assert.Equal(t, 996, len(bufV1.queues["3"].logs)) - assert.Equal(t, 996, len(bufV1.queues["4"].logs)) - assert.Equal(t, 996, len(bufV1.queues["5"].logs)) + assert.Equal(t, 996, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 996, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 996, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 996, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 996, countLogs(bufV1.queues["5"].logs)) blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - blockWindowCounts[l.BlockNumber]++ + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) } } @@ -890,17 +897,17 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { } // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 802, len(bufV1.queues["1"].logs)) - assert.Equal(t, 802, len(bufV1.queues["2"].logs)) - assert.Equal(t, 802, len(bufV1.queues["3"].logs)) - assert.Equal(t, 802, len(bufV1.queues["4"].logs)) - assert.Equal(t, 802, len(bufV1.queues["5"].logs)) + assert.Equal(t, 802, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 802, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 802, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 802, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 802, countLogs(bufV1.queues["5"].logs)) blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - blockWindowCounts[l.BlockNumber]++ + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) } } @@ -919,11 +926,11 @@ func 
TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 800, len(bufV1.queues["1"].logs)) - assert.Equal(t, 800, len(bufV1.queues["2"].logs)) - assert.Equal(t, 800, len(bufV1.queues["3"].logs)) - assert.Equal(t, 800, len(bufV1.queues["4"].logs)) - assert.Equal(t, 800, len(bufV1.queues["5"].logs)) + assert.Equal(t, 800, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 800, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 800, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 800, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 800, countLogs(bufV1.queues["5"].logs)) payloads, err = provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -932,20 +939,19 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 798, len(bufV1.queues["1"].logs)) - assert.Equal(t, 798, len(bufV1.queues["2"].logs)) - assert.Equal(t, 798, len(bufV1.queues["3"].logs)) - assert.Equal(t, 798, len(bufV1.queues["4"].logs)) - assert.Equal(t, 798, len(bufV1.queues["5"].logs)) + assert.Equal(t, 798, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 798, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 798, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 798, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 798, countLogs(bufV1.queues["5"].logs)) blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - blockWindowCounts[l.BlockNumber]++ + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) } } - // all 10 logs should have been dequeued from the second block window, since the first block window has met it's minimum commitment assert.Equal(t, 30, blockWindowCounts[1]) assert.Equal(t, 40, blockWindowCounts[2]) @@ -959,20 +965,19 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 796, len(bufV1.queues["1"].logs)) - assert.Equal(t, 796, len(bufV1.queues["2"].logs)) - assert.Equal(t, 796, len(bufV1.queues["3"].logs)) - assert.Equal(t, 796, len(bufV1.queues["4"].logs)) - assert.Equal(t, 796, len(bufV1.queues["5"].logs)) + assert.Equal(t, 796, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 796, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 796, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 796, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 796, countLogs(bufV1.queues["5"].logs)) blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - blockWindowCounts[l.BlockNumber]++ + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) } } - // all 10 logs should have been dequeued from the second block window, since the first block window has met it's minimum commitment assert.Equal(t, 20, blockWindowCounts[1]) assert.Equal(t, 40, blockWindowCounts[2]) @@ -986,20 +991,19 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 794, len(bufV1.queues["1"].logs)) - assert.Equal(t, 794, len(bufV1.queues["2"].logs)) - assert.Equal(t, 794, len(bufV1.queues["3"].logs)) - assert.Equal(t, 794, len(bufV1.queues["4"].logs)) - assert.Equal(t, 794, len(bufV1.queues["5"].logs)) + 
assert.Equal(t, 794, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 794, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 794, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 794, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 794, countLogs(bufV1.queues["5"].logs)) blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - blockWindowCounts[l.BlockNumber]++ + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) } } - // all 10 logs should have been dequeued from the second block window, since the first block window has met it's minimum commitment assert.Equal(t, 10, blockWindowCounts[1]) assert.Equal(t, 40, blockWindowCounts[2]) @@ -1013,20 +1017,19 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 792, len(bufV1.queues["1"].logs)) - assert.Equal(t, 792, len(bufV1.queues["2"].logs)) - assert.Equal(t, 792, len(bufV1.queues["3"].logs)) - assert.Equal(t, 792, len(bufV1.queues["4"].logs)) - assert.Equal(t, 792, len(bufV1.queues["5"].logs)) + assert.Equal(t, 792, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 792, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 792, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 792, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 792, countLogs(bufV1.queues["5"].logs)) blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - blockWindowCounts[l.BlockNumber]++ + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) } } - // all 10 logs should have been dequeued from the second block window, since the first block window has met it's minimum commitment assert.Equal(t, 0, blockWindowCounts[1]) assert.Equal(t, 40, blockWindowCounts[2]) @@ -1040,20 +1043,19 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 790, len(bufV1.queues["1"].logs)) - assert.Equal(t, 790, len(bufV1.queues["2"].logs)) - assert.Equal(t, 790, len(bufV1.queues["3"].logs)) - assert.Equal(t, 790, len(bufV1.queues["4"].logs)) - assert.Equal(t, 790, len(bufV1.queues["5"].logs)) + assert.Equal(t, 790, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 790, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 790, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 790, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 790, countLogs(bufV1.queues["5"].logs)) blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - blockWindowCounts[l.BlockNumber]++ + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) } } - // all 10 logs should have been dequeued from the second block window, since the first block window has met it's minimum commitment assert.Equal(t, 0, blockWindowCounts[1]) assert.Equal(t, 30, blockWindowCounts[2]) @@ -1137,10 +1139,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { bufV1 := provider.bufferV1.(*logBuffer) for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1151,11 +1153,11 @@ func 
TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 15, blockWindowCounts[100]) // the block window starting at block 100 is only 3/4 complete as of block 102 // each upkeep should have 10 logs * 102 blocks = 1020 logs - assert.Equal(t, 993, len(bufV1.queues["1"].logs)) - assert.Equal(t, 993, len(bufV1.queues["2"].logs)) - assert.Equal(t, 993, len(bufV1.queues["3"].logs)) - assert.Equal(t, 993, len(bufV1.queues["4"].logs)) - assert.Equal(t, 993, len(bufV1.queues["5"].logs)) + assert.Equal(t, 993, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 993, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 993, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 993, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 993, countLogs(bufV1.queues["5"].logs)) payloads, err := provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -1164,19 +1166,19 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 991, len(bufV1.queues["1"].logs)) - assert.Equal(t, 991, len(bufV1.queues["2"].logs)) - assert.Equal(t, 991, len(bufV1.queues["3"].logs)) - assert.Equal(t, 991, len(bufV1.queues["4"].logs)) - assert.Equal(t, 991, len(bufV1.queues["5"].logs)) + assert.Equal(t, 991, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 991, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 991, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 991, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 991, countLogs(bufV1.queues["5"].logs)) blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1193,19 +1195,19 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 989, len(bufV1.queues["1"].logs)) - assert.Equal(t, 989, len(bufV1.queues["2"].logs)) - assert.Equal(t, 989, len(bufV1.queues["3"].logs)) - assert.Equal(t, 989, len(bufV1.queues["4"].logs)) - assert.Equal(t, 989, len(bufV1.queues["5"].logs)) + assert.Equal(t, 989, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 989, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 989, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 989, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 989, countLogs(bufV1.queues["5"].logs)) blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1223,19 +1225,19 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { } // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 943, len(bufV1.queues["1"].logs)) - assert.Equal(t, 943, len(bufV1.queues["2"].logs)) - assert.Equal(t, 943, len(bufV1.queues["3"].logs)) - assert.Equal(t, 943, len(bufV1.queues["4"].logs)) - assert.Equal(t, 943, len(bufV1.queues["5"].logs)) + assert.Equal(t, 943, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 943, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 943, countLogs(bufV1.queues["3"].logs)) + 
assert.Equal(t, 943, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 943, countLogs(bufV1.queues["5"].logs)) blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1253,19 +1255,19 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 941, len(bufV1.queues["1"].logs)) - assert.Equal(t, 941, len(bufV1.queues["2"].logs)) - assert.Equal(t, 941, len(bufV1.queues["3"].logs)) - assert.Equal(t, 941, len(bufV1.queues["4"].logs)) - assert.Equal(t, 941, len(bufV1.queues["5"].logs)) + assert.Equal(t, 941, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 941, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 941, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 941, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 941, countLogs(bufV1.queues["5"].logs)) blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1282,19 +1284,19 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 939, len(bufV1.queues["1"].logs)) - assert.Equal(t, 939, len(bufV1.queues["2"].logs)) - assert.Equal(t, 939, len(bufV1.queues["3"].logs)) - assert.Equal(t, 939, len(bufV1.queues["4"].logs)) - assert.Equal(t, 939, len(bufV1.queues["5"].logs)) + assert.Equal(t, 939, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 939, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 939, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 939, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 939, countLogs(bufV1.queues["5"].logs)) blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1311,19 +1313,19 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 937, len(bufV1.queues["1"].logs)) - assert.Equal(t, 937, len(bufV1.queues["2"].logs)) - assert.Equal(t, 937, len(bufV1.queues["3"].logs)) - assert.Equal(t, 937, len(bufV1.queues["4"].logs)) - assert.Equal(t, 937, len(bufV1.queues["5"].logs)) + assert.Equal(t, 937, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 937, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 937, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 937, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 937, countLogs(bufV1.queues["5"].logs)) blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - 
blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1349,11 +1351,11 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) // the dequeue is evenly distributed across the 5 upkeeps - assert.Equal(t, 935, len(bufV1.queues["1"].logs)) - assert.Equal(t, 935, len(bufV1.queues["2"].logs)) - assert.Equal(t, 935, len(bufV1.queues["3"].logs)) - assert.Equal(t, 935, len(bufV1.queues["4"].logs)) - assert.Equal(t, 935, len(bufV1.queues["5"].logs)) + assert.Equal(t, 935, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 935, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 935, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 935, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 935, countLogs(bufV1.queues["5"].logs)) for i := 0; i < 467; i++ { _, err = provider.GetLatestPayloads(ctx) @@ -1363,10 +1365,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1384,10 +1386,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1474,10 +1476,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { blockWindowCounts := map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1498,10 +1500,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1588,10 +1590,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { blockWindowCounts := map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1608,10 +1610,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1655,10 +1657,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, 
_ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1673,10 +1675,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1703,10 +1705,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1792,10 +1794,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { blockWindowCounts := map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1811,10 +1813,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1856,10 +1858,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1877,10 +1879,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } @@ -1895,16 +1897,401 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { blockWindowCounts = map[int64]int{} for _, q := range bufV1.queues { - for _, l := range q.logs { - startWindow, _ := getBlockWindow(l.BlockNumber, 4) + for blockNumber, logs := range q.logs { + startWindow, _ := getBlockWindow(blockNumber, 4) - blockWindowCounts[startWindow]++ + blockWindowCounts[startWindow] += len(logs) } } assert.Equal(t, 70, blockWindowCounts[0]) assert.Equal(t, 190, blockWindowCounts[4]) }) + + t.Run("min dequeue followed by best effort followed by reorg followed by best effort", func(t *testing.T) { + oldMaxPayloads := MaxPayloads + MaxPayloads = 10 + defer func() { + MaxPayloads = oldMaxPayloads + }() + + upkeepIDs := []*big.Int{ + big.NewInt(1), + big.NewInt(2), + big.NewInt(3), + big.NewInt(4), + big.NewInt(5), + } + + filterStore := 
NewUpkeepFilterStore() + + logGenerator := func(start, end int64) []logpoller.Log { + var res []logpoller.Log + for i := start; i < end; i++ { + for j := 0; j < 10; j++ { + res = append(res, logpoller.Log{ + LogIndex: int64(j), + BlockHash: common.HexToHash(fmt.Sprintf("%d", i+1)), + BlockNumber: i + 1, + }) + } + } + return res + } + + // use a log poller that will create logs for the queried block range + logPoller := &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 100, nil + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + // prepare the filter store with upkeeps + for _, upkeepID := range upkeepIDs { + filterStore.AddActiveUpkeeps( + upkeepFilter{ + addr: []byte(upkeepID.String()), + upkeepID: upkeepID, + topics: []common.Hash{ + common.HexToHash(upkeepID.String()), + }, + }, + ) + } + + opts := NewOptions(200, big.NewInt(1)) + opts.BufferVersion = "v1" + + provider := NewLogProvider(logger.TestLogger(t), logPoller, big.NewInt(1), &mockedPacker{}, filterStore, opts) + + ctx := context.Background() + + err := provider.ReadLogs(ctx, upkeepIDs...) + assert.NoError(t, err) + + assert.Equal(t, 5, provider.bufferV1.NumOfUpkeeps()) + + bufV1 := provider.bufferV1.(*logBuffer) + + // each upkeep should have 10 logs * 100 blocks = 1000 logs + assert.Equal(t, 1000, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 1000, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 1000, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 1000, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 1000, countLogs(bufV1.queues["5"].logs)) + + for i := 0; i < 100; i++ { + _, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + } + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 800, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 800, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 800, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 800, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 800, countLogs(bufV1.queues["5"].logs)) + + blockWindowCounts := map[int64]int{} + + for _, q := range bufV1.queues { + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) + } + } + // min dequeue should have happened for all block windows + assert.Equal(t, 40, blockWindowCounts[1]) + assert.Equal(t, 40, blockWindowCounts[100]) + + payloads, err := provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 798, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 798, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 798, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 798, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 798, countLogs(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) + } + } + + // best effort dequeues first block window + assert.Equal(t, 30, blockWindowCounts[1]) + assert.Equal(t, 40, blockWindowCounts[2]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + 
assert.Equal(t, 796, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 796, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 796, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 796, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 796, countLogs(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) + } + } + + // best effort dequeues first block window + assert.Equal(t, 20, blockWindowCounts[1]) + assert.Equal(t, 40, blockWindowCounts[2]) + + // reorg happens + logGenerator = func(start, end int64) []logpoller.Log { + var res []logpoller.Log + for i := start; i < end; i++ { + if i == 97 { + for j := 0; j < 10; j++ { + res = append(res, logpoller.Log{ + LogIndex: int64(j), + BlockHash: common.HexToHash(fmt.Sprintf("%de", i+1)), + BlockNumber: i + 1, + }) + } + } else { + for j := 0; j < 10; j++ { + res = append(res, logpoller.Log{ + LogIndex: int64(j), + BlockHash: common.HexToHash(fmt.Sprintf("%d", i+1)), + BlockNumber: i + 1, + }) + } + } + } + return res + } + // use a log poller that will create logs for the queried block range + provider.poller = &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 102, nil + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) + } + } + + assert.Equal(t, 20, blockWindowCounts[1]) + assert.Equal(t, 40, blockWindowCounts[98]) + + err = provider.ReadLogs(ctx, upkeepIDs...) 
+ assert.NoError(t, err) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) + } + } + + assert.Equal(t, 20, blockWindowCounts[1]) + assert.Equal(t, 40, blockWindowCounts[97]) + assert.Equal(t, 50, blockWindowCounts[98]) // reorg block window has had new logs added after reorg + assert.Equal(t, 40, blockWindowCounts[99]) + + assert.Equal(t, 818, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 818, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 818, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 818, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 818, countLogs(bufV1.queues["5"].logs)) + + assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[1]) + assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[97]) + assert.Equal(t, false, provider.dequeueCoordinator.dequeuedMinimum[98]) // this window has no min commitment met due to reorg + assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[99]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[1]) + assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[97]) + assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[98]) // this window has had min commitment met following reorg + assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[99]) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 816, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 816, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 816, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 816, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 816, countLogs(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) + } + } + + assert.Equal(t, 20, blockWindowCounts[1]) + assert.Equal(t, 40, blockWindowCounts[98]) // this block window has had its min dequeue met following a reorg + assert.Equal(t, 50, blockWindowCounts[101]) + assert.Equal(t, 50, blockWindowCounts[102]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 814, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 814, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 814, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 814, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 814, countLogs(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) + } + } + + // best effort dequeues first block window + assert.Equal(t, 20, blockWindowCounts[1]) + assert.Equal(t, 40, blockWindowCounts[98]) + assert.Equal(t, 40, blockWindowCounts[101]) + assert.Equal(t, 50, blockWindowCounts[102]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 812, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 
812, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 812, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 812, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 812, countLogs(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) + } + } + + assert.Equal(t, 20, blockWindowCounts[1]) + assert.Equal(t, 40, blockWindowCounts[98]) + assert.Equal(t, 40, blockWindowCounts[101]) + assert.Equal(t, 40, blockWindowCounts[102]) // latest block window has now had min dequeue met + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 810, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 810, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 810, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 810, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 810, countLogs(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) + } + } + + assert.Equal(t, 10, blockWindowCounts[1]) // best effort resumes on the first block window + assert.Equal(t, 40, blockWindowCounts[98]) + assert.Equal(t, 40, blockWindowCounts[101]) + assert.Equal(t, 40, blockWindowCounts[102]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 808, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 808, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 808, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 808, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 808, countLogs(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) + } + } + + assert.Equal(t, 0, blockWindowCounts[1]) // best effort completes on the first block window + assert.Equal(t, 40, blockWindowCounts[2]) + assert.Equal(t, 40, blockWindowCounts[98]) + assert.Equal(t, 40, blockWindowCounts[101]) + assert.Equal(t, 40, blockWindowCounts[102]) + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + // we dequeue a maximum of 10 logs + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps + assert.Equal(t, 806, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 806, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 806, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 806, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 806, countLogs(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) + } + } + + assert.Equal(t, 0, blockWindowCounts[1]) + assert.Equal(t, 30, blockWindowCounts[2]) // best effort continues on the second block window + assert.Equal(t, 40, blockWindowCounts[98]) + assert.Equal(t, 40, blockWindowCounts[101]) + assert.Equal(t, 40, blockWindowCounts[102]) + }) } type mockedPacker struct { From 240f744866ce437c64140cff3dbbb831b0ae4e49 Mon Sep 17 00:00:00 2001 From: 
Fergal Gribben Date: Wed, 12 Jun 2024 23:46:20 +0100 Subject: [PATCH 04/10] Clean up --- .../evmregistry/v21/logprovider/buffer_v1.go | 44 +++++++++-- .../v21/logprovider/buffer_v1_test.go | 38 ++++++---- .../v21/logprovider/dequeue_coordinator.go | 76 +++++++++++++------ .../evmregistry/v21/logprovider/log.go | 22 ------ .../evmregistry/v21/logprovider/provider.go | 23 ++---- 5 files changed, 122 insertions(+), 81 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index b284be1d2eb..073017e9c0f 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -114,7 +114,7 @@ func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { if lastBlockSeen := b.lastBlockSeen.Load(); lastBlockSeen < latestLogBlock { b.lastBlockSeen.Store(latestLogBlock) } else if latestLogBlock < lastBlockSeen { - b.lggr.Debugw("enqueuing logs with a latest block older older than latest seen block", "logBlock", latestLogBlock, "lastBlockSeen", lastBlockSeen) + b.lggr.Warnw("enqueuing logs with a latest block older older than latest seen block", "logBlock", latestLogBlock, "lastBlockSeen", lastBlockSeen) } b.trackBlockNumbersForUpkeep(uid, uniqueBlocks) @@ -129,14 +129,44 @@ func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { return buf.enqueue(blockThreshold, logs...) } +// blockStatistics returns the latest block number, a set of unique block numbers, and a set of reorgd blocks +// from the given logs +func (b *logBuffer) blockStatistics(logs ...logpoller.Log) (int64, map[int64]bool, map[int64]bool) { + b.lock.Lock() + defer b.lock.Unlock() + + var latest int64 + uniqueBlocks := map[int64]bool{} + reorgBlocks := map[int64]bool{} + + for _, l := range logs { + if l.BlockNumber > latest { + latest = l.BlockNumber + } + uniqueBlocks[l.BlockNumber] = true + if hash, ok := b.blockHashes[l.BlockNumber]; ok { + if hash != l.BlockHash.String() { + reorgBlocks[l.BlockNumber] = true + b.lggr.Debugw("encountered reorgd block", "blockNumber", l.BlockNumber) + } + } + b.blockHashes[l.BlockNumber] = l.BlockHash.String() + } + + return latest, uniqueBlocks, reorgBlocks +} + func (b *logBuffer) evictReorgdLogs(reorgBlocks map[int64]bool) { - for _, queue := range b.queues { - for blockNumber := range reorgBlocks { + b.lock.Lock() + defer b.lock.Unlock() + + for blockNumber := range reorgBlocks { + for _, queue := range b.queues { if _, ok := queue.logs[blockNumber]; ok { queue.logs[blockNumber] = []logpoller.Log{} - b.dequeueCoordinator.markReorg(blockNumber, b.opts.blockRate.Load()) } } + b.dequeueCoordinator.MarkReorg(blockNumber, b.opts.blockRate.Load()) } } @@ -408,7 +438,6 @@ func (q *upkeepLogQueue) enqueue(blockThreshold int64, logsToAdd ...logpoller.Lo q.lock.Lock() defer q.lock.Unlock() - logs := q.logs var added int for _, log := range logsToAdd { if log.BlockNumber < blockThreshold { @@ -428,9 +457,9 @@ func (q *upkeepLogQueue) enqueue(blockThreshold int64, logsToAdd ...logpoller.Lo } else { q.logs[log.BlockNumber] = []logpoller.Log{log} q.blockNumbers = append(q.blockNumbers, log.BlockNumber) + sort.Slice(q.blockNumbers, func(i, j int) bool { return q.blockNumbers[i] < q.blockNumbers[j] }) } } - q.logs = logs var dropped int if added > 0 { @@ -469,7 +498,8 @@ func (q *upkeepLogQueue) clean(blockThreshold int64) int { 
windowLimit := int(q.opts.windowLimit.Load()) // helper variables to keep track of the current window capacity currentWindowCapacity, currentWindowStart := 0, int64(0) - for blockNumber, logs := range q.logs { + for _, blockNumber := range q.blockNumbers { + logs := q.logs[blockNumber] updated := make([]logpoller.Log, 0) for _, l := range logs { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go index 755f5328a48..0c2da32dffa 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go @@ -15,7 +15,7 @@ import ( ) func TestLogEventBufferV1(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1, newDequeueCoordinator()) + buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1, NewDequeueCoordinator()) buf.Enqueue(big.NewInt(1), logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, @@ -35,7 +35,7 @@ func TestLogEventBufferV1(t *testing.T) { } func TestLogEventBufferV1_SyncFilters(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1, newDequeueCoordinator()) + buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1, NewDequeueCoordinator()) buf.Enqueue(big.NewInt(1), logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, @@ -56,6 +56,7 @@ func TestLogEventBufferV1_SyncFilters(t *testing.T) { type readableLogger struct { logger.Logger DebugwFn func(msg string, keysAndValues ...interface{}) + WarnwFn func(msg string, keysAndValues ...interface{}) NamedFn func(name string) logger.Logger WithFn func(args ...interface{}) logger.Logger } @@ -64,6 +65,10 @@ func (l *readableLogger) Debugw(msg string, keysAndValues ...interface{}) { l.DebugwFn(msg, keysAndValues...) } +func (l *readableLogger) Warnw(msg string, keysAndValues ...interface{}) { + l.WarnwFn(msg, keysAndValues...) 
+} + func (l *readableLogger) Named(name string) logger.Logger { return l } @@ -74,20 +79,26 @@ func (l *readableLogger) With(args ...interface{}) logger.Logger { func TestLogEventBufferV1_EnqueueViolations(t *testing.T) { t.Run("enqueuing logs for a block older than latest seen logs a message", func(t *testing.T) { - logReceived := false + warnLogReceived := false + debugLogReceived := false readableLogger := &readableLogger{ - DebugwFn: func(msg string, keysAndValues ...interface{}) { + WarnwFn: func(msg string, keysAndValues ...interface{}) { if msg == "enqueuing logs from a block older than latest seen block" { - logReceived = true + warnLogReceived = true assert.Equal(t, "logBlock", keysAndValues[0]) assert.Equal(t, int64(1), keysAndValues[1]) assert.Equal(t, "lastBlockSeen", keysAndValues[2]) assert.Equal(t, int64(2), keysAndValues[3]) } }, + DebugwFn: func(msg string, keysAndValues ...interface{}) { + if msg == "Enqueued logs" { + debugLogReceived = true + } + }, } - logBufferV1 := NewLogBuffer(readableLogger, 10, 20, 1, newDequeueCoordinator()) + logBufferV1 := NewLogBuffer(readableLogger, 10, 20, 1, NewDequeueCoordinator()) buf := logBufferV1.(*logBuffer) @@ -101,7 +112,8 @@ func TestLogEventBufferV1_EnqueueViolations(t *testing.T) { assert.Equal(t, 1, buf.enqueuedBlocks[2]["1"]) assert.Equal(t, 1, buf.enqueuedBlocks[1]["2"]) - assert.True(t, true, logReceived) + assert.True(t, true, warnLogReceived) + assert.True(t, true, debugLogReceived) }) t.Run("enqueuing logs for the same block over multiple calls logs a message", func(t *testing.T) { @@ -118,7 +130,7 @@ func TestLogEventBufferV1_EnqueueViolations(t *testing.T) { }, } - logBufferV1 := NewLogBuffer(readableLogger, 10, 20, 1, newDequeueCoordinator()) + logBufferV1 := NewLogBuffer(readableLogger, 10, 20, 1, NewDequeueCoordinator()) buf := logBufferV1.(*logBuffer) @@ -229,7 +241,7 @@ func TestLogEventBufferV1_Dequeue(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), uint32(tc.lookback), uint32(tc.args.blockRate), uint32(tc.args.upkeepLimit), newDequeueCoordinator()) + buf := NewLogBuffer(logger.TestLogger(t), uint32(tc.lookback), uint32(tc.args.blockRate), uint32(tc.args.upkeepLimit), NewDequeueCoordinator()) for id, logs := range tc.logsInBuffer { added, dropped := buf.Enqueue(id, logs...) require.Equal(t, len(logs), added+dropped) @@ -247,7 +259,7 @@ func TestLogEventBufferV1_Dequeue_highLoad(t *testing.T) { lookback := uint32(20) blockRate := uint32(1) logLimit := uint32(1) - buf := NewLogBuffer(logger.TestLogger(t), lookback, blockRate, logLimit, newDequeueCoordinator()) + buf := NewLogBuffer(logger.TestLogger(t), lookback, blockRate, logLimit, NewDequeueCoordinator()) upkeepIDs := []*big.Int{ big.NewInt(1), @@ -474,7 +486,7 @@ func TestLogEventBufferV1_Enqueue(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), tc.lookback, tc.blockRate, tc.upkeepLimit, newDequeueCoordinator()) + buf := NewLogBuffer(logger.TestLogger(t), tc.lookback, tc.blockRate, tc.upkeepLimit, NewDequeueCoordinator()) for id, logs := range tc.logsToAdd { added, dropped := buf.Enqueue(id, logs...) 
sid := id.String() @@ -562,7 +574,7 @@ func TestLogEventBufferV1_UpkeepQueue_clean(t *testing.T) { }) t.Run("happy path", func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 5, 1, newDequeueCoordinator()) + buf := NewLogBuffer(logger.TestLogger(t), 10, 5, 1, NewDequeueCoordinator()) buf.Enqueue(big.NewInt(1), logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, @@ -698,7 +710,7 @@ func createDummyLogSequence(n, startIndex int, block int64, tx common.Hash) []lo } func Test_trackBlockNumbersForUpkeep(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1, newDequeueCoordinator()) + buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1, NewDequeueCoordinator()) logBuffer := buf.(*logBuffer) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go index 9081a12846b..6134d414bc6 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go @@ -5,6 +5,29 @@ import ( "sync" ) +type DequeueCoordinator interface { + // DequeueBlockWindow identifies a block window ready for processing between the given start and latest block numbers. + // It prioritizes windows that need to have the minimum guaranteed logs dequeued before considering windows with + // remaining logs to be dequeued, as a best effort. + DequeueBlockWindow(start int64, latestBlock int64, blockRate int) (int64, int64, bool) + // GetUpkeepSelector returns a function that accepts an upkeep ID, and performs a modulus against the number of + // iterations, and compares the result against the current iteration. When this comparison returns true, the + // upkeep is selected for the dequeuing. This means that, for a given set of upkeeps, a different subset of + // upkeeps will be dequeued for each iteration once only, and, across all iterations, all upkeeps will be + // dequeued once. + GetUpkeepSelector(startWindow int64, logLimitLow, iterations, currentIteration int) func(id *big.Int) bool + // TrackUpkeeps tracks how many times an upkeep has been dequeued for a given block window. + TrackUpkeeps(startWindow int64, upkeepID *big.Int) + // UpdateBlockWindow updates the status of a block window based on the number of logs dequeued, + // remaining logs, and the number of upkeeps. This function tracks remaining and dequeued logs for the specified + // block window, determines if a block window has had the minimum number of guaranteed logs dequeued, and marks a + // window as not ready if there are not yet any logs available to dequeue from the window. + UpdateBlockWindow(startWindow int64, logs, remaining, numberOfUpkeeps, logLimitLow int) + // MarkReorg handles the detection of a reorg by resetting the state of the affected block window. It ensures that + // upkeeps within the specified block window are marked as not having the minimum number of guaranteed logs dequeued. 
+ MarkReorg(block int64, blockRate uint32) +} + type dequeueCoordinator struct { dequeuedMinimum map[int64]bool notReady map[int64]bool @@ -15,7 +38,18 @@ type dequeueCoordinator struct { mu sync.Mutex } -func (c *dequeueCoordinator) dequeueBlockWindow(start int64, latestBlock int64, blockRate int) (int64, int64, bool) { +func NewDequeueCoordinator() *dequeueCoordinator { + return &dequeueCoordinator{ + dequeuedMinimum: map[int64]bool{}, + notReady: map[int64]bool{}, + remainingLogs: map[int64]int{}, + dequeuedLogs: map[int64]int{}, + completeWindows: map[int64]bool{}, + dequeuedUpkeeps: map[int64]map[string]int{}, + } +} + +func (c *dequeueCoordinator) DequeueBlockWindow(start int64, latestBlock int64, blockRate int) (int64, int64, bool) { c.mu.Lock() defer c.mu.Unlock() @@ -25,7 +59,7 @@ func (c *dequeueCoordinator) dequeueBlockWindow(start int64, latestBlock int64, if latestBlock >= end { c.completeWindows[startWindow] = true } else if c.notReady[startWindow] { // the window is incomplete and has no logs to provide as of yet - return 0, 0, false + break } if hasDequeued, ok := c.dequeuedMinimum[startWindow]; !ok || !hasDequeued { @@ -47,12 +81,7 @@ func (c *dequeueCoordinator) dequeueBlockWindow(start int64, latestBlock int64, return 0, 0, false } -// getUpkeepSelector returns a function that accepts an upkeep ID, and performs a modulus against the number of -// iterations, and compares the result against the current iteration. When this comparison returns true, the -// upkeep is selected for the dequeuing. This means that, for a given set of upkeeps, a different subset of -// upkeeps will be dequeued for each iteration once only, and, across all iterations, all upkeeps will be -// dequeued once. -func (c *dequeueCoordinator) getUpkeepSelector(startWindow int64, logLimitLow, iterations, currentIteration int) func(id *big.Int) bool { +func (c *dequeueCoordinator) GetUpkeepSelector(startWindow int64, logLimitLow, iterations, currentIteration int) func(id *big.Int) bool { c.mu.Lock() defer c.mu.Unlock() @@ -78,7 +107,10 @@ func (c *dequeueCoordinator) getUpkeepSelector(startWindow int64, logLimitLow, i } } -func (c *dequeueCoordinator) trackUpkeeps(startWindow int64, upkeepID *big.Int) { +func (c *dequeueCoordinator) TrackUpkeeps(startWindow int64, upkeepID *big.Int) { + c.mu.Lock() + defer c.mu.Unlock() + if windowUpkeeps, ok := c.dequeuedUpkeeps[startWindow]; ok { windowUpkeeps[upkeepID.String()] = windowUpkeeps[upkeepID.String()] + 1 c.dequeuedUpkeeps[startWindow] = windowUpkeeps @@ -89,19 +121,7 @@ func (c *dequeueCoordinator) trackUpkeeps(startWindow int64, upkeepID *big.Int) } } -func (c *dequeueCoordinator) markReorg(block int64, blockRate uint32) { - c.mu.Lock() - defer c.mu.Unlock() - - startWindow, _ := getBlockWindow(block, int(blockRate)) - c.dequeuedMinimum[startWindow] = false - // TODO instead of wiping the count for all upkeeps, should we wipe for upkeeps only impacted by the reorg? 
- for upkeepID := range c.dequeuedUpkeeps[startWindow] { - c.dequeuedUpkeeps[startWindow][upkeepID] = 0 - } -} - -func (c *dequeueCoordinator) updateBlockWindow(startWindow int64, logs, remaining, numberOfUpkeeps, logLimitLow int) { +func (c *dequeueCoordinator) UpdateBlockWindow(startWindow int64, logs, remaining, numberOfUpkeeps, logLimitLow int) { c.mu.Lock() defer c.mu.Unlock() @@ -123,3 +143,15 @@ func (c *dequeueCoordinator) updateBlockWindow(startWindow int64, logs, remainin c.notReady[startWindow] = true } } + +func (c *dequeueCoordinator) MarkReorg(block int64, blockRate uint32) { + c.mu.Lock() + defer c.mu.Unlock() + + startWindow, _ := getBlockWindow(block, int(blockRate)) + c.dequeuedMinimum[startWindow] = false + // TODO instead of wiping the count for all upkeeps, should we wipe for upkeeps only impacted by the reorg? + for upkeepID := range c.dequeuedUpkeeps[startWindow] { + c.dequeuedUpkeeps[startWindow][upkeepID] = 0 + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go index 43a8ddc27dc..ba577f57130 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go @@ -56,25 +56,3 @@ func logID(l logpoller.Log) string { copy(ext.BlockHash[:], l.BlockHash[:]) return hex.EncodeToString(ext.LogIdentifier()) } - -// blockStatistics returns the latest block number from the given logs, and a map of unique block numbers -func (b *logBuffer) blockStatistics(logs ...logpoller.Log) (int64, map[int64]bool, map[int64]bool) { - var latest int64 - uniqueBlocks := map[int64]bool{} - reorgBlocks := map[int64]bool{} - - for _, l := range logs { - if l.BlockNumber > latest { - latest = l.BlockNumber - } - uniqueBlocks[l.BlockNumber] = true - if hash, ok := b.blockHashes[l.BlockNumber]; ok { - if hash != l.BlockHash.String() { - reorgBlocks[l.BlockNumber] = true - } - } - b.blockHashes[l.BlockNumber] = l.BlockHash.String() - } - - return latest, uniqueBlocks, reorgBlocks -} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index e9672c007b8..a9a0f2a1561 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -119,22 +119,11 @@ type logEventProvider struct { currentIteration int iterations int - dequeueCoordinator *dequeueCoordinator -} - -func newDequeueCoordinator() *dequeueCoordinator { - return &dequeueCoordinator{ - dequeuedMinimum: map[int64]bool{}, - notReady: map[int64]bool{}, - remainingLogs: map[int64]int{}, - dequeuedLogs: map[int64]int{}, - completeWindows: map[int64]bool{}, - dequeuedUpkeeps: map[int64]map[string]int{}, - } + dequeueCoordinator DequeueCoordinator } func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, chainID *big.Int, packer LogDataPacker, filterStore UpkeepFilterStore, opts LogTriggersOptions) *logEventProvider { - dequeueCoordinator := newDequeueCoordinator() + dequeueCoordinator := NewDequeueCoordinator() return &logEventProvider{ threadCtrl: utils.NewThreadControl(), lggr: lggr.Named("KeepersRegistry.LogEventProvider"), @@ -322,13 +311,13 @@ func (p *logEventProvider) getLogsFromBuffer(latestBlock int64) []ocr2keepers.Up } for len(payloads) < maxResults { - startWindow, end, canDequeue := 
p.dequeueCoordinator.dequeueBlockWindow(start, latestBlock, blockRate) + startWindow, end, canDequeue := p.dequeueCoordinator.DequeueBlockWindow(start, latestBlock, blockRate) if !canDequeue { p.lggr.Debugw("Nothing to dequeue", "start", start, "latestBlock", latestBlock) break } - upkeepSelectorFn := p.dequeueCoordinator.getUpkeepSelector(startWindow, logLimitLow, p.iterations, p.currentIteration) + upkeepSelectorFn := p.dequeueCoordinator.GetUpkeepSelector(startWindow, logLimitLow, p.iterations, p.currentIteration) logs, remaining := p.bufferV1.Dequeue(startWindow, end, logLimitLow, maxResults-len(payloads), upkeepSelectorFn) if len(logs) > 0 { @@ -338,11 +327,11 @@ func (p *logEventProvider) getLogsFromBuffer(latestBlock int64) []ocr2keepers.Up if err == nil { payloads = append(payloads, payload) } - p.dequeueCoordinator.trackUpkeeps(startWindow, l.ID) + p.dequeueCoordinator.TrackUpkeeps(startWindow, l.ID) } } - p.dequeueCoordinator.updateBlockWindow(startWindow, len(logs), remaining, numOfUpkeeps, logLimitLow) + p.dequeueCoordinator.UpdateBlockWindow(startWindow, len(logs), remaining, numOfUpkeeps, logLimitLow) } p.currentIteration++ default: From f22abd1b9e776e7adeda7b3a24db40993d14b56a Mon Sep 17 00:00:00 2001 From: Fergal Gribben Date: Thu, 13 Jun 2024 13:19:37 +0100 Subject: [PATCH 05/10] Fix tests --- .../evmregistry/v21/logprovider/buffer_v1.go | 7 ++- .../v21/logprovider/provider_test.go | 46 +++++++++++-------- 2 files changed, 32 insertions(+), 21 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index 073017e9c0f..3abfbb196f0 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -493,12 +493,14 @@ func (q *upkeepLogQueue) orderLogs() { // given upkeep was exceeded. Returns the number of logs that were dropped. // NOTE: this method is not thread safe and should be called within a lock. func (q *upkeepLogQueue) clean(blockThreshold int64) int { - var dropped, expired int + var totalDropped int blockRate := int(q.opts.blockRate.Load()) windowLimit := int(q.opts.windowLimit.Load()) // helper variables to keep track of the current window capacity currentWindowCapacity, currentWindowStart := 0, int64(0) for _, blockNumber := range q.blockNumbers { + var dropped, expired int + logs := q.logs[blockNumber] updated := make([]logpoller.Log, 0) @@ -537,6 +539,7 @@ func (q *upkeepLogQueue) clean(blockThreshold int64) int { } if dropped > 0 || expired > 0 { + totalDropped += dropped q.logs[blockNumber] = updated q.lggr.Debugw("Cleaned logs", "dropped", dropped, "expired", expired, "blockThreshold", blockThreshold, "len updated", len(updated), "len before", len(q.logs)) } @@ -544,7 +547,7 @@ func (q *upkeepLogQueue) clean(blockThreshold int64) int { q.cleanStates(blockThreshold) - return dropped + return totalDropped } // cleanStates removes states that are older than blockThreshold. 
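The GetUpkeepSelector contract documented on the interface above (each upkeep is selected in exactly one iteration, by comparing the upkeep ID modulo the iteration count against the current iteration) can be pictured with a small standalone Go sketch. This is illustrative only; the bucket calculation below is an assumption for clarity, not the selector code shipped in this patch, and the package and function names are hypothetical.

package logprovider_example

import "math/big"

// exampleUpkeepSelector returns a selector that picks upkeeps whose ID, reduced
// modulo the number of iterations, equals the current iteration. Across all
// iterations every upkeep is therefore selected exactly once. Taking the modulus
// of the raw ID is an assumption; the real GetUpkeepSelector may derive the
// bucket differently.
func exampleUpkeepSelector(iterations, currentIteration int) func(id *big.Int) bool {
	return func(id *big.Int) bool {
		if iterations <= 0 {
			return true // degenerate case: select every upkeep
		}
		bucket := new(big.Int).Mod(id, big.NewInt(int64(iterations)))
		return bucket.Int64() == int64(currentIteration)
	}
}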
diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go index b93e27b0aba..47d3e5e4af6 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go @@ -1494,8 +1494,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) + dequeueCoordinator := provider.dequeueCoordinator.(*dequeueCoordinator) + // the first block window does not contain any logs, so it automatically gets marked as having the minimum dequeued - assert.True(t, true, provider.dequeueCoordinator.dequeuedMinimum[0]) + assert.True(t, true, dequeueCoordinator.dequeuedMinimum[0]) blockWindowCounts = map[int64]int{} @@ -1604,8 +1606,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 0, len(payloads)) - assert.Equal(t, false, provider.dequeueCoordinator.dequeuedMinimum[0]) - assert.Equal(t, true, provider.dequeueCoordinator.notReady[0]) + dequeueCoordinator := provider.dequeueCoordinator.(*dequeueCoordinator) + + assert.Equal(t, false, dequeueCoordinator.dequeuedMinimum[0]) + assert.Equal(t, true, dequeueCoordinator.notReady[0]) blockWindowCounts = map[int64]int{} @@ -1634,8 +1638,8 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 0, len(payloads)) - assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[0]) // now that the window is complete, it should be marked as dequeued minimum - assert.Equal(t, true, provider.dequeueCoordinator.notReady[0]) + assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[0]) // now that the window is complete, it should be marked as dequeued minimum + assert.Equal(t, true, dequeueCoordinator.notReady[0]) provider.poller = &mockLogPoller{ LatestBlockFn: func(ctx context.Context) (int64, error) { @@ -1716,9 +1720,9 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 180, blockWindowCounts[4]) assert.Equal(t, 190, blockWindowCounts[8]) - assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[0]) - assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[4]) - assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[8]) + assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[0]) + assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[4]) + assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[8]) }) t.Run("an incomplete window with minimum logs already present is marked as min dequeued", func(t *testing.T) { @@ -1808,7 +1812,9 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 10, len(payloads)) - assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[0]) + dequeueCoordinator := provider.dequeueCoordinator.(*dequeueCoordinator) + + assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[0]) blockWindowCounts = map[int64]int{} @@ -1868,8 +1874,8 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 90, blockWindowCounts[0]) assert.Equal(t, 190, blockWindowCounts[4]) - assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[0]) - assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[4]) + assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[0]) + assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[4]) payloads, err = provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -2123,10 
+2129,12 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 818, countLogs(bufV1.queues["4"].logs)) assert.Equal(t, 818, countLogs(bufV1.queues["5"].logs)) - assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[1]) - assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[97]) - assert.Equal(t, false, provider.dequeueCoordinator.dequeuedMinimum[98]) // this window has no min commitment met due to reorg - assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[99]) + dequeueCoordinator := provider.dequeueCoordinator.(*dequeueCoordinator) + + assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[1]) + assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[97]) + assert.Equal(t, false, dequeueCoordinator.dequeuedMinimum[98]) // this window has no min commitment met due to reorg + assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[99]) payloads, err = provider.GetLatestPayloads(ctx) assert.NoError(t, err) @@ -2134,10 +2142,10 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { // we dequeue a maximum of 10 logs assert.Equal(t, 10, len(payloads)) - assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[1]) - assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[97]) - assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[98]) // this window has had min commitment met following reorg - assert.Equal(t, true, provider.dequeueCoordinator.dequeuedMinimum[99]) + assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[1]) + assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[97]) + assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[98]) // this window has had min commitment met following reorg + assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[99]) // the dequeue is evenly distributed across the 5 upkeeps assert.Equal(t, 816, countLogs(bufV1.queues["1"].logs)) From d9fd8a8add265bb0bef8d8d7f003e2bcd7000350 Mon Sep 17 00:00:00 2001 From: Fergal Gribben Date: Thu, 13 Jun 2024 17:24:41 +0100 Subject: [PATCH 06/10] Evict reorg logs immediately when determining latest block so that the operation is atomic --- .../evmregistry/v21/logprovider/buffer_v1.go | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index 3abfbb196f0..31af5cf1c40 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -106,10 +106,7 @@ func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { b.setUpkeepQueue(uid, buf) } - latestLogBlock, uniqueBlocks, reorgBlocks := b.blockStatistics(logs...) - if len(reorgBlocks) > 0 { - b.evictReorgdLogs(reorgBlocks) - } + latestLogBlock := b.latestBlockNumber(logs...) 
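// A standalone sketch (illustrative types, not the patched logBuffer itself) of the
// idea folded into latestBlockNumber above: while scanning incoming logs for the
// highest block number, compare each log's block hash against the hash previously
// recorded for that block; any mismatch marks the block as re-orged and its buffered
// logs are evicted in the same pass, so detection and eviction happen together.
package main

import "fmt"

type log struct {
    Block int64
    Hash  string
}

type buffer struct {
    blockHashes map[int64]string // last hash seen per block
    logsByBlock map[int64][]log  // buffered logs, keyed by block
}

func (b *buffer) latestBlockNumber(logs ...log) int64 {
    var latest int64
    reorged := map[int64]bool{}
    for _, l := range logs {
        if l.Block > latest {
            latest = l.Block
        }
        if prev, ok := b.blockHashes[l.Block]; ok && prev != l.Hash {
            reorged[l.Block] = true // same block number, different hash: re-org
        }
        b.blockHashes[l.Block] = l.Hash
    }
    for block := range reorged {
        delete(b.logsByBlock, block) // evict logs recorded for the stale hash
    }
    return latest
}

func main() {
    b := &buffer{blockHashes: map[int64]string{}, logsByBlock: map[int64][]log{}}
    b.blockHashes[5] = "0xaa"
    b.logsByBlock[5] = []log{{5, "0xaa"}}
    fmt.Println(b.latestBlockNumber(log{5, "0xbb"}, log{6, "0xcc"})) // prints 6; block 5 evicted
}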
if lastBlockSeen := b.lastBlockSeen.Load(); lastBlockSeen < latestLogBlock { b.lastBlockSeen.Store(latestLogBlock) @@ -117,8 +114,6 @@ func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { b.lggr.Warnw("enqueuing logs with a latest block older older than latest seen block", "logBlock", latestLogBlock, "lastBlockSeen", lastBlockSeen) } - b.trackBlockNumbersForUpkeep(uid, uniqueBlocks) - blockThreshold := b.lastBlockSeen.Load() - int64(b.opts.lookback.Load()) if blockThreshold <= 0 { blockThreshold = 1 @@ -129,21 +124,17 @@ func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { return buf.enqueue(blockThreshold, logs...) } -// blockStatistics returns the latest block number, a set of unique block numbers, and a set of reorgd blocks -// from the given logs -func (b *logBuffer) blockStatistics(logs ...logpoller.Log) (int64, map[int64]bool, map[int64]bool) { +func (b *logBuffer) latestBlockNumber(logs ...logpoller.Log) int64 { b.lock.Lock() defer b.lock.Unlock() var latest int64 - uniqueBlocks := map[int64]bool{} reorgBlocks := map[int64]bool{} for _, l := range logs { if l.BlockNumber > latest { latest = l.BlockNumber } - uniqueBlocks[l.BlockNumber] = true if hash, ok := b.blockHashes[l.BlockNumber]; ok { if hash != l.BlockHash.String() { reorgBlocks[l.BlockNumber] = true @@ -153,7 +144,11 @@ func (b *logBuffer) blockStatistics(logs ...logpoller.Log) (int64, map[int64]boo b.blockHashes[l.BlockNumber] = l.BlockHash.String() } - return latest, uniqueBlocks, reorgBlocks + if len(reorgBlocks) > 0 { + b.evictReorgdLogs(reorgBlocks) + } + + return latest } func (b *logBuffer) evictReorgdLogs(reorgBlocks map[int64]bool) { From 0d597b1ee8fee7bfcce686cc03caabc026a62663 Mon Sep 17 00:00:00 2001 From: Fergal Gribben Date: Thu, 13 Jun 2024 20:30:25 +0100 Subject: [PATCH 07/10] Clean logs and block numbers --- .../evmregistry/v21/logprovider/buffer_v1.go | 73 +++--------- .../v21/logprovider/buffer_v1_test.go | 109 ------------------ 2 files changed, 19 insertions(+), 163 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index 31af5cf1c40..dcdda45b4fb 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -73,9 +73,6 @@ type logBuffer struct { queues map[string]*upkeepLogQueue lock sync.RWMutex - // map for then number of times we have enqueued logs for a block number - enqueuedBlocks map[int64]map[string]int - enqueuedBlockLock sync.RWMutex queueIDs []string blockHashes map[int64]string dequeueCoordinator *dequeueCoordinator @@ -86,7 +83,6 @@ func NewLogBuffer(lggr logger.Logger, lookback, blockRate, logLimit uint32, dequ lggr: lggr.Named("KeepersRegistry.LogEventBufferV1"), opts: newLogBufferOptions(lookback, blockRate, logLimit), lastBlockSeen: new(atomic.Int64), - enqueuedBlocks: map[int64]map[string]int{}, queueIDs: []string{}, queues: make(map[string]*upkeepLogQueue), blockHashes: map[int64]string{}, @@ -119,8 +115,6 @@ func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { blockThreshold = 1 } - b.cleanupEnqueuedBlocks(blockThreshold) - return buf.enqueue(blockThreshold, logs...) 
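// A hedged, self-contained sketch (illustrative names, not the real upkeepLogQueue)
// of the block-number pruning this commit adds to clean() further down in this
// file's diff: block numbers below the threshold are collected, their log entries
// are deleted from the map, and only the remaining block numbers are kept, so both
// q.logs and q.blockNumbers stay bounded by the lookback window.
package main

import "fmt"

func pruneOldBlocks(logsByBlock map[int64][]string, blockNumbers []int64, blockThreshold int64) []int64 {
    old := make([]int64, 0)
    kept := make([]int64, 0, len(blockNumbers))
    for _, bn := range blockNumbers {
        if bn < blockThreshold {
            old = append(old, bn)
        } else {
            kept = append(kept, bn)
        }
    }
    for _, bn := range old {
        delete(logsByBlock, bn) // drop the stale block's logs entirely
    }
    return kept
}

func main() {
    logs := map[int64][]string{8: {"a"}, 9: {"b"}, 12: {"c"}}
    fmt.Println(pruneOldBlocks(logs, []int64{8, 9, 12}, 10)) // prints [12]
    fmt.Println(len(logs))                                   // prints 1
}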
} @@ -152,9 +146,6 @@ func (b *logBuffer) latestBlockNumber(logs ...logpoller.Log) int64 { } func (b *logBuffer) evictReorgdLogs(reorgBlocks map[int64]bool) { - b.lock.Lock() - defer b.lock.Unlock() - for blockNumber := range reorgBlocks { for _, queue := range b.queues { if _, ok := queue.logs[blockNumber]; ok { @@ -165,47 +156,6 @@ func (b *logBuffer) evictReorgdLogs(reorgBlocks map[int64]bool) { } } -func (b *logBuffer) cleanupEnqueuedBlocks(blockThreshold int64) { - b.enqueuedBlockLock.Lock() - defer b.enqueuedBlockLock.Unlock() - // clean up enqueued block counts - for block := range b.enqueuedBlocks { - if block < blockThreshold { - delete(b.enqueuedBlocks, block) - } - } -} - -// trackBlockNumbersForUpkeep keeps track of the number of times we enqueue logs for an upkeep, -// for a specific block number. The expectation is that we will only enqueue logs for an upkeep for a -// specific block number once, i.e. all logs for an upkeep for a block, will be enqueued in a single -// enqueue call. In the event that we see upkeep logs enqueued for a particular block more than once, -// we log a message. -func (b *logBuffer) trackBlockNumbersForUpkeep(uid *big.Int, uniqueBlocks map[int64]bool) { - b.enqueuedBlockLock.Lock() - defer b.enqueuedBlockLock.Unlock() - - if uid == nil { - return - } - - for blockNumber := range uniqueBlocks { - if blockNumbers, ok := b.enqueuedBlocks[blockNumber]; ok { - if upkeepBlockInstances, ok := blockNumbers[uid.String()]; ok { - blockNumbers[uid.String()] = upkeepBlockInstances + 1 - b.lggr.Debugw("enqueuing logs again for a previously seen block for this upkeep", "blockNumber", blockNumber, "numberOfEnqueues", b.enqueuedBlocks[blockNumber], "upkeepID", uid.String()) - } else { - blockNumbers[uid.String()] = 1 - } - b.enqueuedBlocks[blockNumber] = blockNumbers - } else { - b.enqueuedBlocks[blockNumber] = map[string]int{ - uid.String(): 1, - } - } - } -} - // Dequeue greedly pulls logs from the buffers. // Returns logs and the number of remaining logs in the buffer. 
func (b *logBuffer) Dequeue(start, end int64, upkeepLimit, maxResults int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) { @@ -348,12 +298,13 @@ type upkeepLogQueue struct { opts *logBufferOptions // logs is the buffer of logs for the upkeep - logs map[int64][]logpoller.Log + logs map[int64][]logpoller.Log + blockNumbers []int64 + // states keeps track of the state of the logs that are known to the queue // and the block number they were seen at - blockNumbers []int64 - states map[string]logTriggerStateEntry - lock sync.RWMutex + states map[string]logTriggerStateEntry + lock sync.RWMutex } func newUpkeepLogQueue(lggr logger.Logger, id *big.Int, opts *logBufferOptions) *upkeepLogQueue { @@ -493,12 +444,21 @@ func (q *upkeepLogQueue) clean(blockThreshold int64) int { windowLimit := int(q.opts.windowLimit.Load()) // helper variables to keep track of the current window capacity currentWindowCapacity, currentWindowStart := 0, int64(0) + oldBlockNumbers := make([]int64, 0) + blockNumbers := make([]int64, 0) + for _, blockNumber := range q.blockNumbers { var dropped, expired int logs := q.logs[blockNumber] updated := make([]logpoller.Log, 0) + if blockThreshold > blockNumber { + oldBlockNumbers = append(oldBlockNumbers, blockNumber) + } else { + blockNumbers = append(blockNumbers, blockNumber) + } + for _, l := range logs { if blockThreshold > l.BlockNumber { // old log, removed prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionExpired).Inc() @@ -540,6 +500,11 @@ func (q *upkeepLogQueue) clean(blockThreshold int64) int { } } + for _, blockNumber := range oldBlockNumbers { + delete(q.logs, blockNumber) + } + q.blockNumbers = blockNumbers + q.cleanStates(blockThreshold) return totalDropped diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go index 0c2da32dffa..dac05958bf0 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go @@ -110,8 +110,6 @@ func TestLogEventBufferV1_EnqueueViolations(t *testing.T) { logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x2"), LogIndex: 0}, ) - assert.Equal(t, 1, buf.enqueuedBlocks[2]["1"]) - assert.Equal(t, 1, buf.enqueuedBlocks[1]["2"]) assert.True(t, true, warnLogReceived) assert.True(t, true, debugLogReceived) }) @@ -148,9 +146,6 @@ func TestLogEventBufferV1_EnqueueViolations(t *testing.T) { logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3b"), LogIndex: 0}, ) - assert.Equal(t, 1, buf.enqueuedBlocks[2]["2"]) - assert.Equal(t, 1, buf.enqueuedBlocks[1]["1"]) - assert.Equal(t, 2, buf.enqueuedBlocks[3]["3"]) assert.True(t, true, logReceived) }) } @@ -708,107 +703,3 @@ func createDummyLogSequence(n, startIndex int, block int64, tx common.Hash) []lo } return logs } - -func Test_trackBlockNumbersForUpkeep(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1, NewDequeueCoordinator()) - - logBuffer := buf.(*logBuffer) - - for _, tc := range []struct { - uid *big.Int - uniqueBlocks map[int64]bool - wantEnqueuedBlocks map[int64]map[string]int - }{ - { - uid: big.NewInt(1), - uniqueBlocks: map[int64]bool{ - 1: true, - 2: true, - 3: true, - }, - wantEnqueuedBlocks: map[int64]map[string]int{ - 1: { - "1": 1, - }, - 2: { - "1": 1, - }, - 3: { - "1": 1, - }, - }, - }, - { - uid: big.NewInt(2), - uniqueBlocks: map[int64]bool{ - 1: true, - 
2: true, - 3: true, - }, - wantEnqueuedBlocks: map[int64]map[string]int{ - 1: { - "1": 1, - "2": 1, - }, - 2: { - "1": 1, - "2": 1, - }, - 3: { - "1": 1, - "2": 1, - }, - }, - }, - { - uid: big.NewInt(2), - uniqueBlocks: map[int64]bool{ - 3: true, - 4: true, - }, - wantEnqueuedBlocks: map[int64]map[string]int{ - 1: { - "1": 1, - "2": 1, - }, - 2: { - "1": 1, - "2": 1, - }, - 3: { - "1": 1, - "2": 2, - }, - 4: { - "2": 1, - }, - }, - }, - { - uniqueBlocks: map[int64]bool{ - 3: true, - 4: true, - }, - wantEnqueuedBlocks: map[int64]map[string]int{ - 1: { - "1": 1, - "2": 1, - }, - 2: { - "1": 1, - "2": 1, - }, - 3: { - "1": 1, - "2": 2, - }, - 4: { - "2": 1, - }, - }, - }, - } { - logBuffer.trackBlockNumbersForUpkeep(tc.uid, tc.uniqueBlocks) - assert.Equal(t, tc.wantEnqueuedBlocks, logBuffer.enqueuedBlocks) - } -} From 4c73949a35485720938beb81d93509d687d209a0 Mon Sep 17 00:00:00 2001 From: Fergal Gribben Date: Sun, 16 Jun 2024 19:34:03 +0100 Subject: [PATCH 08/10] Optimisation on append and cleanup --- .../evmregistry/v21/logprovider/buffer_v1.go | 21 +++++++------------ 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index dcdda45b4fb..c66c4350c02 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -225,21 +225,21 @@ func (b *logBuffer) SyncFilters(filterStore UpkeepFilterStore) error { b.lock.Lock() defer b.lock.Unlock() + var newQueueIDs []string + for _, upkeepID := range b.queueIDs { uid := new(big.Int) _, ok := uid.SetString(upkeepID, 10) if ok && !filterStore.Has(uid) { // remove upkeep that is not in the filter store delete(b.queues, upkeepID) - for i, v := range b.queueIDs { - if v == upkeepID { - b.queueIDs = append(b.queueIDs[:i], b.queueIDs[i+1:]...) 
- break - } - } + } else { + newQueueIDs = append(newQueueIDs, upkeepID) } } + b.queueIDs = newQueueIDs + return nil } @@ -255,14 +255,7 @@ func (b *logBuffer) setUpkeepQueue(uid *big.Int, buf *upkeepLogQueue) { b.lock.Lock() defer b.lock.Unlock() - found := false - for _, id := range b.queueIDs { - if id == uid.String() { - found = true - break - } - } - if !found { + if _, ok := b.queues[uid.String()]; !ok { b.queueIDs = append(b.queueIDs, uid.String()) } b.queues[uid.String()] = buf From 8ec492ee42d01b71aa26ab93361d4c54f0c97689 Mon Sep 17 00:00:00 2001 From: Fergal Gribben Date: Sun, 16 Jun 2024 21:29:16 +0100 Subject: [PATCH 09/10] Simplify the dequeue coordinator Track enqueued logs Add tests --- .../evmregistry/v21/logprovider/buffer_v1.go | 26 +- .../v21/logprovider/buffer_v1_test.go | 12 +- .../v21/logprovider/dequeue_coordinator.go | 129 +++--- .../logprovider/dequeue_coordinator_test.go | 403 ++++++++++++++++++ .../evmregistry/v21/logprovider/provider.go | 14 +- .../v21/logprovider/provider_test.go | 169 +++++++- 6 files changed, 656 insertions(+), 97 deletions(-) create mode 100644 core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator_test.go diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index c66c4350c02..c5cc6d2d6eb 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -115,7 +115,9 @@ func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { blockThreshold = 1 } - return buf.enqueue(blockThreshold, logs...) + b.dequeueCoordinator.Clean(blockThreshold, b.opts.blockRate.Load()) + + return buf.enqueue(b.dequeueCoordinator, b.opts.blockRate.Load(), blockThreshold, logs...) } func (b *logBuffer) latestBlockNumber(logs ...logpoller.Log) int64 { @@ -342,7 +344,7 @@ func (q *upkeepLogQueue) dequeue(start, end int64, limit int) ([]logpoller.Log, for _, blockNumber := range q.blockNumbers { if blockNumber >= start && blockNumber <= end { updatedLogs := make([]logpoller.Log, 0) - + blockResults := 0 for _, l := range q.logs[blockNumber] { if len(results) < limit { results = append(results, l) @@ -351,13 +353,16 @@ func (q *upkeepLogQueue) dequeue(start, end int64, limit int) ([]logpoller.Log, s.state = logTriggerStateDequeued q.states[lid] = s } + blockResults++ } else { remaining++ updatedLogs = append(updatedLogs, l) } } - q.logs[blockNumber] = updatedLogs + if blockResults > 0 { + q.logs[blockNumber] = updatedLogs + } } } @@ -373,11 +378,13 @@ func (q *upkeepLogQueue) dequeue(start, end int64, limit int) ([]logpoller.Log, // enqueue adds logs to the buffer and might also drop logs if the limit for the // given upkeep was exceeded. Additionally, it will drop logs that are older than blockThreshold. // Returns the number of logs that were added and number of logs that were dropped. 
-func (q *upkeepLogQueue) enqueue(blockThreshold int64, logsToAdd ...logpoller.Log) (int, int) { +func (q *upkeepLogQueue) enqueue(coordinator *dequeueCoordinator, blockRate uint32, blockThreshold int64, logsToAdd ...logpoller.Log) (int, int) { q.lock.Lock() defer q.lock.Unlock() var added int + + logsAdded := map[int64]int{} for _, log := range logsToAdd { if log.BlockNumber < blockThreshold { // q.lggr.Debugw("Skipping log from old block", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber, "logIndex", log.LogIndex) @@ -390,6 +397,13 @@ func (q *upkeepLogQueue) enqueue(blockThreshold int64, logsToAdd ...logpoller.Lo } q.states[lid] = logTriggerStateEntry{state: logTriggerStateEnqueued, block: log.BlockNumber} added++ + + if count, ok := logsAdded[log.BlockNumber]; ok { + logsAdded[log.BlockNumber] = count + 1 + } else { + logsAdded[log.BlockNumber] = 1 + } + if logList, ok := q.logs[log.BlockNumber]; ok { logList = append(logList, log) q.logs[log.BlockNumber] = logList @@ -400,6 +414,10 @@ func (q *upkeepLogQueue) enqueue(blockThreshold int64, logsToAdd ...logpoller.Lo } } + for blockNumber, logsAddedForBlock := range logsAdded { + coordinator.CountEnqueuedLogsForWindow(blockNumber, blockRate, logsAddedForBlock) + } + var dropped int if added > 0 { q.orderLogs() diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go index dac05958bf0..47c54ba06ed 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go @@ -504,10 +504,12 @@ func TestLogEventBufferV1_Enqueue(t *testing.T) { } func TestLogEventBufferV1_UpkeepQueue(t *testing.T) { + dequeueCoordinator := NewDequeueCoordinator() + t.Run("enqueue dequeue", func(t *testing.T) { q := newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) - added, dropped := q.enqueue(10, logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 0}) + added, dropped := q.enqueue(dequeueCoordinator, 1, 10, logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 0}) require.Equal(t, 0, dropped) require.Equal(t, 1, added) require.Equal(t, 1, q.sizeOfRange(1, 20)) @@ -519,7 +521,7 @@ func TestLogEventBufferV1_UpkeepQueue(t *testing.T) { t.Run("enqueue with limits", func(t *testing.T) { q := newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) - added, dropped := q.enqueue(10, + added, dropped := q.enqueue(dequeueCoordinator, 1, 10, createDummyLogSequence(15, 0, 20, common.HexToHash("0x20"))..., ) require.Equal(t, 5, dropped) @@ -529,7 +531,7 @@ func TestLogEventBufferV1_UpkeepQueue(t *testing.T) { t.Run("dequeue with limits", func(t *testing.T) { q := newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 3)) - added, dropped := q.enqueue(10, + added, dropped := q.enqueue(dequeueCoordinator, 1, 10, logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 0}, logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 1}, logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 10}, @@ -544,6 +546,8 @@ func TestLogEventBufferV1_UpkeepQueue(t *testing.T) { } func TestLogEventBufferV1_UpkeepQueue_sizeOfRange(t *testing.T) { + dequeueCoordinator := NewDequeueCoordinator() + t.Run("empty", func(t *testing.T) { q := 
newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) @@ -553,7 +557,7 @@ func TestLogEventBufferV1_UpkeepQueue_sizeOfRange(t *testing.T) { t.Run("happy path", func(t *testing.T) { q := newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) - added, dropped := q.enqueue(10, logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 0}) + added, dropped := q.enqueue(dequeueCoordinator, 1, 10, logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 0}) require.Equal(t, 0, dropped) require.Equal(t, 1, added) require.Equal(t, 0, q.sizeOfRange(1, 10)) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go index 6134d414bc6..48ad33345cd 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator.go @@ -1,55 +1,54 @@ package logprovider import ( - "math/big" "sync" ) type DequeueCoordinator interface { - // DequeueBlockWindow identifies a block window ready for processing between the given start and latest block numbers. + // CountEnqueuedLogsForWindow tracks how many logs are added for a particular block during the enqueue process. + CountEnqueuedLogsForWindow(block int64, blockRate uint32, added int) + // GetDequeueBlockWindow identifies a block window ready for processing between the given start and latest block numbers. // It prioritizes windows that need to have the minimum guaranteed logs dequeued before considering windows with // remaining logs to be dequeued, as a best effort. - DequeueBlockWindow(start int64, latestBlock int64, blockRate int) (int64, int64, bool) - // GetUpkeepSelector returns a function that accepts an upkeep ID, and performs a modulus against the number of - // iterations, and compares the result against the current iteration. When this comparison returns true, the - // upkeep is selected for the dequeuing. This means that, for a given set of upkeeps, a different subset of - // upkeeps will be dequeued for each iteration once only, and, across all iterations, all upkeeps will be - // dequeued once. - GetUpkeepSelector(startWindow int64, logLimitLow, iterations, currentIteration int) func(id *big.Int) bool - // TrackUpkeeps tracks how many times an upkeep has been dequeued for a given block window. - TrackUpkeeps(startWindow int64, upkeepID *big.Int) - // UpdateBlockWindow updates the status of a block window based on the number of logs dequeued, + GetDequeueBlockWindow(start int64, latestBlock int64, blockRate int, minGuarantee int) (int64, int64, bool) + // CountDequeuedLogsForWindow updates the status of a block window based on the number of logs dequeued, // remaining logs, and the number of upkeeps. This function tracks remaining and dequeued logs for the specified // block window, determines if a block window has had the minimum number of guaranteed logs dequeued, and marks a // window as not ready if there are not yet any logs available to dequeue from the window. - UpdateBlockWindow(startWindow int64, logs, remaining, numberOfUpkeeps, logLimitLow int) + CountDequeuedLogsForWindow(startWindow int64, logs, minGuaranteedLogs int) // MarkReorg handles the detection of a reorg by resetting the state of the affected block window. 
It ensures that // upkeeps within the specified block window are marked as not having the minimum number of guaranteed logs dequeued. MarkReorg(block int64, blockRate uint32) + // Clean removes any data that is older than the block window of the blockThreshold from the dequeueCoordinator + Clean(blockThreshold int64, blockRate uint32) } type dequeueCoordinator struct { dequeuedMinimum map[int64]bool - notReady map[int64]bool - remainingLogs map[int64]int + enqueuedLogs map[int64]int dequeuedLogs map[int64]int completeWindows map[int64]bool - dequeuedUpkeeps map[int64]map[string]int mu sync.Mutex } func NewDequeueCoordinator() *dequeueCoordinator { return &dequeueCoordinator{ dequeuedMinimum: map[int64]bool{}, - notReady: map[int64]bool{}, - remainingLogs: map[int64]int{}, + enqueuedLogs: map[int64]int{}, dequeuedLogs: map[int64]int{}, completeWindows: map[int64]bool{}, - dequeuedUpkeeps: map[int64]map[string]int{}, } } -func (c *dequeueCoordinator) DequeueBlockWindow(start int64, latestBlock int64, blockRate int) (int64, int64, bool) { +func (c *dequeueCoordinator) CountEnqueuedLogsForWindow(block int64, blockRate uint32, added int) { + c.mu.Lock() + defer c.mu.Unlock() + + startWindow, _ := getBlockWindow(block, int(blockRate)) + c.enqueuedLogs[startWindow] += added +} + +func (c *dequeueCoordinator) GetDequeueBlockWindow(start int64, latestBlock int64, blockRate int, minGuarantee int) (int64, int64, bool) { c.mu.Lock() defer c.mu.Unlock() @@ -58,11 +57,14 @@ func (c *dequeueCoordinator) DequeueBlockWindow(start int64, latestBlock int64, startWindow, end := getBlockWindow(i, blockRate) if latestBlock >= end { c.completeWindows[startWindow] = true - } else if c.notReady[startWindow] { // the window is incomplete and has no logs to provide as of yet + } else if c.enqueuedLogs[startWindow] <= 0 { // the latest window is incomplete and has no logs to provide yet break } - if hasDequeued, ok := c.dequeuedMinimum[startWindow]; !ok || !hasDequeued { + enqueuedLogs := c.enqueuedLogs[startWindow] + dequeuedLogs := c.dequeuedLogs[startWindow] + + if enqueuedLogs > 0 && dequeuedLogs < minGuarantee { return startWindow, end, true } } @@ -71,76 +73,29 @@ func (c *dequeueCoordinator) DequeueBlockWindow(start int64, latestBlock int64, for i := start; i < latestBlock; i += int64(blockRate) { startWindow, end := getBlockWindow(i, blockRate) - if remainingLogs, ok := c.remainingLogs[startWindow]; ok { - if remainingLogs > 0 { - return startWindow, end, true - } + if remainingLogs, ok := c.enqueuedLogs[startWindow]; ok && remainingLogs > 0 { + return startWindow, end, true } } return 0, 0, false } -func (c *dequeueCoordinator) GetUpkeepSelector(startWindow int64, logLimitLow, iterations, currentIteration int) func(id *big.Int) bool { +func (c *dequeueCoordinator) CountDequeuedLogsForWindow(startWindow int64, logs, minGuaranteedLogs int) { c.mu.Lock() defer c.mu.Unlock() - bestEffort := false - - if hasDequeued, ok := c.dequeuedMinimum[startWindow]; ok { - if hasDequeued { - bestEffort = true - } - } - - return func(id *big.Int) bool { - // query the map of block number to upkeep ID for dequeued count here when the block window is incomplete - dequeueUpkeep := true - if !bestEffort { - if windowUpkeeps, ok := c.dequeuedUpkeeps[startWindow]; ok { - if windowUpkeeps[id.String()] >= logLimitLow { - dequeueUpkeep = false - } - } - } - return dequeueUpkeep && id.Int64()%int64(iterations) == int64(currentIteration) - } -} - -func (c *dequeueCoordinator) TrackUpkeeps(startWindow int64, upkeepID *big.Int) { - 
c.mu.Lock() - defer c.mu.Unlock() - - if windowUpkeeps, ok := c.dequeuedUpkeeps[startWindow]; ok { - windowUpkeeps[upkeepID.String()] = windowUpkeeps[upkeepID.String()] + 1 - c.dequeuedUpkeeps[startWindow] = windowUpkeeps - } else { - c.dequeuedUpkeeps[startWindow] = map[string]int{ - upkeepID.String(): 1, - } - } -} - -func (c *dequeueCoordinator) UpdateBlockWindow(startWindow int64, logs, remaining, numberOfUpkeeps, logLimitLow int) { - c.mu.Lock() - defer c.mu.Unlock() - - c.remainingLogs[startWindow] = remaining + c.enqueuedLogs[startWindow] -= logs c.dequeuedLogs[startWindow] += logs - if isComplete, ok := c.completeWindows[startWindow]; ok { - if isComplete { + if c.completeWindows[startWindow] { + if c.enqueuedLogs[startWindow] <= 0 || c.dequeuedLogs[startWindow] >= minGuaranteedLogs { // if the window is complete, and there are no more logs, then we have to consider this as min dequeued, even if no logs were dequeued - if c.remainingLogs[startWindow] == 0 || c.dequeuedLogs[startWindow] >= numberOfUpkeeps*logLimitLow { - c.dequeuedMinimum[startWindow] = true - } - } else if c.dequeuedLogs[startWindow] >= numberOfUpkeeps*logLimitLow { // this assumes we don't dequeue the same upkeeps more than logLimitLow in min commitment c.dequeuedMinimum[startWindow] = true } - } else if c.dequeuedLogs[startWindow] >= numberOfUpkeeps*logLimitLow { // this assumes we don't dequeue the same upkeeps more than logLimitLow in min commitment + } else if c.dequeuedLogs[startWindow] >= minGuaranteedLogs { + // if the window is not complete, but we were able to dequeue min guaranteed logs from the blocks that were available c.dequeuedMinimum[startWindow] = true - } else if logs == 0 && remaining == 0 { - c.notReady[startWindow] = true } } @@ -150,8 +105,22 @@ func (c *dequeueCoordinator) MarkReorg(block int64, blockRate uint32) { startWindow, _ := getBlockWindow(block, int(blockRate)) c.dequeuedMinimum[startWindow] = false - // TODO instead of wiping the count for all upkeeps, should we wipe for upkeeps only impacted by the reorg? 
- for upkeepID := range c.dequeuedUpkeeps[startWindow] { - c.dequeuedUpkeeps[startWindow][upkeepID] = 0 + c.enqueuedLogs[startWindow] = 0 + c.dequeuedLogs[startWindow] = 0 +} + +func (c *dequeueCoordinator) Clean(blockThreshold int64, blockRate uint32) { + c.mu.Lock() + defer c.mu.Unlock() + + blockThresholdStartWindow, _ := getBlockWindow(blockThreshold, int(blockRate)) + + for block := range c.enqueuedLogs { + if blockThresholdStartWindow > block { + delete(c.enqueuedLogs, block) + delete(c.dequeuedLogs, block) + delete(c.dequeuedMinimum, block) + delete(c.completeWindows, block) + } } } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator_test.go new file mode 100644 index 00000000000..eae892720dc --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/dequeue_coordinator_test.go @@ -0,0 +1,403 @@ +package logprovider + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDequeueCoordinator_DequeueBlockWindow(t *testing.T) { + t.Run("an empty dequeue coordinator should tell us not to dequeue", func(t *testing.T) { + c := NewDequeueCoordinator() + + start, end, canDequeue := c.GetDequeueBlockWindow(1, 10, 1, 10) + assert.Equal(t, int64(0), start) + assert.Equal(t, int64(0), end) + assert.Equal(t, false, canDequeue) + }) + + t.Run("a populated dequeue coordinator should tell us to dequeue the first window with logs", func(t *testing.T) { + c := NewDequeueCoordinator() + + c.CountEnqueuedLogsForWindow(3, 1, 0) + c.CountEnqueuedLogsForWindow(4, 1, 10) + c.CountEnqueuedLogsForWindow(5, 1, 10) + + start, end, canDequeue := c.GetDequeueBlockWindow(1, 10, 1, 10) + + // block 4 is the first block with no logs dequeued yet + assert.Equal(t, int64(4), start) + assert.Equal(t, int64(4), end) + assert.Equal(t, true, canDequeue) + }) + + t.Run("a populated dequeue coordinator should tell us to dequeue the next window with logs", func(t *testing.T) { + c := NewDequeueCoordinator() + + c.CountEnqueuedLogsForWindow(3, 1, 0) + c.CountEnqueuedLogsForWindow(4, 1, 10) + c.CountEnqueuedLogsForWindow(5, 1, 10) + + start, end, canDequeue := c.GetDequeueBlockWindow(1, 10, 1, 10) + + // block 4 is the first block with no logs dequeued yet + assert.Equal(t, int64(4), start) + assert.Equal(t, int64(4), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 10, 1, 10) + + // block 4 has been dequeued, so block 5 is the next window to dequeue + assert.Equal(t, int64(5), start) + assert.Equal(t, int64(5), end) + assert.Equal(t, true, canDequeue) + }) + + t.Run("a populated dequeue coordinator with minimum dequeue met should tell us to dequeue the next window with logs as best effort", func(t *testing.T) { + c := NewDequeueCoordinator() + + c.CountEnqueuedLogsForWindow(3, 1, 0) + c.CountEnqueuedLogsForWindow(4, 1, 20) + c.CountEnqueuedLogsForWindow(5, 1, 20) + + start, end, canDequeue := c.GetDequeueBlockWindow(1, 10, 1, 10) + + // block 4 is the first block with no logs dequeued yet + assert.Equal(t, int64(4), start) + assert.Equal(t, int64(4), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 10, 1, 10) + + // block 4 has been dequeued, so block 5 is the next window to dequeue + assert.Equal(t, int64(5), start) + assert.Equal(t, int64(5), 
end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 10, 1, 10) + + // all windows have had minimum logs dequeued, so we go back to block 4 to dequeue as best effort + assert.Equal(t, int64(4), start) + assert.Equal(t, int64(4), end) + assert.Equal(t, true, canDequeue) + }) + + t.Run("a fully exhausted dequeue coordinator should not tell us to dequeue", func(t *testing.T) { + c := NewDequeueCoordinator() + + c.CountEnqueuedLogsForWindow(3, 1, 0) + c.CountEnqueuedLogsForWindow(4, 1, 20) + c.CountEnqueuedLogsForWindow(5, 1, 20) + + start, end, canDequeue := c.GetDequeueBlockWindow(1, 10, 1, 10) + + // block 4 is the first block with no logs dequeued yet + assert.Equal(t, int64(4), start) + assert.Equal(t, int64(4), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 10, 1, 10) + + // block 4 has been dequeued, so block 5 is the next window to dequeue + assert.Equal(t, int64(5), start) + assert.Equal(t, int64(5), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 10, 1, 10) + + // all windows have had minimum logs dequeued, so we go back to block 4 to dequeue as best effort + assert.Equal(t, int64(4), start) + assert.Equal(t, int64(4), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 10, 1, 10) + + // block 4 has been fully dequeued, so we dequeue block 5 + assert.Equal(t, int64(5), start) + assert.Equal(t, int64(5), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 10, 1, 10) + + // all block windows have been fully dequeued so the coordinator tells us not to dequeue + assert.Equal(t, int64(0), start) + assert.Equal(t, int64(0), end) + assert.Equal(t, false, canDequeue) + }) + + t.Run("an incomplete latest window without logs to dequeue gets passed over and best effort is executed", func(t *testing.T) { + c := NewDequeueCoordinator() + + c.CountEnqueuedLogsForWindow(0, 4, 10) + c.CountEnqueuedLogsForWindow(1, 4, 10) + c.CountEnqueuedLogsForWindow(2, 4, 10) + c.CountEnqueuedLogsForWindow(3, 4, 10) + c.CountEnqueuedLogsForWindow(4, 4, 0) + c.CountEnqueuedLogsForWindow(5, 4, 0) + c.CountEnqueuedLogsForWindow(6, 4, 0) + + start, end, canDequeue := c.GetDequeueBlockWindow(1, 6, 4, 10) + + assert.Equal(t, int64(0), start) + assert.Equal(t, int64(3), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 6, 4, 10) + + assert.Equal(t, int64(0), start) + assert.Equal(t, int64(3), end) + assert.Equal(t, true, canDequeue) + + // multiple dequeues in best effort now exhaust block window 0 + c.CountDequeuedLogsForWindow(start, 10, 10) + c.CountDequeuedLogsForWindow(start, 10, 10) + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 6, 4, 10) + + assert.Equal(t, int64(0), start) + assert.Equal(t, int64(0), end) + assert.Equal(t, false, canDequeue) + }) + + t.Run("an incomplete latest window with min logs to dequeue gets dequeued", func(t *testing.T) { + c := NewDequeueCoordinator() + + c.CountEnqueuedLogsForWindow(0, 4, 10) + c.CountEnqueuedLogsForWindow(1, 4, 10) + c.CountEnqueuedLogsForWindow(2, 4, 10) + 
c.CountEnqueuedLogsForWindow(3, 4, 10) + c.CountEnqueuedLogsForWindow(4, 4, 10) + c.CountEnqueuedLogsForWindow(5, 4, 0) + c.CountEnqueuedLogsForWindow(6, 4, 0) + + start, end, canDequeue := c.GetDequeueBlockWindow(1, 6, 4, 10) + + assert.Equal(t, int64(0), start) + assert.Equal(t, int64(3), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 6, 4, 10) + + assert.Equal(t, int64(4), start) + assert.Equal(t, int64(7), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 6, 4, 10) + + assert.Equal(t, int64(0), start) + assert.Equal(t, int64(3), end) + assert.Equal(t, true, canDequeue) + }) + + t.Run("an incomplete latest window with less than min logs to dequeue gets dequeued", func(t *testing.T) { + c := NewDequeueCoordinator() + + c.CountEnqueuedLogsForWindow(0, 4, 10) + c.CountEnqueuedLogsForWindow(1, 4, 10) + c.CountEnqueuedLogsForWindow(2, 4, 10) + c.CountEnqueuedLogsForWindow(3, 4, 10) + c.CountEnqueuedLogsForWindow(4, 4, 5) + c.CountEnqueuedLogsForWindow(5, 4, 0) + c.CountEnqueuedLogsForWindow(6, 4, 0) + + start, end, canDequeue := c.GetDequeueBlockWindow(1, 6, 4, 10) + + assert.Equal(t, int64(0), start) + assert.Equal(t, int64(3), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 6, 4, 10) + + assert.Equal(t, int64(4), start) + assert.Equal(t, int64(7), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 5, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 6, 4, 10) + + assert.Equal(t, int64(0), start) + assert.Equal(t, int64(3), end) + assert.Equal(t, true, canDequeue) + + // now that the second block window is complete and has enough logs to meet min dequeue, we revert to min guaranteed dequeue + c.CountEnqueuedLogsForWindow(7, 4, 5) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 6, 4, 10) + + assert.Equal(t, int64(4), start) + assert.Equal(t, int64(7), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 5, 10) + + // now we revert to best effort dequeue for the first block window + start, end, canDequeue = c.GetDequeueBlockWindow(1, 6, 4, 10) + + assert.Equal(t, int64(0), start) + assert.Equal(t, int64(3), end) + assert.Equal(t, true, canDequeue) + }) + + t.Run("a reorg causes us to revert to min guaranteed log dequeue", func(t *testing.T) { + c := NewDequeueCoordinator() + + c.CountEnqueuedLogsForWindow(3, 1, 0) + c.CountEnqueuedLogsForWindow(4, 1, 20) + c.CountEnqueuedLogsForWindow(5, 1, 20) + c.CountEnqueuedLogsForWindow(6, 1, 20) + c.CountEnqueuedLogsForWindow(7, 1, 20) + + start, end, canDequeue := c.GetDequeueBlockWindow(1, 10, 1, 10) + + // block 4 is the first block with no logs dequeued yet + assert.Equal(t, int64(4), start) + assert.Equal(t, int64(4), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 10, 1, 10) + + // block 4 has been dequeued, so block 5 is the next window to dequeue + assert.Equal(t, int64(5), start) + assert.Equal(t, int64(5), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 10, 1, 10) + + // block 5 has been dequeued, so block 6 is the next window to dequeue + assert.Equal(t, int64(6), start) + assert.Equal(t, 
int64(6), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + // reorg happens and only block 4 has been re orgd + c.MarkReorg(4, 1) + + c.CountEnqueuedLogsForWindow(4, 1, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 10, 1, 10) + + // we now have to go back to block 4 to dequeue minimum guaranteed logs + assert.Equal(t, int64(4), start) + assert.Equal(t, int64(4), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 10, 1, 10) + + // now that block 4 has been min dequeued, we jump forward to block 7 to continue min dequeue + assert.Equal(t, int64(7), start) + assert.Equal(t, int64(7), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(start, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 10, 1, 10) + + // now that all block windows have had min logs dequeued, we go back to the earliest block window with remaining logs to dequeue best effort, i.e. block window 5 + assert.Equal(t, int64(5), start) + assert.Equal(t, int64(5), end) + assert.Equal(t, true, canDequeue) + }) + + t.Run("cleaning deletes data from the coordinator older than the block window of block threshold", func(t *testing.T) { + c := NewDequeueCoordinator() + + c.CountEnqueuedLogsForWindow(1, 1, 20) + c.CountEnqueuedLogsForWindow(2, 1, 20) + c.CountEnqueuedLogsForWindow(3, 1, 20) + c.CountEnqueuedLogsForWindow(4, 1, 20) + c.CountEnqueuedLogsForWindow(5, 1, 20) + c.CountEnqueuedLogsForWindow(6, 1, 20) + c.CountEnqueuedLogsForWindow(7, 1, 20) + c.CountEnqueuedLogsForWindow(8, 1, 20) + c.CountEnqueuedLogsForWindow(9, 1, 20) + + start, end, canDequeue := c.GetDequeueBlockWindow(1, 10, 1, 10) + + assert.Equal(t, int64(1), start) + assert.Equal(t, int64(1), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(1, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 10, 1, 10) + + assert.Equal(t, int64(2), start) + assert.Equal(t, int64(2), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(2, 10, 10) + + start, end, canDequeue = c.GetDequeueBlockWindow(1, 10, 1, 10) + + assert.Equal(t, int64(3), start) + assert.Equal(t, int64(3), end) + assert.Equal(t, true, canDequeue) + + c.CountDequeuedLogsForWindow(3, 10, 10) + + assert.Equal(t, 10, c.enqueuedLogs[1]) + assert.Equal(t, 10, c.enqueuedLogs[2]) + assert.Equal(t, 10, c.enqueuedLogs[3]) + + assert.Equal(t, 10, c.dequeuedLogs[1]) + assert.Equal(t, 10, c.dequeuedLogs[2]) + assert.Equal(t, 10, c.dequeuedLogs[3]) + + assert.Equal(t, true, c.dequeuedMinimum[1]) + assert.Equal(t, true, c.dequeuedMinimum[2]) + assert.Equal(t, true, c.dequeuedMinimum[3]) + + assert.Equal(t, true, c.completeWindows[1]) + assert.Equal(t, true, c.completeWindows[2]) + assert.Equal(t, true, c.completeWindows[3]) + + c.Clean(3, 1) + + assert.Equal(t, 0, c.enqueuedLogs[1]) + assert.Equal(t, 0, c.enqueuedLogs[2]) + assert.Equal(t, 10, c.enqueuedLogs[3]) + + assert.Equal(t, 0, c.dequeuedLogs[1]) + assert.Equal(t, 0, c.dequeuedLogs[2]) + assert.Equal(t, 10, c.dequeuedLogs[3]) + + assert.Equal(t, false, c.dequeuedMinimum[1]) + assert.Equal(t, false, c.dequeuedMinimum[2]) + assert.Equal(t, true, c.dequeuedMinimum[3]) + + assert.Equal(t, false, c.completeWindows[1]) + assert.Equal(t, false, c.completeWindows[2]) + assert.Equal(t, true, c.completeWindows[3]) + }) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index a9a0f2a1561..8c09ead1129 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -311,27 +311,29 @@ func (p *logEventProvider) getLogsFromBuffer(latestBlock int64) []ocr2keepers.Up } for len(payloads) < maxResults { - startWindow, end, canDequeue := p.dequeueCoordinator.DequeueBlockWindow(start, latestBlock, blockRate) + startWindow, end, canDequeue := p.dequeueCoordinator.GetDequeueBlockWindow(start, latestBlock, blockRate, numOfUpkeeps*logLimitLow) if !canDequeue { p.lggr.Debugw("Nothing to dequeue", "start", start, "latestBlock", latestBlock) break } - upkeepSelectorFn := p.dequeueCoordinator.GetUpkeepSelector(startWindow, logLimitLow, p.iterations, p.currentIteration) + upkeepSelectorFn := func(id *big.Int) bool { + return id.Int64()%int64(p.iterations) == int64(p.currentIteration) + } logs, remaining := p.bufferV1.Dequeue(startWindow, end, logLimitLow, maxResults-len(payloads), upkeepSelectorFn) if len(logs) > 0 { - p.lggr.Debugw("Dequeued logs", "start", start, "latestBlock", latestBlock, "logs", len(logs)) + p.lggr.Debugw("Dequeued logs", "start", start, "latestBlock", latestBlock, "logs", len(logs), "remaining", remaining) + for _, l := range logs { payload, err := p.createPayload(l.ID, l.Log) if err == nil { payloads = append(payloads, payload) } - p.dequeueCoordinator.TrackUpkeeps(startWindow, l.ID) } } - p.dequeueCoordinator.UpdateBlockWindow(startWindow, len(logs), remaining, numOfUpkeeps, logLimitLow) + p.dequeueCoordinator.CountDequeuedLogsForWindow(startWindow, len(logs), numOfUpkeeps*logLimitLow) } p.currentIteration++ default: @@ -344,6 +346,8 @@ func (p *logEventProvider) getLogsFromBuffer(latestBlock int64) []ocr2keepers.Up } } + p.lggr.Debugw("getLogsFromBuffer returning payloads", "payloads", len(payloads)) + return payloads } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go index 47d3e5e4af6..b1a71a10fb0 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go @@ -1609,7 +1609,7 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { dequeueCoordinator := provider.dequeueCoordinator.(*dequeueCoordinator) assert.Equal(t, false, dequeueCoordinator.dequeuedMinimum[0]) - assert.Equal(t, true, dequeueCoordinator.notReady[0]) + //assert.Equal(t, true, dequeueCoordinator.notReady[0]) blockWindowCounts = map[int64]int{} @@ -1638,8 +1638,7 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 0, len(payloads)) - assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[0]) // now that the window is complete, it should be marked as dequeued minimum - assert.Equal(t, true, dequeueCoordinator.notReady[0]) + //assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[0]) // now that the window is complete, it should be marked as dequeued minimum provider.poller = &mockLogPoller{ LatestBlockFn: func(ctx context.Context) (int64, error) { @@ -1720,7 +1719,7 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 180, blockWindowCounts[4]) assert.Equal(t, 190, blockWindowCounts[8]) - assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[0]) + assert.Equal(t, false, 
dequeueCoordinator.dequeuedMinimum[0]) assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[4]) assert.Equal(t, true, dequeueCoordinator.dequeuedMinimum[8]) }) @@ -2300,6 +2299,168 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { assert.Equal(t, 40, blockWindowCounts[101]) assert.Equal(t, 40, blockWindowCounts[102]) }) + + t.Run("sparsely populated blocks", func(t *testing.T) { + oldMaxPayloads := MaxPayloads + MaxPayloads = 10 + defer func() { + MaxPayloads = oldMaxPayloads + }() + + upkeepIDs := []*big.Int{ + big.NewInt(1), + big.NewInt(2), + big.NewInt(3), + big.NewInt(4), + big.NewInt(5), + } + + filterStore := NewUpkeepFilterStore() + + upkeepOmittedOnBlocks := map[int][]int64{ + 1: {5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100}, // upkeep 1 won't have logs on 20 blocks + 2: {2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100}, // upkeep 2 won't have logs on 50 blocks + 3: {3, 13, 23, 33, 43, 53, 63, 73, 83, 93}, // upkeep 3 won't appear on 10 blocks + 4: {1, 25, 50, 75, 100}, // upkeep 4 won't appear on 5 blocks + 5: {}, // upkeep 5 appears on all blocks + } + + callCount := 0 + // this gets called once per upkeep ID + logGenerator := func(start, end int64) []logpoller.Log { + callCount++ + var res []logpoller.Log + outer: + for i := start; i < end; i++ { + for _, skip := range upkeepOmittedOnBlocks[callCount] { + if skip == i+1 { + continue outer + } + } + res = append(res, logpoller.Log{ + LogIndex: i, + BlockHash: common.HexToHash(fmt.Sprintf("%d", i+1)), + BlockNumber: i + 1, + }) + } + return res + } + + // use a log poller that will create logs for the queried block range + logPoller := &mockLogPoller{ + LatestBlockFn: func(ctx context.Context) (int64, error) { + return 100, nil + }, + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return logGenerator(start, end), nil + }, + } + + // prepare the filter store with upkeeps + for _, upkeepID := range upkeepIDs { + filterStore.AddActiveUpkeeps( + upkeepFilter{ + addr: []byte(upkeepID.String()), + upkeepID: upkeepID, + topics: []common.Hash{ + common.HexToHash(upkeepID.String()), + }, + }, + ) + } + + opts := NewOptions(200, big.NewInt(1)) + opts.BufferVersion = "v1" + + provider := NewLogProvider(logger.TestLogger(t), logPoller, big.NewInt(1), &mockedPacker{}, filterStore, opts) + + ctx := context.Background() + + err := provider.ReadLogs(ctx, upkeepIDs...) 
+ assert.NoError(t, err) + + assert.Equal(t, 5, provider.bufferV1.NumOfUpkeeps()) + + bufV1 := provider.bufferV1.(*logBuffer) + + blockWindowCounts := map[int64]int{} + + for _, q := range bufV1.queues { + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) + } + } + + assert.Equal(t, 4, blockWindowCounts[1]) + assert.Equal(t, 4, blockWindowCounts[2]) + assert.Equal(t, 4, blockWindowCounts[3]) + assert.Equal(t, 4, blockWindowCounts[4]) + assert.Equal(t, 4, blockWindowCounts[5]) + assert.Equal(t, 4, blockWindowCounts[6]) + assert.Equal(t, 5, blockWindowCounts[7]) // block 7 is the first block to contain 1 log for all upkeeps + + assert.Equal(t, 80, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 50, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 90, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 100, countLogs(bufV1.queues["5"].logs)) + + payloads, err := provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps based on availability + assert.Equal(t, 77, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 48, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 88, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 94, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 98, countLogs(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) + } + } + + assert.Equal(t, 0, blockWindowCounts[1]) + assert.Equal(t, 0, blockWindowCounts[2]) + assert.Equal(t, 2, blockWindowCounts[3]) + assert.Equal(t, 4, blockWindowCounts[4]) + assert.Equal(t, 4, blockWindowCounts[5]) + assert.Equal(t, 4, blockWindowCounts[6]) + assert.Equal(t, 5, blockWindowCounts[7]) // block 7 is the first block to contain 1 log for all upkeeps + + payloads, err = provider.GetLatestPayloads(ctx) + assert.NoError(t, err) + + assert.Equal(t, 10, len(payloads)) + + // the dequeue is evenly distributed across the 5 upkeeps based on availability + assert.Equal(t, 76, countLogs(bufV1.queues["1"].logs)) + assert.Equal(t, 47, countLogs(bufV1.queues["2"].logs)) + assert.Equal(t, 86, countLogs(bufV1.queues["3"].logs)) + assert.Equal(t, 91, countLogs(bufV1.queues["4"].logs)) + assert.Equal(t, 95, countLogs(bufV1.queues["5"].logs)) + + blockWindowCounts = map[int64]int{} + + for _, q := range bufV1.queues { + for blockNumber, logs := range q.logs { + blockWindowCounts[blockNumber] += len(logs) + } + } + + assert.Equal(t, 0, blockWindowCounts[1]) + assert.Equal(t, 0, blockWindowCounts[2]) + assert.Equal(t, 0, blockWindowCounts[3]) + assert.Equal(t, 0, blockWindowCounts[4]) + assert.Equal(t, 0, blockWindowCounts[5]) + assert.Equal(t, 4, blockWindowCounts[6]) + assert.Equal(t, 5, blockWindowCounts[7]) // block 7 is the first block to contain 1 log for all upkeeps + }) } type mockedPacker struct { From 65eb2f4d38379bcb876f02dca0e69c9e6b7f8251 Mon Sep 17 00:00:00 2001 From: Fergal Gribben Date: Thu, 20 Jun 2024 16:05:37 +0100 Subject: [PATCH 10/10] WIP --- .../plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go | 1 + .../plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index c5cc6d2d6eb..086bb0d1f48 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -96,6 +96,7 @@ func NewLogBuffer(lggr logger.Logger, lookback, blockRate, logLimit uint32, dequ // All logs for an upkeep on a particular block will be enqueued in a single Enqueue call. // Returns the number of logs that were added and number of logs that were dropped. func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { + // TODO: lock the whole enqueue so that we don't evict good logs buf, ok := b.getUpkeepQueue(uid) if !ok || buf == nil { buf = newUpkeepLogQueue(b.lggr, uid, b.opts) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go index 64833f9269b..ad33c0b226a 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -95,7 +95,7 @@ func (o *LogTriggersOptions) defaultBlockRate() uint32 { func (o *LogTriggersOptions) defaultLogLimit() uint32 { switch o.chainID.Int64() { case 42161, 421613, 421614: // Arbitrum - return 1 + return 1 // TODO should this be 2 case 1, 4, 5, 42, 11155111: // Eth return 20 case 10, 420, 56, 97, 137, 80001, 43113, 43114, 8453, 84531: // Optimism, BSC, Polygon, Avax, Base