From 601aa07001bae0ae1ebad04cf83b0feb8956e15a Mon Sep 17 00:00:00 2001 From: Sam Date: Thu, 22 Feb 2024 12:30:38 -0500 Subject: [PATCH 1/6] Mercury 1.0 (parallel composition) (#10810) * Implement Data Streams plugin * Try to make tests a bit more deterministic * lint * Increase DeltaGrace * gomodtidy] * Increase timeouts significantly * Attempt test fix * Add comments * Remove useless comment * Add fromblock to LLO job spec --- GNUmakefile | 1 + core/scripts/go.mod | 2 +- core/scripts/go.sum | 4 +- core/services/chainlink/application.go | 1 + core/services/job/orm.go | 58 +-- core/services/job/spawner_test.go | 2 +- core/services/llo/bm/dummy_transmitter.go | 79 ++++ .../services/llo/bm/dummy_transmitter_test.go | 36 ++ .../llo/channel_definition_cache_factory.go | 58 +++ core/services/llo/data_source.go | 103 +++++ core/services/llo/data_source_test.go | 95 +++++ core/services/llo/delegate.go | 123 ++++++ core/services/llo/evm/report_codec.go | 97 +++++ core/services/llo/evm/report_codec_test.go | 90 +++++ core/services/llo/keyring.go | 79 ++++ core/services/llo/keyring_test.go | 123 ++++++ core/services/llo/offchain_config_digester.go | 123 ++++++ .../llo/offchain_config_digester_test.go | 55 +++ .../llo/onchain_channel_definition_cache.go | 252 ++++++++++++ .../onchain_channel_definition_cache_test.go | 26 ++ core/services/llo/orm.go | 66 ++++ core/services/llo/orm_test.go | 101 +++++ .../llo/static_channel_definitions_cache.go | 56 +++ core/services/llo/transmitter.go | 153 ++++++++ core/services/llo/transmitter_test.go | 7 + core/services/ocr2/delegate.go | 142 ++++++- .../ocr2/plugins/llo/config/config.go | 112 ++++++ .../ocr2/plugins/llo/config/config_test.go | 142 +++++++ .../services/ocr2/plugins/llo/helpers_test.go | 356 +++++++++++++++++ .../ocr2/plugins/llo/integration_test.go | 370 ++++++++++++++++++ ...annel_definition_cache_integration_test.go | 213 ++++++++++ .../ocr2/plugins/mercury/integration_test.go | 14 +- core/services/ocr2/validate/validate.go | 12 + core/services/ocrbootstrap/delegate.go | 15 +- core/services/pipeline/mocks/runner.go | 30 ++ core/services/pipeline/runner.go | 1 + core/services/relay/evm/evm.go | 76 +++- .../services/relay/evm/llo_config_provider.go | 21 + core/services/relay/evm/llo_provider.go | 90 +++++ .../relay/evm/llo_verifier_decoder.go | 67 ++++ .../relay/evm/mercury/config_digest.go | 2 +- .../relay/evm/mercury/wsrpc/pb/mercury.pb.go | 8 +- .../relay/evm/mercury/wsrpc/pb/mercury.proto | 2 +- .../0224_create_channel_definition_caches.sql | 12 + docs/CHANGELOG.md | 1 + go.mod | 4 +- go.sum | 4 +- integration-tests/go.mod | 2 +- integration-tests/go.sum | 4 +- integration-tests/load/go.mod | 2 +- integration-tests/load/go.sum | 4 +- 51 files changed, 3436 insertions(+), 60 deletions(-) create mode 100644 core/services/llo/bm/dummy_transmitter.go create mode 100644 core/services/llo/bm/dummy_transmitter_test.go create mode 100644 core/services/llo/channel_definition_cache_factory.go create mode 100644 core/services/llo/data_source.go create mode 100644 core/services/llo/data_source_test.go create mode 100644 core/services/llo/delegate.go create mode 100644 core/services/llo/evm/report_codec.go create mode 100644 core/services/llo/evm/report_codec_test.go create mode 100644 core/services/llo/keyring.go create mode 100644 core/services/llo/keyring_test.go create mode 100644 core/services/llo/offchain_config_digester.go create mode 100644 core/services/llo/offchain_config_digester_test.go create mode 100644 
core/services/llo/onchain_channel_definition_cache.go create mode 100644 core/services/llo/onchain_channel_definition_cache_test.go create mode 100644 core/services/llo/orm.go create mode 100644 core/services/llo/orm_test.go create mode 100644 core/services/llo/static_channel_definitions_cache.go create mode 100644 core/services/llo/transmitter.go create mode 100644 core/services/llo/transmitter_test.go create mode 100644 core/services/ocr2/plugins/llo/config/config.go create mode 100644 core/services/ocr2/plugins/llo/config/config_test.go create mode 100644 core/services/ocr2/plugins/llo/helpers_test.go create mode 100644 core/services/ocr2/plugins/llo/integration_test.go create mode 100644 core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go create mode 100644 core/services/relay/evm/llo_config_provider.go create mode 100644 core/services/relay/evm/llo_provider.go create mode 100644 core/services/relay/evm/llo_verifier_decoder.go create mode 100644 core/store/migrate/migrations/0224_create_channel_definition_caches.sql diff --git a/GNUmakefile b/GNUmakefile index 20f3eb92d51..8cbd0953ee7 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -31,6 +31,7 @@ gomodtidy: ## Run go mod tidy on all modules. go mod tidy cd ./core/scripts && go mod tidy cd ./integration-tests && go mod tidy + cd ./integration-tests/load && go mod tidy .PHONY: godoc godoc: ## Install and run godoc diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 9cf5111cf52..91697eb5f9f 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -250,7 +250,7 @@ require ( github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 // indirect github.com/smartcontractkit/chain-selectors v1.0.10 // indirect github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 // indirect - github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240214203158-47dae5de1336 // indirect + github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 // indirect github.com/smartcontractkit/chainlink-feeds v0.0.0-20240119021347-3c541a78cdb8 // indirect github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240216142700-c5869534c19e // indirect github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240213121419-1272736c2ac0 // indirect diff --git a/core/scripts/go.sum b/core/scripts/go.sum index b6432cbe3fe..7a3f6b2f0c2 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1177,8 +1177,8 @@ github.com/smartcontractkit/chainlink-common v0.1.7-0.20240221153538-1ea85cf3dc6 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240221153538-1ea85cf3dc6c/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8/go.mod h1:a65NtrK4xZb01mf0dDNghPkN2wXgcqFQ55ADthVBgMc= -github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240214203158-47dae5de1336 h1:j00D0/EqE9HRu+63v7KwUOe4ZxLc4AN5SOJFiinkkH0= -github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240214203158-47dae5de1336/go.mod h1:umLyYLRGqyIuFfGpEREZP3So6+O8iL35cCCqW+OxX5w= +github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 h1:xFSv8561jsLtF6gYZr/zW2z5qUUAkcFkApin2mnbYTo= +github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540/go.mod h1:sjAmX8K2kbQhvDarZE1ZZgDgmHJ50s0BBc/66vKY2ek= 
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240119021347-3c541a78cdb8 h1:1BcjXuviSAKttOX7BZoVHRZZGfxqoA2+AL8tykmkdoc= github.com/smartcontractkit/chainlink-feeds v0.0.0-20240119021347-3c541a78cdb8/go.mod h1:vy1L7NybTy2F/Yv7BOh+oZBa1MACD6gzd1+DkcSkfp8= github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240216142700-c5869534c19e h1:k8HS3GsAFZnxXIW3141VsQP2+EL1XrTtOi/HDt7sdBE= diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go index eebb70534c0..d95458838bc 100644 --- a/core/services/chainlink/application.go +++ b/core/services/chainlink/application.go @@ -417,6 +417,7 @@ func NewApplication(opts ApplicationOpts) (Application, error) { bridgeORM, mercuryORM, pipelineRunner, + streamRegistry, peerWrapper, telemetryManager, legacyEVMChains, diff --git a/core/services/job/orm.go b/core/services/job/orm.go index 07b9cb95aae..2e7bb0a90a5 100644 --- a/core/services/job/orm.go +++ b/core/services/job/orm.go @@ -467,34 +467,40 @@ func (o *orm) CreateJob(jb *Job, qopts ...pg.QOpt) error { } // ValidateKeyStoreMatch confirms that the key has a valid match in the keystore -func ValidateKeyStoreMatch(spec *OCR2OracleSpec, keyStore keystore.Master, key string) error { - if spec.PluginType == types.Mercury { - _, err := keyStore.CSA().Get(key) +func ValidateKeyStoreMatch(spec *OCR2OracleSpec, keyStore keystore.Master, key string) (err error) { + switch spec.PluginType { + case types.Mercury, types.LLO: + _, err = keyStore.CSA().Get(key) if err != nil { - return errors.Errorf("no CSA key matching: %q", key) + err = errors.Errorf("no CSA key matching: %q", key) } - } else { - switch spec.Relay { - case relay.EVM: - _, err := keyStore.Eth().Get(key) - if err != nil { - return errors.Errorf("no EVM key matching: %q", key) - } - case relay.Cosmos: - _, err := keyStore.Cosmos().Get(key) - if err != nil { - return errors.Errorf("no Cosmos key matching: %q", key) - } - case relay.Solana: - _, err := keyStore.Solana().Get(key) - if err != nil { - return errors.Errorf("no Solana key matching: %q", key) - } - case relay.StarkNet: - _, err := keyStore.StarkNet().Get(key) - if err != nil { - return errors.Errorf("no Starknet key matching: %q", key) - } + default: + err = validateKeyStoreMatchForRelay(spec.Relay, keyStore, key) + } + return +} + +func validateKeyStoreMatchForRelay(network relay.Network, keyStore keystore.Master, key string) error { + switch network { + case relay.EVM: + _, err := keyStore.Eth().Get(key) + if err != nil { + return errors.Errorf("no EVM key matching: %q", key) + } + case relay.Cosmos: + _, err := keyStore.Cosmos().Get(key) + if err != nil { + return errors.Errorf("no Cosmos key matching: %q", key) + } + case relay.Solana: + _, err := keyStore.Solana().Get(key) + if err != nil { + return errors.Errorf("no Solana key matching: %q", key) + } + case relay.StarkNet: + _, err := keyStore.StarkNet().Get(key) + if err != nil { + return errors.Errorf("no Starknet key matching: %q", key) } } return nil diff --git a/core/services/job/spawner_test.go b/core/services/job/spawner_test.go index b23be49458e..9dde7a47721 100644 --- a/core/services/job/spawner_test.go +++ b/core/services/job/spawner_test.go @@ -304,7 +304,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { processConfig := plugins.NewRegistrarConfig(loop.GRPCOpts{}, func(name string) (*plugins.RegisteredLoop, error) { return nil, nil }) ocr2DelegateConfig := ocr2.NewDelegateConfig(config.OCR2(), config.Mercury(), config.Threshold(), config.Insecure(), config.JobPipeline(), 
config.Database(), processConfig) - d := ocr2.NewDelegate(nil, orm, nil, nil, nil, nil, monitoringEndpoint, legacyChains, lggr, ocr2DelegateConfig, + d := ocr2.NewDelegate(nil, orm, nil, nil, nil, nil, nil, monitoringEndpoint, legacyChains, lggr, ocr2DelegateConfig, keyStore.OCR2(), keyStore.DKGSign(), keyStore.DKGEncrypt(), ethKeyStore, testRelayGetter, mailMon, capabilities.NewRegistry(lggr)) delegateOCR2 := &delegate{jobOCR2VRF.Type, []job.ServiceCtx{}, 0, nil, d} diff --git a/core/services/llo/bm/dummy_transmitter.go b/core/services/llo/bm/dummy_transmitter.go new file mode 100644 index 00000000000..b998c19cb29 --- /dev/null +++ b/core/services/llo/bm/dummy_transmitter.go @@ -0,0 +1,79 @@ +package bm + +import ( + "context" + "crypto/ed25519" + "fmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/services" + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +// A dummy transmitter useful for benchmarking and testing + +var ( + transmitSuccessCount = promauto.NewCounter(prometheus.CounterOpts{ + Name: "llo_transmit_success_count", + Help: "Running count of successful transmits", + }) +) + +type Transmitter interface { + llotypes.Transmitter + services.Service +} + +type transmitter struct { + lggr logger.Logger + fromAccount string +} + +func NewTransmitter(lggr logger.Logger, fromAccount ed25519.PublicKey) Transmitter { + return &transmitter{ + lggr.Named("DummyTransmitter"), + fmt.Sprintf("%x", fromAccount), + } +} + +func (t *transmitter) Start(context.Context) error { + return nil +} + +func (t *transmitter) Close() error { + return nil +} + +func (t *transmitter) Transmit( + ctx context.Context, + digest types.ConfigDigest, + seqNr uint64, + report ocr3types.ReportWithInfo[llotypes.ReportInfo], + sigs []types.AttributedOnchainSignature, +) error { + transmitSuccessCount.Inc() + t.lggr.Debugw("Transmit", "digest", digest, "seqNr", seqNr, "report.Report", report.Report, "report.Info", report.Info, "sigs", sigs) + return nil +} + +// FromAccount returns the stringified (hex) CSA public key +func (t *transmitter) FromAccount() (ocr2types.Account, error) { + return ocr2types.Account(t.fromAccount), nil +} + +func (t *transmitter) Ready() error { return nil } + +func (t *transmitter) HealthReport() map[string]error { + report := map[string]error{t.Name(): nil} + return report +} + +func (t *transmitter) Name() string { return t.lggr.Name() } diff --git a/core/services/llo/bm/dummy_transmitter_test.go b/core/services/llo/bm/dummy_transmitter_test.go new file mode 100644 index 00000000000..055b150ad13 --- /dev/null +++ b/core/services/llo/bm/dummy_transmitter_test.go @@ -0,0 +1,36 @@ +package bm + +import ( + "crypto/ed25519" + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest" + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + 
"github.com/smartcontractkit/chainlink/v2/core/logger" +) + +func Test_DummyTransmitter(t *testing.T) { + lggr, observedLogs := logger.TestLoggerObserved(t, zapcore.DebugLevel) + tr := NewTransmitter(lggr, ed25519.PublicKey("dummy")) + + servicetest.Run(t, tr) + + err := tr.Transmit( + testutils.Context(t), + types.ConfigDigest{}, + 42, + ocr3types.ReportWithInfo[llotypes.ReportInfo]{}, + []types.AttributedOnchainSignature{}, + ) + require.NoError(t, err) + + testutils.RequireLogMessage(t, observedLogs, "Transmit") +} diff --git a/core/services/llo/channel_definition_cache_factory.go b/core/services/llo/channel_definition_cache_factory.go new file mode 100644 index 00000000000..51906e0ff1b --- /dev/null +++ b/core/services/llo/channel_definition_cache_factory.go @@ -0,0 +1,58 @@ +package llo + +import ( + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/logger" + lloconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/llo/config" +) + +type ChannelDefinitionCacheFactory interface { + NewCache(cfg lloconfig.PluginConfig) (llotypes.ChannelDefinitionCache, error) +} + +var _ ChannelDefinitionCacheFactory = &channelDefinitionCacheFactory{} + +func NewChannelDefinitionCacheFactory(lggr logger.Logger, orm ChannelDefinitionCacheORM, lp logpoller.LogPoller) ChannelDefinitionCacheFactory { + return &channelDefinitionCacheFactory{ + lggr, + orm, + lp, + make(map[common.Address]struct{}), + sync.Mutex{}, + } +} + +type channelDefinitionCacheFactory struct { + lggr logger.Logger + orm ChannelDefinitionCacheORM + lp logpoller.LogPoller + + caches map[common.Address]struct{} + mu sync.Mutex +} + +func (f *channelDefinitionCacheFactory) NewCache(cfg lloconfig.PluginConfig) (llotypes.ChannelDefinitionCache, error) { + if cfg.ChannelDefinitions != "" { + return NewStaticChannelDefinitionCache(f.lggr, cfg.ChannelDefinitions) + } + + addr := cfg.ChannelDefinitionsContractAddress + fromBlock := cfg.ChannelDefinitionsContractFromBlock + + f.mu.Lock() + defer f.mu.Unlock() + + if _, exists := f.caches[addr]; exists { + // This shouldn't really happen and isn't supported + return nil, fmt.Errorf("cache already exists for contract address %s", addr.Hex()) + } + f.caches[addr] = struct{}{} + return NewChannelDefinitionCache(f.lggr, f.orm, f.lp, addr, fromBlock), nil +} diff --git a/core/services/llo/data_source.go b/core/services/llo/data_source.go new file mode 100644 index 00000000000..a9c3744f9e3 --- /dev/null +++ b/core/services/llo/data_source.go @@ -0,0 +1,103 @@ +package llo + +import ( + "context" + "fmt" + "math/big" + "sync" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + "github.com/smartcontractkit/chainlink-data-streams/llo" + + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/streams" +) + +var ( + promMissingStreamCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "llo_stream_missing_count", + Help: "Number of times we tried to observe a stream, but it was missing", + }, + []string{"streamID"}, + ) + promObservationErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "llo_stream_observation_error_count", + Help: "Number of times we tried to 
observe a stream, but it failed with an error", + }, + []string{"streamID"}, + ) +) + +type ErrMissingStream struct { + id string +} + +type Registry interface { + Get(streamID streams.StreamID) (strm streams.Stream, exists bool) +} + +func (e ErrMissingStream) Error() string { + return fmt.Sprintf("missing stream definition for: %q", e.id) +} + +var _ llo.DataSource = &dataSource{} + +type dataSource struct { + lggr logger.Logger + registry Registry +} + +func newDataSource(lggr logger.Logger, registry Registry) llo.DataSource { + return &dataSource{lggr.Named("DataSource"), registry} +} + +// Observe looks up all streams in the registry and returns a map of stream ID => value +func (d *dataSource) Observe(ctx context.Context, streamIDs map[llotypes.StreamID]struct{}) (llo.StreamValues, error) { + var wg sync.WaitGroup + wg.Add(len(streamIDs)) + sv := make(llo.StreamValues) + var mu sync.Mutex + + for streamID := range streamIDs { + go func(streamID llotypes.StreamID) { + defer wg.Done() + + var res llo.ObsResult[*big.Int] + + stream, exists := d.registry.Get(streamID) + if exists { + run, trrs, err := stream.Run(ctx) + if err != nil { + var runID int64 + if run != nil { + runID = run.ID + } + d.lggr.Debugw("Observation failed for stream", "err", err, "streamID", streamID, "runID", runID) + promObservationErrorCount.WithLabelValues(fmt.Sprintf("%d", streamID)).Inc() + } else { + // TODO: support types other than *big.Int + // https://smartcontract-it.atlassian.net/browse/MERC-3525 + val, err := streams.ExtractBigInt(trrs) + if err == nil { + res.Val = val + res.Valid = true + } + } + } else { + d.lggr.Errorw(fmt.Sprintf("Missing stream: %q", streamID), "streamID", streamID) + promMissingStreamCount.WithLabelValues(fmt.Sprintf("%d", streamID)).Inc() + } + + mu.Lock() + defer mu.Unlock() + sv[streamID] = res + }(streamID) + } + + wg.Wait() + + return sv, nil +} diff --git a/core/services/llo/data_source_test.go b/core/services/llo/data_source_test.go new file mode 100644 index 00000000000..c956e3770c9 --- /dev/null +++ b/core/services/llo/data_source_test.go @@ -0,0 +1,95 @@ +package llo + +import ( + "context" + "errors" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/smartcontractkit/chainlink-data-streams/llo" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/pipeline" + "github.com/smartcontractkit/chainlink/v2/core/services/streams" +) + +type mockStream struct { + run *pipeline.Run + trrs pipeline.TaskRunResults + err error +} + +func (m *mockStream) Run(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) { + return m.run, m.trrs, m.err +} + +type mockRegistry struct { + streams map[streams.StreamID]*mockStream +} + +func (m *mockRegistry) Get(streamID streams.StreamID) (strm streams.Stream, exists bool) { + strm, exists = m.streams[streamID] + return +} + +func makeStreamWithSingleResult[T any](res T, err error) *mockStream { + return &mockStream{ + trrs: []pipeline.TaskRunResult{pipeline.TaskRunResult{Task: &pipeline.MemoTask{}, Result: pipeline.Result{Value: res}}}, + err: err, + } +} + +func Test_DataSource(t *testing.T) { + lggr := logger.TestLogger(t) + reg := &mockRegistry{make(map[streams.StreamID]*mockStream)} + ds := newDataSource(lggr, reg) + ctx := testutils.Context(t) + + streamIDs := make(map[streams.StreamID]struct{}) + streamIDs[streams.StreamID(1)] = struct{}{} + 
streamIDs[streams.StreamID(2)] = struct{}{} + streamIDs[streams.StreamID(3)] = struct{}{} + + t.Run("Observe", func(t *testing.T) { + t.Run("returns errors if no streams are defined", func(t *testing.T) { + vals, err := ds.Observe(ctx, streamIDs) + assert.NoError(t, err) + + assert.Equal(t, llo.StreamValues{ + 2: llo.ObsResult[*big.Int]{Val: nil, Valid: false}, + 1: llo.ObsResult[*big.Int]{Val: nil, Valid: false}, + 3: llo.ObsResult[*big.Int]{Val: nil, Valid: false}, + }, vals) + }) + t.Run("observes each stream with success and returns values matching map argument", func(t *testing.T) { + reg.streams[1] = makeStreamWithSingleResult[*big.Int](big.NewInt(2181), nil) + reg.streams[2] = makeStreamWithSingleResult[*big.Int](big.NewInt(40602), nil) + reg.streams[3] = makeStreamWithSingleResult[*big.Int](big.NewInt(15), nil) + + vals, err := ds.Observe(ctx, streamIDs) + assert.NoError(t, err) + + assert.Equal(t, llo.StreamValues{ + 2: llo.ObsResult[*big.Int]{Val: big.NewInt(40602), Valid: true}, + 1: llo.ObsResult[*big.Int]{Val: big.NewInt(2181), Valid: true}, + 3: llo.ObsResult[*big.Int]{Val: big.NewInt(15), Valid: true}, + }, vals) + }) + t.Run("observes each stream and returns success/errors", func(t *testing.T) { + reg.streams[1] = makeStreamWithSingleResult[*big.Int](big.NewInt(2181), errors.New("something exploded")) + reg.streams[2] = makeStreamWithSingleResult[*big.Int](big.NewInt(40602), nil) + reg.streams[3] = makeStreamWithSingleResult[*big.Int](nil, errors.New("something exploded 2")) + + vals, err := ds.Observe(ctx, streamIDs) + assert.NoError(t, err) + + assert.Equal(t, llo.StreamValues{ + 2: llo.ObsResult[*big.Int]{Val: big.NewInt(40602), Valid: true}, + 1: llo.ObsResult[*big.Int]{Val: nil, Valid: false}, + 3: llo.ObsResult[*big.Int]{Val: nil, Valid: false}, + }, vals) + }) + }) +} diff --git a/core/services/llo/delegate.go b/core/services/llo/delegate.go new file mode 100644 index 00000000000..b64c9c590fe --- /dev/null +++ b/core/services/llo/delegate.go @@ -0,0 +1,123 @@ +package llo + +import ( + "context" + "errors" + "fmt" + + ocrcommontypes "github.com/smartcontractkit/libocr/commontypes" + ocr2plus "github.com/smartcontractkit/libocr/offchainreporting2plus" + ocr3types "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types" + ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/services" + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + "github.com/smartcontractkit/chainlink-data-streams/llo" + + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/job" + "github.com/smartcontractkit/chainlink/v2/core/services/llo/evm" + "github.com/smartcontractkit/chainlink/v2/core/services/pg" + "github.com/smartcontractkit/chainlink/v2/core/services/streams" +) + +var _ job.ServiceCtx = &delegate{} + +type Closer interface { + Close() error +} + +type delegate struct { + services.StateMachine + + cfg DelegateConfig + codecs map[llotypes.ReportFormat]llo.ReportCodec + + prrc llo.PredecessorRetirementReportCache + src llo.ShouldRetireCache + ds llo.DataSource + + oracle Closer +} + +type DelegateConfig struct { + Logger logger.Logger + Queryer pg.Queryer + Runner streams.Runner + Registry Registry + + // LLO + ChannelDefinitionCache llotypes.ChannelDefinitionCache + + // OCR3 + BinaryNetworkEndpointFactory ocr2types.BinaryNetworkEndpointFactory + V2Bootstrappers []ocrcommontypes.BootstrapperLocator + 
ContractConfigTracker ocr2types.ContractConfigTracker + ContractTransmitter ocr3types.ContractTransmitter[llotypes.ReportInfo] + Database ocr3types.Database + OCRLogger ocrcommontypes.Logger + MonitoringEndpoint ocrcommontypes.MonitoringEndpoint + OffchainConfigDigester ocr2types.OffchainConfigDigester + OffchainKeyring ocr2types.OffchainKeyring + OnchainKeyring ocr3types.OnchainKeyring[llotypes.ReportInfo] + LocalConfig ocr2types.LocalConfig +} + +func NewDelegate(cfg DelegateConfig) (job.ServiceCtx, error) { + if cfg.Queryer == nil { + return nil, errors.New("Queryer must not be nil") + } + if cfg.Runner == nil { + return nil, errors.New("Runner must not be nil") + } + if cfg.Registry == nil { + return nil, errors.New("Registry must not be nil") + } + codecs := make(map[llotypes.ReportFormat]llo.ReportCodec) + + // NOTE: All codecs must be specified here + codecs[llotypes.ReportFormatJSON] = llo.JSONReportCodec{} + codecs[llotypes.ReportFormatEVM] = evm.ReportCodec{} + + // TODO: Do these services need starting? + // https://smartcontract-it.atlassian.net/browse/MERC-3386 + prrc := llo.NewPredecessorRetirementReportCache() + src := llo.NewShouldRetireCache() + ds := newDataSource(cfg.Logger.Named("DataSource"), cfg.Registry) + + return &delegate{services.StateMachine{}, cfg, codecs, prrc, src, ds, nil}, nil +} + +func (d *delegate) Start(ctx context.Context) error { + return d.StartOnce("LLODelegate", func() error { + // create the oracle from config values + oracle, err := ocr2plus.NewOracle(ocr2plus.OCR3OracleArgs[llotypes.ReportInfo]{ + BinaryNetworkEndpointFactory: d.cfg.BinaryNetworkEndpointFactory, + V2Bootstrappers: d.cfg.V2Bootstrappers, + ContractConfigTracker: d.cfg.ContractConfigTracker, + ContractTransmitter: d.cfg.ContractTransmitter, + Database: d.cfg.Database, + LocalConfig: d.cfg.LocalConfig, + Logger: d.cfg.OCRLogger, + MonitoringEndpoint: d.cfg.MonitoringEndpoint, + OffchainConfigDigester: d.cfg.OffchainConfigDigester, + OffchainKeyring: d.cfg.OffchainKeyring, + OnchainKeyring: d.cfg.OnchainKeyring, + ReportingPluginFactory: llo.NewPluginFactory( + d.prrc, d.src, d.cfg.ChannelDefinitionCache, d.ds, d.cfg.Logger.Named("LLOReportingPlugin"), d.codecs, + ), + }) + + if err != nil { + return fmt.Errorf("%w: failed to create new OCR oracle", err) + } + + d.oracle = oracle + + return oracle.Start() + }) +} + +func (d *delegate) Close() error { + return d.StopOnce("LLODelegate", d.oracle.Close) +} diff --git a/core/services/llo/evm/report_codec.go b/core/services/llo/evm/report_codec.go new file mode 100644 index 00000000000..23ae02e1064 --- /dev/null +++ b/core/services/llo/evm/report_codec.go @@ -0,0 +1,97 @@ +package evm + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi" + + chainselectors "github.com/smartcontractkit/chain-selectors" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + + "github.com/smartcontractkit/chainlink-data-streams/llo" +) + +var ( + _ llo.ReportCodec = ReportCodec{} + Schema = getSchema() +) + +func getSchema() abi.Arguments { + mustNewType := func(t string) abi.Type { + result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{}) + if err != nil { + panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err)) + } + return result + } + return abi.Arguments([]abi.Argument{ + {Name: "configDigest", Type: mustNewType("bytes32")}, + {Name: "chainId", Type: mustNewType("uint64")}, + // TODO: + // could also include address 
of verifier to make things more specific. + // downside is increased data size. + // for now we assume that a channelId will only be registered on a single + // verifier per chain. + // https://smartcontract-it.atlassian.net/browse/MERC-3652 + {Name: "seqNr", Type: mustNewType("uint64")}, + {Name: "channelId", Type: mustNewType("uint32")}, + {Name: "validAfterSeconds", Type: mustNewType("uint32")}, + {Name: "validUntilSeconds", Type: mustNewType("uint32")}, + {Name: "values", Type: mustNewType("int192[]")}, + {Name: "specimen", Type: mustNewType("bool")}, + }) +} + +type ReportCodec struct{} + +func NewReportCodec() ReportCodec { + return ReportCodec{} +} + +func (ReportCodec) Encode(report llo.Report) ([]byte, error) { + chainID, err := chainselectors.ChainIdFromSelector(report.ChainSelector) + if err != nil { + return nil, fmt.Errorf("failed to get chain ID for selector %d; %w", report.ChainSelector, err) + } + + b, err := Schema.Pack(report.ConfigDigest, chainID, report.SeqNr, report.ChannelID, report.ValidAfterSeconds, report.ValidUntilSeconds, report.Values, report.Specimen) + if err != nil { + return nil, fmt.Errorf("failed to encode report: %w", err) + } + return b, nil +} + +func (ReportCodec) Decode(encoded []byte) (llo.Report, error) { + type decode struct { + ConfigDigest types.ConfigDigest + ChainId uint64 + SeqNr uint64 + ChannelId llotypes.ChannelID + ValidAfterSeconds uint32 + ValidUntilSeconds uint32 + Values []*big.Int + Specimen bool + } + values, err := Schema.Unpack(encoded) + if err != nil { + return llo.Report{}, fmt.Errorf("failed to decode report: %w", err) + } + decoded := new(decode) + if err = Schema.Copy(decoded, values); err != nil { + return llo.Report{}, fmt.Errorf("failed to copy report values to struct: %w", err) + } + chainSelector, err := chainselectors.SelectorFromChainId(decoded.ChainId) + return llo.Report{ + ConfigDigest: decoded.ConfigDigest, + ChainSelector: chainSelector, + SeqNr: decoded.SeqNr, + ChannelID: decoded.ChannelId, + ValidAfterSeconds: decoded.ValidAfterSeconds, + ValidUntilSeconds: decoded.ValidUntilSeconds, + Values: decoded.Values, + Specimen: decoded.Specimen, + }, err +} diff --git a/core/services/llo/evm/report_codec_test.go b/core/services/llo/evm/report_codec_test.go new file mode 100644 index 00000000000..e00314306ae --- /dev/null +++ b/core/services/llo/evm/report_codec_test.go @@ -0,0 +1,90 @@ +package evm + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + + "github.com/smartcontractkit/chainlink-data-streams/llo" +) + +const ethMainnetChainSelector uint64 = 5009297550715157269 + +func newValidReport() llo.Report { + return llo.Report{ + ConfigDigest: types.ConfigDigest{1, 2, 3}, + ChainSelector: ethMainnetChainSelector, // + SeqNr: 32, + ChannelID: llotypes.ChannelID(31), + ValidAfterSeconds: 33, + ValidUntilSeconds: 34, + Values: []*big.Int{big.NewInt(35), big.NewInt(36)}, + Specimen: true, + } +} + +func Test_ReportCodec(t *testing.T) { + rc := ReportCodec{} + + t.Run("Encode errors on zero fields", func(t *testing.T) { + _, err := rc.Encode(llo.Report{}) + require.Error(t, err) + + assert.Contains(t, err.Error(), "failed to get chain ID for selector 0; chain not found for chain selector 0") + }) + + t.Run("Encode constructs a report from observations", func(t *testing.T) { + report := newValidReport() + + 
encoded, err := rc.Encode(report) + require.NoError(t, err) + + reportElems := make(map[string]interface{}) + err = Schema.UnpackIntoMap(reportElems, encoded) + require.NoError(t, err) + + assert.Equal(t, [32]uint8{0x1, 0x2, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, reportElems["configDigest"]) + assert.Equal(t, uint64(1), reportElems["chainId"]) + assert.Equal(t, uint64(32), reportElems["seqNr"]) + assert.Equal(t, uint32(31), reportElems["channelId"]) + assert.Equal(t, uint32(33), reportElems["validAfterSeconds"]) + assert.Equal(t, uint32(34), reportElems["validUntilSeconds"]) + assert.Equal(t, []*big.Int{big.NewInt(35), big.NewInt(36)}, reportElems["values"]) + assert.Equal(t, true, reportElems["specimen"]) + + assert.Len(t, encoded, 352) + assert.Equal(t, []byte{0x1, 0x2, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x22, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x24}, encoded) + + t.Run("Decode decodes the report", func(t *testing.T) { + decoded, err := rc.Decode(encoded) + require.NoError(t, err) + + assert.Equal(t, report.ConfigDigest, decoded.ConfigDigest) + assert.Equal(t, report.ChainSelector, decoded.ChainSelector) + assert.Equal(t, report.SeqNr, decoded.SeqNr) + assert.Equal(t, report.ChannelID, decoded.ChannelID) + assert.Equal(t, report.ValidAfterSeconds, decoded.ValidAfterSeconds) + assert.Equal(t, report.ValidUntilSeconds, decoded.ValidUntilSeconds) + assert.Equal(t, report.Values, decoded.Values) + assert.Equal(t, report.Specimen, decoded.Specimen) + }) + }) + + t.Run("Decode errors on invalid report", func(t *testing.T) { + _, err := rc.Decode([]byte{1, 2, 3}) + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + + longBad := make([]byte, 64) + for i := 0; i < 
len(longBad); i++ { + longBad[i] = byte(i) + } + _, err = rc.Decode(longBad) + assert.EqualError(t, err, "failed to decode report: abi: improperly encoded uint64 value") + }) +} diff --git a/core/services/llo/keyring.go b/core/services/llo/keyring.go new file mode 100644 index 00000000000..1d6eaebad38 --- /dev/null +++ b/core/services/llo/keyring.go @@ -0,0 +1,79 @@ +package llo + +import ( + "fmt" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +type LLOOnchainKeyring ocr3types.OnchainKeyring[llotypes.ReportInfo] + +var _ LLOOnchainKeyring = &onchainKeyring{} + +type Key interface { + Sign3(digest ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report) (signature []byte, err error) + Verify3(publicKey ocrtypes.OnchainPublicKey, cd ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report, signature []byte) bool + PublicKey() ocrtypes.OnchainPublicKey + MaxSignatureLength() int +} + +type onchainKeyring struct { + lggr logger.Logger + keys map[llotypes.ReportFormat]Key +} + +func NewOnchainKeyring(lggr logger.Logger, keys map[llotypes.ReportFormat]Key) LLOOnchainKeyring { + return &onchainKeyring{ + lggr.Named("OnchainKeyring"), keys, + } +} + +func (okr *onchainKeyring) PublicKey() types.OnchainPublicKey { + // All public keys combined + var pk []byte + for _, k := range okr.keys { + pk = append(pk, k.PublicKey()...) + } + return pk +} + +func (okr *onchainKeyring) MaxSignatureLength() (n int) { + // Needs to be max of all chain sigs + for _, k := range okr.keys { + n += k.MaxSignatureLength() + } + return +} + +func (okr *onchainKeyring) Sign(digest types.ConfigDigest, seqNr uint64, r ocr3types.ReportWithInfo[llotypes.ReportInfo]) (signature []byte, err error) { + rf := r.Info.ReportFormat + // HACK: sign/verify JSON payloads with EVM keys for now, this makes + // debugging and testing easier + if rf == llotypes.ReportFormatJSON { + rf = llotypes.ReportFormatEVM + } + if key, exists := okr.keys[rf]; exists { + return key.Sign3(digest, seqNr, r.Report) + } + return nil, fmt.Errorf("Sign failed; unsupported report format: %q", r.Info.ReportFormat) +} + +func (okr *onchainKeyring) Verify(key types.OnchainPublicKey, digest types.ConfigDigest, seqNr uint64, r ocr3types.ReportWithInfo[llotypes.ReportInfo], signature []byte) bool { + rf := r.Info.ReportFormat + // HACK: sign/verify JSON payloads with EVM keys for now, this makes + // debugging and testing easier + if rf == llotypes.ReportFormatJSON { + rf = llotypes.ReportFormatEVM + } + if verifier, exists := okr.keys[rf]; exists { + return verifier.Verify3(key, digest, seqNr, r.Report, signature) + } + okr.lggr.Errorf("Verify failed; unsupported report format: %q", r.Info.ReportFormat) + return false +} diff --git a/core/services/llo/keyring_test.go b/core/services/llo/keyring_test.go new file mode 100644 index 00000000000..bf728813801 --- /dev/null +++ b/core/services/llo/keyring_test.go @@ -0,0 +1,123 @@ +package llo + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + ocr3types "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + ocrtypes 
"github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +var _ Key = &mockKey{} + +type mockKey struct { + format llotypes.ReportFormat + verify bool + maxSignatureLen int +} + +func (m *mockKey) Sign3(digest ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report) (signature []byte, err error) { + return []byte(fmt.Sprintf("sig-%d", m.format)), nil +} + +func (m *mockKey) Verify3(publicKey ocrtypes.OnchainPublicKey, cd ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report, signature []byte) bool { + return m.verify +} + +func (m *mockKey) PublicKey() ocrtypes.OnchainPublicKey { + b := make([]byte, m.maxSignatureLen) + for i := 0; i < m.maxSignatureLen; i++ { + b[i] = byte(255) + } + return ocrtypes.OnchainPublicKey(b) +} + +func (m *mockKey) MaxSignatureLength() int { + return m.maxSignatureLen +} + +func (m *mockKey) reset(format llotypes.ReportFormat) { + m.format = format + m.verify = false +} + +func Test_Keyring(t *testing.T) { + lggr := logger.TestLogger(t) + + ks := map[llotypes.ReportFormat]Key{ + llotypes.ReportFormatEVM: &mockKey{format: llotypes.ReportFormatEVM, maxSignatureLen: 1}, + llotypes.ReportFormatSolana: &mockKey{format: llotypes.ReportFormatSolana, maxSignatureLen: 2}, + llotypes.ReportFormatCosmos: &mockKey{format: llotypes.ReportFormatCosmos, maxSignatureLen: 4}, + llotypes.ReportFormatStarknet: &mockKey{format: llotypes.ReportFormatStarknet, maxSignatureLen: 8}, + } + + kr := NewOnchainKeyring(lggr, ks) + + cases := []struct { + format llotypes.ReportFormat + }{ + { + llotypes.ReportFormatEVM, + }, + { + llotypes.ReportFormatSolana, + }, + { + llotypes.ReportFormatCosmos, + }, + { + llotypes.ReportFormatStarknet, + }, + } + + cd, err := ocrtypes.BytesToConfigDigest(testutils.MustRandBytes(32)) + require.NoError(t, err) + seqNr := rand.Uint64() + t.Run("Sign+Verify", func(t *testing.T) { + for _, tc := range cases { + t.Run(tc.format.String(), func(t *testing.T) { + k := ks[tc.format] + defer k.(*mockKey).reset(tc.format) + + sig, err := kr.Sign(cd, seqNr, ocr3types.ReportWithInfo[llotypes.ReportInfo]{Info: llotypes.ReportInfo{ReportFormat: tc.format}}) + require.NoError(t, err) + + assert.Equal(t, []byte(fmt.Sprintf("sig-%d", tc.format)), sig) + + assert.False(t, kr.Verify(nil, cd, seqNr, ocr3types.ReportWithInfo[llotypes.ReportInfo]{Info: llotypes.ReportInfo{ReportFormat: tc.format}}, sig)) + + k.(*mockKey).verify = true + + for _, tc2 := range cases { + verified := kr.Verify(nil, cd, seqNr, ocr3types.ReportWithInfo[llotypes.ReportInfo]{Info: llotypes.ReportInfo{ReportFormat: tc2.format}}, sig) + if tc.format == tc2.format { + assert.True(t, verified, "expected true for %s", tc2.format) + } else { + assert.False(t, verified, "expected false for %s", tc2.format) + } + } + }) + } + }) + + t.Run("MaxSignatureLength", func(t *testing.T) { + assert.Equal(t, 8+4+2+1, kr.MaxSignatureLength()) + }) + t.Run("PublicKey", func(t *testing.T) { + b := make([]byte, 8+4+2+1) + for i := 0; i < len(b); i++ { + b[i] = byte(255) + } + assert.Equal(t, types.OnchainPublicKey(b), kr.PublicKey()) + }) +} diff --git a/core/services/llo/offchain_config_digester.go b/core/services/llo/offchain_config_digester.go new file mode 100644 index 00000000000..cd4d9afa3a0 --- /dev/null +++ b/core/services/llo/offchain_config_digester.go @@ -0,0 +1,123 @@ +package llo + +import ( + 
"crypto/ed25519" + "encoding/binary" + "encoding/hex" + "fmt" + "math/big" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/pkg/errors" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/smartcontractkit/wsrpc/credentials" + + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/exposed_channel_verifier" +) + +// Originally sourced from: https://github.com/smartcontractkit/offchain-reporting/blob/991ebe1462fd56826a1ddfb34287d542acb2baee/lib/offchainreporting2/chains/evmutil/offchain_config_digester.go + +var _ ocrtypes.OffchainConfigDigester = OffchainConfigDigester{} + +func NewOffchainConfigDigester(chainID *big.Int, contractAddress common.Address) OffchainConfigDigester { + return OffchainConfigDigester{chainID, contractAddress} +} + +type OffchainConfigDigester struct { + ChainID *big.Int + ContractAddress common.Address +} + +func (d OffchainConfigDigester) ConfigDigest(cc ocrtypes.ContractConfig) (ocrtypes.ConfigDigest, error) { + signers := []common.Address{} + for i, signer := range cc.Signers { + if len(signer) != 20 { + return ocrtypes.ConfigDigest{}, errors.Errorf("%v-th evm signer should be a 20 byte address, but got %x", i, signer) + } + a := common.BytesToAddress(signer) + signers = append(signers, a) + } + transmitters := []credentials.StaticSizedPublicKey{} + for i, transmitter := range cc.Transmitters { + if len(transmitter) != 2*ed25519.PublicKeySize { + return ocrtypes.ConfigDigest{}, errors.Errorf("%v-th evm transmitter should be a 64 character hex-encoded ed25519 public key, but got '%v' (%d chars)", i, transmitter, len(transmitter)) + } + var t credentials.StaticSizedPublicKey + b, err := hex.DecodeString(string(transmitter)) + if err != nil { + return ocrtypes.ConfigDigest{}, errors.Wrapf(err, "%v-th evm transmitter is not valid hex, got: %q", i, transmitter) + } + copy(t[:], b) + + transmitters = append(transmitters, t) + } + + return configDigest( + d.ChainID, + d.ContractAddress, + cc.ConfigCount, + signers, + transmitters, + cc.F, + cc.OnchainConfig, + cc.OffchainConfigVersion, + cc.OffchainConfig, + ) +} + +func (d OffchainConfigDigester) ConfigDigestPrefix() (ocrtypes.ConfigDigestPrefix, error) { + return ocrtypes.ConfigDigestPrefixLLO, nil +} + +func makeConfigDigestArgs() abi.Arguments { + abi, err := abi.JSON(strings.NewReader(exposed_channel_verifier.ExposedChannelVerifierABI)) + if err != nil { + // assertion + panic(fmt.Sprintf("could not parse aggregator ABI: %s", err.Error())) + } + return abi.Methods["exposedConfigDigestFromConfigData"].Inputs +} + +var configDigestArgs = makeConfigDigestArgs() + +func configDigest( + chainID *big.Int, + contractAddress common.Address, + configCount uint64, + oracles []common.Address, + transmitters []credentials.StaticSizedPublicKey, + f uint8, + onchainConfig []byte, + offchainConfigVersion uint64, + offchainConfig []byte, +) (types.ConfigDigest, error) { + msg, err := configDigestArgs.Pack( + chainID, + contractAddress, + configCount, + oracles, + transmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + ) + if err != nil { + return types.ConfigDigest{}, fmt.Errorf("could not pack config digest args: %v", err) + } + rawHash := crypto.Keccak256(msg) + configDigest := types.ConfigDigest{} + if n := copy(configDigest[:], rawHash); n != 
len(configDigest) { + return types.ConfigDigest{}, fmt.Errorf("copied too little data: %d/%d", n, len(configDigest)) + } + binary.BigEndian.PutUint16(configDigest[:2], uint16(ocrtypes.ConfigDigestPrefixLLO)) + if !(configDigest[0] == 0 && configDigest[1] == 9) { + return types.ConfigDigest{}, fmt.Errorf("wrong ConfigDigestPrefix; got: %x, expected: %d", configDigest[:2], ocrtypes.ConfigDigestPrefixLLO) + } + return configDigest, nil +} diff --git a/core/services/llo/offchain_config_digester_test.go b/core/services/llo/offchain_config_digester_test.go new file mode 100644 index 00000000000..0de9117e391 --- /dev/null +++ b/core/services/llo/offchain_config_digester_test.go @@ -0,0 +1,55 @@ +package llo + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/require" +) + +func Test_OffchainConfigDigester_ConfigDigest(t *testing.T) { + // ChainID and ContractAddress are taken into account for computation + cd1, err := OffchainConfigDigester{ChainID: big.NewInt(0)}.ConfigDigest(types.ContractConfig{}) + require.NoError(t, err) + cd2, err := OffchainConfigDigester{ChainID: big.NewInt(0)}.ConfigDigest(types.ContractConfig{}) + require.NoError(t, err) + cd3, err := OffchainConfigDigester{ChainID: big.NewInt(1)}.ConfigDigest(types.ContractConfig{}) + require.NoError(t, err) + cd4, err := OffchainConfigDigester{ChainID: big.NewInt(1), ContractAddress: common.Address{1}}.ConfigDigest(types.ContractConfig{}) + require.NoError(t, err) + + require.Equal(t, cd1, cd2) + require.NotEqual(t, cd2, cd3) + require.NotEqual(t, cd2, cd4) + require.NotEqual(t, cd3, cd4) + + // malformed signers + _, err = OffchainConfigDigester{}.ConfigDigest(types.ContractConfig{ + Signers: []types.OnchainPublicKey{{1, 2}}, + }) + require.Error(t, err) + + // malformed transmitters + _, err = OffchainConfigDigester{}.ConfigDigest(types.ContractConfig{ + Transmitters: []types.Account{"0x"}, + }) + require.Error(t, err) + + _, err = OffchainConfigDigester{}.ConfigDigest(types.ContractConfig{ + Transmitters: []types.Account{"7343581f55146951b0f678dc6cfa8fd360e2f353"}, + }) + require.Error(t, err) + + _, err = OffchainConfigDigester{}.ConfigDigest(types.ContractConfig{ + Transmitters: []types.Account{"7343581f55146951b0f678dc6cfa8fd360e2f353aabbccddeeffaaccddeeffaz"}, + }) + require.Error(t, err) + + // well-formed transmitters + _, err = OffchainConfigDigester{ChainID: big.NewInt(0)}.ConfigDigest(types.ContractConfig{ + Transmitters: []types.Account{"7343581f55146951b0f678dc6cfa8fd360e2f353aabbccddeeffaaccddeeffaa"}, + }) + require.NoError(t, err) +} diff --git a/core/services/llo/onchain_channel_definition_cache.go b/core/services/llo/onchain_channel_definition_cache.go new file mode 100644 index 00000000000..af35d237b98 --- /dev/null +++ b/core/services/llo/onchain_channel_definition_cache.go @@ -0,0 +1,252 @@ +package llo + +import ( + "context" + "database/sql" + "errors" + "fmt" + "maps" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + + "github.com/smartcontractkit/chainlink-common/pkg/services" + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/channel_config_store" + "github.com/smartcontractkit/chainlink/v2/core/logger" + 
"github.com/smartcontractkit/chainlink/v2/core/services/pg" + "github.com/smartcontractkit/chainlink/v2/core/utils" +) + +type ChannelDefinitionCacheORM interface { + // TODO: What about delete/cleanup? + // https://smartcontract-it.atlassian.net/browse/MERC-3653 + LoadChannelDefinitions(ctx context.Context, addr common.Address) (dfns llotypes.ChannelDefinitions, blockNum int64, err error) + StoreChannelDefinitions(ctx context.Context, addr common.Address, dfns llotypes.ChannelDefinitions, blockNum int64) (err error) +} + +var channelConfigStoreABI abi.ABI + +func init() { + var err error + channelConfigStoreABI, err = abi.JSON(strings.NewReader(channel_config_store.ChannelConfigStoreABI)) + if err != nil { + panic(err) + } +} + +var _ llotypes.ChannelDefinitionCache = &channelDefinitionCache{} + +type channelDefinitionCache struct { + services.StateMachine + + orm ChannelDefinitionCacheORM + + filterName string + lp logpoller.LogPoller + fromBlock int64 + addr common.Address + lggr logger.Logger + + definitionsMu sync.RWMutex + definitions llotypes.ChannelDefinitions + definitionsBlockNum int64 + + wg sync.WaitGroup + chStop chan struct{} +} + +var ( + topicNewChannelDefinition = (channel_config_store.ChannelConfigStoreNewChannelDefinition{}).Topic() + topicChannelDefinitionRemoved = (channel_config_store.ChannelConfigStoreChannelDefinitionRemoved{}).Topic() + + allTopics = []common.Hash{topicNewChannelDefinition, topicChannelDefinitionRemoved} +) + +func NewChannelDefinitionCache(lggr logger.Logger, orm ChannelDefinitionCacheORM, lp logpoller.LogPoller, addr common.Address, fromBlock int64) llotypes.ChannelDefinitionCache { + filterName := logpoller.FilterName("OCR3 LLO ChannelDefinitionCachePoller", addr.String()) + return &channelDefinitionCache{ + services.StateMachine{}, + orm, + filterName, + lp, + 0, + addr, + lggr.Named("ChannelDefinitionCache").With("addr", addr, "fromBlock", fromBlock), + sync.RWMutex{}, + nil, + fromBlock, + sync.WaitGroup{}, + make(chan struct{}), + } +} + +func (c *channelDefinitionCache) Start(ctx context.Context) error { + // Initial load from DB, then async poll from chain thereafter + return c.StartOnce("ChannelDefinitionCache", func() (err error) { + err = c.lp.RegisterFilter(logpoller.Filter{Name: c.filterName, EventSigs: allTopics, Addresses: []common.Address{c.addr}}, pg.WithParentCtx(ctx)) + if err != nil { + return err + } + if definitions, definitionsBlockNum, err := c.orm.LoadChannelDefinitions(ctx, c.addr); err != nil { + return err + } else if definitions != nil { + c.definitions = definitions + c.definitionsBlockNum = definitionsBlockNum + } else { + // ensure non-nil map ready for assignment later + c.definitions = make(llotypes.ChannelDefinitions) + // leave c.definitionsBlockNum as provided fromBlock argument + } + c.wg.Add(1) + go c.poll() + return nil + }) +} + +// TODO: make this configurable? +const pollInterval = 1 * time.Second + +func (c *channelDefinitionCache) poll() { + defer c.wg.Done() + + pollT := time.NewTicker(utils.WithJitter(pollInterval)) + + for { + select { + case <-c.chStop: + return + case <-pollT.C: + if n, err := c.fetchFromChain(); err != nil { + // TODO: retry with backoff? 
+ // https://smartcontract-it.atlassian.net/browse/MERC-3653 + c.lggr.Errorw("Failed to fetch channel definitions from chain", "err", err) + continue + } else { + if n > 0 { + c.lggr.Infow("Updated channel definitions", "nLogs", n, "definitionsBlockNum", c.definitionsBlockNum) + } else { + c.lggr.Debugw("No new channel definitions", "nLogs", 0, "definitionsBlockNum", c.definitionsBlockNum) + } + } + } + } +} + +func (c *channelDefinitionCache) fetchFromChain() (nLogs int, err error) { + // TODO: Pass context + // https://smartcontract-it.atlassian.net/browse/MERC-3653 + latest, err := c.lp.LatestBlock() + if errors.Is(err, sql.ErrNoRows) { + c.lggr.Debug("Logpoller has no logs yet, skipping poll") + return 0, nil + } else if err != nil { + return 0, err + } + toBlock := latest.BlockNumber + + fromBlock := c.definitionsBlockNum + + if toBlock <= fromBlock { + return 0, nil + } + + ctx, cancel := services.StopChan(c.chStop).NewCtx() + defer cancel() + // NOTE: We assume that log poller returns logs in ascending order chronologically + logs, err := c.lp.LogsWithSigs(fromBlock, toBlock, allTopics, c.addr, pg.WithParentCtx(ctx)) + if err != nil { + // TODO: retry? + // https://smartcontract-it.atlassian.net/browse/MERC-3653 + return 0, err + } + for _, log := range logs { + if err = c.applyLog(log); err != nil { + return 0, err + } + } + + // Use context.Background() here because we want to try to save even if we + // are closing + if err = c.orm.StoreChannelDefinitions(context.Background(), c.addr, c.Definitions(), toBlock); err != nil { + return 0, err + } + + c.definitionsBlockNum = toBlock + + return len(logs), nil +} + +func (c *channelDefinitionCache) applyLog(log logpoller.Log) error { + switch log.EventSig { + case topicNewChannelDefinition: + unpacked := new(channel_config_store.ChannelConfigStoreNewChannelDefinition) + + err := channelConfigStoreABI.UnpackIntoInterface(unpacked, "NewChannelDefinition", log.Data) + if err != nil { + return fmt.Errorf("failed to unpack log data: %w", err) + } + + c.applyNewChannelDefinition(unpacked) + case topicChannelDefinitionRemoved: + unpacked := new(channel_config_store.ChannelConfigStoreChannelDefinitionRemoved) + + err := channelConfigStoreABI.UnpackIntoInterface(unpacked, "ChannelDefinitionRemoved", log.Data) + if err != nil { + return fmt.Errorf("failed to unpack log data: %w", err) + } + + c.applyChannelDefinitionRemoved(unpacked) + default: + // don't return error here, we want to ignore unrecognized logs and + // continue rather than interrupting the loop + c.lggr.Errorw("Unexpected log topic", "topic", log.EventSig.Hex()) + } + return nil +} + +func (c *channelDefinitionCache) applyNewChannelDefinition(log *channel_config_store.ChannelConfigStoreNewChannelDefinition) { + streamIDs := make([]llotypes.StreamID, len(log.ChannelDefinition.StreamIDs)) + copy(streamIDs, log.ChannelDefinition.StreamIDs) + c.definitionsMu.Lock() + defer c.definitionsMu.Unlock() + c.definitions[log.ChannelId] = llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormat(log.ChannelDefinition.ReportFormat), + ChainSelector: log.ChannelDefinition.ChainSelector, + StreamIDs: streamIDs, + } +} + +func (c *channelDefinitionCache) applyChannelDefinitionRemoved(log *channel_config_store.ChannelConfigStoreChannelDefinitionRemoved) { + c.definitionsMu.Lock() + defer c.definitionsMu.Unlock() + delete(c.definitions, log.ChannelId) +} + +func (c *channelDefinitionCache) Close() error { + // TODO: unregister filter (on job delete)? 
+ // https://smartcontract-it.atlassian.net/browse/MERC-3653 + return c.StopOnce("ChannelDefinitionCache", func() error { + close(c.chStop) + c.wg.Wait() + return nil + }) +} + +func (c *channelDefinitionCache) HealthReport() map[string]error { + report := map[string]error{c.Name(): c.Healthy()} + return report +} + +func (c *channelDefinitionCache) Name() string { return c.lggr.Name() } + +func (c *channelDefinitionCache) Definitions() llotypes.ChannelDefinitions { + c.definitionsMu.RLock() + defer c.definitionsMu.RUnlock() + return maps.Clone(c.definitions) +} diff --git a/core/services/llo/onchain_channel_definition_cache_test.go b/core/services/llo/onchain_channel_definition_cache_test.go new file mode 100644 index 00000000000..28e89a9c987 --- /dev/null +++ b/core/services/llo/onchain_channel_definition_cache_test.go @@ -0,0 +1,26 @@ +package llo + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" +) + +func Test_ChannelDefinitionCache(t *testing.T) { + t.Run("Definitions", func(t *testing.T) { + // NOTE: this is covered more thoroughly in the integration tests + dfns := llotypes.ChannelDefinitions(map[llotypes.ChannelID]llotypes.ChannelDefinition{ + 1: { + ReportFormat: llotypes.ReportFormat(43), + ChainSelector: 42, + StreamIDs: []llotypes.StreamID{1, 2, 3}, + }, + }) + + cdc := &channelDefinitionCache{definitions: dfns} + + assert.Equal(t, dfns, cdc.Definitions()) + }) +} diff --git a/core/services/llo/orm.go b/core/services/llo/orm.go new file mode 100644 index 00000000000..e046d62ad89 --- /dev/null +++ b/core/services/llo/orm.go @@ -0,0 +1,66 @@ +package llo + +import ( + "context" + "database/sql" + "encoding/json" + "errors" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + + "github.com/smartcontractkit/chainlink/v2/core/services/pg" +) + +type ORM interface { + ChannelDefinitionCacheORM +} + +var _ ORM = &orm{} + +type orm struct { + q pg.Queryer + evmChainID *big.Int +} + +func NewORM(q pg.Queryer, evmChainID *big.Int) ORM { + return &orm{q, evmChainID} +} + +func (o *orm) LoadChannelDefinitions(ctx context.Context, addr common.Address) (dfns llotypes.ChannelDefinitions, blockNum int64, err error) { + type scd struct { + Definitions []byte `db:"definitions"` + BlockNum int64 `db:"block_num"` + } + var scanned scd + err = o.q.GetContext(ctx, &scanned, "SELECT definitions, block_num FROM channel_definitions WHERE evm_chain_id = $1 AND addr = $2", o.evmChainID.String(), addr) + if errors.Is(err, sql.ErrNoRows) { + return dfns, blockNum, nil + } else if err != nil { + return nil, 0, fmt.Errorf("failed to LoadChannelDefinitions; %w", err) + } + + if err = json.Unmarshal(scanned.Definitions, &dfns); err != nil { + return nil, 0, fmt.Errorf("failed to LoadChannelDefinitions; JSON Unmarshal failure; %w", err) + } + + return dfns, scanned.BlockNum, nil +} + +// TODO: Test this method +// https://smartcontract-it.atlassian.net/jira/software/c/projects/MERC/issues/MERC-3653 +func (o *orm) StoreChannelDefinitions(ctx context.Context, addr common.Address, dfns llotypes.ChannelDefinitions, blockNum int64) error { + _, err := o.q.ExecContext(ctx, ` +INSERT INTO channel_definitions (evm_chain_id, addr, definitions, block_num, updated_at) +VALUES ($1, $2, $3, $4, NOW()) +ON CONFLICT (evm_chain_id, addr) DO UPDATE +SET definitions = $3, block_num = $4, updated_at = NOW() +`, o.evmChainID.String(), addr, dfns, 
blockNum) + if err != nil { + return fmt.Errorf("StoreChannelDefinitions failed: %w", err) + } + return nil +} diff --git a/core/services/llo/orm_test.go b/core/services/llo/orm_test.go new file mode 100644 index 00000000000..63a6ac21e3b --- /dev/null +++ b/core/services/llo/orm_test.go @@ -0,0 +1,101 @@ +package llo + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" +) + +func Test_ORM(t *testing.T) { + db := pgtest.NewSqlxDB(t) + orm := NewORM(db, testutils.FixtureChainID) + ctx := testutils.Context(t) + + addr1 := testutils.NewAddress() + addr2 := testutils.NewAddress() + addr3 := testutils.NewAddress() + + t.Run("LoadChannelDefinitions", func(t *testing.T) { + t.Run("returns zero values if nothing in database", func(t *testing.T) { + cd, blockNum, err := orm.LoadChannelDefinitions(ctx, addr1) + require.NoError(t, err) + + assert.Zero(t, cd) + assert.Zero(t, blockNum) + + }) + t.Run("loads channel definitions from database", func(t *testing.T) { + expectedBlockNum := rand.Int63() + expectedBlockNum2 := rand.Int63() + cid1 := rand.Uint32() + cid2 := rand.Uint32() + + channelDefsJSON := fmt.Sprintf(` +{ + "%d": { + "reportFormat": 42, + "chainSelector": 142, + "streamIds": [1, 2] + }, + "%d": { + "reportFormat": 42, + "chainSelector": 142, + "streamIds": [1, 3] + } +} + `, cid1, cid2) + pgtest.MustExec(t, db, ` + INSERT INTO channel_definitions(addr, evm_chain_id, definitions, block_num, updated_at) + VALUES ( $1, $2, $3, $4, NOW()) + `, addr1, testutils.FixtureChainID.String(), channelDefsJSON, expectedBlockNum) + + pgtest.MustExec(t, db, ` + INSERT INTO channel_definitions(addr, evm_chain_id, definitions, block_num, updated_at) + VALUES ( $1, $2, $3, $4, NOW()) + `, addr2, testutils.FixtureChainID.String(), `{}`, expectedBlockNum2) + + { + // alternative chain ID; we expect these ones to be ignored + pgtest.MustExec(t, db, ` + INSERT INTO channel_definitions(addr, evm_chain_id, definitions, block_num, updated_at) + VALUES ( $1, $2, $3, $4, NOW()) + `, addr1, testutils.SimulatedChainID.String(), channelDefsJSON, expectedBlockNum) + pgtest.MustExec(t, db, ` + INSERT INTO channel_definitions(addr, evm_chain_id, definitions, block_num, updated_at) + VALUES ( $1, $2, $3, $4, NOW()) + `, addr3, testutils.SimulatedChainID.String(), channelDefsJSON, expectedBlockNum) + } + + cd, blockNum, err := orm.LoadChannelDefinitions(ctx, addr1) + require.NoError(t, err) + + assert.Equal(t, llotypes.ChannelDefinitions{ + cid1: llotypes.ChannelDefinition{ + ReportFormat: 42, + ChainSelector: 142, + StreamIDs: []llotypes.StreamID{1, 2}, + }, + cid2: llotypes.ChannelDefinition{ + ReportFormat: 42, + ChainSelector: 142, + StreamIDs: []llotypes.StreamID{1, 3}, + }, + }, cd) + assert.Equal(t, expectedBlockNum, blockNum) + + cd, blockNum, err = orm.LoadChannelDefinitions(ctx, addr2) + require.NoError(t, err) + + assert.Equal(t, llotypes.ChannelDefinitions{}, cd) + assert.Equal(t, expectedBlockNum2, blockNum) + }) + }) +} diff --git a/core/services/llo/static_channel_definitions_cache.go b/core/services/llo/static_channel_definitions_cache.go new file mode 100644 index 00000000000..bf26dd781ee --- /dev/null +++ b/core/services/llo/static_channel_definitions_cache.go @@ -0,0 +1,56 @@ +package llo + +import ( + "context" + 
"encoding/json" + + "github.com/smartcontractkit/chainlink-common/pkg/services" + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +// A CDC that loads a static JSON of channel definitions; useful for +// benchmarking and testing + +var _ llotypes.ChannelDefinitionCache = &staticCDC{} + +type staticCDC struct { + services.StateMachine + lggr logger.Logger + + definitions llotypes.ChannelDefinitions +} + +func NewStaticChannelDefinitionCache(lggr logger.Logger, dfnstr string) (llotypes.ChannelDefinitionCache, error) { + var definitions llotypes.ChannelDefinitions + if err := json.Unmarshal([]byte(dfnstr), &definitions); err != nil { + return nil, err + } + return &staticCDC{services.StateMachine{}, lggr.Named("StaticChannelDefinitionCache"), definitions}, nil +} + +func (s *staticCDC) Start(context.Context) error { + return s.StartOnce("StaticChannelDefinitionCache", func() error { + return nil + }) +} + +func (s *staticCDC) Close() error { + return s.StopOnce("StaticChannelDefinitionCache", func() error { + return nil + }) +} + +func (s *staticCDC) Definitions() llotypes.ChannelDefinitions { + return s.definitions +} + +func (s *staticCDC) HealthReport() map[string]error { + report := map[string]error{s.Name(): s.Healthy()} + return report +} + +func (s *staticCDC) Name() string { + return s.lggr.Name() +} diff --git a/core/services/llo/transmitter.go b/core/services/llo/transmitter.go new file mode 100644 index 00000000000..eef211ab5d5 --- /dev/null +++ b/core/services/llo/transmitter.go @@ -0,0 +1,153 @@ +package llo + +import ( + "context" + "crypto/ed25519" + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/smartcontractkit/libocr/offchainreporting2/chains/evmutil" + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/services" + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb" +) + +// LLO Transmitter implementation, based on +// core/services/relay/evm/mercury/transmitter.go + +// TODO: prom metrics (common with mercury/transmitter.go?) 
+// https://smartcontract-it.atlassian.net/browse/MERC-3659 + +const ( + // Mercury server error codes + DuplicateReport = 2 + // TODO: revisit these values in light of parallel composition + // https://smartcontract-it.atlassian.net/browse/MERC-3659 + // maxTransmitQueueSize = 10_000 + // maxDeleteQueueSize = 10_000 + // transmitTimeout = 5 * time.Second +) + +var PayloadTypes = getPayloadTypes() + +func getPayloadTypes() abi.Arguments { + mustNewType := func(t string) abi.Type { + result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{}) + if err != nil { + panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err)) + } + return result + } + return abi.Arguments([]abi.Argument{ + {Name: "reportContext", Type: mustNewType("bytes32[2]")}, + {Name: "report", Type: mustNewType("bytes")}, + {Name: "rawRs", Type: mustNewType("bytes32[]")}, + {Name: "rawSs", Type: mustNewType("bytes32[]")}, + {Name: "rawVs", Type: mustNewType("bytes32")}, + }) +} + +type Transmitter interface { + llotypes.Transmitter + services.Service +} + +type transmitter struct { + services.StateMachine + lggr logger.Logger + rpcClient wsrpc.Client + fromAccount string +} + +func NewTransmitter(lggr logger.Logger, rpcClient wsrpc.Client, fromAccount ed25519.PublicKey) Transmitter { + return &transmitter{ + services.StateMachine{}, + lggr, + rpcClient, + fmt.Sprintf("%x", fromAccount), + } +} + +func (t *transmitter) Start(ctx context.Context) error { + return nil +} + +func (t *transmitter) Close() error { + return nil +} + +func (t *transmitter) HealthReport() map[string]error { + report := map[string]error{t.Name(): t.Healthy()} + services.CopyHealth(report, t.rpcClient.HealthReport()) + return report +} + +func (t *transmitter) Name() string { return t.lggr.Name() } + +func (t *transmitter) Transmit( + ctx context.Context, + digest types.ConfigDigest, + seqNr uint64, + report ocr3types.ReportWithInfo[llotypes.ReportInfo], + sigs []types.AttributedOnchainSignature, +) (err error) { + var payload []byte + + switch report.Info.ReportFormat { + case llotypes.ReportFormatJSON: + fallthrough + case llotypes.ReportFormatEVM: + payload, err = encodeEVM(digest, seqNr, report.Report, sigs) + default: + return fmt.Errorf("Transmit failed; unsupported report format: %q", report.Info.ReportFormat) + } + + if err != nil { + return fmt.Errorf("Transmit: encode failed; %w", err) + } + + req := &pb.TransmitRequest{ + Payload: payload, + ReportFormat: uint32(report.Info.ReportFormat), + } + + // TODO: persistenceManager and queueing, error handling, retry etc + // https://smartcontract-it.atlassian.net/browse/MERC-3659 + _, err = t.rpcClient.Transmit(ctx, req) + return err +} + +func encodeEVM(digest types.ConfigDigest, seqNr uint64, report ocr2types.Report, sigs []types.AttributedOnchainSignature) ([]byte, error) { + var rs [][32]byte + var ss [][32]byte + var vs [32]byte + for i, as := range sigs { + r, s, v, err := evmutil.SplitSignature(as.Signature) + if err != nil { + return nil, fmt.Errorf("eventTransmit(ev): error in SplitSignature: %w", err) + } + rs = append(rs, r) + ss = append(ss, s) + vs[i] = v + } + rawReportCtx := ocr2key.RawReportContext3(digest, seqNr) + + payload, err := PayloadTypes.Pack(rawReportCtx, []byte(report), rs, ss, vs) + if err != nil { + return nil, fmt.Errorf("abi.Pack failed; %w", err) + } + return payload, nil +} + +// FromAccount returns the stringified (hex) CSA public key +func (t *transmitter) FromAccount() (ocr2types.Account, error) { + return ocr2types.Account(t.fromAccount), nil +} diff 
--git a/core/services/llo/transmitter_test.go b/core/services/llo/transmitter_test.go new file mode 100644 index 00000000000..eb231494c0b --- /dev/null +++ b/core/services/llo/transmitter_test.go @@ -0,0 +1,7 @@ +package llo + +import "testing" + +func Test_Transmitter(t *testing.T) { + // TODO: https://smartcontract-it.atlassian.net/browse/MERC-3659 +} diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index 754f48013bd..336d1ae3800 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -36,6 +36,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/loop" "github.com/smartcontractkit/chainlink-common/pkg/loop/reportingplugins" "github.com/smartcontractkit/chainlink-common/pkg/types" + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox" "github.com/smartcontractkit/chainlink/v2/core/bridges" @@ -44,11 +45,14 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/job" "github.com/smartcontractkit/chainlink/v2/core/services/keystore" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key" + "github.com/smartcontractkit/chainlink/v2/core/services/llo" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/dkg" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/dkg/persistence" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/functions" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/generic" + lloconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/llo/config" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/median" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper" @@ -70,6 +74,7 @@ import ( evmmercury "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury" mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils" evmrelaytypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types" + "github.com/smartcontractkit/chainlink/v2/core/services/streams" "github.com/smartcontractkit/chainlink/v2/core/services/synchronization" "github.com/smartcontractkit/chainlink/v2/core/services/telemetry" "github.com/smartcontractkit/chainlink/v2/plugins" @@ -107,6 +112,7 @@ type Delegate struct { bridgeORM bridges.ORM mercuryORM evmmercury.ORM pipelineRunner pipeline.Runner + streamRegistry streams.Getter peerWrapper *ocrcommon.SingletonPeerWrapper monitoringEndpointGen telemetry.MonitoringEndpointGenerator cfg DelegateConfig @@ -219,6 +225,7 @@ func NewDelegate( bridgeORM bridges.ORM, mercuryORM evmmercury.ORM, pipelineRunner pipeline.Runner, + streamRegistry streams.Getter, peerWrapper *ocrcommon.SingletonPeerWrapper, monitoringEndpointGen telemetry.MonitoringEndpointGenerator, legacyChains legacyevm.LegacyChainContainer, @@ -238,6 +245,7 @@ func NewDelegate( bridgeORM: bridgeORM, mercuryORM: mercuryORM, pipelineRunner: pipelineRunner, + streamRegistry: streamRegistry, peerWrapper: peerWrapper, monitoringEndpointGen: monitoringEndpointGen, legacyChains: legacyChains, @@ -435,6 +443,9 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) { case types.Mercury: return 
d.newServicesMercury(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger) + case types.LLO: + return d.newServicesLLO(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger) + case types.Median: return d.newServicesMedian(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger) @@ -467,7 +478,7 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) { func GetEVMEffectiveTransmitterID(jb *job.Job, chain legacyevm.Chain, lggr logger.SugaredLogger) (string, error) { spec := jb.OCR2OracleSpec - if spec.PluginType == types.Mercury { + if spec.PluginType == types.Mercury || spec.PluginType == types.LLO { return spec.TransmitterID.String, nil } @@ -775,6 +786,135 @@ func (d *Delegate) newServicesMercury( return mercuryServices, err2 } +func (d *Delegate) newServicesLLO( + ctx context.Context, + lggr logger.SugaredLogger, + jb job.Job, + bootstrapPeers []commontypes.BootstrapperLocator, + kb ocr2key.KeyBundle, + ocrDB *db, + lc ocrtypes.LocalConfig, + ocrLogger commontypes.Logger, +) ([]job.ServiceCtx, error) { + lggr = logger.Sugared(lggr.Named("LLO")) + spec := jb.OCR2OracleSpec + transmitterID := spec.TransmitterID.String + if len(transmitterID) != 64 { + return nil, errors.Errorf("ServicesForSpec: streams job type requires transmitter ID to be a 32-byte hex string, got: %q", transmitterID) + } + if _, err := hex.DecodeString(transmitterID); err != nil { + return nil, errors.Wrapf(err, "ServicesForSpec: streams job type requires transmitter ID to be a 32-byte hex string, got: %q", transmitterID) + } + + rid, err := spec.RelayID() + if err != nil { + return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "streams"} + } + if rid.Network != relay.EVM { + return nil, fmt.Errorf("streams services: expected EVM relayer got %s", rid.Network) + } + relayer, err := d.RelayGetter.Get(rid) + if err != nil { + return nil, ErrRelayNotEnabled{Err: err, Relay: spec.Relay, PluginName: "streams"} + } + + provider, err2 := relayer.NewLLOProvider(ctx, + types.RelayArgs{ + ExternalJobID: jb.ExternalJobID, + JobID: jb.ID, + ContractID: spec.ContractID, + New: d.isNewlyCreatedJob, + RelayConfig: spec.RelayConfig.Bytes(), + ProviderType: string(spec.PluginType), + }, types.PluginArgs{ + TransmitterID: transmitterID, + PluginConfig: spec.PluginConfig.Bytes(), + }) + if err2 != nil { + return nil, err2 + } + + var pluginCfg lloconfig.PluginConfig + if err = json.Unmarshal(spec.PluginConfig.Bytes(), &pluginCfg); err != nil { + return nil, err + } + + kbm := make(map[llotypes.ReportFormat]llo.Key) + for rfStr, kbid := range pluginCfg.KeyBundleIDs { + k, err3 := d.ks.Get(kbid) + if err3 != nil { + return nil, fmt.Errorf("job %d (%s) specified key bundle ID %q for report format %s, but got error trying to load it: %w", jb.ID, jb.Name.ValueOrZero(), kbid, rfStr, err3) + } + rf, err4 := llotypes.ReportFormatFromString(rfStr) + if err4 != nil { + return nil, fmt.Errorf("job %d (%s) specified key bundle ID %q for report format %s, but it is not a recognized report format: %w", jb.ID, jb.Name.ValueOrZero(), kbid, rfStr, err4) + } + kbm[rf] = k + } + // NOTE: This is a bit messy because we assume chain type matches report + // format, and it may not in all cases. 
We don't yet know what report
+ // formats we need or how they correspond to chain types, so assume it's
+ // 1:1 for now but will change in future
+ //
+ // https://smartcontract-it.atlassian.net/browse/MERC-3722
+ for _, s := range chaintype.SupportedChainTypes {
+ rf, err3 := llotypes.ReportFormatFromString(string(s))
+ if err3 != nil {
+ return nil, fmt.Errorf("job %d (%s) has a chain type with no matching report format %s: %w", jb.ID, jb.Name.ValueOrZero(), s, err3)
+ }
+ if _, exists := kbm[rf]; !exists {
+ // Use the first if unspecified
+ kbs, err4 := d.ks.GetAllOfType(s)
+ if err4 != nil {
+ return nil, err4
+ }
+ if len(kbs) == 0 {
+ // unsupported key type
+ continue
+ } else if len(kbs) > 1 {
+ lggr.Debugf("Multiple on-chain signing keys found for report format %s, using the first", rf.String())
+ }
+ kbm[rf] = kbs[0]
+ }
+ }
+
+ // FIXME: This is a bit confusing because the OCR2 key bundle actually
+ // includes an EVM on-chain key... but LLO only uses the key bundle for the
+ // offchain keys and the supported onchain keys are defined in the plugin
+ // config on the job spec instead.
+ // https://smartcontract-it.atlassian.net/browse/MERC-3594
+ lggr.Infof("Using on-chain signing keys for LLO job %d (%s): %v", jb.ID, jb.Name.ValueOrZero(), kbm)
+ kr := llo.NewOnchainKeyring(lggr, kbm)
+
+ cfg := llo.DelegateConfig{
+ Logger: lggr,
+ Queryer: pg.NewQ(d.db, lggr, d.cfg.Database()),
+ Runner: d.pipelineRunner,
+ Registry: d.streamRegistry,
+
+ ChannelDefinitionCache: provider.ChannelDefinitionCache(),
+
+ BinaryNetworkEndpointFactory: d.peerWrapper.Peer2,
+ V2Bootstrappers: bootstrapPeers,
+ ContractTransmitter: provider.ContractTransmitter(),
+ ContractConfigTracker: provider.ContractConfigTracker(),
+ Database: ocrDB,
+ LocalConfig: lc,
+ // TODO: Telemetry for llo
+ // https://smartcontract-it.atlassian.net/browse/MERC-3603
+ MonitoringEndpoint: nil,
+ OffchainConfigDigester: provider.OffchainConfigDigester(),
+ OffchainKeyring: kb,
+ OnchainKeyring: kr,
+ OCRLogger: ocrLogger,
+ }
+ oracle, err := llo.NewDelegate(cfg)
+ if err != nil {
+ return nil, err
+ }
+ return []job.ServiceCtx{provider, oracle}, nil
+}
+
 func (d *Delegate) newServicesMedian(
 ctx context.Context,
 lggr logger.SugaredLogger,
diff --git a/core/services/ocr2/plugins/llo/config/config.go b/core/services/ocr2/plugins/llo/config/config.go
new file mode 100644
index 00000000000..15bb5e816a8
--- /dev/null
+++ b/core/services/ocr2/plugins/llo/config/config.go
@@ -0,0 +1,112 @@
+// config is a separate package so that we can validate
+// the config in other packages, for example in job at job create time.
+
+package config
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+ "regexp"
+
+ "github.com/ethereum/go-ethereum/common"
+
+ llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+type PluginConfig struct {
+ RawServerURL string `json:"serverURL" toml:"serverURL"`
+ ServerPubKey utils.PlainHexBytes `json:"serverPubKey" toml:"serverPubKey"`
+
+ ChannelDefinitionsContractAddress common.Address `json:"channelDefinitionsContractAddress" toml:"channelDefinitionsContractAddress"`
+ ChannelDefinitionsContractFromBlock int64 `json:"channelDefinitionsContractFromBlock" toml:"channelDefinitionsContractFromBlock"`
+
+ // NOTE: ChannelDefinitions is an override.
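+ // As a rough illustration (placeholder values only), it holds the JSON
+ // encoding of llotypes.ChannelDefinitions, e.g.:
+ //   {"42": {"reportFormat": 42, "chainSelector": 142, "streamIds": [1, 2]}}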
+ // If ChannelDefinitions is specified, values for
+ // ChannelDefinitionsContractAddress and
+ // ChannelDefinitionsContractFromBlock will be ignored
+ ChannelDefinitions string `json:"channelDefinitions" toml:"channelDefinitions"`
+
+ // BenchmarkMode is a flag to enable benchmarking mode. In this mode, the
+ // transmitter will not transmit anything at all and instead emit
+ // logs/metrics.
+ BenchmarkMode bool `json:"benchmarkMode" toml:"benchmarkMode"`
+
+ // KeyBundleIDs maps supported keys to their respective bundle IDs
+ // Key must match llo's ReportFormat
+ KeyBundleIDs map[string]string `json:"keyBundleIDs" toml:"keyBundleIDs"`
+}
+
+func (p PluginConfig) Validate() (merr error) {
+ if p.RawServerURL == "" {
+ merr = errors.New("llo: ServerURL must be specified")
+ } else {
+ var normalizedURI string
+ if schemeRegexp.MatchString(p.RawServerURL) {
+ normalizedURI = p.RawServerURL
+ } else {
+ normalizedURI = fmt.Sprintf("wss://%s", p.RawServerURL)
+ }
+ uri, err := url.ParseRequestURI(normalizedURI)
+ if err != nil {
+ merr = fmt.Errorf("llo: invalid value for ServerURL: %w", err)
+ } else if uri.Scheme != "wss" {
+ merr = fmt.Errorf(`llo: invalid scheme specified for MercuryServer, got: %q (scheme: %q) but expected a websocket url e.g. "192.0.2.2:4242" or "wss://192.0.2.2:4242"`, p.RawServerURL, uri.Scheme)
+ }
+ }
+
+ if p.ChannelDefinitions != "" {
+ if p.ChannelDefinitionsContractAddress != (common.Address{}) {
+ merr = errors.Join(merr, errors.New("llo: ChannelDefinitionsContractAddress is not allowed if ChannelDefinitions is specified"))
+ }
+ if p.ChannelDefinitionsContractFromBlock != 0 {
+ merr = errors.Join(merr, errors.New("llo: ChannelDefinitionsContractFromBlock is not allowed if ChannelDefinitions is specified"))
+ }
+ var cd llotypes.ChannelDefinitions
+ if err := json.Unmarshal([]byte(p.ChannelDefinitions), &cd); err != nil {
+ merr = errors.Join(merr, fmt.Errorf("channelDefinitions is invalid JSON: %w", err))
+ }
+ } else {
+ if p.ChannelDefinitionsContractAddress == (common.Address{}) {
+ merr = errors.Join(merr, errors.New("llo: ChannelDefinitionsContractAddress is required if ChannelDefinitions is not specified"))
+ }
+ }
+
+ if len(p.ServerPubKey) != 32 {
+ merr = errors.Join(merr, errors.New("llo: ServerPubKey is required and must be a 32-byte hex string"))
+ }
+
+ merr = errors.Join(merr, validateKeyBundleIDs(p.KeyBundleIDs))
+
+ return merr
+}
+
+func validateKeyBundleIDs(keyBundleIDs map[string]string) error {
+ for k, v := range keyBundleIDs {
+ if k == "" {
+ return errors.New("llo: KeyBundleIDs: key must not be empty")
+ }
+ if v == "" {
+ return errors.New("llo: KeyBundleIDs: value must not be empty")
+ }
+ if _, err := llotypes.ReportFormatFromString(k); err != nil {
+ return fmt.Errorf("llo: KeyBundleIDs: key must be a recognized report format, got: %s (err: %w)", k, err)
+ }
+ if !chaintype.IsSupportedChainType(chaintype.ChainType(k)) {
+ return fmt.Errorf("llo: KeyBundleIDs: key must be a supported chain type, got: %s", k)
+ }
+
+ }
+ return nil
+}
+
+var schemeRegexp = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9+.-]*://`)
+var wssRegexp = regexp.MustCompile(`^wss://`)
+
+func (p PluginConfig) ServerURL() string {
+ return wssRegexp.ReplaceAllString(p.RawServerURL, "")
+}
diff --git a/core/services/ocr2/plugins/llo/config/config_test.go b/core/services/ocr2/plugins/llo/config/config_test.go
new file mode 100644
index 00000000000..136fac87a56
--- /dev/null
+++ b/core/services/ocr2/plugins/llo/config/config_test.go
@@ -0,0 +1,142 @@
+package config
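+
+// As a rough sketch (illustrative placeholder values only), a minimal
+// [pluginConfig] TOML that passes Validate() looks like:
+//
+//	serverURL = "example.com:80"
+//	serverPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93"
+//	channelDefinitionsContractAddress = "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"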
+ +import ( + "fmt" + "testing" + + "github.com/pelletier/go-toml/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Config(t *testing.T) { + t.Run("unmarshals from toml", func(t *testing.T) { + cdjson := `{ + "42": { + "reportFormat": 42, + "chainSelector": 142, + "streamIds": [1, 2] + }, + "43": { + "reportFormat": 42, + "chainSelector": 142, + "streamIds": [1, 3] + }, + "44": { + "reportFormat": 42, + "chainSelector": 143, + "streamIds": [1, 4] + } +}` + + t.Run("with all possible values set", func(t *testing.T) { + rawToml := fmt.Sprintf(` + ServerURL = "example.com:80" + ServerPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93" + BenchmarkMode = true + ChannelDefinitionsContractAddress = "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" + ChannelDefinitionsContractFromBlock = 1234 + ChannelDefinitions = """ +%s +"""`, cdjson) + + var mc PluginConfig + err := toml.Unmarshal([]byte(rawToml), &mc) + require.NoError(t, err) + + assert.Equal(t, "example.com:80", mc.RawServerURL) + assert.Equal(t, "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93", mc.ServerPubKey.String()) + assert.Equal(t, "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", mc.ChannelDefinitionsContractAddress.Hex()) + assert.Equal(t, int64(1234), mc.ChannelDefinitionsContractFromBlock) + assert.JSONEq(t, cdjson, mc.ChannelDefinitions) + assert.True(t, mc.BenchmarkMode) + + err = mc.Validate() + require.Error(t, err) + + assert.Contains(t, err.Error(), "llo: ChannelDefinitionsContractAddress is not allowed if ChannelDefinitions is specified") + assert.Contains(t, err.Error(), "llo: ChannelDefinitionsContractFromBlock is not allowed if ChannelDefinitions is specified") + }) + + t.Run("with only channelDefinitions", func(t *testing.T) { + rawToml := fmt.Sprintf(` + ServerURL = "example.com:80" + ServerPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93" + ChannelDefinitions = """ +%s +"""`, cdjson) + + var mc PluginConfig + err := toml.Unmarshal([]byte(rawToml), &mc) + require.NoError(t, err) + + assert.Equal(t, "example.com:80", mc.RawServerURL) + assert.Equal(t, "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93", mc.ServerPubKey.String()) + assert.JSONEq(t, cdjson, mc.ChannelDefinitions) + assert.False(t, mc.BenchmarkMode) + + err = mc.Validate() + require.NoError(t, err) + }) + t.Run("with only channelDefinitions contract details", func(t *testing.T) { + rawToml := ` + ServerURL = "example.com:80" + ServerPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93" + ChannelDefinitionsContractAddress = "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"` + + var mc PluginConfig + err := toml.Unmarshal([]byte(rawToml), &mc) + require.NoError(t, err) + + assert.Equal(t, "example.com:80", mc.RawServerURL) + assert.Equal(t, "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93", mc.ServerPubKey.String()) + assert.Equal(t, "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", mc.ChannelDefinitionsContractAddress.Hex()) + assert.False(t, mc.BenchmarkMode) + + err = mc.Validate() + require.NoError(t, err) + }) + t.Run("with missing ChannelDefinitionsContractAddress", func(t *testing.T) { + rawToml := ` + ServerURL = "example.com:80" + ServerPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93"` + + var mc PluginConfig + err := toml.Unmarshal([]byte(rawToml), &mc) + require.NoError(t, err) + + assert.Equal(t, "example.com:80", mc.RawServerURL) + assert.Equal(t, 
"724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93", mc.ServerPubKey.String()) + assert.False(t, mc.BenchmarkMode) + + err = mc.Validate() + require.Error(t, err) + assert.EqualError(t, err, "llo: ChannelDefinitionsContractAddress is required if ChannelDefinitions is not specified") + }) + + t.Run("with invalid values", func(t *testing.T) { + rawToml := ` + ChannelDefinitionsContractFromBlock = "invalid" + ` + + var mc PluginConfig + err := toml.Unmarshal([]byte(rawToml), &mc) + require.Error(t, err) + assert.EqualError(t, err, `toml: cannot decode TOML string into struct field config.PluginConfig.ChannelDefinitionsContractFromBlock of type int64`) + assert.False(t, mc.BenchmarkMode) + + rawToml = ` + ServerURL = "http://example.com" + ServerPubKey = "4242" + ` + + err = toml.Unmarshal([]byte(rawToml), &mc) + require.NoError(t, err) + + err = mc.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), `invalid scheme specified for MercuryServer, got: "http://example.com" (scheme: "http") but expected a websocket url e.g. "192.0.2.2:4242" or "wss://192.0.2.2:4242"`) + assert.Contains(t, err.Error(), `ServerPubKey is required and must be a 32-byte hex string`) + }) + }) +} diff --git a/core/services/ocr2/plugins/llo/helpers_test.go b/core/services/ocr2/plugins/llo/helpers_test.go new file mode 100644 index 00000000000..ae9850134b9 --- /dev/null +++ b/core/services/ocr2/plugins/llo/helpers_test.go @@ -0,0 +1,356 @@ +package llo_test + +import ( + "context" + "crypto/ed25519" + "errors" + "fmt" + "io" + "math/big" + "net" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" + + "github.com/smartcontractkit/wsrpc" + "github.com/smartcontractkit/wsrpc/credentials" + "github.com/smartcontractkit/wsrpc/peer" + + "github.com/smartcontractkit/libocr/offchainreporting2/chains/evmutil" + ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config" + + "github.com/smartcontractkit/chainlink/v2/core/bridges" + "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" + "github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/keystest" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/validate" + "github.com/smartcontractkit/chainlink/v2/core/services/ocrbootstrap" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb" + "github.com/smartcontractkit/chainlink/v2/core/services/streams" + 
"github.com/smartcontractkit/chainlink/v2/core/store/models" +) + +var _ pb.MercuryServer = &mercuryServer{} + +type request struct { + pk credentials.StaticSizedPublicKey + req *pb.TransmitRequest +} + +func (r request) TransmitterID() ocr2types.Account { + return ocr2types.Account(fmt.Sprintf("%x", r.pk)) +} + +type mercuryServer struct { + privKey ed25519.PrivateKey + reqsCh chan request + t *testing.T + buildReport func() []byte +} + +func NewMercuryServer(t *testing.T, privKey ed25519.PrivateKey, reqsCh chan request, buildReport func() []byte) *mercuryServer { + return &mercuryServer{privKey, reqsCh, t, buildReport} +} + +func (s *mercuryServer) Transmit(ctx context.Context, req *pb.TransmitRequest) (*pb.TransmitResponse, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("could not extract public key") + } + r := request{p.PublicKey, req} + s.reqsCh <- r + + return &pb.TransmitResponse{ + Code: 1, + Error: "", + }, nil +} + +func (s *mercuryServer) LatestReport(ctx context.Context, lrr *pb.LatestReportRequest) (*pb.LatestReportResponse, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("could not extract public key") + } + s.t.Logf("mercury server got latest report from %x for feed id 0x%x", p.PublicKey, lrr.FeedId) + + out := new(pb.LatestReportResponse) + out.Report = new(pb.Report) + out.Report.FeedId = lrr.FeedId + + report := s.buildReport() + payload, err := mercury.PayloadTypes.Pack(evmutil.RawReportContext(ocrtypes.ReportContext{}), report, [][32]byte{}, [][32]byte{}, [32]byte{}) + if err != nil { + require.NoError(s.t, err) + } + out.Report.Payload = payload + return out, nil +} + +func startMercuryServer(t *testing.T, srv *mercuryServer, pubKeys []ed25519.PublicKey) (serverURL string) { + // Set up the wsrpc server + lis, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("[MAIN] failed to listen: %v", err) + } + serverURL = lis.Addr().String() + s := wsrpc.NewServer(wsrpc.Creds(srv.privKey, pubKeys)) + + // Register mercury implementation with the wsrpc server + pb.RegisterMercuryServer(s, srv) + + // Start serving + go s.Serve(lis) + t.Cleanup(s.Stop) + + return +} + +type Node struct { + App chainlink.Application + ClientPubKey credentials.StaticSizedPublicKey + KeyBundle ocr2key.KeyBundle +} + +func (node *Node) AddStreamJob(t *testing.T, spec string) { + job, err := streams.ValidatedStreamSpec(spec) + require.NoError(t, err) + err = node.App.AddJobV2(testutils.Context(t), &job) + require.NoError(t, err) +} + +func (node *Node) AddLLOJob(t *testing.T, spec string) { + c := node.App.GetConfig() + job, err := validate.ValidatedOracleSpecToml(c.OCR2(), c.Insecure(), spec) + require.NoError(t, err) + err = node.App.AddJobV2(testutils.Context(t), &job) + require.NoError(t, err) +} + +func (node *Node) AddBootstrapJob(t *testing.T, spec string) { + job, err := ocrbootstrap.ValidatedBootstrapSpecToml(spec) + require.NoError(t, err) + err = node.App.AddJobV2(testutils.Context(t), &job) + require.NoError(t, err) +} + +func setupNode( + t *testing.T, + port int, + dbName string, + backend *backends.SimulatedBackend, + csaKey csakey.KeyV2, +) (app chainlink.Application, peerID string, clientPubKey credentials.StaticSizedPublicKey, ocr2kb ocr2key.KeyBundle, observedLogs *observer.ObservedLogs) { + k := big.NewInt(int64(port)) // keys unique to port + p2pKey := p2pkey.MustNewV2XXXTestingOnly(k) + rdr := keystest.NewRandReaderFromSeed(int64(port)) + ocr2kb = ocr2key.MustNewInsecure(rdr, chaintype.EVM) + + 
p2paddresses := []string{fmt.Sprintf("127.0.0.1:%d", port)} + + config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { + // [JobPipeline] + c.JobPipeline.MaxSuccessfulRuns = ptr(uint64(0)) + + // [Feature] + c.Feature.UICSAKeys = ptr(true) + c.Feature.LogPoller = ptr(true) + c.Feature.FeedsManager = ptr(false) + + // [OCR] + c.OCR.Enabled = ptr(false) + + // [OCR2] + c.OCR2.Enabled = ptr(true) + c.OCR2.ContractPollInterval = commonconfig.MustNewDuration(1 * time.Second) + + // [P2P] + c.P2P.PeerID = ptr(p2pKey.PeerID()) + c.P2P.TraceLogging = ptr(true) + + // [P2P.V2] + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.AnnounceAddresses = &p2paddresses + c.P2P.V2.ListenAddresses = &p2paddresses + c.P2P.V2.DeltaDial = commonconfig.MustNewDuration(500 * time.Millisecond) + c.P2P.V2.DeltaReconcile = commonconfig.MustNewDuration(5 * time.Second) + }) + + lggr, observedLogs := logger.TestLoggerObserved(t, zapcore.DebugLevel) + app = cltest.NewApplicationWithConfigV2OnSimulatedBlockchain(t, config, backend, p2pKey, ocr2kb, csaKey, lggr.Named(dbName)) + err := app.Start(testutils.Context(t)) + require.NoError(t, err) + + t.Cleanup(func() { + assert.NoError(t, app.Stop()) + }) + + return app, p2pKey.PeerID().Raw(), csaKey.StaticSizedPublicKey(), ocr2kb, observedLogs +} + +func ptr[T any](t T) *T { return &t } + +func addStreamJob( + t *testing.T, + node Node, + streamID uint32, + bridgeName string, +) { + node.AddStreamJob(t, fmt.Sprintf(` +type = "stream" +schemaVersion = 1 +name = "strm-spec-%d" +streamID = %d +observationSource = """ + // Benchmark Price + price1 [type=bridge name="%s" requestData="{\\"data\\":{\\"data\\":\\"foo\\"}}"]; + price1_parse [type=jsonparse path="result"]; + price1_multiply [type=multiply times=100000000 index=0]; + + price1 -> price1_parse -> price1_multiply; +""" + + `, + streamID, + streamID, + bridgeName, + )) +} +func addBootstrapJob(t *testing.T, bootstrapNode Node, chainID *big.Int, verifierAddress common.Address, name string) { + bootstrapNode.AddBootstrapJob(t, fmt.Sprintf(` +type = "bootstrap" +relay = "evm" +schemaVersion = 1 +name = "boot-%s" +contractID = "%s" +contractConfigTrackerPollInterval = "1s" + +[relayConfig] +chainID = %s +providerType = "llo" + `, name, verifierAddress.Hex(), chainID.String())) +} + +func addLLOJob( + t *testing.T, + node Node, + verifierAddress, + configStoreAddress common.Address, + bootstrapPeerID string, + bootstrapNodePort int, + serverURL string, + serverPubKey, + clientPubKey ed25519.PublicKey, + jobName string, + chainID *big.Int, + fromBlock int, +) { + node.AddLLOJob(t, fmt.Sprintf(` +type = "offchainreporting2" +schemaVersion = 1 +name = "%[1]s" +forwardingAllowed = false +maxTaskDuration = "1s" +contractID = "%[2]s" +contractConfigTrackerPollInterval = "1s" +ocrKeyBundleID = "%[3]s" +p2pv2Bootstrappers = [ + "%[4]s" +] +relay = "evm" +pluginType = "llo" +transmitterID = "%[5]x" + +[pluginConfig] +serverURL = "%[6]s" +serverPubKey = "%[7]x" +channelDefinitionsContractFromBlock = %[8]d +channelDefinitionsContractAddress = "%[9]s" + +[relayConfig] +chainID = %[10]s +fromBlock = 1`, + jobName, + verifierAddress.Hex(), + node.KeyBundle.ID(), + fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), + clientPubKey, + serverURL, + serverPubKey, + fromBlock, + configStoreAddress.Hex(), + chainID.String(), + )) +} + +func addOCRJobs(t *testing.T, streams []Stream, serverPubKey ed25519.PublicKey, serverURL string, verifierAddress common.Address, bootstrapPeerID string, bootstrapNodePort 
int, nodes []Node, configStoreAddress common.Address, clientPubKeys []ed25519.PublicKey, chainID *big.Int, fromBlock int) { + createBridge := func(name string, i int, p *big.Int, borm bridges.ORM) (bridgeName string) { + bridge := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + b, err := io.ReadAll(req.Body) + require.NoError(t, err) + require.Equal(t, `{"data":{"data":"foo"}}`, string(b)) + + res.WriteHeader(http.StatusOK) + val := decimal.NewFromBigInt(p, 0).Div(decimal.NewFromInt(multiplier)).Add(decimal.NewFromInt(int64(i)).Div(decimal.NewFromInt(100))).String() + resp := fmt.Sprintf(`{"result": %s}`, val) + _, err = res.Write([]byte(resp)) + require.NoError(t, err) + })) + t.Cleanup(bridge.Close) + u, _ := url.Parse(bridge.URL) + bridgeName = fmt.Sprintf("bridge-%s-%d", name, i) + require.NoError(t, borm.CreateBridgeType(&bridges.BridgeType{ + Name: bridges.BridgeName(bridgeName), + URL: models.WebURL(*u), + })) + + return bridgeName + } + + // Add OCR jobs - one per feed on each node + for i, node := range nodes { + for j, strm := range streams { + bmBridge := createBridge(fmt.Sprintf("benchmarkprice-%d-%d", strm.id, j), i, strm.baseBenchmarkPrice, node.App.BridgeORM()) + addStreamJob( + t, + node, + strm.id, + bmBridge, + ) + } + addLLOJob( + t, + node, + verifierAddress, + configStoreAddress, + bootstrapPeerID, + bootstrapNodePort, + serverURL, + serverPubKey, + clientPubKeys[i], + "feed-1", + chainID, + fromBlock, + ) + } +} diff --git a/core/services/ocr2/plugins/llo/integration_test.go b/core/services/ocr2/plugins/llo/integration_test.go new file mode 100644 index 00000000000..df77316e4dd --- /dev/null +++ b/core/services/ocr2/plugins/llo/integration_test.go @@ -0,0 +1,370 @@ +package llo_test + +import ( + "crypto/ed25519" + "encoding/hex" + "fmt" + "math/big" + "math/rand" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + chainselectors "github.com/smartcontractkit/chain-selectors" + "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3confighelper" + ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + "github.com/smartcontractkit/chainlink-common/pkg/utils" + datastreamsllo "github.com/smartcontractkit/chainlink-data-streams/llo" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/channel_config_store" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/channel_verifier" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier_proxy" + "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey" + "github.com/smartcontractkit/chainlink/v2/core/services/llo" + lloevm "github.com/smartcontractkit/chainlink/v2/core/services/llo/evm" + 
"github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" +) + +var ( + fNodes = uint8(1) + nNodes = 4 // number of nodes (not including bootstrap) + multiplier int64 = 100000000 +) + +func setupBlockchain(t *testing.T) (*bind.TransactOpts, *backends.SimulatedBackend, *channel_verifier.ChannelVerifier, common.Address, *channel_config_store.ChannelConfigStore, common.Address) { + steve := testutils.MustNewSimTransactor(t) // config contract deployer and owner + genesisData := core.GenesisAlloc{steve.From: {Balance: assets.Ether(1000).ToInt()}} + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + backend.Commit() + backend.Commit() // ensure starting block number at least 1 + + // Deploy contracts + verifierProxyAddr, _, _, err := verifier_proxy.DeployVerifierProxy(steve, backend, common.Address{}) // zero address for access controller disables access control + require.NoError(t, err) + + verifierAddress, _, verifierContract, err := channel_verifier.DeployChannelVerifier(steve, backend, verifierProxyAddr) + require.NoError(t, err) + configStoreAddress, _, configStoreContract, err := channel_config_store.DeployChannelConfigStore(steve, backend) + require.NoError(t, err) + + backend.Commit() + + return steve, backend, verifierContract, verifierAddress, configStoreContract, configStoreAddress +} + +type Stream struct { + id uint32 + baseBenchmarkPrice *big.Int +} + +func TestIntegration_LLO(t *testing.T) { + testStartTimeStamp := uint32(time.Now().Unix()) + + const fromBlock = 1 // cannot use zero, start from block 1 + + // streams + btcStream := Stream{ + id: 51, + baseBenchmarkPrice: big.NewInt(20_000 * multiplier), + } + ethStream := Stream{ + id: 52, + baseBenchmarkPrice: big.NewInt(1_568 * multiplier), + } + linkStream := Stream{ + id: 53, + baseBenchmarkPrice: big.NewInt(7150 * multiplier / 1000), + } + dogeStream := Stream{ + id: 54, + baseBenchmarkPrice: big.NewInt(2_020 * multiplier), + } + streams := []Stream{btcStream, ethStream, linkStream, dogeStream} + streamMap := make(map[uint32]Stream) + for _, strm := range streams { + streamMap[strm.id] = strm + } + + reqs := make(chan request) + serverKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(-1)) + serverPubKey := serverKey.PublicKey + srv := NewMercuryServer(t, ed25519.PrivateKey(serverKey.Raw()), reqs, nil) + + clientCSAKeys := make([]csakey.KeyV2, nNodes) + clientPubKeys := make([]ed25519.PublicKey, nNodes) + for i := 0; i < nNodes; i++ { + k := big.NewInt(int64(i)) + key := csakey.MustNewV2XXXTestingOnly(k) + clientCSAKeys[i] = key + clientPubKeys[i] = key.PublicKey + } + serverURL := startMercuryServer(t, srv, clientPubKeys) + chainID := testutils.SimulatedChainID + + steve, backend, verifierContract, verifierAddress, configStoreContract, configStoreAddress := setupBlockchain(t) + + // Setup bootstrap + bootstrapCSAKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(-1)) + bootstrapNodePort := freeport.GetOne(t) + appBootstrap, bootstrapPeerID, _, bootstrapKb, _ := setupNode(t, bootstrapNodePort, "bootstrap_mercury", backend, bootstrapCSAKey) + bootstrapNode := Node{App: appBootstrap, KeyBundle: bootstrapKb} + + // Setup oracle nodes + var ( + oracles []confighelper.OracleIdentityExtra + nodes []Node + ) + ports := freeport.GetN(t, nNodes) + for i := 0; i < nNodes; i++ { + app, peerID, transmitter, kb, _ := setupNode(t, ports[i], fmt.Sprintf("oracle_streams_%d", i), backend, clientCSAKeys[i]) + + nodes = append(nodes, Node{ + app, transmitter, kb, + }) + offchainPublicKey, _ 
:= hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x")) + oracles = append(oracles, confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnchainPublicKey: offchainPublicKey, + TransmitAccount: ocr2types.Account(fmt.Sprintf("%x", transmitter[:])), + OffchainPublicKey: kb.OffchainPublicKey(), + PeerID: peerID, + }, + ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(), + }) + } + + configDigest := setConfig(t, steve, backend, verifierContract, verifierAddress, nodes, oracles) + channelDefinitions := setChannelDefinitions(t, steve, backend, configStoreContract, streams) + + // Bury everything with finality depth + ch, err := nodes[0].App.GetRelayers().LegacyEVMChains().Get(testutils.SimulatedChainID.String()) + require.NoError(t, err) + finalityDepth := ch.Config().EVM().FinalityDepth() + for i := 0; i < int(finalityDepth); i++ { + backend.Commit() + } + + addBootstrapJob(t, bootstrapNode, chainID, verifierAddress, "job-1") + addOCRJobs(t, streams, serverPubKey, serverURL, verifierAddress, bootstrapPeerID, bootstrapNodePort, nodes, configStoreAddress, clientPubKeys, chainID, fromBlock) + + t.Run("receives at least one report per feed from each oracle when EAs are at 100% reliability", func(t *testing.T) { + // Expect at least one report per channel from each oracle (keyed by transmitter ID) + seen := make(map[ocr2types.Account]map[llotypes.ChannelID]struct{}) + + for channelID, defn := range channelDefinitions { + t.Logf("Expect report for channel ID %x (definition: %#v)", channelID, defn) + } + for _, o := range oracles { + t.Logf("Expect report from oracle %s", o.OracleIdentity.TransmitAccount) + seen[o.OracleIdentity.TransmitAccount] = make(map[llotypes.ChannelID]struct{}) + } + + for req := range reqs { + if _, exists := seen[req.TransmitterID()]; !exists { + // oracle already reported on all channels; discard + continue + } + + v := make(map[string]interface{}) + err := llo.PayloadTypes.UnpackIntoMap(v, req.req.Payload) + require.NoError(t, err) + report, exists := v["report"] + if !exists { + t.Fatalf("FAIL: expected payload %#v to contain 'report'", v) + } + + t.Logf("Got report from oracle %x with format: %d", req.pk, req.req.ReportFormat) + + var r datastreamsllo.Report + + switch req.req.ReportFormat { + case uint32(llotypes.ReportFormatJSON): + t.Logf("Got report (JSON) from oracle %x: %s", req.pk, string(report.([]byte))) + var err error + r, err = (datastreamsllo.JSONReportCodec{}).Decode(report.([]byte)) + require.NoError(t, err, "expected valid JSON") + case uint32(llotypes.ReportFormatEVM): + t.Logf("Got report (EVM) from oracle %x: 0x%x", req.pk, report.([]byte)) + var err error + r, err = (lloevm.ReportCodec{}).Decode(report.([]byte)) + require.NoError(t, err, "expected valid EVM encoding") + default: + t.Fatalf("FAIL: unexpected report format: %q", req.req.ReportFormat) + } + + assert.Equal(t, configDigest, r.ConfigDigest) + assert.Equal(t, uint64(0x2ee634951ef71b46), r.ChainSelector) + assert.GreaterOrEqual(t, r.SeqNr, uint64(1)) + assert.GreaterOrEqual(t, r.ValidAfterSeconds, testStartTimeStamp) + assert.Equal(t, r.ValidAfterSeconds+1, r.ValidUntilSeconds) + + // values + defn, exists := channelDefinitions[r.ChannelID] + require.True(t, exists, "expected channel ID to be in channelDefinitions") + + require.Equal(t, len(defn.StreamIDs), len(r.Values)) + + for i, strmID := range defn.StreamIDs { + strm, exists := streamMap[strmID] + require.True(t, exists, "invariant violation: expected stream ID to be present") + 
assert.InDelta(t, strm.baseBenchmarkPrice.Int64(), r.Values[i].Int64(), 5000000) + } + + assert.False(t, r.Specimen) + + seen[req.TransmitterID()][r.ChannelID] = struct{}{} + t.Logf("Got report from oracle %s with channel: %x)", req.TransmitterID(), r.ChannelID) + + if _, exists := seen[req.TransmitterID()]; exists && len(seen[req.TransmitterID()]) == len(channelDefinitions) { + t.Logf("All channels reported for oracle with transmitterID %s", req.TransmitterID()) + delete(seen, req.TransmitterID()) + } + if len(seen) == 0 { + break // saw all oracles; success! + } + + // bit of a hack here but shouldn't hurt anything, we wanna dump + // `seen` before the test ends to aid in debugging test failures + if d, ok := t.Deadline(); ok { + select { + case <-time.After(time.Until(d.Add(-100 * time.Millisecond))): + if len(seen) > 0 { + t.Fatalf("FAILED: ERROR: missing expected reports: %#v\n", seen) + } + default: + } + } + } + }) + + // TODO: test verification +} + +func setConfig(t *testing.T, steve *bind.TransactOpts, backend *backends.SimulatedBackend, verifierContract *channel_verifier.ChannelVerifier, verifierAddress common.Address, nodes []Node, oracles []confighelper.OracleIdentityExtra) ocr2types.ConfigDigest { + // Setup config on contract + rawOnchainConfig := datastreamsllo.OnchainConfig{} + onchainConfig, err := (&datastreamsllo.JSONOnchainConfigCodec{}).Encode(rawOnchainConfig) + require.NoError(t, err) + + rawReportingPluginConfig := datastreamsllo.OffchainConfig{} + reportingPluginConfig, err := rawReportingPluginConfig.Encode() + require.NoError(t, err) + + signers, _, _, _, offchainConfigVersion, offchainConfig, err := ocr3confighelper.ContractSetConfigArgsForTests( + 2*time.Second, // DeltaProgress + 20*time.Second, // DeltaResend + 400*time.Millisecond, // DeltaInitial + 1000*time.Millisecond, // DeltaRound + 500*time.Millisecond, // DeltaGrace + 300*time.Millisecond, // DeltaCertifiedCommitRequest + 1*time.Minute, // DeltaStage + 100, // rMax + []int{len(nodes)}, // S + oracles, + reportingPluginConfig, // reportingPluginConfig []byte, + 0, // maxDurationQuery + 250*time.Millisecond, // maxDurationObservation + 0, // maxDurationShouldAcceptAttestedReport + 0, // maxDurationShouldTransmitAcceptedReport + int(fNodes), // f + onchainConfig, + ) + + require.NoError(t, err) + signerAddresses, err := evm.OnchainPublicKeyToAddress(signers) + require.NoError(t, err) + + offchainTransmitters := make([][32]byte, nNodes) + for i := 0; i < nNodes; i++ { + offchainTransmitters[i] = nodes[i].ClientPubKey + } + + _, err = verifierContract.SetConfig(steve, signerAddresses, offchainTransmitters, fNodes, offchainConfig, offchainConfigVersion, offchainConfig, nil) + require.NoError(t, err) + + backend.Commit() + + accounts := make([]ocr2types.Account, len(offchainTransmitters)) + for i := range offchainTransmitters { + accounts[i] = ocr2types.Account(fmt.Sprintf("%x", offchainTransmitters[i])) + } + + l, err := verifierContract.LatestConfigDigestAndEpoch(&bind.CallOpts{}) + require.NoError(t, err) + + return l.ConfigDigest +} + +func setChannelDefinitions(t *testing.T, steve *bind.TransactOpts, backend *backends.SimulatedBackend, configStoreContract *channel_config_store.ChannelConfigStore, streams []Stream) map[llotypes.ChannelID]channel_config_store.IChannelConfigStoreChannelDefinition { + channels := []llotypes.ChannelID{ + rand.Uint32(), + rand.Uint32(), + rand.Uint32(), + rand.Uint32(), + } + + chainSelector, err := chainselectors.SelectorFromChainId(testutils.SimulatedChainID.Uint64()) + 
require.NoError(t, err) + + streamIDs := make([]uint32, len(streams)) + for i := 0; i < len(streams); i++ { + streamIDs[i] = streams[i].id + } + + // First set contains [1,len(streams)] + channel0Def := channel_config_store.IChannelConfigStoreChannelDefinition{ + ReportFormat: uint32(llotypes.ReportFormatJSON), + ChainSelector: chainSelector, + StreamIDs: streamIDs[1:len(streams)], + } + channel1Def := channel_config_store.IChannelConfigStoreChannelDefinition{ + ReportFormat: uint32(llotypes.ReportFormatEVM), + ChainSelector: chainSelector, + StreamIDs: streamIDs[1:len(streams)], + } + + // Second set contains [0,len(streams)-1] + channel2Def := channel_config_store.IChannelConfigStoreChannelDefinition{ + ReportFormat: uint32(llotypes.ReportFormatJSON), + ChainSelector: chainSelector, + StreamIDs: streamIDs[0 : len(streams)-1], + } + channel3Def := channel_config_store.IChannelConfigStoreChannelDefinition{ + ReportFormat: uint32(llotypes.ReportFormatEVM), + ChainSelector: chainSelector, + StreamIDs: streamIDs[0 : len(streams)-1], + } + + require.NoError(t, utils.JustError(configStoreContract.AddChannel(steve, channels[0], channel0Def))) + require.NoError(t, utils.JustError(configStoreContract.AddChannel(steve, channels[1], channel1Def))) + require.NoError(t, utils.JustError(configStoreContract.AddChannel(steve, channels[2], channel2Def))) + require.NoError(t, utils.JustError(configStoreContract.AddChannel(steve, channels[3], channel3Def))) + + backend.Commit() + + channelDefinitions := make(map[llotypes.ChannelID]channel_config_store.IChannelConfigStoreChannelDefinition) + + channelDefinitions[channels[0]] = channel0Def + channelDefinitions[channels[1]] = channel1Def + channelDefinitions[channels[2]] = channel2Def + channelDefinitions[channels[3]] = channel3Def + + backend.Commit() + + return channelDefinitions +} diff --git a/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go b/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go new file mode 100644 index 00000000000..a9ba09d96bb --- /dev/null +++ b/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go @@ -0,0 +1,213 @@ +package llo_test + +import ( + "math/rand" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth/ethconfig" + chainselectors "github.com/smartcontractkit/chain-selectors" + "github.com/stretchr/testify/require" + "github.com/test-go/testify/assert" + "go.uber.org/zap/zapcore" + + "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest" + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + "github.com/smartcontractkit/chainlink-common/pkg/utils" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/channel_config_store" + "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/llo" + "github.com/smartcontractkit/chainlink/v2/core/services/pg" +) + +func Test_ChannelDefinitionCache_Integration(t *testing.T) { + 
lggr, observedLogs := logger.TestLoggerObserved(t, zapcore.InfoLevel) + db := pgtest.NewSqlxDB(t) + ctx := testutils.Context(t) + orm := llo.NewORM(db, testutils.SimulatedChainID) + + steve := testutils.MustNewSimTransactor(t) // config contract deployer and owner + genesisData := core.GenesisAlloc{steve.From: {Balance: assets.Ether(1000).ToInt()}} + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + backend.Commit() // ensure starting block number at least 1 + + ethClient := client.NewSimulatedBackendClient(t, backend, testutils.SimulatedChainID) + + configStoreAddress, _, configStoreContract, err := channel_config_store.DeployChannelConfigStore(steve, backend) + require.NoError(t, err) + + channel1 := rand.Uint32() + channel2 := rand.Uint32() + channel3 := rand.Uint32() + + chainSelector, err := chainselectors.SelectorFromChainId(testutils.SimulatedChainID.Uint64()) + require.NoError(t, err) + + streamIDs := []uint32{1, 2, 3} + channel1Def := channel_config_store.IChannelConfigStoreChannelDefinition{ + ReportFormat: uint32(llotypes.ReportFormatSolana), + ChainSelector: chainSelector, + StreamIDs: streamIDs, + } + channel2Def := channel_config_store.IChannelConfigStoreChannelDefinition{ + ReportFormat: uint32(llotypes.ReportFormatEVM), + ChainSelector: chainSelector, + StreamIDs: streamIDs, + } + channel3Def := channel_config_store.IChannelConfigStoreChannelDefinition{ + ReportFormat: uint32(llotypes.ReportFormatEVM), + ChainSelector: chainSelector, + StreamIDs: append(streamIDs, 4), + } + + require.NoError(t, utils.JustError(configStoreContract.AddChannel(steve, channel1, channel1Def))) + require.NoError(t, utils.JustError(configStoreContract.AddChannel(steve, channel2, channel2Def))) + + h := backend.Commit() + channel2Block, err := backend.BlockByHash(ctx, h) + require.NoError(t, err) + + t.Run("with zero fromblock", func(t *testing.T) { + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, db, lggr, pgtest.NewQConfig(true)), ethClient, lggr, 100*time.Millisecond, false, 1, 3, 2, 1000) + servicetest.Run(t, lp) + cdc := llo.NewChannelDefinitionCache(lggr, orm, lp, configStoreAddress, 0) + + servicetest.Run(t, cdc) + + testutils.WaitForLogMessage(t, observedLogs, "Updated channel definitions") + + dfns := cdc.Definitions() + + require.Len(t, dfns, 2) + require.Contains(t, dfns, channel1) + require.Contains(t, dfns, channel2) + assert.Equal(t, llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatSolana, + ChainSelector: chainSelector, + StreamIDs: []uint32{1, 2, 3}, + }, dfns[channel1]) + assert.Equal(t, llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatEVM, + ChainSelector: chainSelector, + StreamIDs: []uint32{1, 2, 3}, + }, dfns[channel2]) + + // remove solana + require.NoError(t, utils.JustError(configStoreContract.RemoveChannel(steve, channel1))) + backend.Commit() + testutils.WaitForLogMessageCount(t, observedLogs, "Updated channel definitions", 2) + dfns = cdc.Definitions() + + require.Len(t, dfns, 1) + assert.NotContains(t, dfns, channel1) + require.Contains(t, dfns, channel2) + + assert.Equal(t, llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatEVM, + ChainSelector: chainSelector, + StreamIDs: []uint32{1, 2, 3}, + }, dfns[channel2]) + + // add channel3 with additional stream + require.NoError(t, utils.JustError(configStoreContract.AddChannel(steve, channel3, channel3Def))) + backend.Commit() + testutils.WaitForLogMessageCount(t, observedLogs, "Updated channel 
definitions", 3) + dfns = cdc.Definitions() + + require.Len(t, dfns, 2) + require.Contains(t, dfns, channel2) + require.Contains(t, dfns, channel3) + + assert.Equal(t, llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatEVM, + ChainSelector: chainSelector, + StreamIDs: []uint32{1, 2, 3}, + }, dfns[channel2]) + assert.Equal(t, llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatEVM, + ChainSelector: chainSelector, + StreamIDs: []uint32{1, 2, 3, 4}, + }, dfns[channel3]) + }) + + t.Run("loads from ORM", func(t *testing.T) { + // Override logpoller to always return no logs + lp := &mockLogPoller{ + LogPoller: logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, db, lggr, pgtest.NewQConfig(true)), ethClient, lggr, 100*time.Millisecond, false, 1, 3, 2, 1000), + LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + return 0, nil + }, + LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { + return []logpoller.Log{}, nil + }, + } + cdc := llo.NewChannelDefinitionCache(lggr, orm, lp, configStoreAddress, 0) + + servicetest.Run(t, cdc) + + dfns := cdc.Definitions() + + require.Len(t, dfns, 2) + require.Contains(t, dfns, channel2) + require.Contains(t, dfns, channel3) + + assert.Equal(t, llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatEVM, + ChainSelector: chainSelector, + StreamIDs: []uint32{1, 2, 3}, + }, dfns[channel2]) + assert.Equal(t, llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatEVM, + ChainSelector: chainSelector, + StreamIDs: []uint32{1, 2, 3, 4}, + }, dfns[channel3]) + }) + + // clear out DB for next test + pgtest.MustExec(t, db, `DELETE FROM channel_definitions`) + + t.Run("with non-zero fromBlock", func(t *testing.T) { + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, db, lggr, pgtest.NewQConfig(true)), ethClient, lggr, 100*time.Millisecond, false, 1, 3, 2, 1000) + servicetest.Run(t, lp) + cdc := llo.NewChannelDefinitionCache(lggr, orm, lp, configStoreAddress, channel2Block.Number().Int64()+1) + + // should only detect events from AFTER channel 2 was added + servicetest.Run(t, cdc) + + testutils.WaitForLogMessageCount(t, observedLogs, "Updated channel definitions", 4) + + dfns := cdc.Definitions() + + require.Len(t, dfns, 1) + require.Contains(t, dfns, channel3) + + assert.Equal(t, llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatEVM, + ChainSelector: chainSelector, + StreamIDs: []uint32{1, 2, 3, 4}, + }, dfns[channel3]) + }) +} + +type mockLogPoller struct { + logpoller.LogPoller + LatestBlockFn func(qopts ...pg.QOpt) (int64, error) + LogsWithSigsFn func(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) +} + +func (p *mockLogPoller) LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { + return p.LogsWithSigsFn(start, end, eventSigs, address, qopts...) +} +func (p *mockLogPoller) LatestBlock(qopts ...pg.QOpt) (logpoller.LogPollerBlock, error) { + block, err := p.LatestBlockFn(qopts...) 
+ return logpoller.LogPollerBlock{BlockNumber: block}, err +} diff --git a/core/services/ocr2/plugins/mercury/integration_test.go b/core/services/ocr2/plugins/mercury/integration_test.go index 0ebc6a5e354..36189475898 100644 --- a/core/services/ocr2/plugins/mercury/integration_test.go +++ b/core/services/ocr2/plugins/mercury/integration_test.go @@ -38,7 +38,7 @@ import ( v1 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v1" v2 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2" v3 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3" - relaymercury "github.com/smartcontractkit/chainlink-data-streams/mercury" + datastreamsmercury "github.com/smartcontractkit/chainlink-data-streams/mercury" "github.com/smartcontractkit/chainlink/v2/core/bridges" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" @@ -67,7 +67,7 @@ var ( Min: big.NewInt(0), Max: big.NewInt(math.MaxInt64), } - rawReportingPluginConfig = relaymercury.OffchainConfig{ + rawReportingPluginConfig = datastreamsmercury.OffchainConfig{ ExpirationWindow: 1, BaseUSDFee: decimal.NewFromInt(100), } @@ -273,7 +273,7 @@ func integration_MercuryV1(t *testing.T) { } // Setup config on contract - onchainConfig, err := (relaymercury.StandardOnchainConfigCodec{}).Encode(rawOnchainConfig) + onchainConfig, err := (datastreamsmercury.StandardOnchainConfigCodec{}).Encode(rawOnchainConfig) require.NoError(t, err) reportingPluginConfig, err := json.Marshal(rawReportingPluginConfig) @@ -623,7 +623,7 @@ func integration_MercuryV2(t *testing.T) { } // Setup config on contract - onchainConfig, err := (relaymercury.StandardOnchainConfigCodec{}).Encode(rawOnchainConfig) + onchainConfig, err := (datastreamsmercury.StandardOnchainConfigCodec{}).Encode(rawOnchainConfig) require.NoError(t, err) reportingPluginConfig, err := json.Marshal(rawReportingPluginConfig) @@ -707,7 +707,7 @@ func integration_MercuryV2(t *testing.T) { continue // already saw all oracles for this feed } - expectedFee := relaymercury.CalculateFee(big.NewInt(234567), rawReportingPluginConfig.BaseUSDFee) + expectedFee := datastreamsmercury.CalculateFee(big.NewInt(234567), rawReportingPluginConfig.BaseUSDFee) expectedExpiresAt := reportElems["observationsTimestamp"].(uint32) + rawReportingPluginConfig.ExpirationWindow assert.GreaterOrEqual(t, int(reportElems["observationsTimestamp"].(uint32)), int(testStartTimeStamp)) @@ -907,7 +907,7 @@ func integration_MercuryV3(t *testing.T) { } // Setup config on contract - onchainConfig, err := (relaymercury.StandardOnchainConfigCodec{}).Encode(rawOnchainConfig) + onchainConfig, err := (datastreamsmercury.StandardOnchainConfigCodec{}).Encode(rawOnchainConfig) require.NoError(t, err) reportingPluginConfig, err := json.Marshal(rawReportingPluginConfig) @@ -991,7 +991,7 @@ func integration_MercuryV3(t *testing.T) { continue // already saw all oracles for this feed } - expectedFee := relaymercury.CalculateFee(big.NewInt(234567), rawReportingPluginConfig.BaseUSDFee) + expectedFee := datastreamsmercury.CalculateFee(big.NewInt(234567), rawReportingPluginConfig.BaseUSDFee) expectedExpiresAt := reportElems["observationsTimestamp"].(uint32) + rawReportingPluginConfig.ExpirationWindow assert.GreaterOrEqual(t, int(reportElems["observationsTimestamp"].(uint32)), int(testStartTimeStamp)) diff --git a/core/services/ocr2/validate/validate.go b/core/services/ocr2/validate/validate.go index 9fe779b244f..5846eaa032f 100644 --- a/core/services/ocr2/validate/validate.go +++ b/core/services/ocr2/validate/validate.go 
@@ -14,6 +14,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/types" "github.com/smartcontractkit/chainlink/v2/core/services/job" dkgconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/dkg/config" + lloconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/llo/config" mercuryconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury/config" ocr2vrfconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2vrf/config" "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon" @@ -113,6 +114,8 @@ func validateSpec(tree *toml.Tree, spec job.Job) error { return nil case types.Mercury: return validateOCR2MercurySpec(spec.OCR2OracleSpec.PluginConfig, *spec.OCR2OracleSpec.FeedID) + case types.LLO: + return validateOCR2LLOSpec(spec.OCR2OracleSpec.PluginConfig) case types.GenericPlugin: return validateOCR2GenericPluginSpec(spec.OCR2OracleSpec.PluginConfig) case "": @@ -256,3 +259,12 @@ func validateOCR2MercurySpec(jsonConfig job.JSONConfig, feedId [32]byte) error { } return pkgerrors.Wrap(mercuryconfig.ValidatePluginConfig(pluginConfig, feedId), "Mercury PluginConfig is invalid") } + +func validateOCR2LLOSpec(jsonConfig job.JSONConfig) error { + var pluginConfig lloconfig.PluginConfig + err := json.Unmarshal(jsonConfig.Bytes(), &pluginConfig) + if err != nil { + return pkgerrors.Wrap(err, "error while unmarshaling plugin config") + } + return pkgerrors.Wrap(pluginConfig.Validate(), "LLO PluginConfig is invalid") +} diff --git a/core/services/ocrbootstrap/delegate.go b/core/services/ocrbootstrap/delegate.go index 7912741802c..27ddd53bd52 100644 --- a/core/services/ocrbootstrap/delegate.go +++ b/core/services/ocrbootstrap/delegate.go @@ -39,8 +39,10 @@ type Delegate struct { isNewlyCreatedJob bool } -// Extra fields to enable router proxy contract support. Must match field names of functions' PluginConfig. -type relayConfigRouterContractFields struct { +type relayConfig struct { + // providerType used for determining which type of contract to track config on + ProviderType string `json:"providerType"` + // Extra fields to enable router proxy contract support. Must match field names of functions' PluginConfig. 
DONID string `json:"donID"` ContractVersion uint32 `json:"contractVersion"` ContractUpdateCheckFrequencySec uint32 `json:"contractUpdateCheckFrequencySec"` @@ -109,14 +111,14 @@ func (d *Delegate) ServicesForSpec(jb job.Job) (services []job.ServiceCtx, err e } ctx := ctxVals.ContextWithValues(context.Background()) - var routerFields relayConfigRouterContractFields - if err = json.Unmarshal(spec.RelayConfig.Bytes(), &routerFields); err != nil { + var relayCfg relayConfig + if err = json.Unmarshal(spec.RelayConfig.Bytes(), &relayCfg); err != nil { return nil, err } var configProvider types.ConfigProvider - if routerFields.DONID != "" { - if routerFields.ContractVersion != 1 || routerFields.ContractUpdateCheckFrequencySec == 0 { + if relayCfg.DONID != "" { + if relayCfg.ContractVersion != 1 || relayCfg.ContractUpdateCheckFrequencySec == 0 { return nil, errors.New("invalid router contract config") } configProvider, err = relayer.NewPluginProvider( @@ -140,6 +142,7 @@ func (d *Delegate) ServicesForSpec(jb job.Job) (services []job.ServiceCtx, err e ContractID: spec.ContractID, New: d.isNewlyCreatedJob, RelayConfig: spec.RelayConfig.Bytes(), + ProviderType: relayCfg.ProviderType, }) } diff --git a/core/services/pipeline/mocks/runner.go b/core/services/pipeline/mocks/runner.go index f6e5033eae9..1de72bbf4c0 100644 --- a/core/services/pipeline/mocks/runner.go +++ b/core/services/pipeline/mocks/runner.go @@ -132,6 +132,36 @@ func (_m *Runner) HealthReport() map[string]error { return r0 } +// InitializePipeline provides a mock function with given fields: spec +func (_m *Runner) InitializePipeline(spec pipeline.Spec) (*pipeline.Pipeline, error) { + ret := _m.Called(spec) + + if len(ret) == 0 { + panic("no return value specified for InitializePipeline") + } + + var r0 *pipeline.Pipeline + var r1 error + if rf, ok := ret.Get(0).(func(pipeline.Spec) (*pipeline.Pipeline, error)); ok { + return rf(spec) + } + if rf, ok := ret.Get(0).(func(pipeline.Spec) *pipeline.Pipeline); ok { + r0 = rf(spec) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pipeline.Pipeline) + } + } + + if rf, ok := ret.Get(1).(func(pipeline.Spec) error); ok { + r1 = rf(spec) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // InsertFinishedRun provides a mock function with given fields: run, saveSuccessfulTaskRuns, qopts func (_m *Runner) InsertFinishedRun(run *pipeline.Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error { _va := make([]interface{}, len(qopts)) diff --git a/core/services/pipeline/runner.go b/core/services/pipeline/runner.go index 30a35598c3e..cc6214abf5a 100644 --- a/core/services/pipeline/runner.go +++ b/core/services/pipeline/runner.go @@ -51,6 +51,7 @@ type Runner interface { ExecuteAndInsertFinishedRun(ctx context.Context, spec Spec, vars Vars, l logger.Logger, saveSuccessfulTaskRuns bool) (runID int64, finalResult FinalResult, err error) OnRunFinished(func(*Run)) + InitializePipeline(spec Spec) (*Pipeline, error) } type runner struct { diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go index 4de4e48bd90..dcccbb90c79 100644 --- a/core/services/relay/evm/evm.go +++ b/core/services/relay/evm/evm.go @@ -28,6 +28,9 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/keystore" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ethkey" + "github.com/smartcontractkit/chainlink/v2/core/services/llo" + "github.com/smartcontractkit/chainlink/v2/core/services/llo/bm" + lloconfig 
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/llo/config" mercuryconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury/config" "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon" "github.com/smartcontractkit/chainlink/v2/core/services/pg" @@ -44,6 +47,7 @@ import ( var ( OCR2AggregatorTransmissionContractABI abi.ABI OCR2AggregatorLogDecoder LogDecoder + ChannelVerifierLogDecoder LogDecoder ) func init() { @@ -56,6 +60,10 @@ func init() { if err != nil { panic(err) } + ChannelVerifierLogDecoder, err = newChannelVerifierLogDecoder() + if err != nil { + panic(err) + } } var _ commontypes.Relayer = &Relayer{} //nolint:staticcheck @@ -69,6 +77,10 @@ type Relayer struct { pgCfg pg.QConfig chainReader commontypes.ChainReader codec commontypes.Codec + + // LLO/data streams + cdcFactory llo.ChannelDefinitionCacheFactory + orm llo.ORM } type CSAETHKeystore interface { @@ -107,6 +119,9 @@ func NewRelayer(lggr logger.Logger, chain legacyevm.Chain, opts RelayerOpts) (*R return nil, fmt.Errorf("cannot create evm relayer: %w", err) } lggr = lggr.Named("Relayer") + + orm := llo.NewORM(pg.NewQ(opts.DB, lggr, opts.QConfig), chain.ID()) + cdcFactory := llo.NewChannelDefinitionCacheFactory(lggr, orm, chain.LogPoller()) return &Relayer{ db: opts.DB, chain: chain, @@ -114,6 +129,8 @@ func NewRelayer(lggr logger.Logger, chain legacyevm.Chain, opts RelayerOpts) (*R ks: opts.CSAETHKeystore, mercuryPool: opts.MercuryPool, pgCfg: opts.QConfig, + cdcFactory: cdcFactory, + orm: orm, }, nil } @@ -226,7 +243,60 @@ func (r *Relayer) NewMercuryProvider(rargs commontypes.RelayArgs, pargs commonty } func (r *Relayer) NewLLOProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.LLOProvider, error) { - return nil, errors.New("not implemented") + relayOpts := types.NewRelayOpts(rargs) + var relayConfig types.RelayConfig + { + var err error + relayConfig, err = relayOpts.RelayConfig() + if err != nil { + return nil, fmt.Errorf("failed to get relay config: %w", err) + } + } + + var lloCfg lloconfig.PluginConfig + if err := json.Unmarshal(pargs.PluginConfig, &lloCfg); err != nil { + return nil, pkgerrors.WithStack(err) + } + if err := lloCfg.Validate(); err != nil { + return nil, err + } + + if relayConfig.ChainID.String() != r.chain.ID().String() { + return nil, fmt.Errorf("internal error: chain id in spec does not match this relayer's chain: have %s expected %s", relayConfig.ChainID.String(), r.chain.ID().String()) + } + cp, err := newLLOConfigProvider(r.lggr, r.chain, relayOpts) + if err != nil { + return nil, pkgerrors.WithStack(err) + } + + if !relayConfig.EffectiveTransmitterID.Valid { + return nil, pkgerrors.New("EffectiveTransmitterID must be specified") + } + privKey, err := r.ks.CSA().Get(relayConfig.EffectiveTransmitterID.String) + if err != nil { + return nil, pkgerrors.Wrap(err, "failed to get CSA key for mercury connection") + } + + // FIXME: Remove after benchmarking is done + // https://smartcontract-it.atlassian.net/browse/MERC-3487 + var transmitter llo.Transmitter + if lloCfg.BenchmarkMode { + r.lggr.Info("Benchmark mode enabled, using dummy transmitter. 
NOTE: THIS WILL NOT TRANSMIT ANYTHING") + transmitter = bm.NewTransmitter(r.lggr, privKey.PublicKey) + } else { + var client wsrpc.Client + client, err = r.mercuryPool.Checkout(context.Background(), privKey, lloCfg.ServerPubKey, lloCfg.ServerURL()) + if err != nil { + return nil, err + } + transmitter = llo.NewTransmitter(r.lggr, client, privKey.PublicKey) + } + + cdc, err := r.cdcFactory.NewCache(lloCfg) + if err != nil { + return nil, err + } + return NewLLOProvider(cp, transmitter, r.lggr, cdc), nil } func (r *Relayer) NewFunctionsProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.FunctionsProvider, error) { @@ -263,9 +333,12 @@ func (r *Relayer) NewConfigProvider(args commontypes.RelayArgs) (configProvider configProvider, err = newStandardConfigProvider(lggr, r.chain, relayOpts) case "mercury": configProvider, err = newMercuryConfigProvider(lggr, r.chain, relayOpts) + case "llo": + configProvider, err = newLLOConfigProvider(lggr, r.chain, relayOpts) default: return nil, fmt.Errorf("unrecognized provider type: %q", args.ProviderType) } + if err != nil { // Never return (*configProvider)(nil) return nil, err @@ -471,6 +544,7 @@ func (r *Relayer) NewMedianProvider(rargs commontypes.RelayArgs, pargs commontyp } reportCodec := evmreportcodec.ReportCodec{} + contractTransmitter, err := newOnChainContractTransmitter(lggr, rargs, pargs.TransmitterID, r.ks.Eth(), configWatcher, configTransmitterOpts{}, OCR2AggregatorTransmissionContractABI) if err != nil { return nil, err diff --git a/core/services/relay/evm/llo_config_provider.go b/core/services/relay/evm/llo_config_provider.go new file mode 100644 index 00000000000..bd8dbac8460 --- /dev/null +++ b/core/services/relay/evm/llo_config_provider.go @@ -0,0 +1,21 @@ +package evm + +import ( + "github.com/ethereum/go-ethereum/common" + pkgerrors "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/llo" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types" +) + +func newLLOConfigProvider(lggr logger.Logger, chain legacyevm.Chain, opts *types.RelayOpts) (*configWatcher, error) { + if !common.IsHexAddress(opts.ContractID) { + return nil, pkgerrors.Errorf("invalid contractID, expected hex address") + } + + aggregatorAddress := common.HexToAddress(opts.ContractID) + configDigester := llo.NewOffchainConfigDigester(chain.Config().EVM().ChainID(), aggregatorAddress) + return newContractConfigProvider(lggr, chain, opts, aggregatorAddress, ChannelVerifierLogDecoder, configDigester) +} diff --git a/core/services/relay/evm/llo_provider.go b/core/services/relay/evm/llo_provider.go new file mode 100644 index 00000000000..0ab0773a160 --- /dev/null +++ b/core/services/relay/evm/llo_provider.go @@ -0,0 +1,90 @@ +package evm + +import ( + "context" + "errors" + + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/services" + commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" + relaytypes "github.com/smartcontractkit/chainlink-common/pkg/types" + llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + datastreamsllo "github.com/smartcontractkit/chainlink-data-streams/llo" + + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/llo" +) + +var _ commontypes.LLOProvider = (*lloProvider)(nil) + +type 
lloProvider struct { + cp commontypes.ConfigProvider + transmitter llo.Transmitter + logger logger.Logger + channelDefinitionCache llotypes.ChannelDefinitionCache + + ms services.MultiStart +} + +func NewLLOProvider( + cp commontypes.ConfigProvider, + transmitter llo.Transmitter, + lggr logger.Logger, + channelDefinitionCache llotypes.ChannelDefinitionCache, +) relaytypes.LLOProvider { + return &lloProvider{ + cp, + transmitter, + lggr.Named("LLOProvider"), + channelDefinitionCache, + services.MultiStart{}, + } +} + +func (p *lloProvider) Start(ctx context.Context) error { + err := p.ms.Start(ctx, p.cp, p.transmitter, p.channelDefinitionCache) + return err +} + +func (p *lloProvider) Close() error { + return p.ms.Close() +} + +func (p *lloProvider) Ready() error { + return errors.Join(p.cp.Ready(), p.transmitter.Ready(), p.channelDefinitionCache.Ready()) +} + +func (p *lloProvider) Name() string { + return p.logger.Name() +} + +func (p *lloProvider) HealthReport() map[string]error { + report := map[string]error{} + services.CopyHealth(report, p.cp.HealthReport()) + services.CopyHealth(report, p.transmitter.HealthReport()) + services.CopyHealth(report, p.channelDefinitionCache.HealthReport()) + return report +} + +func (p *lloProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker { + return p.cp.ContractConfigTracker() +} + +func (p *lloProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester { + return p.cp.OffchainConfigDigester() +} + +func (p *lloProvider) OnchainConfigCodec() datastreamsllo.OnchainConfigCodec { + // TODO: This should probably be moved to core since its chain-specific + // https://smartcontract-it.atlassian.net/browse/MERC-3661 + return &datastreamsllo.JSONOnchainConfigCodec{} +} + +func (p *lloProvider) ContractTransmitter() llotypes.Transmitter { + return p.transmitter +} + +func (p *lloProvider) ChannelDefinitionCache() llotypes.ChannelDefinitionCache { + return p.channelDefinitionCache +} diff --git a/core/services/relay/evm/llo_verifier_decoder.go b/core/services/relay/evm/llo_verifier_decoder.go new file mode 100644 index 00000000000..922b83bec0d --- /dev/null +++ b/core/services/relay/evm/llo_verifier_decoder.go @@ -0,0 +1,67 @@ +package evm + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/channel_verifier" +) + +var _ LogDecoder = &channelVerifierLogDecoder{} + +type channelVerifierLogDecoder struct { + eventName string + eventSig common.Hash + abi *abi.ABI +} + +func newChannelVerifierLogDecoder() (*channelVerifierLogDecoder, error) { + const eventName = "ConfigSet" + abi, err := channel_verifier.ChannelVerifierMetaData.GetAbi() + if err != nil { + return nil, err + } + return &channelVerifierLogDecoder{ + eventName: eventName, + eventSig: abi.Events[eventName].ID, + abi: abi, + }, nil +} + +func (d *channelVerifierLogDecoder) Decode(rawLog []byte) (ocrtypes.ContractConfig, error) { + unpacked := new(channel_verifier.ChannelVerifierConfigSet) + err := d.abi.UnpackIntoInterface(unpacked, d.eventName, rawLog) + if err != nil { + return ocrtypes.ContractConfig{}, errors.Wrap(err, "failed to unpack log data") + } + + var transmitAccounts []ocrtypes.Account + for _, addr := range unpacked.OffchainTransmitters { + transmitAccounts = append(transmitAccounts, ocrtypes.Account(fmt.Sprintf("%x", 
addr))) + } + var signers []ocrtypes.OnchainPublicKey + for _, addr := range unpacked.Signers { + addr := addr + signers = append(signers, addr[:]) + } + + return ocrtypes.ContractConfig{ + ConfigDigest: unpacked.ConfigDigest, + ConfigCount: unpacked.ConfigCount, + Signers: signers, + Transmitters: transmitAccounts, + F: unpacked.F, + OnchainConfig: unpacked.OnchainConfig, + OffchainConfigVersion: unpacked.OffchainConfigVersion, + OffchainConfig: unpacked.OffchainConfig, + }, nil +} + +func (d *channelVerifierLogDecoder) EventSig() common.Hash { + return d.eventSig +} diff --git a/core/services/relay/evm/mercury/config_digest.go b/core/services/relay/evm/mercury/config_digest.go index b9431fe923f..291a723ee3a 100644 --- a/core/services/relay/evm/mercury/config_digest.go +++ b/core/services/relay/evm/mercury/config_digest.go @@ -61,7 +61,7 @@ func configDigest( panic("copy too little data") } binary.BigEndian.PutUint16(configDigest[:2], uint16(types.ConfigDigestPrefixMercuryV02)) - if !(configDigest[0] == 0 || configDigest[1] == 6) { + if !(configDigest[0] == 0 && configDigest[1] == 6) { // assertion panic("unexpected mismatch") } diff --git a/core/services/relay/evm/mercury/wsrpc/pb/mercury.pb.go b/core/services/relay/evm/mercury/wsrpc/pb/mercury.pb.go index ce4125bd579..ab4d2f68dad 100644 --- a/core/services/relay/evm/mercury/wsrpc/pb/mercury.pb.go +++ b/core/services/relay/evm/mercury/wsrpc/pb/mercury.pb.go @@ -26,7 +26,7 @@ type TransmitRequest struct { unknownFields protoimpl.UnknownFields Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` - ReportFormat string `protobuf:"bytes,2,opt,name=reportFormat,proto3" json:"reportFormat,omitempty"` + ReportFormat uint32 `protobuf:"varint,2,opt,name=reportFormat,proto3" json:"reportFormat,omitempty"` } func (x *TransmitRequest) Reset() { @@ -68,11 +68,11 @@ func (x *TransmitRequest) GetPayload() []byte { return nil } -func (x *TransmitRequest) GetReportFormat() string { +func (x *TransmitRequest) GetReportFormat() uint32 { if x != nil { return x.ReportFormat } - return "" + return 0 } type TransmitResponse struct { @@ -454,7 +454,7 @@ var file_mercury_proto_rawDesc = []byte{ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x6f, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x3c, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, diff --git a/core/services/relay/evm/mercury/wsrpc/pb/mercury.proto b/core/services/relay/evm/mercury/wsrpc/pb/mercury.proto index 184b0572046..6b71404a6a6 100644 --- a/core/services/relay/evm/mercury/wsrpc/pb/mercury.proto +++ b/core/services/relay/evm/mercury/wsrpc/pb/mercury.proto @@ -11,7 +11,7 @@ service Mercury { message TransmitRequest { bytes payload = 1; - string reportFormat = 2; + uint32 reportFormat = 2; } message TransmitResponse { diff --git a/core/store/migrate/migrations/0224_create_channel_definition_caches.sql 
b/core/store/migrate/migrations/0224_create_channel_definition_caches.sql new file mode 100644 index 00000000000..9c46f1ceacf --- /dev/null +++ b/core/store/migrate/migrations/0224_create_channel_definition_caches.sql @@ -0,0 +1,12 @@ +-- +goose Up +CREATE TABLE channel_definitions ( + evm_chain_id NUMERIC(78) NOT NULL, + addr bytea CHECK (octet_length(addr) = 20), + definitions JSONB NOT NULL, + block_num BIGINT NOT NULL, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY (evm_chain_id, addr) +); + +-- +goose Down +DROP TABLE channel_definitions; diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 8fff0e36cad..6647dcc1efd 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -12,6 +12,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - Gas bumping logic to the `SuggestedPriceEstimator`. The bumping mechanism for this estimator refetches the price from the RPC and adds a buffer on top using the greater of `BumpPercent` and `BumpMin`. +- Add preliminary support for "llo" job type (Data Streams V1) ### Fixed diff --git a/go.mod b/go.mod index c0ef2b819de..2291388def2 100644 --- a/go.mod +++ b/go.mod @@ -71,7 +71,7 @@ require ( github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240118014648-1ab6a88c9429 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240221153538-1ea85cf3dc6c github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 - github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240214203158-47dae5de1336 + github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 github.com/smartcontractkit/chainlink-feeds v0.0.0-20240119021347-3c541a78cdb8 github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240216142700-c5869534c19e github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240213121419-1272736c2ac0 @@ -82,6 +82,7 @@ require ( github.com/smartcontractkit/wsrpc v0.7.2 github.com/spf13/cast v1.6.0 github.com/stretchr/testify v1.8.4 + github.com/test-go/testify v1.1.4 github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a github.com/tidwall/gjson v1.17.0 github.com/ugorji/go/codec v1.2.12 @@ -342,4 +343,5 @@ replace ( // until merged upstream: https://github.com/mwitkow/grpc-proxy/pull/69 github.com/mwitkow/grpc-proxy => github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f + ) diff --git a/go.sum b/go.sum index 88ea058b06e..13d684fd34d 100644 --- a/go.sum +++ b/go.sum @@ -1172,8 +1172,8 @@ github.com/smartcontractkit/chainlink-common v0.1.7-0.20240221153538-1ea85cf3dc6 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240221153538-1ea85cf3dc6c/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8/go.mod h1:a65NtrK4xZb01mf0dDNghPkN2wXgcqFQ55ADthVBgMc= -github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240214203158-47dae5de1336 h1:j00D0/EqE9HRu+63v7KwUOe4ZxLc4AN5SOJFiinkkH0= -github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240214203158-47dae5de1336/go.mod h1:umLyYLRGqyIuFfGpEREZP3So6+O8iL35cCCqW+OxX5w= +github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 h1:xFSv8561jsLtF6gYZr/zW2z5qUUAkcFkApin2mnbYTo= +github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540/go.mod 
h1:sjAmX8K2kbQhvDarZE1ZZgDgmHJ50s0BBc/66vKY2ek= github.com/smartcontractkit/chainlink-feeds v0.0.0-20240119021347-3c541a78cdb8 h1:1BcjXuviSAKttOX7BZoVHRZZGfxqoA2+AL8tykmkdoc= github.com/smartcontractkit/chainlink-feeds v0.0.0-20240119021347-3c541a78cdb8/go.mod h1:vy1L7NybTy2F/Yv7BOh+oZBa1MACD6gzd1+DkcSkfp8= github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240216142700-c5869534c19e h1:k8HS3GsAFZnxXIW3141VsQP2+EL1XrTtOi/HDt7sdBE= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index ae2ac344c9b..955c0bbb1e5 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -370,7 +370,7 @@ require ( github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 // indirect github.com/smartcontractkit/chain-selectors v1.0.10 // indirect github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 // indirect - github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240214203158-47dae5de1336 // indirect + github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 // indirect github.com/smartcontractkit/chainlink-feeds v0.0.0-20240119021347-3c541a78cdb8 // indirect github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240216142700-c5869534c19e // indirect github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240213121419-1272736c2ac0 // indirect diff --git a/integration-tests/go.sum b/integration-tests/go.sum index fe81cc3ac68..e5f34a3d982 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1511,8 +1511,8 @@ github.com/smartcontractkit/chainlink-common v0.1.7-0.20240221153538-1ea85cf3dc6 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240221153538-1ea85cf3dc6c/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8/go.mod h1:a65NtrK4xZb01mf0dDNghPkN2wXgcqFQ55ADthVBgMc= -github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240214203158-47dae5de1336 h1:j00D0/EqE9HRu+63v7KwUOe4ZxLc4AN5SOJFiinkkH0= -github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240214203158-47dae5de1336/go.mod h1:umLyYLRGqyIuFfGpEREZP3So6+O8iL35cCCqW+OxX5w= +github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 h1:xFSv8561jsLtF6gYZr/zW2z5qUUAkcFkApin2mnbYTo= +github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540/go.mod h1:sjAmX8K2kbQhvDarZE1ZZgDgmHJ50s0BBc/66vKY2ek= github.com/smartcontractkit/chainlink-feeds v0.0.0-20240119021347-3c541a78cdb8 h1:1BcjXuviSAKttOX7BZoVHRZZGfxqoA2+AL8tykmkdoc= github.com/smartcontractkit/chainlink-feeds v0.0.0-20240119021347-3c541a78cdb8/go.mod h1:vy1L7NybTy2F/Yv7BOh+oZBa1MACD6gzd1+DkcSkfp8= github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240216142700-c5869534c19e h1:k8HS3GsAFZnxXIW3141VsQP2+EL1XrTtOi/HDt7sdBE= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index 174ea743c23..de4360ea4e4 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -357,7 +357,7 @@ require ( github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 // indirect github.com/smartcontractkit/chain-selectors v1.0.10 // indirect github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 // indirect - github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240214203158-47dae5de1336 // indirect + 
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 // indirect github.com/smartcontractkit/chainlink-feeds v0.0.0-20240119021347-3c541a78cdb8 // indirect github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240216142700-c5869534c19e // indirect github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240213121419-1272736c2ac0 // indirect diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index 1aaac86bd00..f85521c2f7f 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -1494,8 +1494,8 @@ github.com/smartcontractkit/chainlink-common v0.1.7-0.20240221153538-1ea85cf3dc6 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240221153538-1ea85cf3dc6c/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8/go.mod h1:a65NtrK4xZb01mf0dDNghPkN2wXgcqFQ55ADthVBgMc= -github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240214203158-47dae5de1336 h1:j00D0/EqE9HRu+63v7KwUOe4ZxLc4AN5SOJFiinkkH0= -github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240214203158-47dae5de1336/go.mod h1:umLyYLRGqyIuFfGpEREZP3So6+O8iL35cCCqW+OxX5w= +github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 h1:xFSv8561jsLtF6gYZr/zW2z5qUUAkcFkApin2mnbYTo= +github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540/go.mod h1:sjAmX8K2kbQhvDarZE1ZZgDgmHJ50s0BBc/66vKY2ek= github.com/smartcontractkit/chainlink-feeds v0.0.0-20240119021347-3c541a78cdb8 h1:1BcjXuviSAKttOX7BZoVHRZZGfxqoA2+AL8tykmkdoc= github.com/smartcontractkit/chainlink-feeds v0.0.0-20240119021347-3c541a78cdb8/go.mod h1:vy1L7NybTy2F/Yv7BOh+oZBa1MACD6gzd1+DkcSkfp8= github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240216142700-c5869534c19e h1:k8HS3GsAFZnxXIW3141VsQP2+EL1XrTtOi/HDt7sdBE= From 0b9dc18fc7e6a223758af0703dfc82d50a99e940 Mon Sep 17 00:00:00 2001 From: Sam Date: Thu, 22 Feb 2024 12:55:25 -0500 Subject: [PATCH 2/6] Ignore .venv (#12145) --- .gitignore | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index ae69535d352..e56774fbefc 100644 --- a/.gitignore +++ b/.gitignore @@ -91,4 +91,7 @@ tools/flakeytests/coverage.txt **/testdata/fuzz/* # Runtime test configuration that might contain secrets -overrides.toml \ No newline at end of file +overrides.toml + +# Pythin venv +.venv/ From 85cc5909de2427f9eb61a2dae27713e0ff585263 Mon Sep 17 00:00:00 2001 From: Gabriel Paradiso Date: Thu, 22 Feb 2024 19:23:17 +0100 Subject: [PATCH 3/6] [FUN-877] make persisting allowlist compatible with older contract (#11907) * feat: make persisting allowlist compatible with older contract * feat: check tos contract version instead of a config feature flag * extract contract version comparison to helper function * fix: use semver to compare versions --- .../handlers/functions/allowlist/allowlist.go | 58 ++- .../functions/allowlist/allowlist_test.go | 390 ++++++++++++++---- .../handlers/functions/allowlist/mocks/orm.go | 24 ++ .../handlers/functions/allowlist/orm.go | 24 ++ .../handlers/functions/allowlist/orm_test.go | 65 +++ go.mod | 2 +- 6 files changed, 467 insertions(+), 96 deletions(-) diff --git a/core/services/gateway/handlers/functions/allowlist/allowlist.go b/core/services/gateway/handlers/functions/allowlist/allowlist.go index 
20dc92ced70..020de2359c2 100644 --- a/core/services/gateway/handlers/functions/allowlist/allowlist.go +++ b/core/services/gateway/handlers/functions/allowlist/allowlist.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "fmt" "math/big" + "regexp" "sync" "sync/atomic" "time" @@ -12,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" + "golang.org/x/mod/semver" "github.com/smartcontractkit/chainlink-common/pkg/services" evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" @@ -23,9 +25,10 @@ import ( ) const ( - defaultStoredAllowlistBatchSize = 1000 - defaultOnchainAllowlistBatchSize = 100 - defaultFetchingDelayInRangeSec = 1 + defaultStoredAllowlistBatchSize = 1000 + defaultOnchainAllowlistBatchSize = 100 + defaultFetchingDelayInRangeSec = 1 + tosContractMinBatchProcessingVersion = "v1.1.0" ) type OnchainAllowlistConfig struct { @@ -38,8 +41,6 @@ type OnchainAllowlistConfig struct { UpdateTimeoutSec uint `json:"updateTimeoutSec"` StoredAllowlistBatchSize uint `json:"storedAllowlistBatchSize"` OnchainAllowlistBatchSize uint `json:"onchainAllowlistBatchSize"` - // StoreAllowedSendersEnabled is a feature flag that enables storing in db a copy of the allowlist. - StoreAllowedSendersEnabled bool `json:"storeAllowedSendersEnabled"` // FetchingDelayInRangeSec prevents RPC client being rate limited when fetching the allowlist in ranges. FetchingDelayInRangeSec uint `json:"fetchingDelayInRangeSec"` } @@ -210,7 +211,31 @@ func (a *onchainAllowlist) updateFromContractV1(ctx context.Context, blockNum *b } var allowedSenderList []common.Address - if !a.config.StoreAllowedSendersEnabled { + typeAndVersion, err := tosContract.TypeAndVersion(&bind.CallOpts{ + Pending: false, + BlockNumber: blockNum, + Context: ctx, + }) + if err != nil { + return errors.Wrap(err, "failed to fetch the tos contract type and version") + } + + currentVersion, err := ExtractContractVersion(typeAndVersion) + if err != nil { + return fmt.Errorf("failed to extract version: %w", err) + } + + if semver.Compare(tosContractMinBatchProcessingVersion, currentVersion) <= 0 { + err = a.syncBlockedSenders(ctx, tosContract, blockNum) + if err != nil { + return errors.Wrap(err, "failed to sync the stored allowed and blocked senders") + } + + allowedSenderList, err = a.getAllowedSendersBatched(ctx, tosContract, blockNum) + if err != nil { + return errors.Wrap(err, "failed to get allowed senders in rage") + } + } else { allowedSenderList, err = tosContract.GetAllAllowedSenders(&bind.CallOpts{ Pending: false, BlockNumber: blockNum, @@ -219,15 +244,15 @@ func (a *onchainAllowlist) updateFromContractV1(ctx context.Context, blockNum *b if err != nil { return errors.Wrap(err, "error calling GetAllAllowedSenders") } - } else { - err = a.syncBlockedSenders(ctx, tosContract, blockNum) + + err = a.orm.PurgeAllowedSenders() if err != nil { - return errors.Wrap(err, "failed to sync the stored allowed and blocked senders") + a.lggr.Errorf("failed to purge allowedSenderList: %w", err) } - allowedSenderList, err = a.getAllowedSendersBatched(ctx, tosContract, blockNum) + err = a.orm.CreateAllowedSenders(allowedSenderList) if err != nil { - return errors.Wrap(err, "failed to get allowed senders in rage") + a.lggr.Errorf("failed to update stored allowedSenderList: %w", err) } } @@ -344,3 +369,14 @@ func (a *onchainAllowlist) loadStoredAllowedSenderList() { a.update(allowedList) } + +func ExtractContractVersion(str string) (string, error) { + pattern := 
`v(\d+).(\d+).(\d+)` + re := regexp.MustCompile(pattern) + + match := re.FindStringSubmatch(str) + if len(match) != 4 { + return "", fmt.Errorf("version not found in string: %s", str) + } + return fmt.Sprintf("v%s.%s.%s", match[1], match[2], match[3]), nil +} diff --git a/core/services/gateway/handlers/functions/allowlist/allowlist_test.go b/core/services/gateway/handlers/functions/allowlist/allowlist_test.go index e8cbca80b94..735c0bff7dc 100644 --- a/core/services/gateway/handlers/functions/allowlist/allowlist_test.go +++ b/core/services/gateway/handlers/functions/allowlist/allowlist_test.go @@ -3,11 +3,14 @@ package allowlist_test import ( "context" "encoding/hex" + "fmt" "math/big" "testing" "time" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/onsi/gomega" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -18,55 +21,105 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/gateway/handlers/functions/allowlist" amocks "github.com/smartcontractkit/chainlink/v2/core/services/gateway/handlers/functions/allowlist/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types" ) const ( - addr1 = "9ed925d8206a4f88a2f643b28b3035b315753cd6" - addr2 = "ea6721ac65bced841b8ec3fc5fedea6141a0ade4" - addr3 = "84689acc87ff22841b8ec378300da5e141a99911" + addr1 = "9ed925d8206a4f88a2f643b28b3035b315753cd6" + addr2 = "ea6721ac65bced841b8ec3fc5fedea6141a0ade4" + addr3 = "84689acc87ff22841b8ec378300da5e141a99911" + ToSContractV100 = "Functions Terms of Service Allow List v1.0.0" + ToSContractV110 = "Functions Terms of Service Allow List v1.1.0" ) -func sampleEncodedAllowlist(t *testing.T) []byte { - abiEncodedAddresses := - "0000000000000000000000000000000000000000000000000000000000000020" + - "0000000000000000000000000000000000000000000000000000000000000002" + - "000000000000000000000000" + addr1 + - "000000000000000000000000" + addr2 - rawData, err := hex.DecodeString(abiEncodedAddresses) - require.NoError(t, err) - return rawData -} - -func TestAllowlist_UpdateAndCheck(t *testing.T) { +func TestUpdateAndCheck(t *testing.T) { t.Parallel() - client := mocks.NewClient(t) - client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil) - client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(sampleEncodedAllowlist(t), nil) - config := allowlist.OnchainAllowlistConfig{ - ContractVersion: 1, - ContractAddress: common.Address{}, - BlockConfirmations: 1, - } + t.Run("OK-with_ToS_V1.0.0", func(t *testing.T) { + client := mocks.NewClient(t) + client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil) - orm := amocks.NewORM(t) - allowlist, err := allowlist.NewOnchainAllowlist(client, config, orm, logger.TestLogger(t)) - require.NoError(t, err) + addr := common.HexToAddress("0x0000000000000000000000000000000000000020") + typeAndVersionResponse, err := encodeTypeAndVersionResponse(ToSContractV100) + require.NoError(t, err) - err = allowlist.Start(testutils.Context(t)) - require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, allowlist.Close()) + client.On("CallContract", mock.Anything, ethereum.CallMsg{ // typeAndVersion + To: &addr, + Data: hexutil.MustDecode("0x181f5a77"), + }, mock.Anything).Return(typeAndVersionResponse, nil) + + client.On("CallContract", mock.Anything, mock.Anything, 
mock.Anything).Return(sampleEncodedAllowlist(t), nil) + + config := allowlist.OnchainAllowlistConfig{ + ContractVersion: 1, + ContractAddress: common.Address{}, + BlockConfirmations: 1, + } + + orm := amocks.NewORM(t) + orm.On("PurgeAllowedSenders").Times(1).Return(nil) + orm.On("CreateAllowedSenders", []common.Address{common.HexToAddress(addr1), common.HexToAddress(addr2)}).Times(1).Return(nil) + + allowlist, err := allowlist.NewOnchainAllowlist(client, config, orm, logger.TestLogger(t)) + require.NoError(t, err) + + err = allowlist.Start(testutils.Context(t)) + require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, allowlist.Close()) + }) + + require.NoError(t, allowlist.UpdateFromContract(testutils.Context(t))) + require.False(t, allowlist.Allow(common.Address{})) + require.True(t, allowlist.Allow(common.HexToAddress(addr1))) + require.True(t, allowlist.Allow(common.HexToAddress(addr2))) + require.False(t, allowlist.Allow(common.HexToAddress(addr3))) }) - require.NoError(t, allowlist.UpdateFromContract(testutils.Context(t))) - require.False(t, allowlist.Allow(common.Address{})) - require.True(t, allowlist.Allow(common.HexToAddress(addr1))) - require.True(t, allowlist.Allow(common.HexToAddress(addr2))) - require.False(t, allowlist.Allow(common.HexToAddress(addr3))) + t.Run("OK-with_ToS_V1.1.0", func(t *testing.T) { + client := mocks.NewClient(t) + client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil) + + typeAndVersionResponse, err := encodeTypeAndVersionResponse(ToSContractV110) + require.NoError(t, err) + + addr := common.HexToAddress("0x0000000000000000000000000000000000000020") + client.On("CallContract", mock.Anything, ethereum.CallMsg{ // typeAndVersion + To: &addr, + Data: hexutil.MustDecode("0x181f5a77"), + }, mock.Anything).Return(typeAndVersionResponse, nil) + + client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(sampleEncodedAllowlist(t), nil) + + config := allowlist.OnchainAllowlistConfig{ + ContractVersion: 1, + ContractAddress: common.Address{}, + BlockConfirmations: 1, + } + + orm := amocks.NewORM(t) + orm.On("DeleteAllowedSenders", []common.Address{common.HexToAddress(addr1), common.HexToAddress(addr2)}).Times(1).Return(nil) + orm.On("CreateAllowedSenders", []common.Address{common.HexToAddress(addr1), common.HexToAddress(addr2)}).Times(1).Return(nil) + + allowlist, err := allowlist.NewOnchainAllowlist(client, config, orm, logger.TestLogger(t)) + require.NoError(t, err) + + err = allowlist.Start(testutils.Context(t)) + require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, allowlist.Close()) + }) + + require.NoError(t, allowlist.UpdateFromContract(testutils.Context(t))) + require.False(t, allowlist.Allow(common.Address{})) + require.True(t, allowlist.Allow(common.HexToAddress(addr1))) + require.True(t, allowlist.Allow(common.HexToAddress(addr2))) + require.False(t, allowlist.Allow(common.HexToAddress(addr3))) + }) } -func TestAllowlist_UnsupportedVersion(t *testing.T) { +func TestUnsupportedVersion(t *testing.T) { t.Parallel() client := mocks.NewClient(t) @@ -81,64 +134,132 @@ func TestAllowlist_UnsupportedVersion(t *testing.T) { require.Error(t, err) } -func TestAllowlist_UpdatePeriodically(t *testing.T) { +func TestUpdatePeriodically(t *testing.T) { t.Parallel() - ctx, cancel := context.WithCancel(testutils.Context(t)) - client := mocks.NewClient(t) - client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil) - client.On("CallContract", mock.Anything, mock.Anything, 
mock.Anything).Run(func(args mock.Arguments) { - cancel() - }).Return(sampleEncodedAllowlist(t), nil) - config := allowlist.OnchainAllowlistConfig{ - ContractAddress: common.Address{}, - ContractVersion: 1, - BlockConfirmations: 1, - UpdateFrequencySec: 2, - UpdateTimeoutSec: 1, - } + t.Run("OK-with_ToS_V1.0.0", func(t *testing.T) { + ctx, cancel := context.WithCancel(testutils.Context(t)) + client := mocks.NewClient(t) + client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil) - orm := amocks.NewORM(t) - orm.On("GetAllowedSenders", uint(0), uint(1000)).Return([]common.Address{}, nil) + addr := common.HexToAddress("0x0000000000000000000000000000000000000020") + typeAndVersionResponse, err := encodeTypeAndVersionResponse(ToSContractV100) + require.NoError(t, err) - allowlist, err := allowlist.NewOnchainAllowlist(client, config, orm, logger.TestLogger(t)) - require.NoError(t, err) + client.On("CallContract", mock.Anything, ethereum.CallMsg{ // typeAndVersion + To: &addr, + Data: hexutil.MustDecode("0x181f5a77"), + }, mock.Anything).Return(typeAndVersionResponse, nil) - err = allowlist.Start(ctx) - require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, allowlist.Close()) + client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + cancel() + }).Return(sampleEncodedAllowlist(t), nil) + config := allowlist.OnchainAllowlistConfig{ + ContractAddress: common.Address{}, + ContractVersion: 1, + BlockConfirmations: 1, + UpdateFrequencySec: 2, + UpdateTimeoutSec: 1, + } + + orm := amocks.NewORM(t) + orm.On("PurgeAllowedSenders").Times(1).Return(nil) + orm.On("GetAllowedSenders", uint(0), uint(1000)).Return([]common.Address{}, nil) + orm.On("CreateAllowedSenders", []common.Address{common.HexToAddress(addr1), common.HexToAddress(addr2)}).Times(1).Return(nil) + + allowlist, err := allowlist.NewOnchainAllowlist(client, config, orm, logger.TestLogger(t)) + require.NoError(t, err) + + err = allowlist.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, allowlist.Close()) + }) + + gomega.NewGomegaWithT(t).Eventually(func() bool { + return allowlist.Allow(common.HexToAddress(addr1)) && !allowlist.Allow(common.HexToAddress(addr3)) + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) }) - gomega.NewGomegaWithT(t).Eventually(func() bool { - return allowlist.Allow(common.HexToAddress(addr1)) && !allowlist.Allow(common.HexToAddress(addr3)) - }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + t.Run("OK-with_ToS_V1.1.0", func(t *testing.T) { + ctx, cancel := context.WithCancel(testutils.Context(t)) + client := mocks.NewClient(t) + client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil) + + addr := common.HexToAddress("0x0000000000000000000000000000000000000020") + typeAndVersionResponse, err := encodeTypeAndVersionResponse(ToSContractV110) + require.NoError(t, err) + + client.On("CallContract", mock.Anything, ethereum.CallMsg{ // typeAndVersion + To: &addr, + Data: hexutil.MustDecode("0x181f5a77"), + }, mock.Anything).Return(typeAndVersionResponse, nil) + + client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + cancel() + }).Return(sampleEncodedAllowlist(t), nil) + config := allowlist.OnchainAllowlistConfig{ + ContractAddress: common.Address{}, + ContractVersion: 1, + BlockConfirmations: 1, + UpdateFrequencySec: 2, + UpdateTimeoutSec: 1, + } + + orm := amocks.NewORM(t) + orm.On("DeleteAllowedSenders", 
[]common.Address{common.HexToAddress(addr1), common.HexToAddress(addr2)}).Times(1).Return(nil) + orm.On("GetAllowedSenders", uint(0), uint(1000)).Return([]common.Address{}, nil) + orm.On("CreateAllowedSenders", []common.Address{common.HexToAddress(addr1), common.HexToAddress(addr2)}).Times(1).Return(nil) + + allowlist, err := allowlist.NewOnchainAllowlist(client, config, orm, logger.TestLogger(t)) + require.NoError(t, err) + + err = allowlist.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, allowlist.Close()) + }) + + gomega.NewGomegaWithT(t).Eventually(func() bool { + return allowlist.Allow(common.HexToAddress(addr1)) && !allowlist.Allow(common.HexToAddress(addr3)) + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + }) } -func TestAllowlist_UpdateFromContract(t *testing.T) { + +func TestUpdateFromContract(t *testing.T) { t.Parallel() - t.Run("OK-iterate_over_list_of_allowed_senders", func(t *testing.T) { + t.Run("OK-fetch_complete_list_of_allowed_senders", func(t *testing.T) { ctx, cancel := context.WithCancel(testutils.Context(t)) client := mocks.NewClient(t) client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil) + + addr := common.HexToAddress("0x0000000000000000000000000000000000000020") + typeAndVersionResponse, err := encodeTypeAndVersionResponse(ToSContractV100) + require.NoError(t, err) + + client.On("CallContract", mock.Anything, ethereum.CallMsg{ // typeAndVersion + To: &addr, + Data: hexutil.MustDecode("0x181f5a77"), + }, mock.Anything).Return(typeAndVersionResponse, nil) + client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(sampleEncodedAllowlist(t), nil) config := allowlist.OnchainAllowlistConfig{ - ContractAddress: common.HexToAddress(addr3), - ContractVersion: 1, - BlockConfirmations: 1, - UpdateFrequencySec: 2, - UpdateTimeoutSec: 1, - StoredAllowlistBatchSize: 2, - OnchainAllowlistBatchSize: 16, - StoreAllowedSendersEnabled: true, - FetchingDelayInRangeSec: 0, + ContractAddress: common.HexToAddress(addr3), + ContractVersion: 1, + BlockConfirmations: 1, + UpdateFrequencySec: 2, + UpdateTimeoutSec: 1, + StoredAllowlistBatchSize: 2, + OnchainAllowlistBatchSize: 16, + FetchingDelayInRangeSec: 0, } orm := amocks.NewORM(t) - orm.On("DeleteAllowedSenders", []common.Address{common.HexToAddress(addr1), common.HexToAddress(addr2)}).Times(2).Return(nil) - orm.On("CreateAllowedSenders", []common.Address{common.HexToAddress(addr1), common.HexToAddress(addr2)}).Times(2).Return(nil) + orm.On("PurgeAllowedSenders").Times(1).Return(nil) + orm.On("CreateAllowedSenders", []common.Address{common.HexToAddress(addr1), common.HexToAddress(addr2)}).Times(1).Return(nil) allowlist, err := allowlist.NewOnchainAllowlist(client, config, orm, logger.TestLogger(t)) require.NoError(t, err) @@ -151,26 +272,38 @@ func TestAllowlist_UpdateFromContract(t *testing.T) { }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) }) - t.Run("OK-fetch_complete_list_of_allowed_senders_without_storing", func(t *testing.T) { + t.Run("OK-iterate_over_list_of_allowed_senders", func(t *testing.T) { ctx, cancel := context.WithCancel(testutils.Context(t)) client := mocks.NewClient(t) client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil) + + addr := common.HexToAddress("0x0000000000000000000000000000000000000020") + typeAndVersionResponse, err := encodeTypeAndVersionResponse(ToSContractV110) + require.NoError(t, err) + + client.On("CallContract", mock.Anything, 
ethereum.CallMsg{ // typeAndVersion + To: &addr, + Data: hexutil.MustDecode("0x181f5a77"), + }, mock.Anything).Return(typeAndVersionResponse, nil) + client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(sampleEncodedAllowlist(t), nil) config := allowlist.OnchainAllowlistConfig{ - ContractAddress: common.HexToAddress(addr3), - ContractVersion: 1, - BlockConfirmations: 1, - UpdateFrequencySec: 2, - UpdateTimeoutSec: 1, - StoredAllowlistBatchSize: 2, - OnchainAllowlistBatchSize: 16, - StoreAllowedSendersEnabled: false, - FetchingDelayInRangeSec: 0, + ContractAddress: common.HexToAddress(addr3), + ContractVersion: 1, + BlockConfirmations: 1, + UpdateFrequencySec: 2, + UpdateTimeoutSec: 1, + StoredAllowlistBatchSize: 2, + OnchainAllowlistBatchSize: 16, + FetchingDelayInRangeSec: 0, } orm := amocks.NewORM(t) + orm.On("DeleteAllowedSenders", []common.Address{common.HexToAddress(addr1), common.HexToAddress(addr2)}).Times(2).Return(nil) + orm.On("CreateAllowedSenders", []common.Address{common.HexToAddress(addr1), common.HexToAddress(addr2)}).Times(2).Return(nil) + allowlist, err := allowlist.NewOnchainAllowlist(client, config, orm, logger.TestLogger(t)) require.NoError(t, err) @@ -181,4 +314,93 @@ func TestAllowlist_UpdateFromContract(t *testing.T) { return allowlist.Allow(common.HexToAddress(addr1)) && !allowlist.Allow(common.HexToAddress(addr3)) }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) }) + +} + +func TestExtractContractVersion(t *testing.T) { + + type tc struct { + name string + versionStr string + expectedResult string + expectedError *string + } + + var errInvalidVersion = func(v string) *string { + ev := fmt.Sprintf("version not found in string: %s", v) + return &ev + } + + tcs := []tc{ + { + name: "OK-Tos_type_and_version", + versionStr: "Functions Terms of Service Allow List v1.1.0", + expectedResult: "v1.1.0", + expectedError: nil, + }, + { + name: "OK-double_digits_minor", + versionStr: "Functions Terms of Service Allow List v1.20.0", + expectedResult: "v1.20.0", + expectedError: nil, + }, + { + name: "NOK-invalid_version", + versionStr: "invalid_version", + expectedResult: "", + expectedError: errInvalidVersion("invalid_version"), + }, + { + name: "NOK-incomplete_version", + versionStr: "v2.0", + expectedResult: "", + expectedError: errInvalidVersion("v2.0"), + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + actualResult, actualError := allowlist.ExtractContractVersion(tc.versionStr) + require.Equal(t, tc.expectedResult, actualResult) + + if tc.expectedError != nil { + require.EqualError(t, actualError, *tc.expectedError) + } else { + require.NoError(t, actualError) + } + }) + } +} + +func encodeTypeAndVersionResponse(typeAndVersion string) ([]byte, error) { + codecName := "my_codec" + evmEncoderConfig := `[{"Name":"typeAndVersion","Type":"string"}]` + codecConfig := types.CodecConfig{Configs: map[string]types.ChainCodecConfig{ + codecName: {TypeABI: evmEncoderConfig}, + }} + encoder, err := evm.NewCodec(codecConfig) + if err != nil { + return nil, err + } + + input := map[string]any{ + "typeAndVersion": typeAndVersion, + } + typeAndVersionResponse, err := encoder.Encode(context.Background(), input, codecName) + if err != nil { + return nil, err + } + + return typeAndVersionResponse, nil +} + +func sampleEncodedAllowlist(t *testing.T) []byte { + abiEncodedAddresses := + "0000000000000000000000000000000000000000000000000000000000000020" + + 
"0000000000000000000000000000000000000000000000000000000000000002" + + "000000000000000000000000" + addr1 + + "000000000000000000000000" + addr2 + rawData, err := hex.DecodeString(abiEncodedAddresses) + require.NoError(t, err) + return rawData } diff --git a/core/services/gateway/handlers/functions/allowlist/mocks/orm.go b/core/services/gateway/handlers/functions/allowlist/mocks/orm.go index c2ba27c3a24..daff33d8902 100644 --- a/core/services/gateway/handlers/functions/allowlist/mocks/orm.go +++ b/core/services/gateway/handlers/functions/allowlist/mocks/orm.go @@ -101,6 +101,30 @@ func (_m *ORM) GetAllowedSenders(offset uint, limit uint, qopts ...pg.QOpt) ([]c return r0, r1 } +// PurgeAllowedSenders provides a mock function with given fields: qopts +func (_m *ORM) PurgeAllowedSenders(qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for PurgeAllowedSenders") + } + + var r0 error + if rf, ok := ret.Get(0).(func(...pg.QOpt) error); ok { + r0 = rf(qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // NewORM creates a new instance of ORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewORM(t interface { diff --git a/core/services/gateway/handlers/functions/allowlist/orm.go b/core/services/gateway/handlers/functions/allowlist/orm.go index 07ee1ea3b3b..ccacec81a43 100644 --- a/core/services/gateway/handlers/functions/allowlist/orm.go +++ b/core/services/gateway/handlers/functions/allowlist/orm.go @@ -18,6 +18,7 @@ type ORM interface { GetAllowedSenders(offset, limit uint, qopts ...pg.QOpt) ([]common.Address, error) CreateAllowedSenders(allowedSenders []common.Address, qopts ...pg.QOpt) error DeleteAllowedSenders(blockedSenders []common.Address, qopts ...pg.QOpt) error + PurgeAllowedSenders(qopts ...pg.QOpt) error } type orm struct { @@ -91,6 +92,8 @@ func (o *orm) CreateAllowedSenders(allowedSenders []common.Address, qopts ...pg. return nil } +// DeleteAllowedSenders is used to remove blocked senders from the functions_allowlist table. +// This is achieved by specifying a list of blockedSenders to remove. func (o *orm) DeleteAllowedSenders(blockedSenders []common.Address, qopts ...pg.QOpt) error { var valuesPlaceholder []string for i := 1; i <= len(blockedSenders); i++ { @@ -121,3 +124,24 @@ func (o *orm) DeleteAllowedSenders(blockedSenders []common.Address, qopts ...pg. return nil } + +// PurgeAllowedSenders will remove all the allowed senders for the configured orm routerContractAddress +func (o *orm) PurgeAllowedSenders(qopts ...pg.QOpt) error { + stmt := fmt.Sprintf(` + DELETE FROM %s + WHERE router_contract_address = $1;`, tableName) + + res, err := o.q.WithOpts(qopts...).Exec(stmt, o.routerContractAddress) + if err != nil { + return err + } + + rowsAffected, err := res.RowsAffected() + if err != nil { + return err + } + + o.lggr.Debugf("Successfully purged allowed senders for routerContractAddress: %s. 
rowsAffected: %d", o.routerContractAddress, rowsAffected) + + return nil +} diff --git a/core/services/gateway/handlers/functions/allowlist/orm_test.go b/core/services/gateway/handlers/functions/allowlist/orm_test.go index 0f63e83cd5f..1d357616fab 100644 --- a/core/services/gateway/handlers/functions/allowlist/orm_test.go +++ b/core/services/gateway/handlers/functions/allowlist/orm_test.go @@ -174,6 +174,71 @@ func TestORM_DeleteAllowedSenders(t *testing.T) { }) } +func TestORM_PurgeAllowedSenders(t *testing.T) { + t.Parallel() + + t.Run("OK-purge_allowed_list", func(t *testing.T) { + orm, err := setupORM(t) + require.NoError(t, err) + add1 := testutils.NewAddress() + add2 := testutils.NewAddress() + add3 := testutils.NewAddress() + err = orm.CreateAllowedSenders([]common.Address{add1, add2, add3}) + require.NoError(t, err) + + results, err := orm.GetAllowedSenders(0, 10) + require.NoError(t, err) + require.Equal(t, 3, len(results), "incorrect results length") + require.Equal(t, add1, results[0]) + + err = orm.PurgeAllowedSenders() + require.NoError(t, err) + + results, err = orm.GetAllowedSenders(0, 10) + require.NoError(t, err) + require.Equal(t, 0, len(results), "incorrect results length") + }) + + t.Run("OK-purge_allowed_list_for_contract_address", func(t *testing.T) { + orm1, err := setupORM(t) + require.NoError(t, err) + add1 := testutils.NewAddress() + add2 := testutils.NewAddress() + err = orm1.CreateAllowedSenders([]common.Address{add1, add2}) + require.NoError(t, err) + + results, err := orm1.GetAllowedSenders(0, 10) + require.NoError(t, err) + require.Equal(t, 2, len(results), "incorrect results length") + require.Equal(t, add1, results[0]) + + orm2, err := setupORM(t) + require.NoError(t, err) + add3 := testutils.NewAddress() + add4 := testutils.NewAddress() + err = orm2.CreateAllowedSenders([]common.Address{add3, add4}) + require.NoError(t, err) + + results, err = orm2.GetAllowedSenders(0, 10) + require.NoError(t, err) + require.Equal(t, 2, len(results), "incorrect results length") + require.Equal(t, add3, results[0]) + + err = orm2.PurgeAllowedSenders() + require.NoError(t, err) + + results, err = orm2.GetAllowedSenders(0, 10) + require.NoError(t, err) + require.Equal(t, 0, len(results), "incorrect results length") + + results, err = orm1.GetAllowedSenders(0, 10) + require.NoError(t, err) + require.Equal(t, 2, len(results), "incorrect results length") + require.Equal(t, add1, results[0]) + require.Equal(t, add2, results[1]) + }) +} + func Test_NewORM(t *testing.T) { t.Run("OK-create_ORM", func(t *testing.T) { _, err := allowlist.NewORM(pgtest.NewSqlxDB(t), logger.TestLogger(t), pgtest.NewQConfig(true), testutils.NewAddress()) diff --git a/go.mod b/go.mod index 2291388def2..1e0c09b06bf 100644 --- a/go.mod +++ b/go.mod @@ -98,6 +98,7 @@ require ( go.uber.org/zap v1.26.0 golang.org/x/crypto v0.19.0 golang.org/x/exp v0.0.0-20240213143201-ec583247a57a + golang.org/x/mod v0.15.0 golang.org/x/sync v0.6.0 golang.org/x/term v0.17.0 golang.org/x/text v0.14.0 @@ -316,7 +317,6 @@ require ( go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/ratelimit v0.2.0 // indirect golang.org/x/arch v0.7.0 // indirect - golang.org/x/mod v0.15.0 // indirect golang.org/x/net v0.21.0 // indirect golang.org/x/oauth2 v0.17.0 // indirect golang.org/x/sys v0.17.0 // indirect From 63c286d81d9c5032a334c629616e6fab0ca6597a Mon Sep 17 00:00:00 2001 From: Domino Valdano <2644901+reductionista@users.noreply.github.com> Date: Thu, 22 Feb 2024 10:46:14 -0800 Subject: [PATCH 4/6] Add Topic1, Topic2, 
Topic3, LogsPerBlock fields to logpoller.Filter and log_poller_filters schema (#11949) * Add Topic1, Topic2, Topic3, LogsPerBlock fields to logpoller.Filter And update evm.log_poller_filters schema to match * Improve SQL QUERY logging []BYTEA was panicing for nil or empty array, which then gets trapped and ignored... making it very difficult to figure out which query the error is coming from. While fixing that bug, updated formating of []BYTEA and TEXT to be closer an actual SQL query you can run by cutting and pasting from the log * Add MaxLogsKept * Address PR comments - Add new index before removing old index to be safer - Change MaxLogsKept & LogsPerBlock from big.Int to uint64 * Update migration # after rebase --- core/chains/evm/logpoller/log_poller.go | 13 ++- .../evm/logpoller/log_poller_internal_test.go | 10 +- core/chains/evm/logpoller/log_poller_test.go | 74 +++++++++----- core/chains/evm/logpoller/orm.go | 37 +++++-- core/chains/evm/logpoller/orm_test.go | 96 +++++++++++++++++++ core/chains/evm/logpoller/query.go | 23 +++++ core/chains/evm/types/types.go | 14 ++- core/services/pg/q.go | 17 +++- core/services/pg/q_test.go | 12 +-- ...ller_filters_add_topics_logs_per_block.sql | 29 ++++++ 10 files changed, 271 insertions(+), 54 deletions(-) create mode 100644 core/store/migrate/migrations/0225_log_poller_filters_add_topics_logs_per_block.sql diff --git a/core/chains/evm/logpoller/log_poller.go b/core/chains/evm/logpoller/log_poller.go index 7006c1762ef..a2c35bec59f 100644 --- a/core/chains/evm/logpoller/log_poller.go +++ b/core/chains/evm/logpoller/log_poller.go @@ -153,10 +153,15 @@ func NewLogPoller(orm ORM, ec Client, lggr logger.Logger, pollPeriod time.Durati } type Filter struct { - Name string // see FilterName(id, args) below - EventSigs evmtypes.HashArray - Addresses evmtypes.AddressArray - Retention time.Duration + Name string // see FilterName(id, args) below + Addresses evmtypes.AddressArray + EventSigs evmtypes.HashArray // list of possible values for eventsig (aka topic1) + Topic2 evmtypes.HashArray // list of possible values for topic2 + Topic3 evmtypes.HashArray // list of possible values for topic3 + Topic4 evmtypes.HashArray // list of possible values for topic4 + Retention time.Duration // maximum amount of time to retain logs + MaxLogsKept uint64 // maximum number of logs to retain ( 0 = unlimited ) + LogsPerBlock uint64 // rate limit ( maximum # of logs per block, 0 = unlimited ) } // FilterName is a suggested convenience function for clients to construct unique filter names diff --git a/core/chains/evm/logpoller/log_poller_internal_test.go b/core/chains/evm/logpoller/log_poller_internal_test.go index 863ab0fddea..899efebe42c 100644 --- a/core/chains/evm/logpoller/log_poller_internal_test.go +++ b/core/chains/evm/logpoller/log_poller_internal_test.go @@ -71,31 +71,31 @@ func TestLogPoller_RegisterFilter(t *testing.T) { require.Equal(t, 1, len(f.Addresses)) assert.Equal(t, common.HexToAddress("0x0000000000000000000000000000000000000000"), f.Addresses[0]) - err := lp.RegisterFilter(Filter{"Emitter Log 1", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{a1}, 0}) + err := lp.RegisterFilter(Filter{Name: "Emitter Log 1", EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, Addresses: []common.Address{a1}}) require.NoError(t, err) assert.Equal(t, []common.Address{a1}, lp.Filter(nil, nil, nil).Addresses) assert.Equal(t, [][]common.Hash{{EmitterABI.Events["Log1"].ID}}, lp.Filter(nil, nil, nil).Topics) validateFiltersTable(t, lp, orm) // Should de-dupe 
EventSigs - err = lp.RegisterFilter(Filter{"Emitter Log 1 + 2", []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{a2}, 0}) + err = lp.RegisterFilter(Filter{Name: "Emitter Log 1 + 2", EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, Addresses: []common.Address{a2}}) require.NoError(t, err) assert.Equal(t, []common.Address{a1, a2}, lp.Filter(nil, nil, nil).Addresses) assert.Equal(t, [][]common.Hash{{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}}, lp.Filter(nil, nil, nil).Topics) validateFiltersTable(t, lp, orm) // Should de-dupe Addresses - err = lp.RegisterFilter(Filter{"Emitter Log 1 + 2 dupe", []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{a2}, 0}) + err = lp.RegisterFilter(Filter{Name: "Emitter Log 1 + 2 dupe", EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, Addresses: []common.Address{a2}}) require.NoError(t, err) assert.Equal(t, []common.Address{a1, a2}, lp.Filter(nil, nil, nil).Addresses) assert.Equal(t, [][]common.Hash{{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}}, lp.Filter(nil, nil, nil).Topics) validateFiltersTable(t, lp, orm) // Address required. - err = lp.RegisterFilter(Filter{"no address", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{}, 0}) + err = lp.RegisterFilter(Filter{Name: "no address", EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}}) require.Error(t, err) // Event required - err = lp.RegisterFilter(Filter{"No event", []common.Hash{}, []common.Address{a1}, 0}) + err = lp.RegisterFilter(Filter{Name: "No event", Addresses: []common.Address{a1}}) require.Error(t, err) validateFiltersTable(t, lp, orm) diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go index 2508e676e6c..ab02e783a28 100644 --- a/core/chains/evm/logpoller/log_poller_test.go +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -150,7 +150,7 @@ func TestLogPoller_Integration(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) th.Client.Commit() // Block 2. Ensure we have finality number of blocks - require.NoError(t, th.LogPoller.RegisterFilter(logpoller.Filter{"Integration test", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{th.EmitterAddress1}, 0})) + require.NoError(t, th.LogPoller.RegisterFilter(logpoller.Filter{Name: "Integration test", EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, Addresses: []common.Address{th.EmitterAddress1}})) require.Len(t, th.LogPoller.Filter(nil, nil, nil).Addresses, 1) require.Len(t, th.LogPoller.Filter(nil, nil, nil).Topics, 1) @@ -188,8 +188,9 @@ func TestLogPoller_Integration(t *testing.T) { // Now let's update the Filter and replay to get Log2 logs. 
err = th.LogPoller.RegisterFilter(logpoller.Filter{ - "Emitter - log2", []common.Hash{EmitterABI.Events["Log2"].ID}, - []common.Address{th.EmitterAddress1}, 0, + Name: "Emitter - log2", + EventSigs: []common.Hash{EmitterABI.Events["Log2"].ID}, + Addresses: []common.Address{th.EmitterAddress1}, }) require.NoError(t, err) // Replay an invalid block should error @@ -254,11 +255,13 @@ func Test_BackupLogPoller(t *testing.T) { ctx := testutils.Context(t) - filter1 := logpoller.Filter{"filter1", []common.Hash{ - EmitterABI.Events["Log1"].ID, - EmitterABI.Events["Log2"].ID}, - []common.Address{th.EmitterAddress1}, - 0} + filter1 := logpoller.Filter{ + Name: "filter1", + EventSigs: []common.Hash{ + EmitterABI.Events["Log1"].ID, + EmitterABI.Events["Log2"].ID}, + Addresses: []common.Address{th.EmitterAddress1}, + } err := th.LogPoller.RegisterFilter(filter1) require.NoError(t, err) @@ -268,9 +271,11 @@ func Test_BackupLogPoller(t *testing.T) { require.Equal(t, filter1, filters["filter1"]) err = th.LogPoller.RegisterFilter( - logpoller.Filter{"filter2", - []common.Hash{EmitterABI.Events["Log1"].ID}, - []common.Address{th.EmitterAddress2}, 0}) + logpoller.Filter{ + Name: "filter2", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{th.EmitterAddress2}, + }) require.NoError(t, err) defer func() { @@ -569,9 +574,9 @@ func TestLogPoller_BlockTimestamps(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) addresses := []common.Address{th.EmitterAddress1, th.EmitterAddress2} - topics := []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID} + events := []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID} - err := th.LogPoller.RegisterFilter(logpoller.Filter{"convertLogs", topics, addresses, 0}) + err := th.LogPoller.RegisterFilter(logpoller.Filter{Name: "convertLogs", EventSigs: events, Addresses: addresses}) require.NoError(t, err) blk, err := th.Client.BlockByNumber(ctx, nil) @@ -619,7 +624,7 @@ func TestLogPoller_BlockTimestamps(t *testing.T) { query := ethereum.FilterQuery{ FromBlock: big.NewInt(2), ToBlock: big.NewInt(5), - Topics: [][]common.Hash{topics}, + Topics: [][]common.Hash{events}, Addresses: []common.Address{th.EmitterAddress1, th.EmitterAddress2}} gethLogs, err := th.Client.FilterLogs(ctx, query) @@ -762,8 +767,9 @@ func TestLogPoller_PollAndSaveLogs(t *testing.T) { // Set up a log poller listening for log emitter logs. 
err := th.LogPoller.RegisterFilter(logpoller.Filter{ - "Test Emitter 1 & 2", []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, - []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0, + Name: "Test Emitter 1 & 2", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, + Addresses: []common.Address{th.EmitterAddress1, th.EmitterAddress2}, }) require.NoError(t, err) @@ -1068,12 +1074,22 @@ func TestLogPoller_LoadFilters(t *testing.T) { t.Parallel() th := SetupTH(t, false, 2, 3, 2, 1000) - filter1 := logpoller.Filter{"first Filter", []common.Hash{ - EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0} - filter2 := logpoller.Filter{"second Filter", []common.Hash{ - EmitterABI.Events["Log2"].ID, EmitterABI.Events["Log3"].ID}, []common.Address{th.EmitterAddress2}, 0} - filter3 := logpoller.Filter{"third Filter", []common.Hash{ - EmitterABI.Events["Log1"].ID}, []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0} + filter1 := logpoller.Filter{ + Name: "first Filter", + EventSigs: []common.Hash{ + EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, + Addresses: []common.Address{th.EmitterAddress1, th.EmitterAddress2}, + } + filter2 := logpoller.Filter{ + Name: "second Filter", + EventSigs: []common.Hash{EmitterABI.Events["Log2"].ID, EmitterABI.Events["Log3"].ID}, + Addresses: []common.Address{th.EmitterAddress2}, + } + filter3 := logpoller.Filter{ + Name: "third Filter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{th.EmitterAddress1, th.EmitterAddress2}, + } assert.True(t, filter1.Contains(nil)) assert.False(t, filter1.Contains(&filter2)) @@ -1119,9 +1135,11 @@ func TestLogPoller_GetBlocks_Range(t *testing.T) { t.Parallel() th := SetupTH(t, false, 2, 3, 2, 1000) - err := th.LogPoller.RegisterFilter(logpoller.Filter{"GetBlocks Test", []common.Hash{ - EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0}, - ) + err := th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "GetBlocks Test", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, + Addresses: []common.Address{th.EmitterAddress1, th.EmitterAddress2}, + }) require.NoError(t, err) // LP retrieves 0 blocks @@ -1365,7 +1383,11 @@ func TestTooManyLogResults(t *testing.T) { }) addr := testutils.NewAddress() - err := lp.RegisterFilter(logpoller.Filter{"Integration test", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{addr}, 0}) + err := lp.RegisterFilter(logpoller.Filter{ + Name: "Integration test", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{addr}, + }) require.NoError(t, err) lp.PollAndSaveLogs(ctx, 5) block, err2 := o.SelectLatestBlock() diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index 1db8271ccb6..fe814e8a43c 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -5,6 +5,7 @@ import ( "database/sql" "fmt" "math/big" + "strings" "time" "github.com/ethereum/go-ethereum/common" @@ -13,6 +14,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) @@ -95,26 +97,42 @@ func (o *DbORM) InsertBlock(blockHash 
common.Hash, blockNumber int64, blockTimes // Each address/event pair must have a unique job id, so it may be removed when the job is deleted. // If a second job tries to overwrite the same pair, this should fail. func (o *DbORM) InsertFilter(filter Filter, qopts ...pg.QOpt) (err error) { + topicArrays := []types.HashArray{filter.Topic2, filter.Topic3, filter.Topic4} args, err := newQueryArgs(o.chainID). withCustomArg("name", filter.Name). - withCustomArg("retention", filter.Retention). + withRetention(filter.Retention). + withMaxLogsKept(filter.MaxLogsKept). + withLogsPerBlock(filter.LogsPerBlock). withAddressArray(filter.Addresses). withEventSigArray(filter.EventSigs). + withTopicArrays(filter.Topic2, filter.Topic3, filter.Topic4). toArgs() if err != nil { return err } // '::' has to be escaped in the query string // https://github.com/jmoiron/sqlx/issues/91, https://github.com/jmoiron/sqlx/issues/428 - return o.q.WithOpts(qopts...).ExecQNamed(` + var topicsColumns, topicsSql strings.Builder + for n, topicValues := range topicArrays { + if len(topicValues) != 0 { + topicCol := fmt.Sprintf("topic%d", n+2) + fmt.Fprintf(&topicsColumns, ", %s", topicCol) + fmt.Fprintf(&topicsSql, ",\n(SELECT unnest(:%s ::::BYTEA[]) %s) t%d", topicCol, topicCol, n+2) + } + } + query := fmt.Sprintf(` INSERT INTO evm.log_poller_filters - (name, evm_chain_id, retention, created_at, address, event) + (name, evm_chain_id, retention, max_logs_kept, logs_per_block, created_at, address, event %s) SELECT * FROM - (SELECT :name, :evm_chain_id ::::NUMERIC, :retention ::::BIGINT, NOW()) x, + (SELECT :name, :evm_chain_id ::::NUMERIC, :retention ::::BIGINT, :max_logs_kept ::::NUMERIC, :logs_per_block ::::NUMERIC, NOW()) x, (SELECT unnest(:address_array ::::BYTEA[]) addr) a, (SELECT unnest(:event_sig_array ::::BYTEA[]) ev) e - ON CONFLICT (name, evm_chain_id, address, event) - DO UPDATE SET retention=:retention ::::BIGINT`, args) + %s + ON CONFLICT (hash_record_extended((name, evm_chain_id, address, event, topic2, topic3, topic4), 0)) + DO UPDATE SET retention=:retention ::::BIGINT, max_logs_kept=:max_logs_kept ::::NUMERIC, logs_per_block=:logs_per_block ::::NUMERIC`, + topicsColumns.String(), + topicsSql.String()) + return o.q.WithOpts(qopts...).ExecQNamed(query, args) } // DeleteFilter removes all events,address pairs associated with the Filter @@ -130,7 +148,12 @@ func (o *DbORM) LoadFilters(qopts ...pg.QOpt) (map[string]Filter, error) { err := q.Select(&rows, `SELECT name, ARRAY_AGG(DISTINCT address)::BYTEA[] AS addresses, ARRAY_AGG(DISTINCT event)::BYTEA[] AS event_sigs, - MAX(retention) AS retention + ARRAY_AGG(DISTINCT topic2 ORDER BY topic2) FILTER(WHERE topic2 IS NOT NULL) AS topic2, + ARRAY_AGG(DISTINCT topic3 ORDER BY topic3) FILTER(WHERE topic3 IS NOT NULL) AS topic3, + ARRAY_AGG(DISTINCT topic4 ORDER BY topic4) FILTER(WHERE topic4 IS NOT NULL) AS topic4, + MAX(logs_per_block) AS logs_per_block, + MAX(retention) AS retention, + MAX(max_logs_kept) AS max_logs_kept FROM evm.log_poller_filters WHERE evm_chain_id = $1 GROUP BY name`, ubig.New(o.chainID)) filters := make(map[string]Filter) diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go index bcaa6f72fa0..9824d0e9426 100644 --- a/core/chains/evm/logpoller/orm_test.go +++ b/core/chains/evm/logpoller/orm_test.go @@ -2,6 +2,7 @@ package logpoller_test import ( "bytes" + "context" "database/sql" "fmt" "math" @@ -10,6 +11,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/jackc/pgx/v4" 
"github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -448,6 +450,100 @@ func TestORM(t *testing.T) { require.Zero(t, len(logs)) } +type PgxLogger struct { + lggr logger.Logger +} + +func NewPgxLogger(lggr logger.Logger) PgxLogger { + return PgxLogger{lggr} +} + +func (l PgxLogger) Log(ctx context.Context, log pgx.LogLevel, msg string, data map[string]interface{}) { + +} + +func TestLogPollerFilters(t *testing.T) { + lggr := logger.Test(t) + chainID := testutils.NewRandomEVMChainID() + + dbx := pgtest.NewSqlxDB(t) + orm := logpoller.NewORM(chainID, dbx, lggr, pgtest.NewQConfig(true)) + + event1 := EmitterABI.Events["Log1"].ID + event2 := EmitterABI.Events["Log2"].ID + address := common.HexToAddress("0x1234") + topicA := common.HexToHash("0x1111") + topicB := common.HexToHash("0x2222") + topicC := common.HexToHash("0x3333") + topicD := common.HexToHash("0x4444") + + filters := []logpoller.Filter{{ + Name: "filter by topic2", + EventSigs: types.HashArray{event1, event2}, + Addresses: types.AddressArray{address}, + Topic2: types.HashArray{topicA, topicB}, + }, { + Name: "filter by topic3", + Addresses: types.AddressArray{address}, + EventSigs: types.HashArray{event1}, + Topic3: types.HashArray{topicB, topicC, topicD}, + }, { + Name: "filter by topic4", + Addresses: types.AddressArray{address}, + EventSigs: types.HashArray{event1}, + Topic4: types.HashArray{topicC}, + }, { + Name: "filter by topics 2 and 4", + Addresses: types.AddressArray{address}, + EventSigs: types.HashArray{event2}, + Topic2: types.HashArray{topicA}, + Topic4: types.HashArray{topicC, topicD}, + }, { + Name: "10 lpb rate limit, 1M max logs", + Addresses: types.AddressArray{address}, + EventSigs: types.HashArray{event1}, + MaxLogsKept: 1000000, + LogsPerBlock: 10, + }, { // ensure that the UNIQUE CONSTRAINT isn't too strict (should only error if all fields are identical) + Name: "duplicate of filter by topic4", + Addresses: types.AddressArray{address}, + EventSigs: types.HashArray{event1}, + Topic3: types.HashArray{topicC}, + }} + + for _, filter := range filters { + t.Run("Save filter: "+filter.Name, func(t *testing.T) { + var count int + err := orm.InsertFilter(filter) + require.NoError(t, err) + err = dbx.Get(&count, `SELECT COUNT(*) FROM evm.log_poller_filters WHERE evm_chain_id = $1 AND name = $2`, ubig.New(chainID), filter.Name) + require.NoError(t, err) + expectedCount := len(filter.Addresses) * len(filter.EventSigs) + if len(filter.Topic2) > 0 { + expectedCount *= len(filter.Topic2) + } + if len(filter.Topic3) > 0 { + expectedCount *= len(filter.Topic3) + } + if len(filter.Topic4) > 0 { + expectedCount *= len(filter.Topic4) + } + assert.Equal(t, count, expectedCount) + }) + } + + // Make sure they all come back the same when we reload them + t.Run("Load filters", func(t *testing.T) { + loadedFilters, err := orm.LoadFilters() + require.NoError(t, err) + for _, filter := range filters { + loadedFilter, ok := loadedFilters[filter.Name] + require.True(t, ok, `Failed to reload filter "%s"`, filter.Name) + assert.Equal(t, filter, loadedFilter) + } + }) +} + func insertLogsTopicValueRange(t *testing.T, chainID *big.Int, o *logpoller.DbORM, addr common.Address, blockNumber int, eventSig common.Hash, start, stop int) { var lgs []logpoller.Log for i := start; i <= stop; i++ { diff --git a/core/chains/evm/logpoller/query.go b/core/chains/evm/logpoller/query.go index a37b15b2b2d..d8112459743 100644 --- a/core/chains/evm/logpoller/query.go +++ b/core/chains/evm/logpoller/query.go @@ 
-9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/lib/pq" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" ) @@ -54,6 +55,16 @@ func (q *queryArgs) withEventSigArray(eventSigs []common.Hash) *queryArgs { return q.withCustomArg("event_sig_array", concatBytes(eventSigs)) } +func (q *queryArgs) withTopicArray(topicValues types.HashArray, topicNum uint64) *queryArgs { + return q.withCustomArg(fmt.Sprintf("topic%d", topicNum), concatBytes(topicValues)) +} + +func (q *queryArgs) withTopicArrays(topic2Vals types.HashArray, topic3Vals types.HashArray, topic4Vals types.HashArray) *queryArgs { + return q.withTopicArray(topic2Vals, 2). + withTopicArray(topic3Vals, 3). + withTopicArray(topic4Vals, 4) +} + func (q *queryArgs) withAddress(address common.Address) *queryArgs { return q.withCustomArg("address", address) } @@ -127,6 +138,18 @@ func (q *queryArgs) withTxHash(hash common.Hash) *queryArgs { return q.withCustomHashArg("tx_hash", hash) } +func (q *queryArgs) withRetention(retention time.Duration) *queryArgs { + return q.withCustomArg("retention", retention) +} + +func (q *queryArgs) withLogsPerBlock(logsPerBlock uint64) *queryArgs { + return q.withCustomArg("logs_per_block", logsPerBlock) +} + +func (q *queryArgs) withMaxLogsKept(maxLogsKept uint64) *queryArgs { + return q.withCustomArg("max_logs_kept", maxLogsKept) +} + func (q *queryArgs) withCustomHashArg(name string, arg common.Hash) *queryArgs { return q.withCustomArg(name, arg.Bytes()) } diff --git a/core/chains/evm/types/types.go b/core/chains/evm/types/types.go index 987fd987d3f..c3ad584ebbd 100644 --- a/core/chains/evm/types/types.go +++ b/core/chains/evm/types/types.go @@ -332,7 +332,11 @@ func (a *AddressArray) Scan(src interface{}) error { if err != nil { return errors.Wrap(err, "Expected BYTEA[] column for AddressArray") } - if baArray.Status != pgtype.Present || len(baArray.Dimensions) > 1 { + if baArray.Status != pgtype.Present { + *a = nil + return nil + } + if len(baArray.Dimensions) > 1 { return errors.Errorf("Expected AddressArray to be 1-dimensional. Dimensions = %v", baArray.Dimensions) } @@ -359,14 +363,18 @@ func (h *HashArray) Scan(src interface{}) error { if err != nil { return errors.Wrap(err, "Expected BYTEA[] column for HashArray") } - if baArray.Status != pgtype.Present || len(baArray.Dimensions) > 1 { + if baArray.Status != pgtype.Present { + *h = nil + return nil + } + if len(baArray.Dimensions) > 1 { return errors.Errorf("Expected HashArray to be 1-dimensional. Dimensions = %v", baArray.Dimensions) } for i, ba := range baArray.Elements { hash := common.Hash{} if ba.Status != pgtype.Present { - return errors.Errorf("Expected all addresses in HashArray to be non-NULL. Got HashArray[%d] = NULL", i) + return errors.Errorf("Expected all hashes in HashArray to be non-NULL. 
Got HashArray[%d] = NULL", i) } err = hash.Scan(ba.Bytes) if err != nil { diff --git a/core/services/pg/q.go b/core/services/pg/q.go index ba2627fa745..49a18817de9 100644 --- a/core/services/pg/q.go +++ b/core/services/pg/q.go @@ -296,14 +296,25 @@ func sprintQ(query string, args []interface{}) string { case common.Hash: pairs = append(pairs, fmt.Sprintf("$%d", i+1), fmt.Sprintf("'\\x%x'", v.Bytes())) case pq.ByteaArray: + pairs = append(pairs, fmt.Sprintf("$%d", i+1)) + if v == nil { + pairs = append(pairs, "NULL") + continue + } + if len(v) == 0 { + pairs = append(pairs, "ARRAY[]") + continue + } var s strings.Builder - fmt.Fprintf(&s, "('\\x%x'", v[0]) + fmt.Fprintf(&s, "ARRAY['\\x%x'", v[0]) for j := 1; j < len(v); j++ { fmt.Fprintf(&s, ",'\\x%x'", v[j]) } - pairs = append(pairs, fmt.Sprintf("$%d", i+1), fmt.Sprintf("%s)", s.String())) + pairs = append(pairs, fmt.Sprintf("%s]", s.String())) + case string: + pairs = append(pairs, fmt.Sprintf("$%d", i+1), fmt.Sprintf("'%s'", v)) default: - pairs = append(pairs, fmt.Sprintf("$%d", i+1), fmt.Sprintf("%v", arg)) + pairs = append(pairs, fmt.Sprintf("$%d", i+1), fmt.Sprintf("%v", v)) } } replacer := strings.NewReplacer(pairs...) diff --git a/core/services/pg/q_test.go b/core/services/pg/q_test.go index 7692fb792bd..6e59e499887 100644 --- a/core/services/pg/q_test.go +++ b/core/services/pg/q_test.go @@ -21,27 +21,27 @@ func Test_sprintQ(t *testing.T) { {"one", "SELECT $1 FROM table;", []interface{}{"foo"}, - "SELECT foo FROM table;"}, + "SELECT 'foo' FROM table;"}, {"two", "SELECT $1 FROM table WHERE bar = $2;", []interface{}{"foo", 1}, - "SELECT foo FROM table WHERE bar = 1;"}, + "SELECT 'foo' FROM table WHERE bar = 1;"}, {"limit", "SELECT $1 FROM table LIMIT $2;", []interface{}{"foo", Limit(10)}, - "SELECT foo FROM table LIMIT 10;"}, + "SELECT 'foo' FROM table LIMIT 10;"}, {"limit-all", "SELECT $1 FROM table LIMIT $2;", []interface{}{"foo", Limit(-1)}, - "SELECT foo FROM table LIMIT NULL;"}, + "SELECT 'foo' FROM table LIMIT NULL;"}, {"bytea", "SELECT $1 FROM table WHERE b = $2;", []interface{}{"foo", []byte{0x0a}}, - "SELECT foo FROM table WHERE b = '\\x0a';"}, + "SELECT 'foo' FROM table WHERE b = '\\x0a';"}, {"bytea[]", "SELECT $1 FROM table WHERE b = $2;", []interface{}{"foo", pq.ByteaArray([][]byte{{0xa}, {0xb}})}, - "SELECT foo FROM table WHERE b = ('\\x0a','\\x0b');"}, + "SELECT 'foo' FROM table WHERE b = ARRAY['\\x0a','\\x0b'];"}, } { t.Run(tt.name, func(t *testing.T) { got := sprintQ(tt.query, tt.args) diff --git a/core/store/migrate/migrations/0225_log_poller_filters_add_topics_logs_per_block.sql b/core/store/migrate/migrations/0225_log_poller_filters_add_topics_logs_per_block.sql new file mode 100644 index 00000000000..77e3d5fbd51 --- /dev/null +++ b/core/store/migrate/migrations/0225_log_poller_filters_add_topics_logs_per_block.sql @@ -0,0 +1,29 @@ +-- +goose Up + +ALTER TABLE evm.log_poller_filters + ADD COLUMN topic2 BYTEA CHECK (octet_length(topic2) = 32), + ADD COLUMN topic3 BYTEA CHECK (octet_length(topic3) = 32), + ADD COLUMN topic4 BYTEA CHECK (octet_length(topic4) = 32), + ADD COLUMN max_logs_kept BIGINT, + ADD COLUMN logs_per_block BIGINT; + +-- Ordinary UNIQUE CONSTRAINT can't work for topics because they can be NULL. Any row with any column being NULL automatically satisfies the unique constraint (NULL != NULL) +-- Using a hash of all the columns treats NULL's as the same as any other field. 
If we ever get to a point where we can require postgresql >= 15 then this can +-- be fixed by using UNIQUE CONSTRAINT NULLS NOT DISTINCT which treats NULL's as if they were ordinary values (NULL == NULL) +CREATE UNIQUE INDEX evm_log_poller_filters_name_chain_address_event_topics_key ON evm.log_poller_filters (hash_record_extended((name, evm_chain_id, address, event, topic2, topic3, topic4), 0)); + +ALTER TABLE evm.log_poller_filters + DROP CONSTRAINT evm_log_poller_filters_name_evm_chain_id_address_event_key; + +-- +goose Down + +ALTER TABLE evm.log_poller_filters + ADD CONSTRAINT evm_log_poller_filters_name_evm_chain_id_address_event_key UNIQUE (name, evm_chain_id, address, event); +DROP INDEX IF EXISTS evm_log_poller_filters_name_chain_address_event_topics_key; + +ALTER TABLE evm.log_poller_filters + DROP COLUMN topic2, + DROP COLUMN topic3, + DROP COLUMN topic4, + DROP COLUMN max_logs_kept, + DROP COLUMN logs_per_block; From 18a004c55738198c8eb5fa828fd4fcacf86a4411 Mon Sep 17 00:00:00 2001 From: Jim W Date: Thu, 22 Feb 2024 15:42:31 -0500 Subject: [PATCH 5/6] change method signature so that it returns a transaction rather than replacing inplace (#12124) --- common/txmgr/broadcaster.go | 4 +-- common/txmgr/types/mocks/tx_store.go | 28 +++++++++++++++------ common/txmgr/types/tx_store.go | 2 +- core/chains/evm/txmgr/evm_tx_store.go | 9 +++++-- core/chains/evm/txmgr/evm_tx_store_test.go | 8 +++--- core/chains/evm/txmgr/mocks/evm_tx_store.go | 28 +++++++++++++++------ 6 files changed, 54 insertions(+), 25 deletions(-) diff --git a/common/txmgr/broadcaster.go b/common/txmgr/broadcaster.go index 9f2204f37e2..4c59a950ac1 100644 --- a/common/txmgr/broadcaster.go +++ b/common/txmgr/broadcaster.go @@ -707,8 +707,8 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) hand func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) nextUnstartedTransactionWithSequence(fromAddress ADDR) (*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { ctx, cancel := eb.chStop.NewCtx() defer cancel() - etx := &txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]{} - if err := eb.txStore.FindNextUnstartedTransactionFromAddress(ctx, etx, fromAddress, eb.chainID); err != nil { + etx, err := eb.txStore.FindNextUnstartedTransactionFromAddress(ctx, fromAddress, eb.chainID) + if err != nil { if errors.Is(err, sql.ErrNoRows) { // Finish. No more transactions left to process. Hoorah! 
return nil, nil diff --git a/common/txmgr/types/mocks/tx_store.go b/common/txmgr/types/mocks/tx_store.go index 353f398316d..814207d3986 100644 --- a/common/txmgr/types/mocks/tx_store.go +++ b/common/txmgr/types/mocks/tx_store.go @@ -280,22 +280,34 @@ func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindLatestS return r0, r1 } -// FindNextUnstartedTransactionFromAddress provides a mock function with given fields: ctx, etx, fromAddress, chainID -func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindNextUnstartedTransactionFromAddress(ctx context.Context, etx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], fromAddress ADDR, chainID CHAIN_ID) error { - ret := _m.Called(ctx, etx, fromAddress, chainID) +// FindNextUnstartedTransactionFromAddress provides a mock function with given fields: ctx, fromAddress, chainID +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindNextUnstartedTransactionFromAddress(ctx context.Context, fromAddress ADDR, chainID CHAIN_ID) (*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { + ret := _m.Called(ctx, fromAddress, chainID) if len(ret) == 0 { panic("no return value specified for FindNextUnstartedTransactionFromAddress") } - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], ADDR, CHAIN_ID) error); ok { - r0 = rf(ctx, etx, fromAddress, chainID) + var r0 *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ADDR, CHAIN_ID) (*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok { + return rf(ctx, fromAddress, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, ADDR, CHAIN_ID) *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok { + r0 = rf(ctx, fromAddress, chainID) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) + } } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, ADDR, CHAIN_ID) error); ok { + r1 = rf(ctx, fromAddress, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // FindTransactionsConfirmedInBlockRange provides a mock function with given fields: ctx, highBlockNumber, lowBlockNumber, chainID diff --git a/common/txmgr/types/tx_store.go b/common/txmgr/types/tx_store.go index 742a1740033..f061f0ea628 100644 --- a/common/txmgr/types/tx_store.go +++ b/common/txmgr/types/tx_store.go @@ -79,7 +79,7 @@ type TransactionStore[ FindTxWithIdempotencyKey(ctx context.Context, idempotencyKey string, chainID CHAIN_ID) (tx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) // Search for Tx using the fromAddress and sequence FindTxWithSequence(ctx context.Context, fromAddress ADDR, seq SEQ) (etx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) - FindNextUnstartedTransactionFromAddress(ctx context.Context, etx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], fromAddress ADDR, chainID CHAIN_ID) error + FindNextUnstartedTransactionFromAddress(ctx context.Context, fromAddress ADDR, chainID CHAIN_ID) (*Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) FindTransactionsConfirmedInBlockRange(ctx context.Context, highBlockNumber, lowBlockNumber int64, chainID CHAIN_ID) (etxs []*Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) FindEarliestUnconfirmedBroadcastTime(ctx context.Context, chainID CHAIN_ID) (null.Time, error) 
FindEarliestUnconfirmedTxAttemptBlock(ctx context.Context, chainID CHAIN_ID) (null.Int, error) diff --git a/core/chains/evm/txmgr/evm_tx_store.go b/core/chains/evm/txmgr/evm_tx_store.go index ae986acee27..364ee3f04d1 100644 --- a/core/chains/evm/txmgr/evm_tx_store.go +++ b/core/chains/evm/txmgr/evm_tx_store.go @@ -1545,15 +1545,20 @@ func (o *evmTxStore) SaveReplacementInProgressAttempt(ctx context.Context, oldAt } // Finds earliest saved transaction that has yet to be broadcast from the given address -func (o *evmTxStore) FindNextUnstartedTransactionFromAddress(ctx context.Context, etx *Tx, fromAddress common.Address, chainID *big.Int) error { +func (o *evmTxStore) FindNextUnstartedTransactionFromAddress(ctx context.Context, fromAddress common.Address, chainID *big.Int) (*Tx, error) { var cancel context.CancelFunc ctx, cancel = o.mergeContexts(ctx) defer cancel() qq := o.q.WithOpts(pg.WithParentCtx(ctx)) var dbEtx DbEthTx + etx := new(Tx) err := qq.Get(&dbEtx, `SELECT * FROM evm.txes WHERE from_address = $1 AND state = 'unstarted' AND evm_chain_id = $2 ORDER BY value ASC, created_at ASC, id ASC`, fromAddress, chainID.String()) dbEtx.ToTx(etx) - return pkgerrors.Wrap(err, "failed to FindNextUnstartedTransactionFromAddress") + if err != nil { + return nil, pkgerrors.Wrap(err, "failed to FindNextUnstartedTransactionFromAddress") + } + + return etx, nil } func (o *evmTxStore) UpdateTxFatalError(ctx context.Context, etx *Tx) error { diff --git a/core/chains/evm/txmgr/evm_tx_store_test.go b/core/chains/evm/txmgr/evm_tx_store_test.go index 35d684727d1..1e478e09b58 100644 --- a/core/chains/evm/txmgr/evm_tx_store_test.go +++ b/core/chains/evm/txmgr/evm_tx_store_test.go @@ -1261,16 +1261,16 @@ func TestORM_FindNextUnstartedTransactionFromAddress(t *testing.T) { t.Run("cannot find unstarted tx", func(t *testing.T) { mustInsertInProgressEthTxWithAttempt(t, txStore, 13, fromAddress) - resultEtx := new(txmgr.Tx) - err := txStore.FindNextUnstartedTransactionFromAddress(testutils.Context(t), resultEtx, fromAddress, ethClient.ConfiguredChainID()) + resultEtx, err := txStore.FindNextUnstartedTransactionFromAddress(testutils.Context(t), fromAddress, ethClient.ConfiguredChainID()) assert.ErrorIs(t, err, sql.ErrNoRows) + assert.Nil(t, resultEtx) }) t.Run("finds unstarted tx", func(t *testing.T) { mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID) - resultEtx := new(txmgr.Tx) - err := txStore.FindNextUnstartedTransactionFromAddress(testutils.Context(t), resultEtx, fromAddress, ethClient.ConfiguredChainID()) + resultEtx, err := txStore.FindNextUnstartedTransactionFromAddress(testutils.Context(t), fromAddress, ethClient.ConfiguredChainID()) require.NoError(t, err) + assert.NotNil(t, resultEtx) }) } diff --git a/core/chains/evm/txmgr/mocks/evm_tx_store.go b/core/chains/evm/txmgr/mocks/evm_tx_store.go index 9690bf9728d..9f1af016fea 100644 --- a/core/chains/evm/txmgr/mocks/evm_tx_store.go +++ b/core/chains/evm/txmgr/mocks/evm_tx_store.go @@ -283,22 +283,34 @@ func (_m *EvmTxStore) FindLatestSequence(ctx context.Context, fromAddress common return r0, r1 } -// FindNextUnstartedTransactionFromAddress provides a mock function with given fields: ctx, etx, fromAddress, chainID -func (_m *EvmTxStore) FindNextUnstartedTransactionFromAddress(ctx context.Context, etx *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], fromAddress common.Address, chainID *big.Int) error { - ret := _m.Called(ctx, etx, fromAddress, chainID) +// 
FindNextUnstartedTransactionFromAddress provides a mock function with given fields: ctx, fromAddress, chainID +func (_m *EvmTxStore) FindNextUnstartedTransactionFromAddress(ctx context.Context, fromAddress common.Address, chainID *big.Int) (*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, fromAddress, chainID) if len(ret) == 0 { panic("no return value specified for FindNextUnstartedTransactionFromAddress") } - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], common.Address, *big.Int) error); ok { - r0 = rf(ctx, etx, fromAddress, chainID) + var r0 *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) (*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, fromAddress, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, fromAddress, chainID) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, fromAddress, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // FindTransactionsConfirmedInBlockRange provides a mock function with given fields: ctx, highBlockNumber, lowBlockNumber, chainID From 16f1b785ca6dfa81ddacfb871e92a1dfa9823040 Mon Sep 17 00:00:00 2001 From: Chris Cushman <104409744+vreff@users.noreply.github.com> Date: Thu, 22 Feb 2024 16:24:50 -0500 Subject: [PATCH 6/6] [VRF-887] Add NativePayment() to VRFCoordinator_2x_Interface and integ tests (#12133) --- core/services/vrf/v2/coordinator_v2x_interface.go | 1 + core/services/vrf/v2/integration_v2_test.go | 1 + 2 files changed, 2 insertions(+) diff --git a/core/services/vrf/v2/coordinator_v2x_interface.go b/core/services/vrf/v2/coordinator_v2x_interface.go index 05a9e5d8918..c99576d7558 100644 --- a/core/services/vrf/v2/coordinator_v2x_interface.go +++ b/core/services/vrf/v2/coordinator_v2x_interface.go @@ -656,6 +656,7 @@ type RandomWordsFulfilled interface { SubID() *big.Int Payment() *big.Int Raw() types.Log + NativePayment() bool } func NewV2RandomWordsFulfilled(event *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled) RandomWordsFulfilled { diff --git a/core/services/vrf/v2/integration_v2_test.go b/core/services/vrf/v2/integration_v2_test.go index 39acc3da3e5..5114015c008 100644 --- a/core/services/vrf/v2/integration_v2_test.go +++ b/core/services/vrf/v2/integration_v2_test.go @@ -751,6 +751,7 @@ func assertRandomWordsFulfilled( for filter.Next() { require.Equal(t, expectedSuccess, filter.Event().Success(), "fulfillment event success not correct, expected: %+v, actual: %+v", expectedSuccess, filter.Event().Success()) require.Equal(t, requestID, filter.Event().RequestID()) + require.Equal(t, nativePayment, filter.Event().NativePayment()) found = true rwfe = filter.Event() }
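Note on the log poller change in PATCH 4/6: the new logpoller.Filter fields (Topic2, Topic3, Topic4, MaxLogsKept, LogsPerBlock) are set by name rather than by position, which is why every RegisterFilter call site above was rewritten as a keyed struct literal. Below is a minimal sketch of the new style, for illustration only. The package name, function name, topic value and limits are placeholder assumptions, and it assumes the logpoller package's LogPoller interface exposes RegisterFilter as it is used in the tests above.

package example

import (
	"time"

	"github.com/ethereum/go-ethereum/common"

	"github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
	evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
)

// registerExampleFilter registers a filter using the keyed-field style adopted above:
// EventSigs is topic1 (the event signature), Topic2..Topic4 constrain the remaining
// indexed topics, and MaxLogsKept / LogsPerBlock default to 0 ("unlimited") when omitted.
func registerExampleFilter(lp logpoller.LogPoller, emitter common.Address, log1 common.Hash) error {
	return lp.RegisterFilter(logpoller.Filter{
		Name:         "example filter", // see FilterName for the suggested naming convention
		Addresses:    evmtypes.AddressArray{emitter},
		EventSigs:    evmtypes.HashArray{log1},
		Topic2:       evmtypes.HashArray{common.HexToHash("0x2222")}, // only match logs whose topic2 is this value
		Retention:    24 * time.Hour, // prune matching logs older than this
		MaxLogsKept:  1_000_000,      // keep at most this many logs (0 = unlimited)
		LogsPerBlock: 10,             // per-block rate limit (0 = unlimited)
	})
}

Keyed literals also let optional fields such as MaxLogsKept and LogsPerBlock be added without touching existing call sites, since an omitted field is zero, which the struct comments and the ORM treat as unlimited.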