diff --git a/.github/workflows/helm-publish.yml b/.github/workflows/helm-publish.yml new file mode 100644 index 00000000000..8a14ff1a7e6 --- /dev/null +++ b/.github/workflows/helm-publish.yml @@ -0,0 +1,18 @@ +name: Helm Publish + +on: + pull_request: + types: [ labeled ] + +jobs: + helm_release: + if: ${{ github.event.label.name == 'helm_release' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - name: Release helm chart + uses: J12934/helm-gh-pages-action@master + with: + charts-folder: charts + deploy-branch: helm-release + access-token: ${{ secrets.HELM_PUSH_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/lint-gh-workflows.yml b/.github/workflows/lint-gh-workflows.yml index cc7b44bc284..66c69420606 100644 --- a/.github/workflows/lint-gh-workflows.yml +++ b/.github/workflows/lint-gh-workflows.yml @@ -9,7 +9,7 @@ jobs: - name: Check out Code uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 - name: Run actionlint - uses: reviewdog/action-actionlint@67ec075cacebd361442f6e3ef7671f74c6548909 # v1.38.0 + uses: reviewdog/action-actionlint@17ea0452ae2cd009a22ca629732a9ce7f49a55e6 # v1.39.0 - name: Collect Metrics if: always() id: collect-gha-metrics diff --git a/charts/chainlink-cluster/Chart.yaml b/charts/chainlink-cluster/Chart.yaml index 6b64d718717..bfea29c82ec 100644 --- a/charts/chainlink-cluster/Chart.yaml +++ b/charts/chainlink-cluster/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 name: chainlink-cluster description: Chainlink nodes cluster -version: 0.1.0 -appVersion: '0.1.0' \ No newline at end of file +version: 0.1.3 +appVersion: '2.6.0' \ No newline at end of file diff --git a/charts/chainlink-cluster/README.md b/charts/chainlink-cluster/README.md index 179e51f2fa8..f7d4c45fa5f 100644 --- a/charts/chainlink-cluster/README.md +++ b/charts/chainlink-cluster/README.md @@ -1,7 +1,12 @@ # Chainlink cluster Example CL nodes cluster for system level tests -Enter the shell +Install `kubefwd` (no nixpkg for it yet, planned) +``` +brew install txn2/tap/kubefwd +``` + +Enter the shell (from the root project dir) ``` nix develop ``` @@ -20,9 +25,6 @@ export DEVSPACE_IMAGE="${aws_account}.dkr.ecr.us-west-2.amazonaws.com/chainlink- ``` Enter the shell and deploy ``` -nix develop -cd charts/chainlink-cluster - # set your unique namespace if it's a new cluster devspace use namespace cl-cluster devspace deploy @@ -76,11 +78,36 @@ After that all the changes will be synced automatically Check `.profiles` to understand what is uploaded in profiles `runner` and `node` # Helm -If you would like to use `helm` directly, please uncomment data in `values.yaml` -## Install +If you would like to use `helm` directly, please uncomment data in `values-raw-helm.yaml` +## Install from local files ``` helm install -f values-raw-helm.yaml cl-cluster . 
``` +Forward all apps (in another terminal) +``` +sudo kubefwd svc +``` +Then you can connect and run your tests + +## Install from release +Add the repository +``` +helm repo add chainlink-cluster https://raw.githubusercontent.com/smartcontractkit/chainlink/helm-release/ +helm repo update +``` +Set default namespace +``` +kubectl create ns cl-cluster +kubectl config set-context --current --namespace cl-cluster +``` + +Install +``` +helm install -f values-raw-helm.yaml cl-cluster chainlink-cluster/chainlink-cluster --version v0.1.2 +``` + +## Create a new release +Bump the version in `Chart.yaml`, add your changes, and add the `helm_release` label to any PR to trigger a release ## Helm Test ``` diff --git a/charts/chainlink-cluster/devspace.yaml b/charts/chainlink-cluster/devspace.yaml index 63b6f112fec..54b5f9f01e9 100644 --- a/charts/chainlink-cluster/devspace.yaml +++ b/charts/chainlink-cluster/devspace.yaml @@ -43,6 +43,7 @@ deployments: image: ${DEVSPACE_IMAGE} stateful: false geth: + version: v1.12.0 wsrpc-port: 8546 httprpc-port: 8544 networkid: 1337 diff --git a/charts/chainlink-cluster/templates/chainlink-deployment.yaml b/charts/chainlink-cluster/templates/chainlink-deployment.yaml index 3ab1edac602..16665916f59 100644 --- a/charts/chainlink-cluster/templates/chainlink-deployment.yaml +++ b/charts/chainlink-cluster/templates/chainlink-deployment.yaml @@ -47,7 +47,7 @@ spec: name: {{ $.Release.Name }}-{{ $cfg.name }}-cm containers: - name: chainlink-db - image: {{ default "postgres" $.Values.db.image }}:{{ default "11.15" $.Values.db.version }} + image: {{ default "postgres:11.15" $.Values.db.image }} command: - docker-entrypoint.sh args: @@ -164,15 +164,15 @@ spec: limits: memory: {{ default "1024Mi" $.Values.chainlink.resources.limits.memory }} cpu: {{ default "500m" $.Values.chainlink.resources.limits.cpu }} - {{- with $.Values.nodeSelector }} {{ else }} {{ end }} +{{- with $.Values.nodeSelector }} nodeSelector: -{{ toYaml . | indent 8 }} + {{ toYaml . | indent 8 }} {{- end }} {{- with $.Values.affinity }} affinity: -{{ toYaml . | indent 8 }} + {{ toYaml . | indent 8 }} {{- end }} {{- with $.Values.tolerations }} tolerations: diff --git a/charts/chainlink-cluster/templates/geth-deployment.yaml b/charts/chainlink-cluster/templates/geth-deployment.yaml index 72c20892109..11fb0cbee22 100644 --- a/charts/chainlink-cluster/templates/geth-deployment.yaml +++ b/charts/chainlink-cluster/templates/geth-deployment.yaml @@ -4,7 +4,6 @@ kind: Deployment metadata: name: geth spec: - replicas: {{ .Values.replicas }} selector: matchLabels: app: geth @@ -102,11 +101,11 @@ spec: {{ end }} {{- with .Values.nodeSelector }} nodeSelector: -{{ toYaml . | indent 8 }} + {{ toYaml . | indent 8 }} {{- end }} {{- with .Values.affinity }} affinity: -{{ toYaml . | indent 8 }} + {{ toYaml . | indent 8 }} {{- end }} {{- with .Values.tolerations }} tolerations: diff --git a/charts/chainlink-cluster/templates/mockserver.yaml b/charts/chainlink-cluster/templates/mockserver.yaml index 998687790ba..96f9582435f 100755 --- a/charts/chainlink-cluster/templates/mockserver.yaml +++ b/charts/chainlink-cluster/templates/mockserver.yaml @@ -43,19 +43,18 @@ spec: limits: memory: {{ default "1024Mi" $.Values.chainlink.resources.limits.memory }} cpu: {{ default "500m" $.Values.chainlink.resources.limits.cpu }} - {{- with $.Values.nodeSelector }} {{ else }} {{ end }} -{{- with .Values.nodeSelector }} + {{- with .Values.nodeSelector }} nodeSelector: -{{ toYaml . | indent 8 }} -{{- end }} -{{- with .Values.affinity }} + {{ toYaml . 
| indent 8 }} + {{- end }} + {{- with .Values.affinity }} affinity: -{{ toYaml . | indent 8 }} -{{- end }} -{{- with .Values.tolerations }} + {{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} tolerations: -{{ toYaml . | indent 8 }} -{{- end }} - {{ end }} \ No newline at end of file + {{ toYaml . | indent 8 }} + {{- end }} +--- \ No newline at end of file diff --git a/charts/chainlink-cluster/templates/runner-deployment.yaml b/charts/chainlink-cluster/templates/runner-deployment.yaml index 41b24a770f5..5d9025b41c5 100644 --- a/charts/chainlink-cluster/templates/runner-deployment.yaml +++ b/charts/chainlink-cluster/templates/runner-deployment.yaml @@ -49,9 +49,9 @@ spec: limits: memory: {{ default "1024Mi" $.Values.runner.resources.limits.memory }} cpu: {{ default "500m" $.Values.runner.resources.limits.cpu }} - {{- with $.Values.nodeSelector }} {{ else }} {{ end }} +{{- with $.Values.nodeSelector }} nodeSelector: {{ toYaml . | indent 8 }} {{- end }} diff --git a/charts/chainlink-cluster/values-raw-helm.yaml b/charts/chainlink-cluster/values-raw-helm.yaml index cd1bf8503eb..006515f0a33 100644 --- a/charts/chainlink-cluster/values-raw-helm.yaml +++ b/charts/chainlink-cluster/values-raw-helm.yaml @@ -14,16 +14,48 @@ chainlink: p2p_port: 8090 nodes: - name: node-1 + image: "public.ecr.aws/chainlink/chainlink:latest" # override default config per node - #toml: | - # [Log] - # JSONConsole = true - # override image and a tag - # image: public.ecr.aws/chainlink/chainlink - # version: latest + # for example, use OCRv2 P2P setup, the whole config +# toml: | +# RootDir = './clroot' +# [Log] +# JSONConsole = true +# Level = 'debug' +# [WebServer] +# AllowOrigins = '*' +# SecureCookies = false +# SessionTimeout = '999h0m0s' +# [OCR2] +# Enabled = true +# [P2P] +# [P2P.V2] +# Enabled = false +# AnnounceAddresses = [] +# DefaultBootstrappers = [] +# DeltaDial = '15s' +# DeltaReconcile = '1m0s' +# ListenAddresses = [] +# [[EVM]] +# ChainID = '1337' +# MinContractPayment = '0' +# [[EVM.Nodes]] +# Name = 'node-0' +# WSURL = 'ws://geth:8546' +# HTTPURL = 'http://geth:8544' +# [WebServer.TLS] +# HTTPSPort = 0 - name: node-2 - name: node-3 - name: node-4 + resources: + requests: + cpu: 350m + memory: 1024Mi + limits: + cpu: 350m + memory: 1024Mi + # each CL node have a dedicated PostgreSQL 11.15 # use StatefulSet by setting: # @@ -33,24 +65,53 @@ chainlink: # if you are running long tests db: stateful: false + resources: + requests: + cpu: 1 + memory: 1024Mi + limits: + cpu: 1 + memory: 1024Mi # default cluster shipped with latest Geth ( dev mode by default ) geth: + version: v1.12.0 wsrpc-port: 8546 httprpc-port: 8544 networkid: 1337 blocktime: 1 + resources: + requests: + cpu: 1 + memory: 1024Mi + limits: + cpu: 1 + memory: 1024Mi # mockserver is https://www.mock-server.com/where/kubernetes.html # used to stub External Adapters mockserver: port: 1080 + resources: + requests: + cpu: 1 + memory: 1024Mi + limits: + cpu: 1 + memory: 1024Mi runner: stateful: false + resources: + requests: + cpu: 1 + memory: 512Mi + limits: + cpu: 1 + memory: 512Mi # monitoring.coreos.com/v1 PodMonitor for each node prometheusMonitor: false # deployment placement, standard helm stuff -podAnnotations: { } -nodeSelector: { } -tolerations: [ ] -affinity: { } +podAnnotations: +nodeSelector: +tolerations: +affinity: diff --git a/core/chains/evm/config/toml/defaults.go b/core/chains/evm/config/toml/defaults.go index 8c32b81301d..239a97f585b 100644 --- a/core/chains/evm/config/toml/defaults.go +++ 
b/core/chains/evm/config/toml/defaults.go @@ -164,7 +164,8 @@ func (c *Chain) SetFrom(f *Chain) { c.GasEstimator.setFrom(&f.GasEstimator) if ks := f.KeySpecific; ks != nil { - for _, v := range ks { + for i := range ks { + v := ks[i] if i := slices.IndexFunc(c.KeySpecific, func(k KeySpecific) bool { return k.Key == v.Key }); i == -1 { c.KeySpecific = append(c.KeySpecific, v) } else { diff --git a/core/chains/evm/txmgr/confirmer_test.go b/core/chains/evm/txmgr/confirmer_test.go index 555ea09ff3a..e0070e35b17 100644 --- a/core/chains/evm/txmgr/confirmer_test.go +++ b/core/chains/evm/txmgr/confirmer_test.go @@ -1399,7 +1399,8 @@ func TestEthConfirmer_FindTxsRequiringRebroadcast(t *testing.T) { etx1 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) nonce++ attempt1_1 := etx1.TxAttempts[0] - dbAttempt := txmgr.DbEthTxAttemptFromEthTxAttempt(&attempt1_1) + var dbAttempt txmgr.DbEthTxAttempt + dbAttempt.FromTxAttempt(&attempt1_1) require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, tooNew, attempt1_1.ID)) attempt1_2 := newBroadcastLegacyEthTxAttempt(t, etx1.ID) attempt1_2.BroadcastBeforeBlockNum = &onTheMoney @@ -1416,7 +1417,8 @@ func TestEthConfirmer_FindTxsRequiringRebroadcast(t *testing.T) { etx2 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) nonce++ attempt2_1 := etx2.TxAttempts[0] - dbAttempt = txmgr.DbEthTxAttemptFromEthTxAttempt(&attempt2_1) + dbAttempt = txmgr.DbEthTxAttempt{} + dbAttempt.FromTxAttempt(&attempt2_1) require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, tooNew, attempt2_1.ID)) t.Run("returns nothing when the transaction has attempts that are too new", func(t *testing.T) { @@ -1463,13 +1465,15 @@ func TestEthConfirmer_FindTxsRequiringRebroadcast(t *testing.T) { etx3 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) nonce++ attempt3_1 := etx3.TxAttempts[0] - dbAttempt = txmgr.DbEthTxAttemptFromEthTxAttempt(&attempt3_1) + dbAttempt = txmgr.DbEthTxAttempt{} + dbAttempt.FromTxAttempt(&attempt3_1) require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt3_1.ID)) // NOTE: It should ignore qualifying eth_txes from a different address etxOther := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 0, otherAddress) attemptOther1 := etxOther.TxAttempts[0] - dbAttempt = txmgr.DbEthTxAttemptFromEthTxAttempt(&attemptOther1) + dbAttempt = txmgr.DbEthTxAttempt{} + dbAttempt.FromTxAttempt(&attemptOther1) require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attemptOther1.ID)) t.Run("returns the transaction if it is unconfirmed with an attempt that is older than gasBumpThreshold blocks", func(t *testing.T) { @@ -1519,14 +1523,16 @@ func TestEthConfirmer_FindTxsRequiringRebroadcast(t *testing.T) { etx4 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) nonce++ attempt4_1 := etx4.TxAttempts[0] - dbAttempt = txmgr.DbEthTxAttemptFromEthTxAttempt(&attemptOther1) + dbAttempt = txmgr.DbEthTxAttempt{} + dbAttempt.FromTxAttempt(&attempt4_1) require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt4_1.ID)) t.Run("ignores 
pending transactions for another key", func(t *testing.T) { // Re-use etx3 nonce for another key, it should not affect the results for this key etxOther := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, (*etx3.Sequence).Int64(), otherAddress) aOther := etxOther.TxAttempts[0] - dbAttempt = txmgr.DbEthTxAttemptFromEthTxAttempt(&aOther) + dbAttempt = txmgr.DbEthTxAttempt{} + dbAttempt.FromTxAttempt(&aOther) require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, aOther.ID)) etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 6, 0, &cltest.FixtureChainID) @@ -1659,7 +1665,8 @@ func TestEthConfirmer_RebroadcastWhereNecessary_WithConnectivityCheck(t *testing etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress, originalBroadcastAt) attempt1 := etx.TxAttempts[0] - dbAttempt := txmgr.DbEthTxAttemptFromEthTxAttempt(&attempt1) + var dbAttempt txmgr.DbEthTxAttempt + dbAttempt.FromTxAttempt(&attempt1) require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt1.ID)) // Send transaction and assume success. @@ -1703,7 +1710,8 @@ func TestEthConfirmer_RebroadcastWhereNecessary_WithConnectivityCheck(t *testing etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastDynamicFeeAttempt(t, txStore, nonce, fromAddress, originalBroadcastAt) attempt1 := etx.TxAttempts[0] - dbAttempt := txmgr.DbEthTxAttemptFromEthTxAttempt(&attempt1) + var dbAttempt txmgr.DbEthTxAttempt + dbAttempt.FromTxAttempt(&attempt1) require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt1.ID)) // Send transaction and assume success. 
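(For context on the txmgr refactor these test hunks exercise: the package-level conversion functions `DbEthTxAttemptFromEthTxAttempt`/`DbEthTxAttemptToEthTxAttempt` are replaced by methods on the DB structs, so callers now declare the DB value and convert in place. A minimal sketch of the new round-trip pattern, assuming this repo's `txmgr` import path and eliding the SQL the tests run between the two conversions:)

```go
package example

// Import path assumed for illustration.
import "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"

// roundTrip sketches the method-based conversion the tests above now use in
// place of the removed package-level conversion functions.
func roundTrip(attempt *txmgr.TxAttempt) {
	var dbAttempt txmgr.DbEthTxAttempt
	dbAttempt.FromTxAttempt(attempt) // copy domain fields into the DB struct
	// ... execute `UPDATE evm.tx_attempts ... RETURNING *` into &dbAttempt here ...
	dbAttempt.ToTxAttempt(attempt) // copy DB fields back onto the domain struct
}
```

(The same pattern applies to `DbEthTx` via `FromTx`/`ToTx`, as shown in the `evm_tx_store.go` hunks below.)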
@@ -1974,7 +1982,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) { require.Equal(t, txmgrtypes.TxAttemptBroadcast, etx.TxAttempts[3].State) }) - // Mark original tx as confirmed so we won't pick it up any more + // Mark original tx as confirmed, so we won't pick it up anymore pgtest.MustExec(t, db, `UPDATE evm.txes SET state = 'confirmed'`) etx2 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) @@ -2083,7 +2091,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) { assert.Equal(t, txmgrtypes.TxAttemptBroadcast, etx2.TxAttempts[2].State) }) - // Original tx is confirmed so we won't pick it up any more + // Original tx is confirmed, so we won't pick it up anymore etx3 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) nonce++ attempt3_1 := etx3.TxAttempts[0] @@ -2214,7 +2222,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) { ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { return evmtypes.Nonce(tx.Nonce()) == *etx3.Sequence && gasPrice.Cmp(tx.GasPrice()) == 0 - }), fromAddress).Return(clienttypes.Successful, errors.New("already known")).Once() // we already submitted at this price, now its time to bump and submit again but since we simply resubmitted rather than increasing gas price, geth already knows about this tx + }), fromAddress).Return(clienttypes.Successful, errors.New("already known")).Once() // we already submitted at this price, now it's time to bump and submit again but since we simply resubmitted rather than increasing gas price, geth already knows about this tx // Do the thing require.NoError(t, ec2.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) @@ -2245,7 +2253,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) { ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { return evmtypes.Nonce(tx.Nonce()) == *etx3.Sequence && gasPrice.Cmp(tx.GasPrice()) == 0 - }), fromAddress).Return(clienttypes.Successful, errors.New("already known")).Once() // we already submitted at this price, now its time to bump and submit again but since we simply resubmitted rather than increasing gas price, geth already knows about this tx + }), fromAddress).Return(clienttypes.Successful, errors.New("already known")).Once() // we already submitted at this price, now it's time to bump and submit again but since we simply resubmitted rather than increasing gas price, geth already knows about this tx // Do the thing require.NoError(t, ec2.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) @@ -2430,7 +2438,8 @@ func TestEthConfirmer_RebroadcastWhereNecessary_TerminallyUnderpriced_ThenGoesTh etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) nonce++ legacyAttempt := etx.TxAttempts[0] - dbAttempt := txmgr.DbEthTxAttemptFromEthTxAttempt(&legacyAttempt) + var dbAttempt txmgr.DbEthTxAttempt + dbAttempt.FromTxAttempt(&legacyAttempt) require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, legacyAttempt.ID)) // Fail a few times with terminally underpriced @@ -2462,7 +2471,8 @@ func TestEthConfirmer_RebroadcastWhereNecessary_TerminallyUnderpriced_ThenGoesTh etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastDynamicFeeAttempt(t, txStore, nonce, fromAddress) nonce++ dxFeeAttempt := etx.TxAttempts[0] - dbAttempt 
:= txmgr.DbEthTxAttemptFromEthTxAttempt(&dxFeeAttempt) + var dbAttempt txmgr.DbEthTxAttempt + dbAttempt.FromTxAttempt(&dxFeeAttempt) require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, dxFeeAttempt.ID)) // Fail a few times with terminally underpriced @@ -2513,7 +2523,8 @@ func TestEthConfirmer_RebroadcastWhereNecessary_WhenOutOfEth(t *testing.T) { etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) nonce++ attempt1_1 := etx.TxAttempts[0] - dbAttempt := txmgr.DbEthTxAttemptFromEthTxAttempt(&attempt1_1) + var dbAttempt txmgr.DbEthTxAttempt + dbAttempt.FromTxAttempt(&attempt1_1) require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt1_1.ID)) var attempt1_2 txmgr.TxAttempt diff --git a/core/chains/evm/txmgr/evm_tx_store.go b/core/chains/evm/txmgr/evm_tx_store.go index 4585d868603..52cd50cba32 100644 --- a/core/chains/evm/txmgr/evm_tx_store.go +++ b/core/chains/evm/txmgr/evm_tx_store.go @@ -182,66 +182,62 @@ type DbEthTx struct { InitialBroadcastAt *time.Time } -func DbEthTxFromEthTx(ethTx *Tx) DbEthTx { - tx := DbEthTx{ - ID: ethTx.ID, - FromAddress: ethTx.FromAddress, - ToAddress: ethTx.ToAddress, - EncodedPayload: ethTx.EncodedPayload, - Value: assets.Eth(ethTx.Value), - GasLimit: ethTx.FeeLimit, - Error: ethTx.Error, - BroadcastAt: ethTx.BroadcastAt, - CreatedAt: ethTx.CreatedAt, - State: ethTx.State, - Meta: ethTx.Meta, - Subject: ethTx.Subject, - PipelineTaskRunID: ethTx.PipelineTaskRunID, - MinConfirmations: ethTx.MinConfirmations, - TransmitChecker: ethTx.TransmitChecker, - InitialBroadcastAt: ethTx.InitialBroadcastAt, - } - - if ethTx.ChainID != nil { - tx.EVMChainID = *utils.NewBig(ethTx.ChainID) - } - if ethTx.Sequence != nil { - n := ethTx.Sequence.Int64() - tx.Nonce = &n - } - - return tx -} - -func DbEthTxToEthTx(dbEthTx DbEthTx, evmEthTx *Tx) { - evmEthTx.ID = dbEthTx.ID - if dbEthTx.Nonce != nil { - n := evmtypes.Nonce(*dbEthTx.Nonce) - evmEthTx.Sequence = &n - } - evmEthTx.IdempotencyKey = dbEthTx.IdempotencyKey - evmEthTx.FromAddress = dbEthTx.FromAddress - evmEthTx.ToAddress = dbEthTx.ToAddress - evmEthTx.EncodedPayload = dbEthTx.EncodedPayload - evmEthTx.Value = *dbEthTx.Value.ToInt() - evmEthTx.FeeLimit = dbEthTx.GasLimit - evmEthTx.Error = dbEthTx.Error - evmEthTx.BroadcastAt = dbEthTx.BroadcastAt - evmEthTx.CreatedAt = dbEthTx.CreatedAt - evmEthTx.State = dbEthTx.State - evmEthTx.Meta = dbEthTx.Meta - evmEthTx.Subject = dbEthTx.Subject - evmEthTx.PipelineTaskRunID = dbEthTx.PipelineTaskRunID - evmEthTx.MinConfirmations = dbEthTx.MinConfirmations - evmEthTx.ChainID = dbEthTx.EVMChainID.ToInt() - evmEthTx.TransmitChecker = dbEthTx.TransmitChecker - evmEthTx.InitialBroadcastAt = dbEthTx.InitialBroadcastAt +func (db *DbEthTx) FromTx(tx *Tx) { + db.ID = tx.ID + db.FromAddress = tx.FromAddress + db.ToAddress = tx.ToAddress + db.EncodedPayload = tx.EncodedPayload + db.Value = assets.Eth(tx.Value) + db.GasLimit = tx.FeeLimit + db.Error = tx.Error + db.BroadcastAt = tx.BroadcastAt + db.CreatedAt = tx.CreatedAt + db.State = tx.State + db.Meta = tx.Meta + db.Subject = tx.Subject + db.PipelineTaskRunID = tx.PipelineTaskRunID + db.MinConfirmations = tx.MinConfirmations + db.TransmitChecker = tx.TransmitChecker + db.InitialBroadcastAt = tx.InitialBroadcastAt + + if tx.ChainID != nil { + db.EVMChainID = *utils.NewBig(tx.ChainID) + } + if tx.Sequence != nil { + n := 
tx.Sequence.Int64() + db.Nonce = &n + } +} + +func (db DbEthTx) ToTx(tx *Tx) { + tx.ID = db.ID + if db.Nonce != nil { + n := evmtypes.Nonce(*db.Nonce) + tx.Sequence = &n + } + tx.IdempotencyKey = db.IdempotencyKey + tx.FromAddress = db.FromAddress + tx.ToAddress = db.ToAddress + tx.EncodedPayload = db.EncodedPayload + tx.Value = *db.Value.ToInt() + tx.FeeLimit = db.GasLimit + tx.Error = db.Error + tx.BroadcastAt = db.BroadcastAt + tx.CreatedAt = db.CreatedAt + tx.State = db.State + tx.Meta = db.Meta + tx.Subject = db.Subject + tx.PipelineTaskRunID = db.PipelineTaskRunID + tx.MinConfirmations = db.MinConfirmations + tx.ChainID = db.EVMChainID.ToInt() + tx.TransmitChecker = db.TransmitChecker + tx.InitialBroadcastAt = db.InitialBroadcastAt } func dbEthTxsToEvmEthTxs(dbEthTxs []DbEthTx) []Tx { evmEthTxs := make([]Tx, len(dbEthTxs)) for i, dbTx := range dbEthTxs { - DbEthTxToEthTx(dbTx, &evmEthTxs[i]) + dbTx.ToTx(&evmEthTxs[i]) } return evmEthTxs } @@ -249,7 +245,7 @@ func dbEthTxsToEvmEthTxs(dbEthTxs []DbEthTx) []Tx { func dbEthTxsToEvmEthTxPtrs(dbEthTxs []DbEthTx, evmEthTxs []*Tx) { for i, dbTx := range dbEthTxs { evmEthTxs[i] = &Tx{} - DbEthTxToEthTx(dbTx, evmEthTxs[i]) + dbTx.ToTx(evmEthTxs[i]) } } @@ -270,29 +266,25 @@ type DbEthTxAttempt struct { GasFeeCap *assets.Wei } -func DbEthTxAttemptFromEthTxAttempt(ethTxAttempt *TxAttempt) DbEthTxAttempt { - dbTx := DbEthTxAttempt{ - ID: ethTxAttempt.ID, - EthTxID: ethTxAttempt.TxID, - GasPrice: ethTxAttempt.TxFee.Legacy, - SignedRawTx: ethTxAttempt.SignedRawTx, - Hash: ethTxAttempt.Hash, - BroadcastBeforeBlockNum: ethTxAttempt.BroadcastBeforeBlockNum, - CreatedAt: ethTxAttempt.CreatedAt, - ChainSpecificGasLimit: ethTxAttempt.ChainSpecificFeeLimit, - TxType: ethTxAttempt.TxType, - GasTipCap: ethTxAttempt.TxFee.DynamicTipCap, - GasFeeCap: ethTxAttempt.TxFee.DynamicFeeCap, - } +func (db *DbEthTxAttempt) FromTxAttempt(attempt *TxAttempt) { + db.ID = attempt.ID + db.EthTxID = attempt.TxID + db.GasPrice = attempt.TxFee.Legacy + db.SignedRawTx = attempt.SignedRawTx + db.Hash = attempt.Hash + db.BroadcastBeforeBlockNum = attempt.BroadcastBeforeBlockNum + db.CreatedAt = attempt.CreatedAt + db.ChainSpecificGasLimit = attempt.ChainSpecificFeeLimit + db.TxType = attempt.TxType + db.GasTipCap = attempt.TxFee.DynamicTipCap + db.GasFeeCap = attempt.TxFee.DynamicFeeCap // handle state naming difference between generic + EVM - if ethTxAttempt.State == txmgrtypes.TxAttemptInsufficientFunds { - dbTx.State = "insufficient_eth" + if attempt.State == txmgrtypes.TxAttemptInsufficientFunds { + db.State = "insufficient_eth" } else { - dbTx.State = ethTxAttempt.State.String() + db.State = attempt.State.String() } - - return dbTx } func DbEthTxAttemptStateToTxAttemptState(state string) txmgrtypes.TxAttemptState { @@ -302,27 +294,27 @@ func DbEthTxAttemptStateToTxAttemptState(state string) txmgrtypes.TxAttemptState return txmgrtypes.NewTxAttemptState(state) } -func DbEthTxAttemptToEthTxAttempt(dbEthTxAttempt DbEthTxAttempt, evmAttempt *TxAttempt) { - evmAttempt.ID = dbEthTxAttempt.ID - evmAttempt.TxID = dbEthTxAttempt.EthTxID - evmAttempt.SignedRawTx = dbEthTxAttempt.SignedRawTx - evmAttempt.Hash = dbEthTxAttempt.Hash - evmAttempt.BroadcastBeforeBlockNum = dbEthTxAttempt.BroadcastBeforeBlockNum - evmAttempt.State = DbEthTxAttemptStateToTxAttemptState(dbEthTxAttempt.State) - evmAttempt.CreatedAt = dbEthTxAttempt.CreatedAt - evmAttempt.ChainSpecificFeeLimit = dbEthTxAttempt.ChainSpecificGasLimit - evmAttempt.TxType = dbEthTxAttempt.TxType - evmAttempt.TxFee = gas.EvmFee{ - 
Legacy: dbEthTxAttempt.GasPrice, - DynamicTipCap: dbEthTxAttempt.GasTipCap, - DynamicFeeCap: dbEthTxAttempt.GasFeeCap, +func (db DbEthTxAttempt) ToTxAttempt(attempt *TxAttempt) { + attempt.ID = db.ID + attempt.TxID = db.EthTxID + attempt.SignedRawTx = db.SignedRawTx + attempt.Hash = db.Hash + attempt.BroadcastBeforeBlockNum = db.BroadcastBeforeBlockNum + attempt.State = DbEthTxAttemptStateToTxAttemptState(db.State) + attempt.CreatedAt = db.CreatedAt + attempt.ChainSpecificFeeLimit = db.ChainSpecificGasLimit + attempt.TxType = db.TxType + attempt.TxFee = gas.EvmFee{ + Legacy: db.GasPrice, + DynamicTipCap: db.GasTipCap, + DynamicFeeCap: db.GasFeeCap, } } func dbEthTxAttemptsToEthTxAttempts(dbEthTxAttempt []DbEthTxAttempt) []TxAttempt { evmEthTxAttempt := make([]TxAttempt, len(dbEthTxAttempt)) for i, dbTxAttempt := range dbEthTxAttempt { - DbEthTxAttemptToEthTxAttempt(dbTxAttempt, &evmEthTxAttempt[i]) + dbTxAttempt.ToTxAttempt(&evmEthTxAttempt[i]) } return evmEthTxAttempt } @@ -379,7 +371,7 @@ func (o *evmTxStore) preloadTxAttempts(txs []Tx) error { for i, tx := range txs { if tx.ID == dbAttempt.EthTxID { var attempt TxAttempt - DbEthTxAttemptToEthTxAttempt(dbAttempt, &attempt) + dbAttempt.ToTxAttempt(&attempt) txs[i].TxAttempts = append(txs[i].TxAttempts, attempt) } } @@ -405,7 +397,7 @@ func (o *evmTxStore) PreloadTxes(attempts []TxAttempt, qopts ...pg.QOpt) error { } for _, dbEtx := range dbEthTxs { etx := ethTxM[dbEtx.ID] - DbEthTxToEthTx(dbEtx, &etx) + dbEtx.ToTx(&etx) ethTxM[etx.ID] = etx } for i, attempt := range attempts { @@ -475,7 +467,7 @@ func (o *evmTxStore) FindTxAttempt(hash common.Hash) (*TxAttempt, error) { } // reuse the preload var attempt TxAttempt - DbEthTxAttemptToEthTxAttempt(dbTxAttempt, &attempt) + dbTxAttempt.ToTxAttempt(&attempt) attempts := []TxAttempt{attempt} err := o.PreloadTxes(attempts) return &attempts[0], err @@ -502,7 +494,7 @@ func (o *evmTxStore) FindTxByHash(hash common.Hash) (*Tx, error) { }, pg.OptReadOnlyTx()) var etx Tx - DbEthTxToEthTx(dbEtx, &etx) + dbEtx.ToTx(&etx) return &etx, pkgerrors.Wrap(err, "FindEthTxByHash failed") } @@ -514,17 +506,19 @@ func (o *evmTxStore) InsertTx(etx *Tx) error { const insertEthTxSQL = `INSERT INTO evm.txes (nonce, from_address, to_address, encoded_payload, value, gas_limit, error, broadcast_at, initial_broadcast_at, created_at, state, meta, subject, pipeline_task_run_id, min_confirmations, evm_chain_id, transmit_checker) VALUES ( :nonce, :from_address, :to_address, :encoded_payload, :value, :gas_limit, :error, :broadcast_at, :initial_broadcast_at, :created_at, :state, :meta, :subject, :pipeline_task_run_id, :min_confirmations, :evm_chain_id, :transmit_checker ) RETURNING *` - dbTx := DbEthTxFromEthTx(etx) + var dbTx DbEthTx + dbTx.FromTx(etx) err := o.q.GetNamed(insertEthTxSQL, &dbTx, &dbTx) - DbEthTxToEthTx(dbTx, etx) + dbTx.ToTx(etx) return pkgerrors.Wrap(err, "InsertTx failed") } // InsertTxAttempt inserts a new txAttempt into the database func (o *evmTxStore) InsertTxAttempt(attempt *TxAttempt) error { - dbTxAttempt := DbEthTxAttemptFromEthTxAttempt(attempt) + var dbTxAttempt DbEthTxAttempt + dbTxAttempt.FromTxAttempt(attempt) err := o.q.GetNamed(insertIntoEthTxAttemptsQuery, &dbTxAttempt, &dbTxAttempt) - DbEthTxAttemptToEthTxAttempt(dbTxAttempt, attempt) + dbTxAttempt.ToTxAttempt(attempt) return pkgerrors.Wrap(err, "InsertTxAttempt failed") } @@ -548,7 +542,7 @@ func (o *evmTxStore) FindTxWithAttempts(etxID int64) (etx Tx, err error) { if err = tx.Get(&dbEtx, `SELECT * FROM evm.txes WHERE id = $1 ORDER BY 
created_at ASC, id ASC`, etxID); err != nil { return pkgerrors.Wrapf(err, "failed to find eth_tx with id %d", etxID) } - DbEthTxToEthTx(dbEtx, &etx) + dbEtx.ToTx(&etx) if err = o.LoadTxAttempts(&etx, pg.WithQueryer(tx)); err != nil { return pkgerrors.Wrapf(err, "failed to load evm.tx_attempts for eth_tx with id %d", etxID) } @@ -591,7 +585,7 @@ func (o *evmTxStore) LoadTxesAttempts(etxs []*Tx, qopts ...pg.QOpt) error { for _, dbAttempt := range dbTxAttempts { etx := ethTxesM[dbAttempt.EthTxID] var attempt TxAttempt - DbEthTxAttemptToEthTxAttempt(dbAttempt, &attempt) + dbAttempt.ToTxAttempt(&attempt) etx.TxAttempts = append(etx.TxAttempts, attempt) } return nil @@ -919,7 +913,7 @@ func (o *evmTxStore) FindTxWithIdempotencyKey(idempotencyKey string, chainID *bi return nil, pkgerrors.Wrap(err, "FindTxWithIdempotencyKey failed to load evm.txes") } etx = new(Tx) - DbEthTxToEthTx(dbEtx, etx) + dbEtx.ToTx(etx) return } @@ -934,7 +928,7 @@ SELECT * FROM evm.txes WHERE from_address = $1 AND nonce = $2 AND state IN ('con if err != nil { return pkgerrors.Wrap(err, "FindEthTxWithNonce failed to load evm.txes") } - DbEthTxToEthTx(dbEtx, etx) + dbEtx.ToTx(etx) err = o.LoadTxAttempts(etx, pg.WithQueryer(tx)) return pkgerrors.Wrap(err, "FindEthTxWithNonce failed to load evm.tx_attempts") }, pg.OptReadOnlyTx()) @@ -1008,7 +1002,8 @@ ORDER BY nonce ASC func saveAttemptWithNewState(q pg.Queryer, timeout time.Duration, logger logger.Logger, attempt TxAttempt, broadcastAt time.Time) error { ctx, cancel := context.WithTimeout(context.Background(), timeout) - dbAttempt := DbEthTxAttemptFromEthTxAttempt(&attempt) + var dbAttempt DbEthTxAttempt + dbAttempt.FromTxAttempt(&attempt) defer cancel() return pg.SqlxTransaction(ctx, q, logger, func(tx pg.Queryer) error { // In case of null broadcast_at (shouldn't happen) we don't want to @@ -1075,7 +1070,8 @@ func (o *evmTxStore) SaveInProgressAttempt(attempt *TxAttempt) error { if attempt.State != txmgrtypes.TxAttemptInProgress { return errors.New("SaveInProgressAttempt failed: attempt state must be in_progress") } - dbAttempt := DbEthTxAttemptFromEthTxAttempt(attempt) + var dbAttempt DbEthTxAttempt + dbAttempt.FromTxAttempt(attempt) // Insert is the usual mode because the attempt is new if attempt.ID == 0 { query, args, e := o.q.BindNamed(insertIntoEthTxAttemptsQuery, &dbAttempt) @@ -1083,7 +1079,7 @@ func (o *evmTxStore) SaveInProgressAttempt(attempt *TxAttempt) error { return pkgerrors.Wrap(e, "SaveInProgressAttempt failed to BindNamed") } e = o.q.Get(&dbAttempt, query, args...) - DbEthTxAttemptToEthTxAttempt(dbAttempt, attempt) + dbAttempt.ToTxAttempt(attempt) return pkgerrors.Wrap(e, "SaveInProgressAttempt failed to insert into evm.tx_attempts") } // Update only applies to case of insufficient eth and simply changes the state to in_progress @@ -1265,13 +1261,14 @@ func (o *evmTxStore) SaveReplacementInProgressAttempt(oldAttempt TxAttempt, repl if _, err := tx.Exec(`DELETE FROM evm.tx_attempts WHERE id=$1`, oldAttempt.ID); err != nil { return pkgerrors.Wrap(err, "saveReplacementInProgressAttempt failed to delete from evm.tx_attempts") } - dbAttempt := DbEthTxAttemptFromEthTxAttempt(replacementAttempt) + var dbAttempt DbEthTxAttempt + dbAttempt.FromTxAttempt(replacementAttempt) query, args, e := tx.BindNamed(insertIntoEthTxAttemptsQuery, &dbAttempt) if e != nil { return pkgerrors.Wrap(e, "saveReplacementInProgressAttempt failed to BindNamed") } e = tx.Get(&dbAttempt, query, args...) 
- DbEthTxAttemptToEthTxAttempt(dbAttempt, replacementAttempt) + dbAttempt.ToTxAttempt(replacementAttempt) return pkgerrors.Wrap(e, "saveReplacementInProgressAttempt failed to insert replacement attempt") }) } @@ -1281,7 +1278,7 @@ func (o *evmTxStore) FindNextUnstartedTransactionFromAddress(etx *Tx, fromAddres qq := o.q.WithOpts(qopts...) var dbEtx DbEthTx err := qq.Get(&dbEtx, `SELECT * FROM evm.txes WHERE from_address = $1 AND state = 'unstarted' AND evm_chain_id = $2 ORDER BY value ASC, created_at ASC, id ASC`, fromAddress, chainID.String()) - DbEthTxToEthTx(dbEtx, etx) + dbEtx.ToTx(etx) return pkgerrors.Wrap(err, "failed to FindNextUnstartedTransactionFromAddress") } @@ -1302,9 +1299,10 @@ func (o *evmTxStore) UpdateTxFatalError(etx *Tx, qopts ...pg.QOpt) error { if _, err := tx.Exec(`DELETE FROM evm.tx_attempts WHERE eth_tx_id = $1`, etx.ID); err != nil { return pkgerrors.Wrapf(err, "saveFatallyErroredTransaction failed to delete eth_tx_attempt with eth_tx.ID %v", etx.ID) } - dbEtx := DbEthTxFromEthTx(etx) + var dbEtx DbEthTx + dbEtx.FromTx(etx) err := pkgerrors.Wrap(tx.Get(&dbEtx, `UPDATE evm.txes SET state=$1, error=$2, broadcast_at=NULL, initial_broadcast_at=NULL, nonce=NULL WHERE id=$3 RETURNING *`, etx.State, etx.Error, etx.ID), "saveFatallyErroredTransaction failed to save eth_tx") - DbEthTxToEthTx(dbEtx, etx) + dbEtx.ToTx(etx) return err }) } @@ -1336,12 +1334,14 @@ func (o *evmTxStore) UpdateTxAttemptInProgressToBroadcast(etx *Tx, attempt TxAtt if err := incrNextNonceCallback(tx); err != nil { return pkgerrors.Wrap(err, "SaveEthTxAttempt failed on incrNextNonceCallback") } - dbEtx := DbEthTxFromEthTx(etx) + var dbEtx DbEthTx + dbEtx.FromTx(etx) if err := tx.Get(&dbEtx, `UPDATE evm.txes SET state=$1, error=$2, broadcast_at=$3, initial_broadcast_at=$4 WHERE id = $5 RETURNING *`, dbEtx.State, dbEtx.Error, dbEtx.BroadcastAt, dbEtx.InitialBroadcastAt, dbEtx.ID); err != nil { return pkgerrors.Wrap(err, "SaveEthTxAttempt failed to save eth_tx") } - DbEthTxToEthTx(dbEtx, etx) - dbAttempt := DbEthTxAttemptFromEthTxAttempt(&attempt) + dbEtx.ToTx(etx) + var dbAttempt DbEthTxAttempt + dbAttempt.FromTxAttempt(&attempt) if err := tx.Get(&dbAttempt, `UPDATE evm.tx_attempts SET state = $1 WHERE id = $2 RETURNING *`, dbAttempt.State, dbAttempt.ID); err != nil { return pkgerrors.Wrap(err, "SaveEthTxAttempt failed to save eth_tx_attempt") } @@ -1380,7 +1380,8 @@ func (o *evmTxStore) UpdateTxUnstartedToInProgress(etx *Tx, attempt *TxAttempt, return err } - dbAttempt := DbEthTxAttemptFromEthTxAttempt(attempt) + var dbAttempt DbEthTxAttempt + dbAttempt.FromTxAttempt(attempt) query, args, e := tx.BindNamed(insertIntoEthTxAttemptsQuery, &dbAttempt) if e != nil { return pkgerrors.Wrap(e, "failed to BindNamed") @@ -1397,10 +1398,11 @@ func (o *evmTxStore) UpdateTxUnstartedToInProgress(etx *Tx, attempt *TxAttempt, return pkgerrors.Wrap(err, "UpdateTxUnstartedToInProgress failed to create eth_tx_attempt") } } - DbEthTxAttemptToEthTxAttempt(dbAttempt, attempt) - dbEtx := DbEthTxFromEthTx(etx) + dbAttempt.ToTxAttempt(attempt) + var dbEtx DbEthTx + dbEtx.FromTx(etx) err = tx.Get(&dbEtx, `UPDATE evm.txes SET nonce=$1, state=$2, broadcast_at=$3, initial_broadcast_at=$4 WHERE id=$5 RETURNING *`, etx.Sequence, etx.State, etx.BroadcastAt, etx.InitialBroadcastAt, etx.ID) - DbEthTxToEthTx(dbEtx, etx) + dbEtx.ToTx(etx) return pkgerrors.Wrap(err, "UpdateTxUnstartedToInProgress failed to update eth_tx") }) } @@ -1424,7 +1426,7 @@ func (o *evmTxStore) GetTxInProgress(fromAddress common.Address, qopts ...pg.QOp } else 
if err != nil { return pkgerrors.Wrap(err, "GetTxInProgress failed while loading eth tx") } - DbEthTxToEthTx(dbEtx, etx) + dbEtx.ToTx(etx) if err = o.LoadTxAttempts(etx, pg.WithQueryer(tx)); err != nil { return pkgerrors.Wrap(err, "GetTxInProgress failed while loading EthTxAttempts") } @@ -1536,7 +1538,7 @@ RETURNING "txes".* return nil }) var etx Tx - DbEthTxToEthTx(dbEtx, &etx) + dbEtx.ToTx(&etx) return etx, err } diff --git a/core/chains/evm/txmgr/evm_tx_store_test.go b/core/chains/evm/txmgr/evm_tx_store_test.go index c4837f81e87..913652b7c8b 100644 --- a/core/chains/evm/txmgr/evm_tx_store_test.go +++ b/core/chains/evm/txmgr/evm_tx_store_test.go @@ -1100,7 +1100,8 @@ func TestORM_LoadEthTxesAttempts(t *testing.T) { q := pg.NewQ(db, logger.TestLogger(t), cfg.Database()) newAttempt := cltest.NewDynamicFeeEthTxAttempt(t, etx.ID) - dbAttempt := txmgr.DbEthTxAttemptFromEthTxAttempt(&newAttempt) + var dbAttempt txmgr.DbEthTxAttempt + dbAttempt.FromTxAttempt(&newAttempt) err := q.Transaction(func(tx pg.Queryer) error { const insertEthTxAttemptSQL = `INSERT INTO evm.tx_attempts (eth_tx_id, gas_price, signed_raw_tx, hash, broadcast_before_block_num, state, created_at, chain_specific_gas_limit, tx_type, gas_tip_cap, gas_fee_cap) VALUES ( :eth_tx_id, :gas_price, :signed_raw_tx, :hash, :broadcast_before_block_num, :state, NOW(), :chain_specific_gas_limit, :tx_type, :gas_tip_cap, :gas_fee_cap diff --git a/core/cmd/evm_transaction_commands_test.go b/core/cmd/evm_transaction_commands_test.go index f213aefb154..eb421b03968 100644 --- a/core/cmd/evm_transaction_commands_test.go +++ b/core/cmd/evm_transaction_commands_test.go @@ -181,7 +181,7 @@ func TestShell_SendEther_From_Txm(t *testing.T) { assert.Equal(t, dbEvmTx.Value.String(), output.Value) assert.Equal(t, fmt.Sprintf("%d", *dbEvmTx.Nonce), output.Nonce) - dbEvmTxAttempt := txmgr.DbEthTxAttempt{} + var dbEvmTxAttempt txmgr.DbEthTxAttempt require.NoError(t, db.Get(&dbEvmTxAttempt, `SELECT * FROM evm.tx_attempts`)) assert.Equal(t, dbEvmTxAttempt.Hash, output.Hash) } @@ -246,7 +246,7 @@ func TestShell_SendEther_From_Txm_WEI(t *testing.T) { assert.Equal(t, dbEvmTx.Value.String(), output.Value) assert.Equal(t, fmt.Sprintf("%d", *dbEvmTx.Nonce), output.Nonce) - dbEvmTxAttempt := txmgr.DbEthTxAttempt{} + var dbEvmTxAttempt txmgr.DbEthTxAttempt require.NoError(t, db.Get(&dbEvmTxAttempt, `SELECT * FROM evm.tx_attempts`)) assert.Equal(t, dbEvmTxAttempt.Hash, output.Hash) } diff --git a/core/cmd/shell_local.go b/core/cmd/shell_local.go index 051f28d8e7a..af8c1528de8 100644 --- a/core/cmd/shell_local.go +++ b/core/cmd/shell_local.go @@ -712,7 +712,7 @@ func (s *Shell) validateDB(c *cli.Context) error { } // ResetDatabase drops, creates and migrates the database specified by CL_DATABASE_URL or Database.URL -// in secrets TOML. This is useful to setup the database for testing +// in secrets TOML. This is useful to set up the database for testing func (s *Shell) ResetDatabase(c *cli.Context) error { cfg := s.Config.Database() parsed := cfg.URL() @@ -819,7 +819,7 @@ func dropDanglingTestDBs(lggr logger.Logger, db *sqlx.DB) (err error) { return } -// PrepareTestDatabase calls ResetDatabase then loads fixtures required for local +// PrepareTestDatabaseUserOnly calls ResetDatabase then loads only user fixtures required for local // testing against testnets. Does not include fake chain fixtures. 
func (s *Shell) PrepareTestDatabaseUserOnly(c *cli.Context) error { if err := s.ResetDatabase(c); err != nil { @@ -852,7 +852,7 @@ func (s *Shell) MigrateDatabase(_ *cli.Context) error { return nil } -// VersionDatabase displays the current database version. +// RollbackDatabase rolls back the database via down migrations. func (s *Shell) RollbackDatabase(c *cli.Context) error { var version null.Int if c.Args().Present() { diff --git a/core/gethwrappers/OffchainAggregator/OffchainAggregator.abi b/core/gethwrappers/OffchainAggregator/OffchainAggregator.abi index e33ed2b8f3f..d83251ac189 100644 --- a/core/gethwrappers/OffchainAggregator/OffchainAggregator.abi +++ b/core/gethwrappers/OffchainAggregator/OffchainAggregator.abi @@ -1 +1 @@ -[{"inputs":[{"internalType":"uint32","name":"_maximumGasPrice","type":"uint32"},{"internalType":"uint32","name":"_reasonableGasPrice","type":"uint32"},{"internalType":"uint32","name":"_microLinkPerEth","type":"uint32"},{"internalType":"uint32","name":"_linkGweiPerObservation","type":"uint32"},{"internalType":"uint32","name":"_linkGweiPerTransmission","type":"uint32"},{"internalType":"address","name":"_link","type":"address"},{"internalType":"address","name":"_validator","type":"address"},{"internalType":"int192","name":"_minAnswer","type":"int192"},{"internalType":"int192","name":"_maxAnswer","type":"int192"},{"internalType":"contract AccessControllerInterface","name":"_billingAccessController","type":"address"},{"internalType":"contract AccessControllerInterface","name":"_requesterAccessController","type":"address"},{"internalType":"uint8","name":"_decimals","type":"uint8"},{"internalType":"string","name":"_description","type":"string"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"int256","name":"current","type":"int256"},{"indexed":true,"internalType":"uint256","name":"roundId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"updatedAt","type":"uint256"}],"name":"AnswerUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"contract AccessControllerInterface","name":"old","type":"address"},{"indexed":false,"internalType":"contract 
AccessControllerInterface","name":"current","type":"address"}],"name":"BillingAccessControllerSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint32","name":"maximumGasPrice","type":"uint32"},{"indexed":false,"internalType":"uint32","name":"reasonableGasPrice","type":"uint32"},{"indexed":false,"internalType":"uint32","name":"microLinkPerEth","type":"uint32"},{"indexed":false,"internalType":"uint32","name":"linkGweiPerObservation","type":"uint32"},{"indexed":false,"internalType":"uint32","name":"linkGweiPerTransmission","type":"uint32"}],"name":"BillingSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint32","name":"previousConfigBlockNumber","type":"uint32"},{"indexed":false,"internalType":"uint64","name":"configCount","type":"uint64"},{"indexed":false,"internalType":"address[]","name":"signers","type":"address[]"},{"indexed":false,"internalType":"address[]","name":"transmitters","type":"address[]"},{"indexed":false,"internalType":"uint8","name":"threshold","type":"uint8"},{"indexed":false,"internalType":"uint64","name":"encodedConfigVersion","type":"uint64"},{"indexed":false,"internalType":"bytes","name":"encoded","type":"bytes"}],"name":"ConfigSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"roundId","type":"uint256"},{"indexed":true,"internalType":"address","name":"startedBy","type":"address"},{"indexed":false,"internalType":"uint256","name":"startedAt","type":"uint256"}],"name":"NewRound","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint32","name":"aggregatorRoundId","type":"uint32"},{"indexed":false,"internalType":"int192","name":"answer","type":"int192"},{"indexed":false,"internalType":"address","name":"transmitter","type":"address"},{"indexed":false,"internalType":"int192[]","name":"observations","type":"int192[]"},{"indexed":false,"internalType":"bytes","name":"observers","type":"bytes"},{"indexed":false,"internalType":"bytes32","name":"rawReportContext","type":"bytes32"}],"name":"NewTransmission","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"transmitter","type":"address"},{"indexed":false,"internalType":"address","name":"payee","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"OraclePaid","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"OwnershipTransferRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"OwnershipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transmitter","type":"address"},{"indexed":true,"internalType":"address","name":"current","type":"address"},{"indexed":true,"internalType":"address","name":"proposed","type":"address"}],"name":"PayeeshipTransferRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transmitter","type":"address"},{"indexed":true,"internalType":"address","name":"previous","type":"address"},{"indexed":true,"internalType":"address","name":"current","type":"address"}],"name":"PayeeshipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"contract 
AccessControllerInterface","name":"old","type":"address"},{"indexed":false,"internalType":"contract AccessControllerInterface","name":"current","type":"address"}],"name":"RequesterAccessControllerSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"requester","type":"address"},{"indexed":false,"internalType":"bytes16","name":"configDigest","type":"bytes16"},{"indexed":false,"internalType":"uint32","name":"epoch","type":"uint32"},{"indexed":false,"internalType":"uint8","name":"round","type":"uint8"}],"name":"RoundRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"previous","type":"address"},{"indexed":true,"internalType":"address","name":"current","type":"address"}],"name":"ValidatorUpdated","type":"event"},{"inputs":[],"name":"LINK","outputs":[{"internalType":"contract LinkTokenInterface","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"acceptOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_transmitter","type":"address"}],"name":"acceptPayeeship","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"billingAccessController","outputs":[{"internalType":"contract AccessControllerInterface","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"decimals","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"description","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_roundId","type":"uint256"}],"name":"getAnswer","outputs":[{"internalType":"int256","name":"","type":"int256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getBilling","outputs":[{"internalType":"uint32","name":"maximumGasPrice","type":"uint32"},{"internalType":"uint32","name":"reasonableGasPrice","type":"uint32"},{"internalType":"uint32","name":"microLinkPerEth","type":"uint32"},{"internalType":"uint32","name":"linkGweiPerObservation","type":"uint32"},{"internalType":"uint32","name":"linkGweiPerTransmission","type":"uint32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint80","name":"_roundId","type":"uint80"}],"name":"getRoundData","outputs":[{"internalType":"uint80","name":"roundId","type":"uint80"},{"internalType":"int256","name":"answer","type":"int256"},{"internalType":"uint256","name":"startedAt","type":"uint256"},{"internalType":"uint256","name":"updatedAt","type":"uint256"},{"internalType":"uint80","name":"answeredInRound","type":"uint80"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_roundId","type":"uint256"}],"name":"getTimestamp","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestAnswer","outputs":[{"internalType":"int256","name":"","type":"int256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestConfigDetails","outputs":[{"internalType":"uint32","name":"configCount","type":"uint32"},{"internalType":"uint32","name":"blockNumber","type":"uint32"},{"internalType":"bytes16","name":"configDigest","type":"bytes16"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestRound","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutab
ility":"view","type":"function"},{"inputs":[],"name":"latestRoundData","outputs":[{"internalType":"uint80","name":"roundId","type":"uint80"},{"internalType":"int256","name":"answer","type":"int256"},{"internalType":"uint256","name":"startedAt","type":"uint256"},{"internalType":"uint256","name":"updatedAt","type":"uint256"},{"internalType":"uint80","name":"answeredInRound","type":"uint80"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestTimestamp","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestTransmissionDetails","outputs":[{"internalType":"bytes16","name":"configDigest","type":"bytes16"},{"internalType":"uint32","name":"epoch","type":"uint32"},{"internalType":"uint8","name":"round","type":"uint8"},{"internalType":"int192","name":"latestAnswer","type":"int192"},{"internalType":"uint64","name":"latestTimestamp","type":"uint64"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"linkAvailableForPayment","outputs":[{"internalType":"int256","name":"availableBalance","type":"int256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"maxAnswer","outputs":[{"internalType":"int192","name":"","type":"int192"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"minAnswer","outputs":[{"internalType":"int192","name":"","type":"int192"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_signerOrTransmitter","type":"address"}],"name":"oracleObservationCount","outputs":[{"internalType":"uint16","name":"","type":"uint16"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_transmitter","type":"address"}],"name":"owedPayment","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"owner","outputs":[{"internalType":"address payable","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"requestNewRound","outputs":[{"internalType":"uint80","name":"","type":"uint80"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"requesterAccessController","outputs":[{"internalType":"contract AccessControllerInterface","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint32","name":"_maximumGasPrice","type":"uint32"},{"internalType":"uint32","name":"_reasonableGasPrice","type":"uint32"},{"internalType":"uint32","name":"_microLinkPerEth","type":"uint32"},{"internalType":"uint32","name":"_linkGweiPerObservation","type":"uint32"},{"internalType":"uint32","name":"_linkGweiPerTransmission","type":"uint32"}],"name":"setBilling","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"contract 
AccessControllerInterface","name":"_billingAccessController","type":"address"}],"name":"setBillingAccessController","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address[]","name":"_signers","type":"address[]"},{"internalType":"address[]","name":"_transmitters","type":"address[]"},{"internalType":"uint8","name":"_threshold","type":"uint8"},{"internalType":"uint64","name":"_encodedConfigVersion","type":"uint64"},{"internalType":"bytes","name":"_encoded","type":"bytes"}],"name":"setConfig","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address[]","name":"_transmitters","type":"address[]"},{"internalType":"address[]","name":"_payees","type":"address[]"}],"name":"setPayees","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"contract AccessControllerInterface","name":"_requesterAccessController","type":"address"}],"name":"setRequesterAccessController","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_newValidator","type":"address"}],"name":"setValidator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_to","type":"address"}],"name":"transferOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_transmitter","type":"address"},{"internalType":"address","name":"_proposed","type":"address"}],"name":"transferPayeeship","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"_report","type":"bytes"},{"internalType":"bytes32[]","name":"_rs","type":"bytes32[]"},{"internalType":"bytes32[]","name":"_ss","type":"bytes32[]"},{"internalType":"bytes32","name":"_rawVs","type":"bytes32"}],"name":"transmit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"transmitters","outputs":[{"internalType":"address[]","name":"","type":"address[]"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"validator","outputs":[{"internalType":"contract AggregatorValidatorInterface","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"version","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_recipient","type":"address"},{"internalType":"uint256","name":"_amount","type":"uint256"}],"name":"withdrawFunds","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_transmitter","type":"address"}],"name":"withdrawPayment","outputs":[],"stateMutability":"nonpayable","type":"function"}] \ No newline at end of file +[{"inputs":[{"internalType":"uint32","name":"_maximumGasPrice","type":"uint32"},{"internalType":"uint32","name":"_reasonableGasPrice","type":"uint32"},{"internalType":"uint32","name":"_microLinkPerEth","type":"uint32"},{"internalType":"uint32","name":"_linkGweiPerObservation","type":"uint32"},{"internalType":"uint32","name":"_linkGweiPerTransmission","type":"uint32"},{"internalType":"address","name":"_link","type":"address"},{"internalType":"address","name":"_validator","type":"address"},{"internalType":"int192","name":"_minAnswer","type":"int192"},{"internalType":"int192","name":"_maxAnswer","type":"int192"},{"internalType":"contract 
AccessControllerInterface","name":"_billingAccessController","type":"address"},{"internalType":"contract AccessControllerInterface","name":"_requesterAccessController","type":"address"},{"internalType":"uint8","name":"_decimals","type":"uint8"},{"internalType":"string","name":"_description","type":"string"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"int256","name":"current","type":"int256"},{"indexed":true,"internalType":"uint256","name":"roundId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"updatedAt","type":"uint256"}],"name":"AnswerUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"contract AccessControllerInterface","name":"old","type":"address"},{"indexed":false,"internalType":"contract AccessControllerInterface","name":"current","type":"address"}],"name":"BillingAccessControllerSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint32","name":"maximumGasPrice","type":"uint32"},{"indexed":false,"internalType":"uint32","name":"reasonableGasPrice","type":"uint32"},{"indexed":false,"internalType":"uint32","name":"microLinkPerEth","type":"uint32"},{"indexed":false,"internalType":"uint32","name":"linkGweiPerObservation","type":"uint32"},{"indexed":false,"internalType":"uint32","name":"linkGweiPerTransmission","type":"uint32"}],"name":"BillingSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint32","name":"previousConfigBlockNumber","type":"uint32"},{"indexed":false,"internalType":"uint64","name":"configCount","type":"uint64"},{"indexed":false,"internalType":"address[]","name":"signers","type":"address[]"},{"indexed":false,"internalType":"address[]","name":"transmitters","type":"address[]"},{"indexed":false,"internalType":"uint8","name":"threshold","type":"uint8"},{"indexed":false,"internalType":"uint64","name":"encodedConfigVersion","type":"uint64"},{"indexed":false,"internalType":"bytes","name":"encoded","type":"bytes"}],"name":"ConfigSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"roundId","type":"uint256"},{"indexed":true,"internalType":"address","name":"startedBy","type":"address"},{"indexed":false,"internalType":"uint256","name":"startedAt","type":"uint256"}],"name":"NewRound","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint32","name":"aggregatorRoundId","type":"uint32"},{"indexed":false,"internalType":"int192","name":"answer","type":"int192"},{"indexed":false,"internalType":"address","name":"transmitter","type":"address"},{"indexed":false,"internalType":"int192[]","name":"observations","type":"int192[]"},{"indexed":false,"internalType":"bytes","name":"observers","type":"bytes"},{"indexed":false,"internalType":"bytes32","name":"rawReportContext","type":"bytes32"}],"name":"NewTransmission","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"transmitter","type":"address"},{"indexed":false,"internalType":"address","name":"payee","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"OraclePaid","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"OwnershipTransferRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"a
ddress"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"OwnershipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transmitter","type":"address"},{"indexed":true,"internalType":"address","name":"current","type":"address"},{"indexed":true,"internalType":"address","name":"proposed","type":"address"}],"name":"PayeeshipTransferRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transmitter","type":"address"},{"indexed":true,"internalType":"address","name":"previous","type":"address"},{"indexed":true,"internalType":"address","name":"current","type":"address"}],"name":"PayeeshipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"contract AccessControllerInterface","name":"old","type":"address"},{"indexed":false,"internalType":"contract AccessControllerInterface","name":"current","type":"address"}],"name":"RequesterAccessControllerSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"requester","type":"address"},{"indexed":false,"internalType":"bytes16","name":"configDigest","type":"bytes16"},{"indexed":false,"internalType":"uint32","name":"epoch","type":"uint32"},{"indexed":false,"internalType":"uint8","name":"round","type":"uint8"}],"name":"RoundRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"previous","type":"address"},{"indexed":true,"internalType":"address","name":"current","type":"address"}],"name":"ValidatorUpdated","type":"event"},{"inputs":[],"name":"LINK","outputs":[{"internalType":"contract LinkTokenInterface","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"acceptOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_transmitter","type":"address"}],"name":"acceptPayeeship","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"billingAccessController","outputs":[{"internalType":"contract 
AccessControllerInterface","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"decimals","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"description","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_roundId","type":"uint256"}],"name":"getAnswer","outputs":[{"internalType":"int256","name":"","type":"int256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getBilling","outputs":[{"internalType":"uint32","name":"maximumGasPrice","type":"uint32"},{"internalType":"uint32","name":"reasonableGasPrice","type":"uint32"},{"internalType":"uint32","name":"microLinkPerEth","type":"uint32"},{"internalType":"uint32","name":"linkGweiPerObservation","type":"uint32"},{"internalType":"uint32","name":"linkGweiPerTransmission","type":"uint32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint80","name":"_roundId","type":"uint80"}],"name":"getRoundData","outputs":[{"internalType":"uint80","name":"roundId","type":"uint80"},{"internalType":"int256","name":"answer","type":"int256"},{"internalType":"uint256","name":"startedAt","type":"uint256"},{"internalType":"uint256","name":"updatedAt","type":"uint256"},{"internalType":"uint80","name":"answeredInRound","type":"uint80"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_roundId","type":"uint256"}],"name":"getTimestamp","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestAnswer","outputs":[{"internalType":"int256","name":"","type":"int256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestConfigDetails","outputs":[{"internalType":"uint32","name":"configCount","type":"uint32"},{"internalType":"uint32","name":"blockNumber","type":"uint32"},{"internalType":"bytes16","name":"configDigest","type":"bytes16"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestRound","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestRoundData","outputs":[{"internalType":"uint80","name":"roundId","type":"uint80"},{"internalType":"int256","name":"answer","type":"int256"},{"internalType":"uint256","name":"startedAt","type":"uint256"},{"internalType":"uint256","name":"updatedAt","type":"uint256"},{"internalType":"uint80","name":"answeredInRound","type":"uint80"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestTimestamp","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestTransmissionDetails","outputs":[{"internalType":"bytes16","name":"configDigest","type":"bytes16"},{"internalType":"uint32","name":"epoch","type":"uint32"},{"internalType":"uint8","name":"round","type":"uint8"},{"internalType":"int192","name":"latestAnswer","type":"int192"},{"internalType":"uint64","name":"latestTimestamp","type":"uint64"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"linkAvailableForPayment","outputs":[{"internalType":"int256","name":"availableBalance","type":"int256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"maxAnswer","outputs":[{"internalType":"int192","name":"","type":"int192"}],"stateMutability":"view","type":"function"},{"inputs":[],"n
ame":"minAnswer","outputs":[{"internalType":"int192","name":"","type":"int192"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_signerOrTransmitter","type":"address"}],"name":"oracleObservationCount","outputs":[{"internalType":"uint16","name":"","type":"uint16"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_transmitter","type":"address"}],"name":"owedPayment","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"owner","outputs":[{"internalType":"address payable","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"requestNewRound","outputs":[{"internalType":"uint80","name":"","type":"uint80"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"requesterAccessController","outputs":[{"internalType":"contract AccessControllerInterface","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint32","name":"_maximumGasPrice","type":"uint32"},{"internalType":"uint32","name":"_reasonableGasPrice","type":"uint32"},{"internalType":"uint32","name":"_microLinkPerEth","type":"uint32"},{"internalType":"uint32","name":"_linkGweiPerObservation","type":"uint32"},{"internalType":"uint32","name":"_linkGweiPerTransmission","type":"uint32"}],"name":"setBilling","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"contract AccessControllerInterface","name":"_billingAccessController","type":"address"}],"name":"setBillingAccessController","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address[]","name":"_signers","type":"address[]"},{"internalType":"address[]","name":"_transmitters","type":"address[]"},{"internalType":"uint8","name":"_threshold","type":"uint8"},{"internalType":"uint64","name":"_encodedConfigVersion","type":"uint64"},{"internalType":"bytes","name":"_encoded","type":"bytes"}],"name":"setConfig","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address[]","name":"_transmitters","type":"address[]"},{"internalType":"address[]","name":"_payees","type":"address[]"}],"name":"setPayees","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"contract 
AccessControllerInterface","name":"_requesterAccessController","type":"address"}],"name":"setRequesterAccessController","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_newValidator","type":"address"}],"name":"setValidator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_to","type":"address"}],"name":"transferOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_transmitter","type":"address"},{"internalType":"address","name":"_proposed","type":"address"}],"name":"transferPayeeship","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"_report","type":"bytes"},{"internalType":"bytes32[]","name":"_rs","type":"bytes32[]"},{"internalType":"bytes32[]","name":"_ss","type":"bytes32[]"},{"internalType":"bytes32","name":"_rawVs","type":"bytes32"}],"name":"transmit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"transmitters","outputs":[{"internalType":"address[]","name":"","type":"address[]"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"validator","outputs":[{"internalType":"contract AggregatorValidatorInterface","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"version","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_recipient","type":"address"},{"internalType":"uint256","name":"_amount","type":"uint256"}],"name":"withdrawFunds","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_transmitter","type":"address"}],"name":"withdrawPayment","outputs":[],"stateMutability":"nonpayable","type":"function"}] diff --git a/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt index 80f7e8ccfc4..146b87b43cb 100644 --- a/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt +++ b/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -45,7 +45,7 @@ mock_aggregator_proxy: ../../contracts/solc/v0.8.6/MockAggregatorProxy.abi ../.. 
mock_ethlink_aggregator_wrapper: ../../contracts/solc/v0.6/MockETHLINKAggregator.abi ../../contracts/solc/v0.6/MockETHLINKAggregator.bin 1c52c24f797b8482aa12b8251dcea1c072827bd5b3426b822621261944b99ca0 mock_gas_aggregator_wrapper: ../../contracts/solc/v0.6/MockGASAggregator.abi ../../contracts/solc/v0.6/MockGASAggregator.bin bacbb1ea4dc6beac0db8a13ca5c75e2fd61b903d70feea9b3b1c8b10fe8df4f3 multiwordconsumer_wrapper: ../../contracts/solc/v0.7/MultiWordConsumer.abi ../../contracts/solc/v0.7/MultiWordConsumer.bin 6e68abdf614e3ed0f5066c1b5f9d7c1199f1e7c5c5251fe8a471344a59afc6ba -offchain_aggregator_wrapper: OffchainAggregator/OffchainAggregator.abi - 5f97dc197fd4e2b999856b9b3fa7c2aaf0c700c71d7009d7d017d233bc855877 +offchain_aggregator_wrapper: OffchainAggregator/OffchainAggregator.abi - 5c8d6562e94166d4790f1ee6e4321d359d9f7262e6c5452a712b1f1c896f45cf operator_factory: ../../contracts/solc/v0.7/OperatorFactory.abi ../../contracts/solc/v0.7/OperatorFactory.bin 0bbac9ac2e45f988b8365a83a36dff97534c14d315ebe5a1fc725d87f00c15d5 operator_wrapper: ../../contracts/solc/v0.7/Operator.abi ../../contracts/solc/v0.7/Operator.bin 45036dc5046de66ba03f57b48ef8b629700e863af388cb509b2fa259989762e8 oracle_wrapper: ../../contracts/solc/v0.6/Oracle.abi ../../contracts/solc/v0.6/Oracle.bin 7af2fbac22a6e8c2847e8e685a5400cac5101d72ddf5365213beb79e4dede43a diff --git a/core/internal/features/ocr2/features_ocr2_test.go b/core/internal/features/ocr2/features_ocr2_test.go index 82d4fadd708..3883e0319ed 100644 --- a/core/internal/features/ocr2/features_ocr2_test.go +++ b/core/internal/features/ocr2/features_ocr2_test.go @@ -24,13 +24,14 @@ import ( "github.com/onsi/gomega" "github.com/smartcontractkit/libocr/commontypes" "github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator" - testoffchainaggregator2 "github.com/smartcontractkit/libocr/gethwrappers2/testocr2aggregator" - confighelper2 "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" - ocrtypes2 "github.com/smartcontractkit/libocr/offchainreporting2plus/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/maps" + testoffchainaggregator2 "github.com/smartcontractkit/libocr/gethwrappers2/testocr2aggregator" + confighelper2 "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" + ocrtypes2 "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/smartcontractkit/chainlink/v2/core/assets" "github.com/smartcontractkit/chainlink/v2/core/bridges" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/forwarders" @@ -43,6 +44,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/testhelpers" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/validate" "github.com/smartcontractkit/chainlink/v2/core/services/ocrbootstrap" "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" @@ -238,12 +240,20 @@ func TestIntegration_OCR2(t *testing.T) { require.NoError(t, err) blockBeforeConfig, err := b.BlockByNumber(testutils.Context(t), nil) require.NoError(t, err) - signers, transmitters, threshold, onchainConfig, encodedConfigVersion, encodedConfig, err := confighelper2.ContractSetConfigArgsForEthereumIntegrationTest( + signers, transmitters, threshold, _, encodedConfigVersion, encodedConfig, err := 
confighelper2.ContractSetConfigArgsForEthereumIntegrationTest( oracles, 1, 1000000000/100, // threshold PPB ) require.NoError(t, err) + + minAnswer, maxAnswer := new(big.Int), new(big.Int) + minAnswer.Exp(big.NewInt(-2), big.NewInt(191), nil) + maxAnswer.Exp(big.NewInt(2), big.NewInt(191), nil) + maxAnswer.Sub(maxAnswer, big.NewInt(1)) + + onchainConfig, err := testhelpers.GenerateDefaultOCR2OnchainConfig(minAnswer, maxAnswer) + require.NoError(t, err) lggr.Debugw("Setting Config on Oracle Contract", "signers", signers, "transmitters", transmitters, @@ -506,13 +516,21 @@ func TestIntegration_OCR2_ForwarderFlow(t *testing.T) { require.NoError(t, err) blockBeforeConfig, err := b.BlockByNumber(testutils.Context(t), nil) require.NoError(t, err) - signers, effectiveTransmitters, threshold, onchainConfig, encodedConfigVersion, encodedConfig, err := confighelper2.ContractSetConfigArgsForEthereumIntegrationTest( + signers, effectiveTransmitters, threshold, _, encodedConfigVersion, encodedConfig, err := confighelper2.ContractSetConfigArgsForEthereumIntegrationTest( oracles, 1, 1000000000/100, // threshold PPB ) require.NoError(t, err) + minAnswer, maxAnswer := new(big.Int), new(big.Int) + minAnswer.Exp(big.NewInt(-2), big.NewInt(191), nil) + maxAnswer.Exp(big.NewInt(2), big.NewInt(191), nil) + maxAnswer.Sub(maxAnswer, big.NewInt(1)) + + onchainConfig, err := testhelpers.GenerateDefaultOCR2OnchainConfig(minAnswer, maxAnswer) + require.NoError(t, err) + lggr.Debugw("Setting Config on Oracle Contract", "signers", signers, "transmitters", transmitters, diff --git a/core/scripts/go.mod b/core/scripts/go.mod index f4da1f46f42..f4b21a3773e 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -16,11 +16,11 @@ require ( github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f github.com/montanaflynn/stats v0.7.1 github.com/olekukonko/tablewriter v0.0.5 - github.com/pelletier/go-toml/v2 v2.0.9 + github.com/pelletier/go-toml/v2 v2.1.0 github.com/shopspring/decimal v1.3.1 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 - github.com/smartcontractkit/libocr v0.0.0-20230918212407-dbd4e505b3e6 - github.com/smartcontractkit/ocr2keepers v0.7.25 + github.com/smartcontractkit/libocr v0.0.0-20230922131214-122accb19ea6 + github.com/smartcontractkit/ocr2keepers v0.7.27 github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687 github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb github.com/spf13/cobra v1.6.1 @@ -300,7 +300,7 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 // indirect github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20230913032705-f924d753cc47 // indirect - github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230918212835-8a0b08df72a3 // indirect + github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230923153757-d0cdb6bea61c // indirect github.com/smartcontractkit/chainlink-solana v1.0.3-0.20230831134610-680240b97aca // indirect github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20230901115736-bbabe542a918 // indirect github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 // indirect diff --git a/core/scripts/go.sum b/core/scripts/go.sum index 75148687853..6896a0651f8 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1339,8 +1339,8 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.9.5 
h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0= -github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU= github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= @@ -1454,8 +1454,8 @@ github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumv github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20230913032705-f924d753cc47 h1:vdieOW3CZGdD2R5zvCSMS+0vksyExPN3/Fa1uVfld/A= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20230913032705-f924d753cc47/go.mod h1:xMwqRdj5vqYhCJXgKVqvyAwdcqM6ZAEhnwEQ4Khsop8= -github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230918212835-8a0b08df72a3 h1:FonaZ1kgRK0yY7D0jF5pL3K+0DYUnKcnStOOcIN+Hhg= -github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230918212835-8a0b08df72a3/go.mod h1:gWclxGW7rLkbjXn7FGizYlyKhp/boekto4MEYGyiMG4= +github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230923153757-d0cdb6bea61c h1:be/0dJGClO0wS7gngfr0qUQq7RO/i7aJ8e5wG1b6/Ns= +github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230923153757-d0cdb6bea61c/go.mod h1:gWclxGW7rLkbjXn7FGizYlyKhp/boekto4MEYGyiMG4= github.com/smartcontractkit/chainlink-solana v1.0.3-0.20230831134610-680240b97aca h1:x7M0m512gtXw5Z4B1WJPZ52VgshoIv+IvHqQ8hsH4AE= github.com/smartcontractkit/chainlink-solana v1.0.3-0.20230831134610-680240b97aca/go.mod h1:RIUJXn7EVp24TL2p4FW79dYjyno23x5mjt1nKN+5WEk= github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20230901115736-bbabe542a918 h1:ByVauKFXphRlSNG47lNuxZ9aicu+r8AoNp933VRPpCw= @@ -1464,10 +1464,10 @@ github.com/smartcontractkit/go-plugin v0.0.0-20230605132010-0f4d515d1472 h1:x3kN github.com/smartcontractkit/go-plugin v0.0.0-20230605132010-0f4d515d1472/go.mod h1:6/1TEzT0eQznvI/gV2CM29DLSkAK/e58mUWKVsPaph0= github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f h1:hgJif132UCdjo8u43i7iPN1/MFnu49hv7lFGFftCHKU= github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f/go.mod h1:MvMXoufZAtqExNexqi4cjrNYE9MefKddKylxjS+//n0= -github.com/smartcontractkit/libocr v0.0.0-20230918212407-dbd4e505b3e6 h1:w+8TI2Vcm3vk8XQz40ddcwy9BNZgoakXIby35Y54iDU= -github.com/smartcontractkit/libocr v0.0.0-20230918212407-dbd4e505b3e6/go.mod h1:2lyRkw/qLQgUWlrWWmq5nj0y90rWeO6Y+v+fCakRgb0= -github.com/smartcontractkit/ocr2keepers v0.7.25 h1:jkXje8B9SFMxiI1fufauqxstU95GNu8dtaIJofNyZgo= -github.com/smartcontractkit/ocr2keepers v0.7.25/go.mod h1:4e1ZDRz7fpLgcRUjJpq+5mkoD0ga11BxrSp2JTWKADQ= +github.com/smartcontractkit/libocr v0.0.0-20230922131214-122accb19ea6 h1:eSo9r53fARv2MnIO5pqYvQOXMBsTlAwhHyQ6BAVp6bY= +github.com/smartcontractkit/libocr v0.0.0-20230922131214-122accb19ea6/go.mod 
h1:2lyRkw/qLQgUWlrWWmq5nj0y90rWeO6Y+v+fCakRgb0= +github.com/smartcontractkit/ocr2keepers v0.7.27 h1:kwqMrzmEdq6gH4yqNuLQCbdlED0KaIjwZzu3FF+Gves= +github.com/smartcontractkit/ocr2keepers v0.7.27/go.mod h1:1QGzJURnoWpysguPowOe2bshV0hNp1YX10HHlhDEsas= github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687 h1:NwC3SOc25noBTe1KUQjt45fyTIuInhoE2UfgcHAdihM= github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687/go.mod h1:YYZq52t4wcHoMQeITksYsorD+tZcOyuVU5+lvot3VFM= github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb h1:OMaBUb4X9IFPLbGbCHsMU+kw/BPCrewaVwWGIBc0I4A= diff --git a/core/services/blockhashstore/feeder_test.go b/core/services/blockhashstore/feeder_test.go index 08d7c0e9c46..d9e2c1bacdb 100644 --- a/core/services/blockhashstore/feeder_test.go +++ b/core/services/blockhashstore/feeder_test.go @@ -31,16 +31,12 @@ import ( const ( // VRF-only events. - randomWordsRequestedV2Plus string = "RandomWordsRequested" - randomWordsFulfilledV2Plus string = "RandomWordsFulfilled" - randomWordsRequestedV2 string = "RandomWordsRequested" - randomWordsFulfilledV2 string = "RandomWordsFulfilled" - randomWordsRequestedV1 string = "RandomnessRequest" - randomWordsFulfilledV1 string = "RandomnessRequestFulfilled" - randomnessFulfillmentRequestedEvent string = "RandomnessFulfillmentRequested" - randomWordsFulfilledEvent string = "RandomWordsFulfilled" - newTransmissionEvent string = "NewTransmission" - outputsServedEvent string = "OutputsServed" + randomWordsRequestedV2Plus string = "RandomWordsRequested" + randomWordsFulfilledV2Plus string = "RandomWordsFulfilled" + randomWordsRequestedV2 string = "RandomWordsRequested" + randomWordsFulfilledV2 string = "RandomWordsFulfilled" + randomWordsRequestedV1 string = "RandomnessRequest" + randomWordsFulfilledV1 string = "RandomnessRequestFulfilled" ) var ( @@ -50,18 +46,7 @@ var ( _ Coordinator = &TestCoordinator{} _ BHS = &TestBHS{} - tests = []struct { - name string - requests []Event - fulfillments []Event - wait int - lookback int - latest uint64 - bhs TestBHS - expectedStored []uint64 - expectedStoredMapBlocks []uint64 // expected state of stored map in Feeder struct - expectedErrMsg string - }{ + tests = []testCase{ { name: "single unfulfilled request", requests: []Event{{Block: 150, ID: "1000"}}, @@ -363,327 +348,344 @@ func TestStartHeartbeats(t *testing.T) { }) } -func TestFeeder(t *testing.T) { +type testCase struct { + name string + requests []Event + fulfillments []Event + wait int + lookback int + latest uint64 + bhs TestBHS + expectedStored []uint64 + expectedStoredMapBlocks []uint64 // expected state of stored map in Feeder struct + expectedErrMsg string +} +func TestFeeder(t *testing.T) { for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - coordinator := &TestCoordinator{ - RequestEvents: test.requests, - FulfillmentEvents: test.fulfillments, - } - - lp := &mocklp.LogPoller{} - feeder := NewFeeder( - logger.TestLogger(t), - coordinator, - &test.bhs, - lp, - 0, - test.wait, - test.lookback, - 600*time.Second, - func(ctx context.Context) (uint64, error) { - return test.latest, nil - }) - - err := feeder.Run(testutils.Context(t)) - if test.expectedErrMsg == "" { - require.NoError(t, err) - } else { - require.EqualError(t, err, test.expectedErrMsg) - } - - require.ElementsMatch(t, test.expectedStored, test.bhs.Stored) - require.ElementsMatch(t, test.expectedStoredMapBlocks, maps.Keys(feeder.stored)) + t.Run(test.name, test.testFeeder) + } +} + +func (test testCase) testFeeder(t 
*testing.T) { + coordinator := &TestCoordinator{ + RequestEvents: test.requests, + FulfillmentEvents: test.fulfillments, + } + + lp := &mocklp.LogPoller{} + feeder := NewFeeder( + logger.TestLogger(t), + coordinator, + &test.bhs, + lp, + 0, + test.wait, + test.lookback, + 600*time.Second, + func(ctx context.Context) (uint64, error) { + return test.latest, nil }) + + err := feeder.Run(testutils.Context(t)) + if test.expectedErrMsg == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, test.expectedErrMsg) } + + require.ElementsMatch(t, test.expectedStored, test.bhs.Stored) + require.ElementsMatch(t, test.expectedStoredMapBlocks, maps.Keys(feeder.stored)) } func TestFeederWithLogPollerVRFv1(t *testing.T) { + for _, test := range tests { + t.Run(test.name, test.testFeederWithLogPollerVRFv1) + } +} +func (test testCase) testFeederWithLogPollerVRFv1(t *testing.T) { var coordinatorAddress = common.HexToAddress("0x514910771AF9Ca656af840dff83E8264EcF986CA") - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - // Instantiate log poller & coordinator. - lp := &mocklp.LogPoller{} - lp.On("RegisterFilter", mock.Anything).Return(nil) - c, err := solidity_vrf_coordinator_interface.NewVRFCoordinator(coordinatorAddress, nil) - require.NoError(t, err) - coordinator := &V1Coordinator{ - c: c, - lp: lp, - } - - // Assert search window. - latest := int64(test.latest) - fromBlock := mathutil.Max(latest-int64(test.lookback), 0) - toBlock := mathutil.Max(latest-int64(test.wait), 0) - - // Construct request logs. - var requestLogs []logpoller.Log - for _, r := range test.requests { - if r.Block < uint64(fromBlock) || r.Block > uint64(toBlock) { - continue // do not include blocks outside our search window - } - requestLogs = append( - requestLogs, - newRandomnessRequestedLogV1(t, r.Block, r.ID, coordinatorAddress), - ) - } - - // Construct fulfillment logs. - var fulfillmentLogs []logpoller.Log - for _, r := range test.fulfillments { - fulfillmentLogs = append( - fulfillmentLogs, - newRandomnessFulfilledLogV1(t, r.Block, r.ID, coordinatorAddress), - ) - } - - // Mock log poller. - lp.On("LatestBlock", mock.Anything). - Return(latest, nil) - lp.On( - "LogsWithSigs", - fromBlock, - toBlock, - []common.Hash{ - solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequest{}.Topic(), - }, - coordinatorAddress, - mock.Anything, - ).Return(requestLogs, nil) - lp.On( - "LogsWithSigs", - fromBlock, - latest, - []common.Hash{ - solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequestFulfilled{}.Topic(), - }, - coordinatorAddress, - mock.Anything, - ).Return(fulfillmentLogs, nil) - - // Instantiate feeder. - feeder := NewFeeder( - logger.TestLogger(t), - coordinator, - &test.bhs, - lp, - 0, - test.wait, - test.lookback, - 600*time.Second, - func(ctx context.Context) (uint64, error) { - return test.latest, nil - }) - - // Run feeder and assert correct results. - err = feeder.Run(testutils.Context(t)) - if test.expectedErrMsg == "" { - require.NoError(t, err) - } else { - require.EqualError(t, err, test.expectedErrMsg) - } - require.ElementsMatch(t, test.expectedStored, test.bhs.Stored) - require.ElementsMatch(t, test.expectedStoredMapBlocks, maps.Keys(feeder.stored)) + // Instantiate log poller & coordinator. 
+ lp := &mocklp.LogPoller{} + lp.On("RegisterFilter", mock.Anything).Return(nil) + c, err := solidity_vrf_coordinator_interface.NewVRFCoordinator(coordinatorAddress, nil) + require.NoError(t, err) + coordinator := &V1Coordinator{ + c: c, + lp: lp, + } + + // Assert search window. + latest := int64(test.latest) + fromBlock := mathutil.Max(latest-int64(test.lookback), 0) + toBlock := mathutil.Max(latest-int64(test.wait), 0) + + // Construct request logs. + var requestLogs []logpoller.Log + for _, r := range test.requests { + if r.Block < uint64(fromBlock) || r.Block > uint64(toBlock) { + continue // do not include blocks outside our search window + } + requestLogs = append( + requestLogs, + newRandomnessRequestedLogV1(t, r.Block, r.ID, coordinatorAddress), + ) + } + + // Construct fulfillment logs. + var fulfillmentLogs []logpoller.Log + for _, r := range test.fulfillments { + fulfillmentLogs = append( + fulfillmentLogs, + newRandomnessFulfilledLogV1(t, r.Block, r.ID, coordinatorAddress), + ) + } + + // Mock log poller. + lp.On("LatestBlock", mock.Anything). + Return(latest, nil) + lp.On( + "LogsWithSigs", + fromBlock, + toBlock, + []common.Hash{ + solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequest{}.Topic(), + }, + coordinatorAddress, + mock.Anything, + ).Return(requestLogs, nil) + lp.On( + "LogsWithSigs", + fromBlock, + latest, + []common.Hash{ + solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequestFulfilled{}.Topic(), + }, + coordinatorAddress, + mock.Anything, + ).Return(fulfillmentLogs, nil) + + // Instantiate feeder. + feeder := NewFeeder( + logger.TestLogger(t), + coordinator, + &test.bhs, + lp, + 0, + test.wait, + test.lookback, + 600*time.Second, + func(ctx context.Context) (uint64, error) { + return test.latest, nil }) + + // Run feeder and assert correct results. + err = feeder.Run(testutils.Context(t)) + if test.expectedErrMsg == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, test.expectedErrMsg) } + require.ElementsMatch(t, test.expectedStored, test.bhs.Stored) + require.ElementsMatch(t, test.expectedStoredMapBlocks, maps.Keys(feeder.stored)) } func TestFeederWithLogPollerVRFv2(t *testing.T) { + for _, test := range tests { + t.Run(test.name, test.testFeederWithLogPollerVRFv2) + } +} +func (test testCase) testFeederWithLogPollerVRFv2(t *testing.T) { var coordinatorAddress = common.HexToAddress("0x514910771AF9Ca656af840dff83E8264EcF986CA") - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - // Instantiate log poller & coordinator. - lp := &mocklp.LogPoller{} - lp.On("RegisterFilter", mock.Anything).Return(nil) - c, err := vrf_coordinator_v2.NewVRFCoordinatorV2(coordinatorAddress, nil) - require.NoError(t, err) - coordinator := &V2Coordinator{ - c: c, - lp: lp, - } - - // Assert search window. - latest := int64(test.latest) - fromBlock := mathutil.Max(latest-int64(test.lookback), 0) - toBlock := mathutil.Max(latest-int64(test.wait), 0) - - // Construct request logs. - var requestLogs []logpoller.Log - for _, r := range test.requests { - if r.Block < uint64(fromBlock) || r.Block > uint64(toBlock) { - continue // do not include blocks outside our search window - } - reqId, ok := big.NewInt(0).SetString(r.ID, 10) - require.True(t, ok) - requestLogs = append( - requestLogs, - newRandomnessRequestedLogV2(t, r.Block, reqId, coordinatorAddress), - ) - } - - // Construct fulfillment logs. 
- var fulfillmentLogs []logpoller.Log - for _, r := range test.fulfillments { - reqId, ok := big.NewInt(0).SetString(r.ID, 10) - require.True(t, ok) - fulfillmentLogs = append( - fulfillmentLogs, - newRandomnessFulfilledLogV2(t, r.Block, reqId, coordinatorAddress), - ) - } - - // Mock log poller. - lp.On("LatestBlock", mock.Anything). - Return(latest, nil) - lp.On( - "LogsWithSigs", - fromBlock, - toBlock, - []common.Hash{ - vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{}.Topic(), - }, - coordinatorAddress, - mock.Anything, - ).Return(requestLogs, nil) - lp.On( - "LogsWithSigs", - fromBlock, - latest, - []common.Hash{ - vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled{}.Topic(), - }, - coordinatorAddress, - mock.Anything, - ).Return(fulfillmentLogs, nil) - - // Instantiate feeder. - feeder := NewFeeder( - logger.TestLogger(t), - coordinator, - &test.bhs, - lp, - 0, - test.wait, - test.lookback, - 600*time.Second, - func(ctx context.Context) (uint64, error) { - return test.latest, nil - }) - - // Run feeder and assert correct results. - err = feeder.Run(testutils.Context(t)) - if test.expectedErrMsg == "" { - require.NoError(t, err) - } else { - require.EqualError(t, err, test.expectedErrMsg) - } - require.ElementsMatch(t, test.expectedStored, test.bhs.Stored) - require.ElementsMatch(t, test.expectedStoredMapBlocks, maps.Keys(feeder.stored)) + // Instantiate log poller & coordinator. + lp := &mocklp.LogPoller{} + lp.On("RegisterFilter", mock.Anything).Return(nil) + c, err := vrf_coordinator_v2.NewVRFCoordinatorV2(coordinatorAddress, nil) + require.NoError(t, err) + coordinator := &V2Coordinator{ + c: c, + lp: lp, + } + + // Assert search window. + latest := int64(test.latest) + fromBlock := mathutil.Max(latest-int64(test.lookback), 0) + toBlock := mathutil.Max(latest-int64(test.wait), 0) + + // Construct request logs. + var requestLogs []logpoller.Log + for _, r := range test.requests { + if r.Block < uint64(fromBlock) || r.Block > uint64(toBlock) { + continue // do not include blocks outside our search window + } + reqId, ok := big.NewInt(0).SetString(r.ID, 10) + require.True(t, ok) + requestLogs = append( + requestLogs, + newRandomnessRequestedLogV2(t, r.Block, reqId, coordinatorAddress), + ) + } + + // Construct fulfillment logs. + var fulfillmentLogs []logpoller.Log + for _, r := range test.fulfillments { + reqId, ok := big.NewInt(0).SetString(r.ID, 10) + require.True(t, ok) + fulfillmentLogs = append( + fulfillmentLogs, + newRandomnessFulfilledLogV2(t, r.Block, reqId, coordinatorAddress), + ) + } + + // Mock log poller. + lp.On("LatestBlock", mock.Anything). + Return(latest, nil) + lp.On( + "LogsWithSigs", + fromBlock, + toBlock, + []common.Hash{ + vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{}.Topic(), + }, + coordinatorAddress, + mock.Anything, + ).Return(requestLogs, nil) + lp.On( + "LogsWithSigs", + fromBlock, + latest, + []common.Hash{ + vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled{}.Topic(), + }, + coordinatorAddress, + mock.Anything, + ).Return(fulfillmentLogs, nil) + + // Instantiate feeder. + feeder := NewFeeder( + logger.TestLogger(t), + coordinator, + &test.bhs, + lp, + 0, + test.wait, + test.lookback, + 600*time.Second, + func(ctx context.Context) (uint64, error) { + return test.latest, nil }) + + // Run feeder and assert correct results. 
+ err = feeder.Run(testutils.Context(t)) + if test.expectedErrMsg == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, test.expectedErrMsg) } + require.ElementsMatch(t, test.expectedStored, test.bhs.Stored) + require.ElementsMatch(t, test.expectedStoredMapBlocks, maps.Keys(feeder.stored)) } func TestFeederWithLogPollerVRFv2Plus(t *testing.T) { + for _, test := range tests { + t.Run(test.name, test.testFeederWithLogPollerVRFv2Plus) + } +} +func (test testCase) testFeederWithLogPollerVRFv2Plus(t *testing.T) { var coordinatorAddress = common.HexToAddress("0x514910771AF9Ca656af840dff83E8264EcF986CA") - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - // Instantiate log poller & coordinator. - lp := &mocklp.LogPoller{} - lp.On("RegisterFilter", mock.Anything).Return(nil) - c, err := vrf_coordinator_v2plus.NewVRFCoordinatorV2Plus(coordinatorAddress, nil) - require.NoError(t, err) - coordinator := &V2PlusCoordinator{ - c: c, - lp: lp, - } - - // Assert search window. - latest := int64(test.latest) - fromBlock := mathutil.Max(latest-int64(test.lookback), 0) - toBlock := mathutil.Max(latest-int64(test.wait), 0) - - // Construct request logs. - var requestLogs []logpoller.Log - for _, r := range test.requests { - if r.Block < uint64(fromBlock) || r.Block > uint64(toBlock) { - continue // do not include blocks outside our search window - } - reqId, ok := big.NewInt(0).SetString(r.ID, 10) - require.True(t, ok) - requestLogs = append( - requestLogs, - newRandomnessRequestedLogV2Plus(t, r.Block, reqId, coordinatorAddress), - ) - } - - // Construct fulfillment logs. - var fulfillmentLogs []logpoller.Log - for _, r := range test.fulfillments { - reqId, ok := big.NewInt(0).SetString(r.ID, 10) - require.True(t, ok) - fulfillmentLogs = append( - fulfillmentLogs, - newRandomnessFulfilledLogV2Plus(t, r.Block, reqId, coordinatorAddress), - ) - } - - // Mock log poller. - lp.On("LatestBlock", mock.Anything). - Return(latest, nil) - lp.On( - "LogsWithSigs", - fromBlock, - toBlock, - []common.Hash{ - vrf_coordinator_v2plus.VRFCoordinatorV2PlusRandomWordsRequested{}.Topic(), - }, - coordinatorAddress, - mock.Anything, - ).Return(requestLogs, nil) - lp.On( - "LogsWithSigs", - fromBlock, - latest, - []common.Hash{ - vrf_coordinator_v2plus.VRFCoordinatorV2PlusRandomWordsFulfilled{}.Topic(), - }, - coordinatorAddress, - mock.Anything, - ).Return(fulfillmentLogs, nil) - - // Instantiate feeder. - feeder := NewFeeder( - logger.TestLogger(t), - coordinator, - &test.bhs, - lp, - 0, - test.wait, - test.lookback, - 600*time.Second, - func(ctx context.Context) (uint64, error) { - return test.latest, nil - }) - - // Run feeder and assert correct results. - err = feeder.Run(testutils.Context(t)) - if test.expectedErrMsg == "" { - require.NoError(t, err) - } else { - require.EqualError(t, err, test.expectedErrMsg) - } - require.ElementsMatch(t, test.expectedStored, test.bhs.Stored) - require.ElementsMatch(t, test.expectedStoredMapBlocks, maps.Keys(feeder.stored)) + // Instantiate log poller & coordinator. + lp := &mocklp.LogPoller{} + lp.On("RegisterFilter", mock.Anything).Return(nil) + c, err := vrf_coordinator_v2plus.NewVRFCoordinatorV2Plus(coordinatorAddress, nil) + require.NoError(t, err) + coordinator := &V2PlusCoordinator{ + c: c, + lp: lp, + } + + // Assert search window. + latest := int64(test.latest) + fromBlock := mathutil.Max(latest-int64(test.lookback), 0) + toBlock := mathutil.Max(latest-int64(test.wait), 0) + + // Construct request logs. 
+ var requestLogs []logpoller.Log + for _, r := range test.requests { + if r.Block < uint64(fromBlock) || r.Block > uint64(toBlock) { + continue // do not include blocks outside our search window + } + reqId, ok := big.NewInt(0).SetString(r.ID, 10) + require.True(t, ok) + requestLogs = append( + requestLogs, + newRandomnessRequestedLogV2Plus(t, r.Block, reqId, coordinatorAddress), + ) + } + + // Construct fulfillment logs. + var fulfillmentLogs []logpoller.Log + for _, r := range test.fulfillments { + reqId, ok := big.NewInt(0).SetString(r.ID, 10) + require.True(t, ok) + fulfillmentLogs = append( + fulfillmentLogs, + newRandomnessFulfilledLogV2Plus(t, r.Block, reqId, coordinatorAddress), + ) + } + + // Mock log poller. + lp.On("LatestBlock", mock.Anything). + Return(latest, nil) + lp.On( + "LogsWithSigs", + fromBlock, + toBlock, + []common.Hash{ + vrf_coordinator_v2plus.VRFCoordinatorV2PlusRandomWordsRequested{}.Topic(), + }, + coordinatorAddress, + mock.Anything, + ).Return(requestLogs, nil) + lp.On( + "LogsWithSigs", + fromBlock, + latest, + []common.Hash{ + vrf_coordinator_v2plus.VRFCoordinatorV2PlusRandomWordsFulfilled{}.Topic(), + }, + coordinatorAddress, + mock.Anything, + ).Return(fulfillmentLogs, nil) + + // Instantiate feeder. + feeder := NewFeeder( + logger.TestLogger(t), + coordinator, + &test.bhs, + lp, + 0, + test.wait, + test.lookback, + 600*time.Second, + func(ctx context.Context) (uint64, error) { + return test.latest, nil }) + + // Run feeder and assert correct results. + err = feeder.Run(testutils.Context(t)) + if test.expectedErrMsg == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, test.expectedErrMsg) } + require.ElementsMatch(t, test.expectedStored, test.bhs.Stored) + require.ElementsMatch(t, test.expectedStoredMapBlocks, maps.Keys(feeder.stored)) } func TestFeeder_CachesStoredBlocks(t *testing.T) { diff --git a/core/services/blockheaderfeeder/block_header_feeder_test.go b/core/services/blockheaderfeeder/block_header_feeder_test.go index 0e52ee9447d..6c1ec0946e7 100644 --- a/core/services/blockheaderfeeder/block_header_feeder_test.go +++ b/core/services/blockheaderfeeder/block_header_feeder_test.go @@ -16,25 +16,27 @@ import ( keystoremocks "github.com/smartcontractkit/chainlink/v2/core/services/keystore/mocks" ) +type testCase struct { + name string + requests []blockhashstore.Event + fulfillments []blockhashstore.Event + wait int + lookback int + latest uint64 + alreadyStored []uint64 + expectedStored []uint64 + expectedErrMsg string + getBatchSize uint16 + storeBatchSize uint16 + getBatchCallCount uint16 + storeBatchCallCount uint16 + storedEarliest bool + bhs blockhashstore.TestBHS + batchBHS blockhashstore.TestBatchBHS +} + func TestFeeder(t *testing.T) { - tests := []struct { - name string - requests []blockhashstore.Event - fulfillments []blockhashstore.Event - wait int - lookback int - latest uint64 - alreadyStored []uint64 - expectedStored []uint64 - expectedErrMsg string - getBatchSize uint16 - storeBatchSize uint16 - getBatchCallCount uint16 - storeBatchCallCount uint16 - storedEarliest bool - bhs blockhashstore.TestBHS - batchBHS blockhashstore.TestBatchBHS - }{ + tests := []testCase{ { name: "single missing block", requests: []blockhashstore.Event{{Block: 150, ID: "request"}}, @@ -182,51 +184,55 @@ func TestFeeder(t *testing.T) { } for _, test := range tests { - lggr := logger.TestLogger(t) - lggr.Debugf("running test case: %s", test.name) - coordinator := &blockhashstore.TestCoordinator{ - RequestEvents: test.requests, - 
FulfillmentEvents: test.fulfillments, - } + t.Run(test.name, test.testFeeder) + } +} - test.batchBHS.Stored = append(test.batchBHS.Stored, test.alreadyStored...) +func (test testCase) testFeeder(t *testing.T) { + lggr := logger.TestLogger(t) + lggr.Debugf("running test case: %s", test.name) + coordinator := &blockhashstore.TestCoordinator{ + RequestEvents: test.requests, + FulfillmentEvents: test.fulfillments, + } - blockHeaderProvider := &blockhashstore.TestBlockHeaderProvider{} - fromAddress := "0x469aA2CD13e037DC5236320783dCfd0e641c0559" - fromAddresses := []ethkey.EIP55Address{(ethkey.EIP55Address(fromAddress))} - ks := keystoremocks.NewEth(t) - ks.On("GetRoundRobinAddress", testutils.FixtureChainID, mock.Anything).Maybe().Return(common.HexToAddress(fromAddress), nil) + test.batchBHS.Stored = append(test.batchBHS.Stored, test.alreadyStored...) - feeder := NewBlockHeaderFeeder( - lggr, - coordinator, - &test.bhs, - &test.batchBHS, - blockHeaderProvider, - test.wait, - test.lookback, - func(ctx context.Context) (uint64, error) { - return test.latest, nil - }, - ks, - test.getBatchSize, - test.storeBatchSize, - fromAddresses, - testutils.FixtureChainID, - ) + blockHeaderProvider := &blockhashstore.TestBlockHeaderProvider{} + fromAddress := "0x469aA2CD13e037DC5236320783dCfd0e641c0559" + fromAddresses := []ethkey.EIP55Address{ethkey.EIP55Address(fromAddress)} + ks := keystoremocks.NewEth(t) + ks.On("GetRoundRobinAddress", testutils.FixtureChainID, mock.Anything).Maybe().Return(common.HexToAddress(fromAddress), nil) - err := feeder.Run(testutils.Context(t)) - if test.expectedErrMsg == "" { - require.NoError(t, err) - } else { - require.EqualError(t, err, test.expectedErrMsg) - } + feeder := NewBlockHeaderFeeder( + lggr, + coordinator, + &test.bhs, + &test.batchBHS, + blockHeaderProvider, + test.wait, + test.lookback, + func(ctx context.Context) (uint64, error) { + return test.latest, nil + }, + ks, + test.getBatchSize, + test.storeBatchSize, + fromAddresses, + testutils.FixtureChainID, + ) - require.ElementsMatch(t, test.expectedStored, test.batchBHS.Stored) - require.Equal(t, test.storedEarliest, test.bhs.StoredEarliest) - require.Equal(t, test.getBatchCallCount, test.batchBHS.GetBlockhashesCallCounter) - require.Equal(t, test.storeBatchCallCount, test.batchBHS.StoreVerifyHeaderCallCounter) + err := feeder.Run(testutils.Context(t)) + if test.expectedErrMsg == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, test.expectedErrMsg) } + + require.ElementsMatch(t, test.expectedStored, test.batchBHS.Stored) + require.Equal(t, test.storedEarliest, test.bhs.StoredEarliest) + require.Equal(t, test.getBatchCallCount, test.batchBHS.GetBlockhashesCallCounter) + require.Equal(t, test.storeBatchCallCount, test.batchBHS.StoreVerifyHeaderCallCounter) } func TestFeeder_CachesStoredBlocks(t *testing.T) { @@ -238,7 +244,7 @@ func TestFeeder_CachesStoredBlocks(t *testing.T) { batchBHS := &blockhashstore.TestBatchBHS{Stored: []uint64{75}} blockHeaderProvider := &blockhashstore.TestBlockHeaderProvider{} fromAddress := "0x469aA2CD13e037DC5236320783dCfd0e641c0559" - fromAddresses := []ethkey.EIP55Address{(ethkey.EIP55Address(fromAddress))} + fromAddresses := []ethkey.EIP55Address{ethkey.EIP55Address(fromAddress)} ks := keystoremocks.NewEth(t) ks.On("GetRoundRobinAddress", testutils.FixtureChainID, mock.Anything).Maybe().Return(common.HexToAddress(fromAddress), nil) diff --git a/core/services/cron/cron.go b/core/services/cron/cron.go index 56c67096e50..e89dd1ceabd 100644 --- 
a/core/services/cron/cron.go +++ b/core/services/cron/cron.go @@ -79,7 +79,7 @@ func (cr *Cron) runPipeline() { run := pipeline.NewRun(*cr.jobSpec.PipelineSpec, vars) - _, err := cr.pipelineRunner.Run(ctx, &run, cr.logger, false, nil) + _, err := cr.pipelineRunner.Run(ctx, run, cr.logger, false, nil) if err != nil { cr.logger.Errorf("Error executing new run for jobSpec ID %v", cr.jobSpec.ID) } diff --git a/core/services/directrequest/delegate.go b/core/services/directrequest/delegate.go index f0ba5276ce7..39564b8c8bd 100644 --- a/core/services/directrequest/delegate.go +++ b/core/services/directrequest/delegate.go @@ -370,7 +370,7 @@ func (l *listener) handleOracleRequest(request *operator_wrapper.OperatorOracleR }, }) run := pipeline.NewRun(*l.job.PipelineSpec, vars) - _, err := l.pipelineRunner.Run(ctx, &run, l.logger, true, func(tx pg.Queryer) error { + _, err := l.pipelineRunner.Run(ctx, run, l.logger, true, func(tx pg.Queryer) error { l.markLogConsumed(lb, pg.WithQueryer(tx)) return nil }) diff --git a/core/services/fluxmonitorv2/flux_monitor.go b/core/services/fluxmonitorv2/flux_monitor.go index 11e9b25be04..0b09655707d 100644 --- a/core/services/fluxmonitorv2/flux_monitor.go +++ b/core/services/fluxmonitorv2/flux_monitor.go @@ -769,7 +769,7 @@ func (fm *FluxMonitor) respondToNewRoundLog(log flux_aggregator_wrapper.FluxAggr } err = fm.q.Transaction(func(tx pg.Queryer) error { - if err2 := fm.runner.InsertFinishedRun(&run, false, pg.WithQueryer(tx)); err2 != nil { + if err2 := fm.runner.InsertFinishedRun(run, false, pg.WithQueryer(tx)); err2 != nil { return err2 } if err2 := fm.queueTransactionForTxm(tx, run.ID, answer, roundState.RoundId, &log); err2 != nil { @@ -993,7 +993,7 @@ func (fm *FluxMonitor) pollIfEligible(pollReq PollRequestType, deviationChecker } err = fm.q.Transaction(func(tx pg.Queryer) error { - if err2 := fm.runner.InsertFinishedRun(&run, true, pg.WithQueryer(tx)); err2 != nil { + if err2 := fm.runner.InsertFinishedRun(run, true, pg.WithQueryer(tx)); err2 != nil { return err2 } if err2 := fm.queueTransactionForTxm(tx, run.ID, answer, roundState.RoundId, nil); err2 != nil { diff --git a/core/services/fluxmonitorv2/flux_monitor_test.go b/core/services/fluxmonitorv2/flux_monitor_test.go index e165ce68205..27d40cd69c7 100644 --- a/core/services/fluxmonitorv2/flux_monitor_test.go +++ b/core/services/fluxmonitorv2/flux_monitor_test.go @@ -455,7 +455,7 @@ func TestFluxMonitor_PollIfEligible(t *testing.T) { }, }, ), mock.Anything). - Return(run, pipeline.TaskRunResults{ + Return(&run, pipeline.TaskRunResults{ { Result: pipeline.Result{ Value: decimal.NewFromInt(answers.polledAnswer), @@ -584,7 +584,7 @@ func TestPollingDeviationChecker_BuffersLogs(t *testing.T) { tm.orm.On("MostRecentFluxMonitorRoundID", contractAddress).Return(uint32(4), nil) // Round 1 - run := pipeline.Run{ID: 1} + run := &pipeline.Run{ID: 1} tm.orm. On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(1), mock.Anything). Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ @@ -624,7 +624,7 @@ func TestPollingDeviationChecker_BuffersLogs(t *testing.T) { Return(nil).Once() // Round 3 - run = pipeline.Run{ID: 2} + run = &pipeline.Run{ID: 2} tm.orm. On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(3), mock.Anything). Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ @@ -663,7 +663,7 @@ func TestPollingDeviationChecker_BuffersLogs(t *testing.T) { Return(nil).Once() // Round 4 - run = pipeline.Run{ID: 3} + run = &pipeline.Run{ID: 3} tm.orm. 
On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(4), mock.Anything). Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ @@ -1484,7 +1484,7 @@ func TestFluxMonitor_DoesNotDoubleSubmit(t *testing.T) { answer = 100 ) - run := pipeline.Run{ID: 1} + run := &pipeline.Run{ID: 1} tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() tm.logBroadcaster.On("IsConnected").Return(true).Maybe() @@ -1600,7 +1600,7 @@ func TestFluxMonitor_DoesNotDoubleSubmit(t *testing.T) { answer = 100 ) - run := pipeline.Run{ID: 1} + run := &pipeline.Run{ID: 1} tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() tm.logBroadcaster.On("IsConnected").Return(true).Maybe() @@ -1696,7 +1696,7 @@ func TestFluxMonitor_DoesNotDoubleSubmit(t *testing.T) { roundID = 3 answer = 100 ) - run := pipeline.Run{ID: 1} + run := &pipeline.Run{ID: 1} tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() tm.logBroadcaster.On("IsConnected").Return(true).Maybe() @@ -1901,7 +1901,7 @@ func TestFluxMonitor_DrumbeatTicker(t *testing.T) { }, }, ), mock.Anything). - Return(pipeline.Run{ID: runID}, pipeline.TaskRunResults{ + Return(&pipeline.Run{ID: runID}, pipeline.TaskRunResults{ { Result: pipeline.Result{ Value: decimal.NewFromInt(fetchedAnswer), diff --git a/core/services/gateway/handlers/functions/allowlist.go b/core/services/gateway/handlers/functions/allowlist.go index 19bc61aabad..0ee9b5bcfb9 100644 --- a/core/services/gateway/handlers/functions/allowlist.go +++ b/core/services/gateway/handlers/functions/allowlist.go @@ -2,6 +2,7 @@ package functions import ( "context" + "encoding/hex" "fmt" "math/big" "sync" @@ -177,7 +178,7 @@ func (a *onchainAllowlist) updateFromContractV1(ctx context.Context, blockNum *b if err != nil { return errors.Wrap(err, "unexpected error during functions_router.GetAllowListId") } - a.lggr.Debugw("successfully fetched allowlist route ID", "id", tosID) + a.lggr.Debugw("successfully fetched allowlist route ID", "id", hex.EncodeToString(tosID[:])) if tosID == [32]byte{} { return errors.New("allowlist route ID has not been set") } diff --git a/core/services/keeper/upkeep_executer.go b/core/services/keeper/upkeep_executer.go index 249d3217469..435b245792c 100644 --- a/core/services/keeper/upkeep_executer.go +++ b/core/services/keeper/upkeep_executer.go @@ -223,7 +223,7 @@ func (ex *UpkeepExecuter) execute(upkeep UpkeepRegistration, head *evmtypes.Head ex.job.PipelineSpec.DotDagSource = pipeline.KeepersObservationSource run := pipeline.NewRun(*ex.job.PipelineSpec, vars) - if _, err := ex.pr.Run(ctxService, &run, svcLogger, true, nil); err != nil { + if _, err := ex.pr.Run(ctxService, run, svcLogger, true, nil); err != nil { svcLogger.Error(errors.Wrap(err, "failed executing run")) return } diff --git a/core/services/ocr/delegate.go b/core/services/ocr/delegate.go index 9cb736a58f3..9ed22d01e72 100644 --- a/core/services/ocr/delegate.go +++ b/core/services/ocr/delegate.go @@ -274,7 +274,7 @@ func (d *Delegate) ServicesForSpec(jb job.Job, qopts ...pg.QOpt) (services []job effectiveTransmitterAddress, ) - runResults := make(chan pipeline.Run, chain.Config().JobPipeline().ResultWriteQueueDepth()) + runResults := make(chan *pipeline.Run, chain.Config().JobPipeline().ResultWriteQueueDepth()) var configOverrider ocrtypes.ConfigOverrider configOverriderService, err := d.maybeCreateConfigOverrider(lggr, chain, 
concreteSpec.ContractAddress) diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index 2d1ff41ac12..2086eaa6a80 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -290,7 +290,7 @@ func (d *Delegate) cleanupEVM(jb job.Job, q pg.Queryer, relayID relay.ID) error rargs := types.RelayArgs{ ExternalJobID: jb.ExternalJobID, - JobID: spec.ID, + JobID: jb.ID, ContractID: spec.ContractID, New: false, RelayConfig: spec.RelayConfig.Bytes(), @@ -401,7 +401,7 @@ func (d *Delegate) ServicesForSpec(jb job.Job, qopts ...pg.QOpt) ([]job.ServiceC spec.CaptureEATelemetry = d.cfg.OCR2().CaptureEATelemetry() - runResults := make(chan pipeline.Run, d.cfg.JobPipeline().ResultWriteQueueDepth()) + runResults := make(chan *pipeline.Run, d.cfg.JobPipeline().ResultWriteQueueDepth()) ctx := lggrCtx.ContextWithValues(context.Background()) switch spec.PluginType { @@ -479,7 +479,7 @@ func (d *Delegate) newServicesMercury( ctx context.Context, lggr logger.SugaredLogger, jb job.Job, - runResults chan pipeline.Run, + runResults chan *pipeline.Run, bootstrapPeers []commontypes.BootstrapperLocator, kb ocr2key.KeyBundle, ocrDB *db, @@ -517,7 +517,7 @@ func (d *Delegate) newServicesMercury( provider, err2 := relayer.NewPluginProvider(ctx, types.RelayArgs{ ExternalJobID: jb.ExternalJobID, - JobID: spec.ID, + JobID: jb.ID, ContractID: spec.ContractID, New: d.isNewlyCreatedJob, RelayConfig: spec.RelayConfig.Bytes(), @@ -565,7 +565,7 @@ func (d *Delegate) newServicesMedian( ctx context.Context, lggr logger.SugaredLogger, jb job.Job, - runResults chan pipeline.Run, + runResults chan *pipeline.Run, bootstrapPeers []commontypes.BootstrapperLocator, kb ocr2key.KeyBundle, ocrDB *db, @@ -632,7 +632,7 @@ func (d *Delegate) newServicesDKG( dkgProvider, err2 := ocr2vrfRelayer.NewDKGProvider( types.RelayArgs{ ExternalJobID: jb.ExternalJobID, - JobID: spec.ID, + JobID: jb.ID, ContractID: spec.ContractID, New: d.isNewlyCreatedJob, RelayConfig: spec.RelayConfig.Bytes(), @@ -677,7 +677,7 @@ func (d *Delegate) newServicesDKG( func (d *Delegate) newServicesOCR2VRF( lggr logger.SugaredLogger, jb job.Job, - runResults chan pipeline.Run, + runResults chan *pipeline.Run, bootstrapPeers []commontypes.BootstrapperLocator, kb ocr2key.KeyBundle, ocrDB *db, @@ -717,7 +717,7 @@ func (d *Delegate) newServicesOCR2VRF( vrfProvider, err2 := ocr2vrfRelayer.NewOCR2VRFProvider( types.RelayArgs{ ExternalJobID: jb.ExternalJobID, - JobID: spec.ID, + JobID: jb.ID, ContractID: spec.ContractID, New: d.isNewlyCreatedJob, RelayConfig: spec.RelayConfig.Bytes(), @@ -732,7 +732,7 @@ func (d *Delegate) newServicesOCR2VRF( dkgProvider, err2 := ocr2vrfRelayer.NewDKGProvider( types.RelayArgs{ ExternalJobID: jb.ExternalJobID, - JobID: spec.ID, + JobID: jb.ID, ContractID: cfg.DKGContractAddress, RelayConfig: spec.RelayConfig.Bytes(), }, types.PluginArgs{ @@ -865,7 +865,7 @@ func (d *Delegate) newServicesOCR2VRF( func (d *Delegate) newServicesOCR2Keepers( lggr logger.SugaredLogger, jb job.Job, - runResults chan pipeline.Run, + runResults chan *pipeline.Run, bootstrapPeers []commontypes.BootstrapperLocator, kb ocr2key.KeyBundle, ocrDB *db, @@ -895,7 +895,7 @@ func (d *Delegate) newServicesOCR2Keepers( func (d *Delegate) newServicesOCR2Keepers21( lggr logger.SugaredLogger, jb job.Job, - runResults chan pipeline.Run, + runResults chan *pipeline.Run, bootstrapPeers []commontypes.BootstrapperLocator, kb ocr2key.KeyBundle, ocrDB *db, @@ -1014,7 +1014,7 @@ func (d *Delegate) newServicesOCR2Keepers21( func (d *Delegate) 
newServicesOCR2Keepers20( lggr logger.SugaredLogger, jb job.Job, - runResults chan pipeline.Run, + runResults chan *pipeline.Run, bootstrapPeers []commontypes.BootstrapperLocator, kb ocr2key.KeyBundle, ocrDB *db, @@ -1148,7 +1148,7 @@ func (d *Delegate) newServicesOCR2Keepers20( func (d *Delegate) newServicesOCR2Functions( lggr logger.SugaredLogger, jb job.Job, - runResults chan pipeline.Run, + runResults chan *pipeline.Run, bootstrapPeers []commontypes.BootstrapperLocator, kb ocr2key.KeyBundle, functionsOcrDB *db, @@ -1175,7 +1175,7 @@ func (d *Delegate) newServicesOCR2Functions( chain, types.RelayArgs{ ExternalJobID: jb.ExternalJobID, - JobID: spec.ID, + JobID: jb.ID, ContractID: spec.ContractID, RelayConfig: spec.RelayConfig.Bytes(), New: d.isNewlyCreatedJob, diff --git a/core/services/ocr2/plugins/functions/reporting.go b/core/services/ocr2/plugins/functions/reporting.go index 9f6c6848edf..f2e2f86aba2 100644 --- a/core/services/ocr2/plugins/functions/reporting.go +++ b/core/services/ocr2/plugins/functions/reporting.go @@ -62,6 +62,11 @@ var ( Help: "Metric to track number of reporting plugin Report calls", }, []string{"jobID"}) + promReportingPluginsReportNumObservations = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "functions_reporting_plugin_report_num_observations", + Help: "Metric to track number of observations available in the report phase", + }, []string{"jobID"}) + promReportingAcceptReports = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "functions_reporting_plugin_accept", Help: "Metric to track number of accepting reports", @@ -265,6 +270,7 @@ func (r *functionsReporting) Report(ctx context.Context, ts types.ReportTimestam "oracleID": r.genericConfig.OracleID, "nObservations": len(obs), }) + promReportingPluginsReportNumObservations.WithLabelValues(r.jobID.String()).Set(float64(len(obs))) queryProto := &encoding.Query{} err := proto.Unmarshal(query, queryProto) diff --git a/core/services/ocr2/plugins/median/services.go b/core/services/ocr2/plugins/median/services.go index e435ee747f5..8d121083f3f 100644 --- a/core/services/ocr2/plugins/median/services.go +++ b/core/services/ocr2/plugins/median/services.go @@ -49,7 +49,7 @@ func NewMedianServices(ctx context.Context, isNewlyCreatedJob bool, relayer loop.Relayer, pipelineRunner pipeline.Runner, - runResults chan pipeline.Run, + runResults chan *pipeline.Run, lggr logger.Logger, argsNoPlugin libocr.OCR2OracleArgs, cfg MedianConfig, @@ -70,7 +70,7 @@ func NewMedianServices(ctx context.Context, provider, err := relayer.NewPluginProvider(ctx, types.RelayArgs{ ExternalJobID: jb.ExternalJobID, - JobID: spec.ID, + JobID: jb.ID, ContractID: spec.ContractID, New: isNewlyCreatedJob, RelayConfig: spec.RelayConfig.Bytes(), diff --git a/core/services/ocr2/plugins/mercury/plugin.go b/core/services/ocr2/plugins/mercury/plugin.go index 05e7e968f8b..31fb8ab8c4b 100644 --- a/core/services/ocr2/plugins/mercury/plugin.go +++ b/core/services/ocr2/plugins/mercury/plugin.go @@ -32,7 +32,7 @@ func NewServices( jb job.Job, ocr2Provider relaytypes.MercuryProvider, pipelineRunner pipeline.Runner, - runResults chan pipeline.Run, + runResults chan *pipeline.Run, lggr logger.Logger, argsNoPlugin libocr2.MercuryOracleArgs, cfg Config, diff --git a/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/recoverer.go b/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/recoverer.go index c3c1b99787b..3994f1d8413 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/recoverer.go +++ 
b/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/recoverer.go @@ -375,7 +375,7 @@ func (r *logRecoverer) recoverFilter(ctx context.Context, f upkeepFilter, startB // If recoverer is lagging by a lot (more than 100x recoveryLogsBuffer), allow // a range of recoveryLogsBurst // Exploratory: Store lastRePollBlock in DB to prevent bursts during restarts - // (while also taking into account exisitng pending payloads) + // (while also taking into account existing pending payloads) end = start + recoveryLogsBurst } if end > offsetBlock { diff --git a/core/services/ocr2/plugins/ocr2keeper/evm21/registry_check_pipeline.go b/core/services/ocr2/plugins/ocr2keeper/evm21/registry_check_pipeline.go index db50b322147..d3530994702 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evm21/registry_check_pipeline.go +++ b/core/services/ocr2/plugins/ocr2keeper/evm21/registry_check_pipeline.go @@ -31,6 +31,9 @@ func (r *EvmRegistry) CheckUpkeeps(ctx context.Context, keys ...ocr2keepers.Upke for i := range keys { if keys[i].Trigger.BlockNumber == 0 { // check block was not populated, use latest latest := r.bs.latestBlock.Load() + if latest == nil { + return nil, fmt.Errorf("no latest block available") + } copy(keys[i].Trigger.BlockHash[:], latest.Hash[:]) keys[i].Trigger.BlockNumber = latest.Number r.lggr.Debugf("Check upkeep key had no trigger block number, using latest block %v", keys[i].Trigger.BlockNumber) @@ -124,6 +127,13 @@ func (r *EvmRegistry) verifyCheckBlock(_ context.Context, checkBlock, upkeepId * func (r *EvmRegistry) verifyLogExists(upkeepId *big.Int, p ocr2keepers.UpkeepPayload) (encoding.UpkeepFailureReason, encoding.PipelineExecutionState, bool) { logBlockNumber := int64(p.Trigger.LogTriggerExtension.BlockNumber) logBlockHash := common.BytesToHash(p.Trigger.LogTriggerExtension.BlockHash[:]) + checkBlockHash := common.BytesToHash(p.Trigger.BlockHash[:]) + if checkBlockHash.String() == logBlockHash.String() { + // log verification would be covered by checkBlock verification as they are the same. Return early from + // log verification.
This also helps in preventing some racy conditions when rpc does not return the tx receipt + // for a very new log + return encoding.UpkeepFailureReasonNone, encoding.NoPipelineError, false + } // if log block number is populated, check log block number and block hash if logBlockNumber != 0 { h, ok := r.bs.queryBlocksMap(logBlockNumber) @@ -242,13 +252,17 @@ func (r *EvmRegistry) checkUpkeeps(ctx context.Context, payloads []ocr2keepers.U for i, req := range checkReqs { index := indices[i] if req.Error != nil { + latestBlockNumber := int64(0) latestBlock := r.bs.latestBlock.Load() + if latestBlock != nil { + latestBlockNumber = int64(latestBlock.Number) + } checkBlock, _, _ := r.getBlockAndUpkeepId(payloads[index].UpkeepID, payloads[index].Trigger) // Exploratory: remove reliance on primitive way of checking errors blockNotFound := (strings.Contains(req.Error.Error(), "header not found") || strings.Contains(req.Error.Error(), "missing trie node")) - if blockNotFound && int64(latestBlock.Number)-checkBlock.Int64() > checkBlockTooOldRange { + if blockNotFound && latestBlockNumber-checkBlock.Int64() > checkBlockTooOldRange { // Check block not found in RPC and it is too old, non-retryable error - r.lggr.Warnf("block not found error encountered in check result for upkeepId %s, check block %d, latest block %d: %s", results[index].UpkeepID.String(), checkBlock.Int64(), int64(latestBlock.Number), req.Error) + r.lggr.Warnf("block not found error encountered in check result for upkeepId %s, check block %d, latest block %d: %s", results[index].UpkeepID.String(), checkBlock.Int64(), latestBlockNumber, req.Error) results[index].Retryable = false results[index].PipelineExecutionState = uint8(encoding.CheckBlockTooOld) } else { diff --git a/core/services/ocr2/plugins/ocr2keeper/evm21/registry_check_pipeline_test.go b/core/services/ocr2/plugins/ocr2keeper/evm21/registry_check_pipeline_test.go index cdb5e50b5b1..ee213643194 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evm21/registry_check_pipeline_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evm21/registry_check_pipeline_test.go @@ -75,7 +75,7 @@ func TestRegistry_VerifyCheckBlock(t *testing.T) { tests := []struct { name string checkBlock *big.Int - latestBlock ocr2keepers.BlockKey + latestBlock *ocr2keepers.BlockKey upkeepId *big.Int checkHash common.Hash payload ocr2keepers.UpkeepPayload @@ -88,7 +88,7 @@ func TestRegistry_VerifyCheckBlock(t *testing.T) { { name: "for an invalid check block number, if hash does not match the check hash, return CheckBlockInvalid", checkBlock: big.NewInt(500), - latestBlock: ocr2keepers.BlockKey{Number: 560}, + latestBlock: &ocr2keepers.BlockKey{Number: 560}, upkeepId: big.NewInt(12345), checkHash: common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"), payload: ocr2keepers.UpkeepPayload{ @@ -112,7 +112,7 @@ func TestRegistry_VerifyCheckBlock(t *testing.T) { { name: "for an invalid check block number, if hash does match the check hash, return NoPipelineError", checkBlock: big.NewInt(500), - latestBlock: ocr2keepers.BlockKey{Number: 560}, + latestBlock: &ocr2keepers.BlockKey{Number: 560}, upkeepId: big.NewInt(12345), checkHash: common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"), payload: ocr2keepers.UpkeepPayload{ @@ -136,7 +136,7 @@ func TestRegistry_VerifyCheckBlock(t *testing.T) { { name: "check block hash does not match", checkBlock: big.NewInt(500), - latestBlock: ocr2keepers.BlockKey{Number: 560}, + latestBlock: 
&ocr2keepers.BlockKey{Number: 560}, upkeepId: big.NewInt(12345), checkHash: common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"), payload: ocr2keepers.UpkeepPayload{ @@ -161,7 +161,7 @@ func TestRegistry_VerifyCheckBlock(t *testing.T) { { name: "check block is valid", checkBlock: big.NewInt(500), - latestBlock: ocr2keepers.BlockKey{Number: 560}, + latestBlock: &ocr2keepers.BlockKey{Number: 560}, upkeepId: big.NewInt(12345), checkHash: common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"), payload: ocr2keepers.UpkeepPayload{ @@ -182,7 +182,7 @@ func TestRegistry_VerifyCheckBlock(t *testing.T) { latestBlock: atomic.Pointer[ocr2keepers.BlockKey]{}, blocks: tc.blocks, } - bs.latestBlock.Store(&tc.latestBlock) + bs.latestBlock.Store(tc.latestBlock) e := &EvmRegistry{ lggr: lggr, bs: bs, @@ -392,7 +392,7 @@ func TestRegistry_CheckUpkeeps(t *testing.T) { name string inputs []ocr2keepers.UpkeepPayload blocks map[int64]string - latestBlock ocr2keepers.BlockKey + latestBlock *ocr2keepers.BlockKey results []ocr2keepers.CheckResult err error ethCalls map[string]bool @@ -427,7 +427,7 @@ func TestRegistry_CheckUpkeeps(t *testing.T) { 570: "0x1222d75217e2dd461cc77e4091c37abe76277430d97f1963a822b4e94ebb83fc", 575: "0x9840e5b709bfccf6a1b44f34c884bc39403f57923f3f5ead6243cc090546b857", }, - latestBlock: ocr2keepers.BlockKey{Number: 580}, + latestBlock: &ocr2keepers.BlockKey{Number: 580}, results: []ocr2keepers.CheckResult{ { PipelineExecutionState: uint8(encoding.CheckBlockInvalid), @@ -494,7 +494,7 @@ func TestRegistry_CheckUpkeeps(t *testing.T) { latestBlock: atomic.Pointer[ocr2keepers.BlockKey]{}, blocks: tc.blocks, } - bs.latestBlock.Store(&tc.latestBlock) + bs.latestBlock.Store(tc.latestBlock) e := &EvmRegistry{ lggr: lggr, bs: bs, diff --git a/core/services/ocr2/plugins/s4/plugin.go b/core/services/ocr2/plugins/s4/plugin.go index 0aca93e55e6..7e4b91be97e 100644 --- a/core/services/ocr2/plugins/s4/plugin.go +++ b/core/services/ocr2/plugins/s4/plugin.go @@ -226,9 +226,10 @@ func (c *plugin) Report(_ context.Context, ts types.ReportTimestamp, _ types.Que promReportingPluginsReportRowsCount.WithLabelValues(c.config.ProductName).Set(float64(len(reportRows))) c.logger.Debug("S4StorageReporting Report", commontypes.LogFields{ - "epoch": ts.Epoch, - "round": ts.Round, - "nReportRows": len(reportRows), + "epoch": ts.Epoch, + "round": ts.Round, + "nReportRows": len(reportRows), + "nObservations": len(aos), }) return true, report, nil diff --git a/core/services/ocr2/plugins/threshold/decryption_queue.go b/core/services/ocr2/plugins/threshold/decryption_queue.go index 1ffc63e5898..442fcffe8b4 100644 --- a/core/services/ocr2/plugins/threshold/decryption_queue.go +++ b/core/services/ocr2/plugins/threshold/decryption_queue.go @@ -148,7 +148,7 @@ func (dq *decryptionQueue) GetRequests(requestCountLimit int, totalBytesLimit in pendingRequest, exists := dq.pendingRequests[string(ciphertextId)] if !exists { - dq.lggr.Debugf("pending decryption request for ciphertextId %s expired", ciphertextId) + dq.lggr.Debugf("decryption request for ciphertextId %s already processed or expired", ciphertextId) indicesToRemove[i] = struct{}{} continue } @@ -232,7 +232,7 @@ func (dq *decryptionQueue) SetResult(ciphertextId decryptionPlugin.CiphertextId, // Cache plaintext result in completedRequests map for cacheTimeoutMs to account for delayed Decrypt() calls timer := time.AfterFunc(dq.completedRequestsCacheTimeout, func() { - dq.lggr.Debugf("expired decryption result for 
ciphertextId %s from completedRequests cache", ciphertextId) + dq.lggr.Debugf("removing completed decryption result for ciphertextId %s from cache", ciphertextId) dq.mu.Lock() delete(dq.completedRequests, string(ciphertextId)) dq.mu.Unlock() diff --git a/core/services/ocr2/testhelpers/onchain_config.go b/core/services/ocr2/testhelpers/onchain_config.go new file mode 100644 index 00000000000..ec7e619b936 --- /dev/null +++ b/core/services/ocr2/testhelpers/onchain_config.go @@ -0,0 +1,31 @@ +package testhelpers + +import ( + "math/big" + + "github.com/smartcontractkit/libocr/bigbigendian" +) + +func GenerateDefaultOCR2OnchainConfig(minValue *big.Int, maxValue *big.Int) ([]byte, error) { + serializedConfig := make([]byte, 0) + + s1, err := bigbigendian.SerializeSigned(1, big.NewInt(1)) //version + if err != nil { + return nil, err + } + serializedConfig = append(serializedConfig, s1...) + + s2, err := bigbigendian.SerializeSigned(24, minValue) //min + if err != nil { + return nil, err + } + serializedConfig = append(serializedConfig, s2...) + + s3, err := bigbigendian.SerializeSigned(24, maxValue) //max + if err != nil { + return nil, err + } + serializedConfig = append(serializedConfig, s3...) + + return serializedConfig, nil +} diff --git a/core/services/ocrbootstrap/delegate.go b/core/services/ocrbootstrap/delegate.go index d530797367f..9f9efca68e2 100644 --- a/core/services/ocrbootstrap/delegate.go +++ b/core/services/ocrbootstrap/delegate.go @@ -76,10 +76,10 @@ func (d *Delegate) BeforeJobCreated(spec job.Job) { } // ServicesForSpec satisfies the job.Delegate interface. -func (d *Delegate) ServicesForSpec(jobSpec job.Job, qopts ...pg.QOpt) (services []job.ServiceCtx, err error) { - spec := jobSpec.BootstrapSpec +func (d *Delegate) ServicesForSpec(jb job.Job, qopts ...pg.QOpt) (services []job.ServiceCtx, err error) { + spec := jb.BootstrapSpec if spec == nil { - return nil, errors.Errorf("Bootstrap.Delegate expects an *job.BootstrapSpec to be present, got %v", jobSpec) + return nil, errors.Errorf("Bootstrap.Delegate expects an *job.BootstrapSpec to be present, got %v", jb) } if d.peerWrapper == nil { return nil, errors.New("cannot setup OCR2 job service, libp2p peer was missing") @@ -101,8 +101,8 @@ func (d *Delegate) ServicesForSpec(jobSpec job.Job, qopts ...pg.QOpt) (services } ctxVals := loop.ContextValues{ - JobID: jobSpec.ID, - JobName: jobSpec.Name.ValueOrZero(), + JobID: jb.ID, + JobName: jb.Name.ValueOrZero(), ContractID: spec.ContractID, FeedID: spec.FeedID, } @@ -121,8 +121,8 @@ func (d *Delegate) ServicesForSpec(jobSpec job.Job, qopts ...pg.QOpt) (services configProvider, err = relayer.NewPluginProvider( ctx, types.RelayArgs{ - ExternalJobID: jobSpec.ExternalJobID, - JobID: spec.ID, + ExternalJobID: jb.ExternalJobID, + JobID: jb.ID, ContractID: spec.ContractID, RelayConfig: spec.RelayConfig.Bytes(), New: d.isNewlyCreatedJob, @@ -134,8 +134,8 @@ func (d *Delegate) ServicesForSpec(jobSpec job.Job, qopts ...pg.QOpt) (services ) } else { configProvider, err = relayer.NewConfigProvider(ctx, types.RelayArgs{ - ExternalJobID: jobSpec.ExternalJobID, - JobID: spec.ID, + ExternalJobID: jb.ExternalJobID, + JobID: jb.ID, ContractID: spec.ContractID, New: d.isNewlyCreatedJob, RelayConfig: spec.RelayConfig.Bytes(), @@ -166,7 +166,7 @@ func (d *Delegate) ServicesForSpec(jobSpec job.Job, qopts ...pg.QOpt) (services Database: NewDB(d.db.DB, spec.ID, lggr), LocalConfig: lc, Logger: relaylogger.NewOCRWrapper(lggr.Named("OCRBootstrap"), d.ocr2Cfg.TraceLogging(), func(msg string) { - 
logger.Sugared(lggr).ErrorIf(d.jobORM.RecordError(jobSpec.ID, msg), "unable to record error") + logger.Sugared(lggr).ErrorIf(d.jobORM.RecordError(jb.ID, msg), "unable to record error") }), OffchainConfigDigester: configProvider.OffchainConfigDigester(), } diff --git a/core/services/ocrcommon/data_source.go b/core/services/ocrcommon/data_source.go index 4ea7cb7a9a9..ed832e45fcf 100644 --- a/core/services/ocrcommon/data_source.go +++ b/core/services/ocrcommon/data_source.go @@ -35,7 +35,7 @@ type inMemoryDataSource struct { type dataSourceBase struct { inMemoryDataSource - runResults chan<- pipeline.Run + runResults chan<- *pipeline.Run } // dataSource implements dataSourceBase with the proper Observe return type for ocr1 @@ -55,7 +55,7 @@ type ObservationTimestamp struct { ConfigDigest string } -func NewDataSourceV1(pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, runResults chan<- pipeline.Run, chEnhancedTelemetry chan EnhancedTelemetryData) ocr1types.DataSource { +func NewDataSourceV1(pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, runResults chan<- *pipeline.Run, chEnhancedTelemetry chan EnhancedTelemetryData) ocr1types.DataSource { return &dataSource{ dataSourceBase: dataSourceBase{ inMemoryDataSource: inMemoryDataSource{ @@ -70,7 +70,7 @@ func NewDataSourceV1(pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr lo } } -func NewDataSourceV2(pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, runResults chan<- pipeline.Run, enhancedTelemChan chan EnhancedTelemetryData) median.DataSource { +func NewDataSourceV2(pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, runResults chan<- *pipeline.Run, enhancedTelemChan chan EnhancedTelemetryData) median.DataSource { return &dataSourceV2{ dataSourceBase: dataSourceBase{ inMemoryDataSource: inMemoryDataSource{ @@ -113,7 +113,7 @@ func (ds *inMemoryDataSource) currentAnswer() (*big.Int, *big.Int) { // The context passed in here has a timeout of (ObservationTimeout + ObservationGracePeriod). // Upon context cancellation, its expected that we return any usable values within ObservationGracePeriod. -func (ds *inMemoryDataSource) executeRun(ctx context.Context, timestamp ObservationTimestamp) (pipeline.Run, pipeline.FinalResult, error) { +func (ds *inMemoryDataSource) executeRun(ctx context.Context, timestamp ObservationTimestamp) (*pipeline.Run, pipeline.FinalResult, error) { md, err := bridges.MarshalBridgeMetaData(ds.currentAnswer()) if err != nil { ds.lggr.Warnw("unable to attach metadata for run", "err", err) @@ -132,7 +132,7 @@ func (ds *inMemoryDataSource) executeRun(ctx context.Context, timestamp Observat run, trrs, err := ds.pipelineRunner.ExecuteRun(ctx, ds.spec, vars, ds.lggr) if err != nil { - return pipeline.Run{}, pipeline.FinalResult{}, errors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID) + return nil, pipeline.FinalResult{}, errors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID) } finalResult := trrs.FinalResult(ds.lggr) promSetBridgeParseMetrics(ds, &trrs) diff --git a/core/services/ocrcommon/data_source_test.go b/core/services/ocrcommon/data_source_test.go index f40637cd999..51a004f1f05 100644 --- a/core/services/ocrcommon/data_source_test.go +++ b/core/services/ocrcommon/data_source_test.go @@ -28,7 +28,7 @@ var ( func Test_InMemoryDataSource(t *testing.T) { runner := pipelinemocks.NewRunner(t) runner.On("ExecuteRun", mock.Anything, mock.AnythingOfType("pipeline.Spec"), mock.Anything, mock.Anything). 
- Return(pipeline.Run{}, pipeline.TaskRunResults{ + Return(&pipeline.Run{}, pipeline.TaskRunResults{ { Result: pipeline.Result{ Value: mockValue, @@ -65,7 +65,7 @@ func Test_InMemoryDataSourceWithProm(t *testing.T) { }}, []pipeline.Task{}, 2) runner.On("ExecuteRun", mock.Anything, mock.AnythingOfType("pipeline.Spec"), mock.Anything, mock.Anything). - Return(pipeline.Run{}, pipeline.TaskRunResults([]pipeline.TaskRunResult{ + Return(&pipeline.Run{}, pipeline.TaskRunResults([]pipeline.TaskRunResult{ { Task: &bridgeTask, Result: pipeline.Result{}, @@ -96,7 +96,7 @@ func Test_InMemoryDataSourceWithProm(t *testing.T) { func Test_NewDataSourceV2(t *testing.T) { runner := pipelinemocks.NewRunner(t) runner.On("ExecuteRun", mock.Anything, mock.AnythingOfType("pipeline.Spec"), mock.Anything, mock.Anything). - Return(pipeline.Run{}, pipeline.TaskRunResults{ + Return(&pipeline.Run{}, pipeline.TaskRunResults{ { Result: pipeline.Result{ Value: mockValue, @@ -106,18 +106,18 @@ func Test_NewDataSourceV2(t *testing.T) { }, }, nil) - resChan := make(chan pipeline.Run, 100) + resChan := make(chan *pipeline.Run, 100) ds := ocrcommon.NewDataSourceV2(runner, job.Job{}, pipeline.Spec{}, logger.TestLogger(t), resChan, nil) val, err := ds.Observe(testutils.Context(t), types.ReportTimestamp{}) require.NoError(t, err) - assert.Equal(t, mockValue, val.String()) // returns expected value after pipeline run - assert.Equal(t, pipeline.Run{}, <-resChan) // expected data properly passed to channel + assert.Equal(t, mockValue, val.String()) // returns expected value after pipeline run + assert.Equal(t, &pipeline.Run{}, <-resChan) // expected data properly passed to channel } func Test_NewDataSourceV1(t *testing.T) { runner := pipelinemocks.NewRunner(t) runner.On("ExecuteRun", mock.Anything, mock.AnythingOfType("pipeline.Spec"), mock.Anything, mock.Anything). 
- Return(pipeline.Run{}, pipeline.TaskRunResults{ + Return(&pipeline.Run{}, pipeline.TaskRunResults{ { Result: pipeline.Result{ Value: mockValue, @@ -127,10 +127,10 @@ func Test_NewDataSourceV1(t *testing.T) { }, }, nil) - resChan := make(chan pipeline.Run, 100) + resChan := make(chan *pipeline.Run, 100) ds := ocrcommon.NewDataSourceV1(runner, job.Job{}, pipeline.Spec{}, logger.TestLogger(t), resChan, nil) val, err := ds.Observe(testutils.Context(t), ocrtypes.ReportTimestamp{}) require.NoError(t, err) assert.Equal(t, mockValue, new(big.Int).Set(val).String()) // returns expected value after pipeline run - assert.Equal(t, pipeline.Run{}, <-resChan) // expected data properly passed to channel + assert.Equal(t, &pipeline.Run{}, <-resChan) // expected data properly passed to channel } diff --git a/core/services/ocrcommon/run_saver.go b/core/services/ocrcommon/run_saver.go index 7a7ea0c9d0a..3aa3aff876e 100644 --- a/core/services/ocrcommon/run_saver.go +++ b/core/services/ocrcommon/run_saver.go @@ -12,7 +12,7 @@ type RunResultSaver struct { utils.StartStopOnce maxSuccessfulRuns uint64 - runResults <-chan pipeline.Run + runResults <-chan *pipeline.Run pipelineRunner pipeline.Runner done chan struct{} logger logger.Logger @@ -24,7 +24,7 @@ func (r *RunResultSaver) HealthReport() map[string]error { func (r *RunResultSaver) Name() string { return r.logger.Name() } -func NewResultRunSaver(runResults <-chan pipeline.Run, pipelineRunner pipeline.Runner, done chan struct{}, +func NewResultRunSaver(runResults <-chan *pipeline.Run, pipelineRunner pipeline.Runner, done chan struct{}, logger logger.Logger, maxSuccessfulRuns uint64, ) *RunResultSaver { return &RunResultSaver{ @@ -51,7 +51,7 @@ func (r *RunResultSaver) Start(context.Context) error { r.logger.Tracew("RunSaver: saving job run", "run", run) // We do not want save successful TaskRuns as OCR runs very frequently so a lot of records // are produced and the successful TaskRuns do not provide value. - if err := r.pipelineRunner.InsertFinishedRun(&run, false); err != nil { + if err := r.pipelineRunner.InsertFinishedRun(run, false); err != nil { r.logger.Errorw("error inserting finished results", "err", err) } case <-r.done: @@ -73,7 +73,7 @@ func (r *RunResultSaver) Close() error { select { case run := <-r.runResults: r.logger.Infow("RunSaver: saving job run before exiting", "run", run) - if err := r.pipelineRunner.InsertFinishedRun(&run, false); err != nil { + if err := r.pipelineRunner.InsertFinishedRun(run, false); err != nil { r.logger.Errorw("error inserting finished results", "err", err) } default: diff --git a/core/services/ocrcommon/run_saver_test.go b/core/services/ocrcommon/run_saver_test.go index 0f24f93e97d..7d20a7a202e 100644 --- a/core/services/ocrcommon/run_saver_test.go +++ b/core/services/ocrcommon/run_saver_test.go @@ -14,7 +14,7 @@ import ( func TestRunSaver(t *testing.T) { pipelineRunner := mocks.NewRunner(t) - rr := make(chan pipeline.Run, 100) + rr := make(chan *pipeline.Run, 100) rs := NewResultRunSaver( rr, pipelineRunner, @@ -31,7 +31,7 @@ func TestRunSaver(t *testing.T) { args.Get(0).(*pipeline.Run).ID = int64(d) }). 
Once() - rr <- pipeline.Run{ID: int64(i)} + rr <- &pipeline.Run{ID: int64(i)} } require.NoError(t, rs.Close()) } diff --git a/core/services/ocrcommon/transmitter_pipeline.go b/core/services/ocrcommon/transmitter_pipeline.go index d07be5a5409..e62f745a941 100644 --- a/core/services/ocrcommon/transmitter_pipeline.go +++ b/core/services/ocrcommon/transmitter_pipeline.go @@ -81,7 +81,7 @@ func (t *pipelineTransmitter) CreateEthTransaction(ctx context.Context, toAddres t.spec.PipelineSpec.DotDagSource = txObservationSource run := pipeline.NewRun(*t.spec.PipelineSpec, vars) - if _, err := t.pr.Run(ctx, &run, t.lgr, true, nil); err != nil { + if _, err := t.pr.Run(ctx, run, t.lgr, true, nil); err != nil { return errors.Wrap(err, "Skipped OCR transmission") } diff --git a/core/services/pipeline/mocks/runner.go b/core/services/pipeline/mocks/runner.go index a43498c100e..e2cc70378e5 100644 --- a/core/services/pipeline/mocks/runner.go +++ b/core/services/pipeline/mocks/runner.go @@ -66,19 +66,21 @@ func (_m *Runner) ExecuteAndInsertFinishedRun(ctx context.Context, spec pipeline } // ExecuteRun provides a mock function with given fields: ctx, spec, vars, l -func (_m *Runner) ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (pipeline.Run, pipeline.TaskRunResults, error) { +func (_m *Runner) ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (*pipeline.Run, pipeline.TaskRunResults, error) { ret := _m.Called(ctx, spec, vars, l) - var r0 pipeline.Run + var r0 *pipeline.Run var r1 pipeline.TaskRunResults var r2 error - if rf, ok := ret.Get(0).(func(context.Context, pipeline.Spec, pipeline.Vars, logger.Logger) (pipeline.Run, pipeline.TaskRunResults, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, pipeline.Spec, pipeline.Vars, logger.Logger) (*pipeline.Run, pipeline.TaskRunResults, error)); ok { return rf(ctx, spec, vars, l) } - if rf, ok := ret.Get(0).(func(context.Context, pipeline.Spec, pipeline.Vars, logger.Logger) pipeline.Run); ok { + if rf, ok := ret.Get(0).(func(context.Context, pipeline.Spec, pipeline.Vars, logger.Logger) *pipeline.Run); ok { r0 = rf(ctx, spec, vars, l) } else { - r0 = ret.Get(0).(pipeline.Run) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pipeline.Run) + } } if rf, ok := ret.Get(1).(func(context.Context, pipeline.Spec, pipeline.Vars, logger.Logger) pipeline.TaskRunResults); ok { diff --git a/core/services/pipeline/runner.go b/core/services/pipeline/runner.go index 7a755e8fe11..3366a177ba8 100644 --- a/core/services/pipeline/runner.go +++ b/core/services/pipeline/runner.go @@ -38,7 +38,7 @@ type Runner interface { // ExecuteRun executes a new run in-memory according to a spec and returns the results. // We expect spec.JobID and spec.JobName to be set for logging/prometheus. - ExecuteRun(ctx context.Context, spec Spec, vars Vars, l logger.Logger) (run Run, trrs TaskRunResults, err error) + ExecuteRun(ctx context.Context, spec Spec, vars Vars, l logger.Logger) (run *Run, trrs TaskRunResults, err error) // InsertFinishedRun saves the run results in the database. 
InsertFinishedRun(run *Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error InsertFinishedRuns(runs []*Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error @@ -196,8 +196,8 @@ func (err ErrRunPanicked) Error() string { return fmt.Sprintf("goroutine panicked when executing run: %v", err.v) } -func NewRun(spec Spec, vars Vars) Run { - return Run{ +func NewRun(spec Spec, vars Vars) *Run { + return &Run{ State: RunStatusRunning, PipelineSpec: spec, PipelineSpecID: spec.ID, @@ -218,16 +218,16 @@ func (r *runner) ExecuteRun( spec Spec, vars Vars, l logger.Logger, -) (Run, TaskRunResults, error) { +) (*Run, TaskRunResults, error) { run := NewRun(spec, vars) - pipeline, err := r.initializePipeline(&run) + pipeline, err := r.initializePipeline(run) if err != nil { return run, nil, err } - taskRunResults := r.run(ctx, pipeline, &run, vars, l) + taskRunResults := r.run(ctx, pipeline, run, vars, l) if run.Pending { return run, nil, pkgerrors.Wrapf(err, "unexpected async run for spec ID %v, tried executing via ExecuteAndInsertFinishedRun", spec.ID) @@ -505,7 +505,7 @@ func (r *runner) ExecuteAndInsertFinishedRun(ctx context.Context, spec Spec, var return 0, finalResult, nil } - if err = r.orm.InsertFinishedRun(&run, saveSuccessfulTaskRuns); err != nil { + if err = r.orm.InsertFinishedRun(run, saveSuccessfulTaskRuns); err != nil { return 0, finalResult, pkgerrors.Wrapf(err, "error inserting finished results for spec ID %v", spec.ID) } return run.ID, finalResult, nil diff --git a/core/services/pipeline/runner_test.go b/core/services/pipeline/runner_test.go index 2554315a46c..22b70829ba5 100644 --- a/core/services/pipeline/runner_test.go +++ b/core/services/pipeline/runner_test.go @@ -635,7 +635,7 @@ ds5 [type=http method="GET" url="%s" index=2] }).Once() orm.On("StoreRun", mock.AnythingOfType("*pipeline.Run"), mock.Anything).Return(false, nil).Once() lggr := logger.TestLogger(t) - incomplete, err := r.Run(testutils.Context(t), &run, lggr, false, nil) + incomplete, err := r.Run(testutils.Context(t), run, lggr, false, nil) require.NoError(t, err) require.Len(t, run.PipelineTaskRuns, 9) // 3 tasks are suspended: ds1_parse, ds1_multiply, median. 
ds1 is present, but contains ErrPending require.Equal(t, true, incomplete) // still incomplete @@ -644,7 +644,7 @@ ds5 [type=http method="GET" url="%s" index=2] // Trigger run resumption with no new data orm.On("StoreRun", mock.AnythingOfType("*pipeline.Run")).Return(false, nil).Once() - incomplete, err = r.Run(testutils.Context(t), &run, lggr, false, nil) + incomplete, err = r.Run(testutils.Context(t), run, lggr, false, nil) require.NoError(t, err) require.Equal(t, true, incomplete) // still incomplete @@ -657,7 +657,7 @@ ds5 [type=http method="GET" url="%s" index=2] } // Trigger run resumption orm.On("StoreRun", mock.AnythingOfType("*pipeline.Run"), mock.Anything).Return(false, nil).Once() - incomplete, err = r.Run(testutils.Context(t), &run, lggr, false, nil) + incomplete, err = r.Run(testutils.Context(t), run, lggr, false, nil) require.NoError(t, err) require.Equal(t, false, incomplete) // done require.Len(t, run.PipelineTaskRuns, 12) @@ -773,7 +773,7 @@ ds5 [type=http method="GET" url="%s" index=2] }).Once() // StoreRun is called again to store the final result orm.On("StoreRun", mock.AnythingOfType("*pipeline.Run"), mock.Anything).Return(false, nil).Once() - incomplete, err := r.Run(testutils.Context(t), &run, logger.TestLogger(t), false, nil) + incomplete, err := r.Run(testutils.Context(t), run, logger.TestLogger(t), false, nil) require.NoError(t, err) require.Len(t, run.PipelineTaskRuns, 12) require.Equal(t, false, incomplete) // run is complete diff --git a/core/services/pipeline/scheduler_test.go b/core/services/pipeline/scheduler_test.go index bbb9ee80b9b..1d7da59da9d 100644 --- a/core/services/pipeline/scheduler_test.go +++ b/core/services/pipeline/scheduler_test.go @@ -135,7 +135,7 @@ func TestScheduler(t *testing.T) { require.NoError(t, err) vars := NewVarsFrom(nil) run := NewRun(Spec{}, vars) - s := newScheduler(p, &run, vars, logger.TestLogger(t)) + s := newScheduler(p, run, vars, logger.TestLogger(t)) go s.Run() diff --git a/core/services/relay/evm/config_poller.go b/core/services/relay/evm/config_poller.go index 6d8d4588d07..504155bf1e8 100644 --- a/core/services/relay/evm/config_poller.go +++ b/core/services/relay/evm/config_poller.go @@ -3,23 +3,41 @@ package evm import ( "context" "database/sql" + "fmt" "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator" + "github.com/smartcontractkit/libocr/gethwrappers2/ocrconfigurationstoreevmsimple" ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/pg" evmRelayTypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types" + "github.com/smartcontractkit/chainlink/v2/core/utils" ) -// ConfigSet Common to all OCR2 evm based contracts: https://github.com/smartcontractkit/libocr/blob/master/contract2/dev/OCR2Abstract.sol -var ConfigSet common.Hash +var ( + failedRPCContractCalls = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ocr2_failed_rpc_contract_calls", + Help: "Running count of failed RPC contract calls by chain/contract", + }, + []string{"chainID", 
"contractAddress"}, + ) +) + +var ( + // ConfigSet Common to all OCR2 evm based contracts: https://github.com/smartcontractkit/libocr/blob/master/contract2/dev/OCR2Abstract.sol + ConfigSet common.Hash -var defaultABI abi.ABI + defaultABI abi.ABI +) const configSetEventName = "ConfigSet" @@ -50,7 +68,7 @@ func configFromLog(logData []byte) (ocrtypes.ContractConfig, error) { var transmitAccounts []ocrtypes.Account for _, addr := range unpacked.Transmitters { - transmitAccounts = append(transmitAccounts, ocrtypes.Account(addr.String())) + transmitAccounts = append(transmitAccounts, ocrtypes.Account(addr.Hex())) } var signers []ocrtypes.OnchainPublicKey for _, addr := range unpacked.Signers { @@ -71,36 +89,63 @@ func configFromLog(logData []byte) (ocrtypes.ContractConfig, error) { } type configPoller struct { + utils.StartStopOnce + lggr logger.Logger filterName string destChainLogPoller logpoller.LogPoller - addr common.Address + client client.Client + + aggregatorContractAddr common.Address + aggregatorContract *ocr2aggregator.OCR2Aggregator + + // Some chains "manage" state bloat by deleting older logs. The ConfigStore + // contract allows us work around such restrictions. + configStoreContractAddr *common.Address + configStoreContract *ocrconfigurationstoreevmsimple.OCRConfigurationStoreEVMSimple } func configPollerFilterName(addr common.Address) string { return logpoller.FilterName("OCR2ConfigPoller", addr.String()) } -func NewConfigPoller(lggr logger.Logger, destChainPoller logpoller.LogPoller, addr common.Address) (evmRelayTypes.ConfigPoller, error) { - err := destChainPoller.RegisterFilter(logpoller.Filter{Name: configPollerFilterName(addr), EventSigs: []common.Hash{ConfigSet}, Addresses: []common.Address{addr}}) +func NewConfigPoller(lggr logger.Logger, client client.Client, destChainPoller logpoller.LogPoller, aggregatorContractAddr common.Address, configStoreAddr *common.Address) (evmRelayTypes.ConfigPoller, error) { + return newConfigPoller(lggr, client, destChainPoller, aggregatorContractAddr, configStoreAddr) +} + +func newConfigPoller(lggr logger.Logger, client client.Client, destChainPoller logpoller.LogPoller, aggregatorContractAddr common.Address, configStoreAddr *common.Address) (*configPoller, error) { + err := destChainPoller.RegisterFilter(logpoller.Filter{Name: configPollerFilterName(aggregatorContractAddr), EventSigs: []common.Hash{ConfigSet}, Addresses: []common.Address{aggregatorContractAddr}}) + if err != nil { + return nil, err + } + + aggregatorContract, err := ocr2aggregator.NewOCR2Aggregator(aggregatorContractAddr, client) if err != nil { return nil, err } cp := &configPoller{ - lggr: lggr, - filterName: configPollerFilterName(addr), - destChainLogPoller: destChainPoller, - addr: addr, + lggr: lggr, + filterName: configPollerFilterName(aggregatorContractAddr), + destChainLogPoller: destChainPoller, + aggregatorContractAddr: aggregatorContractAddr, + client: client, + aggregatorContract: aggregatorContract, + } + + if configStoreAddr != nil { + cp.configStoreContractAddr = configStoreAddr + cp.configStoreContract, err = ocrconfigurationstoreevmsimple.NewOCRConfigurationStoreEVMSimple(*configStoreAddr, client) + if err != nil { + return nil, err + } } return cp, nil } -// Start noop method func (cp *configPoller) Start() {} -// Close noop method func (cp *configPoller) Close() error { return nil } @@ -117,10 +162,14 @@ func (cp *configPoller) Replay(ctx context.Context, fromBlock int64) error { // LatestConfigDetails returns the latest config details from the logs 
func (cp *configPoller) LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) { - latest, err := cp.destChainLogPoller.LatestLogByEventSigWithConfs(ConfigSet, cp.addr, 1, pg.WithParentCtx(ctx)) + latest, err := cp.destChainLogPoller.LatestLogByEventSigWithConfs(ConfigSet, cp.aggregatorContractAddr, 1, pg.WithParentCtx(ctx)) if err != nil { - // If contract is not configured, we will not have the log. if errors.Is(err, sql.ErrNoRows) { + if cp.isConfigStoreAvailable() { + // Fallback to RPC call in case logs have been pruned and configStoreContract is available + return cp.callLatestConfigDetails(ctx) + } + // log not found means return zero config digest return 0, ocrtypes.ConfigDigest{}, nil } return 0, ocrtypes.ConfigDigest{}, err @@ -134,12 +183,16 @@ func (cp *configPoller) LatestConfigDetails(ctx context.Context) (changedInBlock // LatestConfig returns the latest config from the logs on a certain block func (cp *configPoller) LatestConfig(ctx context.Context, changedInBlock uint64) (ocrtypes.ContractConfig, error) { - lgs, err := cp.destChainLogPoller.Logs(int64(changedInBlock), int64(changedInBlock), ConfigSet, cp.addr, pg.WithParentCtx(ctx)) + lgs, err := cp.destChainLogPoller.Logs(int64(changedInBlock), int64(changedInBlock), ConfigSet, cp.aggregatorContractAddr, pg.WithParentCtx(ctx)) if err != nil { return ocrtypes.ContractConfig{}, err } if len(lgs) == 0 { - return ocrtypes.ContractConfig{}, errors.New("no logs found") + if cp.isConfigStoreAvailable() { + // Fallback to RPC call in case logs have been pruned + return cp.callReadConfigFromStore(ctx) + } + return ocrtypes.ContractConfig{}, fmt.Errorf("no logs found for config on contract %s (chain %s) at block %d", cp.aggregatorContractAddr.Hex(), cp.client.ConfiguredChainID().String(), changedInBlock) } latestConfigSet, err := configFromLog(lgs[len(lgs)-1].Data) if err != nil { @@ -160,3 +213,58 @@ func (cp *configPoller) LatestBlockHeight(ctx context.Context) (blockHeight uint } return uint64(latest), nil } + +func (cp *configPoller) isConfigStoreAvailable() bool { + return cp.configStoreContract != nil +} + +// RPC call for latest config details +func (cp *configPoller) callLatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) { + details, err := cp.aggregatorContract.LatestConfigDetails(&bind.CallOpts{ + Context: ctx, + }) + if err != nil { + failedRPCContractCalls.WithLabelValues(cp.client.ConfiguredChainID().String(), cp.aggregatorContractAddr.Hex()).Inc() + } + return uint64(details.BlockNumber), details.ConfigDigest, err +} + +// RPC call to read config from config store contract +func (cp *configPoller) callReadConfigFromStore(ctx context.Context) (cfg ocrtypes.ContractConfig, err error) { + _, configDigest, err := cp.LatestConfigDetails(ctx) + if err != nil { + failedRPCContractCalls.WithLabelValues(cp.client.ConfiguredChainID().String(), cp.aggregatorContractAddr.Hex()).Inc() + return cfg, fmt.Errorf("failed to get latest config details: %w", err) + } + if configDigest == (ocrtypes.ConfigDigest{}) { + return cfg, fmt.Errorf("config details missing while trying to lookup config in store; no logs found for contract %s (chain %s)", cp.aggregatorContractAddr.Hex(), cp.client.ConfiguredChainID().String()) + } + + storedConfig, err := cp.configStoreContract.ReadConfig(&bind.CallOpts{ + Context: ctx, + }, configDigest) + if err != nil { + 
failedRPCContractCalls.WithLabelValues(cp.client.ConfiguredChainID().String(), cp.configStoreContractAddr.Hex()).Inc() + return cfg, fmt.Errorf("failed to read config from config store contract: %w", err) + } + + signers := make([]ocrtypes.OnchainPublicKey, len(storedConfig.Signers)) + for i := range signers { + signers[i] = storedConfig.Signers[i].Bytes() + } + transmitters := make([]ocrtypes.Account, len(storedConfig.Transmitters)) + for i := range transmitters { + transmitters[i] = ocrtypes.Account(storedConfig.Transmitters[i].Hex()) + } + + return ocrtypes.ContractConfig{ + ConfigDigest: configDigest, + ConfigCount: uint64(storedConfig.ConfigCount), + Signers: signers, + Transmitters: transmitters, + F: storedConfig.F, + OnchainConfig: storedConfig.OnchainConfig, + OffchainConfigVersion: storedConfig.OffchainConfigVersion, + OffchainConfig: storedConfig.OffchainConfig, + }, err +} diff --git a/core/services/relay/evm/config_poller_test.go b/core/services/relay/evm/config_poller_test.go index 75e033dfeb7..73c16a19596 100644 --- a/core/services/relay/evm/config_poller_test.go +++ b/core/services/relay/evm/config_poller_test.go @@ -1,111 +1,325 @@ package evm import ( + "database/sql" "math/big" "testing" "time" + "github.com/ethereum/go-ethereum" + "github.com/smartcontractkit/libocr/gethwrappers2/ocrconfigurationstoreevmsimple" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/onsi/gomega" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator" testoffchainaggregator2 "github.com/smartcontractkit/libocr/gethwrappers2/testocr2aggregator" "github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median" confighelper2 "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" ocrtypes2 "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + evmClientMocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/testhelpers" "github.com/smartcontractkit/chainlink/v2/core/utils" ) func TestConfigPoller(t *testing.T) { - key, err := crypto.GenerateKey() - require.NoError(t, err) - user, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - require.NoError(t, err) - b := backends.NewSimulatedBackend(core.GenesisAlloc{ - user.From: {Balance: big.NewInt(1000000000000000000)}}, - 5*ethconfig.Defaults.Miner.GasCeil) - linkTokenAddress, _, _, err := link_token_interface.DeployLinkToken(user, b) - require.NoError(t, err) - accessAddress, _, 
_, err := testoffchainaggregator2.DeploySimpleWriteAccessController(user, b) - require.NoError(t, err, "failed to deploy test access controller contract") - ocrAddress, _, ocrContract, err := ocr2aggregator.DeployOCR2Aggregator( - user, - b, - linkTokenAddress, - big.NewInt(0), - big.NewInt(10), - accessAddress, - accessAddress, - 9, - "TEST", - ) - require.NoError(t, err) - b.Commit() - - db := pgtest.NewSqlxDB(t) - cfg := pgtest.NewQConfig(false) - ethClient := evmclient.NewSimulatedBackendClient(t, b, big.NewInt(1337)) lggr := logger.TestLogger(t) - ctx := testutils.Context(t) - lorm := logpoller.NewORM(big.NewInt(1337), db, lggr, cfg) - lp := logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, 1, 2, 2, 1000) - require.NoError(t, lp.Start(ctx)) - t.Cleanup(func() { lp.Close() }) - configPoller, err := NewConfigPoller(lggr, lp, ocrAddress) - require.NoError(t, err) - // Should have no config to begin with. - _, config, err := configPoller.LatestConfigDetails(testutils.Context(t)) - require.NoError(t, err) - require.Equal(t, ocrtypes2.ConfigDigest{}, config) - _, err = configPoller.LatestConfig(testutils.Context(t), 0) - require.Error(t, err) - // Set the config - contractConfig := setConfig(t, median.OffchainConfig{ - AlphaReportInfinite: false, - AlphaReportPPB: 0, - AlphaAcceptInfinite: true, - AlphaAcceptPPB: 0, - DeltaC: 10, - }, ocrContract, user) - b.Commit() - latest, err := b.BlockByNumber(testutils.Context(t), nil) - require.NoError(t, err) - // Ensure we capture this config set log. - require.NoError(t, lp.Replay(testutils.Context(t), latest.Number().Int64()-1)) + var ethClient *client.SimulatedBackendClient + var lp logpoller.LogPoller + var ocrAddress common.Address + var ocrContract *ocr2aggregator.OCR2Aggregator + var configStoreContractAddr common.Address + var configStoreContract *ocrconfigurationstoreevmsimple.OCRConfigurationStoreEVMSimple + var user *bind.TransactOpts + var b *backends.SimulatedBackend + var linkTokenAddress common.Address + var accessAddress common.Address - // Send blocks until we see the config updated. 
- var configBlock uint64 - var digest [32]byte - gomega.NewGomegaWithT(t).Eventually(func() bool { + { + key, err := crypto.GenerateKey() + require.NoError(t, err) + user, err = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + require.NoError(t, err) + b = backends.NewSimulatedBackend(core.GenesisAlloc{ + user.From: {Balance: big.NewInt(1000000000000000000)}}, + 5*ethconfig.Defaults.Miner.GasCeil) + linkTokenAddress, _, _, err = link_token_interface.DeployLinkToken(user, b) + require.NoError(t, err) + accessAddress, _, _, err = testoffchainaggregator2.DeploySimpleWriteAccessController(user, b) + require.NoError(t, err, "failed to deploy test access controller contract") + ocrAddress, _, ocrContract, err = ocr2aggregator.DeployOCR2Aggregator( + user, + b, + linkTokenAddress, + big.NewInt(0), + big.NewInt(10), + accessAddress, + accessAddress, + 9, + "TEST", + ) + require.NoError(t, err) + configStoreContractAddr, _, configStoreContract, err = ocrconfigurationstoreevmsimple.DeployOCRConfigurationStoreEVMSimple(user, b) + require.NoError(t, err) b.Commit() - configBlock, digest, err = configPoller.LatestConfigDetails(testutils.Context(t)) + + db := pgtest.NewSqlxDB(t) + cfg := pgtest.NewQConfig(false) + ethClient = evmclient.NewSimulatedBackendClient(t, b, testutils.SimulatedChainID) + ctx := testutils.Context(t) + lorm := logpoller.NewORM(testutils.SimulatedChainID, db, lggr, cfg) + lp = logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, 1, 2, 2, 1000) + require.NoError(t, lp.Start(ctx)) + t.Cleanup(func() { lp.Close() }) + } + + t.Run("LatestConfig errors if there is no config in logs and config store is unconfigured", func(t *testing.T) { + cp, err := NewConfigPoller(lggr, ethClient, lp, ocrAddress, nil) require.NoError(t, err) - return ocrtypes2.ConfigDigest{} != digest - }, testutils.WaitTimeout(t), 100*time.Millisecond).Should(gomega.BeTrue()) - // Assert the config returned is the one we configured. - newConfig, err := configPoller.LatestConfig(testutils.Context(t), configBlock) - require.NoError(t, err) - // Note we don't check onchainConfig, as that is populated in the contract itself. - assert.Equal(t, digest, [32]byte(newConfig.ConfigDigest)) - assert.Equal(t, contractConfig.Signers, newConfig.Signers) - assert.Equal(t, contractConfig.Transmitters, newConfig.Transmitters) - assert.Equal(t, contractConfig.F, newConfig.F) - assert.Equal(t, contractConfig.OffchainConfigVersion, newConfig.OffchainConfigVersion) - assert.Equal(t, contractConfig.OffchainConfig, newConfig.OffchainConfig) + _, err = cp.LatestConfig(testutils.Context(t), 0) + require.Error(t, err) + assert.Contains(t, err.Error(), "no logs found for config on contract") + }) + + t.Run("happy path (with config store)", func(t *testing.T) { + cp, err := NewConfigPoller(lggr, ethClient, lp, ocrAddress, &configStoreContractAddr) + require.NoError(t, err) + // Should have no config to begin with. 
+ _, configDigest, err := cp.LatestConfigDetails(testutils.Context(t)) + require.NoError(t, err) + require.Equal(t, ocrtypes2.ConfigDigest{}, configDigest) + // Should error because there are no logs for config at block 0 + _, err = cp.LatestConfig(testutils.Context(t), 0) + require.Error(t, err) + assert.Contains(t, err.Error(), "config details missing while trying to lookup config in store") + + // Set the config + contractConfig := setConfig(t, median.OffchainConfig{ + AlphaReportInfinite: false, + AlphaReportPPB: 0, + AlphaAcceptInfinite: true, + AlphaAcceptPPB: 0, + DeltaC: 10, + }, ocrContract, user) + b.Commit() + latest, err := b.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + // Ensure we capture this config set log. + require.NoError(t, lp.Replay(testutils.Context(t), latest.Number().Int64()-1)) + + // Send blocks until we see the config updated. + var configBlock uint64 + var digest [32]byte + gomega.NewGomegaWithT(t).Eventually(func() bool { + b.Commit() + configBlock, digest, err = cp.LatestConfigDetails(testutils.Context(t)) + require.NoError(t, err) + return ocrtypes2.ConfigDigest{} != digest + }, testutils.WaitTimeout(t), 100*time.Millisecond).Should(gomega.BeTrue()) + + // Assert the config returned is the one we configured. + newConfig, err := cp.LatestConfig(testutils.Context(t), configBlock) + require.NoError(t, err) + // Note we don't check onchainConfig, as that is populated in the contract itself. + assert.Equal(t, digest, [32]byte(newConfig.ConfigDigest)) + assert.Equal(t, contractConfig.Signers, newConfig.Signers) + assert.Equal(t, contractConfig.Transmitters, newConfig.Transmitters) + assert.Equal(t, contractConfig.F, newConfig.F) + assert.Equal(t, contractConfig.OffchainConfigVersion, newConfig.OffchainConfigVersion) + assert.Equal(t, contractConfig.OffchainConfig, newConfig.OffchainConfig) + }) + + { + var err error + ocrAddress, _, ocrContract, err = ocr2aggregator.DeployOCR2Aggregator( + user, + b, + linkTokenAddress, + big.NewInt(0), + big.NewInt(10), + accessAddress, + accessAddress, + 9, + "TEST", + ) + require.NoError(t, err) + b.Commit() + } + + t.Run("LatestConfigDetails, when logs have been pruned and config store contract is configured", func(t *testing.T) { + // Give it a log poller that will never return logs + mp := new(mocks.LogPoller) + mp.On("RegisterFilter", mock.Anything).Return(nil) + mp.On("LatestLogByEventSigWithConfs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, sql.ErrNoRows) + + t.Run("if callLatestConfigDetails succeeds", func(t *testing.T) { + cp, err := newConfigPoller(lggr, ethClient, mp, ocrAddress, &configStoreContractAddr) + require.NoError(t, err) + + t.Run("when config has not been set, returns zero values", func(t *testing.T) { + changedInBlock, configDigest, err := cp.LatestConfigDetails(testutils.Context(t)) + require.NoError(t, err) + + assert.Equal(t, 0, int(changedInBlock)) + assert.Equal(t, ocrtypes.ConfigDigest{}, configDigest) + }) + t.Run("when config has been set, returns config details", func(t *testing.T) { + setConfig(t, median.OffchainConfig{ + AlphaReportInfinite: false, + AlphaReportPPB: 0, + AlphaAcceptInfinite: true, + AlphaAcceptPPB: 0, + DeltaC: 10, + }, ocrContract, user) + b.Commit() + + changedInBlock, configDigest, err := cp.LatestConfigDetails(testutils.Context(t)) + require.NoError(t, err) + + latest, err := b.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + + onchainDetails, err := ocrContract.LatestConfigDetails(nil) + 
require.NoError(t, err) + + assert.Equal(t, latest.Number().Int64(), int64(changedInBlock)) + assert.Equal(t, onchainDetails.ConfigDigest, [32]byte(configDigest)) + }) + }) + t.Run("returns error if callLatestConfigDetails fails", func(t *testing.T) { + failingClient := new(evmClientMocks.Client) + failingClient.On("ConfiguredChainID").Return(big.NewInt(42)) + failingClient.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("something exploded")) + cp, err := newConfigPoller(lggr, failingClient, mp, ocrAddress, &configStoreContractAddr) + require.NoError(t, err) + + cp.configStoreContractAddr = &configStoreContractAddr + cp.configStoreContract = configStoreContract + + _, _, err = cp.LatestConfigDetails(testutils.Context(t)) + assert.EqualError(t, err, "something exploded") + + failingClient.AssertExpectations(t) + }) + }) + + { + var err error + // deploy it again to reset to empty config + ocrAddress, _, ocrContract, err = ocr2aggregator.DeployOCR2Aggregator( + user, + b, + linkTokenAddress, + big.NewInt(0), + big.NewInt(10), + accessAddress, + accessAddress, + 9, + "TEST", + ) + require.NoError(t, err) + b.Commit() + } + + t.Run("LatestConfig, when logs have been pruned and config store contract is configured", func(t *testing.T) { + // Give it a log poller that will never return logs + mp := mocks.NewLogPoller(t) + mp.On("RegisterFilter", mock.Anything).Return(nil) + mp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) + mp.On("LatestLogByEventSigWithConfs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, sql.ErrNoRows) + + t.Run("if callReadConfig succeeds", func(t *testing.T) { + cp, err := newConfigPoller(lggr, ethClient, mp, ocrAddress, &configStoreContractAddr) + require.NoError(t, err) + + t.Run("when config has not been set, returns error", func(t *testing.T) { + _, err := cp.LatestConfig(testutils.Context(t), 0) + require.Error(t, err) + + assert.Contains(t, err.Error(), "config details missing while trying to lookup config in store") + }) + t.Run("when config has been set, returns config", func(t *testing.T) { + b.Commit() + onchainDetails, err := ocrContract.LatestConfigDetails(nil) + require.NoError(t, err) + + contractConfig := setConfig(t, median.OffchainConfig{ + AlphaReportInfinite: false, + AlphaReportPPB: 0, + AlphaAcceptInfinite: true, + AlphaAcceptPPB: 0, + DeltaC: 10, + }, ocrContract, user) + + signerAddresses, err := OnchainPublicKeyToAddress(contractConfig.Signers) + require.NoError(t, err) + transmitterAddresses, err := AccountToAddress(contractConfig.Transmitters) + require.NoError(t, err) + + configuration := ocrconfigurationstoreevmsimple.OCRConfigurationStoreEVMSimpleConfigurationEVMSimple{ + Signers: signerAddresses, + Transmitters: transmitterAddresses, + OnchainConfig: contractConfig.OnchainConfig, + OffchainConfig: contractConfig.OffchainConfig, + ContractAddress: ocrAddress, + OffchainConfigVersion: contractConfig.OffchainConfigVersion, + ConfigCount: 1, + F: contractConfig.F, + } + + addConfig(t, user, configStoreContract, configuration) + + b.Commit() + onchainDetails, err = ocrContract.LatestConfigDetails(nil) + require.NoError(t, err) + + newConfig, err := cp.LatestConfig(testutils.Context(t), 0) + require.NoError(t, err) + + assert.Equal(t, onchainDetails.ConfigDigest, [32]byte(newConfig.ConfigDigest)) + assert.Equal(t, contractConfig.Signers, newConfig.Signers) + assert.Equal(t, contractConfig.Transmitters, newConfig.Transmitters) + 
assert.Equal(t, contractConfig.F, newConfig.F) + assert.Equal(t, contractConfig.OffchainConfigVersion, newConfig.OffchainConfigVersion) + assert.Equal(t, contractConfig.OffchainConfig, newConfig.OffchainConfig) + }) + }) + t.Run("returns error if callReadConfig fails", func(t *testing.T) { + failingClient := new(evmClientMocks.Client) + failingClient.On("ConfiguredChainID").Return(big.NewInt(42)) + failingClient.On("CallContract", mock.Anything, mock.MatchedBy(func(callArgs ethereum.CallMsg) bool { + // initial call to retrieve config store address from aggregator + return *callArgs.To == ocrAddress + }), mock.Anything).Return(nil, errors.New("something exploded")).Once() + cp, err := newConfigPoller(lggr, failingClient, mp, ocrAddress, &configStoreContractAddr) + require.NoError(t, err) + + _, err = cp.LatestConfig(testutils.Context(t), 0) + assert.EqualError(t, err, "failed to get latest config details: something exploded") + + failingClient.AssertExpectations(t) + }) + }) } func setConfig(t *testing.T, pluginConfig median.OffchainConfig, ocrContract *ocr2aggregator.OCR2Aggregator, user *bind.TransactOpts) ocrtypes2.ContractConfig { @@ -115,13 +329,16 @@ func setConfig(t *testing.T, pluginConfig median.OffchainConfig, ocrContract *oc oracles = append(oracles, confighelper2.OracleIdentityExtra{ OracleIdentity: confighelper2.OracleIdentity{ OnchainPublicKey: utils.RandomAddress().Bytes(), - TransmitAccount: ocrtypes2.Account(utils.RandomAddress().String()), + TransmitAccount: ocrtypes2.Account(utils.RandomAddress().Hex()), OffchainPublicKey: utils.RandomBytes32(), PeerID: utils.MustNewPeerID(), }, ConfigEncryptionPublicKey: utils.RandomBytes32(), }) } + // Generate OnchainConfig + onchainConfig, err := testhelpers.GenerateDefaultOCR2OnchainConfig(big.NewInt(0), big.NewInt(10)) + require.NoError(t, err) // Change the offramp config signers, transmitters, threshold, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper2.ContractSetConfigArgsForTests( 2*time.Second, // deltaProgress @@ -139,7 +356,7 @@ func setConfig(t *testing.T, pluginConfig median.OffchainConfig, ocrContract *oc 50*time.Millisecond, 50*time.Millisecond, 1, // faults - nil, + onchainConfig, ) require.NoError(t, err) signerAddresses, err := OnchainPublicKeyToAddress(signers) @@ -157,3 +374,9 @@ func setConfig(t *testing.T, pluginConfig median.OffchainConfig, ocrContract *oc OffchainConfig: offchainConfig, } } + +func addConfig(t *testing.T, user *bind.TransactOpts, configStoreContract *ocrconfigurationstoreevmsimple.OCRConfigurationStoreEVMSimple, config ocrconfigurationstoreevmsimple.OCRConfigurationStoreEVMSimpleConfigurationEVMSimple) { + + _, err := configStoreContract.AddConfig(user, config) + require.NoError(t, err) +} diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go index 1ce68f2d944..f1731b9c438 100644 --- a/core/services/relay/evm/evm.go +++ b/core/services/relay/evm/evm.go @@ -292,7 +292,7 @@ func newConfigProvider(lggr logger.Logger, chain evm.Chain, opts *types.RelayOpt return nil, errors.Errorf("invalid contractID, expected hex address") } - contractAddress := common.HexToAddress(opts.ContractID) + aggregatorAddress := common.HexToAddress(opts.ContractID) contractABI, err := abi.JSON(strings.NewReader(ocr2aggregator.OCR2AggregatorMetaData.ABI)) if err != nil { return nil, errors.Wrap(err, "could not get contract ABI JSON") @@ -307,14 +307,18 @@ func newConfigProvider(lggr logger.Logger, chain evm.Chain, opts *types.RelayOpt cp, err = mercury.NewConfigPoller( lggr,
chain.LogPoller(), - contractAddress, + aggregatorAddress, *relayConfig.FeedID, eventBroadcaster, + // TODO: Does mercury need to support config contract? DF-19182 ) } else { - cp, err = NewConfigPoller(lggr, + cp, err = NewConfigPoller( + lggr, + chain.Client(), chain.LogPoller(), - contractAddress, + aggregatorAddress, + relayConfig.ConfigContractAddress, ) } if err != nil { @@ -324,15 +328,15 @@ func newConfigProvider(lggr logger.Logger, chain evm.Chain, opts *types.RelayOpt var offchainConfigDigester ocrtypes.OffchainConfigDigester if relayConfig.FeedID != nil { // Mercury - offchainConfigDigester = mercury.NewOffchainConfigDigester(*relayConfig.FeedID, chain.Config().EVM().ChainID(), contractAddress) + offchainConfigDigester = mercury.NewOffchainConfigDigester(*relayConfig.FeedID, chain.Config().EVM().ChainID(), aggregatorAddress) } else { // Non-mercury offchainConfigDigester = evmutil.EVMOffchainConfigDigester{ ChainID: chain.Config().EVM().ChainID().Uint64(), - ContractAddress: contractAddress, + ContractAddress: aggregatorAddress, } } - return newConfigWatcher(lggr, contractAddress, contractABI, offchainConfigDigester, cp, chain, relayConfig.FromBlock, opts.New), nil + return newConfigWatcher(lggr, aggregatorAddress, contractABI, offchainConfigDigester, cp, chain, relayConfig.FromBlock, opts.New), nil } func newContractTransmitter(lggr logger.Logger, rargs relaytypes.RelayArgs, transmitterID string, configWatcher *configWatcher, ethKeystore keystore.Eth) (*contractTransmitter, error) { diff --git a/core/services/relay/evm/functions/config_poller_test.go b/core/services/relay/evm/functions/config_poller_test.go index b53b2751b15..d6573ef3544 100644 --- a/core/services/relay/evm/functions/config_poller_test.go +++ b/core/services/relay/evm/functions/config_poller_test.go @@ -21,6 +21,7 @@ import ( ocrtypes2 "github.com/smartcontractkit/libocr/offchainreporting2plus/types" functionsConfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/functions/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/testhelpers" evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" @@ -170,6 +171,9 @@ func setFunctionsConfig(t *testing.T, pluginConfig *functionsConfig.ReportingPlu pluginConfigBytes, err := functionsConfig.EncodeReportingPluginConfig(pluginConfig) require.NoError(t, err) + onchainConfig, err := testhelpers.GenerateDefaultOCR2OnchainConfig(big.NewInt(0), big.NewInt(10)) + require.NoError(t, err) + signers, transmitters, threshold, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper2.ContractSetConfigArgsForTests( 2*time.Second, // deltaProgress 1*time.Second, // deltaResend @@ -186,7 +190,7 @@ func setFunctionsConfig(t *testing.T, pluginConfig *functionsConfig.ReportingPlu 50*time.Millisecond, 50*time.Millisecond, 1, // faults - nil, + onchainConfig, ) require.NoError(t, err) diff --git a/core/services/relay/evm/functions/logpoller_wrapper.go b/core/services/relay/evm/functions/logpoller_wrapper.go index eae1970da93..db2c7fd68ce 100644 --- a/core/services/relay/evm/functions/logpoller_wrapper.go +++ b/core/services/relay/evm/functions/logpoller_wrapper.go @@ -30,6 +30,7 @@ type logPollerWrapper struct { subscribers map[string]evmRelayTypes.RouteUpdateSubscriber activeCoordinator common.Address proposedCoordinator common.Address + blockOffset int64 nextBlock int64 mu sync.Mutex closeWait sync.WaitGroup @@ -44,10 +45,15 @@ func 
NewLogPollerWrapper(routerContractAddress common.Address, pluginConfig conf if err != nil { return nil, err } + blockOffset := int64(pluginConfig.MinIncomingConfirmations) - 1 + if blockOffset < 0 { + blockOffset = 0 + } return &logPollerWrapper{ routerContract: routerContract, pluginConfig: pluginConfig, + blockOffset: blockOffset, logPoller: logPoller, client: client, subscribers: make(map[string]evmRelayTypes.RouteUpdateSubscriber), @@ -66,11 +72,11 @@ func (l *logPollerWrapper) Start(context.Context) error { l.proposedCoordinator = l.routerContract.Address() } else if l.pluginConfig.ContractVersion == 1 { nextBlock, err := l.logPoller.LatestBlock() - l.nextBlock = nextBlock if err != nil { l.lggr.Errorw("LogPollerWrapper: LatestBlock() failed, starting from 0", "error", err) } else { l.lggr.Debugw("LogPollerWrapper: LatestBlock() got starting block", "block", nextBlock) + l.nextBlock = nextBlock - l.blockOffset } l.closeWait.Add(1) go l.checkForRouteUpdates() @@ -116,6 +122,7 @@ func (l *logPollerWrapper) LatestEvents() ([]evmRelayTypes.OracleRequest, []evmR l.mu.Unlock() return nil, nil, err } + latest -= l.blockOffset if latest >= nextBlock { l.nextBlock = latest + 1 } diff --git a/core/services/relay/evm/mercury/mocks/pipeline.go b/core/services/relay/evm/mercury/mocks/pipeline.go index 317404e4409..f553ba98509 100644 --- a/core/services/relay/evm/mercury/mocks/pipeline.go +++ b/core/services/relay/evm/mercury/mocks/pipeline.go @@ -13,8 +13,8 @@ type MockRunner struct { Err error } -func (m *MockRunner) ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run pipeline.Run, trrs pipeline.TaskRunResults, err error) { - return pipeline.Run{ID: 42}, m.Trrs, m.Err +func (m *MockRunner) ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error) { + return &pipeline.Run{ID: 42}, m.Trrs, m.Err } var _ pipeline.Task = &MockTask{} diff --git a/core/services/relay/evm/mercury/v1/data_source.go b/core/services/relay/evm/mercury/v1/data_source.go index ff7a2c0ab7f..5c1f55ddab7 100644 --- a/core/services/relay/evm/mercury/v1/data_source.go +++ b/core/services/relay/evm/mercury/v1/data_source.go @@ -26,7 +26,7 @@ import ( ) type Runner interface { - ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run pipeline.Run, trrs pipeline.TaskRunResults, err error) + ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error) } // Fetcher fetcher data from Mercury server @@ -40,7 +40,7 @@ type datasource struct { jb job.Job spec pipeline.Spec lggr logger.Logger - runResults chan<- pipeline.Run + runResults chan<- *pipeline.Run orm types.DataSourceORM codec reportcodec.ReportCodec feedID [32]byte @@ -55,7 +55,7 @@ type datasource struct { var _ relaymercuryv1.DataSource = &datasource{} -func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, rr chan pipeline.Run, enhancedTelemChan chan ocrcommon.EnhancedTelemetryMercuryData, chainHeadTracker types.ChainHeadTracker, fetcher Fetcher, initialBlockNumber *int64, feedID [32]byte) *datasource { +func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, rr chan *pipeline.Run, enhancedTelemChan chan ocrcommon.EnhancedTelemetryMercuryData, chainHeadTracker types.ChainHeadTracker, fetcher Fetcher, 
initialBlockNumber *int64, feedID [32]byte) *datasource { return &datasource{pr, jb, spec, lggr, rr, orm, reportcodec.ReportCodec{}, feedID, sync.RWMutex{}, enhancedTelemChan, chainHeadTracker, fetcher, initialBlockNumber} } @@ -115,7 +115,7 @@ func (ds *datasource) Observe(ctx context.Context, repts ocrtypes.ReportTimestam wg.Add(1) go func() { defer wg.Done() - var run pipeline.Run + var run *pipeline.Run run, trrs, err = ds.executeRun(ctx) if err != nil { err = fmt.Errorf("Observe failed while executing run: %w", err) @@ -238,7 +238,7 @@ func setAsk(o *parseOutput, res pipeline.Result) error { // The context passed in here has a timeout of (ObservationTimeout + ObservationGracePeriod). // Upon context cancellation, its expected that we return any usable values within ObservationGracePeriod. -func (ds *datasource) executeRun(ctx context.Context) (pipeline.Run, pipeline.TaskRunResults, error) { +func (ds *datasource) executeRun(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) { vars := pipeline.NewVarsFrom(map[string]interface{}{ "jb": map[string]interface{}{ "databaseID": ds.jb.ID, @@ -249,7 +249,7 @@ func (ds *datasource) executeRun(ctx context.Context) (pipeline.Run, pipeline.Ta run, trrs, err := ds.pipelineRunner.ExecuteRun(ctx, ds.spec, vars, ds.lggr) if err != nil { - return pipeline.Run{}, nil, pkgerrors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID) + return nil, nil, pkgerrors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID) } return run, trrs, err diff --git a/core/services/relay/evm/mercury/v1/data_source_test.go b/core/services/relay/evm/mercury/v1/data_source_test.go index a0932990f02..6e460951301 100644 --- a/core/services/relay/evm/mercury/v1/data_source_test.go +++ b/core/services/relay/evm/mercury/v1/data_source_test.go @@ -308,8 +308,8 @@ func TestMercury_Observe(t *testing.T) { trrs[i].Result.Value = "123" trrs[i].Result.Error = nil } + ch := make(chan *pipeline.Run, 1) - ch := make(chan pipeline.Run, 1) ds.runResults = ch _, err := ds.Observe(ctx, repts, false) diff --git a/core/services/relay/evm/mercury/v2/data_source.go b/core/services/relay/evm/mercury/v2/data_source.go index 632278a3c56..caeae8d278a 100644 --- a/core/services/relay/evm/mercury/v2/data_source.go +++ b/core/services/relay/evm/mercury/v2/data_source.go @@ -25,7 +25,7 @@ import ( ) type Runner interface { - ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run pipeline.Run, trrs pipeline.TaskRunResults, err error) + ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error) } type LatestReportFetcher interface { @@ -39,7 +39,7 @@ type datasource struct { spec pipeline.Spec feedID mercuryutils.FeedID lggr logger.Logger - runResults chan<- pipeline.Run + runResults chan<- *pipeline.Run orm types.DataSourceORM codec reportcodec.ReportCodec @@ -54,7 +54,7 @@ type datasource struct { var _ relaymercuryv2.DataSource = &datasource{} -func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, feedID mercuryutils.FeedID, lggr logger.Logger, rr chan pipeline.Run, enhancedTelemChan chan ocrcommon.EnhancedTelemetryMercuryData, fetcher LatestReportFetcher, linkFeedID, nativeFeedID mercuryutils.FeedID) *datasource { +func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, feedID mercuryutils.FeedID, lggr logger.Logger, rr chan *pipeline.Run, enhancedTelemChan chan 
ocrcommon.EnhancedTelemetryMercuryData, fetcher LatestReportFetcher, linkFeedID, nativeFeedID mercuryutils.FeedID) *datasource { return &datasource{pr, jb, spec, feedID, lggr, rr, orm, reportcodec.ReportCodec{}, fetcher, linkFeedID, nativeFeedID, sync.RWMutex{}, enhancedTelemChan} } @@ -84,7 +84,7 @@ func (ds *datasource) Observe(ctx context.Context, repts ocrtypes.ReportTimestam go func() { defer wg.Done() var trrs pipeline.TaskRunResults - var run pipeline.Run + var run *pipeline.Run run, trrs, err = ds.executeRun(ctx) if err != nil { cancel() @@ -218,7 +218,7 @@ func setBenchmarkPrice(o *parseOutput, res pipeline.Result) error { // The context passed in here has a timeout of (ObservationTimeout + ObservationGracePeriod). // Upon context cancellation, its expected that we return any usable values within ObservationGracePeriod. -func (ds *datasource) executeRun(ctx context.Context) (pipeline.Run, pipeline.TaskRunResults, error) { +func (ds *datasource) executeRun(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) { vars := pipeline.NewVarsFrom(map[string]interface{}{ "jb": map[string]interface{}{ "databaseID": ds.jb.ID, @@ -229,7 +229,7 @@ func (ds *datasource) executeRun(ctx context.Context) (pipeline.Run, pipeline.Ta run, trrs, err := ds.pipelineRunner.ExecuteRun(ctx, ds.spec, vars, ds.lggr) if err != nil { - return pipeline.Run{}, nil, pkgerrors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID) + return nil, nil, pkgerrors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID) } return run, trrs, err diff --git a/core/services/relay/evm/mercury/v3/data_source.go b/core/services/relay/evm/mercury/v3/data_source.go index 8d3895cd62b..79f6c536efd 100644 --- a/core/services/relay/evm/mercury/v3/data_source.go +++ b/core/services/relay/evm/mercury/v3/data_source.go @@ -26,7 +26,7 @@ import ( ) type Runner interface { - ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run pipeline.Run, trrs pipeline.TaskRunResults, err error) + ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error) } type LatestReportFetcher interface { @@ -40,7 +40,7 @@ type datasource struct { spec pipeline.Spec feedID mercuryutils.FeedID lggr logger.Logger - runResults chan<- pipeline.Run + runResults chan<- *pipeline.Run orm types.DataSourceORM codec reportcodec.ReportCodec @@ -55,7 +55,7 @@ type datasource struct { var _ relaymercuryv3.DataSource = &datasource{} -func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, feedID mercuryutils.FeedID, lggr logger.Logger, rr chan pipeline.Run, enhancedTelemChan chan ocrcommon.EnhancedTelemetryMercuryData, fetcher LatestReportFetcher, linkFeedID, nativeFeedID mercuryutils.FeedID) *datasource { +func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, feedID mercuryutils.FeedID, lggr logger.Logger, rr chan *pipeline.Run, enhancedTelemChan chan ocrcommon.EnhancedTelemetryMercuryData, fetcher LatestReportFetcher, linkFeedID, nativeFeedID mercuryutils.FeedID) *datasource { return &datasource{pr, jb, spec, feedID, lggr, rr, orm, reportcodec.ReportCodec{}, fetcher, linkFeedID, nativeFeedID, sync.RWMutex{}, enhancedTelemChan} } @@ -85,7 +85,7 @@ func (ds *datasource) Observe(ctx context.Context, repts ocrtypes.ReportTimestam go func() { defer wg.Done() var trrs pipeline.TaskRunResults - var run pipeline.Run + var run *pipeline.Run run, 
trrs, err = ds.executeRun(ctx) if err != nil { cancel() @@ -256,7 +256,7 @@ func setAsk(o *parseOutput, res pipeline.Result) error { // The context passed in here has a timeout of (ObservationTimeout + ObservationGracePeriod). // Upon context cancellation, its expected that we return any usable values within ObservationGracePeriod. -func (ds *datasource) executeRun(ctx context.Context) (pipeline.Run, pipeline.TaskRunResults, error) { +func (ds *datasource) executeRun(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) { vars := pipeline.NewVarsFrom(map[string]interface{}{ "jb": map[string]interface{}{ "databaseID": ds.jb.ID, @@ -267,7 +267,7 @@ func (ds *datasource) executeRun(ctx context.Context) (pipeline.Run, pipeline.Ta run, trrs, err := ds.pipelineRunner.ExecuteRun(ctx, ds.spec, vars, ds.lggr) if err != nil { - return pipeline.Run{}, nil, pkgerrors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID) + return nil, nil, pkgerrors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID) } return run, trrs, err diff --git a/core/services/relay/evm/ocr2keeper.go b/core/services/relay/evm/ocr2keeper.go index c6f4f1ad395..baf98b9b006 100644 --- a/core/services/relay/evm/ocr2keeper.go +++ b/core/services/relay/evm/ocr2keeper.go @@ -147,8 +147,11 @@ func newOCR2KeeperConfigProvider(lggr logger.Logger, chain evm.Chain, rargs rela configPoller, err := NewConfigPoller( lggr.With("contractID", rargs.ContractID), + chain.Client(), chain.LogPoller(), contractAddress, + // TODO: Does ocr2keeper need to support config contract? DF-19182 + nil, ) if err != nil { return nil, errors.Wrap(err, "failed to create config poller") diff --git a/core/services/relay/evm/ocr2vrf.go b/core/services/relay/evm/ocr2vrf.go index 6e440112e92..14004d0b1aa 100644 --- a/core/services/relay/evm/ocr2vrf.go +++ b/core/services/relay/evm/ocr2vrf.go @@ -135,8 +135,12 @@ func newOCR2VRFConfigProvider(lggr logger.Logger, chain evm.Chain, rargs relayty } configPoller, err := NewConfigPoller( lggr.With("contractID", rargs.ContractID), + chain.Client(), chain.LogPoller(), - contractAddress) + contractAddress, + // TODO: Does ocr2vrf need to support config contract? DF-19182 + nil, + ) if err != nil { return nil, err } diff --git a/core/services/relay/evm/types/types.go b/core/services/relay/evm/types/types.go index 8671082db25..97eddd7d9cf 100644 --- a/core/services/relay/evm/types/types.go +++ b/core/services/relay/evm/types/types.go @@ -20,9 +20,10 @@ import ( ) type RelayConfig struct { - ChainID *utils.Big `json:"chainID"` - FromBlock uint64 `json:"fromBlock"` - EffectiveTransmitterID null.String `json:"effectiveTransmitterID"` + ChainID *utils.Big `json:"chainID"` + FromBlock uint64 `json:"fromBlock"` + EffectiveTransmitterID null.String `json:"effectiveTransmitterID"` + ConfigContractAddress *common.Address `json:"configContractAddress"` // Contract-specific SendingKeys pq.StringArray `json:"sendingKeys"` diff --git a/core/services/transmission/integration_test.go b/core/services/transmission/integration_test.go index 3aa025f0ae0..0484b1d8cd6 100644 --- a/core/services/transmission/integration_test.go +++ b/core/services/transmission/integration_test.go @@ -63,7 +63,7 @@ func deployTransmissionUniverse(t *testing.T) *EntryPointUniverse { holder1Key := cltest.MustGenerateRandomKey(t) t.Log("Holder key:", holder1Key.String()) - // Construct simulated blockchain environmnet. + // Construct simulated blockchain environment. 
holder1Transactor, err := bind.NewKeyedTransactorWithChainID(holder1Key.ToEcdsaPrivKey(), testutils.SimulatedChainID) require.NoError(t, err) var ( diff --git a/core/services/vrf/v1/listener_v1.go b/core/services/vrf/v1/listener_v1.go index 39aec367b4a..03b92bc15cb 100644 --- a/core/services/vrf/v1/listener_v1.go +++ b/core/services/vrf/v1/listener_v1.go @@ -429,7 +429,7 @@ func (lsn *Listener) ProcessRequest(ctx context.Context, req request) bool { run := pipeline.NewRun(*lsn.Job.PipelineSpec, vars) // The VRF pipeline has no async tasks, so we don't need to check for `incomplete` - if _, err = lsn.PipelineRunner.Run(ctx, &run, lggr, true, func(tx pg.Queryer) error { + if _, err = lsn.PipelineRunner.Run(ctx, run, lggr, true, func(tx pg.Queryer) error { // Always mark consumed regardless of whether the proof failed or not. if err = lsn.LogBroadcaster.MarkConsumed(req.lb, pg.WithQueryer(tx)); err != nil { lggr.Errorw("Failed mark consumed", "err", err) diff --git a/core/services/vrf/v2/integration_v2_plus_test.go b/core/services/vrf/v2/integration_v2_plus_test.go index 8923bfac643..7e05c5b347c 100644 --- a/core/services/vrf/v2/integration_v2_plus_test.go +++ b/core/services/vrf/v2/integration_v2_plus_test.go @@ -253,7 +253,7 @@ func newVRFCoordinatorV2PlusUniverse(t *testing.T, key ethkey.KeyV2, numConsumer big.NewInt(1e16), // 0.01 eth per link fallbackLinkPrice vrf_coordinator_v2plus.VRFCoordinatorV2PlusFeeConfig{ FulfillmentFlatFeeLinkPPM: uint32(1000), // 0.001 LINK premium - FulfillmentFlatFeeEthPPM: uint32(5), // 0.000005 ETH preimum + FulfillmentFlatFeeEthPPM: uint32(5), // 0.000005 ETH premium }, ) require.NoError(t, err, "failed to set coordinator configuration") diff --git a/core/services/vrf/v2/integration_v2_test.go b/core/services/vrf/v2/integration_v2_test.go index f4054333808..33e613733d5 100644 --- a/core/services/vrf/v2/integration_v2_test.go +++ b/core/services/vrf/v2/integration_v2_test.go @@ -803,7 +803,7 @@ func mineBatch(t *testing.T, requestIDs []*big.Int, subID *big.Int, backend *bac require.NoError(t, err) for _, tx := range txs { var evmTx txmgr.Tx - txmgr.DbEthTxToEthTx(tx, &evmTx) + tx.ToTx(&evmTx) meta, err := evmTx.GetMeta() require.NoError(t, err) for _, requestID := range meta.RequestIDs { @@ -2105,7 +2105,8 @@ func TestStartingCountsV1(t *testing.T) { sql := `INSERT INTO evm.txes (nonce, from_address, to_address, encoded_payload, value, gas_limit, state, created_at, broadcast_at, initial_broadcast_at, meta, subject, evm_chain_id, min_confirmations, pipeline_task_run_id) VALUES (:nonce, :from_address, :to_address, :encoded_payload, :value, :gas_limit, :state, :created_at, :broadcast_at, :initial_broadcast_at, :meta, :subject, :evm_chain_id, :min_confirmations, :pipeline_task_run_id);` for _, tx := range append(confirmedTxes, unconfirmedTxes...) 
{ - dbEtx := txmgr.DbEthTxFromEthTx(&tx) + var dbEtx txmgr.DbEthTx + dbEtx.FromTx(&tx) //nolint:gosec // just copying fields _, err = db.NamedExec(sql, &dbEtx) require.NoError(t, err) } @@ -2143,10 +2144,10 @@ VALUES (:nonce, :from_address, :to_address, :encoded_payload, :value, :gas_limit sql = `INSERT INTO evm.tx_attempts (eth_tx_id, gas_price, signed_raw_tx, hash, state, created_at, chain_specific_gas_limit) VALUES (:eth_tx_id, :gas_price, :signed_raw_tx, :hash, :state, :created_at, :chain_specific_gas_limit)` for _, attempt := range txAttempts { - dbAttempt := txmgr.DbEthTxAttemptFromEthTxAttempt(&attempt) //nolint:gosec // just copying fields + var dbAttempt txmgr.DbEthTxAttempt + dbAttempt.FromTxAttempt(&attempt) //nolint:gosec // just copying fields _, err = db.NamedExec(sql, &dbAttempt) require.NoError(t, err) - txmgr.DbEthTxAttemptToEthTxAttempt(dbAttempt, &attempt) //nolint:gosec // just copying fields } // add evm.receipts @@ -2164,7 +2165,7 @@ VALUES (:nonce, :from_address, :to_address, :encoded_payload, :value, :gas_limit sql = `INSERT INTO evm.receipts (block_hash, tx_hash, block_number, transaction_index, receipt, created_at) VALUES (:block_hash, :tx_hash, :block_number, :transaction_index, :receipt, :created_at)` for _, r := range receipts { - _, err := db.NamedExec(sql, &r) + _, err := db.NamedExec(sql, r) require.NoError(t, err) } diff --git a/core/services/vrf/v2/listener_v2.go b/core/services/vrf/v2/listener_v2.go index 047a1e6e29a..ff915fba34e 100644 --- a/core/services/vrf/v2/listener_v2.go +++ b/core/services/vrf/v2/listener_v2.go @@ -181,7 +181,7 @@ type vrfPipelineResult struct { // fundsNeeded indicates a "minimum balance" in juels or wei that must be held in the // subscription's account in order to fulfill the request. fundsNeeded *big.Int - run pipeline.Run + run *pipeline.Run payload string gasLimit uint32 req pendingRequest @@ -1098,7 +1098,7 @@ func (lsn *listenerV2) processRequestsPerSubHelper( ll.Infow("Enqueuing fulfillment") var transaction txmgr.Tx err = lsn.q.Transaction(func(tx pg.Queryer) error { - if err = lsn.pipelineRunner.InsertFinishedRun(&p.run, true, pg.WithQueryer(tx)); err != nil { + if err = lsn.pipelineRunner.InsertFinishedRun(p.run, true, pg.WithQueryer(tx)); err != nil { return err } if err = lsn.logBroadcaster.MarkConsumed(p.req.lb, pg.WithQueryer(tx)); err != nil { diff --git a/core/services/vrf/v2/listener_v2_types.go b/core/services/vrf/v2/listener_v2_types.go index 4ad645ac17c..e8c3a8ccb13 100644 --- a/core/services/vrf/v2/listener_v2_types.go +++ b/core/services/vrf/v2/listener_v2_types.go @@ -41,7 +41,7 @@ func newBatchFulfillment(result vrfPipelineResult, fromAddress common.Address, v }, totalGasLimit: result.gasLimit, runs: []*pipeline.Run{ - &result.run, + result.run, }, reqIDs: []*big.Int{ result.req.req.RequestID(), @@ -95,7 +95,7 @@ func (b *batchFulfillments) addRun(result vrfPipelineResult, fromAddress common. 
currBatch.proofs = append(currBatch.proofs, result.proof) currBatch.commitments = append(currBatch.commitments, result.reqCommitment) currBatch.totalGasLimit += result.gasLimit - currBatch.runs = append(currBatch.runs, &result.run) + currBatch.runs = append(currBatch.runs, result.run) currBatch.reqIDs = append(currBatch.reqIDs, result.req.req.RequestID()) currBatch.lbs = append(currBatch.lbs, result.req.lb) currBatch.maxFees = append(currBatch.maxFees, result.maxFee) diff --git a/core/services/webhook/delegate.go b/core/services/webhook/delegate.go index e373ff8087a..ca85a4d1621 100644 --- a/core/services/webhook/delegate.go +++ b/core/services/webhook/delegate.go @@ -172,7 +172,7 @@ func (r *webhookJobRunner) RunJob(ctx context.Context, jobUUID uuid.UUID, reques run := pipeline.NewRun(*spec.PipelineSpec, vars) - _, err := r.runner.Run(ctx, &run, jobLggr, true, nil) + _, err := r.runner.Run(ctx, run, jobLggr, true, nil) if err != nil { jobLggr.Errorw("Error running pipeline for webhook job", "err", err) return 0, err diff --git a/core/store/models/common_test.go b/core/store/models/common_test.go index 1f514142b80..57b7ca73c6b 100644 --- a/core/store/models/common_test.go +++ b/core/store/models/common_test.go @@ -203,7 +203,7 @@ func TestDuration_MarshalJSON(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - b, err := json.Marshal(&test.input) + b, err := json.Marshal(test.input) assert.NoError(t, err) assert.Equal(t, test.want, string(b)) }) diff --git a/core/utils/big_test.go b/core/utils/big_test.go index ca8be3f90b8..e46d46a0651 100644 --- a/core/utils/big_test.go +++ b/core/utils/big_test.go @@ -19,7 +19,7 @@ func TestBigFloatMarshal(t *testing.T) { } for _, tc := range tests { - buf, err := json.Marshal(&tc.obj) + buf, err := json.Marshal(tc.obj) require.NoError(t, err) assert.Equal(t, tc.exp, string(buf)) } diff --git a/core/web/bridge_types_controller_test.go b/core/web/bridge_types_controller_test.go index c875df94539..7184b05f5e0 100644 --- a/core/web/bridge_types_controller_test.go +++ b/core/web/bridge_types_controller_test.go @@ -105,7 +105,8 @@ func TestValidateBridgeType(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { - result := web.ValidateBridgeType(&test.request) + req := test.request + result := web.ValidateBridgeType(&req) assert.Equal(t, test.want, result) }) } diff --git a/core/web/pipeline_job_spec_errors_controller_test.go b/core/web/pipeline_job_spec_errors_controller_test.go index 13c02379674..8ec77a84f05 100644 --- a/core/web/pipeline_job_spec_errors_controller_test.go +++ b/core/web/pipeline_job_spec_errors_controller_test.go @@ -23,7 +23,7 @@ func TestPipelineJobSpecErrorsController_Delete_2(t *testing.T) { j, err := app.JobORM().FindJob(testutils.Context(t), jID) require.NoError(t, err) t.Log(j.JobSpecErrors) - require.GreaterOrEqual(t, len(j.JobSpecErrors), 1) // second 'got nil head' error may have occured also + require.GreaterOrEqual(t, len(j.JobSpecErrors), 1) // second 'got nil head' error may have occurred also var id int64 = -1 for i := range j.JobSpecErrors { jse := j.JobSpecErrors[i] diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 5bc19cbab8f..f0b83bf7574 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -12,6 +12,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - Helper migrations function for injecting env vars into goose migrations. 
This was done to inject chainID into evm chain id not null in specs migrations. +- OCR2 jobs now support querying the state contract for configurations if it has been deployed. This can help on chains such as BSC which "manage" state bloat by arbitrarily deleting logs older than a certain date. In this case, if logs are missing we will query the contract directly and retrieve the latest config from chain state. Chainlink will perform no extra RPC calls unless the job spec has this feature explicitly enabled. On chains that require this, nops may see an increase in RPC calls. This can be enabled for OCR2 jobs by specifying `ConfigContractAddress` in the relay config TOML. ### Removed @@ -35,7 +36,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fixed health checker to include more services in the prometheus `health` metric and HTTP `/health` endpoint ## 2.5.0 - UNRELEASED -======= - Unauthenticated users executing CLI commands previously generated a confusing error log, which is now removed: ``` diff --git a/go.mod b/go.mod index 33b0a6b121d..00af8d84c20 100644 --- a/go.mod +++ b/go.mod @@ -53,7 +53,7 @@ require ( github.com/onsi/gomega v1.27.8 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pelletier/go-toml v1.9.5 - github.com/pelletier/go-toml/v2 v2.0.9 + github.com/pelletier/go-toml/v2 v2.1.0 github.com/pkg/errors v0.9.1 github.com/pressly/goose/v3 v3.15.0 github.com/prometheus/client_golang v1.16.0 @@ -67,11 +67,11 @@ require ( github.com/shopspring/decimal v1.3.1 github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20230913032705-f924d753cc47 - github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230918212835-8a0b08df72a3 + github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230923153757-d0cdb6bea61c github.com/smartcontractkit/chainlink-solana v1.0.3-0.20230831134610-680240b97aca github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20230901115736-bbabe542a918 - github.com/smartcontractkit/libocr v0.0.0-20230918212407-dbd4e505b3e6 - github.com/smartcontractkit/ocr2keepers v0.7.25 + github.com/smartcontractkit/libocr v0.0.0-20230922131214-122accb19ea6 + github.com/smartcontractkit/ocr2keepers v0.7.27 github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687 github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 diff --git a/go.sum b/go.sum index 5bff997379c..bc2d5b9334b 100644 --- a/go.sum +++ b/go.sum @@ -1344,8 +1344,8 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0= -github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 
h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU= github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= @@ -1457,8 +1457,8 @@ github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumv github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20230913032705-f924d753cc47 h1:vdieOW3CZGdD2R5zvCSMS+0vksyExPN3/Fa1uVfld/A= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20230913032705-f924d753cc47/go.mod h1:xMwqRdj5vqYhCJXgKVqvyAwdcqM6ZAEhnwEQ4Khsop8= -github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230918212835-8a0b08df72a3 h1:FonaZ1kgRK0yY7D0jF5pL3K+0DYUnKcnStOOcIN+Hhg= -github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230918212835-8a0b08df72a3/go.mod h1:gWclxGW7rLkbjXn7FGizYlyKhp/boekto4MEYGyiMG4= +github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230923153757-d0cdb6bea61c h1:be/0dJGClO0wS7gngfr0qUQq7RO/i7aJ8e5wG1b6/Ns= +github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230923153757-d0cdb6bea61c/go.mod h1:gWclxGW7rLkbjXn7FGizYlyKhp/boekto4MEYGyiMG4= github.com/smartcontractkit/chainlink-solana v1.0.3-0.20230831134610-680240b97aca h1:x7M0m512gtXw5Z4B1WJPZ52VgshoIv+IvHqQ8hsH4AE= github.com/smartcontractkit/chainlink-solana v1.0.3-0.20230831134610-680240b97aca/go.mod h1:RIUJXn7EVp24TL2p4FW79dYjyno23x5mjt1nKN+5WEk= github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20230901115736-bbabe542a918 h1:ByVauKFXphRlSNG47lNuxZ9aicu+r8AoNp933VRPpCw= @@ -1467,10 +1467,10 @@ github.com/smartcontractkit/go-plugin v0.0.0-20230605132010-0f4d515d1472 h1:x3kN github.com/smartcontractkit/go-plugin v0.0.0-20230605132010-0f4d515d1472/go.mod h1:6/1TEzT0eQznvI/gV2CM29DLSkAK/e58mUWKVsPaph0= github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f h1:hgJif132UCdjo8u43i7iPN1/MFnu49hv7lFGFftCHKU= github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f/go.mod h1:MvMXoufZAtqExNexqi4cjrNYE9MefKddKylxjS+//n0= -github.com/smartcontractkit/libocr v0.0.0-20230918212407-dbd4e505b3e6 h1:w+8TI2Vcm3vk8XQz40ddcwy9BNZgoakXIby35Y54iDU= -github.com/smartcontractkit/libocr v0.0.0-20230918212407-dbd4e505b3e6/go.mod h1:2lyRkw/qLQgUWlrWWmq5nj0y90rWeO6Y+v+fCakRgb0= -github.com/smartcontractkit/ocr2keepers v0.7.25 h1:jkXje8B9SFMxiI1fufauqxstU95GNu8dtaIJofNyZgo= -github.com/smartcontractkit/ocr2keepers v0.7.25/go.mod h1:4e1ZDRz7fpLgcRUjJpq+5mkoD0ga11BxrSp2JTWKADQ= +github.com/smartcontractkit/libocr v0.0.0-20230922131214-122accb19ea6 h1:eSo9r53fARv2MnIO5pqYvQOXMBsTlAwhHyQ6BAVp6bY= +github.com/smartcontractkit/libocr v0.0.0-20230922131214-122accb19ea6/go.mod h1:2lyRkw/qLQgUWlrWWmq5nj0y90rWeO6Y+v+fCakRgb0= +github.com/smartcontractkit/ocr2keepers v0.7.27 h1:kwqMrzmEdq6gH4yqNuLQCbdlED0KaIjwZzu3FF+Gves= +github.com/smartcontractkit/ocr2keepers v0.7.27/go.mod h1:1QGzJURnoWpysguPowOe2bshV0hNp1YX10HHlhDEsas= github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687 h1:NwC3SOc25noBTe1KUQjt45fyTIuInhoE2UfgcHAdihM= github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687/go.mod h1:YYZq52t4wcHoMQeITksYsorD+tZcOyuVU5+lvot3VFM= github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb h1:OMaBUb4X9IFPLbGbCHsMU+kw/BPCrewaVwWGIBc0I4A= diff --git a/integration-tests/actions/ocr2_helpers.go b/integration-tests/actions/ocr2_helpers.go index 293ea2b73c0..aead74f2bdd 100644 --- a/integration-tests/actions/ocr2_helpers.go +++ 
b/integration-tests/actions/ocr2_helpers.go @@ -35,12 +35,13 @@ func DeployOCRv2Contracts( contractDeployer contracts.ContractDeployer, transmitters []string, client blockchain.EVMClient, + ocrOptions contracts.OffchainOptions, ) ([]contracts.OffchainAggregatorV2, error) { var ocrInstances []contracts.OffchainAggregatorV2 for contractCount := 0; contractCount < numberOfContracts; contractCount++ { ocrInstance, err := contractDeployer.DeployOffchainAggregatorV2( linkTokenContract.Address(), - contracts.DefaultOffChainAggregatorOptions(), + ocrOptions, ) if err != nil { return nil, fmt.Errorf("OCRv2 instance deployment have failed: %w", err) diff --git a/integration-tests/actions/ocr2_helpers_local.go b/integration-tests/actions/ocr2_helpers_local.go index c1e863561da..0b20e4cfee7 100644 --- a/integration-tests/actions/ocr2_helpers_local.go +++ b/integration-tests/actions/ocr2_helpers_local.go @@ -4,6 +4,9 @@ import ( "crypto/ed25519" "encoding/hex" "fmt" + "strings" + "time" + "github.com/ethereum/go-ethereum/common" "github.com/google/uuid" "github.com/lib/pq" @@ -13,14 +16,13 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/v2/core/services/job" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/testhelpers" "github.com/smartcontractkit/chainlink/v2/core/store/models" "github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median" "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" "github.com/smartcontractkit/libocr/offchainreporting2plus/types" "golang.org/x/sync/errgroup" "gopkg.in/guregu/null.v4" - "strings" - "time" ) func CreateOCRv2JobsLocal( @@ -128,12 +130,12 @@ func CreateOCRv2JobsLocal( return nil } -func BuildMedianOCR2ConfigLocal(workerNodes []*client.ChainlinkClient) (*contracts.OCRv2Config, error) { +func BuildMedianOCR2ConfigLocal(workerNodes []*client.ChainlinkClient, ocrOffchainOptions contracts.OffchainOptions) (*contracts.OCRv2Config, error) { S, oracleIdentities, err := GetOracleIdentitiesWithKeyIndexLocal(workerNodes, 0) if err != nil { return nil, err } - signerKeys, transmitterAccounts, f_, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper.ContractSetConfigArgsForTests( + signerKeys, transmitterAccounts, f_, _, offchainConfigVersion, offchainConfig, err := confighelper.ContractSetConfigArgsForTests( 30*time.Second, // deltaProgress time.Duration, 30*time.Second, // deltaResend time.Duration, 10*time.Second, // deltaRound time.Duration, @@ -173,6 +175,8 @@ func BuildMedianOCR2ConfigLocal(workerNodes []*client.ChainlinkClient) (*contrac transmitterAddresses = append(transmitterAddresses, common.HexToAddress(string(account))) } + onchainConfig, err := testhelpers.GenerateDefaultOCR2OnchainConfig(ocrOffchainOptions.MinimumAnswer, ocrOffchainOptions.MaximumAnswer) + return &contracts.OCRv2Config{ Signers: signerAddresses, Transmitters: transmitterAddresses, @@ -180,7 +184,7 @@ func BuildMedianOCR2ConfigLocal(workerNodes []*client.ChainlinkClient) (*contrac OnchainConfig: onchainConfig, OffchainConfigVersion: offchainConfigVersion, OffchainConfig: []byte(fmt.Sprintf("0x%s", offchainConfig)), - }, nil + }, err } func GetOracleIdentitiesWithKeyIndexLocal( diff --git a/integration-tests/go.mod b/integration-tests/go.mod index 612fa2724fb..49ff6e0c634 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -15,15 +15,15 @@ require ( 
github.com/lib/pq v1.10.9 github.com/manifoldco/promptui v0.9.0 github.com/onsi/gomega v1.27.8 - github.com/pelletier/go-toml/v2 v2.0.9 + github.com/pelletier/go-toml/v2 v2.1.0 github.com/pkg/errors v0.9.1 github.com/rs/zerolog v1.30.0 github.com/slack-go/slack v0.12.2 github.com/smartcontractkit/chainlink-env v0.36.0 github.com/smartcontractkit/chainlink-testing-framework v1.17.0 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 - github.com/smartcontractkit/libocr v0.0.0-20230918212407-dbd4e505b3e6 - github.com/smartcontractkit/ocr2keepers v0.7.25 + github.com/smartcontractkit/libocr v0.0.0-20230922131214-122accb19ea6 + github.com/smartcontractkit/ocr2keepers v0.7.27 github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687 github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 github.com/smartcontractkit/wasp v0.3.0 @@ -384,7 +384,7 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 // indirect github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20230913032705-f924d753cc47 // indirect - github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230918212835-8a0b08df72a3 // indirect + github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230923153757-d0cdb6bea61c // indirect github.com/smartcontractkit/chainlink-solana v1.0.3-0.20230831134610-680240b97aca // indirect github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20230901115736-bbabe542a918 // indirect github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb // indirect diff --git a/integration-tests/go.sum b/integration-tests/go.sum index 8ea4ed1133a..507d62baf28 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -2192,8 +2192,8 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0= -github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= @@ -2360,8 +2360,8 @@ github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20230913032705-f924d753cc4 github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20230913032705-f924d753cc47/go.mod h1:xMwqRdj5vqYhCJXgKVqvyAwdcqM6ZAEhnwEQ4Khsop8= github.com/smartcontractkit/chainlink-env v0.36.0 h1:CFOjs0c0y3lrHi/fl5qseCH9EQa5W/6CFyOvmhe2VnA= github.com/smartcontractkit/chainlink-env v0.36.0/go.mod h1:NbRExHmJGnKSYXmvNuJx5VErSx26GtE1AEN/CRzYOg8= -github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230918212835-8a0b08df72a3 h1:FonaZ1kgRK0yY7D0jF5pL3K+0DYUnKcnStOOcIN+Hhg= -github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230918212835-8a0b08df72a3/go.mod h1:gWclxGW7rLkbjXn7FGizYlyKhp/boekto4MEYGyiMG4= 
+github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230923153757-d0cdb6bea61c h1:be/0dJGClO0wS7gngfr0qUQq7RO/i7aJ8e5wG1b6/Ns= +github.com/smartcontractkit/chainlink-relay v0.1.7-0.20230923153757-d0cdb6bea61c/go.mod h1:gWclxGW7rLkbjXn7FGizYlyKhp/boekto4MEYGyiMG4= github.com/smartcontractkit/chainlink-solana v1.0.3-0.20230831134610-680240b97aca h1:x7M0m512gtXw5Z4B1WJPZ52VgshoIv+IvHqQ8hsH4AE= github.com/smartcontractkit/chainlink-solana v1.0.3-0.20230831134610-680240b97aca/go.mod h1:RIUJXn7EVp24TL2p4FW79dYjyno23x5mjt1nKN+5WEk= github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20230901115736-bbabe542a918 h1:ByVauKFXphRlSNG47lNuxZ9aicu+r8AoNp933VRPpCw= @@ -2372,10 +2372,10 @@ github.com/smartcontractkit/go-plugin v0.0.0-20230605132010-0f4d515d1472 h1:x3kN github.com/smartcontractkit/go-plugin v0.0.0-20230605132010-0f4d515d1472/go.mod h1:6/1TEzT0eQznvI/gV2CM29DLSkAK/e58mUWKVsPaph0= github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f h1:hgJif132UCdjo8u43i7iPN1/MFnu49hv7lFGFftCHKU= github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f/go.mod h1:MvMXoufZAtqExNexqi4cjrNYE9MefKddKylxjS+//n0= -github.com/smartcontractkit/libocr v0.0.0-20230918212407-dbd4e505b3e6 h1:w+8TI2Vcm3vk8XQz40ddcwy9BNZgoakXIby35Y54iDU= -github.com/smartcontractkit/libocr v0.0.0-20230918212407-dbd4e505b3e6/go.mod h1:2lyRkw/qLQgUWlrWWmq5nj0y90rWeO6Y+v+fCakRgb0= -github.com/smartcontractkit/ocr2keepers v0.7.25 h1:jkXje8B9SFMxiI1fufauqxstU95GNu8dtaIJofNyZgo= -github.com/smartcontractkit/ocr2keepers v0.7.25/go.mod h1:4e1ZDRz7fpLgcRUjJpq+5mkoD0ga11BxrSp2JTWKADQ= +github.com/smartcontractkit/libocr v0.0.0-20230922131214-122accb19ea6 h1:eSo9r53fARv2MnIO5pqYvQOXMBsTlAwhHyQ6BAVp6bY= +github.com/smartcontractkit/libocr v0.0.0-20230922131214-122accb19ea6/go.mod h1:2lyRkw/qLQgUWlrWWmq5nj0y90rWeO6Y+v+fCakRgb0= +github.com/smartcontractkit/ocr2keepers v0.7.27 h1:kwqMrzmEdq6gH4yqNuLQCbdlED0KaIjwZzu3FF+Gves= +github.com/smartcontractkit/ocr2keepers v0.7.27/go.mod h1:1QGzJURnoWpysguPowOe2bshV0hNp1YX10HHlhDEsas= github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687 h1:NwC3SOc25noBTe1KUQjt45fyTIuInhoE2UfgcHAdihM= github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687/go.mod h1:YYZq52t4wcHoMQeITksYsorD+tZcOyuVU5+lvot3VFM= github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb h1:OMaBUb4X9IFPLbGbCHsMU+kw/BPCrewaVwWGIBc0I4A= diff --git a/integration-tests/smoke/forwarders_ocr2_test.go b/integration-tests/smoke/forwarders_ocr2_test.go index 2f7c4fa21d9..48d1d20b4dc 100644 --- a/integration-tests/smoke/forwarders_ocr2_test.go +++ b/integration-tests/smoke/forwarders_ocr2_test.go @@ -13,6 +13,7 @@ import ( "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink/integration-tests/actions" + "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" ) @@ -73,7 +74,8 @@ func TestForwarderOCR2Basic(t *testing.T) { transmitters = append(transmitters, forwarderCommonAddress.Hex()) } - ocrInstances, err := actions.DeployOCRv2Contracts(1, linkTokenContract, env.ContractDeployer, transmitters, env.EVMClient) + ocrOffchainOptions := contracts.DefaultOffChainAggregatorOptions() + ocrInstances, err := actions.DeployOCRv2Contracts(1, linkTokenContract, env.ContractDeployer, transmitters, env.EVMClient, ocrOffchainOptions) require.NoError(t, err, 
"Error deploying OCRv2 contracts with forwarders") err = env.EVMClient.WaitForEvents() require.NoError(t, err, "Error waiting for events") @@ -83,7 +85,7 @@ func TestForwarderOCR2Basic(t *testing.T) { err = env.EVMClient.WaitForEvents() require.NoError(t, err, "Error waiting for events") - ocrv2Config, err := actions.BuildMedianOCR2ConfigLocal(workerNodes) + ocrv2Config, err := actions.BuildMedianOCR2ConfigLocal(workerNodes, ocrOffchainOptions) require.NoError(t, err, "Error building OCRv2 config") ocrv2Config.Transmitters = authorizedForwarders diff --git a/integration-tests/smoke/ocr2_test.go b/integration-tests/smoke/ocr2_test.go index db11f187f5a..a1ad9fa5296 100644 --- a/integration-tests/smoke/ocr2_test.go +++ b/integration-tests/smoke/ocr2_test.go @@ -22,6 +22,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/config" + "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" ) @@ -69,13 +70,14 @@ func TestOCRv2Basic(t *testing.T) { transmitters = append(transmitters, addr) } - aggregatorContracts, err := actions.DeployOCRv2Contracts(1, linkToken, env.ContractDeployer, transmitters, env.EVMClient) + ocrOffchainOptions := contracts.DefaultOffChainAggregatorOptions() + aggregatorContracts, err := actions.DeployOCRv2Contracts(1, linkToken, env.ContractDeployer, transmitters, env.EVMClient, ocrOffchainOptions) require.NoError(t, err, "Error deploying OCRv2 aggregator contracts") err = actions.CreateOCRv2JobsLocal(aggregatorContracts, bootstrapNode, workerNodes, env.MockServer.Client, "ocr2", 5, env.EVMClient.GetChainID().Uint64(), false) require.NoError(t, err, "Error creating OCRv2 jobs") - ocrv2Config, err := actions.BuildMedianOCR2ConfigLocal(workerNodes) + ocrv2Config, err := actions.BuildMedianOCR2ConfigLocal(workerNodes, ocrOffchainOptions) require.NoError(t, err, "Error building OCRv2 config") err = actions.ConfigureOCRv2AggregatorContracts(env.EVMClient, ocrv2Config, aggregatorContracts) diff --git a/plugins/cmd/chainlink-median/main.go b/plugins/cmd/chainlink-median/main.go index 4d966548525..00836fa7c24 100644 --- a/plugins/cmd/chainlink-median/main.go +++ b/plugins/cmd/chainlink-median/main.go @@ -14,7 +14,7 @@ const ( ) func main() { - s := plugins.StartServer(loggerName) + s := plugins.MustNewStartedServer(loggerName) defer s.Stop() p := median.NewPlugin(s.Logger) diff --git a/plugins/cmd/chainlink-solana/main.go b/plugins/cmd/chainlink-solana/main.go index 132d7244fdf..ec30fa59f41 100644 --- a/plugins/cmd/chainlink-solana/main.go +++ b/plugins/cmd/chainlink-solana/main.go @@ -21,7 +21,7 @@ const ( ) func main() { - s := plugins.StartServer(loggerName) + s := plugins.MustNewStartedServer(loggerName) defer s.Stop() p := &pluginRelayer{Base: plugins.Base{Logger: s.Logger}} diff --git a/plugins/cmd/chainlink-starknet/main.go b/plugins/cmd/chainlink-starknet/main.go index aa69c85fe42..1052f3c1fc6 100644 --- a/plugins/cmd/chainlink-starknet/main.go +++ b/plugins/cmd/chainlink-starknet/main.go @@ -21,7 +21,7 @@ const ( ) func main() { - s := plugins.StartServer(loggerName) + s := plugins.MustNewStartedServer(loggerName) defer s.Stop() p := &pluginRelayer{Base: plugins.Base{Logger: s.Logger}} diff --git a/plugins/server.go b/plugins/server.go index 0d0e0dc62c4..b1d43612480 
100644 --- a/plugins/server.go +++ b/plugins/server.go @@ -9,47 +9,78 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services" ) -// StartServer returns a started Server. +// NewStartedServer returns a started Server. // The caller is responsible for calling Server.Stop(). -func StartServer(loggerName string) *Server { - s := Server{ +func NewStartedServer(loggerName string) (*Server, error) { + s, err := newServer(loggerName) + if err != nil { + return nil, err + } + err = s.start() + if err != nil { + return nil, err + } + + return s, nil +} + +// MustNewStartedServer returns a new started Server like NewStartedServer, but logs and exits in the event of error. +// The caller is responsible for calling Server.Stop(). +func MustNewStartedServer(loggerName string) *Server { + s, err := newServer(loggerName) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to start server: %s\n", err) + os.Exit(1) + } + err = s.start() + if err != nil { + s.Logger.Fatalf("Failed to start server: %s", err) + } + + return s +} + +// Server holds common plugin server fields. +type Server struct { + loop.GRPCOpts + Logger logger.SugaredLogger + *PromServer + services.Checker +} + +func newServer(loggerName string) (*Server, error) { + s := &Server{ // default prometheus.Registerer GRPCOpts: loop.SetupTelemetry(nil), } lggr, err := loop.NewLogger() if err != nil { - fmt.Fprintf(os.Stderr, "Failed to create logger: %s\n", err) - os.Exit(1) + return nil, fmt.Errorf("error creating logger: %s", err) } lggr = logger.Named(lggr, loggerName) s.Logger = logger.Sugared(lggr) + return s, nil +} +func (s *Server) start() error { envCfg, err := GetEnvConfig() if err != nil { - lggr.Fatalf("Failed to get environment configuration: %s\n", err) + return fmt.Errorf("error getting environment configuration: %w", err) } - s.PromServer = NewPromServer(envCfg.PrometheusPort(), lggr) + s.PromServer = NewPromServer(envCfg.PrometheusPort(), s.Logger) err = s.PromServer.Start() if err != nil { - lggr.Fatalf("Unrecoverable error starting prometheus server: %s", err) + return fmt.Errorf("error starting prometheus server: %w", err) } s.Checker = services.NewChecker() err = s.Checker.Start() if err != nil { - lggr.Fatalf("Failed to start health checker: %v", err) + return fmt.Errorf("error starting health checker: %w", err) } - return &s -} - -// Server holds common plugin server fields. -type Server struct { - loop.GRPCOpts - Logger logger.SugaredLogger - *PromServer - services.Checker + return nil } // MustRegister registers the Checkable with services.Checker, or exits upon failure. @@ -62,7 +93,7 @@ func (s *Server) MustRegister(c services.Checkable) { // Stop closes resources and flushes logs. func (s *Server) Stop() { s.Logger.ErrorIfFn(s.Checker.Close, "Failed to close health checker") - s.Logger.ErrorIfFn(s.PromServer.Close, "error closing prometheus server") + s.Logger.ErrorIfFn(s.PromServer.Close, "Failed to close prometheus server") if err := s.Logger.Sync(); err != nil { fmt.Println("Failed to sync logger:", err) }
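
Below is a minimal sketch (not part of the diff above) of a plugin entry point using the new error-returning `NewStartedServer`; the `main.go` changes in this PR all use `MustNewStartedServer`. The logger name is illustrative, and the import path assumes the `plugins` package lives at `github.com/smartcontractkit/chainlink/v2/plugins`.
```
// Sketch only: uses the NewStartedServer/Stop API introduced above.
package main

import (
	"fmt"
	"os"

	"github.com/smartcontractkit/chainlink/v2/plugins"
)

func main() {
	s, err := plugins.NewStartedServer("PluginExample") // logger name is illustrative
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to start plugin server: %s\n", err)
		os.Exit(1)
	}
	// Per the godoc, the caller is responsible for stopping the server.
	defer s.Stop()

	// ... build the plugin with s.Logger and serve it, as the chainlink-median,
	// chainlink-solana, and chainlink-starknet mains in this PR do ...
}
```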
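
The `ConfigContractAddress` option described in the CHANGELOG entry corresponds to the new `configContractAddress` field on `RelayConfig` (see `core/services/relay/evm/types/types.go` above). A hypothetical OCR2 job spec fragment enabling the fallback might look like the following; the addresses, chain ID, and exact key casing (taken from the struct's json tag) are assumptions, not values from this PR.
```
# Hypothetical OCR2 job spec fragment; placeholder values only.
type       = "offchainreporting2"
relay      = "evm"
contractID = "0x1111111111111111111111111111111111111111"   # OCR2 aggregator

[relayConfig]
chainID               = 56
# Address of a deployed OCR configuration store contract. When set, the node
# falls back to reading the latest config from this contract if the SetConfig
# logs have been pruned from the chain.
configContractAddress = "0x2222222222222222222222222222222222222222"
```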