diff --git a/.changeset/brave-dots-breathe.md b/.changeset/brave-dots-breathe.md
new file mode 100644
index 00000000000..f1ae4f4d21e
--- /dev/null
+++ b/.changeset/brave-dots-breathe.md
@@ -0,0 +1,7 @@
+---
+"chainlink": minor
+---
+
+Added a new CLI command, `blocks find-lca`, which finds the latest block that is available in both the database and on the chain for the specified chain.
+Added a new CLI command, `node remove-blocks`, which removes all blocks and logs greater than or equal to the specified block number.
+#nops #added
diff --git a/.changeset/curvy-weeks-cover.md b/.changeset/curvy-weeks-cover.md
new file mode 100644
index 00000000000..0b19df8ad16
--- /dev/null
+++ b/.changeset/curvy-weeks-cover.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+#wip Keystone contract wrappers updated
diff --git a/.changeset/pink-schools-provide.md b/.changeset/pink-schools-provide.md
new file mode 100644
index 00000000000..6b2aa5ea0c4
--- /dev/null
+++ b/.changeset/pink-schools-provide.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+#bugfix Fixed an issue where the `rebroadcast-transactions` commands did not execute config validation.
diff --git a/.changeset/quick-fishes-heal.md b/.changeset/quick-fishes-heal.md
new file mode 100644
index 00000000000..966e74c843a
--- /dev/null
+++ b/.changeset/quick-fishes-heal.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+#changed
+Added the prefix `RPCClient returned error ({RPC_NAME})` to RPC errors to simplify filtering of RPC-related issues.
diff --git a/.changeset/sour-jars-cross.md b/.changeset/sour-jars-cross.md
new file mode 100644
index 00000000000..b904e8e3dd0
--- /dev/null
+++ b/.changeset/sour-jars-cross.md
@@ -0,0 +1,13 @@
+---
+"chainlink": patch
+---
+
+#added
+
+Added configurability to the mercury transmitter:
+
+```toml
+[Mercury.Transmitter]
+TransmitQueueMaxSize = 10_000 # Default
+TransmitTimeout = "5s" # Default
+```
diff --git a/.changeset/tidy-trees-tie.md b/.changeset/tidy-trees-tie.md
new file mode 100644
index 00000000000..7ff415e9de4
--- /dev/null
+++ b/.changeset/tidy-trees-tie.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+#changed Updated the log trigger log provider's readMaxBatchSize to 56
diff --git a/.github/actions/setup-create-base64-upgrade-config/action.yml b/.github/actions/setup-create-base64-upgrade-config/action.yml
index ed25fd6375f..8f514784725 100644
--- a/.github/actions/setup-create-base64-upgrade-config/action.yml
+++ b/.github/actions/setup-create-base64-upgrade-config/action.yml
@@ -92,6 +92,7 @@ runs:
       [ChainlinkUpgradeImage]
       image="$UPGRADE_IMAGE"
       version="$UPGRADE_VERSION"
+      postgres_version="$CHAINLINK_POSTGRES_VERSION"

       [Logging]
       test_log_collect=$test_log_collect
diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml
index 1f3e093cfdc..8dcf32b127e 100644
--- a/.github/workflows/integration-tests.yml
+++ b/.github/workflows/integration-tests.yml
@@ -218,40 +218,6 @@ jobs:
           AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
           dep_evm_sha: ${{ inputs.evm-ref }}

-  build-test-image:
-    if: startsWith(github.ref, 'refs/tags/') || github.event_name == 'schedule' || contains(join(github.event.pull_request.labels.*.name, ' '), 'build-test-image')
-    environment: integration
-    permissions:
-      id-token: write
-      contents: read
-    name: Build Test Image
-    runs-on: ubuntu22.04-16cores-64GB
-    needs: [changes]
-    steps:
-      - name: Collect Metrics
-        if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch'
-        id: collect-gha-metrics
-        uses: 
smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0
-        with:
-          id: ${{ env.COLLECTION_ID }}-build-test-image
-          org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }}
-          basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }}
-          hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }}
-          this-job-name: Build Test Image
-        continue-on-error: true
-      - name: Checkout the repo
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
-        with:
-          repository: smartcontractkit/chainlink
-          ref: ${{ inputs.cl_ref || github.event.pull_request.head.sha || github.event.merge_group.head_sha }}
-      - name: Build Test Image
-        if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch'
-        uses: ./.github/actions/build-test-image
-        with:
-          QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
-          QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
-          QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}
-
   compare-tests:
     needs: [changes]
     runs-on: ubuntu-latest
@@ -726,7 +692,7 @@
           cache_restore_only: "true"
           QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
           QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
-          QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }}
+          QA_KUBECONFIG: ""
           should_tidy: "false"
       - name: Show Otel-Collector Logs
         if: steps.check-label.outputs.trace == 'true' && matrix.product.name == 'ocr2' && matrix.product.tag_suffix == '-plugins'
@@ -830,6 +796,7 @@
   # Run the setup if the matrix finishes but this time save the cache if we have a cache hit miss
   # this will also only run if both of the matrix jobs pass
   eth-smoke-go-mod-cache:
+    environment: integration
     needs: [eth-smoke-tests]
     runs-on: ubuntu-latest
@@ -863,7 +830,7 @@
      id-token: write
      contents: read
    runs-on: ubuntu-latest
-    needs: [build-chainlink, changes, build-test-image]
+    needs: [build-chainlink, changes]
    # Only run migration tests on new tags
    if: startsWith(github.ref, 'refs/tags/')
    env:
@@ -876,6 +843,17 @@
      TEST_LOG_LEVEL: debug
      TEST_SUITE: migration
    steps:
+      - name: Collect Metrics
+        id: collect-gha-metrics
+        uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0
+        with:
+          id: ${{ env.COLLECTION_ID }}-migration-tests
+          org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }}
+          basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }}
+          hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }}
+          this-job-name: Version Migration Tests
+          test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}'
+        continue-on-error: true
      - name: Checkout the repo
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
        with:
@@ -886,7 +864,12 @@
        run: |
          untrimmed_ver=$(curl --header "Authorization: token ${{ secrets.GITHUB_TOKEN }}" --request GET https://api.github.com/repos/${{ github.repository }}/releases/latest | jq -r .name)
          latest_version="${untrimmed_ver:1}"
-          echo "latest_version=${latest_version} | tee -a $GITHUB_OUTPUT"
+          # Check if latest_version is empty
+          if [ -z "$latest_version" ]; then
+            echo "Error: The latest_version is empty. The migration tests need a version to run."
+ exit 1 + fi + echo "latest_version=${latest_version}" >> "$GITHUB_OUTPUT" - name: Name Versions run: | echo "Running migration tests from version '${{ steps.get_latest_version.outputs.latest_version }}' to: '${{ inputs.evm-ref || github.sha }}'" @@ -898,13 +881,22 @@ jobs: chainlinkVersion: ${{ steps.get_latest_version.outputs.latest_version }} upgradeImage: ${{ env.UPGRADE_IMAGE }} upgradeVersion: ${{ env.UPGRADE_VERSION }} + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + lokiEndpoint: https://${{ secrets.GRAFANA_INTERNAL_HOST }}/loki/api/v1/push + lokiTenantId: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + lokiBasicAuth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" - name: Run Migration Tests uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@519851800779323566b7b7c22cc21bff95dbb639 # v2.3.11 with: - test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json ./migration 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci -singlepackage + test_command_to_run: cd ./integration-tests && go test -timeout 20m -count=1 -json ./migration 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci -singlepackage test_download_vendor_packages_command: cd ./integration-tests && go mod download cl_repo: ${{ env.CHAINLINK_IMAGE }} cl_image_tag: ${{ steps.get_latest_version.outputs.latest_version }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} artifacts_name: node-migration-test-logs artifacts_location: | ./integration-tests/migration/logs @@ -916,28 +908,24 @@ jobs: cache_restore_only: "true" QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + QA_KUBECONFIG: "" go_coverage_src_dir: /var/tmp/go-coverage go_coverage_dest_dir: ${{ github.workspace }}/.covdata + should_tidy: "false" - name: Upload Coverage Data uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 with: name: cl-node-coverage-data-migration-tests path: .covdata retention-days: 1 - - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 + - name: Notify Slack + if: failure() && github.event_name != 'workflow_dispatch' + uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 + env: + SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} with: - id: ${{ env.COLLECTION_ID }}-migration-tests - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Version Migration Tests - test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}' - continue-on-error: true + channel-id: "#team-test-tooling-internal" + slack-message: ":x: :mild-panic-intensifies: Node Migration Tests Failed: ${{ job.html_url }}\n${{ format('https://github.com/smartcontractkit/chainlink/actions/runs/{0}', github.run_id) }}" ## Solana Section get_solana_sha: diff --git a/codecov.yml b/codecov.yml deleted file mode 100644 index a70f1961e36..00000000000 --- a/codecov.yml +++ /dev/null @@ -1,15 +0,0 @@ -comment: false - -coverage: - status: - project: - default: - threshold: 1% - -github_checks: - annotations: false - -ignore: - - 'contracts/' 
# Disabled due to solidity-coverage not reporting coverage
-  - 'core/internal'
-  - 'core/scripts'
diff --git a/common/client/models.go b/common/client/models.go
index 66f1e9cf88b..fd0c3915940 100644
--- a/common/client/models.go
+++ b/common/client/models.go
@@ -28,6 +28,35 @@ var sendTxSevereErrors = []SendTxReturnCode{Fatal, Underpriced, Unsupported, Exc
 // sendTxSuccessfulCodes - error codes which signal that transaction was accepted by the node
 var sendTxSuccessfulCodes = []SendTxReturnCode{Successful, TransactionAlreadyKnown}

+func (c SendTxReturnCode) String() string {
+	switch c {
+	case Successful:
+		return "Successful"
+	case Fatal:
+		return "Fatal"
+	case Retryable:
+		return "Retryable"
+	case Underpriced:
+		return "Underpriced"
+	case Unknown:
+		return "Unknown"
+	case Unsupported:
+		return "Unsupported"
+	case TransactionAlreadyKnown:
+		return "TransactionAlreadyKnown"
+	case InsufficientFunds:
+		return "InsufficientFunds"
+	case ExceedsMaxFee:
+		return "ExceedsMaxFee"
+	case FeeOutOfValidRange:
+		return "FeeOutOfValidRange"
+	case OutOfCounters:
+		return "OutOfCounters"
+	default:
+		return fmt.Sprintf("SendTxReturnCode(%d)", c)
+	}
+}
+
 type NodeTier int

 const (
diff --git a/common/client/models_test.go b/common/client/models_test.go
new file mode 100644
index 00000000000..2d5dc31b373
--- /dev/null
+++ b/common/client/models_test.go
@@ -0,0 +1,16 @@
+package client
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestSendTxReturnCode_String(t *testing.T) {
+	// ensure all the SendTxReturnCodes have a proper name
+	for c := 1; c < int(sendTxReturnCodeLen); c++ {
+		strC := SendTxReturnCode(c).String()
+		if strings.Contains(strC, "SendTxReturnCode(") {
+			t.Errorf("Expected %s to have a proper string representation", strC)
+		}
+	}
+}
diff --git a/common/client/multi_node.go b/common/client/multi_node.go
index cc8daed599c..fa413df91aa 100644
--- a/common/client/multi_node.go
+++ b/common/client/multi_node.go
@@ -561,6 +561,13 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP
 	return n.RPC().PendingSequenceAt(ctx, addr)
 }

+type sendTxErrors map[SendTxReturnCode][]error
+
+// String - returns the string representation of the errors map. Required by the logger
+// to properly represent the value.
+func (errs sendTxErrors) String() string {
+	return fmt.Sprint(map[SendTxReturnCode][]error(errs))
+}
+
 func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) SendEmptyTransaction(
 	ctx context.Context,
 	newTxAttempt func(seq SEQ, feeLimit uint32, fee FEE, fromAddress ADDR) (attempt any, err error),
@@ -602,7 +609,7 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP
 	ctx, cancel := c.chStop.Ctx(ctx)
 	defer cancel()
 	requiredResults := int(math.Ceil(float64(healthyNodesNum) * sendTxQuorum))
-	errorsByCode := map[SendTxReturnCode][]error{}
+	errorsByCode := sendTxErrors{}
 	var softTimeoutChan <-chan time.Time
 	var resultsCount int
 loop:
@@ -639,7 +646,7 @@ loop:

 func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) reportSendTxAnomalies(tx TX, txResults <-chan sendTxResult) {
 	defer c.wg.Done()
-	resultsByCode := map[SendTxReturnCode][]error{}
+	resultsByCode := sendTxErrors{}
 	// txResults eventually will be closed
 	for txResult := range txResults {
 		resultsByCode[txResult.ResultCode] = append(resultsByCode[txResult.ResultCode], txResult.Err)
@@ -653,7 +660,7 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP
 	}
 }

-func aggregateTxResults(resultsByCode map[SendTxReturnCode][]error) (txResult error, err error) {
+func aggregateTxResults(resultsByCode sendTxErrors) (txResult error, err error) {
 	severeErrors, hasSevereErrors := findFirstIn(resultsByCode, sendTxSevereErrors)
 	successResults, hasSuccess := findFirstIn(resultsByCode, sendTxSuccessfulCodes)
 	if hasSuccess {
diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go
index 9c09bd57d70..9f6904fcaf2 100644
--- a/common/client/multi_node_test.go
+++ b/common/client/multi_node_test.go
@@ -796,13 +796,13 @@ func TestMultiNode_SendTransaction_aggregateTxResults(t *testing.T) {
 		Name                string
 		ExpectedTxResult    string
 		ExpectedCriticalErr string
-		ResultsByCode       map[SendTxReturnCode][]error
+		ResultsByCode       sendTxErrors
 	}{
 		{
 			Name:                "Returns success and logs critical error on success and Fatal",
 			ExpectedTxResult:    "success",
 			ExpectedCriticalErr: "found contradictions in nodes replies on SendTransaction: got success and severe error",
-			ResultsByCode: map[SendTxReturnCode][]error{
+			ResultsByCode: sendTxErrors{
 				Successful: {errors.New("success")},
 				Fatal:      {errors.New("fatal")},
 			},
@@ -811,7 +811,7 @@
 			Name:                "Returns TransactionAlreadyKnown and logs critical error on TransactionAlreadyKnown and Fatal",
 			ExpectedTxResult:    "tx_already_known",
 			ExpectedCriticalErr: "found contradictions in nodes replies on SendTransaction: got success and severe error",
-			ResultsByCode: map[SendTxReturnCode][]error{
+			ResultsByCode: sendTxErrors{
 				TransactionAlreadyKnown: {errors.New("tx_already_known")},
 				Unsupported:             {errors.New("unsupported")},
 			},
@@ -820,7 +820,7 @@
 			Name:                "Prefers sever error to temporary",
 			ExpectedTxResult:    "underpriced",
 			ExpectedCriticalErr: "",
-			ResultsByCode: map[SendTxReturnCode][]error{
+			ResultsByCode: sendTxErrors{
 				Retryable:   {errors.New("retryable")},
 				Underpriced: {errors.New("underpriced")},
 			},
@@ -829,7 +829,7 @@
 			Name:                "Returns temporary error",
 			ExpectedTxResult:    
"retryable", ExpectedCriticalErr: "", - ResultsByCode: map[SendTxReturnCode][]error{ + ResultsByCode: sendTxErrors{ Retryable: {errors.New("retryable")}, }, }, @@ -837,7 +837,7 @@ func TestMultiNode_SendTransaction_aggregateTxResults(t *testing.T) { Name: "Insufficient funds is treated as error", ExpectedTxResult: "", ExpectedCriticalErr: "", - ResultsByCode: map[SendTxReturnCode][]error{ + ResultsByCode: sendTxErrors{ Successful: {nil}, InsufficientFunds: {errors.New("insufficientFunds")}, }, @@ -846,13 +846,13 @@ func TestMultiNode_SendTransaction_aggregateTxResults(t *testing.T) { Name: "Logs critical error on empty ResultsByCode", ExpectedTxResult: "expected at least one response on SendTransaction", ExpectedCriticalErr: "expected at least one response on SendTransaction", - ResultsByCode: map[SendTxReturnCode][]error{}, + ResultsByCode: sendTxErrors{}, }, { Name: "Zk out of counter error", ExpectedTxResult: "not enough keccak counters to continue the execution", ExpectedCriticalErr: "", - ResultsByCode: map[SendTxReturnCode][]error{ + ResultsByCode: sendTxErrors{ OutOfCounters: {errors.New("not enough keccak counters to continue the execution")}, }, }, @@ -870,6 +870,9 @@ func TestMultiNode_SendTransaction_aggregateTxResults(t *testing.T) { assert.EqualError(t, txResult, testCase.ExpectedTxResult) } + logger.Sugared(logger.Test(t)).Info("Map: " + fmt.Sprint(testCase.ResultsByCode)) + logger.Sugared(logger.Test(t)).Criticalw("observed invariant violation on SendTransaction", "resultsByCode", testCase.ResultsByCode, "err", err) + if testCase.ExpectedCriticalErr == "" { assert.NoError(t, err) } else { @@ -884,5 +887,4 @@ func TestMultiNode_SendTransaction_aggregateTxResults(t *testing.T) { delete(codesToCover, codeToIgnore) } assert.Empty(t, codesToCover, "all of the SendTxReturnCode must be covered by this test") - } diff --git a/common/client/poller.go b/common/client/poller.go new file mode 100644 index 00000000000..b21f28fe604 --- /dev/null +++ b/common/client/poller.go @@ -0,0 +1,98 @@ +package client + +import ( + "context" + "sync" + "time" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" + + "github.com/smartcontractkit/chainlink/v2/common/types" +) + +// Poller is a component that polls a function at a given interval +// and delivers the result to a channel. It is used by multinode to poll +// for new heads and implements the Subscription interface. 
+type Poller[T any] struct { + services.StateMachine + pollingInterval time.Duration + pollingFunc func(ctx context.Context) (T, error) + pollingTimeout time.Duration + logger logger.Logger + channel chan<- T + errCh chan error + + stopCh services.StopChan + wg sync.WaitGroup +} + +// NewPoller creates a new Poller instance +func NewPoller[ + T any, +](pollingInterval time.Duration, pollingFunc func(ctx context.Context) (T, error), pollingTimeout time.Duration, channel chan<- T, logger logger.Logger) Poller[T] { + return Poller[T]{ + pollingInterval: pollingInterval, + pollingFunc: pollingFunc, + pollingTimeout: pollingTimeout, + channel: channel, + logger: logger, + errCh: make(chan error), + stopCh: make(chan struct{}), + } +} + +var _ types.Subscription = &Poller[any]{} + +func (p *Poller[T]) Start() error { + return p.StartOnce("Poller", func() error { + p.wg.Add(1) + go p.pollingLoop() + return nil + }) +} + +// Unsubscribe cancels the sending of events to the data channel +func (p *Poller[T]) Unsubscribe() { + _ = p.StopOnce("Poller", func() error { + close(p.stopCh) + p.wg.Wait() + close(p.errCh) + return nil + }) +} + +func (p *Poller[T]) Err() <-chan error { + return p.errCh +} + +func (p *Poller[T]) pollingLoop() { + defer p.wg.Done() + + ticker := time.NewTicker(p.pollingInterval) + defer ticker.Stop() + + for { + select { + case <-p.stopCh: + return + case <-ticker.C: + // Set polling timeout + pollingCtx, cancelPolling := context.WithTimeout(context.Background(), p.pollingTimeout) + p.stopCh.CtxCancel(pollingCtx, cancelPolling) + // Execute polling function + result, err := p.pollingFunc(pollingCtx) + cancelPolling() + if err != nil { + p.logger.Warnf("polling error: %v", err) + continue + } + // Send result to channel or block if channel is full + select { + case p.channel <- result: + case <-p.stopCh: + return + } + } + } +} diff --git a/common/client/poller_test.go b/common/client/poller_test.go new file mode 100644 index 00000000000..3f11c759adb --- /dev/null +++ b/common/client/poller_test.go @@ -0,0 +1,207 @@ +package client + +import ( + "context" + "fmt" + "math/big" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" +) + +func Test_Poller(t *testing.T) { + lggr := logger.Test(t) + + t.Run("Test multiple start", func(t *testing.T) { + pollFunc := func(ctx context.Context) (Head, error) { + return nil, nil + } + + channel := make(chan Head, 1) + defer close(channel) + + poller := NewPoller[Head](time.Millisecond, pollFunc, time.Second, channel, lggr) + err := poller.Start() + require.NoError(t, err) + + err = poller.Start() + require.Error(t, err) + poller.Unsubscribe() + }) + + t.Run("Test polling for heads", func(t *testing.T) { + // Mock polling function that returns a new value every time it's called + var pollNumber int + pollLock := sync.Mutex{} + pollFunc := func(ctx context.Context) (Head, error) { + pollLock.Lock() + defer pollLock.Unlock() + pollNumber++ + h := head{ + BlockNumber: int64(pollNumber), + BlockDifficulty: big.NewInt(int64(pollNumber)), + } + return h.ToMockHead(t), nil + } + + // data channel to receive updates from the poller + channel := make(chan Head, 1) + defer close(channel) + + // Create poller and start to receive data + poller := NewPoller[Head](time.Millisecond, pollFunc, time.Second, channel, lggr) + require.NoError(t, poller.Start()) + defer poller.Unsubscribe() + + // Receive updates from the 
poller + pollCount := 0 + pollMax := 50 + for ; pollCount < pollMax; pollCount++ { + h := <-channel + assert.Equal(t, int64(pollCount+1), h.BlockNumber()) + } + }) + + t.Run("Test polling errors", func(t *testing.T) { + // Mock polling function that returns an error + var pollNumber int + pollLock := sync.Mutex{} + pollFunc := func(ctx context.Context) (Head, error) { + pollLock.Lock() + defer pollLock.Unlock() + pollNumber++ + return nil, fmt.Errorf("polling error %d", pollNumber) + } + + // data channel to receive updates from the poller + channel := make(chan Head, 1) + defer close(channel) + + olggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + + // Create poller and subscribe to receive data + poller := NewPoller[Head](time.Millisecond, pollFunc, time.Second, channel, olggr) + require.NoError(t, poller.Start()) + defer poller.Unsubscribe() + + // Ensure that all errors were logged as expected + logsSeen := func() bool { + for pollCount := 0; pollCount < 50; pollCount++ { + numLogs := observedLogs.FilterMessage(fmt.Sprintf("polling error: polling error %d", pollCount+1)).Len() + if numLogs != 1 { + return false + } + } + return true + } + require.Eventually(t, logsSeen, time.Second, time.Millisecond) + }) + + t.Run("Test polling timeout", func(t *testing.T) { + pollFunc := func(ctx context.Context) (Head, error) { + if <-ctx.Done(); true { + return nil, ctx.Err() + } + return nil, nil + } + + // Set instant timeout + pollingTimeout := time.Duration(0) + + // data channel to receive updates from the poller + channel := make(chan Head, 1) + defer close(channel) + + olggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + + // Create poller and subscribe to receive data + poller := NewPoller[Head](time.Millisecond, pollFunc, pollingTimeout, channel, olggr) + require.NoError(t, poller.Start()) + defer poller.Unsubscribe() + + // Ensure that timeout errors were logged as expected + logsSeen := func() bool { + return observedLogs.FilterMessage("polling error: context deadline exceeded").Len() >= 1 + } + require.Eventually(t, logsSeen, time.Second, time.Millisecond) + }) + + t.Run("Test unsubscribe during polling", func(t *testing.T) { + wait := make(chan struct{}) + pollFunc := func(ctx context.Context) (Head, error) { + close(wait) + // Block in polling function until context is cancelled + if <-ctx.Done(); true { + return nil, ctx.Err() + } + return nil, nil + } + + // Set long timeout + pollingTimeout := time.Minute + + // data channel to receive updates from the poller + channel := make(chan Head, 1) + defer close(channel) + + olggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + + // Create poller and subscribe to receive data + poller := NewPoller[Head](time.Millisecond, pollFunc, pollingTimeout, channel, olggr) + require.NoError(t, poller.Start()) + + // Unsubscribe while blocked in polling function + <-wait + poller.Unsubscribe() + + // Ensure error was logged + logsSeen := func() bool { + return observedLogs.FilterMessage("polling error: context canceled").Len() >= 1 + } + require.Eventually(t, logsSeen, time.Second, time.Millisecond) + }) +} + +func Test_Poller_Unsubscribe(t *testing.T) { + lggr := logger.Test(t) + pollFunc := func(ctx context.Context) (Head, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + h := head{ + BlockNumber: 0, + BlockDifficulty: big.NewInt(0), + } + return h.ToMockHead(t), nil + } + } + + t.Run("Test multiple unsubscribe", func(t *testing.T) { + channel := make(chan Head, 1) + poller := 
NewPoller[Head](time.Millisecond, pollFunc, time.Second, channel, lggr)
+		err := poller.Start()
+		require.NoError(t, err)
+
+		<-channel
+		poller.Unsubscribe()
+		poller.Unsubscribe()
+	})
+
+	t.Run("Test unsubscribe with closed channel", func(t *testing.T) {
+		channel := make(chan Head, 1)
+		poller := NewPoller[Head](time.Millisecond, pollFunc, time.Second, channel, lggr)
+		err := poller.Start()
+		require.NoError(t, err)
+
+		<-channel
+		close(channel)
+		poller.Unsubscribe()
+	})
+}
diff --git a/contracts/.changeset/funny-eagles-know.md b/contracts/.changeset/funny-eagles-know.md
new file mode 100644
index 00000000000..46827824ad8
--- /dev/null
+++ b/contracts/.changeset/funny-eagles-know.md
@@ -0,0 +1,5 @@
+---
+"@chainlink/contracts": patch
+---
+
+#wip addCapability updates
diff --git a/contracts/CHANGELOG.md b/contracts/CHANGELOG.md
index 667a5ad2529..3139312e325 100644
--- a/contracts/CHANGELOG.md
+++ b/contracts/CHANGELOG.md
@@ -1,6 +1,6 @@
-# @chainlink/contracts CHANGELOG.md
+# @chainlink/contracts

-## 1.1.0
+## 1.1.0 - 2024-04-23

 ### Minor Changes

diff --git a/contracts/README.md b/contracts/README.md
index 8df69057229..26b0a823298 100644
--- a/contracts/README.md
+++ b/contracts/README.md
@@ -50,6 +50,20 @@ contribution information.

 Thank you!

+### Changesets
+
+We use [changesets](https://github.com/changesets/changesets) to manage versioning of the contracts.
+
+Every PR that modifies any configuration or code should most likely be accompanied by a changeset file.
+
+To install `changesets`:
+  1. Install `pnpm` if it is not already installed - [docs](https://pnpm.io/installation).
+  2. Run `pnpm install`.
+
+Either before or after you create a commit, run the `pnpm changeset` command in the `contracts` directory to create an accompanying changeset entry which will be reflected in the CHANGELOG for the next release.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
 ## License

 [MIT](https://choosealicense.com/licenses/mit/)
diff --git a/contracts/src/v0.8/keystone/CapabilityRegistry.sol b/contracts/src/v0.8/keystone/CapabilityRegistry.sol
index faedd858bef..17f5a3dd73f 100644
--- a/contracts/src/v0.8/keystone/CapabilityRegistry.sol
+++ b/contracts/src/v0.8/keystone/CapabilityRegistry.sol
@@ -3,8 +3,14 @@ pragma solidity ^0.8.0;

 import {TypeAndVersionInterface} from "../interfaces/TypeAndVersionInterface.sol";
 import {OwnerIsCreator} from "../shared/access/OwnerIsCreator.sol";
+import {IERC165} from "../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol";
+import {EnumerableSet} from "../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/structs/EnumerableSet.sol";
+import {ICapabilityConfiguration} from "./interfaces/ICapabilityConfiguration.sol";

 contract CapabilityRegistry is OwnerIsCreator, TypeAndVersionInterface {
+  // Add the library methods
+  using EnumerableSet for EnumerableSet.Bytes32Set;
+
   struct NodeOperator {
     /// @notice The address of the admin that can manage a node
     /// operator
@@ -13,6 +19,16 @@ contract CapabilityRegistry is OwnerIsCreator, TypeAndVersionInterface {
     string name;
   }

+  // CapabilityResponseType indicates whether a remote response requires
+  // aggregation or is an already aggregated report. There are multiple
+  // possible ways to aggregate.
+  enum CapabilityResponseType {
+    // No additional aggregation is needed on the remote response.
+    REPORT,
+    // A number of identical observations need to be aggregated.
+    OBSERVATION_IDENTICAL
+  }
+
   struct Capability {
     // Capability type, e.g. "data-streams-reports"
     // bytes32(string); validation regex: ^[a-z0-9_\-:]{1,32}$
@@ -21,12 +37,39 @@ contract CapabilityRegistry is OwnerIsCreator, TypeAndVersionInterface {
     bytes32 capabilityType;
     // Semver, e.g., "1.2.3"
     // bytes32(string); must be valid Semver + max 32 characters.
     bytes32 version;
+    // responseType indicates whether a remote response requires
+    // aggregation or is an OCR report. There are multiple possible
+    // ways to aggregate.
+    CapabilityResponseType responseType;
+    // An address to the capability configuration contract. Having this defined
+    // on a capability enforces consistent configuration across DON instances
+    // serving the same capability. The configuration contract MUST implement
+    // CapabilityConfigurationContractInterface.
+    //
+    // The main use cases are:
+    // 1) Sharing capability configuration across DON instances
+    // 2) Inspecting and modifying on-chain configuration without off-chain
+    // capability code.
+    //
+    // It is not recommended to store configuration which requires knowledge of
+    // the DON membership.
+    address configurationContract;
   }

   /// @notice This error is thrown when trying to set a node operator's
   /// admin address to the zero address
   error InvalidNodeOperatorAdmin();

+  /// @notice This error is thrown when trying to add a capability that already
+  /// exists.
+  error CapabilityAlreadyExists();
+
+  /// @notice This error is thrown when trying to add a capability with a
+  /// configuration contract that does not implement the required interface.
+  /// @param proposedConfigurationContract The address of the proposed
+  /// configuration contract.
+  error InvalidCapabilityConfigurationContractInterface(address proposedConfigurationContract);
+
   /// @notice This event is emitted when a new node operator is added
   /// @param nodeOperatorId The ID of the newly added node operator
   /// @param admin The address of the admin that can manage the node
@@ -43,6 +86,7 @@ contract CapabilityRegistry is OwnerIsCreator, TypeAndVersionInterface {
   event CapabilityAdded(bytes32 indexed capabilityId);

   mapping(bytes32 => Capability) private s_capabilities;
+  EnumerableSet.Bytes32Set private s_capabilityIds;

   /// @notice Mapping of node operators
   mapping(uint256 nodeOperatorId => NodeOperator) private s_nodeOperators;
@@ -87,7 +131,21 @@ contract CapabilityRegistry is OwnerIsCreator, TypeAndVersionInterface {

   function addCapability(Capability calldata capability) external onlyOwner {
     bytes32 capabilityId = getCapabilityID(capability.capabilityType, capability.version);
+
+    if (s_capabilityIds.contains(capabilityId)) revert CapabilityAlreadyExists();
+
+    if (capability.configurationContract != address(0)) {
+      if (
+        capability.configurationContract.code.length == 0 ||
+        !IERC165(capability.configurationContract).supportsInterface(
+          ICapabilityConfiguration.getCapabilityConfiguration.selector
+        )
+      ) revert InvalidCapabilityConfigurationContractInterface(capability.configurationContract);
+    }
+
+    s_capabilityIds.add(capabilityId);
     s_capabilities[capabilityId] = capability;
+
     emit CapabilityAdded(capabilityId);
   }
diff --git a/contracts/src/v0.8/keystone/interfaces/ICapabilityConfiguration.sol b/contracts/src/v0.8/keystone/interfaces/ICapabilityConfiguration.sol
new file mode 100644
index 00000000000..20447c9680a
--- /dev/null
+++ b/contracts/src/v0.8/keystone/interfaces/ICapabilityConfiguration.sol
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.19;
+
+/// @notice Interface for a capability configuration contract. It MUST be
+/// implemented for a contract to be used as a capability configuration.
+/// The contract MAY store configuration that is shared across multiple
+/// DON instances and capability versions.
+/// @dev This interface does not guarantee the configuration contract's
+/// correctness. It is the responsibility of the contract owner to ensure
+/// that the configuration contract emits the CapabilityConfigurationSet
+/// event when the configuration is set.
+interface ICapabilityConfiguration {
+  /// @notice Emitted when a capability configuration is set.
+  event CapabilityConfigurationSet();
+
+  /// @notice Returns the capability configuration for a particular DON instance.
+  /// @dev donId is required to get DON-specific configuration. It avoids a
+  /// situation where configuration size grows too large.
+  /// @param donId The DON instance ID. These are stored in the CapabilityRegistry.
+  /// @return configuration DON's configuration for the capability.
+  function getCapabilityConfiguration(uint256 donId) external view returns (bytes memory configuration);
+
+  // Solidity does not support generic return types, so this cannot be part of
+  // the interface. However, the implementation contract MAY implement this
+  // function to enable configuration decoding on-chain.
+  // function decodeCapabilityConfiguration(bytes configuration) external returns (TypedCapabilityConfigStruct config)
+}
diff --git a/contracts/src/v0.8/keystone/test/BaseTest.t.sol b/contracts/src/v0.8/keystone/test/BaseTest.t.sol
index 5c4b4c91809..4517a256b15 100644
--- a/contracts/src/v0.8/keystone/test/BaseTest.t.sol
+++ b/contracts/src/v0.8/keystone/test/BaseTest.t.sol
@@ -3,14 +3,17 @@ pragma solidity ^0.8.19;

 import {Test} from "forge-std/Test.sol";
 import {Constants} from "./Constants.t.sol";
+import {CapabilityConfigurationContract} from "./mocks/CapabilityConfigurationContract.sol";
 import {CapabilityRegistry} from "../CapabilityRegistry.sol";

 contract BaseTest is Test, Constants {
   CapabilityRegistry internal s_capabilityRegistry;
+  CapabilityConfigurationContract internal s_capabilityConfigurationContract;

   function setUp() public virtual {
     vm.startPrank(ADMIN);
     s_capabilityRegistry = new CapabilityRegistry();
+    s_capabilityConfigurationContract = new CapabilityConfigurationContract();
   }

   function _getNodeOperators() internal view returns (CapabilityRegistry.NodeOperator[] memory) {
diff --git a/contracts/src/v0.8/keystone/test/CapabilityRegistry_AddCapabilityTest.t.sol b/contracts/src/v0.8/keystone/test/CapabilityRegistry_AddCapabilityTest.t.sol
index 6fbdc43a2c7..d9e4b6b8383 100644
--- a/contracts/src/v0.8/keystone/test/CapabilityRegistry_AddCapabilityTest.t.sol
+++ b/contracts/src/v0.8/keystone/test/CapabilityRegistry_AddCapabilityTest.t.sol
@@ -2,16 +2,88 @@ pragma solidity ^0.8.19;

 import {BaseTest} from "./BaseTest.t.sol";
+import {CapabilityConfigurationContract} from "./mocks/CapabilityConfigurationContract.sol";
+
 import {CapabilityRegistry} from "../CapabilityRegistry.sol";

 contract CapabilityRegistry_AddCapabilityTest is BaseTest {
-  function test_AddCapability() public {
-    s_capabilityRegistry.addCapability(CapabilityRegistry.Capability("data-streams-reports", "1.0.0"));
+  CapabilityRegistry.Capability private basicCapability =
+    CapabilityRegistry.Capability({
+      capabilityType: "data-streams-reports",
+      version: "1.0.0",
+      responseType: CapabilityRegistry.CapabilityResponseType.REPORT,
+      configurationContract: address(0)
+    });
+
+  CapabilityRegistry.Capability private 
capabilityWithConfigurationContract = + CapabilityRegistry.Capability({ + capabilityType: "read-ethereum-mainnet-gas-price", + version: "1.0.2", + responseType: CapabilityRegistry.CapabilityResponseType.OBSERVATION_IDENTICAL, + configurationContract: address(s_capabilityConfigurationContract) + }); + + function test_RevertWhen_CalledByNonAdmin() public { + changePrank(STRANGER); + + vm.expectRevert("Only callable by owner"); + s_capabilityRegistry.addCapability(basicCapability); + } + + function test_RevertWhen_CapabilityExists() public { + // Successfully add the capability the first time + s_capabilityRegistry.addCapability(basicCapability); + + // Try to add the same capability again + vm.expectRevert(CapabilityRegistry.CapabilityAlreadyExists.selector); + s_capabilityRegistry.addCapability(basicCapability); + } + + function test_RevertWhen_ConfigurationContractNotDeployed() public { + address nonExistentContract = address(1); + capabilityWithConfigurationContract.configurationContract = nonExistentContract; + + vm.expectRevert( + abi.encodeWithSelector( + CapabilityRegistry.InvalidCapabilityConfigurationContractInterface.selector, + nonExistentContract + ) + ); + s_capabilityRegistry.addCapability(capabilityWithConfigurationContract); + } + + function test_RevertWhen_ConfigurationContractDoesNotMatchInterface() public { + CapabilityRegistry contractWithoutERC165 = new CapabilityRegistry(); + + vm.expectRevert(); + capabilityWithConfigurationContract.configurationContract = address(contractWithoutERC165); + s_capabilityRegistry.addCapability(capabilityWithConfigurationContract); + } + + function test_AddCapability_NoConfigurationContract() public { + s_capabilityRegistry.addCapability(basicCapability); bytes32 capabilityId = s_capabilityRegistry.getCapabilityID(bytes32("data-streams-reports"), bytes32("1.0.0")); - CapabilityRegistry.Capability memory capability = s_capabilityRegistry.getCapability(capabilityId); + CapabilityRegistry.Capability memory storedCapability = s_capabilityRegistry.getCapability(capabilityId); + + assertEq(storedCapability.capabilityType, basicCapability.capabilityType); + assertEq(storedCapability.version, basicCapability.version); + assertEq(uint256(storedCapability.responseType), uint256(basicCapability.responseType)); + assertEq(storedCapability.configurationContract, basicCapability.configurationContract); + } + + function test_AddCapability_WithConfiguration() public { + s_capabilityRegistry.addCapability(capabilityWithConfigurationContract); + + bytes32 capabilityId = s_capabilityRegistry.getCapabilityID( + bytes32(capabilityWithConfigurationContract.capabilityType), + bytes32(capabilityWithConfigurationContract.version) + ); + CapabilityRegistry.Capability memory storedCapability = s_capabilityRegistry.getCapability(capabilityId); - assertEq(capability.capabilityType, "data-streams-reports"); - assertEq(capability.version, "1.0.0"); + assertEq(storedCapability.capabilityType, capabilityWithConfigurationContract.capabilityType); + assertEq(storedCapability.version, capabilityWithConfigurationContract.version); + assertEq(uint256(storedCapability.responseType), uint256(capabilityWithConfigurationContract.responseType)); + assertEq(storedCapability.configurationContract, capabilityWithConfigurationContract.configurationContract); } } diff --git a/contracts/src/v0.8/keystone/test/mocks/CapabilityConfigurationContract.sol b/contracts/src/v0.8/keystone/test/mocks/CapabilityConfigurationContract.sol new file mode 100644 index 00000000000..0c3d8e0597d --- 
/dev/null +++ b/contracts/src/v0.8/keystone/test/mocks/CapabilityConfigurationContract.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {ICapabilityConfiguration} from "../../interfaces/ICapabilityConfiguration.sol"; +import {ERC165} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/ERC165.sol"; + +contract CapabilityConfigurationContract is ICapabilityConfiguration, ERC165 { + mapping(uint256 => bytes) private s_donConfiguration; + + function getCapabilityConfiguration(uint256 donId) external view returns (bytes memory configuration) { + return s_donConfiguration[donId]; + } +} diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index 255b038037a..548acf3206c 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -130,7 +130,7 @@ func (r *rpcClient) Dial(callerCtx context.Context) error { wsrpc, err := rpc.DialWebsocket(ctx, r.ws.uri.String(), "") if err != nil { promEVMPoolRPCNodeDialsFailed.WithLabelValues(r.chainID.String(), r.name).Inc() - return pkgerrors.Wrapf(err, "error while dialing websocket: %v", r.ws.uri.Redacted()) + return r.wrapRPCClientError(pkgerrors.Wrapf(err, "error while dialing websocket: %v", r.ws.uri.Redacted())) } r.ws.rpc = wsrpc @@ -159,7 +159,7 @@ func (r *rpcClient) DialHTTP() error { httprpc, err := rpc.DialHTTP(r.http.uri.String()) if err != nil { promEVMPoolRPCNodeDialsFailed.WithLabelValues(r.chainID.String(), r.name).Inc() - return pkgerrors.Wrapf(err, "error while dialing HTTP: %v", r.http.uri.Redacted()) + return r.wrapRPCClientError(pkgerrors.Wrapf(err, "error while dialing HTTP: %v", r.http.uri.Redacted())) } r.http.rpc = httprpc @@ -295,10 +295,7 @@ func (r *rpcClient) UnsubscribeAllExceptAliveLoop() { // CallContext implementation func (r *rpcClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With( "method", method, @@ -307,6 +304,7 @@ func (r *rpcClient) CallContext(ctx context.Context, result interface{}, method lggr.Debug("RPC call: evmclient.Client#CallContext") start := time.Now() + var err error if http != nil { err = r.wrapHTTP(http.rpc.CallContext(ctx, result, method, args...)) } else { @@ -320,15 +318,13 @@ func (r *rpcClient) CallContext(ctx context.Context, result interface{}, method } func (r *rpcClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("nBatchElems", len(b), "batchElems", b) lggr.Trace("RPC call: evmclient.Client#BatchCallContext") start := time.Now() + var err error if http != nil { err = r.wrapHTTP(http.rpc.BatchCallContext(ctx, b)) } else { @@ -342,24 +338,23 @@ func (r *rpcClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) err } func (r *rpcClient) Subscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (commontypes.Subscription, error) { - ctx, cancel, ws, _, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, _ := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := 
r.newRqLggr().With("args", args) lggr.Debug("RPC call: evmclient.Client#EthSubscribe") start := time.Now() + var sub commontypes.Subscription sub, err := ws.rpc.EthSubscribe(ctx, channel, args...) if err == nil { + sub = newSubscriptionErrorWrapper(sub, r.rpcClientErrorPrefix()) r.registerSub(sub) } duration := time.Since(start) r.logResult(lggr, err, duration, r.getRPCDomain(), "EthSubscribe") - return sub, err + return sub, r.wrapWS(err) } // GethClient wrappers @@ -370,17 +365,14 @@ func (r *rpcClient) TransactionReceipt(ctx context.Context, txHash common.Hash) return nil, err } if receipt == nil { - err = ethereum.NotFound + err = r.wrapRPCClientError(ethereum.NotFound) return } return } func (r *rpcClient) TransactionReceiptGeth(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("txHash", txHash) @@ -403,10 +395,7 @@ func (r *rpcClient) TransactionReceiptGeth(ctx context.Context, txHash common.Ha return } func (r *rpcClient) TransactionByHash(ctx context.Context, txHash common.Hash) (tx *types.Transaction, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("txHash", txHash) @@ -430,10 +419,7 @@ func (r *rpcClient) TransactionByHash(ctx context.Context, txHash common.Hash) ( } func (r *rpcClient) HeaderByNumber(ctx context.Context, number *big.Int) (header *types.Header, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("number", number) @@ -454,10 +440,7 @@ func (r *rpcClient) HeaderByNumber(ctx context.Context, number *big.Int) (header } func (r *rpcClient) HeaderByHash(ctx context.Context, hash common.Hash) (header *types.Header, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("hash", hash) @@ -494,7 +477,7 @@ func (r *rpcClient) blockByNumber(ctx context.Context, number string) (head *evm return nil, err } if head == nil { - err = ethereum.NotFound + err = r.wrapRPCClientError(ethereum.NotFound) return } head.EVMChainID = ubig.New(r.chainID) @@ -507,7 +490,7 @@ func (r *rpcClient) BlockByHash(ctx context.Context, hash common.Hash) (head *ev return nil, err } if head == nil { - err = ethereum.NotFound + err = r.wrapRPCClientError(ethereum.NotFound) return } head.EVMChainID = ubig.New(r.chainID) @@ -515,10 +498,7 @@ func (r *rpcClient) BlockByHash(ctx context.Context, hash common.Hash) (head *ev } func (r *rpcClient) BlockByHashGeth(ctx context.Context, hash common.Hash) (block *types.Block, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("hash", hash) @@ -541,10 +521,7 @@ func (r *rpcClient) BlockByHashGeth(ctx context.Context, hash common.Hash) (bloc } func (r *rpcClient) BlockByNumberGeth(ctx 
context.Context, number *big.Int) (block *types.Block, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("number", number) @@ -567,15 +544,13 @@ func (r *rpcClient) BlockByNumberGeth(ctx context.Context, number *big.Int) (blo } func (r *rpcClient) SendTransaction(ctx context.Context, tx *types.Transaction) error { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("tx", tx) lggr.Debug("RPC call: evmclient.Client#SendTransaction") start := time.Now() + var err error if http != nil { err = r.wrapHTTP(http.geth.SendTransaction(ctx, tx)) } else { @@ -607,10 +582,7 @@ func (r *rpcClient) SendEmptyTransaction( // PendingSequenceAt returns one higher than the highest nonce from both mempool and mined transactions func (r *rpcClient) PendingSequenceAt(ctx context.Context, account common.Address) (nonce evmtypes.Nonce, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return 0, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("account", account) @@ -639,10 +611,7 @@ func (r *rpcClient) PendingSequenceAt(ctx context.Context, account common.Addres // mined nonce at the given block number, but it actually returns the total // transaction count which is the highest mined nonce + 1 func (r *rpcClient) SequenceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (nonce evmtypes.Nonce, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return 0, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("account", account, "blockNumber", blockNumber) @@ -668,10 +637,7 @@ func (r *rpcClient) SequenceAt(ctx context.Context, account common.Address, bloc } func (r *rpcClient) PendingCodeAt(ctx context.Context, account common.Address) (code []byte, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("account", account) @@ -694,10 +660,7 @@ func (r *rpcClient) PendingCodeAt(ctx context.Context, account common.Address) ( } func (r *rpcClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) (code []byte, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("account", account, "blockNumber", blockNumber) @@ -720,10 +683,7 @@ func (r *rpcClient) CodeAt(ctx context.Context, account common.Address, blockNum } func (r *rpcClient) EstimateGas(ctx context.Context, c interface{}) (gas uint64, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return 0, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() call := c.(ethereum.CallMsg) lggr := r.newRqLggr().With("call", call) @@ -747,10 +707,7 @@ func (r *rpcClient) EstimateGas(ctx context.Context, c 
interface{}) (gas uint64, } func (r *rpcClient) SuggestGasPrice(ctx context.Context) (price *big.Int, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr() @@ -773,10 +730,7 @@ func (r *rpcClient) SuggestGasPrice(ctx context.Context) (price *big.Int, err er } func (r *rpcClient) CallContract(ctx context.Context, msg interface{}, blockNumber *big.Int) (val []byte, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("callMsg", msg, "blockNumber", blockNumber) message := msg.(ethereum.CallMsg) @@ -804,10 +758,7 @@ func (r *rpcClient) CallContract(ctx context.Context, msg interface{}, blockNumb } func (r *rpcClient) PendingCallContract(ctx context.Context, msg interface{}) (val []byte, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("callMsg", msg) message := msg.(ethereum.CallMsg) @@ -841,10 +792,7 @@ func (r *rpcClient) LatestBlockHeight(ctx context.Context) (*big.Int, error) { } func (r *rpcClient) BlockNumber(ctx context.Context) (height uint64, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return 0, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr() @@ -867,10 +815,7 @@ func (r *rpcClient) BlockNumber(ctx context.Context) (height uint64, err error) } func (r *rpcClient) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (balance *big.Int, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("account", account.Hex(), "blockNumber", blockNumber) @@ -907,7 +852,7 @@ func (r *rpcClient) TokenBalance(ctx context.Context, address common.Address, co return numLinkBigInt, err } if _, ok := numLinkBigInt.SetString(result, 0); !ok { - return nil, fmt.Errorf("failed to parse int: %s", result) + return nil, r.wrapRPCClientError(fmt.Errorf("failed to parse int: %s", result)) } return numLinkBigInt, nil } @@ -926,10 +871,7 @@ func (r *rpcClient) FilterEvents(ctx context.Context, q ethereum.FilterQuery) ([ } func (r *rpcClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) (l []types.Log, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("q", q) @@ -957,10 +899,7 @@ func (r *rpcClient) ClientVersion(ctx context.Context) (version string, err erro } func (r *rpcClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (sub ethereum.Subscription, err error) { - ctx, cancel, ws, _, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, _ := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("q", q) @@ -968,6 +907,7 @@ func (r 
*rpcClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQu start := time.Now() sub, err = ws.geth.SubscribeFilterLogs(ctx, q, ch) if err == nil { + sub = newSubscriptionErrorWrapper(sub, r.rpcClientErrorPrefix()) r.registerSub(sub) } err = r.wrapWS(err) @@ -979,10 +919,7 @@ func (r *rpcClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQu } func (r *rpcClient) SuggestGasTipCap(ctx context.Context) (tipCap *big.Int, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) - if err != nil { - return nil, err - } + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr() @@ -1007,7 +944,7 @@ func (r *rpcClient) SuggestGasTipCap(ctx context.Context) (tipCap *big.Int, err // Returns the ChainID according to the geth client. This is useful for functions like verify() // the common node. func (r *rpcClient) ChainID(ctx context.Context) (chainID *big.Int, err error) { - ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() @@ -1026,6 +963,15 @@ func (r *rpcClient) newRqLggr() logger.SugaredLogger { return r.rpcLog.With("requestID", uuid.New()) } +func (r *rpcClient) wrapRPCClientError(err error) error { + // simple add msg to the error without adding new stack trace + return pkgerrors.WithMessage(err, r.rpcClientErrorPrefix()) +} + +func (r *rpcClient) rpcClientErrorPrefix() string { + return fmt.Sprintf("RPCClient returned error (%s)", r.name) +} + func wrapCallError(err error, tp string) error { if err == nil { return nil @@ -1038,11 +984,12 @@ func wrapCallError(err error, tp string) error { func (r *rpcClient) wrapWS(err error) error { err = wrapCallError(err, fmt.Sprintf("%s websocket (%s)", r.tier.String(), r.ws.uri.Redacted())) - return err + return r.wrapRPCClientError(err) } func (r *rpcClient) wrapHTTP(err error) error { err = wrapCallError(err, fmt.Sprintf("%s http (%s)", r.tier.String(), r.http.uri.Redacted())) + err = r.wrapRPCClientError(err) if err != nil { r.rpcLog.Debugw("Call failed", "err", err) } else { @@ -1052,7 +999,7 @@ func (r *rpcClient) wrapHTTP(err error) error { } // makeLiveQueryCtxAndSafeGetClients wraps makeQueryCtx -func (r *rpcClient) makeLiveQueryCtxAndSafeGetClients(parentCtx context.Context) (ctx context.Context, cancel context.CancelFunc, ws rawclient, http *rawclient, err error) { +func (r *rpcClient) makeLiveQueryCtxAndSafeGetClients(parentCtx context.Context) (ctx context.Context, cancel context.CancelFunc, ws rawclient, http *rawclient) { // Need to wrap in mutex because state transition can cancel and replace the // context r.stateMu.RLock() @@ -1072,16 +1019,14 @@ func (r *rpcClient) makeQueryCtx(ctx context.Context) (context.Context, context. 
}

 func (r *rpcClient) IsSyncing(ctx context.Context) (bool, error) {
-	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
-	if err != nil {
-		return false, err
-	}
+	ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx)
 	defer cancel()
 	lggr := r.newRqLggr()

 	lggr.Debug("RPC call: evmclient.Client#SyncProgress")
 	var syncProgress *ethereum.SyncProgress
 	start := time.Now()
+	var err error
 	if http != nil {
 		syncProgress, err = http.geth.SyncProgress(ctx)
 		err = r.wrapHTTP(err)
diff --git a/core/chains/evm/client/sub_error_wrapper.go b/core/chains/evm/client/sub_error_wrapper.go
new file mode 100644
index 00000000000..689991ce70f
--- /dev/null
+++ b/core/chains/evm/client/sub_error_wrapper.go
@@ -0,0 +1,77 @@
+package client
+
+import (
+	"fmt"
+
+	commontypes "github.com/smartcontractkit/chainlink/v2/common/types"
+)
+
+// subErrorWrapper - adds a specified prefix to a subscription error
+type subErrorWrapper struct {
+	sub         commontypes.Subscription
+	errorPrefix string
+
+	done    chan struct{}
+	unSub   chan struct{}
+	errorCh chan error
+}
+
+func newSubscriptionErrorWrapper(sub commontypes.Subscription, errorPrefix string) *subErrorWrapper {
+	s := &subErrorWrapper{
+		sub:         sub,
+		errorPrefix: errorPrefix,
+		done:        make(chan struct{}),
+		unSub:       make(chan struct{}),
+		errorCh:     make(chan error),
+	}
+
+	go func() {
+		for {
+			select {
+			// sub.Err channel is closed by sub.Unsubscribe
+			case err, ok := <-sub.Err():
+				if !ok {
+					// might only happen if someone terminated the wrapped subscription;
+					// in any case - do our best to release resources.
+					// We can't call Unsubscribe on the root sub as this might cause a panic.
+					close(s.errorCh)
+					close(s.done)
+					return
+				}
+
+				select {
+				case s.errorCh <- fmt.Errorf("%s: %w", s.errorPrefix, err):
+				case <-s.unSub:
+					s.close()
+					return
+				}
+			case <-s.unSub:
+				s.close()
+				return
+			}
+		}
+	}()
+
+	return s
+}
+
+func (s *subErrorWrapper) close() {
+	s.sub.Unsubscribe()
+	close(s.errorCh)
+	close(s.done)
+}
+
+func (s *subErrorWrapper) Unsubscribe() {
+	select {
+	// already unsubscribed
+	case <-s.done:
+	// signal unsubscribe
+	case s.unSub <- struct{}{}:
+		// wait for unsubscribe to complete
+		<-s.done
+	}
+}
+
+func (s *subErrorWrapper) Err() <-chan error {
+	return s.errorCh
+}
diff --git a/core/chains/evm/client/sub_error_wrapper_test.go b/core/chains/evm/client/sub_error_wrapper_test.go
new file mode 100644
index 00000000000..457d392a50e
--- /dev/null
+++ b/core/chains/evm/client/sub_error_wrapper_test.go
@@ -0,0 +1,75 @@
+package client
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+)
+
+func TestSubscriptionErrorWrapper(t *testing.T) {
+	t.Parallel()
+	t.Run("Unsubscribe wrapper releases resources", func(t *testing.T) {
+		t.Parallel()
+
+		mockedSub := NewMockSubscription()
+		const prefix = "RPC returned error"
+		wrapper := newSubscriptionErrorWrapper(mockedSub, prefix)
+		wrapper.Unsubscribe()
+
+		// mock's resources were released
+		assert.True(t, mockedSub.unsubscribed)
+		_, ok := <-mockedSub.Err()
+		assert.False(t, ok)
+		// wrapper's channels are closed
+		_, ok = <-wrapper.Err()
+		assert.False(t, ok)
+		// subsequent unsubscribe does not cause a panic
+		wrapper.Unsubscribe()
+	})
+	t.Run("Unsubscribe interrupts error delivery", func(t *testing.T) {
+		t.Parallel()
+		sub := NewMockSubscription()
+		const prefix = "RPC returned error"
+		wrapper := newSubscriptionErrorWrapper(sub, prefix)
+		sub.Errors <- fmt.Errorf("error")
+
+		wrapper.Unsubscribe()
+		
+		_, ok := <-wrapper.Err()
+		assert.False(t, ok)
+	})
+	t.Run("Successfully wraps error", func(t *testing.T) {
+		t.Parallel()
+		sub := NewMockSubscription()
+		const prefix = "RPC returned error"
+		wrapper := newSubscriptionErrorWrapper(sub, prefix)
+		sub.Errors <- fmt.Errorf("root error")
+
+		err, ok := <-wrapper.Err()
+		assert.True(t, ok)
+		assert.Equal(t, "RPC returned error: root error", err.Error())
+
+		wrapper.Unsubscribe()
+		_, ok = <-wrapper.Err()
+		assert.False(t, ok)
+	})
+	t.Run("Unsubscribe on root does not cause panic", func(t *testing.T) {
+		t.Parallel()
+		mockedSub := NewMockSubscription()
+		wrapper := newSubscriptionErrorWrapper(mockedSub, "")
+
+		mockedSub.Unsubscribe()
+		// mock's resources were released
+		assert.True(t, mockedSub.unsubscribed)
+		_, ok := <-mockedSub.Err()
+		assert.False(t, ok)
+		// wrapper's channels are eventually closed
+		tests.AssertEventually(t, func() bool {
+			_, ok = <-wrapper.Err()
+			return !ok
+		})
+	})
+}
diff --git a/core/chains/evm/gas/block_history_estimator.go b/core/chains/evm/gas/block_history_estimator.go
index 8b8c626f725..0ae067e45bf 100644
--- a/core/chains/evm/gas/block_history_estimator.go
+++ b/core/chains/evm/gas/block_history_estimator.go
@@ -721,7 +721,7 @@ func (b *BlockHistoryEstimator) batchFetch(ctx context.Context, reqs []rpc.Batch
 		err := b.ethClient.BatchCallContext(ctx, reqs[i:j])
 		if pkgerrors.Is(err, context.DeadlineExceeded) {
 			// We ran out of time, return what we have
-			b.logger.Warnf("Batch fetching timed out; loaded %d/%d results", i, len(reqs))
+			b.logger.Warnf("Batch fetching timed out; loaded %d/%d results: %v", i, len(reqs), err)
 			for k := i; k < len(reqs); k++ {
 				if k < j {
 					reqs[k].Error = pkgerrors.Wrap(err, "request failed")
diff --git a/core/chains/evm/logpoller/disabled.go b/core/chains/evm/logpoller/disabled.go
index f3e64378384..6f95b9c55da 100644
--- a/core/chains/evm/logpoller/disabled.go
+++ b/core/chains/evm/logpoller/disabled.go
@@ -114,3 +114,11 @@ func (d disabled) LatestBlockByEventSigsAddrsWithConfs(ctx context.Context, from
 func (d disabled) LogsDataWordBetween(ctx context.Context, eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) {
 	return nil, ErrDisabled
 }
+
+func (d disabled) FindLCA(ctx context.Context) (*LogPollerBlock, error) {
+	return nil, ErrDisabled
+}
+
+func (d disabled) DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error {
+	return ErrDisabled
+}
diff --git a/core/chains/evm/logpoller/log_poller.go b/core/chains/evm/logpoller/log_poller.go
index 7592ec104c4..cd26889627f 100644
--- a/core/chains/evm/logpoller/log_poller.go
+++ b/core/chains/evm/logpoller/log_poller.go
@@ -44,6 +44,8 @@ type LogPoller interface {
 	GetFilters() map[string]Filter
 	LatestBlock(ctx context.Context) (LogPollerBlock, error)
 	GetBlocksRange(ctx context.Context, numbers []uint64) ([]LogPollerBlock, error)
+	FindLCA(ctx context.Context) (*LogPollerBlock, error)
+	DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error

 	// General querying
 	Logs(ctx context.Context, start, end int64, eventSig common.Hash, address common.Address) ([]Log, error)
@@ -1422,6 +1424,103 @@ func (lp *logPoller) IndexedLogsWithSigsExcluding(ctx context.Context, address c
 	return lp.orm.SelectIndexedLogsWithSigsExcluding(ctx, eventSigA, eventSigB, topicIndex, address, fromBlock, toBlock, confs)
 }

+// DeleteLogsAndBlocksAfter - removes blocks and logs starting from the specified block
+func (lp *logPoller) DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error {
+	return lp.orm.DeleteLogsAndBlocksAfter(ctx, start)
+}
+
+func (lp *logPoller) FindLCA(ctx context.Context) (*LogPollerBlock, error) {
+	latest, err := lp.orm.SelectLatestBlock(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to select the latest block: %w", err)
+	}
+
+	oldest, err := lp.orm.SelectOldestBlock(ctx, 0)
+	if err != nil {
+		return nil, fmt.Errorf("failed to select the oldest block: %w", err)
+	}
+
+	if latest == nil || oldest == nil {
+		return nil, fmt.Errorf("expected at least one block to be present in DB")
+	}
+
+	lp.lggr.Debugf("Received request to find LCA. Searching in range [%d, %d]", oldest.BlockNumber, latest.BlockNumber)
+
+	// Find the largest block number for which the block hash stored in the DB matches the one returned by the RPC.
+	// `sort.Find` expects a slice of the form s = [1, 0, -1] and returns the smallest index i for which s[i] = 0.
+	// To utilise `sort.Find` we represent the range of blocks as the slice [latestBlock, latestBlock-1, ..., oldestBlock+1, oldestBlock]
+	// and return 1 if the DB block was reorged, or 0 if it is still present on chain.
+	lcaI, found := sort.Find(int(latest.BlockNumber-oldest.BlockNumber)+1, func(i int) int {
+		const notFound = 1
+		const found = 0
+		// if there is an error - stop the search
+		if err != nil {
+			return notFound
+		}
+
+		// cancelled search
+		if ctx.Err() != nil {
+			err = fmt.Errorf("aborted, FindLCA request cancelled: %w", ctx.Err())
+			return notFound
+		}
+		iBlockNumber := latest.BlockNumber - int64(i)
+		var dbBlock *LogPollerBlock
+		// A block with the specified number might not exist in the database; to address that, we check the closest
+		// child of iBlockNumber. If the child is present on chain, it's safe to assume that iBlockNumber is present too.
+		dbBlock, err = lp.orm.SelectOldestBlock(ctx, iBlockNumber)
+		if err != nil {
+			err = fmt.Errorf("failed to select block %d by number: %w", iBlockNumber, err)
+			return notFound
+		}
+
+		if dbBlock == nil {
+			err = fmt.Errorf("expected a block with blockNumber >= %d to exist in the DB, as we observed block %d", iBlockNumber, latest.BlockNumber)
+			return notFound
+		}
+
+		lp.lggr.Debugf("Looking for matching block on chain blockNumber: %d blockHash: %s",
+			dbBlock.BlockNumber, dbBlock.BlockHash)
+		var chainBlock *evmtypes.Head
+		chainBlock, err = lp.ec.HeadByHash(ctx, dbBlock.BlockHash)
+		// our block in DB does not exist on chain
+		if (chainBlock == nil && err == nil) || errors.Is(err, ethereum.NotFound) {
+			err = nil
+			return notFound
+		}
+		if err != nil {
+			err = fmt.Errorf("failed to get block %s from RPC: %w", dbBlock.BlockHash, err)
+			return notFound
+		}
+
+		if chainBlock.BlockNumber() != dbBlock.BlockNumber {
+			err = fmt.Errorf("expected block numbers to match (db: %d, chain: %d), since block hashes match "+
+				"(db: %s, chain: %s)", dbBlock.BlockNumber, chainBlock.BlockNumber(), dbBlock.BlockHash, chainBlock.Hash)
+			return notFound
+		}
+
+		return found
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to find LCA: %w", err)
+	}
+
+	if !found {
+		return nil, fmt.Errorf("failed to find LCA, this means that the whole LogPoller state in the database was reorged out of the chain, or the RPC/Core node is misconfigured")
+	}
+
+	lcaBlockNumber := latest.BlockNumber - int64(lcaI)
+	lca, err := lp.orm.SelectBlockByNumber(ctx, lcaBlockNumber)
+	if err != nil {
+		return nil, fmt.Errorf("failed to select lca from db: %w", err)
+	}
+
+	if lca == nil {
+		return nil, fmt.Errorf("expected lca (blockNum: %d) to exist in DB", lcaBlockNumber)
+	}
+
+	return lca, nil
+}
+
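The `sort.Find` contract used above is easy to get backwards, so here is a minimal, self-contained sketch of the same search; the block range and the `onChain` predicate are made up for illustration and stand in for the DB and `HeadByHash` lookups:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Hypothetical DB state: blocks 10..16 are stored, but only blocks <= 13
	// are still on the canonical chain (14..16 were reorged out).
	oldest, latest := int64(10), int64(16)
	onChain := func(blockNumber int64) bool { return blockNumber <= 13 }

	// Walk the range as [latest, latest-1, ..., oldest] so the reorged blocks
	// form the leading prefix of 1s that sort.Find requires; the first 0 it
	// reports is the highest block still shared by the DB and the chain.
	i, found := sort.Find(int(latest-oldest)+1, func(i int) int {
		if onChain(latest - int64(i)) {
			return 0
		}
		return 1
	})
	if found {
		fmt.Println("LCA block:", latest-int64(i)) // prints "LCA block: 13"
	}
}
```

Because the predicate is monotone along the descending range (once a block is on chain, every older block is too, absent a concurrent reorg), the binary search needs only O(log n) `HeadByHash` calls instead of probing every stored block.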
func EvmWord(i uint64) common.Hash {
	var b = make([]byte, 8)
	binary.BigEndian.PutUint64(b, i)
diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go
index 74ec41fa85a..cb211043a4c 100644
--- a/core/chains/evm/logpoller/log_poller_test.go
+++ b/core/chains/evm/logpoller/log_poller_test.go
@@ -1921,3 +1921,119 @@ func markBlockAsFinalizedByHash(t *testing.T, th TestHarness, blockHash common.H
 	require.NoError(t, err)
 	th.Client.Blockchain().SetFinalized(b.Header())
 }
+
+func TestFindLCA(t *testing.T) {
+	ctx := testutils.Context(t)
+	ec := evmtest.NewEthClientMockWithDefaultChain(t)
+	lggr := logger.Test(t)
+	chainID := testutils.NewRandomEVMChainID()
+	db := pgtest.NewSqlxDB(t)
+
+	orm := logpoller.NewORM(chainID, db, lggr)
+
+	lpOpts := logpoller.Opts{
+		PollPeriod:               time.Hour,
+		FinalityDepth:            2,
+		BackfillBatchSize:        20,
+		RpcBatchSize:             10,
+		KeepFinalizedBlocksDepth: 1000,
+	}
+
+	lp := logpoller.NewLogPoller(orm, ec, lggr, lpOpts)
+	t.Run("Fails, if failed to select the latest block", func(t *testing.T) {
+		_, err := lp.FindLCA(ctx)
+		require.ErrorContains(t, err, "failed to select the latest block")
+	})
+	// oldest
+	require.NoError(t, orm.InsertBlock(ctx, common.HexToHash("0x123"), 10, time.Now(), 0))
+	// latest
+	latestBlockHash := common.HexToHash("0x124")
+	require.NoError(t, orm.InsertBlock(ctx, latestBlockHash, 16, time.Now(), 0))
+	t.Run("Fails, if caller's context is cancelled", func(t *testing.T) {
+		lCtx, cancel := context.WithCancel(ctx)
+		ec.On("HeadByHash", mock.Anything, latestBlockHash).Return(nil, nil).Run(func(_ mock.Arguments) {
+			cancel()
+		}).Once()
+		_, err := lp.FindLCA(lCtx)
+		require.ErrorContains(t, err, "aborted, FindLCA request cancelled")
+	})
+	t.Run("Fails, if RPC returns an error", func(t *testing.T) {
+		expectedError := fmt.Errorf("failed to call RPC")
+		ec.On("HeadByHash", mock.Anything, latestBlockHash).Return(nil, expectedError).Once()
+		_, err := lp.FindLCA(ctx)
+		require.ErrorContains(t, err, expectedError.Error())
+	})
+	t.Run("Fails, if block numbers do not match", func(t *testing.T) {
+		ec.On("HeadByHash", mock.Anything, latestBlockHash).Return(&evmtypes.Head{
+			Number: 123,
+		}, nil).Once()
+		_, err := lp.FindLCA(ctx)
+		require.ErrorContains(t, err, "expected block numbers to match")
+	})
+	t.Run("Fails, if none of the blocks in the db matches on chain", func(t *testing.T) {
+		ec.On("HeadByHash", mock.Anything, mock.Anything).Return(nil, nil).Times(3)
+		_, err := lp.FindLCA(ctx)
+		require.ErrorContains(t, err, "failed to find LCA, this means that the whole LogPoller state in the database was reorged out of the chain, or the RPC/Core node is misconfigured")
+	})
+
+	type block struct {
+		BN     int
+		Exists bool
+	}
+	testCases := []struct {
+		Name                string
+		Blocks              []block
+		ExpectedBlockNumber int
+		ExpectedError       error
+	}{
+		{
+			Name:                "All of the blocks are present on chain - returns the latest",
+			Blocks:              []block{{BN: 1, Exists: true}, {BN: 2, Exists: true}, {BN: 3, Exists: true}, {BN: 4, Exists: true}},
+			ExpectedBlockNumber: 4,
+		},
+		{
+			Name:                "None of the blocks exist on chain - returns an error",
+			Blocks:              []block{{BN: 1, Exists: false}, {BN: 2, Exists: false}, {BN: 3, Exists: false}, {BN: 4, Exists: false}},
+			ExpectedBlockNumber: 0,
+			ExpectedError:       fmt.Errorf("failed to find LCA, this means that the whole LogPoller state in the database was reorged out of the chain, or the RPC/Core node is misconfigured"),
+		},
+		{
+			Name: "Only latest block does not exist",
+			Blocks:              []block{{BN: 1, Exists: true}, {BN: 2, Exists: true}, {BN: 3, Exists: true}, {BN: 4, Exists: false}},
+			ExpectedBlockNumber: 3,
+		},
+		{
+			Name:                "Only oldest block exists on chain",
+			Blocks:              []block{{BN: 1, Exists: true}, {BN: 2, Exists: false}, {BN: 3, Exists: false}, {BN: 4, Exists: false}},
+			ExpectedBlockNumber: 1,
+		},
+	}
+
+	blockHashI := int64(0)
+	for _, tc := range testCases {
+		t.Run(tc.Name, func(t *testing.T) {
+			// reset the database
+			require.NoError(t, orm.DeleteLogsAndBlocksAfter(ctx, 0))
+			for _, b := range tc.Blocks {
+				// hashes are unique across all test cases
+				blockHashI++
+				hash := common.BigToHash(big.NewInt(blockHashI))
+				require.NoError(t, orm.InsertBlock(ctx, hash, int64(b.BN), time.Now(), 0))
+				var onChainBlock *evmtypes.Head
+				if b.Exists {
+					onChainBlock = &evmtypes.Head{Number: int64(b.BN)}
+				}
+				ec.On("HeadByHash", mock.Anything, hash).Return(onChainBlock, nil).Maybe()
+			}
+
+			result, err := lp.FindLCA(ctx)
+			if tc.ExpectedError != nil {
+				require.ErrorContains(t, err, tc.ExpectedError.Error())
+			} else {
+				require.NoError(t, err)
+				require.NotNil(t, result)
+				require.Equal(t, int64(tc.ExpectedBlockNumber), result.BlockNumber, "expected block numbers to match")
+			}
+		})
+	}
+}
diff --git a/core/chains/evm/logpoller/mocks/log_poller.go b/core/chains/evm/logpoller/mocks/log_poller.go
index 548e9ca3b90..ef3f4dbd428 100644
--- a/core/chains/evm/logpoller/mocks/log_poller.go
+++ b/core/chains/evm/logpoller/mocks/log_poller.go
@@ -37,6 +37,54 @@ func (_m *LogPoller) Close() error {
 	return r0
 }

+// DeleteLogsAndBlocksAfter provides a mock function with given fields: ctx, start
+func (_m *LogPoller) DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error {
+	ret := _m.Called(ctx, start)
+
+	if len(ret) == 0 {
+		panic("no return value specified for DeleteLogsAndBlocksAfter")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
+		r0 = rf(ctx, start)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// FindLCA provides a mock function with given fields: ctx
+func (_m *LogPoller) FindLCA(ctx context.Context) (*logpoller.LogPollerBlock, error) {
+	ret := _m.Called(ctx)
+
+	if len(ret) == 0 {
+		panic("no return value specified for FindLCA")
+	}
+
+	var r0 *logpoller.LogPollerBlock
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context) (*logpoller.LogPollerBlock, error)); ok {
+		return rf(ctx)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context) *logpoller.LogPollerBlock); ok {
+		r0 = rf(ctx)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*logpoller.LogPollerBlock)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+		r1 = rf(ctx)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
 // GetBlocksRange provides a mock function with given fields: ctx, numbers
 func (_m *LogPoller) GetBlocksRange(ctx context.Context, numbers []uint64) ([]logpoller.LogPollerBlock, error) {
 	ret := _m.Called(ctx, numbers)
diff --git a/core/chains/evm/logpoller/observability.go b/core/chains/evm/logpoller/observability.go
index 14dec5274ad..8f3cdfe185e 100644
--- a/core/chains/evm/logpoller/observability.go
+++ b/core/chains/evm/logpoller/observability.go
@@ -151,6 +151,12 @@ func (o *ObservedORM) SelectLatestBlock(ctx context.Context) (*LogPollerBlock, e
 	})
 }

+func (o *ObservedORM) SelectOldestBlock(ctx context.Context, minAllowedBlockNumber int64) (*LogPollerBlock, error) {
+	return withObservedQuery(o, "SelectOldestBlock", func() (*LogPollerBlock, error) {
+		return o.ORM.SelectOldestBlock(ctx, minAllowedBlockNumber)
+	})
+}
+
 func (o *ObservedORM) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) {
 	return withObservedQuery(o, "SelectLatestLogByEventSigWithConfs", func() (*Log, error) {
 		return o.ORM.SelectLatestLogByEventSigWithConfs(ctx, eventSig, address, confs)
diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go
index 838a38c8ebb..5e0a74a9183 100644
--- a/core/chains/evm/logpoller/orm.go
+++ b/core/chains/evm/logpoller/orm.go
@@ -38,6 +38,7 @@ type ORM interface {
 	SelectBlockByNumber(ctx context.Context, blockNumber int64) (*LogPollerBlock, error)
 	SelectBlockByHash(ctx context.Context, hash common.Hash) (*LogPollerBlock, error)
 	SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error)
+	SelectOldestBlock(ctx context.Context, minAllowedBlockNumber int64) (*LogPollerBlock, error)

 	SelectLogs(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash) ([]Log, error)
 	SelectLogsWithSigs(ctx context.Context, start, end int64, address common.Address, eventSigs []common.Hash) ([]Log, error)
@@ -202,6 +203,14 @@ func (o *DSORM) SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error)
 	return &b, nil
 }

+func (o *DSORM) SelectOldestBlock(ctx context.Context, minAllowedBlockNumber int64) (*LogPollerBlock, error) {
+	var b LogPollerBlock
+	if err := o.ds.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE evm_chain_id = $1 AND block_number >= $2 ORDER BY block_number ASC LIMIT 1`, ubig.New(o.chainID), minAllowedBlockNumber); err != nil {
+		return nil, err
+	}
+	return &b, nil
+}
+
 func (o *DSORM) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) {
 	args, err := newQueryArgsForEvent(o.chainID, address, eventSig).
 		withConfs(confs).
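For clarity, here is a sketch of what the new `SelectOldestBlock` query returns; the struct and the in-memory slice are illustrative stand-ins for the real ORM types and table, with the data chosen to mirror the `TestSelectOldestBlock` fixtures below:

```go
package main

import (
	"fmt"
	"sort"
)

// block is a pared-down stand-in for LogPollerBlock.
type block struct {
	number int64
	hash   string
}

// selectOldestBlock mirrors the query
//   SELECT * FROM evm.log_poller_blocks
//   WHERE evm_chain_id = $1 AND block_number >= $2
//   ORDER BY block_number ASC LIMIT 1
// over a slice sorted by ascending block number: it returns the oldest block
// at or above minAllowedBlockNumber, or nil when no such block exists.
func selectOldestBlock(blocks []block, minAllowedBlockNumber int64) *block {
	i := sort.Search(len(blocks), func(i int) bool {
		return blocks[i].number >= minAllowedBlockNumber
	})
	if i == len(blocks) {
		return nil
	}
	return &blocks[i]
}

func main() {
	blocks := []block{{11, "0x1232"}, {13, "0x1233"}, {15, "0x1234"}}
	fmt.Println(selectOldestBlock(blocks, 0).number)  // 11: the oldest block overall
	fmt.Println(selectOldestBlock(blocks, 12).number) // 13: block 11 is below the limit
}
```

This "at or above" behaviour is what lets FindLCA probe a block number that is missing from the DB: the closest stored child stands in for it.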
diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go
index 8a45ff2f1c5..2a1be62dd5b 100644
--- a/core/chains/evm/logpoller/orm_test.go
+++ b/core/chains/evm/logpoller/orm_test.go
@@ -1759,3 +1759,33 @@ func Benchmark_DeleteExpiredLogs(b *testing.B) {
 		assert.NoError(b, err1)
 	}
 }
+
+func TestSelectOldestBlock(t *testing.T) {
+	th := SetupTH(t, lpOpts)
+	o1 := th.ORM
+	o2 := th.ORM2
+	ctx := testutils.Context(t)
+	t.Run("Selects oldest within given chain", func(t *testing.T) {
+		// insert older blocks into a different chain
+		require.NoError(t, o2.InsertBlock(ctx, common.HexToHash("0x1231"), 11, time.Now(), 0))
+		require.NoError(t, o2.InsertBlock(ctx, common.HexToHash("0x1232"), 12, time.Now(), 0))
+		// insert newer blocks into the chain under test
+		require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1233"), 13, time.Now(), 0))
+		require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1231"), 14, time.Now(), 0))
+		block, err := o1.SelectOldestBlock(ctx, 0)
+		require.NoError(t, err)
+		require.NotNil(t, block)
+		require.Equal(t, int64(13), block.BlockNumber)
+		require.Equal(t, common.HexToHash("0x1233"), block.BlockHash)
+	})
+	t.Run("Does not select blocks older than specified limit", func(t *testing.T) {
+		require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1232"), 11, time.Now(), 0))
+		require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1233"), 13, time.Now(), 0))
+		require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1234"), 15, time.Now(), 0))
+		block, err := o1.SelectOldestBlock(ctx, 12)
+		require.NoError(t, err)
+		require.NotNil(t, block)
+		require.Equal(t, int64(13), block.BlockNumber)
+		require.Equal(t, common.HexToHash("0x1233"), block.BlockHash)
+	})
+}
diff --git a/core/cmd/blocks_commands.go b/core/cmd/blocks_commands.go
index 72b0523e18d..158caf253ab 100644
--- a/core/cmd/blocks_commands.go
+++ b/core/cmd/blocks_commands.go
@@ -9,6 +9,8 @@ import (
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 	"go.uber.org/multierr"
+
+	"github.com/smartcontractkit/chainlink/v2/core/web"
 )

 func initBlocksSubCmds(s *Shell) []cli.Command {
@@ -34,6 +36,18 @@ func initBlocksSubCmds(s *Shell) []cli.Command {
 				},
 			},
 		},
+		{
+			Name:   "find-lca",
+			Usage:  "Find the latest common block stored in the DB and on chain",
+			Action: s.FindLCA,
+			Flags: []cli.Flag{
+				cli.Int64Flag{
+					Name:     "evm-chain-id",
+					Usage:    "Chain ID of the EVM-based blockchain",
+					Required: true,
+				},
+			},
+		},
 	}
 }

@@ -75,3 +89,47 @@ func (s *Shell) ReplayFromBlock(c *cli.Context) (err error) {
 	fmt.Println("Replay started")
 	return nil
 }
+
+// LCAPresenter implements TableRenderer for an LCAResponse.
+type LCAPresenter struct {
+	web.LCAResponse
+}
+
+// ToRow presents the LCAResponse as a slice of strings.
+func (p *LCAPresenter) ToRow() []string {
+	return []string{p.EVMChainID.String(), p.Hash, strconv.FormatInt(p.BlockNumber, 10)}
+}
+
+// RenderTable implements TableRenderer
+// Just renders a single row
+func (p LCAPresenter) RenderTable(rt RendererTable) error {
+	renderList([]string{"ChainID", "Block Hash", "Block Number"}, [][]string{p.ToRow()}, rt.Writer)
+
+	return nil
+}
+
+// FindLCA finds the latest common block stored in the DB and on chain.
+func (s *Shell) FindLCA(c *cli.Context) (err error) {
+	v := url.Values{}
+
+	if c.IsSet("evm-chain-id") {
+		v.Add("evmChainID", fmt.Sprintf("%d", c.Int64("evm-chain-id")))
+	}
+
+	resp, err := s.HTTP.Get(s.ctx(),
+		fmt.Sprintf(
+			"/v2/find_lca?%s",
+			v.Encode(),
+		))
+	if err != nil {
+		return s.errorOut(err)
+	}
+
+	defer func() {
+		if cerr := resp.Body.Close(); cerr != nil {
+			err = multierr.Append(err, cerr)
+		}
+	}()
+
+	return s.renderAPIResponse(resp, &LCAPresenter{}, "Last Common Ancestor")
+}
diff --git a/core/cmd/blocks_commands_test.go b/core/cmd/blocks_commands_test.go
index 30540748cb1..f7656b94ae1 100644
--- a/core/cmd/blocks_commands_test.go
+++ b/core/cmd/blocks_commands_test.go
@@ -41,3 +41,28 @@ func Test_ReplayFromBlock(t *testing.T) {
 	c = cli.NewContext(nil, set, nil)
 	require.NoError(t, client.ReplayFromBlock(c))
 }
+
+func Test_FindLCA(t *testing.T) {
+	t.Parallel()
+
+	app := startNewApplicationV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
+		c.EVM[0].ChainID = (*ubig.Big)(big.NewInt(5))
+		c.EVM[0].Enabled = ptr(true)
+	})
+
+	client, _ := app.NewShellAndRenderer()
+
+	set := flag.NewFlagSet("test", 0)
+	flagSetApplyFromAction(client.FindLCA, set, "")
+
+	// Incorrect chain ID
+	require.NoError(t, set.Set("evm-chain-id", "1"))
+	c := cli.NewContext(nil, set, nil)
+	require.ErrorContains(t, client.FindLCA(c), "does not match any local chains")
+
+	// Correct chain ID
+	require.NoError(t, set.Set("evm-chain-id", "5"))
+	c = cli.NewContext(nil, set, nil)
+	require.ErrorContains(t, client.FindLCA(c), "FindLCA is only available if LogPoller is enabled")
+}
diff --git a/core/cmd/shell.go b/core/cmd/shell.go
index 0372148e742..adbb66ce63f 100644
--- a/core/cmd/shell.go
+++ b/core/cmd/shell.go
@@ -174,8 +174,9 @@ func (n ChainlinkAppFactory) NewApplication(ctx context.Context, cfg chainlink.G
 	}

 	evmFactoryCfg := chainlink.EVMFactoryConfig{
-		CSAETHKeystore: keyStore,
-		ChainOpts:      legacyevm.ChainOpts{AppConfig: cfg, MailMon: mailMon, DS: ds},
+		CSAETHKeystore:     keyStore,
+		ChainOpts:          legacyevm.ChainOpts{AppConfig: cfg, MailMon: mailMon, DS: ds},
+		MercuryTransmitter: cfg.Mercury().Transmitter(),
 	}
 	// evm always enabled for backward compatibility
 	// TODO BCF-2510 this needs to change in order to clear the path for EVM extraction
diff --git a/core/cmd/shell_local.go b/core/cmd/shell_local.go
index 6dbffbe404a..7c9c025d4be 100644
--- a/core/cmd/shell_local.go
+++ b/core/cmd/shell_local.go
@@ -34,6 +34,7 @@ import (
 	"github.com/jmoiron/sqlx"

 	cutils "github.com/smartcontractkit/chainlink-common/pkg/utils"
+
 	"github.com/smartcontractkit/chainlink/v2/core/build"
 	"github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
 	"github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
@@ -253,6 +254,23 @@ func initLocalSubCmds(s *Shell, safe bool) []cli.Command {
 				},
 			},
 		},
+		{
+			Name:   "remove-blocks",
+			Usage:  "Deletes block range and all associated data",
+			Action: s.RemoveBlocks,
+			Flags: []cli.Flag{
+				cli.IntFlag{
+					Name:     "start",
+					Usage:    "Beginning of block range to be deleted",
+					Required: true,
+				},
+				cli.Int64Flag{
+					Name:     "evm-chain-id",
+					Usage:    "Chain ID of the EVM-based blockchain",
+					Required: true,
+				},
+			},
+		},
 	}
 }

@@ -580,6 +598,11 @@ func (s *Shell) RebroadcastTransactions(c *cli.Context) (err error) {
 		}
 	}

+	err = s.Config.Validate()
+	if err != nil {
+		return err
+	}
+
 	lggr := logger.Sugared(s.Logger.Named("RebroadcastTransactions"))
 	db, err := pg.OpenUnlockedDB(s.Config.AppID(), s.Config.Database())
 	if err != nil {
@@ -1175,3 +1198,64 @@ func insertFixtures(dbURL url.URL, pathToFixtures string) (err error) {
 	_, err = db.Exec(string(fixturesSQL))
 	return err
 }
+
+// RemoveBlocks - removes blocks and logs starting from the specified block number
+func (s *Shell) RemoveBlocks(c *cli.Context) error {
+	start := c.Int64("start")
+	if start <= 0 {
+		return s.errorOut(errors.New("Must pass a positive value in '--start' parameter"))
+	}
+
+	chainID := big.NewInt(0)
+	if c.IsSet("evm-chain-id") {
+		err := chainID.UnmarshalText([]byte(c.String("evm-chain-id")))
+		if err != nil {
+			return s.errorOut(err)
+		}
+	}
+
+	cfg := s.Config
+	err := cfg.Validate()
+	if err != nil {
+		return s.errorOut(fmt.Errorf("error validating configuration: %+v", err))
+	}
+
+	lggr := logger.Sugared(s.Logger.Named("RemoveBlocks"))
+	ldb := pg.NewLockedDB(cfg.AppID(), cfg.Database(), cfg.Database().Lock(), lggr)
+	ctx, cancel := context.WithCancel(context.Background())
+	go shutdown.HandleShutdown(func(sig string) {
+		cancel()
+		lggr.Info("received signal to stop - closing the database and releasing lock")
+
+		if cErr := ldb.Close(); cErr != nil {
+			lggr.Criticalf("Failed to close LockedDB: %v", cErr)
+		}
+
+		if cErr := s.CloseLogger(); cErr != nil {
+			log.Printf("Failed to close Logger: %v", cErr)
+		}
+	})
+
+	if err = ldb.Open(ctx); err != nil {
+		// If not successful, we know that neither the lock nor the connection remains open
+		return s.errorOut(errors.Wrap(err, "opening db"))
+	}
+	defer lggr.ErrorIfFn(ldb.Close, "Error closing db")
+
+	// From now on, DB locks and the DB connection will be released on every return.
+	// Watch out for logger.Fatal* calls and os.Exit(), because deferred functions will not be executed.
+
+	app, err := s.AppFactory.NewApplication(ctx, s.Config, s.Logger, ldb.DB())
+	if err != nil {
+		return s.errorOut(errors.Wrap(err, "fatal error instantiating application"))
+	}
+
+	err = app.DeleteLogPollerDataAfter(ctx, chainID, start)
+	if err != nil {
+		return s.errorOut(err)
+	}
+
+	lggr.Infof("RemoveBlocks: successfully removed blocks")
+
+	return nil
+}
diff --git a/core/cmd/shell_local_test.go b/core/cmd/shell_local_test.go
index 7427e6caedb..e7322e513ae 100644
--- a/core/cmd/shell_local_test.go
+++ b/core/cmd/shell_local_test.go
@@ -2,6 +2,7 @@ package cmd_test

 import (
 	"flag"
+	"fmt"
 	"math/big"
 	"os"
 	"strconv"
@@ -514,3 +515,58 @@ func TestShell_CleanupChainTables(t *testing.T) {
 	c := cli.NewContext(nil, set, nil)
 	require.NoError(t, client.CleanupChainTables(c))
 }
+
+func TestShell_RemoveBlocks(t *testing.T) {
+	db := pgtest.NewSqlxDB(t)
+	cfg := configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) {
+		s.Password.Keystore = models.NewSecret("dummy")
+		c.EVM[0].Nodes[0].Name = ptr("fake")
+		c.EVM[0].Nodes[0].HTTPURL = commonconfig.MustParseURL("http://fake.com")
+		c.EVM[0].Nodes[0].WSURL = commonconfig.MustParseURL("WSS://fake.com/ws")
+		// seems to be needed for config validate
+		c.Insecure.OCRDevelopmentMode = nil
+	})
+
+	lggr := logger.TestLogger(t)
+
+	app := mocks.NewApplication(t)
+	app.On("GetSqlxDB").Maybe().Return(db)
+	shell := cmd.Shell{
+		Config:                 cfg,
+		AppFactory:             cltest.InstanceAppFactory{App: app},
+		FallbackAPIInitializer: cltest.NewMockAPIInitializer(t),
+		Runner:                 cltest.EmptyRunner{},
+		Logger:                 lggr,
+	}
+
+	t.Run("Returns error, if --start is not positive", func(t *testing.T) {
+		set := flag.NewFlagSet("test", 0)
+		flagSetApplyFromAction(shell.RemoveBlocks, set, "")
+		require.NoError(t, set.Set("start", "0"))
require.NoError(t, set.Set("evm-chain-id", "12")) + c := cli.NewContext(nil, set, nil) + err := shell.RemoveBlocks(c) + require.ErrorContains(t, err, "Must pass a positive value in '--start' parameter") + }) + t.Run("Returns error, if removal fails", func(t *testing.T) { + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(shell.RemoveBlocks, set, "") + require.NoError(t, set.Set("start", "10000")) + require.NoError(t, set.Set("evm-chain-id", "12")) + expectedError := fmt.Errorf("failed to delete log poller's data") + app.On("DeleteLogPollerDataAfter", mock.Anything, big.NewInt(12), int64(10000)).Return(expectedError).Once() + c := cli.NewContext(nil, set, nil) + err := shell.RemoveBlocks(c) + require.ErrorContains(t, err, expectedError.Error()) + }) + t.Run("Happy path", func(t *testing.T) { + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(shell.RemoveBlocks, set, "") + require.NoError(t, set.Set("start", "10000")) + require.NoError(t, set.Set("evm-chain-id", "12")) + app.On("DeleteLogPollerDataAfter", mock.Anything, big.NewInt(12), int64(10000)).Return(nil).Once() + c := cli.NewContext(nil, set, nil) + err := shell.RemoveBlocks(c) + require.NoError(t, err) + }) +} diff --git a/core/config/docs/core.toml b/core/config/docs/core.toml index 605f6ced0bc..92d75430daf 100644 --- a/core/config/docs/core.toml +++ b/core/config/docs/core.toml @@ -622,3 +622,17 @@ LatestReportDeadline = "5s" # Default [Mercury.TLS] # CertFile is the path to a PEM file of trusted root certificate authority certificates CertFile = "/path/to/client/certs.pem" # Example + +# Mercury.Transmitter controls settings for the mercury transmitter +[Mercury.Transmitter] +# TransmitQueueMaxSize controls the size of the transmit queue. This is scoped +# per OCR instance. If the queue is full, the transmitter will start dropping +# the oldest messages in order to make space. +# +# This is useful if mercury server goes offline and the nop needs to buffer +# transmissions. +TransmitQueueMaxSize = 10_000 # Default +# TransmitTimeout controls how long the transmitter will wait for a response +# when sending a message to the mercury server, before aborting and considering +# the transmission to be failed. 
+TransmitTimeout = "5s" # Default diff --git a/core/config/mercury_config.go b/core/config/mercury_config.go index 1210fd282ef..f16fc4661a5 100644 --- a/core/config/mercury_config.go +++ b/core/config/mercury_config.go @@ -3,6 +3,7 @@ package config import ( "time" + commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config" "github.com/smartcontractkit/chainlink-common/pkg/types" ) @@ -16,8 +17,14 @@ type MercuryTLS interface { CertFile() string } +type MercuryTransmitter interface { + TransmitQueueMaxSize() uint32 + TransmitTimeout() commonconfig.Duration +} + type Mercury interface { Credentials(credName string) *types.MercuryCredentials Cache() MercuryCache TLS() MercuryTLS + Transmitter() MercuryTransmitter } diff --git a/core/config/toml/types.go b/core/config/toml/types.go index ed52c21e34e..ba74528b3b6 100644 --- a/core/config/toml/types.go +++ b/core/config/toml/types.go @@ -1312,14 +1312,30 @@ func (m *MercuryTLS) ValidateConfig() (err error) { return } +type MercuryTransmitter struct { + TransmitQueueMaxSize *uint32 + TransmitTimeout *commonconfig.Duration +} + +func (m *MercuryTransmitter) setFrom(f *MercuryTransmitter) { + if v := f.TransmitQueueMaxSize; v != nil { + m.TransmitQueueMaxSize = v + } + if v := f.TransmitTimeout; v != nil { + m.TransmitTimeout = v + } +} + type Mercury struct { - Cache MercuryCache `toml:",omitempty"` - TLS MercuryTLS `toml:",omitempty"` + Cache MercuryCache `toml:",omitempty"` + TLS MercuryTLS `toml:",omitempty"` + Transmitter MercuryTransmitter `toml:",omitempty"` } func (m *Mercury) setFrom(f *Mercury) { m.Cache.setFrom(&f.Cache) m.TLS.setFrom(&f.TLS) + m.Transmitter.setFrom(&f.Transmitter) } func (m *Mercury) ValidateConfig() (err error) { diff --git a/core/gethwrappers/keystone/generated/keystone_capability_registry/keystone_capability_registry.go b/core/gethwrappers/keystone/generated/keystone_capability_registry/keystone_capability_registry.go index 47c65d5276b..7e3ca7027eb 100644 --- a/core/gethwrappers/keystone/generated/keystone_capability_registry/keystone_capability_registry.go +++ b/core/gethwrappers/keystone/generated/keystone_capability_registry/keystone_capability_registry.go @@ -31,8 +31,10 @@ var ( ) type CapabilityRegistryCapability struct { - CapabilityType [32]byte - Version [32]byte + CapabilityType [32]byte + Version [32]byte + ResponseType uint8 + ConfigurationContract common.Address } type CapabilityRegistryNodeOperator struct { @@ -41,8 +43,8 @@ type CapabilityRegistryNodeOperator struct { } var CapabilityRegistryMetaData = &bind.MetaData{ - ABI: 
"[{\"inputs\":[],\"name\":\"InvalidNodeOperatorAdmin\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"capabilityId\",\"type\":\"bytes32\"}],\"name\":\"CapabilityAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"name\":\"NodeOperatorAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"}],\"name\":\"NodeOperatorRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"capabilityType\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"version\",\"type\":\"bytes32\"}],\"internalType\":\"structCapabilityRegistry.Capability\",\"name\":\"capability\",\"type\":\"tuple\"}],\"name\":\"addCapability\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"internalType\":\"structCapabilityRegistry.NodeOperator[]\",\"name\":\"nodeOperators\",\"type\":\"tuple[]\"}],\"name\":\"addNodeOperators\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"capabilityID\",\"type\":\"bytes32\"}],\"name\":\"getCapability\",\"outputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"capabilityType\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"version\",\"type\":\"bytes32\"}],\"internalType\":\"structCapabilityRegistry.Capability\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"capabilityType\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"version\",\"type\":\"bytes32\"}],\"name\":\"getCapabilityID\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"}],\"name\":\"getNodeOperator\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"internalType\":\"structCapabilityRegistry.NodeOperator\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\"
:\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"nodeOperatorIds\",\"type\":\"uint256[]\"}],\"name\":\"removeNodeOperators\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", - Bin: "0x608060405234801561001057600080fd5b5033806000816100675760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615610097576100978161009f565b505050610148565b336001600160a01b038216036100f75760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161005e565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b610ef1806101576000396000f3fe608060405234801561001057600080fd5b50600436106100be5760003560e01c80636e5f2869116100765780638da5cb5b1161005b5780638da5cb5b146101b65780639cb7c5f4146101de578063f2fde38b1461024557600080fd5b80636e5f28691461019b57806379ba5097146101ae57600080fd5b8063229111f5116100a7578063229111f514610120578063398f37731461016857806365c14dc71461017b57600080fd5b8063181f5a77146100c35780631cdf63431461010b575b600080fd5b604080518082018252601881527f4361706162696c697479526567697374727920312e302e300000000000000000602082015290516101029190610957565b60405180910390f35b61011e6101193660046109bd565b610258565b005b61015a61012e3660046109ff565b604080516020808201949094528082019290925280518083038201815260609092019052805191012090565b604051908152602001610102565b61011e6101763660046109bd565b61031b565b61018e610189366004610a21565b6104b4565b6040516101029190610a3a565b61011e6101a9366004610a7d565b61059a565b61011e610617565b60005460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610102565b61022a6101ec366004610a21565b604080518082019091526000808252602082015250600090815260026020908152604091829020825180840190935280548352600101549082015290565b60408051825181526020928301519281019290925201610102565b61011e610253366004610abe565b610719565b61026061072d565b60005b8181101561031657600083838381811061027f5761027f610ad9565b60209081029290920135600081815260039093526040832080547fffffffffffffffffffffffff00000000000000000000000000000000000000001681559093509190506102d060018301826108a5565b50506040518181527f1e5877d7b3001d1569bf733b76c7eceda58bd6c031e5b8d0b7042308ba2e9d4f9060200160405180910390a15061030f81610b08565b9050610263565b505050565b61032361072d565b60005b8181101561031657600083838381811061034257610342610ad9565b90506020028101906103549190610b67565b61035d90610c4c565b805190915073ffffffffffffffffffffffffffffffffffffffff166103ae576040517feeacd93900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600454604080518082018252835173ffffffffffffffffffffffffffffffffffffffff908116825260208086015181840190815260008681526003909252939020825181547fffffffffffffffffffffffff0000000000000000000000000000000000000000169216919091178155915190919060018201906104319082610db1565b5090505060046000815461044490610b08565b909155508151602083015160405173ffffffffffffffffffffffffffffffffffffffff909
216917fda6697b182650034bd205cdc2dbfabb06bdb3a0a83a2b45bfefa3c4881284e0b9161049991859190610ecb565b60405180910390a25050806104ad90610b08565b9050610326565b6040805180820190915260008152606060208201526000828152600360209081526040918290208251808401909352805473ffffffffffffffffffffffffffffffffffffffff168352600181018054919284019161051190610d16565b80601f016020809104026020016040519081016040528092919081815260200182805461053d90610d16565b801561058a5780601f1061055f5761010080835404028352916020019161058a565b820191906000526020600020905b81548152906001019060200180831161056d57829003601f168201915b5050505050815250509050919050565b6105a261072d565b60408051823560208083018290528085013583850181905284518085038601815260609094018086528451948301949094206000818152600290935294822092835560019092019190915582917f65610e5677eedff94555572640e442f89848a109ef8593fa927ac30b2565ff069190a25050565b60015473ffffffffffffffffffffffffffffffffffffffff16331461069d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b61072161072d565b61072a816107b0565b50565b60005473ffffffffffffffffffffffffffffffffffffffff1633146107ae576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610694565b565b3373ffffffffffffffffffffffffffffffffffffffff82160361082f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610694565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b5080546108b190610d16565b6000825580601f106108c1575050565b601f01602090049060005260206000209081019061072a91905b808211156108ef57600081556001016108db565b5090565b6000815180845260005b81811015610919576020818501810151868301820152016108fd565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b60208152600061096a60208301846108f3565b9392505050565b60008083601f84011261098357600080fd5b50813567ffffffffffffffff81111561099b57600080fd5b6020830191508360208260051b85010111156109b657600080fd5b9250929050565b600080602083850312156109d057600080fd5b823567ffffffffffffffff8111156109e757600080fd5b6109f385828601610971565b90969095509350505050565b60008060408385031215610a1257600080fd5b50508035926020909101359150565b600060208284031215610a3357600080fd5b5035919050565b6020815273ffffffffffffffffffffffffffffffffffffffff825116602082015260006020830151604080840152610a7560608401826108f3565b949350505050565b600060408284031215610a8f57600080fd5b50919050565b803573ffffffffffffffffffffffffffffffffffffffff81168114610ab957600080fd5b919050565b600060208284031215610ad057600080fd5b61096a82610a95565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610b60577f4e487b71000000000000000000000000000000000000000000000000000000006000526011600452
60246000fd5b5060010190565b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc1833603018112610b9b57600080fd5b9190910192915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040805190810167ffffffffffffffff81118282101715610bf757610bf7610ba5565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610c4457610c44610ba5565b604052919050565b600060408236031215610c5e57600080fd5b610c66610bd4565b610c6f83610a95565b815260208084013567ffffffffffffffff80821115610c8d57600080fd5b9085019036601f830112610ca057600080fd5b813581811115610cb257610cb2610ba5565b610ce2847fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601610bfd565b91508082523684828501011115610cf857600080fd5b80848401858401376000908201840152918301919091525092915050565b600181811c90821680610d2a57607f821691505b602082108103610a8f577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b601f82111561031657600081815260208120601f850160051c81016020861015610d8a5750805b601f850160051c820191505b81811015610da957828155600101610d96565b505050505050565b815167ffffffffffffffff811115610dcb57610dcb610ba5565b610ddf81610dd98454610d16565b84610d63565b602080601f831160018114610e325760008415610dfc5750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b178555610da9565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b82811015610e7f57888601518255948401946001909101908401610e60565b5085821015610ebb57878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b828152604060208201526000610a7560408301846108f356fea164736f6c6343000813000a", + ABI: 
"[{\"inputs\":[],\"name\":\"CapabilityAlreadyExists\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"proposedConfigurationContract\",\"type\":\"address\"}],\"name\":\"InvalidCapabilityConfigurationContractInterface\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidNodeOperatorAdmin\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"capabilityId\",\"type\":\"bytes32\"}],\"name\":\"CapabilityAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"name\":\"NodeOperatorAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"}],\"name\":\"NodeOperatorRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"capabilityType\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"version\",\"type\":\"bytes32\"},{\"internalType\":\"enumCapabilityRegistry.CapabilityResponseType\",\"name\":\"responseType\",\"type\":\"uint8\"},{\"internalType\":\"address\",\"name\":\"configurationContract\",\"type\":\"address\"}],\"internalType\":\"structCapabilityRegistry.Capability\",\"name\":\"capability\",\"type\":\"tuple\"}],\"name\":\"addCapability\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"internalType\":\"structCapabilityRegistry.NodeOperator[]\",\"name\":\"nodeOperators\",\"type\":\"tuple[]\"}],\"name\":\"addNodeOperators\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"capabilityID\",\"type\":\"bytes32\"}],\"name\":\"getCapability\",\"outputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"capabilityType\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"version\",\"type\":\"bytes32\"},{\"internalType\":\"enumCapabilityRegistry.CapabilityResponseType\",\"name\":\"responseType\",\"type\":\"uint8\"},{\"internalType\":\"address\",\"name\":\"configurationContract\",\"type\":\"address\"}],\"internalType\":\"structCapabilityRegistry.Capability\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"capabilityType\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"version\",\"type\":\"bytes32\"}],\"name\":\"getCapabilityID\",\"outputs\":[{\"internalType\":\"bytes32
\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"}],\"name\":\"getNodeOperator\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"internalType\":\"structCapabilityRegistry.NodeOperator\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"nodeOperatorIds\",\"type\":\"uint256[]\"}],\"name\":\"removeNodeOperators\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b5033806000816100675760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615610097576100978161009f565b505050610148565b336001600160a01b038216036100f75760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161005e565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6112cb806101576000396000f3fe608060405234801561001057600080fd5b50600436106100be5760003560e01c806365c14dc7116100765780638da5cb5b1161005b5780638da5cb5b1461018f5780639cb7c5f4146101b7578063f2fde38b146101d757600080fd5b806365c14dc71461016757806379ba50971461018757600080fd5b80631cdf6343116100a75780631cdf634314610120578063229111f514610133578063398f37731461015457600080fd5b8063117392ce146100c3578063181f5a77146100d8575b600080fd5b6100d66100d1366004610b88565b6101ea565b005b604080518082018252601881527f4361706162696c697479526567697374727920312e302e300000000000000000602082015290516101179190610c04565b60405180910390f35b6100d661012e366004610c63565b610420565b610146610141366004610ca5565b6104e3565b604051908152602001610117565b6100d6610162366004610c63565b610512565b61017a610175366004610cc7565b6106ab565b6040516101179190610ce0565b6100d6610791565b60005460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610117565b6101ca6101c5366004610cc7565b61088e565b6040516101179190610d52565b6100d66101e5366004610df0565b610938565b6101f261094c565b6000610203823560208401356104e3565b90506102106003826109cf565b15610247576040517fe288638f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006102596080840160608501610df0565b73ffffffffffffffffffffffffffffffffffffffff16146103c9576102846080830160608401610df0565b73ffffffffffffffffffffffffffffffffffffffff163b158061036457506102b26080830160608401610df0565b6040517f01ffc9a70000000000000000000000000000000000000000000000000000000081527f884efe6100000000000000000000000000000000000000000000000000000000600482015273ffffffffffffffffffffffffffffffffffffffff91909116906301ffc9a
790602401602060405180830381865afa15801561033e573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103629190610e0d565b155b156103c9576103796080830160608401610df0565b6040517fabb5e3fd00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911660048201526024015b60405180910390fd5b6103d46003826109ea565b50600081815260026020526040902082906103ef8282610e2f565b505060405181907f65610e5677eedff94555572640e442f89848a109ef8593fa927ac30b2565ff0690600090a25050565b61042861094c565b60005b818110156104de57600083838381811061044757610447610eb1565b60209081029290920135600081815260059093526040832080547fffffffffffffffffffffffff00000000000000000000000000000000000000001681559093509190506104986001830182610b3a565b50506040518181527f1e5877d7b3001d1569bf733b76c7eceda58bd6c031e5b8d0b7042308ba2e9d4f9060200160405180910390a1506104d781610ee0565b905061042b565b505050565b604080516020808201859052818301849052825180830384018152606090920190925280519101205b92915050565b61051a61094c565b60005b818110156104de57600083838381811061053957610539610eb1565b905060200281019061054b9190610f3f565b61055490611024565b805190915073ffffffffffffffffffffffffffffffffffffffff166105a5576040517feeacd93900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600654604080518082018252835173ffffffffffffffffffffffffffffffffffffffff908116825260208086015181840190815260008681526005909252939020825181547fffffffffffffffffffffffff000000000000000000000000000000000000000016921691909117815591519091906001820190610628908261118b565b5090505060066000815461063b90610ee0565b909155508151602083015160405173ffffffffffffffffffffffffffffffffffffffff909216917fda6697b182650034bd205cdc2dbfabb06bdb3a0a83a2b45bfefa3c4881284e0b91610690918591906112a5565b60405180910390a25050806106a490610ee0565b905061051d565b6040805180820190915260008152606060208201526000828152600560209081526040918290208251808401909352805473ffffffffffffffffffffffffffffffffffffffff1683526001810180549192840191610708906110f0565b80601f0160208091040260200160405190810160405280929190818152602001828054610734906110f0565b80156107815780601f1061075657610100808354040283529160200191610781565b820191906000526020600020905b81548152906001019060200180831161076457829003601f168201915b5050505050815250509050919050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610812576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064016103c0565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b604080516080808201835260008083526020808401829052838501829052606084018290528582526002808252918590208551938401865280548452600180820154928501929092529182015493949293919284019160ff16908111156108f7576108f7610d23565b600181111561090857610908610d23565b815260029190910154610100900473ffffffffffffffffffffffffffffffffffffffff1660209091015292915050565b61094061094c565b610949816109f6565b50565b60005473ffffffffffffffffffffffffffffffffffffffff1633146109cd576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016103c0565b565b600081815260018301602052604081205415155b9392505050565b60006109e38383610aeb565b3373ffffffffffffffffffffffffffffffffffffffff8216
03610a75576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016103c0565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000818152600183016020526040812054610b325750815460018181018455600084815260208082209093018490558454848252828601909352604090209190915561050c565b50600061050c565b508054610b46906110f0565b6000825580601f10610b56575050565b601f01602090049060005260206000209081019061094991905b80821115610b845760008155600101610b70565b5090565b600060808284031215610b9a57600080fd5b50919050565b6000815180845260005b81811015610bc657602081850181015186830182015201610baa565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b6020815260006109e36020830184610ba0565b60008083601f840112610c2957600080fd5b50813567ffffffffffffffff811115610c4157600080fd5b6020830191508360208260051b8501011115610c5c57600080fd5b9250929050565b60008060208385031215610c7657600080fd5b823567ffffffffffffffff811115610c8d57600080fd5b610c9985828601610c17565b90969095509350505050565b60008060408385031215610cb857600080fd5b50508035926020909101359150565b600060208284031215610cd957600080fd5b5035919050565b6020815273ffffffffffffffffffffffffffffffffffffffff825116602082015260006020830151604080840152610d1b6060840182610ba0565b949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b81518152602080830151908201526040820151608082019060028110610da1577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b8060408401525073ffffffffffffffffffffffffffffffffffffffff606084015116606083015292915050565b73ffffffffffffffffffffffffffffffffffffffff8116811461094957600080fd5b600060208284031215610e0257600080fd5b81356109e381610dce565b600060208284031215610e1f57600080fd5b815180151581146109e357600080fd5b813581556020820135600182015560028101604083013560028110610e5357600080fd5b81546060850135610e6381610dce565b74ffffffffffffffffffffffffffffffffffffffff008160081b1660ff84167fffffffffffffffffffffff000000000000000000000000000000000000000000841617178455505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610f38577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc1833603018112610f7357600080fd5b9190910192915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040805190810167ffffffffffffffff81118282101715610fcf57610fcf610f7d565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff8111828210171561101c5761101c610f7d565b604052919050565b60006040823603121561103657600080fd5b61103e610fac565b823561104981610dce565b815260208381013567ffffffffffffffff8082111561106757600080fd5b9085019036601f83011261107a57600080fd5b81358181111561108c5761108c610f7d565b6110bc847fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601610fd5565b915080825236848285010111156110d257600080fd5b80848401858401376000908201840152918301919091525092915050565b600181811c9082168061110457607f8
21691505b602082108103610b9a577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b601f8211156104de57600081815260208120601f850160051c810160208610156111645750805b601f850160051c820191505b8181101561118357828155600101611170565b505050505050565b815167ffffffffffffffff8111156111a5576111a5610f7d565b6111b9816111b384546110f0565b8461113d565b602080601f83116001811461120c57600084156111d65750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b178555611183565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b828110156112595788860151825594840194600190910190840161123a565b508582101561129557878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b828152604060208201526000610d1b6040830184610ba056fea164736f6c6343000813000a", } var CapabilityRegistryABI = CapabilityRegistryMetaData.ABI diff --git a/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt index 62a75a8a644..66d26cead2d 100644 --- a/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt +++ b/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -1,4 +1,4 @@ GETH_VERSION: 1.13.8 forwarder: ../../../contracts/solc/v0.8.19/KeystoneForwarder/KeystoneForwarder.abi ../../../contracts/solc/v0.8.19/KeystoneForwarder/KeystoneForwarder.bin b4c900aae9e022f01abbac7993d41f93912247613ac6270b0c4da4ef6f2016e3 -keystone_capability_registry: ../../../contracts/solc/v0.8.19/CapabilityRegistry/CapabilityRegistry.abi ../../../contracts/solc/v0.8.19/CapabilityRegistry/CapabilityRegistry.bin 9eaa36e45c1b33c1fa72b7e25d6d1ca351413225611abb84ae053270b569cca4 +keystone_capability_registry: ../../../contracts/solc/v0.8.19/CapabilityRegistry/CapabilityRegistry.abi ../../../contracts/solc/v0.8.19/CapabilityRegistry/CapabilityRegistry.bin d526b850decbf88dbe3dd66ed2160f1c311fb4ee6df0ac50c91f31521b361a52 ocr3_capability: ../../../contracts/solc/v0.8.19/OCR3Capability/OCR3Capability.abi ../../../contracts/solc/v0.8.19/OCR3Capability/OCR3Capability.bin 9dcbdf55bd5729ba266148da3f17733eb592c871c2108ccca546618628fd9ad2 diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go index dc7079e44d9..58cedbb96e1 100644 --- a/core/internal/cltest/cltest.go +++ b/core/internal/cltest/cltest.go @@ -369,7 +369,8 @@ func NewApplicationWithConfig(t testing.TB, cfg chainlink.GeneralConfig, flagsAn MailMon: mailMon, DS: ds, }, - CSAETHKeystore: keyStore, + CSAETHKeystore: keyStore, + MercuryTransmitter: cfg.Mercury().Transmitter(), } if cfg.EVMEnabled() { diff --git a/core/internal/mocks/application.go b/core/internal/mocks/application.go index c83b37a0e5d..f845d46ca8d 100644 --- a/core/internal/mocks/application.go +++ b/core/internal/mocks/application.go @@ -23,6 +23,8 @@ import ( logger "github.com/smartcontractkit/chainlink/v2/core/logger" + logpoller "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + mock "github.com/stretchr/testify/mock" pipeline "github.com/smartcontractkit/chainlink/v2/core/services/pipeline" @@ -147,6 +149,24 @@ func (_m *Application) DeleteJob(ctx context.Context, jobID int32) error { return r0 } +// DeleteLogPollerDataAfter provides a mock function with given fields: ctx, chainID, start +func (_m *Application) 
DeleteLogPollerDataAfter(ctx context.Context, chainID *big.Int, start int64) error { + ret := _m.Called(ctx, chainID, start) + + if len(ret) == 0 { + panic("no return value specified for DeleteLogPollerDataAfter") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int, int64) error); ok { + r0 = rf(ctx, chainID, start) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // EVMORM provides a mock function with given fields: func (_m *Application) EVMORM() types.Configs { ret := _m.Called() @@ -167,6 +187,36 @@ func (_m *Application) EVMORM() types.Configs { return r0 } +// FindLCA provides a mock function with given fields: ctx, chainID +func (_m *Application) FindLCA(ctx context.Context, chainID *big.Int) (*logpoller.LogPollerBlock, error) { + ret := _m.Called(ctx, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindLCA") + } + + var r0 *logpoller.LogPollerBlock + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*logpoller.LogPollerBlock, error)); ok { + return rf(ctx, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *logpoller.LogPollerBlock); ok { + r0 = rf(ctx, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*logpoller.LogPollerBlock) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetAuditLogger provides a mock function with given fields: func (_m *Application) GetAuditLogger() audit.AuditLogger { ret := _m.Called() diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go index 2aebef3f8f7..ae3db2e7a73 100644 --- a/core/services/chainlink/application.go +++ b/core/services/chainlink/application.go @@ -22,7 +22,9 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/utils" "github.com/smartcontractkit/chainlink-common/pkg/utils/jsonserializable" "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox" + "github.com/smartcontractkit/chainlink/v2/core/capabilities" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/static" "github.com/smartcontractkit/chainlink/v2/core/bridges" @@ -115,6 +117,11 @@ type Application interface { ID() uuid.UUID SecretGenerator() SecretGenerator + + // FindLCA finds the last common ancestor between the LogPoller blocks stored in the database and the chain available over RPC + FindLCA(ctx context.Context, chainID *big.Int) (*logpoller.LogPollerBlock, error) + // DeleteLogPollerDataAfter deletes LogPoller state starting from the specified block + DeleteLogPollerDataAfter(ctx context.Context, chainID *big.Int, start int64) error } // ChainlinkApplication contains fields for the JobSubscriber, Scheduler, @@ -886,3 +893,39 @@ func (app *ChainlinkApplication) GetWebAuthnConfiguration() sessions.WebAuthnCon func (app *ChainlinkApplication) ID() uuid.UUID { return app.Config.AppID() } + +// FindLCA finds the last common ancestor +func (app *ChainlinkApplication) FindLCA(ctx context.Context, chainID *big.Int) (*logpoller.LogPollerBlock, error) { + chain, err := app.GetRelayers().LegacyEVMChains().Get(chainID.String()) + if err != nil { + return nil, err + } + if !app.Config.Feature().LogPoller() { + return nil, fmt.Errorf("FindLCA is only available if LogPoller is enabled") + } + + lca, err := chain.LogPoller().FindLCA(ctx) + if err != nil { + return nil, fmt.Errorf("failed to find lca: %w", err) + } + + return lca, nil +} + +//
DeleteLogPollerDataAfter deletes LogPoller state starting from the specified block +func (app *ChainlinkApplication) DeleteLogPollerDataAfter(ctx context.Context, chainID *big.Int, start int64) error { + chain, err := app.GetRelayers().LegacyEVMChains().Get(chainID.String()) + if err != nil { + return err + } + if !app.Config.Feature().LogPoller() { + return fmt.Errorf("DeleteLogPollerDataAfter is only available if LogPoller is enabled") + } + + err = chain.LogPoller().DeleteLogsAndBlocksAfter(ctx, start) + if err != nil { + return fmt.Errorf("failed to delete LogPoller data: %w", err) + } + + return nil +} diff --git a/core/services/chainlink/config_mercury.go b/core/services/chainlink/config_mercury.go index 27303a68899..1b64e0bde45 100644 --- a/core/services/chainlink/config_mercury.go +++ b/core/services/chainlink/config_mercury.go @@ -3,6 +3,7 @@ package chainlink import ( "time" + commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config" "github.com/smartcontractkit/chainlink-common/pkg/types" "github.com/smartcontractkit/chainlink/v2/core/config" @@ -25,6 +26,8 @@ func (m *mercuryCacheConfig) LatestReportDeadline() time.Duration { return m.c.LatestReportDeadline.Duration() } +var _ config.MercuryTLS = (*mercuryTLSConfig)(nil) + type mercuryTLSConfig struct { c toml.MercuryTLS } @@ -33,6 +36,20 @@ func (m *mercuryTLSConfig) CertFile() string { return *m.c.CertFile } +var _ config.MercuryTransmitter = (*mercuryTransmitterConfig)(nil) + +type mercuryTransmitterConfig struct { + c toml.MercuryTransmitter +} + +func (m *mercuryTransmitterConfig) TransmitQueueMaxSize() uint32 { + return *m.c.TransmitQueueMaxSize +} + +func (m *mercuryTransmitterConfig) TransmitTimeout() commonconfig.Duration { + return *m.c.TransmitTimeout +} + type mercuryConfig struct { c toml.Mercury s toml.MercurySecrets @@ -60,3 +77,7 @@ func (m *mercuryConfig) Cache() config.MercuryCache { func (m *mercuryConfig) TLS() config.MercuryTLS { return &mercuryTLSConfig{c: m.c.TLS} } + +func (m *mercuryConfig) Transmitter() config.MercuryTransmitter { + return &mercuryTransmitterConfig{c: m.c.Transmitter} +} diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go index d02948fd07b..0d40697345d 100644 --- a/core/services/chainlink/config_test.go +++ b/core/services/chainlink/config_test.go @@ -710,6 +710,10 @@ func TestConfig_Marshal(t *testing.T) { TLS: toml.MercuryTLS{ CertFile: ptr("/path/to/cert.pem"), }, + Transmitter: toml.MercuryTransmitter{ + TransmitQueueMaxSize: ptr(uint32(123)), + TransmitTimeout: commoncfg.MustNewDuration(234 * time.Second), + }, } for _, tt := range []struct { @@ -1165,6 +1169,10 @@ LatestReportDeadline = '1m42s' [Mercury.TLS] CertFile = '/path/to/cert.pem' + +[Mercury.Transmitter] +TransmitQueueMaxSize = 123 +TransmitTimeout = '3m54s' `}, {"full", full, fullTOML}, {"multi-chain", multiChain, multiChainTOML}, diff --git a/core/services/chainlink/relayer_factory.go b/core/services/chainlink/relayer_factory.go index 00db81cce37..31645b7c54d 100644 --- a/core/services/chainlink/relayer_factory.go +++ b/core/services/chainlink/relayer_factory.go @@ -19,6 +19,7 @@ import ( "github.com/smartcontractkit/chainlink-starknet/relayer/pkg/chainlink/config" "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm" + coreconfig "github.com/smartcontractkit/chainlink/v2/core/config" "github.com/smartcontractkit/chainlink/v2/core/config/env" "github.com/smartcontractkit/chainlink/v2/core/logger"
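Taken together, the `FindLCA` and `DeleteLogPollerDataAfter` methods added to `chainlink.Application` above are the programmatic core of the new `blocks find-lca` and `node remove-blocks` commands. A minimal sketch of the recovery flow they enable, assuming an `Application` wired up as in this diff; `recoverLogPoller` is hypothetical glue code written for illustration, not part of the PR:

```go
package recovery

import (
	"context"
	"errors"
	"fmt"
	"math/big"

	"github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
)

// recoverLogPoller (hypothetical) rewinds LogPoller state after a chain reorg
// that the node's database has diverged from.
func recoverLogPoller(ctx context.Context, app chainlink.Application, chainID *big.Int) error {
	// Find the newest block on which the database and the RPC node agree.
	lca, err := app.FindLCA(ctx, chainID)
	if err != nil {
		return fmt.Errorf("find LCA: %w", err)
	}
	if lca == nil {
		return errors.New("no common ancestor found")
	}
	// Assuming removal is inclusive of the given block number (as the CLI
	// command describes), start one block above the LCA to keep the LCA itself.
	return app.DeleteLogPollerDataAfter(ctx, chainID, lca.BlockNumber+1)
}
```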
"github.com/smartcontractkit/chainlink/v2/core/services/keystore" @@ -38,6 +39,7 @@ type RelayerFactory struct { type EVMFactoryConfig struct { legacyevm.ChainOpts evmrelay.CSAETHKeystore + coreconfig.MercuryTransmitter } func (r *RelayerFactory) NewEVM(ctx context.Context, config EVMFactoryConfig) (map[types.RelayID]evmrelay.LoopRelayAdapter, error) { @@ -67,9 +69,10 @@ func (r *RelayerFactory) NewEVM(ctx context.Context, config EVMFactoryConfig) (m } relayerOpts := evmrelay.RelayerOpts{ - DS: ccOpts.DS, - CSAETHKeystore: config.CSAETHKeystore, - MercuryPool: r.MercuryPool, + DS: ccOpts.DS, + CSAETHKeystore: config.CSAETHKeystore, + MercuryPool: r.MercuryPool, + TransmitterConfig: config.MercuryTransmitter, } relayer, err2 := evmrelay.NewRelayer(lggr.Named(relayID.ChainID), chain, relayerOpts) if err2 != nil { diff --git a/core/services/chainlink/testdata/config-empty-effective.toml b/core/services/chainlink/testdata/config-empty-effective.toml index 759a380d15c..38c3ed62017 100644 --- a/core/services/chainlink/testdata/config-empty-effective.toml +++ b/core/services/chainlink/testdata/config-empty-effective.toml @@ -230,6 +230,10 @@ LatestReportDeadline = '5s' [Mercury.TLS] CertFile = '' +[Mercury.Transmitter] +TransmitQueueMaxSize = 10000 +TransmitTimeout = '5s' + [Capabilities] [Capabilities.Peering] IncomingMessageBufferSize = 10 diff --git a/core/services/chainlink/testdata/config-full.toml b/core/services/chainlink/testdata/config-full.toml index 8a016149e59..b199ae530f5 100644 --- a/core/services/chainlink/testdata/config-full.toml +++ b/core/services/chainlink/testdata/config-full.toml @@ -240,6 +240,10 @@ LatestReportDeadline = '1m42s' [Mercury.TLS] CertFile = '/path/to/cert.pem' +[Mercury.Transmitter] +TransmitQueueMaxSize = 123 +TransmitTimeout = '3m54s' + [Capabilities] [Capabilities.Peering] IncomingMessageBufferSize = 13 diff --git a/core/services/chainlink/testdata/config-multi-chain-effective.toml b/core/services/chainlink/testdata/config-multi-chain-effective.toml index a6cba2aaac3..7aa3bb50b35 100644 --- a/core/services/chainlink/testdata/config-multi-chain-effective.toml +++ b/core/services/chainlink/testdata/config-multi-chain-effective.toml @@ -230,6 +230,10 @@ LatestReportDeadline = '5s' [Mercury.TLS] CertFile = '' +[Mercury.Transmitter] +TransmitQueueMaxSize = 10000 +TransmitTimeout = '5s' + [Capabilities] [Capabilities.Peering] IncomingMessageBufferSize = 10 diff --git a/core/services/gateway/handlers/functions/allowlist/orm.go b/core/services/gateway/handlers/functions/allowlist/orm.go index 7867c06d5d4..20a8ed15252 100644 --- a/core/services/gateway/handlers/functions/allowlist/orm.go +++ b/core/services/gateway/handlers/functions/allowlist/orm.go @@ -67,6 +67,11 @@ func (o *orm) GetAllowedSenders(ctx context.Context, offset, limit uint) ([]comm } func (o *orm) CreateAllowedSenders(ctx context.Context, allowedSenders []common.Address) error { + if len(allowedSenders) == 0 { + o.lggr.Debugf("empty allowed senders list: %v for routerContractAddress: %s. 
skipping...", allowedSenders, o.routerContractAddress) + return nil + } + var valuesPlaceholder []string for i := 1; i <= len(allowedSenders)*2; i += 2 { valuesPlaceholder = append(valuesPlaceholder, fmt.Sprintf("($%d, $%d)", i, i+1)) diff --git a/core/services/gateway/handlers/functions/allowlist/orm_test.go b/core/services/gateway/handlers/functions/allowlist/orm_test.go index 2584e131968..388d47a769b 100644 --- a/core/services/gateway/handlers/functions/allowlist/orm_test.go +++ b/core/services/gateway/handlers/functions/allowlist/orm_test.go @@ -128,6 +128,15 @@ func TestORM_CreateAllowedSenders(t *testing.T) { require.Equal(t, expected[0], results[0]) require.Equal(t, expected[1], results[1]) }) + + // this scenario can happen if the allowlist is empty but we call CreateAllowedSenders + t.Run("OK-empty_list", func(t *testing.T) { + ctx := testutils.Context(t) + orm, err := setupORM(t) + require.NoError(t, err) + err = orm.CreateAllowedSenders(ctx, []common.Address{}) + require.NoError(t, err) + }) } func TestORM_DeleteAllowedSenders(t *testing.T) { diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index dbde65efe40..4e1eb0cc623 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -198,6 +198,7 @@ type mercuryConfig interface { Credentials(credName string) *types.MercuryCredentials Cache() coreconfig.MercuryCache TLS() coreconfig.MercuryTLS + Transmitter() coreconfig.MercuryTransmitter } type thresholdConfig interface { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index b07b08d3354..e2c1a1531e2 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -42,7 +42,7 @@ var ( readJobQueueSize = 64 readLogsTimeout = 10 * time.Second - readMaxBatchSize = 32 + readMaxBatchSize = 56 // reorgBuffer is the number of blocks to add as a buffer to the block range when reading logs. 
reorgBuffer = int64(32) readerThreads = 4 diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go index 9097c217590..737a8e7561e 100644 --- a/core/services/relay/evm/evm.go +++ b/core/services/relay/evm/evm.go @@ -78,7 +78,8 @@ type Relayer struct { codec commontypes.Codec // Mercury - mercuryORM mercury.ORM + mercuryORM mercury.ORM + transmitterCfg mercury.TransmitterConfig // LLO/data streams cdcFactory llo.ChannelDefinitionCacheFactory @@ -93,7 +94,8 @@ type CSAETHKeystore interface { type RelayerOpts struct { DS sqlutil.DataSource CSAETHKeystore - MercuryPool wsrpc.Pool + MercuryPool wsrpc.Pool + TransmitterConfig mercury.TransmitterConfig } func (c RelayerOpts) Validate() error { @@ -122,14 +124,15 @@ func NewRelayer(lggr logger.Logger, chain legacyevm.Chain, opts RelayerOpts) (*R lloORM := llo.NewORM(opts.DS, chain.ID()) cdcFactory := llo.NewChannelDefinitionCacheFactory(lggr, lloORM, chain.LogPoller()) return &Relayer{ - ds: opts.DS, - chain: chain, - lggr: lggr, - ks: opts.CSAETHKeystore, - mercuryPool: opts.MercuryPool, - cdcFactory: cdcFactory, - lloORM: lloORM, - mercuryORM: mercuryORM, + ds: opts.DS, + chain: chain, + lggr: lggr, + ks: opts.CSAETHKeystore, + mercuryPool: opts.MercuryPool, + cdcFactory: cdcFactory, + lloORM: lloORM, + mercuryORM: mercuryORM, + transmitterCfg: opts.TransmitterConfig, }, nil } @@ -246,7 +249,7 @@ func (r *Relayer) NewMercuryProvider(rargs commontypes.RelayArgs, pargs commonty default: return nil, fmt.Errorf("invalid feed version %d", feedID.Version()) } - transmitter := mercury.NewTransmitter(lggr, clients, privKey.PublicKey, rargs.JobID, *relayConfig.FeedID, r.mercuryORM, transmitterCodec) + transmitter := mercury.NewTransmitter(lggr, r.transmitterCfg, clients, privKey.PublicKey, rargs.JobID, *relayConfig.FeedID, r.mercuryORM, transmitterCodec) return NewMercuryProvider(cp, r.chainReader, r.codec, NewMercuryChainReader(r.chain.HeadTracker()), transmitter, reportCodecV1, reportCodecV2, reportCodecV3, lggr), nil } diff --git a/core/services/relay/evm/mercury/transmitter.go b/core/services/relay/evm/mercury/transmitter.go index 6f49ca91bfc..82a76450e5f 100644 --- a/core/services/relay/evm/mercury/transmitter.go +++ b/core/services/relay/evm/mercury/transmitter.go @@ -23,6 +23,7 @@ import ( "github.com/smartcontractkit/libocr/offchainreporting2plus/chains/evmutil" ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config" "github.com/smartcontractkit/chainlink-common/pkg/services" "github.com/smartcontractkit/chainlink-common/pkg/types/mercury" @@ -33,12 +34,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/utils" ) -var ( - maxTransmitQueueSize = 10_000 - maxDeleteQueueSize = 10_000 - transmitTimeout = 5 * time.Second -) - const ( // Mercury server error codes DuplicateReport = 2 @@ -104,9 +99,15 @@ type TransmitterReportDecoder interface { var _ Transmitter = (*mercuryTransmitter)(nil) +type TransmitterConfig interface { + TransmitQueueMaxSize() uint32 + TransmitTimeout() commonconfig.Duration +} + type mercuryTransmitter struct { services.StateMachine lggr logger.Logger + cfg TransmitterConfig servers map[string]*server @@ -142,6 +143,8 @@ func getPayloadTypes() abi.Arguments { type server struct { lggr logger.Logger + transmitTimeout time.Duration + c wsrpc.Client pm *PersistenceManager q *TransmitQueue @@ -221,7 +224,7 @@ func (s *server) runQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup, feed // queue was closed 
return } - ctx, cancel := context.WithTimeout(runloopCtx, utils.WithJitter(transmitTimeout)) + ctx, cancel := context.WithTimeout(runloopCtx, utils.WithJitter(s.transmitTimeout)) res, err := s.c.Transmit(ctx, t.Req) cancel() if runloopCtx.Err() != nil { @@ -272,18 +275,19 @@ func (s *server) runQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup, feed } } -func NewTransmitter(lggr logger.Logger, clients map[string]wsrpc.Client, fromAccount ed25519.PublicKey, jobID int32, feedID [32]byte, orm ORM, codec TransmitterReportDecoder) *mercuryTransmitter { +func NewTransmitter(lggr logger.Logger, cfg TransmitterConfig, clients map[string]wsrpc.Client, fromAccount ed25519.PublicKey, jobID int32, feedID [32]byte, orm ORM, codec TransmitterReportDecoder) *mercuryTransmitter { feedIDHex := fmt.Sprintf("0x%x", feedID[:]) servers := make(map[string]*server, len(clients)) for serverURL, client := range clients { cLggr := lggr.Named(serverURL).With("serverURL", serverURL) - pm := NewPersistenceManager(cLggr, serverURL, orm, jobID, maxTransmitQueueSize, flushDeletesFrequency, pruneFrequency) + pm := NewPersistenceManager(cLggr, serverURL, orm, jobID, int(cfg.TransmitQueueMaxSize()), flushDeletesFrequency, pruneFrequency) servers[serverURL] = &server{ cLggr, + cfg.TransmitTimeout().Duration(), client, pm, - NewTransmitQueue(cLggr, serverURL, feedIDHex, maxTransmitQueueSize, pm), - make(chan *pb.TransmitRequest, maxDeleteQueueSize), + NewTransmitQueue(cLggr, serverURL, feedIDHex, int(cfg.TransmitQueueMaxSize()), pm), + make(chan *pb.TransmitRequest, int(cfg.TransmitQueueMaxSize())), transmitSuccessCount.WithLabelValues(feedIDHex, serverURL), transmitDuplicateCount.WithLabelValues(feedIDHex, serverURL), transmitConnectionErrorCount.WithLabelValues(feedIDHex, serverURL), @@ -295,6 +299,7 @@ func NewTransmitter(lggr logger.Logger, clients map[string]wsrpc.Client, fromAcc return &mercuryTransmitter{ services.StateMachine{}, lggr.Named("MercuryTransmitter").With("feedID", feedIDHex), + cfg, servers, codec, feedID, diff --git a/core/services/relay/evm/mercury/transmitter_test.go b/core/services/relay/evm/mercury/transmitter_test.go index 46bf116ed3a..b0da9bea635 100644 --- a/core/services/relay/evm/mercury/transmitter_test.go +++ b/core/services/relay/evm/mercury/transmitter_test.go @@ -4,6 +4,7 @@ import ( "context" "math/big" "testing" + "time" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/pkg/errors" @@ -12,6 +13,7 @@ import ( ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/v2/core/logger" @@ -21,6 +23,16 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb" ) +type mockCfg struct{} + +func (m mockCfg) TransmitQueueMaxSize() uint32 { + return 10_000 +} + +func (m mockCfg) TransmitTimeout() commonconfig.Duration { + return *commonconfig.MustNewDuration(1 * time.Hour) +} + func Test_MercuryTransmitter_Transmit(t *testing.T) { lggr := logger.TestLogger(t) db := pgtest.NewSqlxDB(t) @@ -36,7 +48,7 @@ func Test_MercuryTransmitter_Transmit(t *testing.T) { report := sampleV1Report c := &mocks.MockWSRPCClient{} clients[sURL] = c - mt := NewTransmitter(lggr, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, 
jobID, sampleFeedID, orm, codec) // init the queue since we skipped starting transmitter mt.servers[sURL].q.Init([]*Transmission{}) err := mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs) @@ -50,7 +62,7 @@ func Test_MercuryTransmitter_Transmit(t *testing.T) { report := sampleV2Report c := &mocks.MockWSRPCClient{} clients[sURL] = c - mt := NewTransmitter(lggr, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) // init the queue since we skipped starting transmitter mt.servers[sURL].q.Init([]*Transmission{}) err := mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs) @@ -64,7 +76,7 @@ func Test_MercuryTransmitter_Transmit(t *testing.T) { report := sampleV3Report c := &mocks.MockWSRPCClient{} clients[sURL] = c - mt := NewTransmitter(lggr, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) // init the queue since we skipped starting transmitter mt.servers[sURL].q.Init([]*Transmission{}) err := mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs) @@ -83,7 +95,7 @@ func Test_MercuryTransmitter_Transmit(t *testing.T) { clients[sURL2] = c clients[sURL3] = c - mt := NewTransmitter(lggr, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) // init the queue since we skipped starting transmitter mt.servers[sURL].q.Init([]*Transmission{}) mt.servers[sURL2].q.Init([]*Transmission{}) @@ -125,7 +137,7 @@ func Test_MercuryTransmitter_LatestTimestamp(t *testing.T) { }, } clients[sURL] = c - mt := NewTransmitter(lggr, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) ts, err := mt.LatestTimestamp(testutils.Context(t)) require.NoError(t, err) @@ -141,7 +153,7 @@ func Test_MercuryTransmitter_LatestTimestamp(t *testing.T) { }, } clients[sURL] = c - mt := NewTransmitter(lggr, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) ts, err := mt.LatestTimestamp(testutils.Context(t)) require.NoError(t, err) @@ -155,7 +167,7 @@ func Test_MercuryTransmitter_LatestTimestamp(t *testing.T) { }, } clients[sURL] = c - mt := NewTransmitter(lggr, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) _, err := mt.LatestTimestamp(testutils.Context(t)) require.Error(t, err) assert.Contains(t, err.Error(), "something exploded") @@ -185,7 +197,7 @@ func Test_MercuryTransmitter_LatestTimestamp(t *testing.T) { return out, nil }, } - mt := NewTransmitter(lggr, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) ts, err := mt.LatestTimestamp(testutils.Context(t)) require.NoError(t, err) @@ -228,7 +240,7 @@ func Test_MercuryTransmitter_LatestPrice(t *testing.T) { }, } clients[sURL] = c - mt := NewTransmitter(lggr, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) 
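The `mockCfg` used throughout these tests shows the shape of the new dependency: any type satisfying the two-method `TransmitterConfig` interface can tune the transmitter. Below is a sketch of a standalone implementation; `staticCfg` is hypothetical, and in the node itself the values come from the `[Mercury.Transmitter]` TOML table via `mercuryTransmitterConfig`:

```go
package mercurycfg

import (
	"time"

	commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
)

// staticCfg is a hypothetical fixed-value TransmitterConfig, analogous to the
// test's mockCfg; not part of this PR.
type staticCfg struct {
	queueSize uint32
	timeout   time.Duration
}

// TransmitQueueMaxSize bounds the per-server transmit queue.
func (c staticCfg) TransmitQueueMaxSize() uint32 { return c.queueSize }

// TransmitTimeout caps how long a single Transmit call may block.
func (c staticCfg) TransmitTimeout() commonconfig.Duration {
	return *commonconfig.MustNewDuration(c.timeout)
}
```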
t.Run("BenchmarkPriceFromReport succeeds", func(t *testing.T) { codec.val = originalPrice @@ -259,7 +271,7 @@ func Test_MercuryTransmitter_LatestPrice(t *testing.T) { }, } clients[sURL] = c - mt := NewTransmitter(lggr, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) price, err := mt.LatestPrice(testutils.Context(t), sampleFeedID) require.NoError(t, err) @@ -273,7 +285,7 @@ func Test_MercuryTransmitter_LatestPrice(t *testing.T) { }, } clients[sURL] = c - mt := NewTransmitter(lggr, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) _, err := mt.LatestPrice(testutils.Context(t), sampleFeedID) require.Error(t, err) assert.Contains(t, err.Error(), "something exploded") @@ -303,7 +315,7 @@ func Test_MercuryTransmitter_FetchInitialMaxFinalizedBlockNumber(t *testing.T) { }, } clients[sURL] = c - mt := NewTransmitter(lggr, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) bn, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) require.NoError(t, err) @@ -319,7 +331,7 @@ func Test_MercuryTransmitter_FetchInitialMaxFinalizedBlockNumber(t *testing.T) { }, } clients[sURL] = c - mt := NewTransmitter(lggr, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) bn, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) require.NoError(t, err) @@ -332,7 +344,7 @@ func Test_MercuryTransmitter_FetchInitialMaxFinalizedBlockNumber(t *testing.T) { }, } clients[sURL] = c - mt := NewTransmitter(lggr, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) _, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) require.Error(t, err) assert.Contains(t, err.Error(), "something exploded") @@ -350,7 +362,7 @@ func Test_MercuryTransmitter_FetchInitialMaxFinalizedBlockNumber(t *testing.T) { }, } clients[sURL] = c - mt := NewTransmitter(lggr, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec) _, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) require.Error(t, err) assert.Contains(t, err.Error(), "latestReport failed; mismatched feed IDs, expected: 0x1c916b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472, got: 0x") diff --git a/core/services/relay/evm/mercury/wsrpc/pb/mercury_wsrpc.pb.go b/core/services/relay/evm/mercury/wsrpc/pb/mercury_wsrpc.pb.go index 4d05db4380f..0c31a1d7ac9 100644 --- a/core/services/relay/evm/mercury/wsrpc/pb/mercury_wsrpc.pb.go +++ b/core/services/relay/evm/mercury/wsrpc/pb/mercury_wsrpc.pb.go @@ -11,6 +11,7 @@ import ( ) // MercuryClient is the client API for Mercury service. 
+// type MercuryClient interface { Transmit(ctx context.Context, in *TransmitRequest) (*TransmitResponse, error) LatestReport(ctx context.Context, in *LatestReportRequest) (*LatestReportResponse, error) diff --git a/core/web/api.go b/core/web/api.go index 1f97d59c77d..51f7b855cd5 100644 --- a/core/web/api.go +++ b/core/web/api.go @@ -120,7 +120,7 @@ func ParsePaginatedResponse(input []byte, resource interface{}, links *jsonapi.L func parsePaginatedResponseToDocument(input []byte, resource interface{}, document *jsonapi.Document) error { err := ParseJSONAPIResponse(input, resource) if err != nil { - return errors.Wrap(err, "ParseJSONAPIResponse error") + return errors.Wrapf(err, "ParseJSONAPIResponse error, body: %s", string(input)) } // Unmarshal using the stdlib Unmarshal to extract the links part of the document diff --git a/core/web/lca_controller.go b/core/web/lca_controller.go new file mode 100644 index 00000000000..bb4866c3d08 --- /dev/null +++ b/core/web/lca_controller.go @@ -0,0 +1,74 @@ +package web + +import ( + "errors" + "fmt" + "net/http" + + "github.com/gin-gonic/gin" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" + "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" +) + +type LCAController struct { + App chainlink.Application +} + +// FindLCA compares the chain of blocks available in the DB with the chain provided by the RPC and returns the last common ancestor +// Example: +// +// "/v2/find_lca" +func (bdc *LCAController) FindLCA(c *gin.Context) { + chain, err := getChain(bdc.App.GetRelayers().LegacyEVMChains(), c.Query("evmChainID")) + if err != nil { + if errors.Is(err, ErrInvalidChainID) || errors.Is(err, ErrMultipleChains) || errors.Is(err, ErrMissingChainID) { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + chainID := chain.ID() + + lca, err := bdc.App.FindLCA(c.Request.Context(), chainID) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + if lca == nil { + jsonAPIError(c, http.StatusNotFound, fmt.Errorf("failed to find last common ancestor")) + return + } + + response := LCAResponse{ + BlockNumber: lca.BlockNumber, + Hash: lca.BlockHash.String(), + EVMChainID: big.New(chainID), + } + jsonAPIResponse(c, &response, "response") + +} + +type LCAResponse struct { + BlockNumber int64 `json:"blockNumber"` + Hash string `json:"hash"` + EVMChainID *big.Big `json:"evmChainID"` +} + +// GetID returns the jsonapi ID. +func (s LCAResponse) GetID() string { + return "LCAResponseID" +} + +// GetName returns the collection name for jsonapi. +func (LCAResponse) GetName() string { + return "lca_response" +} + +// SetID is used to conform to the UnmarshallIdentifier interface for +// deserializing from jsonapi documents. 
+func (*LCAResponse) SetID(string) error { + return nil +} diff --git a/core/web/lca_controller_test.go b/core/web/lca_controller_test.go new file mode 100644 index 00000000000..7ec476e8eca --- /dev/null +++ b/core/web/lca_controller_test.go @@ -0,0 +1,29 @@ +package web_test + +import ( + _ "embed" + "io" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest" +) + +func TestLCAController_FindLCA(t *testing.T) { + cfg := configtest.NewTestGeneralConfig(t) + ec := setupEthClientForControllerTests(t) + app := cltest.NewApplicationWithConfigAndKey(t, cfg, cltest.DefaultP2PKey, ec) + require.NoError(t, app.Start(testutils.Context(t))) + client := app.NewHTTPClient(nil) + resp, cleanup := client.Get("/v2/find_lca?evmChainID=1") + t.Cleanup(cleanup) + assert.Equal(t, http.StatusUnprocessableEntity, resp.StatusCode) + b, err := io.ReadAll(resp.Body) + require.NoError(t, err) + assert.Contains(t, string(b), "chain id does not match any local chains") +} diff --git a/core/web/resolver/testdata/config-empty-effective.toml b/core/web/resolver/testdata/config-empty-effective.toml index 759a380d15c..38c3ed62017 100644 --- a/core/web/resolver/testdata/config-empty-effective.toml +++ b/core/web/resolver/testdata/config-empty-effective.toml @@ -230,6 +230,10 @@ LatestReportDeadline = '5s' [Mercury.TLS] CertFile = '' +[Mercury.Transmitter] +TransmitQueueMaxSize = 10000 +TransmitTimeout = '5s' + [Capabilities] [Capabilities.Peering] IncomingMessageBufferSize = 10 diff --git a/core/web/resolver/testdata/config-full.toml b/core/web/resolver/testdata/config-full.toml index 69d56974130..75fad4d2fc9 100644 --- a/core/web/resolver/testdata/config-full.toml +++ b/core/web/resolver/testdata/config-full.toml @@ -240,6 +240,10 @@ LatestReportDeadline = '1m42s' [Mercury.TLS] CertFile = '' +[Mercury.Transmitter] +TransmitQueueMaxSize = 123 +TransmitTimeout = '3m54s' + [Capabilities] [Capabilities.Peering] IncomingMessageBufferSize = 13 diff --git a/core/web/resolver/testdata/config-multi-chain-effective.toml b/core/web/resolver/testdata/config-multi-chain-effective.toml index a6cba2aaac3..7aa3bb50b35 100644 --- a/core/web/resolver/testdata/config-multi-chain-effective.toml +++ b/core/web/resolver/testdata/config-multi-chain-effective.toml @@ -230,6 +230,10 @@ LatestReportDeadline = '5s' [Mercury.TLS] CertFile = '' +[Mercury.Transmitter] +TransmitQueueMaxSize = 10000 +TransmitTimeout = '5s' + [Capabilities] [Capabilities.Peering] IncomingMessageBufferSize = 10 diff --git a/core/web/router.go b/core/web/router.go index c327583a005..158ea4b411f 100644 --- a/core/web/router.go +++ b/core/web/router.go @@ -292,6 +292,8 @@ func v2Routes(app chainlink.Application, r *gin.RouterGroup) { rc := ReplayController{app} authv2.POST("/replay_from_block/:number", auth.RequiresRunRole(rc.ReplayFromBlock)) + lcaC := LCAController{app} + authv2.GET("/find_lca", auth.RequiresRunRole(lcaC.FindLCA)) csakc := CSAKeysController{app} authv2.GET("/keys/csa", csakc.Index) diff --git a/docs/CONFIG.md b/docs/CONFIG.md index 0596fcdd84d..f93d990413f 100644 --- a/docs/CONFIG.md +++ b/docs/CONFIG.md @@ -1698,6 +1698,33 @@ CertFile = "/path/to/client/certs.pem" # Example ``` CertFile is the path to a PEM file of trusted root certificate authority certificates +## Mercury.Transmitter 
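The `TransmitQueueMaxSize` setting documented in this new section (see the reference entries just below) has drop-oldest semantics when the queue fills up. A toy sketch of that behavior; `boundedQueue` is a hypothetical stand-in, not the `TransmitQueue` implementation from this diff:

```go
package main

import "fmt"

// boundedQueue is a toy illustration of drop-oldest buffering; the node's
// actual TransmitQueue is more involved and backed by persistence.
type boundedQueue struct {
	max   int
	items []string // stand-in for queued transmit requests
}

func (q *boundedQueue) push(item string) {
	if len(q.items) == q.max {
		// Queue full: evict the oldest entry to make room for the newest.
		q.items = q.items[1:]
	}
	q.items = append(q.items, item)
}

func main() {
	q := &boundedQueue{max: 2}
	for _, r := range []string{"report-1", "report-2", "report-3"} {
		q.push(r)
	}
	fmt.Println(q.items) // prints [report-2 report-3]
}
```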
+```toml +[Mercury.Transmitter] +TransmitQueueMaxSize = 10_000 # Default +TransmitTimeout = "5s" # Default +``` +Mercury.Transmitter controls settings for the mercury transmitter. + +### TransmitQueueMaxSize +```toml +TransmitQueueMaxSize = 10_000 # Default +``` +TransmitQueueMaxSize controls the size of the transmit queue. This is scoped +per OCR instance. If the queue is full, the transmitter will start dropping +the oldest messages to make space. + +This is useful if the mercury server goes offline and the node operator needs +to buffer transmissions. + +### TransmitTimeout +```toml +TransmitTimeout = "5s" # Default +``` +TransmitTimeout controls how long the transmitter will wait for a response +when sending a message to the mercury server before aborting and considering +the transmission failed. + ## EVM EVM defaults depend on ChainID: diff --git a/testdata/scripts/blocks/help.txtar b/testdata/scripts/blocks/help.txtar index 55aaf71858d..5d362a082fd 100644 --- a/testdata/scripts/blocks/help.txtar +++ b/testdata/scripts/blocks/help.txtar @@ -9,7 +9,8 @@ USAGE: chainlink blocks command [command options] [arguments...] COMMANDS: - replay Replays block data from the given number + replay Replays block data from the given number + find-lca Find latest common block stored in DB and on chain OPTIONS: --help, -h show help diff --git a/testdata/scripts/help-all/help-all.txtar b/testdata/scripts/help-all/help-all.txtar index eeaf0da98d1..e111295abb4 100644 --- a/testdata/scripts/help-all/help-all.txtar +++ b/testdata/scripts/help-all/help-all.txtar @@ -16,6 +16,7 @@ admin users list # Lists all API users and their roles attempts # Commands for managing Ethereum Transaction Attempts attempts list # List the Transaction Attempts in descending order blocks # Commands for managing blocks +blocks find-lca # Find latest common block stored in DB and on chain blocks replay # Replays block data from the given number bridges # Commands for Bridges communicating with External Adapters bridges create # Create a new Bridge to an External Adapter @@ -132,6 +133,7 @@ node db status # Display the current database migration status. node db version # Display the current database version. node profile # Collects profile metrics from the node. node rebroadcast-transactions # Manually rebroadcast txs matching nonce range with the specified gas price. This is useful in emergencies e.g. high gas prices and/or network congestion to forcibly clear out the pending TX queue +node remove-blocks # Deletes block range and all associated data node start # Run the Chainlink node node status # Displays the health of various services running inside the node. node validate # Validate the TOML configuration and secrets that are passed as flags to the `node` command. Prints the full effective configuration, with defaults included diff --git a/testdata/scripts/node/help.txtar b/testdata/scripts/node/help.txtar index 33e1fdc90bc..875500b13df 100644 --- a/testdata/scripts/node/help.txtar +++ b/testdata/scripts/node/help.txtar @@ -13,6 +13,7 @@ COMMANDS: rebroadcast-transactions Manually rebroadcast txs matching nonce range with the specified gas price. This is useful in emergencies e.g. high gas prices and/or network congestion to forcibly clear out the pending TX queue validate Validate the TOML configuration and secrets that are passed as flags to the `node` command. Prints the full effective configuration, with defaults included db Commands for managing the database. 
+ remove-blocks Deletes block range and all associated data OPTIONS: --config value, -c value TOML configuration file(s) via flag, or raw TOML via env var. If used, legacy env vars must not be set. Multiple files can be used (-c configA.toml -c configB.toml), and they are applied in order with duplicated fields overriding any earlier values. If the 'CL_CONFIG' env var is specified, it is always processed last with the effect of being the final override. [$CL_CONFIG] diff --git a/testdata/scripts/node/validate/default.txtar b/testdata/scripts/node/validate/default.txtar index dd3af5f91b6..a8e8e41750d 100644 --- a/testdata/scripts/node/validate/default.txtar +++ b/testdata/scripts/node/validate/default.txtar @@ -242,6 +242,10 @@ LatestReportDeadline = '5s' [Mercury.TLS] CertFile = '' +[Mercury.Transmitter] +TransmitQueueMaxSize = 10000 +TransmitTimeout = '5s' + [Capabilities] [Capabilities.Peering] IncomingMessageBufferSize = 10 diff --git a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar index 15a476460da..feaf546f022 100644 --- a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar +++ b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar @@ -286,6 +286,10 @@ LatestReportDeadline = '5s' [Mercury.TLS] CertFile = '' +[Mercury.Transmitter] +TransmitQueueMaxSize = 10000 +TransmitTimeout = '5s' + [Capabilities] [Capabilities.Peering] IncomingMessageBufferSize = 10 diff --git a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar index cc8b4577bfb..b37fed41150 100644 --- a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar +++ b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar @@ -286,6 +286,10 @@ LatestReportDeadline = '5s' [Mercury.TLS] CertFile = '' +[Mercury.Transmitter] +TransmitQueueMaxSize = 10000 +TransmitTimeout = '5s' + [Capabilities] [Capabilities.Peering] IncomingMessageBufferSize = 10 diff --git a/testdata/scripts/node/validate/disk-based-logging.txtar b/testdata/scripts/node/validate/disk-based-logging.txtar index c578d200923..6ae02ab38f4 100644 --- a/testdata/scripts/node/validate/disk-based-logging.txtar +++ b/testdata/scripts/node/validate/disk-based-logging.txtar @@ -286,6 +286,10 @@ LatestReportDeadline = '5s' [Mercury.TLS] CertFile = '' +[Mercury.Transmitter] +TransmitQueueMaxSize = 10000 +TransmitTimeout = '5s' + [Capabilities] [Capabilities.Peering] IncomingMessageBufferSize = 10 diff --git a/testdata/scripts/node/validate/invalid-ocr-p2p.txtar b/testdata/scripts/node/validate/invalid-ocr-p2p.txtar index 91ae520532d..45c97477bd5 100644 --- a/testdata/scripts/node/validate/invalid-ocr-p2p.txtar +++ b/testdata/scripts/node/validate/invalid-ocr-p2p.txtar @@ -271,6 +271,10 @@ LatestReportDeadline = '5s' [Mercury.TLS] CertFile = '' +[Mercury.Transmitter] +TransmitQueueMaxSize = 10000 +TransmitTimeout = '5s' + [Capabilities] [Capabilities.Peering] IncomingMessageBufferSize = 10 diff --git a/testdata/scripts/node/validate/invalid.txtar b/testdata/scripts/node/validate/invalid.txtar index a5e4b766b6e..df0118bbbbf 100644 --- a/testdata/scripts/node/validate/invalid.txtar +++ b/testdata/scripts/node/validate/invalid.txtar @@ -276,6 +276,10 @@ LatestReportDeadline = '5s' [Mercury.TLS] CertFile = '' +[Mercury.Transmitter] +TransmitQueueMaxSize = 10000 +TransmitTimeout = '5s' + [Capabilities] [Capabilities.Peering] IncomingMessageBufferSize = 10 diff --git 
a/testdata/scripts/node/validate/valid.txtar b/testdata/scripts/node/validate/valid.txtar index c220d7f2e5f..edb07fd5e4f 100644 --- a/testdata/scripts/node/validate/valid.txtar +++ b/testdata/scripts/node/validate/valid.txtar @@ -283,6 +283,10 @@ LatestReportDeadline = '5s' [Mercury.TLS] CertFile = '' +[Mercury.Transmitter] +TransmitQueueMaxSize = 10000 +TransmitTimeout = '5s' + [Capabilities] [Capabilities.Peering] IncomingMessageBufferSize = 10 diff --git a/testdata/scripts/node/validate/warnings.txtar b/testdata/scripts/node/validate/warnings.txtar index 018aaf95f4c..cf121e959e1 100644 --- a/testdata/scripts/node/validate/warnings.txtar +++ b/testdata/scripts/node/validate/warnings.txtar @@ -265,6 +265,10 @@ LatestReportDeadline = '5s' [Mercury.TLS] CertFile = '' +[Mercury.Transmitter] +TransmitQueueMaxSize = 10000 +TransmitTimeout = '5s' + [Capabilities] [Capabilities.Peering] IncomingMessageBufferSize = 10 diff --git a/tools/bin/codecov b/tools/bin/codecov deleted file mode 100755 index 36513ce06df..00000000000 --- a/tools/bin/codecov +++ /dev/null @@ -1,1888 +0,0 @@ -#!/usr/bin/env bash - -# Apache License Version 2.0, January 2004 -# https://github.com/codecov/codecov-bash/blob/master/LICENSE - -set -e +o pipefail - -VERSION="1.0.6" - -codecov_flags=( ) -url="https://codecov.io" -env="$CODECOV_ENV" -service="" -token="" -search_in="" -# shellcheck disable=SC2153 -flags="$CODECOV_FLAGS" -exit_with=0 -curlargs="" -curlawsargs="" -dump="0" -clean="0" -curl_s="-s" -name="$CODECOV_NAME" -include_cov="" -exclude_cov="" -ddp="$HOME/Library/Developer/Xcode/DerivedData" -xp="" -files="" -save_to="" -direct_file_upload="" -cacert="$CODECOV_CA_BUNDLE" -gcov_ignore="-not -path './bower_components/**' -not -path './node_modules/**' -not -path './vendor/**'" -gcov_include="" - -ft_gcov="1" -ft_coveragepy="1" -ft_fix="1" -ft_search="1" -ft_s3="1" -ft_network="1" -ft_xcodellvm="1" -ft_xcodeplist="0" -ft_gcovout="1" -ft_html="0" -ft_yaml="0" - -_git_root=$(git rev-parse --show-toplevel 2>/dev/null || hg root 2>/dev/null || echo "$PWD") -git_root="$_git_root" -remote_addr="" -if [ "$git_root" = "$PWD" ]; -then - git_root="." -fi - -branch_o="" -build_o="" -commit_o="" -pr_o="" -prefix_o="" -network_filter_o="" -search_in_o="" -slug_o="" -tag_o="" -url_o="" -git_ls_files_recurse_submodules_o="" -package="bash" - -commit="$VCS_COMMIT_ID" -branch="$VCS_BRANCH_NAME" -pr="$VCS_PULL_REQUEST" -slug="$VCS_SLUG" -tag="$VCS_TAG" -build_url="$CI_BUILD_URL" -build="$CI_BUILD_ID" -job="$CI_JOB_ID" - -beta_xcode_partials="" - -proj_root="$git_root" -gcov_exe="gcov" -gcov_arg="" - -b="\033[0;36m" -g="\033[0;32m" -r="\033[0;31m" -e="\033[0;90m" -y="\033[0;33m" -x="\033[0m" - -show_help() { -cat << EOF - - Codecov Bash $VERSION - - Global report uploading tool for Codecov - Documentation at https://docs.codecov.io/docs - Contribute at https://github.com/codecov/codecov-bash - - - -h Display this help and exit - -f FILE Target file(s) to upload - - -f "path/to/file" only upload this file - skips searching unless provided patterns below - - -f '!*.bar' ignore all files at pattern *.bar - -f '*.foo' include all files at pattern *.foo - Must use single quotes. - This is non-exclusive, use -s "*.foo" to match specific paths. - - -s DIR Directory to search for coverage reports. - Already searches project root and artifact folders. - -t TOKEN Set the private repository token - (option) set environment variable CODECOV_TOKEN=:uuid - - -t @/path/to/token_file - -t uuid - - -n NAME Custom defined name of the upload. 
Visible in Codecov UI - - -e ENV Specify environment variables to be included with this build - Also accepting environment variables: CODECOV_ENV=VAR,VAR2 - - -e VAR,VAR2 - - -k prefix Prefix filepaths to help resolve path fixing - - -i prefix Only include files in the network with a certain prefix. Useful for upload-specific path fixing - - -X feature Toggle functionalities - - -X gcov Disable gcov - -X coveragepy Disable python coverage - -X fix Disable report fixing - -X search Disable searching for reports - -X xcode Disable xcode processing - -X network Disable uploading the file network - -X gcovout Disable gcov output - -X html Enable coverage for HTML files - -X recursesubs Enable recurse submodules in git projects when searching for source files - -X yaml Enable coverage for YAML files - - -N The commit SHA of the parent for which you are uploading coverage. If not present, - the parent will be determined using the API of your repository provider. - When using the repository provider's API, the parent is determined via finding - the closest ancestor to the commit. - - -R root dir Used when not in git/hg project to identify project root directory - -F flag Flag the upload to group coverage metrics - - -F unittests This upload is only unittests - -F integration This upload is only integration tests - -F ui,chrome This upload is Chrome - UI tests - - -c Move discovered coverage reports to the trash - -z FILE Upload specified file directly to Codecov and bypass all report generation. - This is inteded to be used only with a pre-formatted Codecov report and is not - expected to work under any other circumstances. - -Z Exit with 1 if not successful. Default will Exit with 0 - - -- xcode -- - -D Custom Derived Data Path for Coverage.profdata and gcov processing - Default '~/Library/Developer/Xcode/DerivedData' - -J Specify packages to build coverage. Uploader will only build these packages. - This can significantly reduces time to build coverage reports. - - -J 'MyAppName' Will match "MyAppName" and "MyAppNameTests" - -J '^ExampleApp$' Will match only "ExampleApp" not "ExampleAppTests" - - -- gcov -- - -g GLOB Paths to ignore during gcov gathering - -G GLOB Paths to include during gcov gathering - -p dir Project root directory - Also used when preparing gcov - -x gcovexe gcov executable to run. Defaults to 'gcov' - -a gcovargs extra arguments to pass to gcov - - -- Override CI Environment Variables -- - These variables are automatically detected by popular CI providers - - -B branch Specify the branch name - -C sha Specify the commit sha - -P pr Specify the pull request number - -b build Specify the build number - -T tag Specify the git tag - - -- Enterprise -- - -u URL Set the target url for Enterprise customers - Not required when retrieving the bash uploader from your CCE - (option) Set environment variable CODECOV_URL=https://my-hosted-codecov.com - -r SLUG owner/repo slug used instead of the private repo token in Enterprise - (option) set environment variable CODECOV_SLUG=:owner/:repo - (option) set in your codecov.yml "codecov.slug" - -S PATH File path to your cacert.pem file used to verify ssl with Codecov Enterprise (optional) - (option) Set environment variable: CODECOV_CA_BUNDLE="/path/to/ca.pem" - -U curlargs Extra curl arguments to communicate with Codecov. e.g., -U "--proxy http://http-proxy" - -A curlargs Extra curl arguments to communicate with AWS. 
- - -- Debugging -- - -d Don't upload, but dump upload file to stdout - -q PATH Write upload file to path - -K Remove color from the output - -v Verbose mode - -EOF -} - - -say() { - echo -e "$1" -} - - -urlencode() { - echo "$1" | curl -Gso /dev/null -w "%{url_effective}" --data-urlencode @- "" | cut -c 3- | sed -e 's/%0A//' -} - -swiftcov() { - _dir=$(dirname "$1" | sed 's/\(Build\).*/\1/g') - for _type in app framework xctest - do - find "$_dir" -name "*.$_type" | while read -r f - do - _proj=${f##*/} - _proj=${_proj%."$_type"} - if [ "$2" = "" ] || [ "$(echo "$_proj" | grep -i "$2")" != "" ]; - then - say " $g+$x Building reports for $_proj $_type" - dest=$([ -f "$f/$_proj" ] && echo "$f/$_proj" || echo "$f/Contents/MacOS/$_proj") - # shellcheck disable=SC2001 - _proj_name=$(echo "$_proj" | sed -e 's/[[:space:]]//g') - # shellcheck disable=SC2086 - xcrun llvm-cov show $beta_xcode_partials -instr-profile "$1" "$dest" > "$_proj_name.$_type.coverage.txt" \ - || say " ${r}x>${x} llvm-cov failed to produce results for $dest" - fi - done - done -} - - -# Credits to: https://gist.github.com/pkuczynski/8665367 -parse_yaml() { - local prefix=$2 - local s='[[:space:]]*' w='[a-zA-Z0-9_]*' - local fs - fs=$(echo @|tr @ '\034') - sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \ - -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$1" | - awk -F"$fs" '{ - indent = length($1)/2; - vname[indent] = $2; - for (i in vname) {if (i > indent) {delete vname[i]}} - if (length($3) > 0) { - vn=""; if (indent > 0) {vn=(vn)(vname[0])("_")} - printf("%s%s%s=\"%s\"\n", "'"$prefix"'",vn, $2, $3); - } - }' -} - -if [ $# != 0 ]; -then - while getopts "a:A:b:B:cC:dD:e:f:F:g:G:hi:J:k:Kn:p:P:Q:q:r:R:s:S:t:T:u:U:vx:X:Zz:N:-" o - do - codecov_flags+=( "$o" ) - case "$o" in - "-") - echo -e "${r}Long options are not supported${x}" - exit 2 - ;; - "?") - ;; - "N") - parent=$OPTARG - ;; - "a") - gcov_arg=$OPTARG - ;; - "A") - curlawsargs="$OPTARG" - ;; - "b") - build_o="$OPTARG" - ;; - "B") - branch_o="$OPTARG" - ;; - "c") - clean="1" - ;; - "C") - commit_o="$OPTARG" - ;; - "d") - dump="1" - ;; - "D") - ddp="$OPTARG" - ;; - "e") - env="$env,$OPTARG" - ;; - "f") - if [ "${OPTARG::1}" = "!" 
]; - then - exclude_cov="$exclude_cov -not -path '${OPTARG:1}'" - - elif [[ "$OPTARG" = *"*"* ]]; - then - include_cov="$include_cov -or -path '$OPTARG'" - - else - ft_search=0 - if [ "$files" = "" ]; - then - files="$OPTARG" - else - files="$files -$OPTARG" - fi - fi - ;; - "F") - if [ "$flags" = "" ]; - then - flags="$OPTARG" - else - flags="$flags,$OPTARG" - fi - ;; - "g") - gcov_ignore="$gcov_ignore -not -path '$OPTARG'" - ;; - "G") - gcov_include="$gcov_include -path '$OPTARG'" - ;; - "h") - show_help - exit 0; - ;; - "i") - network_filter_o="$OPTARG" - ;; - "J") - ft_xcodellvm="1" - ft_xcodeplist="0" - if [ "$xp" = "" ]; - then - xp="$OPTARG" - else - xp="$xp\|$OPTARG" - fi - ;; - "k") - prefix_o=$(echo "$OPTARG" | sed -e 's:^/*::' -e 's:/*$::') - ;; - "K") - b="" - g="" - r="" - e="" - x="" - ;; - "n") - name="$OPTARG" - ;; - "p") - proj_root="$OPTARG" - ;; - "P") - pr_o="$OPTARG" - ;; - "Q") - # this is only meant for Codecov packages to overwrite - package="$OPTARG" - ;; - "q") - save_to="$OPTARG" - ;; - "r") - slug_o="$OPTARG" - ;; - "R") - git_root="$OPTARG" - ;; - "s") - if [ "$search_in_o" = "" ]; - then - search_in_o="$OPTARG" - else - search_in_o="$search_in_o $OPTARG" - fi - ;; - "S") - # shellcheck disable=SC2089 - cacert="--cacert \"$OPTARG\"" - ;; - "t") - if [ "${OPTARG::1}" = "@" ]; - then - token=$(< "${OPTARG:1}" tr -d ' \n') - else - token="$OPTARG" - fi - ;; - "T") - tag_o="$OPTARG" - ;; - "u") - url_o=$(echo "$OPTARG" | sed -e 's/\/$//') - ;; - "U") - curlargs="$OPTARG" - ;; - "v") - set -x - curl_s="" - ;; - "x") - gcov_exe=$OPTARG - ;; - "X") - if [ "$OPTARG" = "gcov" ]; - then - ft_gcov="0" - elif [ "$OPTARG" = "coveragepy" ] || [ "$OPTARG" = "py" ]; - then - ft_coveragepy="0" - elif [ "$OPTARG" = "gcovout" ]; - then - ft_gcovout="0" - elif [ "$OPTARG" = "xcodellvm" ]; - then - ft_xcodellvm="1" - ft_xcodeplist="0" - elif [ "$OPTARG" = "fix" ] || [ "$OPTARG" = "fixes" ]; - then - ft_fix="0" - elif [ "$OPTARG" = "xcode" ]; - then - ft_xcodellvm="0" - ft_xcodeplist="0" - elif [ "$OPTARG" = "search" ]; - then - ft_search="0" - elif [ "$OPTARG" = "xcodepartials" ]; - then - beta_xcode_partials="-use-color" - elif [ "$OPTARG" = "network" ]; - then - ft_network="0" - elif [ "$OPTARG" = "s3" ]; - then - ft_s3="0" - elif [ "$OPTARG" = "html" ]; - then - ft_html="1" - elif [ "$OPTARG" = "recursesubs" ]; - then - git_ls_files_recurse_submodules_o="--recurse-submodules" - elif [ "$OPTARG" = "yaml" ]; - then - ft_yaml="1" - fi - ;; - "Z") - exit_with=1 - ;; - "z") - direct_file_upload="$OPTARG" - ft_gcov="0" - ft_coveragepy="0" - ft_fix="0" - ft_search="0" - ft_network="0" - ft_xcodellvm="0" - ft_gcovout="0" - include_cov="" - ;; - *) - echo -e "${r}Unexpected flag not supported${x}" - ;; - esac - done -fi - -say " - _____ _ - / ____| | | -| | ___ __| | ___ ___ _____ __ -| | / _ \\ / _\` |/ _ \\/ __/ _ \\ \\ / / -| |___| (_) | (_| | __/ (_| (_) \\ V / - \\_____\\___/ \\__,_|\\___|\\___\\___/ \\_/ - Bash-$VERSION - -" - -# check for installed tools -# git/hg -if [ "$direct_file_upload" = "" ]; -then - if [ -x "$(command -v git)" ]; - then - say "$b==>$x $(git --version) found" - else - say "$y==>$x git not installed, testing for mercurial" - if [ -x "$(command -v hg)" ]; - then - say "$b==>$x $(hg --version) found" - else - say "$r==>$x git nor mercurial are installed. Uploader may fail or have unintended consequences" - fi - fi -fi -# curl -if [ -x "$(command -v curl)" ]; -then - say "$b==>$x $(curl --version)" -else - say "$r==>$x curl not installed. Exiting." 
- exit ${exit_with}; -fi - -search_in="$proj_root" - -#shellcheck disable=SC2154 -if [ "$JENKINS_URL" != "" ]; -then - say "$e==>$x Jenkins CI detected." - # https://wiki.jenkins-ci.org/display/JENKINS/Building+a+software+project - # https://wiki.jenkins-ci.org/display/JENKINS/GitHub+pull+request+builder+plugin#GitHubpullrequestbuilderplugin-EnvironmentVariables - service="jenkins" - - # shellcheck disable=SC2154 - if [ "$ghprbSourceBranch" != "" ]; - then - branch="$ghprbSourceBranch" - elif [ "$GIT_BRANCH" != "" ]; - then - branch="$GIT_BRANCH" - elif [ "$BRANCH_NAME" != "" ]; - then - branch="$BRANCH_NAME" - fi - - # shellcheck disable=SC2154 - if [ "$ghprbActualCommit" != "" ]; - then - commit="$ghprbActualCommit" - elif [ "$GIT_COMMIT" != "" ]; - then - commit="$GIT_COMMIT" - fi - - # shellcheck disable=SC2154 - if [ "$ghprbPullId" != "" ]; - then - pr="$ghprbPullId" - elif [ "$CHANGE_ID" != "" ]; - then - pr="$CHANGE_ID" - fi - - build="$BUILD_NUMBER" - # shellcheck disable=SC2153 - build_url=$(urlencode "$BUILD_URL") - -elif [ "$CI" = "true" ] && [ "$TRAVIS" = "true" ] && [ "$SHIPPABLE" != "true" ]; -then - say "$e==>$x Travis CI detected." - # https://docs.travis-ci.com/user/environment-variables/ - service="travis" - commit="${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT}" - build="$TRAVIS_JOB_NUMBER" - pr="$TRAVIS_PULL_REQUEST" - job="$TRAVIS_JOB_ID" - slug="$TRAVIS_REPO_SLUG" - env="$env,TRAVIS_OS_NAME" - tag="$TRAVIS_TAG" - if [ "$TRAVIS_BRANCH" != "$TRAVIS_TAG" ]; - then - branch="${TRAVIS_PULL_REQUEST_BRANCH:-$TRAVIS_BRANCH}" - fi - - language=$(compgen -A variable | grep "^TRAVIS_.*_VERSION$" | head -1) - if [ "$language" != "" ]; - then - env="$env,${!language}" - fi - -elif [ "$CODEBUILD_CI" = "true" ]; -then - say "$e==>$x AWS Codebuild detected." - # https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-env-vars.html - service="codebuild" - commit="$CODEBUILD_RESOLVED_SOURCE_VERSION" - build="$CODEBUILD_BUILD_ID" - branch="$(echo "$CODEBUILD_WEBHOOK_HEAD_REF" | sed 's/^refs\/heads\///')" - if [ "${CODEBUILD_SOURCE_VERSION/pr}" = "$CODEBUILD_SOURCE_VERSION" ] ; then - pr="false" - else - pr="$(echo "$CODEBUILD_SOURCE_VERSION" | sed 's/^pr\///')" - fi - job="$CODEBUILD_BUILD_ID" - slug="$(echo "$CODEBUILD_SOURCE_REPO_URL" | sed 's/^.*:\/\/[^\/]*\///' | sed 's/\.git$//')" - -elif [ "$CI" = "true" ] && [ "$CI_NAME" = "codeship" ]; -then - say "$e==>$x Codeship CI detected." - # https://www.codeship.io/documentation/continuous-integration/set-environment-variables/ - service="codeship" - branch="$CI_BRANCH" - build="$CI_BUILD_NUMBER" - build_url=$(urlencode "$CI_BUILD_URL") - commit="$CI_COMMIT_ID" - -elif [ -n "$CF_BUILD_URL" ] && [ -n "$CF_BUILD_ID" ]; -then - say "$e==>$x Codefresh CI detected." - # https://docs.codefresh.io/v1.0/docs/variables - service="codefresh" - branch="$CF_BRANCH" - build="$CF_BUILD_ID" - build_url=$(urlencode "$CF_BUILD_URL") - commit="$CF_REVISION" - -elif [ "$TEAMCITY_VERSION" != "" ]; -then - say "$e==>$x TeamCity CI detected." - # https://confluence.jetbrains.com/display/TCD8/Predefined+Build+Parameters - # https://confluence.jetbrains.com/plugins/servlet/mobile#content/view/74847298 - if [ "$TEAMCITY_BUILD_BRANCH" = '' ]; - then - echo " Teamcity does not automatically make build parameters available as environment variables." 
- echo " Add the following environment parameters to the build configuration" - echo " env.TEAMCITY_BUILD_BRANCH = %teamcity.build.branch%" - echo " env.TEAMCITY_BUILD_ID = %teamcity.build.id%" - echo " env.TEAMCITY_BUILD_URL = %teamcity.serverUrl%/viewLog.html?buildId=%teamcity.build.id%" - echo " env.TEAMCITY_BUILD_COMMIT = %system.build.vcs.number%" - echo " env.TEAMCITY_BUILD_REPOSITORY = %vcsroot..url%" - fi - service="teamcity" - branch="$TEAMCITY_BUILD_BRANCH" - build="$TEAMCITY_BUILD_ID" - build_url=$(urlencode "$TEAMCITY_BUILD_URL") - if [ "$TEAMCITY_BUILD_COMMIT" != "" ]; - then - commit="$TEAMCITY_BUILD_COMMIT" - else - commit="$BUILD_VCS_NUMBER" - fi - remote_addr="$TEAMCITY_BUILD_REPOSITORY" - -elif [ "$CI" = "true" ] && [ "$CIRCLECI" = "true" ]; -then - say "$e==>$x Circle CI detected." - # https://circleci.com/docs/environment-variables - service="circleci" - branch="$CIRCLE_BRANCH" - build="$CIRCLE_BUILD_NUM" - job="$CIRCLE_NODE_INDEX" - if [ "$CIRCLE_PROJECT_REPONAME" != "" ]; - then - slug="$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME" - else - # git@github.com:owner/repo.git - slug="${CIRCLE_REPOSITORY_URL##*:}" - # owner/repo.git - slug="${slug%%.git}" - fi - pr="${CIRCLE_PULL_REQUEST##*/}" - commit="$CIRCLE_SHA1" - search_in="$search_in $CIRCLE_ARTIFACTS $CIRCLE_TEST_REPORTS" - -elif [ "$BUDDYBUILD_BRANCH" != "" ]; -then - say "$e==>$x buddybuild detected" - # http://docs.buddybuild.com/v6/docs/custom-prebuild-and-postbuild-steps - service="buddybuild" - branch="$BUDDYBUILD_BRANCH" - build="$BUDDYBUILD_BUILD_NUMBER" - build_url="https://dashboard.buddybuild.com/public/apps/$BUDDYBUILD_APP_ID/build/$BUDDYBUILD_BUILD_ID" - # BUDDYBUILD_TRIGGERED_BY - if [ "$ddp" = "$HOME/Library/Developer/Xcode/DerivedData" ]; - then - ddp="/private/tmp/sandbox/${BUDDYBUILD_APP_ID}/bbtest" - fi - -elif [ "${bamboo_planRepository_revision}" != "" ]; -then - say "$e==>$x Bamboo detected" - # https://confluence.atlassian.com/bamboo/bamboo-variables-289277087.html#Bamboovariables-Build-specificvariables - service="bamboo" - commit="${bamboo_planRepository_revision}" - # shellcheck disable=SC2154 - branch="${bamboo_planRepository_branch}" - # shellcheck disable=SC2154 - build="${bamboo_buildNumber}" - # shellcheck disable=SC2154 - build_url="${bamboo_buildResultsUrl}" - # shellcheck disable=SC2154 - remote_addr="${bamboo_planRepository_repositoryUrl}" - -elif [ "$CI" = "true" ] && [ "$BITRISE_IO" = "true" ]; -then - # http://devcenter.bitrise.io/faq/available-environment-variables/ - say "$e==>$x Bitrise CI detected." - service="bitrise" - branch="$BITRISE_GIT_BRANCH" - build="$BITRISE_BUILD_NUMBER" - build_url=$(urlencode "$BITRISE_BUILD_URL") - pr="$BITRISE_PULL_REQUEST" - if [ "$GIT_CLONE_COMMIT_HASH" != "" ]; - then - commit="$GIT_CLONE_COMMIT_HASH" - fi - -elif [ "$CI" = "true" ] && [ "$SEMAPHORE" = "true" ]; -then - say "$e==>$x Semaphore CI detected." -# https://docs.semaphoreci.com/ci-cd-environment/environment-variables/#semaphore-related - service="semaphore" - branch="$SEMAPHORE_GIT_BRANCH" - build="$SEMAPHORE_WORKFLOW_NUMBER" - job="$SEMAPHORE_JOB_ID" - pr="$PULL_REQUEST_NUMBER" - slug="$SEMAPHORE_REPO_SLUG" - commit="$REVISION" - env="$env,SEMAPHORE_TRIGGER_SOURCE" - -elif [ "$CI" = "true" ] && [ "$BUILDKITE" = "true" ]; -then - say "$e==>$x Buildkite CI detected." 
- # https://buildkite.com/docs/guides/environment-variables - service="buildkite" - branch="$BUILDKITE_BRANCH" - build="$BUILDKITE_BUILD_NUMBER" - job="$BUILDKITE_JOB_ID" - build_url=$(urlencode "$BUILDKITE_BUILD_URL") - slug="$BUILDKITE_PROJECT_SLUG" - commit="$BUILDKITE_COMMIT" - if [[ "$BUILDKITE_PULL_REQUEST" != "false" ]]; then - pr="$BUILDKITE_PULL_REQUEST" - fi - tag="$BUILDKITE_TAG" - -elif [ "$CI" = "drone" ] || [ "$DRONE" = "true" ]; -then - say "$e==>$x Drone CI detected." - # http://docs.drone.io/env.html - # drone commits are not full shas - service="drone.io" - branch="$DRONE_BRANCH" - build="$DRONE_BUILD_NUMBER" - build_url=$(urlencode "${DRONE_BUILD_LINK}") - pr="$DRONE_PULL_REQUEST" - job="$DRONE_JOB_NUMBER" - tag="$DRONE_TAG" - -elif [ "$CI" = "true" ] && [ "$HEROKU_TEST_RUN_BRANCH" != "" ]; -then - say "$e==>$x Heroku CI detected." - # https://devcenter.heroku.com/articles/heroku-ci#environment-variables - service="heroku" - branch="$HEROKU_TEST_RUN_BRANCH" - build="$HEROKU_TEST_RUN_ID" - commit="$HEROKU_TEST_RUN_COMMIT_VERSION" - -elif [[ "$CI" = "true" || "$CI" = "True" ]] && [[ "$APPVEYOR" = "true" || "$APPVEYOR" = "True" ]]; -then - say "$e==>$x Appveyor CI detected." - # http://www.appveyor.com/docs/environment-variables - service="appveyor" - branch="$APPVEYOR_REPO_BRANCH" - build=$(urlencode "$APPVEYOR_JOB_ID") - pr="$APPVEYOR_PULL_REQUEST_NUMBER" - job="$APPVEYOR_ACCOUNT_NAME%2F$APPVEYOR_PROJECT_SLUG%2F$APPVEYOR_BUILD_VERSION" - slug="$APPVEYOR_REPO_NAME" - commit="$APPVEYOR_REPO_COMMIT" - build_url=$(urlencode "${APPVEYOR_URL}/project/${APPVEYOR_REPO_NAME}/builds/$APPVEYOR_BUILD_ID/job/${APPVEYOR_JOB_ID}") - -elif [ "$CI" = "true" ] && [ "$WERCKER_GIT_BRANCH" != "" ]; -then - say "$e==>$x Wercker CI detected." - # http://devcenter.wercker.com/articles/steps/variables.html - service="wercker" - branch="$WERCKER_GIT_BRANCH" - build="$WERCKER_MAIN_PIPELINE_STARTED" - slug="$WERCKER_GIT_OWNER/$WERCKER_GIT_REPOSITORY" - commit="$WERCKER_GIT_COMMIT" - -elif [ "$CI" = "true" ] && [ "$MAGNUM" = "true" ]; -then - say "$e==>$x Magnum CI detected." - # https://magnum-ci.com/docs/environment - service="magnum" - branch="$CI_BRANCH" - build="$CI_BUILD_NUMBER" - commit="$CI_COMMIT" - -elif [ "$SHIPPABLE" = "true" ]; -then - say "$e==>$x Shippable CI detected." - # http://docs.shippable.com/ci_configure/ - service="shippable" - # shellcheck disable=SC2153 - branch=$([ "$HEAD_BRANCH" != "" ] && echo "$HEAD_BRANCH" || echo "$BRANCH") - build="$BUILD_NUMBER" - build_url=$(urlencode "$BUILD_URL") - pr="$PULL_REQUEST" - slug="$REPO_FULL_NAME" - # shellcheck disable=SC2153 - commit="$COMMIT" - -elif [ "$TDDIUM" = "true" ]; -then - say "Solano CI detected." - # http://docs.solanolabs.com/Setup/tddium-set-environment-variables/ - service="solano" - commit="$TDDIUM_CURRENT_COMMIT" - branch="$TDDIUM_CURRENT_BRANCH" - build="$TDDIUM_TID" - pr="$TDDIUM_PR_ID" - -elif [ "$GREENHOUSE" = "true" ]; -then - say "$e==>$x Greenhouse CI detected." - # http://docs.greenhouseci.com/docs/environment-variables-files - service="greenhouse" - branch="$GREENHOUSE_BRANCH" - build="$GREENHOUSE_BUILD_NUMBER" - build_url=$(urlencode "$GREENHOUSE_BUILD_URL") - pr="$GREENHOUSE_PULL_REQUEST" - commit="$GREENHOUSE_COMMIT" - search_in="$search_in $GREENHOUSE_EXPORT_DIR" - -elif [ "$GITLAB_CI" != "" ]; -then - say "$e==>$x GitLab CI detected." 
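
Throughout these provider blocks, build URLs are passed through a `urlencode` helper (defined earlier in the script) before being spliced into the upload query string. The script's own implementation is not shown in this hunk; a typical pure-bash version, for reference only, looks like this:

```bash
#!/usr/bin/env bash
# Percent-encode everything except RFC 3986 unreserved characters.
urlencode() {
  local s="$1" out="" c i
  for ((i = 0; i < ${#s}; i++)); do
    c="${s:i:1}"
    case "$c" in
      [a-zA-Z0-9.~_-]) out+="$c" ;;
      *) out+=$(printf '%%%02X' "'$c") ;;   # "'$c" yields the character's code point
    esac
  done
  echo "$out"
}

urlencode "https://ci.example/build?id=42"   # -> https%3A%2F%2Fci.example%2Fbuild%3Fid%3D42
```
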
- # http://doc.gitlab.com/ce/ci/variables/README.html - service="gitlab" - branch="${CI_BUILD_REF_NAME:-$CI_COMMIT_REF_NAME}" - build="${CI_BUILD_ID:-$CI_JOB_ID}" - remote_addr="${CI_BUILD_REPO:-$CI_REPOSITORY_URL}" - commit="${CI_BUILD_REF:-$CI_COMMIT_SHA}" - slug="${CI_PROJECT_PATH}" - -elif [ "$GITHUB_ACTIONS" != "" ]; -then - say "$e==>$x GitHub Actions detected." - say " Env vars used:" - say " -> GITHUB_ACTIONS: ${GITHUB_ACTIONS}" - say " -> GITHUB_HEAD_REF: ${GITHUB_HEAD_REF}" - say " -> GITHUB_REF: ${GITHUB_REF}" - say " -> GITHUB_REPOSITORY: ${GITHUB_REPOSITORY}" - say " -> GITHUB_RUN_ID: ${GITHUB_RUN_ID}" - say " -> GITHUB_SHA: ${GITHUB_SHA}" - say " -> GITHUB_WORKFLOW: ${GITHUB_WORKFLOW}" - - # https://github.com/features/actions - service="github-actions" - - # https://help.github.com/en/articles/virtual-environments-for-github-actions#environment-variables - branch="${GITHUB_REF#refs/heads/}" - if [ "$GITHUB_HEAD_REF" != "" ]; - then - # PR refs are in the format: refs/pull/7/merge - if [[ "$GITHUB_REF" =~ ^refs\/pull\/[0-9]+\/merge$ ]]; - then - pr="${GITHUB_REF#refs/pull/}" - pr="${pr%/merge}" - fi - branch="${GITHUB_HEAD_REF}" - fi - commit="${GITHUB_SHA}" - slug="${GITHUB_REPOSITORY}" - build="${GITHUB_RUN_ID}" - build_url=$(urlencode "${GITHUB_SERVER_URL:-https://github.com}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}") - job="$(urlencode "${GITHUB_WORKFLOW}")" - - # actions/checkout runs in detached HEAD - mc= - if [ -n "$pr" ] && [ "$pr" != false ] && [ "$commit_o" == "" ]; - then - mc=$(git show --no-patch --format="%P" 2>/dev/null || echo "") - - if [[ "$mc" =~ ^[a-z0-9]{40}[[:space:]][a-z0-9]{40}$ ]]; - then - mc=$(echo "$mc" | cut -d' ' -f2) - say " Fixing merge commit SHA $commit -> $mc" - commit=$mc - elif [[ "$mc" = "" ]]; - then - say "$r-> Issue detecting commit SHA. Please run actions/checkout with fetch-depth > 1 or set to 0$x" - fi - fi - -elif [ "$SYSTEM_TEAMFOUNDATIONSERVERURI" != "" ]; -then - say "$e==>$x Azure Pipelines detected." - # https://docs.microsoft.com/en-us/azure/devops/pipelines/build/variables?view=vsts - # https://docs.microsoft.com/en-us/azure/devops/pipelines/build/variables?view=azure-devops&viewFallbackFrom=vsts&tabs=yaml - service="azure_pipelines" - commit="$BUILD_SOURCEVERSION" - build="$BUILD_BUILDNUMBER" - if [ -z "$SYSTEM_PULLREQUEST_PULLREQUESTNUMBER" ]; - then - pr="$SYSTEM_PULLREQUEST_PULLREQUESTID" - else - pr="$SYSTEM_PULLREQUEST_PULLREQUESTNUMBER" - fi - project="${SYSTEM_TEAMPROJECT}" - server_uri="${SYSTEM_TEAMFOUNDATIONSERVERURI}" - job="${BUILD_BUILDID}" - branch="${BUILD_SOURCEBRANCH#"refs/heads/"}" - build_url=$(urlencode "${SYSTEM_TEAMFOUNDATIONSERVERURI}${SYSTEM_TEAMPROJECT}/_build/results?buildId=${BUILD_BUILDID}") - - # azure/pipelines runs in detached HEAD - mc= - if [ -n "$pr" ] && [ "$pr" != false ]; - then - mc=$(git show --no-patch --format="%P" 2>/dev/null || echo "") - - if [[ "$mc" =~ ^[a-z0-9]{40}[[:space:]][a-z0-9]{40}$ ]]; - then - mc=$(echo "$mc" | cut -d' ' -f2) - say " Fixing merge commit SHA $commit -> $mc" - commit=$mc - fi - fi - -elif [ "$CI" = "true" ] && [ "$BITBUCKET_BUILD_NUMBER" != "" ]; -then - say "$e==>$x Bitbucket detected." 
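
The GitHub Actions branch above (and the Azure branch after it) ends with a detached-HEAD fixup: on PR builds, `actions/checkout` checks out a synthetic merge commit, and the real PR head is that commit's second parent. The same check, isolated into a minimal sketch:

```bash
#!/usr/bin/env bash
# "%P" prints the checked-out commit's parents; a synthetic PR merge commit has
# exactly two, and the second one is the actual PR head commit.
parents=$(git show --no-patch --format="%P" 2>/dev/null || echo "")
if [[ "$parents" =~ ^[0-9a-f]{40}[[:space:]][0-9a-f]{40}$ ]]; then
  commit=$(echo "$parents" | cut -d' ' -f2)
  echo "using PR head commit: $commit"
fi
```
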
- # https://confluence.atlassian.com/bitbucket/variables-in-pipelines-794502608.html - service="bitbucket" - branch="$BITBUCKET_BRANCH" - build="$BITBUCKET_BUILD_NUMBER" - slug="$BITBUCKET_REPO_OWNER/$BITBUCKET_REPO_SLUG" - job="$BITBUCKET_BUILD_NUMBER" - pr="$BITBUCKET_PR_ID" - commit="$BITBUCKET_COMMIT" - # See https://jira.atlassian.com/browse/BCLOUD-19393 - if [ "${#commit}" = 12 ]; - then - commit=$(git rev-parse "$BITBUCKET_COMMIT") - fi - -elif [ "$CI" = "true" ] && [ "$BUDDY" = "true" ]; -then - say "$e==>$x Buddy CI detected." - # https://buddy.works/docs/pipelines/environment-variables - service="buddy" - branch="$BUDDY_EXECUTION_BRANCH" - build="$BUDDY_EXECUTION_ID" - build_url=$(urlencode "$BUDDY_EXECUTION_URL") - commit="$BUDDY_EXECUTION_REVISION" - pr="$BUDDY_EXECUTION_PULL_REQUEST_NO" - tag="$BUDDY_EXECUTION_TAG" - slug="$BUDDY_REPO_SLUG" - -elif [ "$CIRRUS_CI" != "" ]; -then - say "$e==>$x Cirrus CI detected." - # https://cirrus-ci.org/guide/writing-tasks/#environment-variables - service="cirrus-ci" - slug="$CIRRUS_REPO_FULL_NAME" - branch="$CIRRUS_BRANCH" - pr="$CIRRUS_PR" - commit="$CIRRUS_CHANGE_IN_REPO" - build="$CIRRUS_BUILD_ID" - build_url=$(urlencode "https://cirrus-ci.com/task/$CIRRUS_TASK_ID") - job="$CIRRUS_TASK_NAME" - -elif [ "$DOCKER_REPO" != "" ]; -then - say "$e==>$x Docker detected." - # https://docs.docker.com/docker-cloud/builds/advanced/ - service="docker" - branch="$SOURCE_BRANCH" - commit="$SOURCE_COMMIT" - slug="$DOCKER_REPO" - tag="$CACHE_TAG" - env="$env,IMAGE_NAME" - -else - say "${r}x>${x} No CI provider detected." - say " Testing inside Docker? ${b}http://docs.codecov.io/docs/testing-with-docker${x}" - say " Testing with Tox? ${b}https://docs.codecov.io/docs/python#section-testing-with-tox${x}" - -fi - -say " ${e}current dir: ${x} $PWD" -say " ${e}project root:${x} $git_root" - -# find branch, commit, repo from git command -if [ "$GIT_BRANCH" != "" ]; -then - branch="$GIT_BRANCH" - -elif [ "$branch" = "" ]; -then - branch=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || hg branch 2>/dev/null || echo "") - if [ "$branch" = "HEAD" ]; - then - branch="" - fi -fi - -if [ "$commit_o" = "" ]; -then - if [ "$GIT_COMMIT" != "" ]; - then - commit="$GIT_COMMIT" - elif [ "$commit" = "" ]; - then - commit=$(git log -1 --format="%H" 2>/dev/null || hg id -i --debug 2>/dev/null | tr -d '+' || echo "") - fi -else - commit="$commit_o" -fi - -if [ "$CODECOV_TOKEN" != "" ] && [ "$token" = "" ]; -then - say "${e}-->${x} token set from env" - token="$CODECOV_TOKEN" -fi - -if [ "$CODECOV_URL" != "" ] && [ "$url_o" = "" ]; -then - say "${e}-->${x} url set from env" - url_o=$(echo "$CODECOV_URL" | sed -e 's/\/$//') -fi - -if [ "$CODECOV_SLUG" != "" ]; -then - say "${e}-->${x} slug set from env" - slug_o="$CODECOV_SLUG" - -elif [ "$slug" = "" ]; -then - if [ "$remote_addr" = "" ]; - then - remote_addr=$(git config --get remote.origin.url || hg paths default || echo '') - fi - if [ "$remote_addr" != "" ]; - then - if echo "$remote_addr" | grep -q "//"; then - # https - slug=$(echo "$remote_addr" | cut -d / -f 4,5 | sed -e 's/\.git$//') - else - # ssh - slug=$(echo "$remote_addr" | cut -d : -f 2 | sed -e 's/\.git$//') - fi - fi - if [ "$slug" = "/" ]; - then - slug="" - fi -fi - -yaml=$(cd "$git_root" && \ - git ls-files "*codecov.yml" "*codecov.yaml" 2>/dev/null \ - || hg locate "*codecov.yml" "*codecov.yaml" 2>/dev/null \ - || cd "$proj_root" && find . 
-maxdepth 1 -type f -name '*codecov.y*ml' 2>/dev/null \
-   || echo '')
- yaml=$(echo "$yaml" | head -1)
-
- if [ "$yaml" != "" ];
- then
-   say " ${e}Yaml found at:${x} $yaml"
-   if [[ "$yaml" != /* ]]; then
-     # relative path for yaml file given, assume relative to the repo root
-     yaml="$git_root/$yaml"
-   fi
-   config=$(parse_yaml "$yaml" || echo '')
-
-   # TODO validate the yaml here
-
-   if [ "$(echo "$config" | grep 'codecov_token="')" != "" ] && [ "$token" = "" ];
-   then
-     say "${e}-->${x} token set from yaml"
-     token="$(echo "$config" | grep 'codecov_token="' | sed -e 's/codecov_token="//' | sed -e 's/"\.*//')"
-   fi
-
-   if [ "$(echo "$config" | grep 'codecov_url="')" != "" ] && [ "$url_o" = "" ];
-   then
-     say "${e}-->${x} url set from yaml"
-     url_o="$(echo "$config" | grep 'codecov_url="' | sed -e 's/codecov_url="//' | sed -e 's/"\.*//')"
-   fi
-
-   if [ "$(echo "$config" | grep 'codecov_slug="')" != "" ] && [ "$slug_o" = "" ];
-   then
-     say "${e}-->${x} slug set from yaml"
-     slug_o="$(echo "$config" | grep 'codecov_slug="' | sed -e 's/codecov_slug="//' | sed -e 's/"\.*//')"
-   fi
- else
-   say " ${g}Yaml not found, that's ok! Learn more at${x} ${b}http://docs.codecov.io/docs/codecov-yaml${x}"
- fi
-
- if [ "$branch_o" != "" ];
- then
-   branch=$(urlencode "$branch_o")
- else
-   branch=$(urlencode "$branch")
- fi
-
- if [ "$slug_o" = "" ];
- then
-   urlencoded_slug=$(urlencode "$slug")
- else
-   urlencoded_slug=$(urlencode "$slug_o")
- fi
-
- query="branch=$branch\
-        &commit=$commit\
-        &build=$([ "$build_o" = "" ] && echo "$build" || echo "$build_o")\
-        &build_url=$build_url\
-        &name=$(urlencode "$name")\
-        &tag=$([ "$tag_o" = "" ] && echo "$tag" || echo "$tag_o")\
-        &slug=$urlencoded_slug\
-        &service=$service\
-        &flags=$flags\
-        &pr=$([ "$pr_o" = "" ] && echo "${pr##\#}" || echo "${pr_o##\#}")\
-        &job=$job\
-        &cmd_args=$(IFS=,; echo "${codecov_flags[*]}")"
-
- if [ -n "$project" ] && [ -n "$server_uri" ];
- then
-   query=$(echo "$query&project=$project&server_uri=$server_uri" | tr -d ' ')
- fi
-
- if [ "$parent" != "" ];
- then
-   query=$(echo "parent=$parent&$query" | tr -d ' ')
- fi
-
- if [ "$ft_search" = "1" ];
- then
-   # detect bower components location
-   bower_components="bower_components"
-   bower_rc=$(cd "$git_root" && cat .bowerrc 2>/dev/null || echo "")
-   if [ "$bower_rc" != "" ];
-   then
-     bower_components=$(echo "$bower_rc" | tr -d '\n' | grep '"directory"' | cut -d'"' -f4 | sed -e 's/\/$//')
-     if [ "$bower_components" = "" ];
-     then
-       bower_components="bower_components"
-     fi
-   fi
-
-   # Swift Coverage
-   if [ "$ft_xcodellvm" = "1" ] && [ -d "$ddp" ];
-   then
-     say "${e}==>${x} Processing Xcode reports via llvm-cov"
-     say " DerivedData folder: $ddp"
-     profdata_files=$(find "$ddp" -name '*.profdata' 2>/dev/null || echo '')
-     if [ "$profdata_files" != "" ];
-     then
-       # xcode via profdata
-       if [ "$xp" = "" ];
-       then
-         # xp=$(xcodebuild -showBuildSettings 2>/dev/null | grep -i "^\s*PRODUCT_NAME" | sed -e 's/.*= \(.*\)/\1/')
-         # say " ${e}->${x} Speed up Xcode processing by adding ${e}-J '$xp'${x}"
-         say " ${g}hint${x} Speed up Swift processing by using ${g}-J 'AppName'${x} (regexp accepted)"
-         say " ${g}hint${x} This will remove Pods/ from your report. 
Also ${b}https://docs.codecov.io/docs/ignoring-paths${x}" - fi - while read -r profdata; - do - if [ "$profdata" != "" ]; - then - swiftcov "$profdata" "$xp" - fi - done <<< "$profdata_files" - else - say " ${e}->${x} No Swift coverage found" - fi - - # Obj-C Gcov Coverage - if [ "$ft_gcov" = "1" ]; - then - say " ${e}->${x} Running $gcov_exe for Obj-C" - if [ "$ft_gcovout" = "0" ]; - then - # suppress gcov output - bash -c "find $ddp -type f -name '*.gcda' $gcov_include $gcov_ignore -exec $gcov_exe -p $gcov_arg {} +" >/dev/null 2>&1 || true - else - bash -c "find $ddp -type f -name '*.gcda' $gcov_include $gcov_ignore -exec $gcov_exe -p $gcov_arg {} +" || true - fi - fi - fi - - if [ "$ft_xcodeplist" = "1" ] && [ -d "$ddp" ]; - then - say "${e}==>${x} Processing Xcode plists" - plists_files=$(find "$ddp" -name '*.xccoverage' 2>/dev/null || echo '') - if [ "$plists_files" != "" ]; - then - while read -r plist; - do - if [ "$plist" != "" ]; - then - say " ${g}Found${x} plist file at $plist" - plutil -convert xml1 -o "$(basename "$plist").plist" -- "$plist" - fi - done <<< "$plists_files" - fi - fi - - # Gcov Coverage - if [ "$ft_gcov" = "1" ]; - then - say "${e}==>${x} Running $gcov_exe in $proj_root ${e}(disable via -X gcov)${x}" - if [ "$ft_gcovout" = "0" ]; - then - # suppress gcov output - bash -c "find $proj_root -type f -name '*.gcno' $gcov_include $gcov_ignore -exec $gcov_exe -pb $gcov_arg {} +" >/dev/null 2>&1 || true - else - bash -c "find $proj_root -type f -name '*.gcno' $gcov_include $gcov_ignore -exec $gcov_exe -pb $gcov_arg {} +" || true - fi - else - say "${e}==>${x} gcov disabled" - fi - - # Python Coverage - if [ "$ft_coveragepy" = "1" ]; - then - if [ ! -f coverage.xml ]; - then - if command -v coverage >/dev/null 2>&1; - then - say "${e}==>${x} Python coveragepy exists ${e}disable via -X coveragepy${x}" - - dotcoverage=$(find "$git_root" -name '.coverage' -or -name '.coverage.*' | head -1 || echo '') - if [ "$dotcoverage" != "" ]; - then - cd "$(dirname "$dotcoverage")" - if [ ! -f .coverage ]; - then - say " ${e}->${x} Running coverage combine" - coverage combine -a - fi - say " ${e}->${x} Running coverage xml" - if [ "$(coverage xml -i)" != "No data to report." 
]; - then - files="$files -$PWD/coverage.xml" - else - say " ${r}No data to report.${x}" - fi - cd "$proj_root" - else - say " ${r}No .coverage file found.${x}" - fi - else - say "${e}==>${x} Python coveragepy not found" - fi - fi - else - say "${e}==>${x} Python coveragepy disabled" - fi - - if [ "$search_in_o" != "" ]; - then - # location override - search_in="$search_in_o" - fi - - say "$e==>$x Searching for coverage reports in:" - for _path in $search_in - do - say " ${g}+${x} $_path" - done - - patterns="find $search_in \( \ - -name vendor \ - -or -name '$bower_components' \ - -or -name '.egg-info*' \ - -or -name 'conftest_*.c.gcov' \ - -or -name .env \ - -or -name .envs \ - -or -name .git \ - -or -name .hg \ - -or -name .tox \ - -or -name .venv \ - -or -name .venvs \ - -or -name .virtualenv \ - -or -name .virtualenvs \ - -or -name .yarn-cache \ - -or -name __pycache__ \ - -or -name env \ - -or -name envs \ - -or -name htmlcov \ - -or -name js/generated/coverage \ - -or -name node_modules \ - -or -name venv \ - -or -name venvs \ - -or -name virtualenv \ - -or -name virtualenvs \ - \) -prune -or \ - -type f \( -name '*coverage*.*' \ - -or -name '*.clover' \ - -or -name '*.codecov.*' \ - -or -name '*.gcov' \ - -or -name '*.lcov' \ - -or -name '*.lst' \ - -or -name 'clover.xml' \ - -or -name 'cobertura.xml' \ - -or -name 'codecov.*' \ - -or -name 'cover.out' \ - -or -name 'codecov-result.json' \ - -or -name 'coverage-final.json' \ - -or -name 'excoveralls.json' \ - -or -name 'gcov.info' \ - -or -name 'jacoco*.xml' \ - -or -name '*Jacoco*.xml' \ - -or -name 'lcov.dat' \ - -or -name 'lcov.info' \ - -or -name 'luacov.report.out' \ - -or -name 'naxsi.info' \ - -or -name 'nosetests.xml' \ - -or -name 'report.xml' \ - $include_cov \) \ - $exclude_cov \ - -not -name '*.am' \ - -not -name '*.bash' \ - -not -name '*.bat' \ - -not -name '*.bw' \ - -not -name '*.cfg' \ - -not -name '*.class' \ - -not -name '*.cmake' \ - -not -name '*.cmake' \ - -not -name '*.conf' \ - -not -name '*.coverage' \ - -not -name '*.cp' \ - -not -name '*.cpp' \ - -not -name '*.crt' \ - -not -name '*.css' \ - -not -name '*.csv' \ - -not -name '*.csv' \ - -not -name '*.data' \ - -not -name '*.db' \ - -not -name '*.dox' \ - -not -name '*.ec' \ - -not -name '*.ec' \ - -not -name '*.egg' \ - -not -name '*.el' \ - -not -name '*.env' \ - -not -name '*.erb' \ - -not -name '*.exe' \ - -not -name '*.ftl' \ - -not -name '*.gif' \ - -not -name '*.gradle' \ - -not -name '*.gz' \ - -not -name '*.h' \ - -not -name '*.html' \ - -not -name '*.in' \ - -not -name '*.jade' \ - -not -name '*.jar*' \ - -not -name '*.jpeg' \ - -not -name '*.jpg' \ - -not -name '*.js' \ - -not -name '*.less' \ - -not -name '*.log' \ - -not -name '*.m4' \ - -not -name '*.mak*' \ - -not -name '*.md' \ - -not -name '*.o' \ - -not -name '*.p12' \ - -not -name '*.pem' \ - -not -name '*.png' \ - -not -name '*.pom*' \ - -not -name '*.profdata' \ - -not -name '*.proto' \ - -not -name '*.ps1' \ - -not -name '*.pth' \ - -not -name '*.py' \ - -not -name '*.pyc' \ - -not -name '*.pyo' \ - -not -name '*.rb' \ - -not -name '*.rsp' \ - -not -name '*.rst' \ - -not -name '*.ru' \ - -not -name '*.sbt' \ - -not -name '*.scss' \ - -not -name '*.scss' \ - -not -name '*.serialized' \ - -not -name '*.sh' \ - -not -name '*.snapshot' \ - -not -name '*.sql' \ - -not -name '*.svg' \ - -not -name '*.tar.tz' \ - -not -name '*.template' \ - -not -name '*.whl' \ - -not -name '*.xcconfig' \ - -not -name '*.xcoverage.*' \ - -not -name '*/classycle/report.xml' \ - -not -name '*codecov.yml' \ - 
-not -name '*~' \ - -not -name '.*coveragerc' \ - -not -name '.coverage*' \ - -not -name 'coverage-summary.json' \ - -not -name 'createdFiles.lst' \ - -not -name 'fullLocaleNames.lst' \ - -not -name 'include.lst' \ - -not -name 'inputFiles.lst' \ - -not -name 'phpunit-code-coverage.xml' \ - -not -name 'phpunit-coverage.xml' \ - -not -name 'remapInstanbul.coverage*.json' \ - -not -name 'scoverage.measurements.*' \ - -not -name 'test_*_coverage.txt' \ - -not -name 'testrunner-coverage*' \ - -print 2>/dev/null" - files=$(eval "$patterns" || echo '') - -elif [ "$include_cov" != "" ]; -then - files=$(eval "find $search_in -type f \( ${include_cov:5} \)$exclude_cov 2>/dev/null" || echo '') -elif [ "$direct_file_upload" != "" ]; -then - files=$direct_file_upload -fi - -num_of_files=$(echo "$files" | wc -l | tr -d ' ') -if [ "$num_of_files" != '' ] && [ "$files" != '' ]; -then - say " ${e}->${x} Found $num_of_files reports" -fi - -# no files found -if [ "$files" = "" ]; -then - say "${r}-->${x} No coverage report found." - say " Please visit ${b}http://docs.codecov.io/docs/supported-languages${x}" - exit ${exit_with}; -fi - -if [ "$ft_network" == "1" ]; -then - say "${e}==>${x} Detecting git/mercurial file structure" - network=$(cd "$git_root" && git ls-files $git_ls_files_recurse_submodules_o 2>/dev/null || hg locate 2>/dev/null || echo "") - if [ "$network" = "" ]; - then - network=$(find "$git_root" \( \ - -name virtualenv \ - -name .virtualenv \ - -name virtualenvs \ - -name .virtualenvs \ - -name '*.png' \ - -name '*.gif' \ - -name '*.jpg' \ - -name '*.jpeg' \ - -name '*.md' \ - -name .env \ - -name .envs \ - -name env \ - -name envs \ - -name .venv \ - -name .venvs \ - -name venv \ - -name venvs \ - -name .git \ - -name .egg-info \ - -name shunit2-2.1.6 \ - -name vendor \ - -name __pycache__ \ - -name node_modules \ - -path "*/$bower_components/*" \ - -path '*/target/delombok/*' \ - -path '*/build/lib/*' \ - -path '*/js/generated/coverage/*' \ - \) -prune -or \ - -type f -print 2>/dev/null || echo '') - fi - - if [ "$network_filter_o" != "" ]; - then - network=$(echo "$network" | grep -e "$network_filter_o/*") - fi - if [ "$prefix_o" != "" ]; - then - network=$(echo "$network" | awk "{print \"$prefix_o/\"\$0}") - fi -fi - -upload_file=$(mktemp /tmp/codecov.XXXXXX) -adjustments_file=$(mktemp /tmp/codecov.adjustments.XXXXXX) - -cleanup() { - rm -f "$upload_file" "$adjustments_file" "$upload_file.gz" -} - -trap cleanup INT ABRT TERM - - -if [ "$env" != "" ]; -then - inc_env="" - say "${e}==>${x} Appending build variables" - for varname in $(echo "$env" | tr ',' ' ') - do - if [ "$varname" != "" ]; - then - say " ${g}+${x} $varname" - inc_env="${inc_env}${varname}=$(eval echo "\$${varname}") -" - fi - done - echo "$inc_env<<<<<< ENV" >> "$upload_file" -fi - -# Append git file list -# write discovered yaml location -if [ "$direct_file_upload" = "" ]; -then - echo "$yaml" >> "$upload_file" -fi - -if [ "$ft_network" == "1" ]; -then - i="woff|eot|otf" # fonts - i="$i|gif|png|jpg|jpeg|psd" # images - i="$i|ptt|pptx|numbers|pages|md|txt|xlsx|docx|doc|pdf|csv" # docs - i="$i|.gitignore" # supporting docs - - if [ "$ft_html" != "1" ]; - then - i="$i|html" - fi - - if [ "$ft_yaml" != "1" ]; - then - i="$i|yml|yaml" - fi - - echo "$network" | grep -vwE "($i)$" >> "$upload_file" -fi -echo "<<<<<< network" >> "$upload_file" - -if [ "$direct_file_upload" = "" ]; -then - fr=0 - say "${e}==>${x} Reading reports" - while IFS='' read -r file; - do - # read the coverage file - if [ "$(echo "$file" | tr -d ' ')" 
!= '' ];
-     then
-       if [ -f "$file" ];
-       then
-         report_len=$(wc -c < "$file")
-         if [ "$report_len" -ne 0 ];
-         then
-           say " ${g}+${x} $file ${e}bytes=$(echo "$report_len" | tr -d ' ')${x}"
-           # append to upload
-           _filename=$(basename "$file")
-           if [ "${_filename##*.}" = 'gcov' ];
-           then
-             {
-               echo "# path=$(echo "$file.reduced" | sed "s|^$git_root/||")";
-               # get file name
-               head -1 "$file";
-             } >> "$upload_file"
-             # 1. remove source code
-             # 2. remove ending bracket lines
-             # 3. remove whitespace
-             # 4. remove contextual lines
-             # 5. remove function names
-             awk -F': *' '{print $1":"$2":"}' "$file" \
-               | sed '\/: *} *$/d' \
-               | sed 's/^ *//' \
-               | sed '/^-/d' \
-               | sed 's/^function.*/func/' >> "$upload_file"
-           else
-             {
-               echo "# path=$(echo "$file" | sed "s|^$git_root/||")";
-               cat "$file";
-             } >> "$upload_file"
-           fi
-           echo "<<<<<< EOF" >> "$upload_file"
-           fr=1
-           if [ "$clean" = "1" ];
-           then
-             rm "$file"
-           fi
-         else
-           say " ${r}-${x} Skipping empty file $file"
-         fi
-       else
-         say " ${r}-${x} file not found at $file"
-       fi
-     fi
-   done <<< "$(echo -e "$files")"
-
-   if [ "$fr" = "0" ];
-   then
-     say "${r}-->${x} No coverage data found."
-     say " Please visit ${b}http://docs.codecov.io/docs/supported-languages${x}"
-     say " Search for your project's language to learn how to collect reports."
-     exit ${exit_with};
-   fi
- else
-   cp "$direct_file_upload" "$upload_file"
-   if [ "$clean" = "1" ];
-   then
-     rm "$direct_file_upload"
-   fi
- fi
-
- if [ "$ft_fix" = "1" ];
- then
-   say "${e}==>${x} Appending adjustments"
-   say " ${b}https://docs.codecov.io/docs/fixing-reports${x}"
-
-   empty_line='^[[:space:]]*$'
-   # //
-   syntax_comment='^[[:space:]]*//.*'
-   # /* or */
-   syntax_comment_block='^[[:space:]]*(\/\*|\*\/)[[:space:]]*$'
-   # { or }
-   syntax_bracket='^[[:space:]]*[\{\}][[:space:]]*(//.*)?$'
-   # [ or ]
-   syntax_list='^[[:space:]]*[][][[:space:]]*(//.*)?$'
-   # func ... {
-   syntax_go_func='^[[:space:]]*func[[:space:]]*[\{][[:space:]]*$'
-
-   # shellcheck disable=SC2089
-   skip_dirs="-not -path '*/$bower_components/*' \
-     -not -path '*/node_modules/*'"
-
-   cut_and_join() {
-     awk 'BEGIN { FS=":" }
-          $3 ~ /\/\*/ || $3 ~ /\*\// { print $0 ; next }
-          $1!=key { if (key!="") print out ; key=$1 ; out=$1":"$2 ; next }
-          { out=out","$2 }
-          END { print out }' 2>/dev/null
-   }
-
-   if echo "$network" | grep -m1 '.kt$' 1>/dev/null;
-   then
-     # skip brackets and comments
-     cd "$git_root" && \
-       find . -type f \
-         -name '*.kt' \
-         -exec \
-         grep -nIHE -e "$syntax_bracket" \
-         -e "$syntax_comment_block" {} \; \
-       | cut_and_join \
-       >> "$adjustments_file" \
-       || echo ''
-
-     # last line in file
-     cd "$git_root" && \
-       find . -type f \
-         -name '*.kt' -exec \
-         wc -l {} \; \
-       | while read -r l; do echo "EOF: $l"; done \
-       2>/dev/null \
-       >> "$adjustments_file" \
-       || echo ''
-   fi
-
-   if echo "$network" | grep -m1 '.go$' 1>/dev/null;
-   then
-     # skip empty lines, comments, and brackets
-     cd "$git_root" && \
-       find . -type f \
-         -not -path '*/vendor/*' \
-         -not -path '*/caches/*' \
-         -name '*.go' \
-         -exec \
-         grep -nIHE \
-         -e "$empty_line" \
-         -e "$syntax_comment" \
-         -e "$syntax_comment_block" \
-         -e "$syntax_bracket" \
-         -e "$syntax_go_func" \
-         {} \; \
-       | cut_and_join \
-       >> "$adjustments_file" \
-       || echo ''
-   fi
-
-   if echo "$network" | grep -m1 '.dart$' 1>/dev/null;
-   then
-     # skip brackets
-     cd "$git_root" && \
-       find . -type f \
-         -name '*.dart' \
-         -exec \
-         grep -nIHE \
-         -e "$syntax_bracket" \
-         {} \; \
-       | cut_and_join \
-       >> "$adjustments_file" \
-       || echo ''
-   fi
-
-   if echo "$network" | grep -m1 '.php$' 1>/dev/null;
-   then
-     # skip empty lines, comments, and brackets
-     cd "$git_root" && \
-       find . -type f \
-         -not -path "*/vendor/*" \
-         -name '*.php' \
-         -exec \
-         grep -nIHE \
-         -e "$syntax_list" \
-         -e "$syntax_bracket" \
-         -e '^[[:space:]]*\);[[:space:]]*(//.*)?$' \
-         {} \; \
-       | cut_and_join \
-       >> "$adjustments_file" \
-       || echo ''
-   fi
-
-   if echo "$network" | grep -m1 '\(.c\|.cpp\|.cxx\|.h\|.hpp\|.m\|.swift\|.vala\)$' 1>/dev/null;
-   then
-     # skip brackets
-     # shellcheck disable=SC2086,SC2090
-     cd "$git_root" && \
-       find . -type f \
-         $skip_dirs \
-         \( \
-           -name '*.c' \
-           -or -name '*.cpp' \
-           -or -name '*.cxx' \
-           -or -name '*.h' \
-           -or -name '*.hpp' \
-           -or -name '*.m' \
-           -or -name '*.swift' \
-           -or -name '*.vala' \
-         \) -exec \
-         grep -nIHE \
-         -e "$empty_line" \
-         -e "$syntax_bracket" \
-         -e '// LCOV_EXCL' \
-         {} \; \
-       | cut_and_join \
-       >> "$adjustments_file" \
-       || echo ''
-
-     # skip brackets
-     # shellcheck disable=SC2086,SC2090
-     cd "$git_root" && \
-       find . -type f \
-         $skip_dirs \
-         \( \
-           -name '*.c' \
-           -or -name '*.cpp' \
-           -or -name '*.cxx' \
-           -or -name '*.h' \
-           -or -name '*.hpp' \
-           -or -name '*.m' \
-           -or -name '*.swift' \
-           -or -name '*.vala' \
-         \) -exec \
-         grep -nIH '// LCOV_EXCL' \
-         {} \; \
-       >> "$adjustments_file" \
-       || echo ''
-
-   fi
-
-   found=$(< "$adjustments_file" tr -d ' ')
-
-   if [ "$found" != "" ];
-   then
-     say " ${g}+${x} Found adjustments"
-     {
-       echo "# path=fixes";
-       cat "$adjustments_file";
-       echo "<<<<<< EOF";
-     } >> "$upload_file"
-     rm -rf "$adjustments_file"
-   else
-     say " ${e}->${x} No adjustments found"
-   fi
- fi
-
- if [ "$url_o" != "" ];
- then
-   url="$url_o"
- fi
-
- if [ "$dump" != "0" ];
- then
-   # trim whitespace from query
-   say " ${e}->${x} Dumping upload file (no upload)"
-   echo "$url/upload/v4?$(echo "package=$package-$VERSION&$query" | tr -d ' ')"
-   cat "$upload_file"
- else
-   if [ "$save_to" != "" ];
-   then
-     say "${e}==>${x} Copying upload file to ${save_to}"
-     mkdir -p "$(dirname "$save_to")"
-     cp "$upload_file" "$save_to"
-   fi
-
-   say "${e}==>${x} Gzipping contents"
-   gzip -nf9 "$upload_file"
-   say " $(du -h "$upload_file.gz")"
-
-   query=$(echo "${query}" | tr -d ' ')
-   say "${e}==>${x} Uploading reports"
-   say " ${e}url:${x} $url"
-   say " ${e}query:${x} $query"
-
-   # Full query without token (to display on terminal output)
-   queryNoToken=$(echo "package=$package-$VERSION&token=&$query" | tr -d ' ')
-   # now add the token to the query used for the actual upload
-   query=$(echo "package=$package-$VERSION&token=$token&$query" | tr -d ' ')
-
-   if [ "$ft_s3" = "1" ];
-   then
-     say "${e}->${x} Pinging Codecov"
-     say "$url/upload/v4?$queryNoToken"
-     # shellcheck disable=SC2086,2090
-     res=$(curl $curl_s -X POST $cacert \
-       --retry 5 --retry-delay 2 --connect-timeout 2 \
-       -H 'X-Reduced-Redundancy: false' \
-       -H 'X-Content-Type: application/x-gzip' \
-       -H 'Content-Length: 0' \
-       -H "X-Upload-Token: ${token}" \
-       --write-out "\n%{response_code}\n" \
-       $curlargs \
-       "$url/upload/v4?$query" || true)
-     # a good reply is "https://codecov.io" + "\n" + "https://storage.googleapis.com/codecov/..."
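
The `ft_s3` path here is a two-step upload: the POST to `/upload/v4` returns two lines (the result page, then a signed storage URL), and the gzipped report is then PUT directly to storage, as the reply parsing just below shows. Reduced to its essentials (the `$res` value is a stand-in for a real server reply):

```bash
#!/usr/bin/env bash
# Two-step upload sketch: line 1 of the reply is the result page,
# line 2 is a signed storage URL that accepts the gzipped report body.
res=$'https://codecov.io/...\nhttps://storage.googleapis.com/codecov/...'
signed_url=$(echo "$res" | sed -n 2p)
curl -fX PUT \
  --data-binary @coverage.gz \
  -H 'Content-Type: application/x-gzip' \
  -H 'Content-Encoding: gzip' \
  "$signed_url"
```
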
- s3target=$(echo "$res" | sed -n 2p) - status=$(tail -n1 <<< "$res") - - if [ "$status" = "200" ] && [ "$s3target" != "" ]; - then - say "${e}->${x} Uploading to" - say "${s3target}" - - # shellcheck disable=SC2086 - s3=$(curl -fiX PUT \ - --data-binary @"$upload_file.gz" \ - -H 'Content-Type: application/x-gzip' \ - -H 'Content-Encoding: gzip' \ - $curlawsargs \ - "$s3target" || true) - - if [ "$s3" != "" ]; - then - say " ${g}->${x} Reports have been successfully queued for processing at ${b}$(echo "$res" | sed -n 1p)${x}" - exit 0 - else - say " ${r}X>${x} Failed to upload" - fi - elif [ "$status" = "400" ]; - then - # 400 Error - say "${r}${res}${x}" - exit ${exit_with} - else - say "${r}${res}${x}" - fi - fi - - say "${e}==>${x} Uploading to Codecov" - - # shellcheck disable=SC2086,2090 - res=$(curl -X POST $cacert \ - --data-binary @"$upload_file.gz" \ - --retry 5 --retry-delay 2 --connect-timeout 2 \ - -H 'Content-Type: text/plain' \ - -H 'Content-Encoding: gzip' \ - -H 'X-Content-Encoding: gzip' \ - -H "X-Upload-Token: ${token}" \ - -H 'Accept: text/plain' \ - $curlargs \ - "$url/upload/v2?$query&attempt=$i" || echo 'HTTP 500') - # {"message": "Coverage reports upload successfully", "uploaded": true, "queued": true, "id": "...", "url": "https://codecov.io/..."\} - uploaded=$(grep -o '\"uploaded\": [a-z]*' <<< "$res" | head -1 | cut -d' ' -f2) - if [ "$uploaded" = "true" ] - then - say " Reports have been successfully queued for processing at ${b}$(echo "$res" | head -2 | tail -1)${x}" - exit 0 - else - say " ${g}${res}${x}" - exit ${exit_with} - fi - - say " ${r}X> Failed to upload coverage reports${x}" -fi - -exit ${exit_with} diff --git a/tools/bin/go_core_tests b/tools/bin/go_core_tests index 694a51d1f82..c0b0f43aed4 100755 --- a/tools/bin/go_core_tests +++ b/tools/bin/go_core_tests @@ -32,7 +32,5 @@ if [[ $EXITCODE != 0 ]]; then echo "Encountered test failures." else echo "All tests passed!" - # uploading coverage.txt to CodeCov - $(dirname "$0")/codecov -f coverage.txt fi exit $EXITCODE
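
For context on the `go_core_tests` hunk: the deleted lines were this repo's only caller of the bash uploader, passing an explicit report with `-f` instead of relying on the automatic report search shown earlier. The removed invocation, standalone:

```bash
# The removed upload step, as it ran from tools/bin/go_core_tests:
# -f selects an explicit report file rather than the automatic search.
$(dirname "$0")/codecov -f coverage.txt
```
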