diff --git a/.changeset/big-camels-report.md b/.changeset/big-camels-report.md
new file mode 100644
index 00000000000..f81f66b9138
--- /dev/null
+++ b/.changeset/big-camels-report.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+#bugfix fix non-idempotent loopp registry.Register
diff --git a/.changeset/brave-cooks-itch.md b/.changeset/brave-cooks-itch.md
new file mode 100644
index 00000000000..1ed3dd7e117
--- /dev/null
+++ b/.changeset/brave-cooks-itch.md
@@ -0,0 +1,5 @@
+---
+"chainlink": minor
+---
+
+#updated feat(job-distributor): support tron chain type on sync
diff --git a/.changeset/giant-eels-jump.md b/.changeset/giant-eels-jump.md
new file mode 100644
index 00000000000..5ab8ca875ca
--- /dev/null
+++ b/.changeset/giant-eels-jump.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+Add error handling for Arbitrum RPC server timeouts. #added
diff --git a/.github/e2e-tests.yml b/.github/e2e-tests.yml
index fe30e2342c2..675fa315dfa 100644
--- a/.github/e2e-tests.yml
+++ b/.github/e2e-tests.yml
@@ -10,7 +10,7 @@ runner-test-matrix:
# START: OCR tests
# Example of 1 runner for all tests in integration-tests/smoke/ocr_test.go
- - id: smoke/ocr_test.go:*
+ - id: smoke/ocr_test.go:*
path: integration-tests/smoke/ocr_test.go
test_env_type: docker
runs_on: ubuntu-latest
@@ -27,7 +27,7 @@ runner-test-matrix:
runs_on: ubuntu-latest
test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestOCRv1Soak$ -test.parallel=1 -timeout 900h -count=1 -json
test_cmd_opts: 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci -singlepackage -hidepassingtests=false
- test_secrets_required: true
+ test_secrets_required: true
test_env_vars:
TEST_SUITE: soak
@@ -60,7 +60,7 @@ runner-test-matrix:
test_config_override_path: integration-tests/testconfig/ocr2/overrides/base_sepolia_quick_smoke_test.toml
test_secrets_required: true
test_env_vars:
- TEST_SUITE: soak
+ TEST_SUITE: soak
- id: soak/ocr_test.go:TestForwarderOCRv1Soak
path: integration-tests/soak/ocr_test.go
@@ -79,7 +79,7 @@ runner-test-matrix:
test_secrets_required: true
test_env_vars:
TEST_SUITE: soak
-
+
- id: soak/ocr_test.go:TestOCRSoak_GethReorgBelowFinality_FinalityTagDisabled
path: integration-tests/soak/ocr_test.go
test_env_type: k8s-remote-runner
@@ -87,7 +87,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run TestOCRSoak_GethReorgBelowFinality_FinalityTagDisabled -test.parallel=1 -timeout 900h -count=1 -json
test_secrets_required: true
test_env_vars:
- TEST_SUITE: soak
+ TEST_SUITE: soak
- id: soak/ocr_test.go:TestOCRSoak_GethReorgBelowFinality_FinalityTagEnabled
path: integration-tests/soak/ocr_test.go
@@ -96,7 +96,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestOCRSoak_GethReorgBelowFinality_FinalityTagEnabled$ -test.parallel=1 -timeout 900h -count=1 -json
test_secrets_required: true
test_env_vars:
- TEST_SUITE: soak
+ TEST_SUITE: soak
- id: soak/ocr_test.go:TestOCRSoak_GasSpike
path: integration-tests/soak/ocr_test.go
@@ -105,7 +105,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestOCRSoak_GasSpike$ -test.parallel=1 -timeout 900h -count=1 -json
test_secrets_required: true
test_env_vars:
- TEST_SUITE: soak
+ TEST_SUITE: soak
- id: soak/ocr_test.go:TestOCRSoak_ChangeBlockGasLimit
path: integration-tests/soak/ocr_test.go
@@ -114,7 +114,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestOCRSoak_ChangeBlockGasLimit$ -test.parallel=1 -timeout 900h -count=1 -json
test_secrets_required: true
test_env_vars:
- TEST_SUITE: soak
+ TEST_SUITE: soak
- id: soak/ocr_test.go:TestOCRSoak_RPCDownForAllCLNodes
path: integration-tests/soak/ocr_test.go
@@ -123,7 +123,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestOCRSoak_RPCDownForAllCLNodes$ -test.parallel=1 -timeout 900h -count=1 -json
test_secrets_required: true
test_env_vars:
- TEST_SUITE: soak
+ TEST_SUITE: soak
- id: soak/ocr_test.go:TestOCRSoak_RPCDownForHalfCLNodes
path: integration-tests/soak/ocr_test.go
@@ -132,7 +132,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestOCRSoak_RPCDownForHalfCLNodes$ -test.parallel=1 -timeout 900h -count=1 -json
test_secrets_required: true
test_env_vars:
- TEST_SUITE: soak
+ TEST_SUITE: soak
- id: smoke/forwarder_ocr_test.go:*
path: integration-tests/smoke/forwarder_ocr_test.go
@@ -168,7 +168,7 @@ runner-test-matrix:
pyroscope_env: ci-smoke-ocr2-evm-simulated
test_env_vars:
E2E_TEST_CHAINLINK_VERSION: '{{ env.DEFAULT_CHAINLINK_PLUGINS_VERSION }}' # This is the chainlink version that has the plugins
-
+
- id: smoke/ocr2_test.go:*-plugins
path: integration-tests/smoke/ocr2_test.go
test_env_type: docker
@@ -193,11 +193,12 @@ runner-test-matrix:
test_cmd: cd integration-tests/chaos && DETACH_RUNNER=false go test -test.run "^TestOCRChaos$" -v -test.parallel=10 -timeout 60m -count=1 -json
test_env_vars:
TEST_SUITE: chaos
+ CHAINLINK_USER_TEAM: Foundations
# END: OCR tests
# START: Automation tests
-
+
- id: smoke/automation_test.go:^TestAutomationBasic/registry_2_0|TestAutomationBasic/registry_2_1_conditional|TestAutomationBasic/registry_2_1_logtrigger$
path: integration-tests/smoke/automation_test.go
test_env_type: docker
@@ -273,7 +274,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run "^TestAutomationBasic/registry_2_3_with_mercury_v03_link|TestAutomationBasic/registry_2_3_with_logtrigger_and_mercury_v02_link$" -test.parallel=2 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-automation-evm-simulated
+ pyroscope_env: ci-smoke-automation-evm-simulated
- id: smoke/automation_test.go:^TestSetUpkeepTriggerConfig$
path: integration-tests/smoke/automation_test.go
@@ -284,7 +285,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestSetUpkeepTriggerConfig$ -test.parallel=2 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-automation-evm-simulated
+ pyroscope_env: ci-smoke-automation-evm-simulated
- id: smoke/automation_test.go:^TestAutomationAddFunds$
path: integration-tests/smoke/automation_test.go
@@ -295,7 +296,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationAddFunds$ -test.parallel=3 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-automation-evm-simulated
+ pyroscope_env: ci-smoke-automation-evm-simulated
- id: smoke/automation_test.go:^TestAutomationPauseUnPause$
path: integration-tests/smoke/automation_test.go
@@ -306,7 +307,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationPauseUnPause$ -test.parallel=3 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-automation-evm-simulated
+ pyroscope_env: ci-smoke-automation-evm-simulated
- id: smoke/automation_test.go:^TestAutomationRegisterUpkeep$
path: integration-tests/smoke/automation_test.go
@@ -317,7 +318,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationRegisterUpkeep$ -test.parallel=3 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-automation-evm-simulated
+ pyroscope_env: ci-smoke-automation-evm-simulated
- id: smoke/automation_test.go:^TestAutomationPauseRegistry$
path: integration-tests/smoke/automation_test.go
@@ -328,7 +329,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationPauseRegistry$ -test.parallel=3 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-automation-evm-simulated
+ pyroscope_env: ci-smoke-automation-evm-simulated
- id: smoke/automation_test.go:^TestAutomationKeeperNodesDown$
path: integration-tests/smoke/automation_test.go
@@ -339,7 +340,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationKeeperNodesDown$ -test.parallel=3 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-automation-evm-simulated
+ pyroscope_env: ci-smoke-automation-evm-simulated
- id: smoke/automation_test.go:^TestAutomationPerformSimulation$
path: integration-tests/smoke/automation_test.go
@@ -350,7 +351,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationPerformSimulation$ -test.parallel=3 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-automation-evm-simulated
+ pyroscope_env: ci-smoke-automation-evm-simulated
- id: smoke/automation_test.go:^TestAutomationCheckPerformGasLimit$
path: integration-tests/smoke/automation_test.go
@@ -361,7 +362,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationCheckPerformGasLimit$ -test.parallel=3 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-automation-evm-simulated
+ pyroscope_env: ci-smoke-automation-evm-simulated
- id: smoke/automation_test.go:^TestUpdateCheckData$
path: integration-tests/smoke/automation_test.go
@@ -372,7 +373,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestUpdateCheckData$ -test.parallel=3 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-automation-evm-simulated
+ pyroscope_env: ci-smoke-automation-evm-simulated
- id: smoke/automation_test.go:^TestSetOffchainConfigWithMaxGasPrice$
path: integration-tests/smoke/automation_test.go
@@ -383,7 +384,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestSetOffchainConfigWithMaxGasPrice$ -test.parallel=2 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-automation-evm-simulated
+ pyroscope_env: ci-smoke-automation-evm-simulated
- id: smoke/keeper_test.go:^TestKeeperBasicSmoke$
path: integration-tests/smoke/keeper_test.go
@@ -393,7 +394,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperBasicSmoke$ -test.parallel=3 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-keeper-evm-simulated
+ pyroscope_env: ci-smoke-keeper-evm-simulated
- id: smoke/keeper_test.go:^TestKeeperBlockCountPerTurn$
path: integration-tests/smoke/keeper_test.go
@@ -403,7 +404,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperBlockCountPerTurn$ -test.parallel=3 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-keeper-evm-simulated
+ pyroscope_env: ci-smoke-keeper-evm-simulated
- id: smoke/keeper_test.go:^TestKeeperSimulation$
path: integration-tests/smoke/keeper_test.go
@@ -413,7 +414,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperSimulation$ -test.parallel=2 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-keeper-evm-simulated
+ pyroscope_env: ci-smoke-keeper-evm-simulated
- id: smoke/keeper_test.go:^TestKeeperCheckPerformGasLimit$
path: integration-tests/smoke/keeper_test.go
@@ -423,7 +424,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperCheckPerformGasLimit$ -test.parallel=2 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-keeper-evm-simulated
+ pyroscope_env: ci-smoke-keeper-evm-simulated
- id: smoke/keeper_test.go:^TestKeeperRegisterUpkeep$
path: integration-tests/smoke/keeper_test.go
@@ -433,7 +434,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperRegisterUpkeep$ -test.parallel=3 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-keeper-evm-simulated
+ pyroscope_env: ci-smoke-keeper-evm-simulated
- id: smoke/keeper_test.go:^TestKeeperAddFunds$
path: integration-tests/smoke/keeper_test.go
@@ -443,7 +444,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperAddFunds$ -test.parallel=3 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-keeper-evm-simulated
+ pyroscope_env: ci-smoke-keeper-evm-simulated
- id: smoke/keeper_test.go:^TestKeeperRemove$
path: integration-tests/smoke/keeper_test.go
@@ -453,8 +454,8 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperRemove$ -test.parallel=3 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-keeper-evm-simulated
-
+ pyroscope_env: ci-smoke-keeper-evm-simulated
+
- id: smoke/keeper_test.go:^TestKeeperPauseRegistry$
path: integration-tests/smoke/keeper_test.go
test_env_type: docker
@@ -463,7 +464,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperPauseRegistry$ -test.parallel=2 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-keeper-evm-simulated
+ pyroscope_env: ci-smoke-keeper-evm-simulated
- id: smoke/keeper_test.go:^TestKeeperMigrateRegistry$
path: integration-tests/smoke/keeper_test.go
@@ -473,7 +474,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperMigrateRegistry$ -test.parallel=1 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-keeper-evm-simulated
+ pyroscope_env: ci-smoke-keeper-evm-simulated
- id: smoke/keeper_test.go:^TestKeeperNodeDown$
path: integration-tests/smoke/keeper_test.go
@@ -483,7 +484,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperNodeDown$ -test.parallel=3 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-keeper-evm-simulated
+ pyroscope_env: ci-smoke-keeper-evm-simulated
- id: smoke/keeper_test.go:^TestKeeperPauseUnPauseUpkeep$
path: integration-tests/smoke/keeper_test.go
@@ -493,7 +494,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperPauseUnPauseUpkeep$ -test.parallel=1 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-keeper-evm-simulated
+ pyroscope_env: ci-smoke-keeper-evm-simulated
- id: smoke/keeper_test.go:^TestKeeperUpdateCheckData$
path: integration-tests/smoke/keeper_test.go
@@ -503,7 +504,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperUpdateCheckData$ -test.parallel=1 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-keeper-evm-simulated
+ pyroscope_env: ci-smoke-keeper-evm-simulated
- id: smoke/keeper_test.go:^TestKeeperJobReplacement$
path: integration-tests/smoke/keeper_test.go
@@ -513,7 +514,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperJobReplacement$ -test.parallel=1 -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-keeper-evm-simulated
+ pyroscope_env: ci-smoke-keeper-evm-simulated
- id: load/automationv2_1/automationv2_1_test.go:TestLogTrigger
path: integration-tests/load/automationv2_1/automationv2_1_test.go
@@ -546,7 +547,7 @@ runner-test-matrix:
test_env_type: docker
runs_on: ubuntu22.04-8cores-32GB
triggers:
- - Automation Nightly Tests
+ - Automation Nightly Tests
test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationNodeUpgrade/registry_2_1 -test.parallel=5 -timeout 60m -count=1 -json
test_env_vars:
E2E_TEST_CHAINLINK_IMAGE: public.ecr.aws/chainlink/chainlink
@@ -624,6 +625,7 @@ runner-test-matrix:
pyroscope_env: ci-automation-on-demand-chaos
test_env_vars:
TEST_SUITE: chaos
+ CHAINLINK_USER_TEAM: Automation
- id: benchmark/automation_test.go:TestAutomationBenchmark
path: integration-tests/benchmark/automation_test.go
@@ -676,7 +678,7 @@ runner-test-matrix:
test_env_vars:
TEST_TYPE: Smoke
triggers:
- - On Demand VRFV2 Plus Performance Test
+ - On Demand VRFV2 Plus Performance Test
- id: load/vrfv2plus/vrfv2plus_test.go:^TestVRFV2PlusBHSPerformance$Smoke
path: integration-tests/load/vrfv2plus/vrfv2plus_test.go
@@ -688,7 +690,7 @@ runner-test-matrix:
test_env_vars:
TEST_TYPE: Smoke
triggers:
- - On Demand VRFV2 Plus Performance Test
+ - On Demand VRFV2 Plus Performance Test
- id: load/vrfv2/vrfv2_test.go:^TestVRFV2Performance$Smoke
path: integration-tests/load/vrfv2/vrfv2_test.go
@@ -698,9 +700,9 @@ runner-test-matrix:
test_config_override_required: true
test_secrets_required: true
test_env_vars:
- TEST_TYPE: Smoke
+ TEST_TYPE: Smoke
triggers:
- - On Demand VRFV2 Performance Test
+ - On Demand VRFV2 Performance Test
- id: load/vrfv2/vrfv2_test.go:^TestVRFV2PlusBHSPerformance$Smoke
path: integration-tests/load/vrfv2/vrfv2_test.go
@@ -892,7 +894,7 @@ runner-test-matrix:
- Merge Queue E2E Core Tests
- Nightly E2E Tests
test_cmd: cd integration-tests/ && go test smoke/flux_test.go -timeout 30m -count=1 -json
- pyroscope_env: ci-smoke-flux-evm-simulated
+ pyroscope_env: ci-smoke-flux-evm-simulated
- id: smoke/reorg_above_finality_test.go:*
path: integration-tests/smoke/reorg_above_finality_test.go
@@ -904,7 +906,7 @@ runner-test-matrix:
- Nightly E2E Tests
test_cmd: cd integration-tests/ && go test smoke/reorg_above_finality_test.go -timeout 30m -count=1 -json
pyroscope_env: ci-smoke-reorg-above-finality-evm-simulated
-
+
- id: migration/upgrade_version_test.go:*
path: integration-tests/migration/upgrade_version_test.go
test_env_type: docker
@@ -966,7 +968,7 @@ runner-test-matrix:
- id: smoke/ccip/ccip_rmn_test.go:^TestRMN_TwoMessagesOnTwoLanesIncludingBatching$
path: integration-tests/smoke/ccip/ccip_rmn_test.go
test_env_type: docker
- runs_on: ubuntu-latest
+ runs_on: ubuntu20.04-8cores-32GB
triggers:
- PR E2E Core Tests
- Nightly E2E Tests
@@ -982,7 +984,7 @@ runner-test-matrix:
- id: smoke/ccip/ccip_rmn_test.go:^TestRMN_MultipleMessagesOnOneLaneNoWaitForExec$
path: integration-tests/smoke/ccip/ccip_rmn_test.go
test_env_type: docker
- runs_on: ubuntu-latest
+ runs_on: ubuntu20.04-8cores-32GB
triggers:
- PR E2E Core Tests
- Nightly E2E Tests
@@ -998,7 +1000,7 @@ runner-test-matrix:
- id: smoke/ccip/ccip_rmn_test.go:^TestRMN_NotEnoughObservers$
path: integration-tests/smoke/ccip/ccip_rmn_test.go
test_env_type: docker
- runs_on: ubuntu-latest
+ runs_on: ubuntu20.04-8cores-32GB
triggers:
- PR E2E Core Tests
- Nightly E2E Tests
@@ -1014,7 +1016,7 @@ runner-test-matrix:
- id: smoke/ccip/ccip_rmn_test.go:^TestRMN_DifferentSigners$
path: integration-tests/smoke/ccip/ccip_rmn_test.go
test_env_type: docker
- runs_on: ubuntu-latest
+ runs_on: ubuntu20.04-8cores-32GB
triggers:
- PR E2E Core Tests
- Nightly E2E Tests
@@ -1030,7 +1032,7 @@ runner-test-matrix:
- id: smoke/ccip/ccip_rmn_test.go:^TestRMN_NotEnoughSigners$
path: integration-tests/smoke/ccip/ccip_rmn_test.go
test_env_type: docker
- runs_on: ubuntu-latest
+ runs_on: ubuntu20.04-8cores-32GB
triggers:
- PR E2E Core Tests
- Nightly E2E Tests
@@ -1046,7 +1048,7 @@ runner-test-matrix:
- id: smoke/ccip/ccip_rmn_test.go:^TestRMN_DifferentRmnNodesForDifferentChains$
path: integration-tests/smoke/ccip/ccip_rmn_test.go
test_env_type: docker
- runs_on: ubuntu-latest
+ runs_on: ubuntu20.04-8cores-32GB
triggers:
- PR E2E Core Tests
- Nightly E2E Tests
@@ -1062,7 +1064,7 @@ runner-test-matrix:
- id: smoke/ccip/ccip_rmn_test.go:^TestRMN_TwoMessagesOneSourceChainCursed$
path: integration-tests/smoke/ccip/ccip_rmn_test.go
test_env_type: docker
- runs_on: ubuntu-latest
+ runs_on: ubuntu20.04-8cores-32GB
triggers:
- PR E2E Core Tests
- Nightly E2E Tests
@@ -1078,7 +1080,7 @@ runner-test-matrix:
- id: smoke/ccip/ccip_rmn_test.go:^TestRMN_GlobalCurseTwoMessagesOnTwoLanes$
path: integration-tests/smoke/ccip/ccip_rmn_test.go
test_env_type: docker
- runs_on: ubuntu-latest
+ runs_on: ubuntu20.04-8cores-32GB
triggers:
- PR E2E Core Tests
- Nightly E2E Tests
@@ -1106,7 +1108,8 @@ runner-test-matrix:
test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPForBidirectionalLane$ -timeout 30m -count=1 -test.parallel=1 -json
test_env_vars:
E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2
-
+ CHAINLINK_USER_TEAM: CCIP
+
- id: ccip-smoke-usdc
path: integration-tests/ccip-tests/smoke/ccip_test.go
test_env_type: docker
@@ -1118,6 +1121,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPForBidirectionalLane$ -timeout 30m -count=1 -test.parallel=1 -json
test_env_vars:
E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2
+ CHAINLINK_USER_TEAM: CCIP
test_config_override_path: integration-tests/ccip-tests/testconfig/tomls/usdc_mock_deployment.toml
- id: ccip-smoke-db-compatibility
@@ -1131,6 +1135,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPForBidirectionalLane$ -timeout 30m -count=1 -test.parallel=1 -json
test_env_vars:
E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2
+ CHAINLINK_USER_TEAM: CCIP
test_config_override_path: integration-tests/ccip-tests/testconfig/tomls/db-compatibility.toml
- id: ccip-smoke-leader-lane
@@ -1157,6 +1162,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPTokenPoolRateLimits$ -timeout 30m -count=1 -test.parallel=1 -json
test_env_vars:
E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2
+ CHAINLINK_USER_TEAM: CCIP
- id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPMulticall$
path: integration-tests/ccip-tests/smoke/ccip_test.go
@@ -1169,6 +1175,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPMulticall$ -timeout 30m -count=1 -test.parallel=1 -json
test_env_vars:
E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2
+ CHAINLINK_USER_TEAM: CCIP
- id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPManuallyExecuteAfterExecutionFailingDueToInsufficientGas$
path: integration-tests/ccip-tests/smoke/ccip_test.go
@@ -1181,6 +1188,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPManuallyExecuteAfterExecutionFailingDueToInsufficientGas$ -timeout 30m -count=1 -test.parallel=1 -json
test_env_vars:
E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2
+ CHAINLINK_USER_TEAM: CCIP
- id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPOnRampLimits$
path: integration-tests/ccip-tests/smoke/ccip_test.go
@@ -1193,6 +1201,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPOnRampLimits$ -timeout 30m -count=1 -test.parallel=1 -json
test_env_vars:
E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2
+ CHAINLINK_USER_TEAM: CCIP
- id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPOffRampCapacityLimit$
path: integration-tests/ccip-tests/smoke/ccip_test.go
@@ -1202,7 +1211,8 @@ runner-test-matrix:
- Nightly E2E Tests
test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPOffRampCapacityLimit$ -timeout 30m -count=1 -test.parallel=1 -json
test_env_vars:
- E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2
+ E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2
+ CHAINLINK_USER_TEAM: CCIP
- id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPOffRampAggRateLimit$
path: integration-tests/ccip-tests/smoke/ccip_test.go
@@ -1213,6 +1223,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPOffRampAggRateLimit$ -timeout 30m -count=1 -test.parallel=1 -json
test_env_vars:
E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2
+ CHAINLINK_USER_TEAM: CCIP
- id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPReorgBelowFinality$
path: integration-tests/ccip-tests/smoke/ccip_test.go
@@ -1225,6 +1236,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPReorgBelowFinality$ -timeout 30m -count=1 -test.parallel=1 -json
test_env_vars:
E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2
+ CHAINLINK_USER_TEAM: CCIP
test_config_override_path: integration-tests/ccip-tests/testconfig/tomls/ccip-reorg.toml
- id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPReorgAboveFinalityAtDestination$
@@ -1238,6 +1250,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPReorgAboveFinalityAtDestination$ -timeout 30m -count=1 -test.parallel=1 -json
test_env_vars:
E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2
+ CHAINLINK_USER_TEAM: CCIP
test_config_override_path: integration-tests/ccip-tests/testconfig/tomls/ccip-reorg.toml
- id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPReorgAboveFinalityAtSource$
@@ -1251,6 +1264,7 @@ runner-test-matrix:
test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPReorgAboveFinalityAtSource$ -timeout 30m -count=1 -test.parallel=1 -json
test_env_vars:
E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2
+ CHAINLINK_USER_TEAM: CCIP
test_config_override_path: integration-tests/ccip-tests/testconfig/tomls/ccip-reorg.toml
- id: integration-tests/ccip-tests/load/ccip_test.go:TestLoadCCIPStableRPS
@@ -1262,8 +1276,8 @@ runner-test-matrix:
TEST_SUITE: ccip-load
E2E_TEST_GRAFANA_DASHBOARD_URL: "/d/6vjVx-1V8/ccip-long-running-tests"
triggers:
- - E2E CCIP Load Tests
- test_artifacts_on_failure:
+ - E2E CCIP Load Tests
+ test_artifacts_on_failure:
- ./integration-tests/load/logs/payload_ccip.json
# Enable when CCIP-2277 is resolved
@@ -1277,8 +1291,8 @@ runner-test-matrix:
# test_env_vars:
# E2E_TEST_GRAFANA_DASHBOARD_URL: "/d/6vjVx-1V8/ccip-long-running-tests"
# triggers:
- # - E2E CCIP Load Tests
- # test_artifacts_on_failure:
+ # - E2E CCIP Load Tests
+ # test_artifacts_on_failure:
# - ./integration-tests/load/logs/payload_ccip.json
- id: ccip-tests/chaos/ccip_test.go
@@ -1306,5 +1320,5 @@ runner-test-matrix:
TEST_TRIGGERED_BY: ccip-cron-chaos-eth
TEST_LOG_LEVEL: debug
E2E_TEST_GRAFANA_DASHBOARD_URL: /d/6vjVx-1V8/ccip-long-running-tests
-
+
# END: CCIP tests
diff --git a/.github/workflows/ci-core-partial.yml b/.github/workflows/ci-core-partial.yml
index c9752d4e1e4..35f689090e8 100644
--- a/.github/workflows/ci-core-partial.yml
+++ b/.github/workflows/ci-core-partial.yml
@@ -46,6 +46,7 @@ jobs:
permissions:
id-token: write
contents: write
+ actions: write
strategy:
fail-fast: false
matrix:
@@ -86,7 +87,7 @@ jobs:
go-mod-download-directory: ${{ matrix.type.test-suite == 'ccip-deployment' && matrix.type.module-directory || '' }}
- name: Build Tests
- uses: smartcontractkit/.github/apps/go-conditional-tests@37882e110590e636627a26371bdbd56ddfcce821 # go-conditional-tests@0.1.0
+ uses: smartcontractkit/.github/apps/go-conditional-tests@57f99fbea73056c490c766d50ef582a13ec4f3bb # go-conditional-tests@0.2.0
timeout-minutes: 10
with:
pipeline-step: "build"
@@ -98,7 +99,7 @@ jobs:
build-flags: ${{ matrix.type.build-flags }}
- name: Run Tests
- uses: smartcontractkit/.github/apps/go-conditional-tests@37882e110590e636627a26371bdbd56ddfcce821 # go-conditional-tests@0.1.0
+ uses: smartcontractkit/.github/apps/go-conditional-tests@57f99fbea73056c490c766d50ef582a13ec4f3bb # go-conditional-tests@0.2.0
timeout-minutes: 15
env:
CL_DATABASE_URL: ${{ env.DB_URL }}
@@ -112,7 +113,7 @@ jobs:
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Update Test Index
- uses: smartcontractkit/.github/apps/go-conditional-tests@37882e110590e636627a26371bdbd56ddfcce821 # go-conditional-tests@0.1.0
+ uses: smartcontractkit/.github/apps/go-conditional-tests@57f99fbea73056c490c766d50ef582a13ec4f3bb # go-conditional-tests@0.2.0
with:
pipeline-step: "update"
collect-coverage: ${{ needs.filter.outputs.should-collect-coverage }}
@@ -130,7 +131,7 @@ jobs:
if: ${{ needs.filter.outputs.should-collect-coverage == 'true' }}
runs-on: ubuntu-latest
steps:
- - name: Checkout the repo
+ - name: Checkout the repo
uses: actions/checkout@v4.2.1
with:
# fetches all history for all tags and branches to provide more metadata for sonar reports
diff --git a/.github/workflows/ci-core.yml b/.github/workflows/ci-core.yml
index 9134a8c9b56..726b6b14074 100644
--- a/.github/workflows/ci-core.yml
+++ b/.github/workflows/ci-core.yml
@@ -463,7 +463,7 @@ jobs:
findByTestFilesDiff: true
findByAffectedPackages: false
slackNotificationAfterTestsChannelId: 'C07TRF65CNS' #flaky-test-detector-notifications
- extraArgs: '{ "skipped_tests": "TestChainComponents", "run_with_race": "true", "print_failed_tests": "true", "test_repeat_count": "3", "min_pass_ratio": "0.01" }'
+ extraArgs: '{ "skipped_tests": "TestChainComponents", "run_with_race": "true", "print_failed_tests": "true", "test_repeat_count": "3", "omit_test_outputs_on_success": "true" }'
secrets:
SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }}
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -482,7 +482,7 @@ jobs:
findByTestFilesDiff: true
findByAffectedPackages: false
slackNotificationAfterTestsChannelId: 'C07TRF65CNS' #flaky-test-detector-notifications
- extraArgs: '{ "skipped_tests": "TestAddLane", "run_with_race": "true", "print_failed_tests": "true", "test_repeat_count": "3", "min_pass_ratio": "0.01" }'
+ extraArgs: '{ "skipped_tests": "TestAddLane", "run_with_race": "true", "print_failed_tests": "true", "test_repeat_count": "3", "omit_test_outputs_on_success": "true" }'
secrets:
SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }}
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/client-compatibility-tests.yml b/.github/workflows/client-compatibility-tests.yml
index 03c5b893cca..5f986ccf16c 100644
--- a/.github/workflows/client-compatibility-tests.yml
+++ b/.github/workflows/client-compatibility-tests.yml
@@ -668,9 +668,6 @@ jobs:
E2E_TEST_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }}
E2E_TEST_PYROSCOPE_ENVIRONMENT: ci-client-compatability-${{ matrix.eth_client }}-testnet
E2E_TEST_PYROSCOPE_ENABLED: "true"
- E2E_TEST_LOGGING_RUN_ID: ${{ github.run_id }}
- E2E_TEST_LOG_COLLECT: ${{ vars.TEST_LOG_COLLECT }}
- E2E_TEST_LOG_STREAM_LOG_TARGETS: ${{ vars.LOGSTREAM_LOG_TARGETS }}
E2E_TEST_PRIVATE_ETHEREUM_EXECUTION_LAYER: ${{ matrix.evm_node.eth_implementation || 'geth' }}
E2E_TEST_PRIVATE_ETHEREUM_ETHEREUM_VERSION: auto_fill # Auto fill the version based on the docker image
E2E_TEST_PRIVATE_ETHEREUM_CUSTOM_DOCKER_IMAGE: ${{ matrix.evm_node.docker_image }}
diff --git a/.github/workflows/flakeguard-on-demand.yml b/.github/workflows/flakeguard-on-demand.yml
index 0ba84c5ab58..4508da30e6b 100644
--- a/.github/workflows/flakeguard-on-demand.yml
+++ b/.github/workflows/flakeguard-on-demand.yml
@@ -14,14 +14,13 @@ on:
description: 'The path to the project to run the flaky test detection.'
default: '.'
baseRef:
- required: true
+ required: false
type: string
- description: 'The base reference or branch to compare changes for detecting flaky tests.'
- default: 'origin/develop'
+ description: 'The base reference or branch to compare changes for detecting flaky tests. Set only when running diffs between branches. E.g. (develop)'
headRef:
required: false
type: string
- description: 'The head reference or branch to compare changes for detecting flaky tests. Default is the current branch.'
+ description: 'The head reference or branch to compare changes for detecting flaky tests. Default is the current branch. E.g. (develop)'
runAllTests:
required: false
type: boolean
diff --git a/.github/workflows/flakeguard.yml b/.github/workflows/flakeguard.yml
index 4c1aa695c62..3951c356a3b 100644
--- a/.github/workflows/flakeguard.yml
+++ b/.github/workflows/flakeguard.yml
@@ -15,11 +15,11 @@ on:
baseRef:
required: false
type: string
- description: 'The base reference or branch to compare changes for detecting flaky tests.'
+ description: 'The base reference or branch to compare changes for detecting flaky tests. Set only when running diffs between branches. E.g. (develop)'
headRef:
required: false
type: string
- description: 'The head reference or branch to compare changes for detecting flaky tests. Default is the current branch.'
+ description: 'The head reference or branch to compare changes for detecting flaky tests. Default is the current branch. E.g. (develop)'
runAllTests:
required: false
type: boolean
@@ -56,6 +56,7 @@ on:
required: true
env:
+ GIT_BASE_REF: ${{ inputs.baseRef }}
GIT_HEAD_REF: ${{ inputs.headRef || github.ref }}
SKIPPED_TESTS: ${{ fromJSON(inputs.extraArgs)['skipped_tests'] || '' }} # Comma separated list of test names to skip running in the flaky detector. Related issue: TT-1823
DEFAULT_MAX_RUNNER_COUNT: ${{ fromJSON(inputs.extraArgs)['default_max_runner_count'] || '8' }} # The default maximum number of GitHub runners to use for parallel test execution.
@@ -67,7 +68,7 @@ env:
ALL_TESTS_RUNNER: ${{ fromJSON(inputs.extraArgs)['all_tests_runner'] || 'ubuntu22.04-32cores-128GB' }} # The runner to use for running all tests.
DEFAULT_RUNNER: 'ubuntu-latest' # The default runner to use for running tests.
UPLOAD_ALL_TEST_RESULTS: ${{ fromJSON(inputs.extraArgs)['upload_all_test_results'] || 'false' }} # Whether to upload all test results as artifacts.
-
+  OMIT_TEST_OUTPUTS_ON_SUCCESS: ${{ fromJSON(inputs.extraArgs)['omit_test_outputs_on_success'] || 'true' }} # Whether to omit test outputs from the results when tests pass.
jobs:
get-tests:
@@ -80,6 +81,7 @@ jobs:
affected_test_packages: ${{ steps.get-tests.outputs.packages }}
git_head_sha: ${{ steps.get_commit_sha.outputs.git_head_sha }}
git_head_short_sha: ${{ steps.get_commit_sha.outputs.git_head_short_sha }}
+ git_base_sha: ${{ steps.get_commit_sha.outputs.git_base_sha }}
steps:
- name: Checkout repository
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
@@ -87,14 +89,33 @@ jobs:
fetch-depth: 0
ref: ${{ env.GIT_HEAD_REF }}
- - name: Get commit SHA
+ - name: Get SHA
id: get_commit_sha
run: |
+ # Resolve HEAD SHA
git_head_sha=$(git rev-parse HEAD)
git_head_short_sha=$(git rev-parse --short HEAD)
echo "git_head_sha=$git_head_sha" >> $GITHUB_OUTPUT
echo "git_head_short_sha=$git_head_short_sha" >> $GITHUB_OUTPUT
+ # Print HEAD SHAs to the console
+ echo "HEAD SHA: $git_head_sha"
+ echo "HEAD Short SHA: $git_head_short_sha"
+
+ # Conditionally resolve BASE SHA
+ if [ -n "${{ env.GIT_BASE_REF }}" ]; then
+ git fetch origin ${{ env.GIT_BASE_REF }} --quiet
+
+ git_base_sha=$(git rev-parse origin/${{ env.GIT_BASE_REF }})
+ echo "git_base_sha=$git_base_sha" >> $GITHUB_OUTPUT
+
+ # Print BASE SHA to the console
+ echo "BASE SHA: $git_base_sha"
+ else
+ echo "BASE SHA not provided."
+ echo "git_base_sha=" >> $GITHUB_OUTPUT
+ fi
+
- name: Set up Go 1.21.9
uses: actions/setup-go@v5.0.2
with:
@@ -102,7 +123,7 @@ jobs:
- name: Install flakeguard
shell: bash
- run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@404e04e1e2e2dd5a384b09bd05b8d80409b6609a # flakguard@0.1.0
+        run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@ea4ffd8c51ce02efebf5ea6bca503fe10b6cee92 # flakeguard@0.1.0
- name: Find new or updated test packages
if: ${{ inputs.runAllTests == false }}
@@ -115,7 +136,7 @@ jobs:
PATH=$PATH:$(go env GOPATH)/bin
export PATH
- PACKAGES=$(flakeguard find --find-by-test-files-diff=${{ inputs.findByTestFilesDiff }} --find-by-affected-packages=${{ inputs.findByAffectedPackages }} --base-ref=origin/${{ inputs.baseRef }} --project-path=${{ inputs.projectPath }})
+ PACKAGES=$(flakeguard find --find-by-test-files-diff=${{ inputs.findByTestFilesDiff }} --find-by-affected-packages=${{ inputs.findByAffectedPackages }} --base-ref=origin/${{ env.GIT_BASE_REF }} --project-path=${{ inputs.projectPath }})
echo $PACKAGES
echo "packages=$PACKAGES" >> $GITHUB_OUTPUT
@@ -130,7 +151,7 @@ jobs:
PATH=$PATH:$(go env GOPATH)/bin
export PATH
- TEST_FILES=$(flakeguard find --only-show-changed-test-files=true --base-ref=origin/${{ inputs.baseRef }} --project-path=${{ inputs.projectPath }})
+ TEST_FILES=$(flakeguard find --only-show-changed-test-files=true --base-ref=origin/${{ env.GIT_BASE_REF }} --project-path=${{ inputs.projectPath }})
echo $TEST_FILES
echo "test_files=$TEST_FILES" >> $GITHUB_OUTPUT
@@ -261,11 +282,11 @@ jobs:
- name: Install flakeguard
shell: bash
- run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@404e04e1e2e2dd5a384b09bd05b8d80409b6609a # flakguard@0.1.0
+        run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@ea4ffd8c51ce02efebf5ea6bca503fe10b6cee92 # flakeguard@0.1.0
- name: Run tests with flakeguard
shell: bash
- run: flakeguard run --project-path=${{ inputs.projectPath }} --test-packages=${{ matrix.testPackages }} --run-count=${{ env.TEST_REPEAT_COUNT }} --max-pass-ratio=${{ inputs.maxPassRatio }} --race=${{ env.RUN_WITH_RACE }} --shuffle=${{ env.RUN_WITH_SHUFFLE }} --shuffle-seed=${{ env.SHUFFLE_SEED }} --skip-tests=${{ env.SKIPPED_TESTS }} --output-json=test-result.json
+ run: flakeguard run --project-path=${{ inputs.projectPath }} --test-packages=${{ matrix.testPackages }} --run-count=${{ env.TEST_REPEAT_COUNT }} --max-pass-ratio=${{ inputs.maxPassRatio }} --race=${{ env.RUN_WITH_RACE }} --shuffle=${{ env.RUN_WITH_SHUFFLE }} --shuffle-seed=${{ env.SHUFFLE_SEED }} --skip-tests=${{ env.SKIPPED_TESTS }} --output-json=test-result.json --omit-test-outputs-on-success=${{ env.OMIT_TEST_OUTPUTS_ON_SUCCESS }}
env:
CL_DATABASE_URL: ${{ env.DB_URL }}
@@ -281,7 +302,7 @@ jobs:
needs: [get-tests, run-tests]
if: always()
name: Report
- runs-on: ubuntu-24.04-8cores-32GB-ARM # Use a runner with more resources to avoid OOM errors when aggregating test results.
+ runs-on: ubuntu-latest
outputs:
test_results: ${{ steps.results.outputs.results }}
steps:
@@ -308,7 +329,7 @@ jobs:
- name: Install flakeguard
shell: bash
- run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@404e04e1e2e2dd5a384b09bd05b8d80409b6609a # flakguard@0.1.0
+        run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@ea4ffd8c51ce02efebf5ea6bca503fe10b6cee92 # flakeguard@0.1.0
- name: Aggregate Flakeguard Results
id: results
@@ -329,7 +350,11 @@ jobs:
--output-path ./flakeguard-report \
--repo-path "${{ github.workspace }}" \
--codeowners-path "${{ github.workspace }}/.github/CODEOWNERS" \
- --max-pass-ratio "${{ inputs.maxPassRatio }}"
+ --max-pass-ratio "${{ inputs.maxPassRatio }}" \
+ --repo-url "${{ inputs.repoUrl }}" \
+ --base-sha "${{ needs.get-tests.outputs.git_base_sha }}" \
+ --head-sha "${{ needs.get-tests.outputs.git_head_sha }}" \
+ --github-workflow-name "${{ github.workflow }}"
# Print out the summary file
echo -e "\nFlakeguard Summary:"
@@ -461,7 +486,7 @@ jobs:
"type": "section",
"text": {
"type": "mrkdwn",
- "text": "${{ inputs.runAllTests == true && format('Ran all tests for `{0}` branch.', inputs.headRef) || format('Ran changed tests between `{0}` and `{1}` (`{2}`).', inputs.baseRef, needs.get-tests.outputs.git_head_short_sha, env.GIT_HEAD_REF) }}"
+ "text": "${{ inputs.runAllTests == true && format('Ran all tests for `{0}` branch.', env.GIT_HEAD_REF) || format('Ran changed tests between `{0}` and `{1}` (`{2}`).', env.GIT_BASE_REF, needs.get-tests.outputs.git_head_short_sha, env.GIT_HEAD_REF) }}"
}
},
{
@@ -481,7 +506,7 @@ jobs:
"type": "section",
"text": {
"type": "mrkdwn",
- "text": "${{ format('<{0}/{1}/actions/runs/{2}|View Flaky Detector Details> | <{3}/compare/{4}...{5}#files_bucket|Compare Changes>{6}', github.server_url, github.repository, github.run_id, inputs.repoUrl, inputs.baseRef, needs.get-tests.outputs.git_head_sha, github.event_name == 'pull_request' && format(' | <{0}|View PR>', github.event.pull_request.html_url) || '') }}"
+ "text": "${{ format('<{0}/{1}/actions/runs/{2}|View Flaky Detector Details> | <{3}/compare/{4}...{5}#files_bucket|Compare Changes>{6}', github.server_url, github.repository, github.run_id, inputs.repoUrl, env.GIT_BASE_REF, needs.get-tests.outputs.git_head_sha, github.event_name == 'pull_request' && format(' | <{0}|View PR>', github.event.pull_request.html_url) || '') }}"
}
}
]
@@ -514,7 +539,7 @@ jobs:
"type": "section",
"text": {
"type": "mrkdwn",
- "text": "${{ inputs.runAllTests == true && format('Ran all tests for `{0}` branch.', env.GIT_HEAD_REF) || format('Ran changed tests between `{0}` and `{1}` (`{2}`).', inputs.baseRef, needs.get-tests.outputs.git_head_short_sha, env.GIT_HEAD_REF) }}"
+ "text": "${{ inputs.runAllTests == true && format('Ran all tests for `{0}` branch.', env.GIT_HEAD_REF) || format('Ran changed tests between `{0}` and `{1}` (`{2}`).', env.GIT_BASE_REF, needs.get-tests.outputs.git_head_short_sha, env.GIT_HEAD_REF) }}"
}
},
{
diff --git a/.github/workflows/integration-chaos-tests.yml b/.github/workflows/integration-chaos-tests.yml
index 314e54a1ab8..c9f7f2661ec 100644
--- a/.github/workflows/integration-chaos-tests.yml
+++ b/.github/workflows/integration-chaos-tests.yml
@@ -6,11 +6,49 @@ on:
tags:
- "*"
workflow_dispatch:
+ inputs:
+ team:
+ description: Team to run the tests for (e.g. BIX, CCIP)
+ required: true
+ type: string
jobs:
+ run-e2e-tests-workflow-dispatch:
+ name: Run E2E Tests (Workflow Dispatch)
+ uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@0d4a2b2b009c87b5c366d0b97f7a8d7de2f60760
+ if: github.event_name == 'workflow_dispatch'
+ with:
+ test_path: .github/e2e-tests.yml
+ chainlink_version: ${{ github.sha }}
+ require_chainlink_image_versions_in_qa_ecr: ${{ github.sha }}
+ test_trigger: E2E Chaos Tests
+ test_log_level: debug
+ team: ${{ inputs.team }}
+ secrets:
+ QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
+ QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
+ QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}
+ PROD_AWS_ACCOUNT_NUMBER: ${{ secrets.AWS_ACCOUNT_ID_PROD }}
+ QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }}
+ QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }}
+ QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }}
+ GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }}
+ GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }}
+ GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }}
+ GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }}
+ LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }}
+ LOKI_URL: ${{ secrets.LOKI_URL }}
+ LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }}
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ AWS_REGION: ${{ secrets.QA_AWS_REGION }}
+ AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }}
+ AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }}
+ SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }}
+
run-e2e-tests-workflow:
- name: Run E2E Tests
- uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@5412507526722a7b1c5d719fa686eed5a1bc4035 # ctf-run-tests@0.2.0
+    name: Run E2E Tests (Push and Schedule)
+ uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@0d4a2b2b009c87b5c366d0b97f7a8d7de2f60760
+ if: github.event_name != 'workflow_dispatch'
with:
test_path: .github/e2e-tests.yml
chainlink_version: ${{ github.sha }}
@@ -32,8 +70,9 @@ jobs:
LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }}
LOKI_URL: ${{ secrets.LOKI_URL }}
LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }}
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
AWS_REGION: ${{ secrets.QA_AWS_REGION }}
AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }}
- AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }}
+ AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }}
SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }}
+
diff --git a/.github/workflows/integration-in-memory-tests.yml b/.github/workflows/integration-in-memory-tests.yml
index 8d777b41ea1..341d66f641e 100644
--- a/.github/workflows/integration-in-memory-tests.yml
+++ b/.github/workflows/integration-in-memory-tests.yml
@@ -73,7 +73,7 @@ jobs:
contents: read
needs: changes
if: github.event_name == 'pull_request' && ( needs.changes.outputs.ccip_changes == 'true' || needs.changes.outputs.core_changes == 'true' || needs.changes.outputs.github_ci_changes == 'true')
- uses: smartcontractkit/.github/.github/workflows/run-integration-tests.yml@57112554b9e5cfae79e795a8b1c36acf7e9dead7
+ uses: smartcontractkit/.github/.github/workflows/run-integration-tests.yml@0632b5652dd5eb03bfa87e23a2b3e2911484fe59
with:
workflow_name: Run CCIP Integration Tests For PR
test_path: .github/integration-in-memory-tests.yml
@@ -95,7 +95,7 @@ jobs:
contents: read
needs: changes
if: github.event_name == 'merge_group' && ( needs.changes.outputs.ccip_changes == 'true' || needs.changes.outputs.core_changes == 'true' || needs.changes.outputs.github_ci_changes == 'true')
- uses: smartcontractkit/.github/.github/workflows/run-integration-tests.yml@57112554b9e5cfae79e795a8b1c36acf7e9dead7
+ uses: smartcontractkit/.github/.github/workflows/run-integration-tests.yml@0632b5652dd5eb03bfa87e23a2b3e2911484fe59
with:
workflow_name: Run CCIP Integration Tests For Merge Queue
test_path: .github/integration-in-memory-tests.yml
diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml
index 2c11d7568aa..27bdfa52243 100644
--- a/.github/workflows/integration-tests.yml
+++ b/.github/workflows/integration-tests.yml
@@ -210,7 +210,7 @@ jobs:
contents: read
needs: [build-chainlink, changes]
if: github.event_name == 'pull_request' && ( needs.changes.outputs.core_changes == 'true' || needs.changes.outputs.github_ci_changes == 'true')
- uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@27467f0073162e0ca77d33ce26f649b3d0f4c188 #ctf-run-tests@0.2.0
+ uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@0632b5652dd5eb03bfa87e23a2b3e2911484fe59
with:
workflow_name: Run Core E2E Tests For PR
chainlink_version: ${{ inputs.evm-ref || github.sha }}
@@ -251,7 +251,7 @@ jobs:
contents: read
needs: [build-chainlink, changes]
if: github.event_name == 'merge_group' && ( needs.changes.outputs.core_changes == 'true' || needs.changes.outputs.github_ci_changes == 'true')
- uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@27467f0073162e0ca77d33ce26f649b3d0f4c188 #ctf-run-tests@1.0.0
+ uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@0632b5652dd5eb03bfa87e23a2b3e2911484fe59
with:
workflow_name: Run Core E2E Tests For Merge Queue
chainlink_version: ${{ inputs.evm-ref || github.sha }}
@@ -296,7 +296,7 @@ jobs:
contents: read
needs: [build-chainlink, changes]
if: github.event_name == 'pull_request' && (needs.changes.outputs.ccip_changes == 'true' || needs.changes.outputs.github_ci_changes == 'true')
- uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@0d4a2b2b009c87b5c366d0b97f7a8d7de2f60760
+ uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@0632b5652dd5eb03bfa87e23a2b3e2911484fe59
with:
workflow_name: Run CCIP E2E Tests For PR
chainlink_version: ${{ inputs.evm-ref || github.sha }}
@@ -338,7 +338,7 @@ jobs:
contents: read
needs: [build-chainlink, changes]
if: github.event_name == 'merge_group' && (needs.changes.outputs.ccip_changes == 'true' || needs.changes.outputs.github_ci_changes == 'true')
- uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@0d4a2b2b009c87b5c366d0b97f7a8d7de2f60760
+ uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@0632b5652dd5eb03bfa87e23a2b3e2911484fe59
with:
workflow_name: Run CCIP E2E Tests For Merge Queue
chainlink_version: ${{ inputs.evm-ref || github.sha }}
diff --git a/.github/workflows/on-demand-vrfv2-performance-test.yml b/.github/workflows/on-demand-vrfv2-performance-test.yml
index aadef377718..f9aeaa0fa1f 100644
--- a/.github/workflows/on-demand-vrfv2-performance-test.yml
+++ b/.github/workflows/on-demand-vrfv2-performance-test.yml
@@ -67,7 +67,7 @@ jobs:
run-e2e-tests-workflow:
name: Run E2E Tests
needs: set-tests-to-run
- uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@5412507526722a7b1c5d719fa686eed5a1bc4035 # ctf-run-tests@0.2.0
+ uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@0632b5652dd5eb03bfa87e23a2b3e2911484fe59
with:
custom_test_list_json: ${{ needs.set-tests-to-run.outputs.test_list }}
chainlink_version: ${{ inputs.chainlink_version }}
diff --git a/.github/workflows/on-demand-vrfv2-smoke-tests.yml b/.github/workflows/on-demand-vrfv2-smoke-tests.yml
index 4ebc38a8081..ad616dea744 100644
--- a/.github/workflows/on-demand-vrfv2-smoke-tests.yml
+++ b/.github/workflows/on-demand-vrfv2-smoke-tests.yml
@@ -70,7 +70,7 @@ jobs:
run-e2e-tests-workflow:
name: Run E2E Tests
needs: set-tests-to-run
- uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@5412507526722a7b1c5d719fa686eed5a1bc4035 # ctf-run-tests@0.2.0
+ uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@0632b5652dd5eb03bfa87e23a2b3e2911484fe59
with:
custom_test_list_json: ${{ needs.set-tests-to-run.outputs.test_list }}
chainlink_version: ${{ inputs.chainlink_version }}
diff --git a/.github/workflows/on-demand-vrfv2plus-performance-test.yml b/.github/workflows/on-demand-vrfv2plus-performance-test.yml
index f6d120ac178..b3a820e25a0 100644
--- a/.github/workflows/on-demand-vrfv2plus-performance-test.yml
+++ b/.github/workflows/on-demand-vrfv2plus-performance-test.yml
@@ -67,7 +67,7 @@ jobs:
run-e2e-tests-workflow:
name: Run E2E Tests
needs: set-tests-to-run
- uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@5412507526722a7b1c5d719fa686eed5a1bc4035 # ctf-run-tests@0.2.0
+ uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@0632b5652dd5eb03bfa87e23a2b3e2911484fe59
with:
custom_test_list_json: ${{ needs.set-tests-to-run.outputs.test_list }}
chainlink_version: ${{ inputs.chainlink_version }}
diff --git a/.github/workflows/on-demand-vrfv2plus-smoke-tests.yml b/.github/workflows/on-demand-vrfv2plus-smoke-tests.yml
index af26c527988..8561034b103 100644
--- a/.github/workflows/on-demand-vrfv2plus-smoke-tests.yml
+++ b/.github/workflows/on-demand-vrfv2plus-smoke-tests.yml
@@ -70,7 +70,7 @@ jobs:
run-e2e-tests-workflow:
name: Run E2E Tests
needs: set-tests-to-run
- uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@5412507526722a7b1c5d719fa686eed5a1bc4035 # ctf-run-tests@0.2.0
+ uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@0632b5652dd5eb03bfa87e23a2b3e2911484fe59
with:
custom_test_list_json: ${{ needs.set-tests-to-run.outputs.test_list }}
chainlink_version: ${{ inputs.chainlink_version }}
diff --git a/.github/workflows/run-nightly-e2e-tests.yml b/.github/workflows/run-nightly-e2e-tests.yml
index eba1108f89f..712fb088181 100644
--- a/.github/workflows/run-nightly-e2e-tests.yml
+++ b/.github/workflows/run-nightly-e2e-tests.yml
@@ -20,7 +20,7 @@ on:
jobs:
call-run-e2e-tests-workflow:
name: Run E2E Tests
- uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@5412507526722a7b1c5d719fa686eed5a1bc4035 # ctf-run-tests@0.2.0
+ uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@0632b5652dd5eb03bfa87e23a2b3e2911484fe59
with:
chainlink_version: ${{ inputs.chainlink_version || 'develop' }}
test_path: .github/e2e-tests.yml
diff --git a/.github/workflows/run-selected-e2e-tests.yml b/.github/workflows/run-selected-e2e-tests.yml
index 0e7c97c67fc..e95ce1cef19 100644
--- a/.github/workflows/run-selected-e2e-tests.yml
+++ b/.github/workflows/run-selected-e2e-tests.yml
@@ -35,7 +35,7 @@ run-name: ${{ inputs.workflow_run_name }}
jobs:
call-run-e2e-tests-workflow:
name: Run E2E Tests
- uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@5412507526722a7b1c5d719fa686eed5a1bc4035 # ctf-run-tests@0.2.0
+ uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@0632b5652dd5eb03bfa87e23a2b3e2911484fe59
with:
chainlink_version: ${{ github.event.inputs.chainlink_version }}
test_path: .github/e2e-tests.yml
diff --git a/.mockery.yaml b/.mockery.yaml
index dd9024cc066..5777ca1da92 100644
--- a/.mockery.yaml
+++ b/.mockery.yaml
@@ -583,12 +583,6 @@ packages:
github.com/smartcontractkit/chainlink/v2/core/services/workflows/syncer:
interfaces:
ORM:
- ContractReader:
- config:
- mockname: "Mock{{ .InterfaceName }}"
- filename: contract_reader_mock.go
- inpackage: true
- dir: "{{ .InterfaceDir }}"
Handler:
config:
mockname: "Mock{{ .InterfaceName }}"
diff --git a/GNUmakefile b/GNUmakefile
index 336efd326a7..b765c63a3f4 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -133,13 +133,6 @@ testdb-force: ## Prepares the test database, drops any pesky user connections th
testdb-user-only: ## Prepares the test database with user only.
go run . local db preparetest --user-only
-# Format for CI
-.PHONY: presubmit
-presubmit: ## Format go files and imports.
- goimports -w .
- gofmt -w .
- go mod tidy
-
.PHONY: gomods
gomods: ## Install gomods
go install github.com/jmank88/gomods@v0.1.4
diff --git a/contracts/src/v0.8/shared/token/ERC20/BurnMintERC20.sol b/contracts/src/v0.8/shared/token/ERC20/BurnMintERC20.sol
index 946a6623b49..ea11dc08798 100644
--- a/contracts/src/v0.8/shared/token/ERC20/BurnMintERC20.sol
+++ b/contracts/src/v0.8/shared/token/ERC20/BurnMintERC20.sol
@@ -13,7 +13,6 @@ import {IERC165} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/ut
/// @notice A basic ERC20 compatible token contract with burn and minting roles.
/// @dev The total supply can be limited during deployment.
-/// @dev This contract has not been audited and is not yet approved for production use.
contract BurnMintERC20 is IBurnMintERC20, IGetCCIPAdmin, IERC165, ERC20Burnable, AccessControl {
error MaxSupplyExceeded(uint256 supplyAfterMint);
error InvalidRecipient(address recipient);
@@ -153,7 +152,7 @@ contract BurnMintERC20 is IBurnMintERC20, IGetCCIPAdmin, IERC165, ERC20Burnable,
/// @dev only the owner can call this function, NOT the current ccipAdmin, and 1-step ownership transfer is used.
/// @param newAdmin The address to transfer the CCIPAdmin role to. Setting to address(0) is a valid way to revoke
/// the role
- function setCCIPAdmin(address newAdmin) public onlyRole(DEFAULT_ADMIN_ROLE) {
+ function setCCIPAdmin(address newAdmin) external onlyRole(DEFAULT_ADMIN_ROLE) {
address currentAdmin = s_ccipAdmin;
s_ccipAdmin = newAdmin;
diff --git a/core/capabilities/compute/compute.go b/core/capabilities/compute/compute.go
index 316e4f00eea..2ba5daefaa6 100644
--- a/core/capabilities/compute/compute.go
+++ b/core/capabilities/compute/compute.go
@@ -318,18 +318,13 @@ func (c *Compute) createFetcher() func(ctx context.Context, req *wasmpb.FetchReq
headersReq[k] = v.String()
}
- payloadBytes, err := json.Marshal(ghcapabilities.Request{
+ resp, err := c.outgoingConnectorHandler.HandleSingleNodeRequest(ctx, messageID, ghcapabilities.Request{
URL: req.Url,
Method: req.Method,
Headers: headersReq,
Body: req.Body,
TimeoutMs: req.TimeoutMs,
})
- if err != nil {
- return nil, fmt.Errorf("failed to marshal fetch request: %w", err)
- }
-
- resp, err := c.outgoingConnectorHandler.HandleSingleNodeRequest(ctx, messageID, payloadBytes)
if err != nil {
return nil, err
}
diff --git a/core/capabilities/compute/compute_test.go b/core/capabilities/compute/compute_test.go
index c4146b7408e..3e5f501fa61 100644
--- a/core/capabilities/compute/compute_test.go
+++ b/core/capabilities/compute/compute_test.go
@@ -14,6 +14,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/capabilities"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/wasmtest"
"github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/utils/matches"
cappkg "github.com/smartcontractkit/chainlink-common/pkg/capabilities"
"github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
@@ -188,6 +189,7 @@ func TestComputeFetch(t *testing.T) {
th := setup(t, defaultConfig)
th.connector.EXPECT().DonID().Return("don-id")
+ th.connector.EXPECT().AwaitConnection(matches.AnyContext, "gateway1").Return(nil)
th.connector.EXPECT().GatewayIDs().Return([]string{"gateway1", "gateway2"})
msgID := strings.Join([]string{
diff --git a/core/capabilities/remote/executable/client_test.go b/core/capabilities/remote/executable/client_test.go
index 5c4da350b9e..f4e6add82b0 100644
--- a/core/capabilities/remote/executable/client_test.go
+++ b/core/capabilities/remote/executable/client_test.go
@@ -12,7 +12,9 @@ import (
commoncap "github.com/smartcontractkit/chainlink-common/pkg/capabilities"
"github.com/smartcontractkit/chainlink-common/pkg/capabilities/pb"
"github.com/smartcontractkit/chainlink-common/pkg/services/servicetest"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
"github.com/smartcontractkit/chainlink-common/pkg/values"
+
"github.com/smartcontractkit/chainlink/v2/core/capabilities/remote/executable"
remotetypes "github.com/smartcontractkit/chainlink/v2/core/capabilities/remote/types"
"github.com/smartcontractkit/chainlink/v2/core/capabilities/transmission"
@@ -29,6 +31,7 @@ const (
)
func Test_Client_DonTopologies(t *testing.T) {
+ tests.SkipFlakey(t, "https://smartcontract-it.atlassian.net/browse/CAPPL-363")
ctx := testutils.Context(t)
transmissionSchedule, err := values.NewMap(map[string]any{
@@ -87,6 +90,7 @@ func Test_Client_DonTopologies(t *testing.T) {
}
func Test_Client_TransmissionSchedules(t *testing.T) {
+ tests.SkipFlakey(t, "https://smartcontract-it.atlassian.net/browse/CAPPL-363")
ctx := testutils.Context(t)
responseTest := func(t *testing.T, response commoncap.CapabilityResponse, responseError error) {
diff --git a/core/capabilities/webapi/outgoing_connector_handler.go b/core/capabilities/webapi/outgoing_connector_handler.go
index d18ee971d1a..a9ff9ee3aae 100644
--- a/core/capabilities/webapi/outgoing_connector_handler.go
+++ b/core/capabilities/webapi/outgoing_connector_handler.go
@@ -6,6 +6,7 @@ import (
"fmt"
"sort"
"sync"
+ "time"
"github.com/pkg/errors"
@@ -17,6 +18,10 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/gateway/handlers/common"
)
+const (
+ defaultFetchTimeoutMs = 20_000
+)
+
var _ connector.GatewayConnectorHandler = &OutgoingConnectorHandler{}
type OutgoingConnectorHandler struct {
@@ -51,8 +56,24 @@ func NewOutgoingConnectorHandler(gc connector.GatewayConnector, config ServiceCo
}
// HandleSingleNodeRequest sends a request to first available gateway node and blocks until response is received
-// TODO: handle retries and timeouts
-func (c *OutgoingConnectorHandler) HandleSingleNodeRequest(ctx context.Context, messageID string, payload []byte) (*api.Message, error) {
+// TODO: handle retries
+func (c *OutgoingConnectorHandler) HandleSingleNodeRequest(ctx context.Context, messageID string, req capabilities.Request) (*api.Message, error) {
+	// apply the default timeout to outgoing requests that do not specify one
+ if req.TimeoutMs == 0 {
+ req.TimeoutMs = defaultFetchTimeoutMs
+ }
+
+ // Create a subcontext with the timeout plus some margin for the gateway to process the request
+ timeoutDuration := time.Duration(req.TimeoutMs) * time.Millisecond
+ margin := 100 * time.Millisecond
+ ctx, cancel := context.WithTimeout(ctx, timeoutDuration+margin)
+ defer cancel()
+
+ payload, err := json.Marshal(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal fetch request: %w", err)
+ }
+
ch := make(chan *api.Message, 1)
c.responseChsMu.Lock()
c.responseChs[messageID] = ch
@@ -75,8 +96,15 @@ func (c *OutgoingConnectorHandler) HandleSingleNodeRequest(ctx context.Context,
}
sort.Strings(gatewayIDs)
- err := c.gc.SignAndSendToGateway(ctx, gatewayIDs[0], body)
- if err != nil {
+ selectedGateway := gatewayIDs[0]
+
+ l.Infow("selected gateway, awaiting connection", "gatewayID", selectedGateway)
+
+ if err := c.gc.AwaitConnection(ctx, selectedGateway); err != nil {
+ return nil, errors.Wrap(err, "await connection canceled")
+ }
+
+ if err := c.gc.SignAndSendToGateway(ctx, selectedGateway, body); err != nil {
return nil, errors.Wrap(err, "failed to send request to gateway")
}
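
For reviewers, a minimal sketch of the deadline derivation introduced above: the request-level timeout falls back to defaultFetchTimeoutMs when unset, and the context deadline adds a small margin so the gateway can surface its own timeout first. The helper name deriveDeadline and its uint32 parameter are illustrative only, not part of the handler's API.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

const defaultFetchTimeoutMs = 20_000

// deriveDeadline mirrors the handler logic: default the timeout, then bound
// the context by that timeout plus a small processing margin.
func deriveDeadline(ctx context.Context, timeoutMs uint32) (context.Context, context.CancelFunc) {
	if timeoutMs == 0 {
		timeoutMs = defaultFetchTimeoutMs
	}
	margin := 100 * time.Millisecond
	return context.WithTimeout(ctx, time.Duration(timeoutMs)*time.Millisecond+margin)
}

func main() {
	ctx, cancel := deriveDeadline(context.Background(), 0)
	defer cancel()
	deadline, _ := ctx.Deadline()
	fmt.Println("request deadline in:", time.Until(deadline).Round(time.Second)) // ~20s plus margin
}
```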
diff --git a/core/capabilities/webapi/outgoing_connector_handler_test.go b/core/capabilities/webapi/outgoing_connector_handler_test.go
new file mode 100644
index 00000000000..4a8c425d4f1
--- /dev/null
+++ b/core/capabilities/webapi/outgoing_connector_handler_test.go
@@ -0,0 +1,135 @@
+package webapi
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/utils/matches"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/gateway/api"
+ gcmocks "github.com/smartcontractkit/chainlink/v2/core/services/gateway/connector/mocks"
+ ghcapabilities "github.com/smartcontractkit/chainlink/v2/core/services/gateway/handlers/capabilities"
+ "github.com/smartcontractkit/chainlink/v2/core/services/gateway/handlers/common"
+)
+
+func TestHandleSingleNodeRequest(t *testing.T) {
+	t.Run("OK-timeout_is_not_specified_default_timeout_is_expected", func(t *testing.T) {
+ ctx := tests.Context(t)
+ log := logger.TestLogger(t)
+ connector := gcmocks.NewGatewayConnector(t)
+ var defaultConfig = ServiceConfig{
+ RateLimiter: common.RateLimiterConfig{
+ GlobalRPS: 100.0,
+ GlobalBurst: 100,
+ PerSenderRPS: 100.0,
+ PerSenderBurst: 100,
+ },
+ }
+ connectorHandler, err := NewOutgoingConnectorHandler(connector, defaultConfig, ghcapabilities.MethodComputeAction, log)
+ require.NoError(t, err)
+
+ msgID := "msgID"
+ testURL := "http://localhost:8080"
+ connector.EXPECT().DonID().Return("donID")
+ connector.EXPECT().AwaitConnection(matches.AnyContext, "gateway1").Return(nil)
+ connector.EXPECT().GatewayIDs().Return([]string{"gateway1"})
+
+ // build the expected body with the default timeout
+ req := ghcapabilities.Request{
+ URL: testURL,
+ TimeoutMs: defaultFetchTimeoutMs,
+ }
+ payload, err := json.Marshal(req)
+ require.NoError(t, err)
+
+ expectedBody := &api.MessageBody{
+ MessageId: msgID,
+ DonId: connector.DonID(),
+ Method: ghcapabilities.MethodComputeAction,
+ Payload: payload,
+ }
+
+ // expect the request body to contain the default timeout
+ connector.EXPECT().SignAndSendToGateway(mock.Anything, "gateway1", expectedBody).Run(func(ctx context.Context, gatewayID string, msg *api.MessageBody) {
+ connectorHandler.HandleGatewayMessage(ctx, "gateway1", gatewayResponse(t, msgID))
+ }).Return(nil).Times(1)
+
+ _, err = connectorHandler.HandleSingleNodeRequest(ctx, msgID, ghcapabilities.Request{
+ URL: testURL,
+ })
+ require.NoError(t, err)
+ })
+
+ t.Run("OK-timeout_is_specified", func(t *testing.T) {
+ ctx := tests.Context(t)
+ log := logger.TestLogger(t)
+ connector := gcmocks.NewGatewayConnector(t)
+ var defaultConfig = ServiceConfig{
+ RateLimiter: common.RateLimiterConfig{
+ GlobalRPS: 100.0,
+ GlobalBurst: 100,
+ PerSenderRPS: 100.0,
+ PerSenderBurst: 100,
+ },
+ }
+ connectorHandler, err := NewOutgoingConnectorHandler(connector, defaultConfig, ghcapabilities.MethodComputeAction, log)
+ require.NoError(t, err)
+
+ msgID := "msgID"
+ testURL := "http://localhost:8080"
+ connector.EXPECT().DonID().Return("donID")
+ connector.EXPECT().AwaitConnection(matches.AnyContext, "gateway1").Return(nil)
+ connector.EXPECT().GatewayIDs().Return([]string{"gateway1"})
+
+ // build the expected body with the defined timeout
+ req := ghcapabilities.Request{
+ URL: testURL,
+ TimeoutMs: 40000,
+ }
+ payload, err := json.Marshal(req)
+ require.NoError(t, err)
+
+ expectedBody := &api.MessageBody{
+ MessageId: msgID,
+ DonId: connector.DonID(),
+ Method: ghcapabilities.MethodComputeAction,
+ Payload: payload,
+ }
+
+ // expect the request body to contain the defined timeout
+ connector.EXPECT().SignAndSendToGateway(mock.Anything, "gateway1", expectedBody).Run(func(ctx context.Context, gatewayID string, msg *api.MessageBody) {
+ connectorHandler.HandleGatewayMessage(ctx, "gateway1", gatewayResponse(t, msgID))
+ }).Return(nil).Times(1)
+
+ _, err = connectorHandler.HandleSingleNodeRequest(ctx, msgID, ghcapabilities.Request{
+ URL: testURL,
+ TimeoutMs: 40000,
+ })
+ require.NoError(t, err)
+ })
+}
+
+func gatewayResponse(t *testing.T, msgID string) *api.Message {
+ headers := map[string]string{"Content-Type": "application/json"}
+ body := []byte("response body")
+ responsePayload, err := json.Marshal(ghcapabilities.Response{
+ StatusCode: 200,
+ Headers: headers,
+ Body: body,
+ ExecutionError: false,
+ })
+ require.NoError(t, err)
+ return &api.Message{
+ Body: api.MessageBody{
+ MessageId: msgID,
+ Method: ghcapabilities.MethodWebAPITarget,
+ Payload: responsePayload,
+ },
+ }
+}
diff --git a/core/capabilities/webapi/target/target.go b/core/capabilities/webapi/target/target.go
index b211e0fe837..4934ab382d5 100644
--- a/core/capabilities/webapi/target/target.go
+++ b/core/capabilities/webapi/target/target.go
@@ -135,18 +135,13 @@ func (c *Capability) Execute(ctx context.Context, req capabilities.CapabilityReq
return capabilities.CapabilityResponse{}, err
}
- payloadBytes, err := json.Marshal(payload)
- if err != nil {
- return capabilities.CapabilityResponse{}, err
- }
-
// Default to SingleNode delivery mode
deliveryMode := defaultIfNil(workflowCfg.DeliveryMode, webapi.SingleNode)
switch deliveryMode {
case webapi.SingleNode:
// blocking call to handle single node request. waits for response from gateway
- resp, err := c.connectorHandler.HandleSingleNodeRequest(ctx, messageID, payloadBytes)
+ resp, err := c.connectorHandler.HandleSingleNodeRequest(ctx, messageID, payload)
if err != nil {
return capabilities.CapabilityResponse{}, err
}
diff --git a/core/capabilities/webapi/target/target_test.go b/core/capabilities/webapi/target/target_test.go
index f51cdcd0d70..1af9a107054 100644
--- a/core/capabilities/webapi/target/target_test.go
+++ b/core/capabilities/webapi/target/target_test.go
@@ -194,7 +194,7 @@ func TestCapability_Execute(t *testing.T) {
require.NoError(t, err)
gatewayResp := gatewayResponse(t, msgID)
-
+ th.connector.EXPECT().AwaitConnection(mock.Anything, "gateway1").Return(nil)
th.connector.On("SignAndSendToGateway", mock.Anything, "gateway1", mock.Anything).Return(nil).Run(func(args mock.Arguments) {
th.connectorHandler.HandleGatewayMessage(ctx, "gateway1", gatewayResp)
}).Once()
diff --git a/core/chains/evm/client/errors.go b/core/chains/evm/client/errors.go
index 1075dc40606..bde97185580 100644
--- a/core/chains/evm/client/errors.go
+++ b/core/chains/evm/client/errors.go
@@ -64,6 +64,7 @@ const (
ServiceUnavailable
TerminallyStuck
TooManyResults
+ ServiceTimeout
)
type ClientErrors map[int]*regexp.Regexp
@@ -160,7 +161,8 @@ var arbitrum = ClientErrors{
Fatal: arbitrumFatal,
L2FeeTooLow: regexp.MustCompile(`(: |^)max fee per gas less than block base fee(:|$)`),
L2Full: regexp.MustCompile(`(: |^)(queue full|sequencer pending tx pool full, please try again)(:|$)`),
- ServiceUnavailable: regexp.MustCompile(`(: |^)502 Bad Gateway: [\s\S]*$|network is unreachable|i/o timeout`),
+ ServiceUnavailable: regexp.MustCompile(`(: |^)502 Bad Gateway: [\s\S]*$|network is unreachable|i/o timeout|(: |^)503 Service Temporarily Unavailable(:|$)`),
+ ServiceTimeout: regexp.MustCompile(`(: |^)408 Request Timeout(:|$)`),
}
// Treasure
@@ -398,6 +400,11 @@ func (s *SendError) IsServiceUnavailable(configErrors *ClientErrors) bool {
return s.is(ServiceUnavailable, configErrors) || pkgerrors.Is(s.err, commonclient.ErroringNodeError)
}
+// IsServiceTimeout indicates if the error was caused by a service timeout
+func (s *SendError) IsServiceTimeout(configErrors *ClientErrors) bool {
+ return s.is(ServiceTimeout, configErrors)
+}
+
// IsTerminallyStuck indicates if a transaction was stuck without any chance of inclusion
func (s *SendError) IsTerminallyStuckConfigError(configErrors *ClientErrors) bool {
return s.is(TerminallyStuck, configErrors)
@@ -619,6 +626,10 @@ func ClassifySendError(err error, clientErrors config.ClientErrors, lggr logger.
lggr.Errorw(fmt.Sprintf("service unavailable while sending transaction %x", tx.Hash()), "err", sendError, "etx", tx)
return commonclient.Retryable
}
+ if sendError.IsServiceTimeout(configErrors) {
+ lggr.Errorw(fmt.Sprintf("service timed out while sending transaction %x", tx.Hash()), "err", sendError, "etx", tx)
+ return commonclient.Retryable
+ }
if sendError.IsTimeout() {
lggr.Errorw(fmt.Sprintf("timeout while sending transaction %x", tx.Hash()), "err", sendError, "etx", tx)
return commonclient.Retryable
@@ -666,7 +677,7 @@ var drpc = ClientErrors{
// Linkpool, Blockdaemon, and Chainstack all return "request timed out" if the log results are too large for them to process
var defaultClient = ClientErrors{
- TooManyResults: regexp.MustCompile(`request timed out`),
+ TooManyResults: regexp.MustCompile(`request timed out|408 Request Timed Out`),
}
// JSON-RPC error codes which can indicate a refusal of the server to process an eth_getLogs request because the result set is too large
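
As a quick illustration of the new classification path, the sketch below compiles the same Arbitrum ServiceTimeout pattern added above and shows which messages it accepts; isServiceTimeout here is a stand-in helper, not the evmclient method itself.

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the Arbitrum ServiceTimeout entry added above.
var arbitrumServiceTimeout = regexp.MustCompile(`(: |^)408 Request Timeout(:|$)`)

// isServiceTimeout is an illustrative stand-in for SendError.IsServiceTimeout.
func isServiceTimeout(msg string) bool {
	return arbitrumServiceTimeout.MatchString(msg)
}

func main() {
	fmt.Println(isServiceTimeout("call failed: 408 Request Timeout: {")) // true  -> classified Retryable
	fmt.Println(isServiceTimeout("request timeout"))                     // false -> falls through to other checks
}
```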
diff --git a/core/chains/evm/client/errors_test.go b/core/chains/evm/client/errors_test.go
index 75ac21597d8..1f9aaa53365 100644
--- a/core/chains/evm/client/errors_test.go
+++ b/core/chains/evm/client/errors_test.go
@@ -245,6 +245,7 @@ func Test_Eth_Errors(t *testing.T) {
{"network is unreachable", true, "Arbitrum"},
{"client error service unavailable", true, "tomlConfig"},
{"[Request ID: 825608a8-fd8a-4b5b-aea7-92999509306d] Error invoking RPC: [Request ID: 825608a8-fd8a-4b5b-aea7-92999509306d] Transaction execution returns a null value for transaction", true, "hedera"},
+		{"call failed: 503 Service Temporarily Unavailable: \r\n503 Service Temporarily Unavailable\r\n\r\n503 Service Temporarily Unavailable\r\n\r\n\r\n", true, "Arbitrum"},
}
for _, test := range tests {
err = evmclient.NewSendErrorS(test.message)
@@ -260,6 +261,20 @@ func Test_Eth_Errors(t *testing.T) {
}
})
+ t.Run("IsServiceTimeout", func(t *testing.T) {
+ tests := []errorCase{
+ {"call failed: 408 Request Timeout: {", true, "Arbitrum"},
+ {"408 Request Timeout: {\"id\":303,\"jsonrpc\":\"2.0\",\"error\":{\"code\\\":-32009,\\\"message\\\":\\\"request timeout\\\"}}\",\"errVerbose\":\"408 Request Timeout:\n", true, "Arbitrum"},
+ {"request timeout", false, "tomlConfig"},
+ }
+ for _, test := range tests {
+ err = evmclient.NewSendErrorS(test.message)
+ assert.Equal(t, err.IsServiceTimeout(clientErrors), test.expect)
+ err = newSendErrorWrapped(test.message)
+ assert.Equal(t, err.IsServiceTimeout(clientErrors), test.expect)
+ }
+ })
+
t.Run("IsTxFeeExceedsCap", func(t *testing.T) {
tests := []errorCase{
{"tx fee (1.10 ether) exceeds the configured cap (1.00 ether)", true, "geth"},
diff --git a/core/chains/evm/gas/models.go b/core/chains/evm/gas/models.go
index 6cb89818c8f..2d6fe971d9c 100644
--- a/core/chains/evm/gas/models.go
+++ b/core/chains/evm/gas/models.go
@@ -9,6 +9,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rpc"
pkgerrors "github.com/pkg/errors"
+
"github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink-common/pkg/services"
bigmath "github.com/smartcontractkit/chainlink-common/pkg/utils/big_math"
diff --git a/core/chains/evm/gas/rollups/l1_oracle.go b/core/chains/evm/gas/rollups/l1_oracle.go
index ceecb80c608..d9f12dfa79e 100644
--- a/core/chains/evm/gas/rollups/l1_oracle.go
+++ b/core/chains/evm/gas/rollups/l1_oracle.go
@@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/rpc"
+
"github.com/smartcontractkit/chainlink-common/pkg/services"
"github.com/smartcontractkit/chainlink-common/pkg/logger"
diff --git a/core/chains/evm/logpoller/log_poller.go b/core/chains/evm/logpoller/log_poller.go
index 6ef4fefecee..725fdbda63c 100644
--- a/core/chains/evm/logpoller/log_poller.go
+++ b/core/chains/evm/logpoller/log_poller.go
@@ -23,6 +23,8 @@ import (
pkgerrors "github.com/pkg/errors"
"golang.org/x/exp/maps"
+ commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
+
"github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink-common/pkg/services"
"github.com/smartcontractkit/chainlink-common/pkg/timeutil"
@@ -91,6 +93,7 @@ type Client interface {
}
type HeadTracker interface {
+ services.Service
LatestAndFinalizedBlock(ctx context.Context) (latest, finalized *evmtypes.Head, err error)
}
@@ -99,7 +102,6 @@ var (
ErrReplayRequestAborted = pkgerrors.New("aborted, replay request cancelled")
ErrReplayInProgress = pkgerrors.New("replay request cancelled, but replay is already in progress")
ErrLogPollerShutdown = pkgerrors.New("replay aborted due to log poller shutdown")
- ErrFinalityViolated = pkgerrors.New("finality violated")
)
type logPoller struct {
@@ -525,7 +527,7 @@ func (lp *logPoller) Close() error {
func (lp *logPoller) Healthy() error {
if lp.finalityViolated.Load() {
- return ErrFinalityViolated
+ return commontypes.ErrFinalityViolated
}
return nil
}
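
With the sentinel moved to chainlink-common, callers can match it with errors.Is regardless of which service reported it. A minimal sketch, assuming only the commontypes import shown in this diff:

```go
package main

import (
	"errors"
	"fmt"

	commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
)

// isFinalityViolated reports whether a health error carries the shared
// finality-violation sentinel, whether it came from Healthy() or HealthReport().
func isFinalityViolated(healthErr error) bool {
	return errors.Is(healthErr, commontypes.ErrFinalityViolated)
}

func main() {
	fmt.Println(isFinalityViolated(commontypes.ErrFinalityViolated)) // true
	fmt.Println(isFinalityViolated(nil))                             // false
}
```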
diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go
index 7114960efdd..df688cd5e5c 100644
--- a/core/chains/evm/logpoller/log_poller_test.go
+++ b/core/chains/evm/logpoller/log_poller_test.go
@@ -22,10 +22,13 @@ import (
"github.com/stretchr/testify/require"
"go.uber.org/zap/zapcore"
+ commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
+
"github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink-common/pkg/types/query"
"github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives"
commonutils "github.com/smartcontractkit/chainlink-common/pkg/utils"
+
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/chaintype"
htMocks "github.com/smartcontractkit/chainlink/v2/common/headtracker/mocks"
@@ -1106,7 +1109,8 @@ func TestLogPoller_ReorgDeeperThanFinality(t *testing.T) {
secondPoll := th.PollAndSaveLogs(testutils.Context(t), firstPoll)
assert.Equal(t, firstPoll, secondPoll)
- assert.Equal(t, logpoller.ErrFinalityViolated, th.LogPoller.Healthy())
+ require.Equal(t, commontypes.ErrFinalityViolated, th.LogPoller.Healthy())
+ require.Equal(t, commontypes.ErrFinalityViolated, th.LogPoller.HealthReport()[th.LogPoller.Name()])
// Manually remove re-org'd chain from the log poller to bring it back to life
// LogPoller should be healthy again after first poll
@@ -1116,7 +1120,8 @@ func TestLogPoller_ReorgDeeperThanFinality(t *testing.T) {
// Poll from latest
recoveryPoll := th.PollAndSaveLogs(testutils.Context(t), 1)
assert.Equal(t, int64(35), recoveryPoll)
- assert.NoError(t, th.LogPoller.Healthy())
+ require.NoError(t, th.LogPoller.Healthy())
+ require.NoError(t, th.LogPoller.HealthReport()[th.LogPoller.Name()])
})
}
}
diff --git a/core/cmd/shell_local.go b/core/cmd/shell_local.go
index 412231308b6..1fdc1a46d34 100644
--- a/core/cmd/shell_local.go
+++ b/core/cmd/shell_local.go
@@ -35,6 +35,8 @@ import (
cutils "github.com/smartcontractkit/chainlink-common/pkg/utils"
+ pgcommon "github.com/smartcontractkit/chainlink-common/pkg/sqlutil/pg"
+
"github.com/smartcontractkit/chainlink/v2/core/build"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
@@ -47,7 +49,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/sessions"
"github.com/smartcontractkit/chainlink/v2/core/shutdown"
"github.com/smartcontractkit/chainlink/v2/core/static"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
"github.com/smartcontractkit/chainlink/v2/core/store/migrate"
"github.com/smartcontractkit/chainlink/v2/core/utils"
"github.com/smartcontractkit/chainlink/v2/core/web"
@@ -805,7 +806,7 @@ func (s *Shell) PrepareTestDatabase(c *cli.Context) error {
// Creating pristine DB copy to speed up FullTestDB
dbUrl := cfg.Database().URL()
- db, err := sqlx.Open(string(dialects.Postgres), dbUrl.String())
+ db, err := sqlx.Open(string(pgcommon.Postgres), dbUrl.String())
if err != nil {
return s.errorOut(err)
}
@@ -1088,7 +1089,7 @@ type dbConfig interface {
MaxOpenConns() int
MaxIdleConns() int
URL() url.URL
- Dialect() dialects.DialectName
+ Dialect() pgcommon.DialectName
}
func newConnection(ctx context.Context, cfg dbConfig) (*sqlx.DB, error) {
@@ -1104,7 +1105,7 @@ func dropAndCreateDB(parsed url.URL, force bool) (err error) {
// to a different one. template1 should be present on all postgres installations
dbname := parsed.Path[1:]
parsed.Path = "/template1"
- db, err := sql.Open(string(dialects.Postgres), parsed.String())
+ db, err := sql.Open(string(pgcommon.Postgres), parsed.String())
if err != nil {
return fmt.Errorf("unable to open postgres database for creating test db: %+v", err)
}
@@ -1203,7 +1204,7 @@ func checkSchema(dbURL url.URL, prevSchema string) error {
}
func insertFixtures(dbURL url.URL, pathToFixtures string) (err error) {
- db, err := sql.Open(string(dialects.Postgres), dbURL.String())
+ db, err := sql.Open(string(pgcommon.Postgres), dbURL.String())
if err != nil {
return fmt.Errorf("unable to open postgres database for creating test db: %+v", err)
}
diff --git a/core/cmd/shell_local_test.go b/core/cmd/shell_local_test.go
index 78254c0279e..7cdc8c21840 100644
--- a/core/cmd/shell_local_test.go
+++ b/core/cmd/shell_local_test.go
@@ -10,6 +10,7 @@ import (
"time"
commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
+ pgcommon "github.com/smartcontractkit/chainlink-common/pkg/sqlutil/pg"
"github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox"
"github.com/smartcontractkit/chainlink/v2/common/client"
@@ -29,7 +30,6 @@ import (
chainlinkmocks "github.com/smartcontractkit/chainlink/v2/core/services/chainlink/mocks"
evmrelayer "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
"github.com/smartcontractkit/chainlink/v2/core/sessions/localauth"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
"github.com/smartcontractkit/chainlink/v2/core/utils"
"github.com/smartcontractkit/chainlink/v2/core/utils/testutils/heavyweight"
@@ -283,7 +283,7 @@ func TestShell_RebroadcastTransactions_Txm(t *testing.T) {
// test multiple connections to the database, and changes made within
// the transaction cannot be seen from another connection.
config, sqlxDB := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
- c.Database.Dialect = dialects.Postgres
+ c.Database.Dialect = pgcommon.Postgres
// evm config is used in this test. but if set, it must be pass config validation.
// simplest to make it nil
c.EVM = nil
@@ -363,7 +363,7 @@ func TestShell_RebroadcastTransactions_OutsideRange_Txm(t *testing.T) {
// test multiple connections to the database, and changes made within
// the transaction cannot be seen from another connection.
config, sqlxDB := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
- c.Database.Dialect = dialects.Postgres
+ c.Database.Dialect = pgcommon.Postgres
// evm config is used in this test. but if set, it must be pass config validation.
// simplest to make it nil
c.EVM = nil
@@ -441,7 +441,7 @@ func TestShell_RebroadcastTransactions_AddressCheck(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
config, sqlxDB := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
- c.Database.Dialect = dialects.Postgres
+ c.Database.Dialect = pgcommon.Postgres
c.EVM = nil
// seems to be needed for config validate
@@ -499,7 +499,7 @@ func TestShell_RebroadcastTransactions_AddressCheck(t *testing.T) {
func TestShell_CleanupChainTables(t *testing.T) {
// Just check if it doesn't error, command itself shouldn't be changed unless major schema changes were made.
// It would be really hard to write a test that accounts for schema changes, so this should be enough to alarm us that something broke.
- config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { c.Database.Dialect = dialects.Postgres })
+ config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { c.Database.Dialect = pgcommon.Postgres })
client := cmd.Shell{
Config: config,
Logger: logger.TestLogger(t),
diff --git a/core/config/database_config.go b/core/config/database_config.go
index f1cdffc2f46..56f8f8165d4 100644
--- a/core/config/database_config.go
+++ b/core/config/database_config.go
@@ -4,7 +4,7 @@ import (
"net/url"
"time"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
+ pgcommon "github.com/smartcontractkit/chainlink-common/pkg/sqlutil/pg"
)
type Backup interface {
@@ -35,7 +35,7 @@ type Database interface {
DefaultIdleInTxSessionTimeout() time.Duration
DefaultLockTimeout() time.Duration
DefaultQueryTimeout() time.Duration
- Dialect() dialects.DialectName
+ Dialect() pgcommon.DialectName
LogSQL() bool
MaxIdleConns() int
MaxOpenConns() int
diff --git a/core/config/docs/defaults.go b/core/config/docs/defaults.go
index 53e6433a8ef..0d94be1b3cc 100644
--- a/core/config/docs/defaults.go
+++ b/core/config/docs/defaults.go
@@ -5,9 +5,10 @@ import (
"strings"
"github.com/smartcontractkit/chainlink-common/pkg/config"
+ pgcommon "github.com/smartcontractkit/chainlink-common/pkg/sqlutil/pg"
+
"github.com/smartcontractkit/chainlink/v2/core/config/toml"
"github.com/smartcontractkit/chainlink/v2/core/services/chainlink/cfgtest"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
)
var (
@@ -22,7 +23,7 @@ func init() {
func CoreDefaults() (c toml.Core) {
c.SetFrom(&defaults)
- c.Database.Dialect = dialects.Postgres // not user visible - overridden for tests only
+ c.Database.Dialect = pgcommon.Postgres // not user visible - overridden for tests only
c.Tracing.Attributes = make(map[string]string)
return
}
diff --git a/core/config/toml/types.go b/core/config/toml/types.go
index 475e95d53df..620f7d96eee 100644
--- a/core/config/toml/types.go
+++ b/core/config/toml/types.go
@@ -16,6 +16,7 @@ import (
ocrcommontypes "github.com/smartcontractkit/libocr/commontypes"
commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
+ pgcommon "github.com/smartcontractkit/chainlink-common/pkg/sqlutil/pg"
"github.com/smartcontractkit/chainlink/v2/core/build"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
@@ -23,10 +24,8 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/config/parse"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
"github.com/smartcontractkit/chainlink/v2/core/sessions"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
"github.com/smartcontractkit/chainlink/v2/core/utils"
-
configutils "github.com/smartcontractkit/chainlink/v2/core/utils/config"
)
@@ -339,7 +338,7 @@ type Database struct {
DefaultIdleInTxSessionTimeout *commonconfig.Duration
DefaultLockTimeout *commonconfig.Duration
DefaultQueryTimeout *commonconfig.Duration
- Dialect dialects.DialectName `toml:"-"`
+ Dialect pgcommon.DialectName `toml:"-"`
LogQueries *bool
MaxIdleConns *int64
MaxOpenConns *int64
diff --git a/core/gethwrappers/go_generate_test.go b/core/gethwrappers/go_generate_test.go
index a6253cb1a66..1066149278f 100644
--- a/core/gethwrappers/go_generate_test.go
+++ b/core/gethwrappers/go_generate_test.go
@@ -16,7 +16,8 @@ import (
"github.com/fatih/color"
cutils "github.com/smartcontractkit/chainlink-common/pkg/utils"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+
"github.com/smartcontractkit/chainlink/v2/core/utils"
"github.com/stretchr/testify/assert"
@@ -29,7 +30,7 @@ const compileCommand = "../../contracts/scripts/native_solc_compile_all"
// contract artifacts in contracts/solc with the abi and bytecode stored in the
// contract wrapper
func TestCheckContractHashesFromLastGoGenerate(t *testing.T) {
- testutils.SkipShort(t, "requires compiled artifacts")
+ tests.SkipShort(t, "requires compiled artifacts")
versions, err := ReadVersionsDB()
require.NoError(t, err)
require.NotEmpty(t, versions.GethVersion, `version DB should have a "GETH_VERSION:" line`)
diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go
index 7ade85f4bf7..a55c57cc9a2 100644
--- a/core/internal/cltest/cltest.go
+++ b/core/internal/cltest/cltest.go
@@ -47,6 +47,7 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/loop"
"github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
"github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
"github.com/smartcontractkit/chainlink/v2/common/client"
commonmocks "github.com/smartcontractkit/chainlink/v2/common/types/mocks"
@@ -531,7 +532,7 @@ func NewEthMocks(t testing.TB) *evmclimocks.Client {
}
func NewEthMocksWithStartupAssertions(t testing.TB) *evmclimocks.Client {
- testutils.SkipShort(t, "long test")
+ tests.SkipShort(t, "long test")
c := NewEthMocks(t)
chHead := make(<-chan *evmtypes.Head)
c.On("Dial", mock.Anything).Maybe().Return(nil)
@@ -554,7 +555,7 @@ func NewEthMocksWithStartupAssertions(t testing.TB) *evmclimocks.Client {
// NewEthMocksWithTransactionsOnBlocksAssertions sets an Eth mock with transactions on blocks
func NewEthMocksWithTransactionsOnBlocksAssertions(t testing.TB) *evmclimocks.Client {
- testutils.SkipShort(t, "long test")
+ tests.SkipShort(t, "long test")
c := NewEthMocks(t)
chHead := make(<-chan *evmtypes.Head)
c.On("Dial", mock.Anything).Maybe().Return(nil)
diff --git a/core/internal/features/features_test.go b/core/internal/features/features_test.go
index 2d0d046857c..88305403f2b 100644
--- a/core/internal/features/features_test.go
+++ b/core/internal/features/features_test.go
@@ -40,6 +40,7 @@ import (
commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
"github.com/smartcontractkit/chainlink-common/pkg/services/servicetest"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
"github.com/smartcontractkit/chainlink/v2/core/auth"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
@@ -798,7 +799,7 @@ func setupForwarderEnabledNode(t *testing.T, owner *bind.TransactOpts, portV2 in
func TestIntegration_OCR(t *testing.T) {
t.Skip("fails after geth upgrade https://github.com/smartcontractkit/chainlink/pull/11809; passes local but fails CI")
- testutils.SkipShort(t, "long test")
+ tests.SkipShort(t, "long test")
t.Parallel()
tests := []struct {
id int
@@ -1031,7 +1032,7 @@ observationSource = """
func TestIntegration_OCR_ForwarderFlow(t *testing.T) {
t.Skip("fails after geth upgrade https://github.com/smartcontractkit/chainlink/pull/11809")
- testutils.SkipShort(t, "long test")
+ tests.SkipShort(t, "long test")
t.Parallel()
numOracles := 4
t.Run("ocr_forwarder_flow", func(t *testing.T) {
diff --git a/core/internal/testutils/configtest/general_config.go b/core/internal/testutils/configtest/general_config.go
index 63aba18c351..f0851c67740 100644
--- a/core/internal/testutils/configtest/general_config.go
+++ b/core/internal/testutils/configtest/general_config.go
@@ -7,14 +7,16 @@ import (
"github.com/stretchr/testify/require"
+ pgcommon "github.com/smartcontractkit/chainlink-common/pkg/sqlutil/pg"
+
commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
+
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
evmcfg "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
)
@@ -48,7 +50,7 @@ func overrides(c *chainlink.Config, s *chainlink.Secrets) {
c.InsecureFastScrypt = ptr(true)
c.ShutdownGracePeriod = commonconfig.MustNewDuration(testutils.DefaultWaitTimeout)
- c.Database.Dialect = dialects.TransactionWrappedPostgres
+ c.Database.Dialect = pgcommon.TransactionWrappedPostgres
c.Database.Lock.Enabled = ptr(false)
c.Database.MaxIdleConns = ptr[int64](20)
c.Database.MaxOpenConns = ptr[int64](20)
diff --git a/core/internal/testutils/pgtest/pgtest.go b/core/internal/testutils/pgtest/pgtest.go
index 8464604b667..ee17d8f4d14 100644
--- a/core/internal/testutils/pgtest/pgtest.go
+++ b/core/internal/testutils/pgtest/pgtest.go
@@ -3,26 +3,25 @@ package pgtest
import (
"testing"
- "github.com/google/uuid"
"github.com/jmoiron/sqlx"
- "github.com/scylladb/go-reflectx"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
+ "github.com/smartcontractkit/chainlink-common/pkg/sqlutil/pg"
"github.com/smartcontractkit/chainlink-common/pkg/utils"
+
+ "github.com/smartcontractkit/chainlink/v2/core/config/env"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
)
func NewSqlxDB(t testing.TB) *sqlx.DB {
testutils.SkipShortDB(t)
- db, err := sqlx.Open(string(dialects.TransactionWrappedPostgres), uuid.New().String())
- require.NoError(t, err)
- t.Cleanup(func() { assert.NoError(t, db.Close()) })
- db.MapperFunc(reflectx.CamelToSnakeASCII)
-
- return db
+ dbURL := string(env.DatabaseURL.Get())
+ if dbURL == "" {
+ t.Errorf("you must provide a CL_DATABASE_URL environment variable")
+ return nil
+ }
+ return pg.NewTestDB(t, dbURL)
}
func MustExec(t *testing.T, ds sqlutil.DataSource, stmt string, args ...interface{}) {
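
A minimal usage sketch of the rewritten helper, assuming CL_DATABASE_URL points at a local Postgres database whose name ends in _test; the package and test names are illustrative.

```go
package sometest

import (
	"testing"

	"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
)

// TestWithTestDB runs against the connection returned by pg.NewTestDB
// (the txdb wrapper now lives in chainlink-common rather than core).
func TestWithTestDB(t *testing.T) {
	db := pgtest.NewSqlxDB(t) // skips under -short; errors if CL_DATABASE_URL is unset
	pgtest.MustExec(t, db, `SELECT 1`)
}
```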
diff --git a/core/internal/testutils/pgtest/txdb.go b/core/internal/testutils/pgtest/txdb.go
deleted file mode 100644
index 7591054305c..00000000000
--- a/core/internal/testutils/pgtest/txdb.go
+++ /dev/null
@@ -1,509 +0,0 @@
-package pgtest
-
-import (
- "context"
- "database/sql"
- "database/sql/driver"
- "flag"
- "fmt"
- "io"
- "net/url"
- "strings"
- "sync"
- "testing"
-
- "github.com/jmoiron/sqlx"
- "go.uber.org/multierr"
-
- "github.com/smartcontractkit/chainlink/v2/core/config/env"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
-)
-
-// txdb is a simplified version of https://github.com/DATA-DOG/go-txdb
-//
-// The original lib has various problems and is hard to understand because it
-// tries to be more general. The version in this file is more tightly focused
-// to our needs and should be easier to reason about and less likely to have
-// subtle bugs/races.
-//
-// It doesn't currently support savepoints but could be made to if necessary.
-//
-// Transaction BEGIN/ROLLBACK effectively becomes a no-op, this should have no
-// negative impact on normal test operation.
-//
-// If you MUST test BEGIN/ROLLBACK behaviour, you will have to configure your
-// store to use the raw DialectPostgres dialect and setup a one-use database.
-// See heavyweight.FullTestDB() as a convenience function to help you do this,
-// but please use sparingly because as it's name implies, it is expensive.
-func init() {
- testing.Init()
- if !flag.Parsed() {
- flag.Parse()
- }
- if testing.Short() {
- // -short tests don't need a DB
- return
- }
- dbURL := string(env.DatabaseURL.Get())
- if dbURL == "" {
- panic("you must provide a CL_DATABASE_URL environment variable")
- }
-
- parsed, err := url.Parse(dbURL)
- if err != nil {
- panic(err)
- }
- if parsed.Path == "" {
- msg := fmt.Sprintf("invalid %[1]s: `%[2]s`. You must set %[1]s env var to point to your test database. Note that the test database MUST end in `_test` to differentiate from a possible production DB. HINT: Try %[1]s=postgresql://postgres@localhost:5432/chainlink_test?sslmode=disable", env.DatabaseURL, parsed.String())
- panic(msg)
- }
- if !strings.HasSuffix(parsed.Path, "_test") {
- msg := fmt.Sprintf("cannot run tests against database named `%s`. Note that the test database MUST end in `_test` to differentiate from a possible production DB. HINT: Try %s=postgresql://postgres@localhost:5432/chainlink_test?sslmode=disable", parsed.Path[1:], env.DatabaseURL)
- panic(msg)
- }
- name := string(dialects.TransactionWrappedPostgres)
- sql.Register(name, &txDriver{
- dbURL: dbURL,
- conns: make(map[string]*conn),
- })
- sqlx.BindDriver(name, sqlx.DOLLAR)
-}
-
-var _ driver.Conn = &conn{}
-
-var _ driver.Validator = &conn{}
-var _ driver.SessionResetter = &conn{}
-
-// txDriver is an sql driver which runs on a single transaction.
-// When `Close` is called, transaction is rolled back.
-type txDriver struct {
- sync.Mutex
- db *sql.DB
- conns map[string]*conn
-
- dbURL string
-}
-
-func (d *txDriver) Open(dsn string) (driver.Conn, error) {
- d.Lock()
- defer d.Unlock()
- // Open real db connection if its the first call
- if d.db == nil {
- db, err := sql.Open(string(dialects.Postgres), d.dbURL)
- if err != nil {
- return nil, err
- }
- d.db = db
- }
- c, exists := d.conns[dsn]
- if !exists || !c.tryOpen() {
- tx, err := d.db.Begin()
- if err != nil {
- return nil, err
- }
- c = &conn{tx: tx, opened: 1, dsn: dsn}
- c.removeSelf = func() error {
- return d.deleteConn(c)
- }
- d.conns[dsn] = c
- }
- return c, nil
-}
-
-// deleteConn is called by a connection when it is closed via the `close` method.
-// It also auto-closes the DB when the last checked out connection is closed.
-func (d *txDriver) deleteConn(c *conn) error {
- // must lock here to avoid racing with Open
- d.Lock()
- defer d.Unlock()
-
- if d.conns[c.dsn] != c {
- return nil // already been replaced
- }
- delete(d.conns, c.dsn)
- if len(d.conns) == 0 && d.db != nil {
- if err := d.db.Close(); err != nil {
- return err
- }
- d.db = nil
- }
- return nil
-}
-
-type conn struct {
- sync.Mutex
- dsn string
- tx *sql.Tx // tx may be shared by many conns, definitive one lives in the map keyed by DSN on the txDriver. Do not modify from conn
- closed bool
- opened int
- removeSelf func() error
-}
-
-func (c *conn) Begin() (driver.Tx, error) {
- c.Lock()
- defer c.Unlock()
- if c.closed {
- panic("conn is closed")
- }
- // Begin is a noop because the transaction was already opened
- return tx{c.tx}, nil
-}
-
-// Implement the "ConnBeginTx" interface
-func (c *conn) BeginTx(_ context.Context, opts driver.TxOptions) (driver.Tx, error) {
- // Context is ignored, because single transaction is shared by all callers, thus caller should not be able to
- // control it with local context
- return c.Begin()
-}
-
-// Prepare returns a prepared statement, bound to this connection.
-func (c *conn) Prepare(query string) (driver.Stmt, error) {
- return c.PrepareContext(context.Background(), query)
-}
-
-// Implement the "ConnPrepareContext" interface
-func (c *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
- c.Lock()
- defer c.Unlock()
- if c.closed {
- panic("conn is closed")
- }
-
- // TODO: Fix context handling
- // FIXME: It is not safe to give the passed in context to the tx directly
- // because the tx is shared by many conns and cancelling the context will
- // destroy the tx which can affect other conns
- st, err := c.tx.PrepareContext(context.Background(), query)
- if err != nil {
- return nil, err
- }
- return &stmt{st, c}, nil
-}
-
-// IsValid is called prior to placing the connection into the
-// connection pool by database/sql. The connection will be discarded if false is returned.
-func (c *conn) IsValid() bool {
- c.Lock()
- defer c.Unlock()
- return !c.closed
-}
-
-func (c *conn) ResetSession(ctx context.Context) error {
- // Ensure bad connections are reported: From database/sql/driver:
- // If a connection is never returned to the connection pool but immediately reused, then
- // ResetSession is called prior to reuse but IsValid is not called.
- c.Lock()
- defer c.Unlock()
- if c.closed {
- return driver.ErrBadConn
- }
-
- return nil
-}
-
-// pgx returns nil
-func (c *conn) CheckNamedValue(nv *driver.NamedValue) error {
- return nil
-}
-
-// Implement the "QueryerContext" interface
-func (c *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
- c.Lock()
- defer c.Unlock()
- if c.closed {
- panic("conn is closed")
- }
-
- // TODO: Fix context handling
- rs, err := c.tx.QueryContext(context.Background(), query, mapNamedArgs(args)...)
- if err != nil {
- return nil, err
- }
- defer rs.Close()
-
- return buildRows(rs)
-}
-
-// Implement the "ExecerContext" interface
-func (c *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
- c.Lock()
- defer c.Unlock()
- if c.closed {
- panic("conn is closed")
- }
- // TODO: Fix context handling
- return c.tx.ExecContext(context.Background(), query, mapNamedArgs(args)...)
-}
-
-// tryOpen attempts to increment the open count, but returns false if closed.
-func (c *conn) tryOpen() bool {
- c.Lock()
- defer c.Unlock()
- if c.closed {
- return false
- }
- c.opened++
- return true
-}
-
-// Close invalidates and potentially stops any current
-// prepared statements and transactions, marking this
-// connection as no longer in use.
-//
-// Because the sql package maintains a free pool of
-// connections and only calls Close when there's a surplus of
-// idle connections, it shouldn't be necessary for drivers to
-// do their own connection caching.
-//
-// Drivers must ensure all network calls made by Close
-// do not block indefinitely (e.g. apply a timeout).
-func (c *conn) Close() (err error) {
- if !c.close() {
- return
- }
- // Wait to remove self to avoid nesting locks.
- if err := c.removeSelf(); err != nil {
- panic(err)
- }
- return
-}
-
-func (c *conn) close() bool {
- c.Lock()
- defer c.Unlock()
- if c.closed {
- // Double close, should be a safe to make this a noop
- // PGX allows double close
- // See: https://github.com/jackc/pgx/blob/a457da8bffa4f90ad672fa093ee87f20cf06687b/conn.go#L249
- return false
- }
-
- c.opened--
- if c.opened > 0 {
- return false
- }
- if c.tx != nil {
- if err := c.tx.Rollback(); err != nil {
- panic(err)
- }
- c.tx = nil
- }
- c.closed = true
- return true
-}
-
-type tx struct {
- tx *sql.Tx
-}
-
-func (tx tx) Commit() error {
- // Commit is a noop because the transaction will be rolled back at the end
- return nil
-}
-
-func (tx tx) Rollback() error {
- // Rollback is a noop because the transaction will be rolled back at the end
- return nil
-}
-
-type stmt struct {
- st *sql.Stmt
- conn *conn
-}
-
-func (s stmt) Exec(args []driver.Value) (driver.Result, error) {
- s.conn.Lock()
- defer s.conn.Unlock()
- if s.conn.closed {
- panic("conn is closed")
- }
- return s.st.Exec(mapArgs(args)...)
-}
-
-// Implement the "StmtExecContext" interface
-func (s *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
- s.conn.Lock()
- defer s.conn.Unlock()
- if s.conn.closed {
- panic("conn is closed")
- }
- // TODO: Fix context handling
- return s.st.ExecContext(context.Background(), mapNamedArgs(args)...)
-}
-
-func mapArgs(args []driver.Value) (res []interface{}) {
- res = make([]interface{}, len(args))
- for i := range args {
- res[i] = args[i]
- }
- return
-}
-
-func (s stmt) NumInput() int {
- return -1
-}
-
-func (s stmt) Query(args []driver.Value) (driver.Rows, error) {
- s.conn.Lock()
- defer s.conn.Unlock()
- if s.conn.closed {
- panic("conn is closed")
- }
- rows, err := s.st.Query(mapArgs(args)...)
- defer func() {
- err = multierr.Combine(err, rows.Close())
- }()
- if err != nil {
- return nil, err
- }
- return buildRows(rows)
-}
-
-// Implement the "StmtQueryContext" interface
-func (s *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
- s.conn.Lock()
- defer s.conn.Unlock()
- if s.conn.closed {
- panic("conn is closed")
- }
- // TODO: Fix context handling
- rows, err := s.st.QueryContext(context.Background(), mapNamedArgs(args)...)
- if err != nil {
- return nil, err
- }
- return buildRows(rows)
-}
-
-func (s stmt) Close() error {
- s.conn.Lock()
- defer s.conn.Unlock()
- return s.st.Close()
-}
-
-func buildRows(r *sql.Rows) (driver.Rows, error) {
- set := &rowSets{}
- rs := &rows{}
- if err := rs.read(r); err != nil {
- return set, err
- }
- set.sets = append(set.sets, rs)
- for r.NextResultSet() {
- rss := &rows{}
- if err := rss.read(r); err != nil {
- return set, err
- }
- set.sets = append(set.sets, rss)
- }
- return set, nil
-}
-
-// Implement the "RowsNextResultSet" interface
-func (rs *rowSets) HasNextResultSet() bool {
- return rs.pos+1 < len(rs.sets)
-}
-
-// Implement the "RowsNextResultSet" interface
-func (rs *rowSets) NextResultSet() error {
- if !rs.HasNextResultSet() {
- return io.EOF
- }
-
- rs.pos++
- return nil
-}
-
-type rows struct {
- rows [][]driver.Value
- pos int
- cols []string
- colTypes []*sql.ColumnType
-}
-
-func (r *rows) Columns() []string {
- return r.cols
-}
-
-func (r *rows) ColumnTypeDatabaseTypeName(index int) string {
- return r.colTypes[index].DatabaseTypeName()
-}
-
-func (r *rows) Next(dest []driver.Value) error {
- r.pos++
- if r.pos > len(r.rows) {
- return io.EOF
- }
-
- for i, val := range r.rows[r.pos-1] {
- dest[i] = *(val.(*interface{}))
- }
-
- return nil
-}
-
-func (r *rows) Close() error {
- return nil
-}
-
-func (r *rows) read(rs *sql.Rows) error {
- var err error
- r.cols, err = rs.Columns()
- if err != nil {
- return err
- }
-
- r.colTypes, err = rs.ColumnTypes()
- if err != nil {
- return err
- }
-
- for rs.Next() {
- values := make([]interface{}, len(r.cols))
- for i := range values {
- values[i] = new(interface{})
- }
- if err := rs.Scan(values...); err != nil {
- return err
- }
- row := make([]driver.Value, len(r.cols))
- for i, v := range values {
- row[i] = driver.Value(v)
- }
- r.rows = append(r.rows, row)
- }
- return rs.Err()
-}
-
-type rowSets struct {
- sets []*rows
- pos int
-}
-
-func (rs *rowSets) Columns() []string {
- return rs.sets[rs.pos].cols
-}
-
-func (rs *rowSets) ColumnTypeDatabaseTypeName(index int) string {
- return rs.sets[rs.pos].ColumnTypeDatabaseTypeName(index)
-}
-
-func (rs *rowSets) Close() error {
- return nil
-}
-
-// advances to next row
-func (rs *rowSets) Next(dest []driver.Value) error {
- return rs.sets[rs.pos].Next(dest)
-}
-
-func mapNamedArgs(args []driver.NamedValue) (res []interface{}) {
- res = make([]interface{}, len(args))
- for i := range args {
- name := args[i].Name
- if name != "" {
- res[i] = sql.Named(name, args[i].Value)
- } else {
- res[i] = args[i].Value
- }
- }
- return
-}
diff --git a/core/internal/testutils/testutils.go b/core/internal/testutils/testutils.go
index 0504570365b..53b555c0e8b 100644
--- a/core/internal/testutils/testutils.go
+++ b/core/internal/testutils/testutils.go
@@ -32,6 +32,7 @@ import (
// NOTE: To avoid circular dependencies, this package MUST NOT import
// anything from "github.com/smartcontractkit/chainlink/v2/core"
"github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
)
const (
@@ -415,16 +416,9 @@ func WaitForLogMessageCount(t *testing.T, observedLogs *observer.ObservedLogs, m
})
}
-// SkipShort skips tb during -short runs, and notes why.
-func SkipShort(tb testing.TB, why string) {
- if testing.Short() {
- tb.Skipf("skipping: %s", why)
- }
-}
-
// SkipShortDB skips tb during -short runs, and notes the DB dependency.
func SkipShortDB(tb testing.TB) {
- SkipShort(tb, "DB dependency")
+ tests.SkipShort(tb, "DB dependency")
}
func AssertCount(t *testing.T, ds sqlutil.DataSource, tableName string, expected int64) {
@@ -454,10 +448,6 @@ func MustDecodeBase64(s string) (b []byte) {
return
}
-func SkipFlakey(t *testing.T, ticketURL string) {
- t.Skip("Flakey", ticketURL)
-}
-
func MustRandBytes(n int) (b []byte) {
b = make([]byte, n)
_, err := rand.Read(b)
diff --git a/core/scripts/go.mod b/core/scripts/go.mod
index 6bab1f30f8e..bbc233803c3 100644
--- a/core/scripts/go.mod
+++ b/core/scripts/go.mod
@@ -33,7 +33,7 @@ require (
github.com/prometheus/client_golang v1.20.5
github.com/shopspring/decimal v1.4.0
github.com/smartcontractkit/chainlink-automation v0.8.1
- github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83
+ github.com/smartcontractkit/chainlink-common v0.3.1-0.20241214155818-b403079b2805
github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12
github.com/spf13/cobra v1.8.1
github.com/spf13/viper v1.19.0
@@ -69,7 +69,7 @@ require (
github.com/NethermindEth/starknet.go v0.7.1-0.20240401080518-34a506f3cfdb // indirect
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
github.com/XSAM/otelsql v0.27.0 // indirect
- github.com/andybalholm/brotli v1.1.0 // indirect
+ github.com/andybalholm/brotli v1.1.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/atombender/go-jsonschema v0.16.1-0.20240916205339-a74cd4e2851c // indirect
github.com/avast/retry-go/v4 v4.6.0 // indirect
@@ -144,7 +144,7 @@ require (
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813 // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
- github.com/gin-contrib/cors v1.5.0 // indirect
+ github.com/gin-contrib/cors v1.7.2 // indirect
github.com/gin-contrib/expvar v0.0.1 // indirect
github.com/gin-contrib/sessions v0.0.5 // indirect
github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4 // indirect
@@ -170,7 +170,7 @@ require (
github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
github.com/go-webauthn/webauthn v0.9.4 // indirect
github.com/go-webauthn/x v0.1.5 // indirect
- github.com/goccy/go-json v0.10.2 // indirect
+ github.com/goccy/go-json v0.10.3 // indirect
github.com/goccy/go-yaml v1.12.0 // indirect
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
github.com/gofrs/flock v0.8.1 // indirect
@@ -238,8 +238,8 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.9 // indirect
- github.com/klauspost/cpuid/v2 v2.2.7 // indirect
+ github.com/klauspost/compress v1.17.11 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.8 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
@@ -301,14 +301,14 @@ require (
github.com/sethvargo/go-retry v0.2.4 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/shirou/gopsutil/v3 v3.24.3 // indirect
- github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86 // indirect
+ github.com/smartcontractkit/ccip-owner-contracts v0.0.0-salt-fix // indirect
github.com/smartcontractkit/chain-selectors v1.0.34 // indirect
- github.com/smartcontractkit/chainlink-ccip v0.0.0-20241211150100-7683331f64a0 // indirect
+ github.com/smartcontractkit/chainlink-ccip v0.0.0-20241213122413-5e8f65dd6b1b // indirect
github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e // indirect
github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241202141438-a90db35252db // indirect
github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect
github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0 // indirect
- github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.2 // indirect
+ github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 // indirect
github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241210172617-6fd1891d0fbc // indirect
github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241202202529-2033490e77b8 // indirect
github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.13 // indirect
@@ -380,15 +380,15 @@ require (
go.uber.org/ratelimit v0.3.1 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/arch v0.11.0 // indirect
- golang.org/x/crypto v0.28.0 // indirect
+ golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
- golang.org/x/sync v0.8.0 // indirect
- golang.org/x/sys v0.26.0 // indirect
- golang.org/x/term v0.25.0 // indirect
- golang.org/x/text v0.19.0 // indirect
+ golang.org/x/sync v0.10.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/term v0.27.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.26.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
diff --git a/core/scripts/go.sum b/core/scripts/go.sum
index f86aad22fb4..ab7950ea73a 100644
--- a/core/scripts/go.sum
+++ b/core/scripts/go.sum
@@ -139,9 +139,11 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax
github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=
github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg=
-github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
-github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
+github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
+github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow-go/v18 v18.0.0 h1:1dBDaSbH3LtulTyOVYaBCHO3yVRwjV+TZaqn3g6V7ZM=
+github.com/apache/arrow-go/v18 v18.0.0/go.mod h1:t6+cWRSmKgdQ6HsxisQjok+jBpKGhRDiqcf3p0p/F+A=
github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4=
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -412,8 +414,8 @@ github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813/go.mod h1:P+oSoE9y
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gin-contrib/cors v1.5.0 h1:DgGKV7DDoOn36DFkNtbHrjoRiT5ExCe+PC9/xp7aKvk=
-github.com/gin-contrib/cors v1.5.0/go.mod h1:TvU7MAZ3EwrPLI2ztzTt3tqgvBCq+wn8WpZmfADjupI=
+github.com/gin-contrib/cors v1.7.2 h1:oLDHxdg8W/XDoN/8zamqk/Drgt4oVZDvaV0YmvVICQw=
+github.com/gin-contrib/cors v1.7.2/go.mod h1:SUJVARKgQ40dmrzgXEVxj2m7Ig1v1qIboQkPDTQ9t2E=
github.com/gin-contrib/expvar v0.0.1 h1:IuU5ArEgihz50vG8Onrwz22kJr7Mcvgv9xSSpfU5g+w=
github.com/gin-contrib/expvar v0.0.1/go.mod h1:8o2CznfQi1JjktORdHr2/abg3wSV6OCnXh0yGypvvVw=
github.com/gin-contrib/sessions v0.0.5 h1:CATtfHmLMQrMNpJRgzjWXD7worTh7g7ritsQfmF+0jE=
@@ -497,8 +499,8 @@ github.com/go-webauthn/webauthn v0.9.4/go.mod h1:LqupCtzSef38FcxzaklmOn7AykGKhAh
github.com/go-webauthn/x v0.1.5 h1:V2TCzDU2TGLd0kSZOXdrqDVV5JB9ILnKxA9S53CSBw0=
github.com/go-webauthn/x v0.1.5/go.mod h1:qbzWwcFcv4rTwtCLOZd+icnr6B7oSsAGZJqlt8cukqY=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
-github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
-github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
+github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.12.0 h1:/1WHjnMsI1dlIBQutrvSMGZRQufVO3asrHfTwfACoPM=
github.com/goccy/go-yaml v1.12.0/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0=
@@ -563,6 +565,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI=
+github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -830,11 +834,11 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
-github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
+github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -881,6 +885,8 @@ github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYt
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f h1:tVvGiZQFjOXP+9YyGqSA6jE55x1XVxmoPYudncxrZ8U=
github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f/go.mod h1:Z60vy0EZVSu0bOugCHdcN5ZxFMKSpjRgsnh0XKPFqqk=
+github.com/marcboeker/go-duckdb v1.8.3 h1:ZkYwiIZhbYsT6MmJsZ3UPTHrTZccDdM4ztoqSlEMXiQ=
+github.com/marcboeker/go-duckdb v1.8.3/go.mod h1:C9bYRE1dPYb1hhfu/SSomm78B0FXmNgRvv6YBW/Hooc=
github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@@ -1018,6 +1024,8 @@ github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xl
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU=
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
+github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
@@ -1134,16 +1142,16 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86 h1:qQH6fZZe31nBAG6INHph3z5ysDTPptyu0TR9uoJ1+ok=
-github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86/go.mod h1:WtWOoVQQEHxRHL2hNmuRrvDfYfQG/CioFNoa9Rr2mBE=
+github.com/smartcontractkit/ccip-owner-contracts v0.0.0-salt-fix h1:DPJD++yKLSx0EfT+U14P8vLVxjXFmoIETiCO9lVwQo8=
+github.com/smartcontractkit/ccip-owner-contracts v0.0.0-salt-fix/go.mod h1:NnT6w4Kj42OFFXhSx99LvJZWPpMjmo4+CpDEWfw61xY=
github.com/smartcontractkit/chain-selectors v1.0.34 h1:MJ17OGu8+jjl426pcKrJkCf3fePb3eCreuAnUA3RBj4=
github.com/smartcontractkit/chain-selectors v1.0.34/go.mod h1:xsKM0aN3YGcQKTPRPDDtPx2l4mlTN1Djmg0VVXV40b8=
github.com/smartcontractkit/chainlink-automation v0.8.1 h1:sTc9LKpBvcKPc1JDYAmgBc2xpDKBco/Q4h4ydl6+UUU=
github.com/smartcontractkit/chainlink-automation v0.8.1/go.mod h1:Iij36PvWZ6blrdC5A/nrQUBuf3MH3JvsBB9sSyc9W08=
-github.com/smartcontractkit/chainlink-ccip v0.0.0-20241211150100-7683331f64a0 h1:/1L+v4SxUD2K5RMRbfByyLfePMAgQKeD0onSetPnGmA=
-github.com/smartcontractkit/chainlink-ccip v0.0.0-20241211150100-7683331f64a0/go.mod h1:F8xQAIW0ymb2BZhqn89sWZLXreJhM5KDVF6Qb4y44N0=
-github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83 h1:NjrU7KOn3Tk+C6QFo9tQBqeotPKytpBwhn/J1s+yiiY=
-github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83/go.mod h1:bQktEJf7sJ0U3SmIcXvbGUox7SmXcnSEZ4kUbT8R5Nk=
+github.com/smartcontractkit/chainlink-ccip v0.0.0-20241213122413-5e8f65dd6b1b h1:iSQJ6ng4FhEswf8SXunGkaJlVP3E3JlgLB8Oo2f3Ud4=
+github.com/smartcontractkit/chainlink-ccip v0.0.0-20241213122413-5e8f65dd6b1b/go.mod h1:F8xQAIW0ymb2BZhqn89sWZLXreJhM5KDVF6Qb4y44N0=
+github.com/smartcontractkit/chainlink-common v0.3.1-0.20241214155818-b403079b2805 h1:Pz8jB/6qe10xT10h2S3LFYJrnebNpG5rJ/w16HZGwPQ=
+github.com/smartcontractkit/chainlink-common v0.3.1-0.20241214155818-b403079b2805/go.mod h1:yti7e1+G9hhkYhj+L5sVUULn9Bn3bBL5/AxaNqdJ5YQ=
github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e h1:PRoeby6ZlTuTkv2f+7tVU4+zboTfRzI+beECynF4JQ0=
github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e/go.mod h1:mUh5/woemsVaHgTorA080hrYmO3syBCmPdnWc/5dOqk=
github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241202141438-a90db35252db h1:N1RH1hSr2ACzOFc9hkCcjE8pRBTdcU3p8nsTJByaLes=
@@ -1152,8 +1160,8 @@ github.com/smartcontractkit/chainlink-feeds v0.1.1 h1:JzvUOM/OgGQA1sOqTXXl52R6An
github.com/smartcontractkit/chainlink-feeds v0.1.1/go.mod h1:55EZ94HlKCfAsUiKUTNI7QlE/3d3IwTlsU3YNa/nBb4=
github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0 h1:0ewLMbAz3rZrovdRUCgd028yOXX8KigB4FndAUdI2kM=
github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0/go.mod h1:/dVVLXrsp+V0AbcYGJo3XMzKg3CkELsweA/TTopCsKE=
-github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.2 h1:onBe3DqNrbtOAzKS4PrPIiJX65BGo1aYiYZxFVEW+jc=
-github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.2/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo=
+github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 h1:ZBat8EBvE2LpSQR9U1gEbRV6PfAkiFdINmQ8nVnXIAQ=
+github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo=
github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241210172617-6fd1891d0fbc h1:dssRwJhmzJkUN/OajaDj2GsxBn+Tupk3bI1BkPEoJg0=
github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241210172617-6fd1891d0fbc/go.mod h1:p8aUDfJeley6oer7y+Ucd3edOtRlMTnWg3mN6rhaLWo=
github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241202202529-2033490e77b8 h1:tNS7U9lrxkFvEuyxQv11HHOiV9LPDGC9wYEy+yM/Jv4=
@@ -1304,6 +1312,8 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
+github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
+github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -1313,6 +1323,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U=
github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM=
@@ -1447,8 +1459,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
-golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
-golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1572,8 +1584,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1656,8 +1668,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1666,8 +1678,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
-golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
-golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1681,8 +1693,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go b/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go
index 166022ac753..86dbfa0c404 100644
--- a/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go
+++ b/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go
@@ -294,7 +294,7 @@ func (c *deployAndInitializeCapabilitiesRegistryCommand) Run(args []string) {
panic(innerErr)
}
- n.HashedCapabilityIds = [][32]byte{ocrid, ctid}
+ n.HashedCapabilityIds = [][32]byte{ocrid, ctid, aid}
nodes = append(nodes, n)
}
@@ -337,7 +337,7 @@ func (c *deployAndInitializeCapabilitiesRegistryCommand) Run(args []string) {
Config: ccfgb,
},
}
- _, err = reg.AddDON(env.Owner, ps, cfgs, true, true, 2)
+ _, err = reg.AddDON(env.Owner, ps, cfgs, true, true, 1)
if err != nil {
log.Printf("workflowDON: failed to AddDON: %s", err)
}
diff --git a/core/services/chainlink/config_database.go b/core/services/chainlink/config_database.go
index fe10c63f71b..fd8aa96419d 100644
--- a/core/services/chainlink/config_database.go
+++ b/core/services/chainlink/config_database.go
@@ -4,9 +4,10 @@ import (
"net/url"
"time"
+ pgcommon "github.com/smartcontractkit/chainlink-common/pkg/sqlutil/pg"
+
"github.com/smartcontractkit/chainlink/v2/core/config"
"github.com/smartcontractkit/chainlink/v2/core/config/toml"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
)
type backupConfig struct {
@@ -109,7 +110,7 @@ func (d *databaseConfig) URL() url.URL {
return *d.s.URL.URL()
}
-func (d *databaseConfig) Dialect() dialects.DialectName {
+func (d *databaseConfig) Dialect() pgcommon.DialectName {
return d.c.Dialect
}
diff --git a/core/services/chainlink/config_database_test.go b/core/services/chainlink/config_database_test.go
index b52d17452aa..f8f864f97ab 100644
--- a/core/services/chainlink/config_database_test.go
+++ b/core/services/chainlink/config_database_test.go
@@ -7,8 +7,9 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ pgcommon "github.com/smartcontractkit/chainlink-common/pkg/sqlutil/pg"
+
"github.com/smartcontractkit/chainlink/v2/core/config"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
)
func TestDatabaseConfig(t *testing.T) {
@@ -21,31 +22,31 @@ URL = "postgresql://doesnotexist:justtopassvalidationtests@localhost:5432/chainl
require.NoError(t, err)
backup := cfg.Database().Backup()
- assert.Equal(t, backup.Dir(), "test/backup/dir")
- assert.Equal(t, backup.Frequency(), 1*time.Hour)
- assert.Equal(t, backup.Mode(), config.DatabaseBackupModeFull)
- assert.Equal(t, backup.OnVersionUpgrade(), true)
+ assert.Equal(t, "test/backup/dir", backup.Dir())
+ assert.Equal(t, 1*time.Hour, backup.Frequency())
+ assert.Equal(t, config.DatabaseBackupModeFull, backup.Mode())
+ assert.True(t, backup.OnVersionUpgrade())
assert.Nil(t, backup.URL())
db := cfg.Database()
- assert.Equal(t, db.DefaultIdleInTxSessionTimeout(), 1*time.Minute)
- assert.Equal(t, db.DefaultLockTimeout(), 1*time.Hour)
- assert.Equal(t, db.DefaultQueryTimeout(), 1*time.Second)
- assert.Equal(t, db.LogSQL(), true)
- assert.Equal(t, db.MaxIdleConns(), 7)
- assert.Equal(t, db.MaxOpenConns(), 13)
- assert.Equal(t, db.MigrateDatabase(), true)
- assert.Equal(t, db.Dialect(), dialects.Postgres)
+ assert.Equal(t, 1*time.Minute, db.DefaultIdleInTxSessionTimeout())
+ assert.Equal(t, 1*time.Hour, db.DefaultLockTimeout())
+ assert.Equal(t, 1*time.Second, db.DefaultQueryTimeout())
+ assert.True(t, db.LogSQL())
+ assert.Equal(t, 7, db.MaxIdleConns())
+ assert.Equal(t, 13, db.MaxOpenConns())
+ assert.True(t, db.MigrateDatabase())
+ assert.Equal(t, pgcommon.Postgres, db.Dialect())
url := db.URL()
assert.NotEqual(t, url.String(), "")
lock := db.Lock()
- assert.Equal(t, lock.LockingMode(), "none")
- assert.Equal(t, lock.LeaseDuration(), 1*time.Minute)
- assert.Equal(t, lock.LeaseRefreshInterval(), 1*time.Second)
+ assert.Equal(t, "none", lock.LockingMode())
+ assert.Equal(t, 1*time.Minute, lock.LeaseDuration())
+ assert.Equal(t, 1*time.Second, lock.LeaseRefreshInterval())
l := db.Listener()
- assert.Equal(t, l.MaxReconnectDuration(), 1*time.Minute)
- assert.Equal(t, l.MinReconnectInterval(), 5*time.Minute)
- assert.Equal(t, l.FallbackPollInterval(), 2*time.Minute)
+ assert.Equal(t, 1*time.Minute, l.MaxReconnectDuration())
+ assert.Equal(t, 5*time.Minute, l.MinReconnectInterval())
+ assert.Equal(t, 2*time.Minute, l.FallbackPollInterval())
}
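
The assertion rewrite above follows testify's (expected, actual) argument order and swaps boolean equality checks for assert.True. A minimal sketch of why the order matters, using an illustrative test name and value:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestArgumentOrder illustrates the convention applied in the hunk above:
// testify's assert.Equal takes (expected, actual), so reversed arguments
// produce misleading "expected/actual" labels in failure output.
func TestArgumentOrder(t *testing.T) {
	got := 13 // e.g. the value returned by db.MaxOpenConns()

	assert.Equal(t, 13, got) // expected first, actual second
	assert.True(t, got > 0)  // boolean checks read better than Equal(t, x, true)
}
```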
diff --git a/core/services/feeds/models.go b/core/services/feeds/models.go
index 93dddd86dae..a6cf103b4e9 100644
--- a/core/services/feeds/models.go
+++ b/core/services/feeds/models.go
@@ -12,6 +12,7 @@ import (
"gopkg.in/guregu/null.v4"
proto "github.com/smartcontractkit/chainlink-protos/orchestrator/feedsmanager"
+
"github.com/smartcontractkit/chainlink/v2/core/utils/crypto"
)
@@ -82,6 +83,7 @@ const (
ChainTypeEVM ChainType = "EVM"
ChainTypeSolana ChainType = "SOLANA"
ChainTypeStarknet ChainType = "STARKNET"
+ ChainTypeTron ChainType = "TRON"
)
func NewChainType(s string) (ChainType, error) {
@@ -94,6 +96,8 @@ func NewChainType(s string) (ChainType, error) {
return ChainTypeSolana, nil
case "APTOS":
return ChainTypeAptos, nil
+ case "TRON":
+ return ChainTypeTron, nil
default:
return ChainTypeUnknown, errors.New("invalid chain type")
}
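
With the new ChainTypeTron constant and the extra case in NewChainType, Job Distributor sync can parse "TRON". A minimal sketch of the parser behaviour, assuming only the package import path shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/smartcontractkit/chainlink/v2/core/services/feeds"
)

func main() {
	// "TRON" is now accepted by NewChainType alongside the existing chain types.
	ct, err := feeds.NewChainType("TRON")
	if err != nil {
		panic(err)
	}
	fmt.Println(ct) // TRON

	// Strings outside the known set still fail with "invalid chain type".
	if _, err := feeds.NewChainType("NOT_A_CHAIN"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```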
diff --git a/core/services/feeds/models_test.go b/core/services/feeds/models_test.go
index 13567281735..d0d4382b055 100644
--- a/core/services/feeds/models_test.go
+++ b/core/services/feeds/models_test.go
@@ -28,6 +28,11 @@ func Test_NewChainType(t *testing.T) {
give: "STARKNET",
want: ChainTypeStarknet,
},
+ {
+ name: "Tron Chain Type",
+ give: "TRON",
+ want: ChainTypeTron,
+ },
{
name: "Invalid Chain Type",
give: "",
diff --git a/core/services/fluxmonitorv2/flux_monitor_test.go b/core/services/fluxmonitorv2/flux_monitor_test.go
index 88b364cdeb3..150db269e45 100644
--- a/core/services/fluxmonitorv2/flux_monitor_test.go
+++ b/core/services/fluxmonitorv2/flux_monitor_test.go
@@ -150,7 +150,7 @@ type setupOptions struct {
// functional options to configure the setup
func setup(t *testing.T, ds sqlutil.DataSource, optionFns ...func(*setupOptions)) (*fluxmonitorv2.FluxMonitor, *testMocks) {
t.Helper()
- testutils.SkipShort(t, "long test")
+ tests.SkipShort(t, "long test")
tm := setupMocks(t)
options := setupOptions{
diff --git a/core/services/fluxmonitorv2/integrations_test.go b/core/services/fluxmonitorv2/integrations_test.go
index 1d77b694cbe..d30ff4479a8 100644
--- a/core/services/fluxmonitorv2/integrations_test.go
+++ b/core/services/fluxmonitorv2/integrations_test.go
@@ -25,6 +25,7 @@ import (
commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
"github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
@@ -95,7 +96,7 @@ func WithMinMaxSubmission(min, max *big.Int) func(cfg *fluxAggregatorUniverseCon
// arguments match the arguments of the same name in the FluxAggregator
// constructor.
func setupFluxAggregatorUniverse(t *testing.T, configOptions ...func(cfg *fluxAggregatorUniverseConfig)) fluxAggregatorUniverse {
- testutils.SkipShort(t, "VRFCoordinatorV2Universe")
+ tests.SkipShort(t, "VRFCoordinatorV2Universe")
cfg := &fluxAggregatorUniverseConfig{
MinSubmission: big.NewInt(0),
MaxSubmission: big.NewInt(100000000000),
diff --git a/core/services/gateway/connector/connector.go b/core/services/gateway/connector/connector.go
index a8d356478e9..cab123d4ce5 100644
--- a/core/services/gateway/connector/connector.go
+++ b/core/services/gateway/connector/connector.go
@@ -28,13 +28,14 @@ type GatewayConnector interface {
AddHandler(methods []string, handler GatewayConnectorHandler) error
// SendToGateway takes a signed message as argument and sends it to the specified gateway
- SendToGateway(ctx context.Context, gatewayId string, msg *api.Message) error
+ SendToGateway(ctx context.Context, gatewayID string, msg *api.Message) error
// SignAndSendToGateway signs the message and sends the message to the specified gateway
SignAndSendToGateway(ctx context.Context, gatewayID string, msg *api.MessageBody) error
// GatewayIDs returns the list of Gateway IDs
GatewayIDs() []string
// DonID returns the DON ID
DonID() string
+ AwaitConnection(ctx context.Context, gatewayID string) error
}
// Signer implementation needs to be provided by a GatewayConnector user (node)
@@ -78,12 +79,30 @@ func (c *gatewayConnector) HealthReport() map[string]error {
func (c *gatewayConnector) Name() string { return c.lggr.Name() }
type gatewayState struct {
+ // signal channel is closed once the gateway is connected
+ signalCh chan struct{}
+
conn network.WSConnectionWrapper
config ConnectorGatewayConfig
url *url.URL
wsClient network.WebSocketClient
}
+// signal marks the gatewayState as connected by closing its signal channel
+func (gs *gatewayState) signal() {
+ close(gs.signalCh)
+}
+
+// awaitConn blocks until the gateway is connected or the context is done
+func (gs *gatewayState) awaitConn(ctx context.Context) error {
+ select {
+ case <-ctx.Done():
+ return fmt.Errorf("await connection failed: %w", ctx.Err())
+ case <-gs.signalCh:
+ return nil
+ }
+}
+
func NewGatewayConnector(config *ConnectorConfig, signer Signer, clock clockwork.Clock, lggr logger.Logger) (GatewayConnector, error) {
if config == nil || signer == nil || clock == nil || lggr == nil {
return nil, errors.New("nil dependency")
@@ -125,6 +144,7 @@ func NewGatewayConnector(config *ConnectorConfig, signer Signer, clock clockwork
config: gw,
url: parsedURL,
wsClient: network.NewWebSocketClient(config.WsClientConfig, connector, lggr),
+ signalCh: make(chan struct{}),
}
gateways[gw.Id] = gateway
urlToId[gw.URL] = gw.Id
@@ -150,17 +170,25 @@ func (c *gatewayConnector) AddHandler(methods []string, handler GatewayConnector
return nil
}
-func (c *gatewayConnector) SendToGateway(ctx context.Context, gatewayId string, msg *api.Message) error {
+func (c *gatewayConnector) AwaitConnection(ctx context.Context, gatewayID string) error {
+ gateway, ok := c.gateways[gatewayID]
+ if !ok {
+ return fmt.Errorf("invalid Gateway ID %s", gatewayID)
+ }
+ return gateway.awaitConn(ctx)
+}
+
+func (c *gatewayConnector) SendToGateway(ctx context.Context, gatewayID string, msg *api.Message) error {
data, err := c.codec.EncodeResponse(msg)
if err != nil {
- return fmt.Errorf("error encoding response for gateway %s: %v", gatewayId, err)
+ return fmt.Errorf("error encoding response for gateway %s: %w", gatewayID, err)
}
- gateway, ok := c.gateways[gatewayId]
+ gateway, ok := c.gateways[gatewayID]
if !ok {
- return fmt.Errorf("invalid Gateway ID %s", gatewayId)
+ return fmt.Errorf("invalid Gateway ID %s", gatewayID)
}
if gateway.conn == nil {
- return fmt.Errorf("connector not started")
+ return errors.New("connector not started")
}
return gateway.conn.Write(ctx, websocket.BinaryMessage, data)
}
@@ -242,10 +270,15 @@ func (c *gatewayConnector) reconnectLoop(gatewayState *gatewayState) {
} else {
c.lggr.Infow("connected successfully", "url", gatewayState.url)
closeCh := gatewayState.conn.Reset(conn)
+ gatewayState.signal()
<-closeCh
c.lggr.Infow("connection closed", "url", gatewayState.url)
+
// reset backoff
redialBackoff = utils.NewRedialBackoff()
+
+ // reset signal channel
+ gatewayState.signalCh = make(chan struct{})
}
select {
case <-c.shutdownCh:
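
The connector change pairs each gatewayState with a signal channel that the reconnect loop closes once the WebSocket is up, and exposes it through the new AwaitConnection method. A minimal caller-side sketch, assuming a hypothetical waitForGateway helper and an illustrative 30-second budget:

```go
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/smartcontractkit/chainlink/v2/core/services/gateway/connector"
)

// waitForGateway is a sketch of how a caller might use the new AwaitConnection
// method: bound the wait with a context so a gateway that never comes up does
// not block the handler forever.
func waitForGateway(gc connector.GatewayConnector, gatewayID string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	if err := gc.AwaitConnection(ctx, gatewayID); err != nil {
		// Either the context expired before the WebSocket connected, or the
		// gateway ID is not configured on this connector.
		return fmt.Errorf("gateway %s not ready: %w", gatewayID, err)
	}
	return nil
}
```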
diff --git a/core/services/gateway/connector/mocks/gateway_connector.go b/core/services/gateway/connector/mocks/gateway_connector.go
index 183fc949cd5..ba5c2213b5f 100644
--- a/core/services/gateway/connector/mocks/gateway_connector.go
+++ b/core/services/gateway/connector/mocks/gateway_connector.go
@@ -73,6 +73,53 @@ func (_c *GatewayConnector_AddHandler_Call) RunAndReturn(run func([]string, conn
return _c
}
+// AwaitConnection provides a mock function with given fields: ctx, gatewayID
+func (_m *GatewayConnector) AwaitConnection(ctx context.Context, gatewayID string) error {
+ ret := _m.Called(ctx, gatewayID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for AwaitConnection")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
+ r0 = rf(ctx, gatewayID)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// GatewayConnector_AwaitConnection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AwaitConnection'
+type GatewayConnector_AwaitConnection_Call struct {
+ *mock.Call
+}
+
+// AwaitConnection is a helper method to define mock.On call
+// - ctx context.Context
+// - gatewayID string
+func (_e *GatewayConnector_Expecter) AwaitConnection(ctx interface{}, gatewayID interface{}) *GatewayConnector_AwaitConnection_Call {
+ return &GatewayConnector_AwaitConnection_Call{Call: _e.mock.On("AwaitConnection", ctx, gatewayID)}
+}
+
+func (_c *GatewayConnector_AwaitConnection_Call) Run(run func(ctx context.Context, gatewayID string)) *GatewayConnector_AwaitConnection_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *GatewayConnector_AwaitConnection_Call) Return(_a0 error) *GatewayConnector_AwaitConnection_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *GatewayConnector_AwaitConnection_Call) RunAndReturn(run func(context.Context, string) error) *GatewayConnector_AwaitConnection_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ChallengeResponse provides a mock function with given fields: _a0, challenge
func (_m *GatewayConnector) ChallengeResponse(_a0 *url.URL, challenge []byte) ([]byte, error) {
ret := _m.Called(_a0, challenge)
@@ -464,9 +511,9 @@ func (_c *GatewayConnector_Ready_Call) RunAndReturn(run func() error) *GatewayCo
return _c
}
-// SendToGateway provides a mock function with given fields: ctx, gatewayId, msg
-func (_m *GatewayConnector) SendToGateway(ctx context.Context, gatewayId string, msg *api.Message) error {
- ret := _m.Called(ctx, gatewayId, msg)
+// SendToGateway provides a mock function with given fields: ctx, gatewayID, msg
+func (_m *GatewayConnector) SendToGateway(ctx context.Context, gatewayID string, msg *api.Message) error {
+ ret := _m.Called(ctx, gatewayID, msg)
if len(ret) == 0 {
panic("no return value specified for SendToGateway")
@@ -474,7 +521,7 @@ func (_m *GatewayConnector) SendToGateway(ctx context.Context, gatewayId string,
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string, *api.Message) error); ok {
- r0 = rf(ctx, gatewayId, msg)
+ r0 = rf(ctx, gatewayID, msg)
} else {
r0 = ret.Error(0)
}
@@ -489,13 +536,13 @@ type GatewayConnector_SendToGateway_Call struct {
// SendToGateway is a helper method to define mock.On call
// - ctx context.Context
-// - gatewayId string
+// - gatewayID string
// - msg *api.Message
-func (_e *GatewayConnector_Expecter) SendToGateway(ctx interface{}, gatewayId interface{}, msg interface{}) *GatewayConnector_SendToGateway_Call {
- return &GatewayConnector_SendToGateway_Call{Call: _e.mock.On("SendToGateway", ctx, gatewayId, msg)}
+func (_e *GatewayConnector_Expecter) SendToGateway(ctx interface{}, gatewayID interface{}, msg interface{}) *GatewayConnector_SendToGateway_Call {
+ return &GatewayConnector_SendToGateway_Call{Call: _e.mock.On("SendToGateway", ctx, gatewayID, msg)}
}
-func (_c *GatewayConnector_SendToGateway_Call) Run(run func(ctx context.Context, gatewayId string, msg *api.Message)) *GatewayConnector_SendToGateway_Call {
+func (_c *GatewayConnector_SendToGateway_Call) Run(run func(ctx context.Context, gatewayID string, msg *api.Message)) *GatewayConnector_SendToGateway_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(string), args[2].(*api.Message))
})
diff --git a/core/services/llo/mercurytransmitter/server.go b/core/services/llo/mercurytransmitter/server.go
index 4e97c0483b3..3ce2b0a4b4a 100644
--- a/core/services/llo/mercurytransmitter/server.go
+++ b/core/services/llo/mercurytransmitter/server.go
@@ -62,6 +62,22 @@ var (
},
[]string{"donID", "serverURL", "code"},
)
+ promTransmitConcurrentTransmitGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "llo",
+ Subsystem: "mercurytransmitter",
+ Name: "concurrent_transmit_gauge",
+ Help: "Gauge that measures the number of transmit threads currently waiting on a remote transmit call. You may wish to alert if this exceeds some number for a given period of time, or if it ever reaches its max.",
+ },
+ []string{"donID", "serverURL"},
+ )
+	promTransmitConcurrentDeleteGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "llo",
+ Subsystem: "mercurytransmitter",
+ Name: "concurrent_delete_gauge",
+ Help: "Gauge that measures the number of delete threads currently waiting on a delete call to the DB. You may wish to alert if this exceeds some number for a given period of time, or if it ever reaches its max.",
+ },
+ []string{"donID", "serverURL"},
+ )
)
type ReportPacker interface {
@@ -87,12 +103,14 @@ type server struct {
evmPremiumLegacyPacker ReportPacker
jsonPacker ReportPacker
- transmitSuccessCount prometheus.Counter
- transmitDuplicateCount prometheus.Counter
- transmitConnectionErrorCount prometheus.Counter
- transmitQueueDeleteErrorCount prometheus.Counter
- transmitQueueInsertErrorCount prometheus.Counter
- transmitQueuePushErrorCount prometheus.Counter
+ transmitSuccessCount prometheus.Counter
+ transmitDuplicateCount prometheus.Counter
+ transmitConnectionErrorCount prometheus.Counter
+ transmitQueueDeleteErrorCount prometheus.Counter
+ transmitQueueInsertErrorCount prometheus.Counter
+ transmitQueuePushErrorCount prometheus.Counter
+ transmitConcurrentTransmitGauge prometheus.Gauge
+ transmitConcurrentDeleteGauge prometheus.Gauge
transmitThreadBusyCount atomic.Int32
deleteThreadBusyCount atomic.Int32
@@ -130,6 +148,8 @@ func newServer(lggr logger.Logger, verboseLogging bool, cfg QueueConfig, client
promTransmitQueueDeleteErrorCount.WithLabelValues(donIDStr, serverURL),
promTransmitQueueInsertErrorCount.WithLabelValues(donIDStr, serverURL),
promTransmitQueuePushErrorCount.WithLabelValues(donIDStr, serverURL),
+ promTransmitConcurrentTransmitGauge.WithLabelValues(donIDStr, serverURL),
+ promTransmitConcurrentDeleteGauge.WithLabelValues(donIDStr, serverURL),
atomic.Int32{},
atomic.Int32{},
}
@@ -161,7 +181,7 @@ func (s *server) runDeleteQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup
select {
case hash := <-s.deleteQueue:
for {
- s.deleteThreadBusyCount.Add(1)
+ s.deleteThreadBusyCountInc()
if err := s.pm.orm.Delete(ctx, [][32]byte{hash}); err != nil {
s.lggr.Errorw("Failed to delete transmission record", "err", err, "transmissionHash", hash)
s.transmitQueueDeleteErrorCount.Inc()
@@ -170,7 +190,7 @@ func (s *server) runDeleteQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup
// Wait a backoff duration before trying to delete again
continue
case <-stopCh:
- s.deleteThreadBusyCount.Add(-1)
+ s.deleteThreadBusyCountDec()
// abort and return immediately on stop even if items remain in queue
return
}
@@ -179,7 +199,7 @@ func (s *server) runDeleteQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup
}
// success
b.Reset()
- s.deleteThreadBusyCount.Add(-1)
+ s.deleteThreadBusyCountDec()
case <-stopCh:
// abort and return immediately on stop even if items remain in queue
return
@@ -187,6 +207,23 @@ func (s *server) runDeleteQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup
}
}
+func (s *server) transmitThreadBusyCountInc() {
+ val := s.transmitThreadBusyCount.Add(1)
+ s.transmitConcurrentTransmitGauge.Set(float64(val))
+}
+func (s *server) transmitThreadBusyCountDec() {
+ val := s.transmitThreadBusyCount.Add(-1)
+ s.transmitConcurrentTransmitGauge.Set(float64(val))
+}
+func (s *server) deleteThreadBusyCountInc() {
+ val := s.deleteThreadBusyCount.Add(1)
+ s.transmitConcurrentDeleteGauge.Set(float64(val))
+}
+func (s *server) deleteThreadBusyCountDec() {
+ val := s.deleteThreadBusyCount.Add(-1)
+ s.transmitConcurrentDeleteGauge.Set(float64(val))
+}
+
func (s *server) runQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup, donIDStr string) {
defer wg.Done()
// Exponential backoff with very short retry interval (since latency is a priority)
@@ -208,8 +245,8 @@ func (s *server) runQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup, donI
return false
}
- s.transmitThreadBusyCount.Add(1)
- defer s.transmitThreadBusyCount.Add(-1)
+ s.transmitThreadBusyCountInc()
+ defer s.transmitThreadBusyCountDec()
req, res, err := func(ctx context.Context) (*pb.TransmitRequest, *pb.TransmitResponse, error) {
ctx, cancelFn := context.WithTimeout(ctx, utils.WithJitter(s.transmitTimeout))
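
server.go replaces the GaugeFunc collectors with promauto gauges that are kept in sync with the atomic busy counters via the new Inc/Dec helpers. A standalone sketch of the same pattern, with an illustrative metric name rather than one from the diff:

```go
package example

import (
	"sync/atomic"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// exampleBusyGauge mirrors an atomic busy-thread counter into a gauge, the same
// pattern the transmitter now uses for its transmit and delete loops.
var exampleBusyGauge = promauto.NewGauge(prometheus.GaugeOpts{
	Name: "example_concurrent_workers",
	Help: "Number of workers currently blocked on a slow call.",
})

type worker struct {
	busy atomic.Int32
}

func (w *worker) busyInc() { exampleBusyGauge.Set(float64(w.busy.Add(1))) }
func (w *worker) busyDec() { exampleBusyGauge.Set(float64(w.busy.Add(-1))) }

// do wraps a slow call so the gauge reflects how many workers are in flight.
func (w *worker) do(fn func()) {
	w.busyInc()
	defer w.busyDec()
	fn()
}
```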
diff --git a/core/services/llo/mercurytransmitter/transmitter.go b/core/services/llo/mercurytransmitter/transmitter.go
index 8e60bf938a5..23aa4b79e58 100644
--- a/core/services/llo/mercurytransmitter/transmitter.go
+++ b/core/services/llo/mercurytransmitter/transmitter.go
@@ -116,7 +116,6 @@ type transmitter struct {
orm ORM
servers map[string]*server
registerer prometheus.Registerer
- collectors []prometheus.Collector
donID uint32
fromAccount string
@@ -155,7 +154,6 @@ func newTransmitter(opts Opts) *transmitter {
opts.ORM,
servers,
opts.Registerer,
- nil,
opts.DonID,
fmt.Sprintf("%x", opts.FromAccount),
make(services.StopChan),
@@ -194,31 +192,6 @@ func (mt *transmitter) Start(ctx context.Context) (err error) {
go s.runDeleteQueueLoop(mt.stopCh, mt.wg)
go s.runQueueLoop(mt.stopCh, mt.wg, donIDStr)
}
- mt.collectors = append(mt.collectors, prometheus.NewGaugeFunc(
- prometheus.GaugeOpts{
- Namespace: "llo",
- Subsystem: "mercurytransmitter",
- Name: "concurrent_transmit_gauge",
- Help: "Gauge that measures the number of transmit threads currently waiting on a remote transmit call. You may wish to alert if this exceeds some number for a given period of time, or if it ever reaches its max.",
- ConstLabels: prometheus.Labels{"donID": donIDStr, "serverURL": s.url, "maxConcurrentTransmits": strconv.FormatInt(int64(nThreads), 10)},
- }, func() float64 {
- return float64(s.transmitThreadBusyCount.Load())
- }))
- mt.collectors = append(mt.collectors, prometheus.NewGaugeFunc(
- prometheus.GaugeOpts{
- Namespace: "llo",
- Subsystem: "mercurytransmitter",
- Name: "concurrent_delete_gauge",
- Help: "Gauge that measures the number of delete threads currently waiting on a delete call to the DB. You may wish to alert if this exceeds some number for a given period of time, or if it ever reaches its max.",
- ConstLabels: prometheus.Labels{"donID": donIDStr, "serverURL": s.url, "maxConcurrentDeletes": strconv.FormatInt(int64(nThreads), 10)},
- }, func() float64 {
- return float64(s.deleteThreadBusyCount.Load())
- }))
- for _, c := range mt.collectors {
- if err := mt.registerer.Register(c); err != nil {
- return err
- }
- }
}
if err := (&services.MultiStart{}).Start(ctx, startClosers...); err != nil {
return err
@@ -250,12 +223,7 @@ func (mt *transmitter) Close() error {
closers = append(closers, s.pm)
closers = append(closers, s.c)
}
- err := services.CloseAll(closers...)
- // Unregister all the gauge funcs
- for _, c := range mt.collectors {
- mt.registerer.Unregister(c)
- }
- return err
+ return services.CloseAll(closers...)
})
}
diff --git a/core/services/ocr2/plugins/mercury/plugin.go b/core/services/ocr2/plugins/mercury/plugin.go
index 8a4101804dd..b0983e55c89 100644
--- a/core/services/ocr2/plugins/mercury/plugin.go
+++ b/core/services/ocr2/plugins/mercury/plugin.go
@@ -1,6 +1,7 @@
package mercury
import (
+ "context"
"encoding/json"
"fmt"
"os/exec"
@@ -79,14 +80,13 @@ func NewServices(
return nil, errors.New("expected job to have a non-nil PipelineSpec")
}
- var err error
var pluginConfig config.PluginConfig
if len(jb.OCR2OracleSpec.PluginConfig) == 0 {
if !enableTriggerCapability {
return nil, fmt.Errorf("at least one transmission option must be configured")
}
} else {
- err = json.Unmarshal(jb.OCR2OracleSpec.PluginConfig.Bytes(), &pluginConfig)
+ err := json.Unmarshal(jb.OCR2OracleSpec.PluginConfig.Bytes(), &pluginConfig)
if err != nil {
return nil, errors.WithStack(err)
}
@@ -101,8 +101,8 @@ func NewServices(
// encapsulate all the subservices and ensure we close them all if any fail to start
srvs := []job.ServiceCtx{ocr2Provider}
abort := func() {
- if err = services.MultiCloser(srvs).Close(); err != nil {
- lggr.Errorw("Error closing unused services", "err", err)
+ if cerr := services.MultiCloser(srvs).Close(); cerr != nil {
+ lggr.Errorw("Error closing unused services", "err", cerr)
}
}
saver := ocrcommon.NewResultRunSaver(pipelineRunner, lggr, cfg.MaxSuccessfulRuns(), cfg.ResultWriteQueueDepth())
@@ -112,6 +112,7 @@ func NewServices(
var (
factory ocr3types.MercuryPluginFactory
factoryServices []job.ServiceCtx
+ fErr error
)
fCfg := factoryCfg{
orm: orm,
@@ -127,31 +128,31 @@ func NewServices(
}
switch feedID.Version() {
case 1:
- factory, factoryServices, err = newv1factory(fCfg)
- if err != nil {
+ factory, factoryServices, fErr = newv1factory(fCfg)
+ if fErr != nil {
abort()
- return nil, fmt.Errorf("failed to create mercury v1 factory: %w", err)
+ return nil, fmt.Errorf("failed to create mercury v1 factory: %w", fErr)
}
srvs = append(srvs, factoryServices...)
case 2:
- factory, factoryServices, err = newv2factory(fCfg)
- if err != nil {
+ factory, factoryServices, fErr = newv2factory(fCfg)
+ if fErr != nil {
abort()
- return nil, fmt.Errorf("failed to create mercury v2 factory: %w", err)
+ return nil, fmt.Errorf("failed to create mercury v2 factory: %w", fErr)
}
srvs = append(srvs, factoryServices...)
case 3:
- factory, factoryServices, err = newv3factory(fCfg)
- if err != nil {
+ factory, factoryServices, fErr = newv3factory(fCfg)
+ if fErr != nil {
abort()
- return nil, fmt.Errorf("failed to create mercury v3 factory: %w", err)
+ return nil, fmt.Errorf("failed to create mercury v3 factory: %w", fErr)
}
srvs = append(srvs, factoryServices...)
case 4:
- factory, factoryServices, err = newv4factory(fCfg)
- if err != nil {
+ factory, factoryServices, fErr = newv4factory(fCfg)
+ if fErr != nil {
abort()
- return nil, fmt.Errorf("failed to create mercury v4 factory: %w", err)
+ return nil, fmt.Errorf("failed to create mercury v4 factory: %w", fErr)
}
srvs = append(srvs, factoryServices...)
default:
@@ -214,13 +215,14 @@ func newv4factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.
loopEnabled := loopCmd != ""
if loopEnabled {
- cmdFn, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
+ cmdFn, unregisterer, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
if err != nil {
return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err)
}
// in loop mode, the factory is grpc server, and we need to handle the server lifecycle
+ // and unregistration of the loop
factoryServer := loop.NewMercuryV4Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds)
- srvs = append(srvs, factoryServer)
+ srvs = append(srvs, factoryServer, unregisterer)
// adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle
factory = factoryServer
} else {
@@ -253,13 +255,14 @@ func newv3factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.
loopEnabled := loopCmd != ""
if loopEnabled {
- cmdFn, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
+ cmdFn, unregisterer, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
if err != nil {
return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err)
}
// in loopp mode, the factory is grpc server, and we need to handle the server lifecycle
+ // and unregistration of the loop
factoryServer := loop.NewMercuryV3Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds)
- srvs = append(srvs, factoryServer)
+ srvs = append(srvs, factoryServer, unregisterer)
// adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle
factory = factoryServer
} else {
@@ -292,13 +295,14 @@ func newv2factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.
loopEnabled := loopCmd != ""
if loopEnabled {
- cmdFn, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
+ cmdFn, unregisterer, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
if err != nil {
return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err)
}
// in loopp mode, the factory is grpc server, and we need to handle the server lifecycle
+ // and unregistration of the loop
factoryServer := loop.NewMercuryV2Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds)
- srvs = append(srvs, factoryServer)
+ srvs = append(srvs, factoryServer, unregisterer)
// adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle
factory = factoryServer
} else {
@@ -329,13 +333,14 @@ func newv1factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.
loopEnabled := loopCmd != ""
if loopEnabled {
- cmdFn, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
+ cmdFn, unregisterer, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
if err != nil {
return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err)
}
// in loopp mode, the factory is grpc server, and we need to handle the server lifecycle
+ // and unregistration of the loop
factoryServer := loop.NewMercuryV1Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds)
- srvs = append(srvs, factoryServer)
+ srvs = append(srvs, factoryServer, unregisterer)
// adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle
factory = factoryServer
} else {
@@ -344,20 +349,46 @@ func newv1factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.
return factory, srvs, nil
}
-func initLoop(cmd string, cfg Config, feedID utils.FeedID, lggr logger.Logger) (func() *exec.Cmd, loop.GRPCOpts, logger.Logger, error) {
+func initLoop(cmd string, cfg Config, feedID utils.FeedID, lggr logger.Logger) (func() *exec.Cmd, *loopUnregisterCloser, loop.GRPCOpts, logger.Logger, error) {
lggr.Debugw("Initializing Mercury loop", "command", cmd)
mercuryLggr := lggr.Named(fmt.Sprintf("MercuryV%d", feedID.Version())).Named(feedID.String())
envVars, err := plugins.ParseEnvFile(env.MercuryPlugin.Env.Get())
if err != nil {
- return nil, loop.GRPCOpts{}, nil, fmt.Errorf("failed to parse mercury env file: %w", err)
+ return nil, nil, loop.GRPCOpts{}, nil, fmt.Errorf("failed to parse mercury env file: %w", err)
}
+ loopID := mercuryLggr.Name()
cmdFn, opts, err := cfg.RegisterLOOP(plugins.CmdConfig{
- ID: mercuryLggr.Name(),
+ ID: loopID,
Cmd: cmd,
Env: envVars,
})
if err != nil {
- return nil, loop.GRPCOpts{}, nil, fmt.Errorf("failed to register loop: %w", err)
+ return nil, nil, loop.GRPCOpts{}, nil, fmt.Errorf("failed to register loop: %w", err)
+ }
+ return cmdFn, newLoopUnregister(cfg, loopID), opts, mercuryLggr, nil
+}
+
+// loopUnregisterCloser is a helper that unregisters a loop
+// when it is closed as a service.
+// TODO BCF-3451: all other jobs that use custom plugin providers should be refactored to use this pattern;
+// perhaps it can be implemented in the delegate on job delete.
+type loopUnregisterCloser struct {
+ r plugins.RegistrarConfig
+ id string
+}
+
+func (l *loopUnregisterCloser) Close() error {
+ l.r.UnregisterLOOP(l.id)
+ return nil
+}
+
+func (l *loopUnregisterCloser) Start(ctx context.Context) error {
+ return nil
+}
+
+func newLoopUnregister(r plugins.RegistrarConfig, id string) *loopUnregisterCloser {
+ return &loopUnregisterCloser{
+ r: r,
+ id: id,
}
- return cmdFn, opts, mercuryLggr, nil
}
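
initLoop now also returns a loopUnregisterCloser, so the job's service list owns unregistration of the LOOP and a restarted job can re-register the same loop ID. A minimal sketch of the adapter pattern, with an assumed unregister callback standing in for RegistrarConfig.UnregisterLOOP bound to a loop ID:

```go
package example

import "context"

// loopCloser sketches the pattern introduced above: a ServiceCtx-shaped adapter
// whose only job is to unregister a LOOP plugin when the job shuts down.
type loopCloser struct {
	unregister func()
}

func (l *loopCloser) Start(context.Context) error { return nil }

func (l *loopCloser) Close() error {
	// Freeing the registry slot on Close is what lets a restarted job register
	// the same loop ID again instead of failing with "plugin already registered".
	l.unregister()
	return nil
}
```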
diff --git a/core/services/ocr2/plugins/mercury/plugin_test.go b/core/services/ocr2/plugins/mercury/plugin_test.go
index 22aaf7522de..eb67da53100 100644
--- a/core/services/ocr2/plugins/mercury/plugin_test.go
+++ b/core/services/ocr2/plugins/mercury/plugin_test.go
@@ -2,6 +2,7 @@ package mercury_test
import (
"context"
+ "errors"
"os/exec"
"reflect"
"testing"
@@ -9,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"github.com/smartcontractkit/chainlink/v2/core/config/env"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -22,6 +24,7 @@ import (
v2 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2"
v3 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3"
v4 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
mercuryocr2 "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury"
@@ -92,21 +95,23 @@ var (
// this is kind of gross, but it's the best way to test return values of the services
expectedEmbeddedServiceCnt = 3
- expectedLoopServiceCnt = expectedEmbeddedServiceCnt + 1
+ expectedLoopServiceCnt = expectedEmbeddedServiceCnt + 2 // factory server and loop unregisterer
)
func TestNewServices(t *testing.T) {
type args struct {
pluginConfig job.JSONConfig
feedID utils.FeedID
+ cfg mercuryocr2.Config
}
- tests := []struct {
+ testCases := []struct {
name string
args args
loopMode bool
wantLoopFactory any
wantServiceCnt int
wantErr bool
+ wantErrStr string
}{
{
name: "no plugin config error ",
@@ -186,6 +191,19 @@ func TestNewServices(t *testing.T) {
wantErr: false,
wantLoopFactory: &loop.MercuryV3Service{},
},
+ {
+ name: "v3 loop err",
+ loopMode: true,
+ args: args{
+ pluginConfig: v3jsonCfg,
+ feedID: v3FeedId,
+ cfg: mercuryocr2.NewMercuryConfig(1, 1, &testRegistrarConfig{failRegister: true}),
+ },
+ wantServiceCnt: expectedLoopServiceCnt,
+ wantErr: true,
+ wantLoopFactory: &loop.MercuryV3Service{},
+ wantErrStr: "failed to init loop for feed",
+ },
{
name: "v4 loop",
loopMode: true,
@@ -198,17 +216,27 @@ func TestNewServices(t *testing.T) {
wantLoopFactory: &loop.MercuryV4Service{},
},
}
- for _, tt := range tests {
+ for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
if tt.loopMode {
t.Setenv(string(env.MercuryPlugin.Cmd), "fake_cmd")
assert.NotEmpty(t, env.MercuryPlugin.Cmd.Get())
}
- got, err := newServicesTestWrapper(t, tt.args.pluginConfig, tt.args.feedID)
+ // use default config if not provided
+ if tt.args.cfg == nil {
+ tt.args.cfg = testCfg
+ }
+ got, err := newServicesTestWrapper(t, tt.args.pluginConfig, tt.args.feedID, tt.args.cfg)
if (err != nil) != tt.wantErr {
t.Errorf("NewServices() error = %v, wantErr %v", err, tt.wantErr)
return
}
+ if err != nil {
+ if tt.wantErrStr != "" {
+ assert.Contains(t, err.Error(), tt.wantErrStr)
+ }
+ return
+ }
assert.Len(t, got, tt.wantServiceCnt)
if tt.loopMode {
foundLoopFactory := false
@@ -222,15 +250,97 @@ func TestNewServices(t *testing.T) {
}
})
}
+
+ t.Run("restartable loop", func(t *testing.T) {
+ // setup a real loop registry to test restartability
+ registry := plugins.NewLoopRegistry(logger.TestLogger(t), nil, nil, nil, "")
+ loopRegistrarConfig := plugins.NewRegistrarConfig(loop.GRPCOpts{}, registry.Register, registry.Unregister)
+ prodCfg := mercuryocr2.NewMercuryConfig(1, 1, loopRegistrarConfig)
+ type args struct {
+ pluginConfig job.JSONConfig
+ feedID utils.FeedID
+ cfg mercuryocr2.Config
+ }
+ testCases := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "v1 loop",
+ args: args{
+ pluginConfig: v1jsonCfg,
+ feedID: v1FeedId,
+ cfg: prodCfg,
+ },
+ wantErr: false,
+ },
+ {
+ name: "v2 loop",
+ args: args{
+ pluginConfig: v2jsonCfg,
+ feedID: v2FeedId,
+ cfg: prodCfg,
+ },
+ wantErr: false,
+ },
+ {
+ name: "v3 loop",
+ args: args{
+ pluginConfig: v3jsonCfg,
+ feedID: v3FeedId,
+ cfg: prodCfg,
+ },
+ wantErr: false,
+ },
+ {
+ name: "v4 loop",
+ args: args{
+ pluginConfig: v4jsonCfg,
+ feedID: v4FeedId,
+ cfg: prodCfg,
+ },
+ wantErr: false,
+ },
+ }
+
+ for _, tt := range testCases {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Setenv(string(env.MercuryPlugin.Cmd), "fake_cmd")
+ assert.NotEmpty(t, env.MercuryPlugin.Cmd.Get())
+
+ got, err := newServicesTestWrapper(t, tt.args.pluginConfig, tt.args.feedID, tt.args.cfg)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("NewServices() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+			// hack to simulate a restart. we don't have enough boilerplate to start the oracle service;
+			// we only care about the subservices, so we start all except the oracle, which happens to be the last one
+ for i := 0; i < len(got)-1; i++ {
+ require.NoError(t, got[i].Start(tests.Context(t)))
+ }
+ // if we don't close the services, we get conflicts with the loop registry
+ _, err = newServicesTestWrapper(t, tt.args.pluginConfig, tt.args.feedID, tt.args.cfg)
+ require.ErrorContains(t, err, "plugin already registered")
+
+ // close all services and try again
+ for i := len(got) - 2; i >= 0; i-- {
+ require.NoError(t, got[i].Close())
+ }
+ _, err = newServicesTestWrapper(t, tt.args.pluginConfig, tt.args.feedID, tt.args.cfg)
+ require.NoError(t, err)
+ })
+ }
+ })
}
// we are only varying the version via feedID (and the plugin config)
// this wrapper supplies dummy values for the rest of the arguments
-func newServicesTestWrapper(t *testing.T, pluginConfig job.JSONConfig, feedID utils.FeedID) ([]job.ServiceCtx, error) {
+func newServicesTestWrapper(t *testing.T, pluginConfig job.JSONConfig, feedID utils.FeedID, cfg mercuryocr2.Config) ([]job.ServiceCtx, error) {
t.Helper()
jb := testJob
jb.OCR2OracleSpec.PluginConfig = pluginConfig
- return mercuryocr2.NewServices(jb, &testProvider{}, nil, logger.TestLogger(t), testArgsNoPlugin, testCfg, nil, &testDataSourceORM{}, feedID, false)
+ return mercuryocr2.NewServices(jb, &testProvider{}, nil, logger.TestLogger(t), testArgsNoPlugin, cfg, nil, &testDataSourceORM{}, feedID, false)
}
type testProvider struct{}
@@ -292,16 +402,21 @@ func (*testProvider) ReportCodecV3() v3.ReportCodec { return nil }
func (*testProvider) ReportCodecV4() v4.ReportCodec { return nil }
// Start implements types.MercuryProvider.
-func (*testProvider) Start(context.Context) error { panic("unimplemented") }
+func (*testProvider) Start(context.Context) error { return nil }
var _ commontypes.MercuryProvider = (*testProvider)(nil)
-type testRegistrarConfig struct{}
+type testRegistrarConfig struct {
+ failRegister bool
+}
func (c *testRegistrarConfig) UnregisterLOOP(ID string) {}
// RegisterLOOP implements plugins.RegistrarConfig.
-func (*testRegistrarConfig) RegisterLOOP(config plugins.CmdConfig) (func() *exec.Cmd, loop.GRPCOpts, error) {
+func (c *testRegistrarConfig) RegisterLOOP(config plugins.CmdConfig) (func() *exec.Cmd, loop.GRPCOpts, error) {
+ if c.failRegister {
+ return nil, loop.GRPCOpts{}, errors.New("failed to register")
+ }
return nil, loop.GRPCOpts{}, nil
}
diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_test.go
index 66112756370..2283559365c 100644
--- a/core/services/ocr2/plugins/ocr2keeper/integration_test.go
+++ b/core/services/ocr2/plugins/ocr2keeper/integration_test.go
@@ -29,6 +29,8 @@ import (
"github.com/smartcontractkit/chainlink-automation/pkg/v2/config"
commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
"github.com/smartcontractkit/chainlink-common/pkg/types"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
@@ -200,7 +202,7 @@ func getUpkeepIDFromTx(t *testing.T, registry *keeper_registry_wrapper2_0.Keeper
}
func TestIntegration_KeeperPluginBasic(t *testing.T) {
- testutils.SkipFlakey(t, "https://smartcontract-it.atlassian.net/browse/AUTO-11072")
+ tests.SkipFlakey(t, "https://smartcontract-it.atlassian.net/browse/AUTO-11072")
runKeeperPluginBasic(t)
}
diff --git a/core/services/ocr3/promwrapper/factory.go b/core/services/ocr3/promwrapper/factory.go
index 6518cea3c0d..e369b3260ef 100644
--- a/core/services/ocr3/promwrapper/factory.go
+++ b/core/services/ocr3/promwrapper/factory.go
@@ -47,6 +47,7 @@ func (r ReportingPluginFactory[RI]) NewReportingPlugin(ctx context.Context, conf
config.ConfigDigest.String(),
promOCR3ReportsGenerated,
promOCR3Durations,
+ promOCR3Sizes,
promOCR3PluginStatus,
)
return wrapped, info, err
diff --git a/core/services/ocr3/promwrapper/plugin.go b/core/services/ocr3/promwrapper/plugin.go
index dcee5050d1e..aa5fb87a6ee 100644
--- a/core/services/ocr3/promwrapper/plugin.go
+++ b/core/services/ocr3/promwrapper/plugin.go
@@ -21,6 +21,7 @@ type reportingPlugin[RI any] struct {
// Prometheus components for tracking metrics
reportsGenerated *prometheus.CounterVec
durations *prometheus.HistogramVec
+ sizes *prometheus.CounterVec
status *prometheus.GaugeVec
}
@@ -31,6 +32,7 @@ func newReportingPlugin[RI any](
configDigest string,
reportsGenerated *prometheus.CounterVec,
durations *prometheus.HistogramVec,
+ sizes *prometheus.CounterVec,
status *prometheus.GaugeVec,
) *reportingPlugin[RI] {
return &reportingPlugin[RI]{
@@ -40,6 +42,7 @@ func newReportingPlugin[RI any](
configDigest: configDigest,
reportsGenerated: reportsGenerated,
durations: durations,
+ sizes: sizes,
status: status,
}
}
@@ -51,9 +54,11 @@ func (p *reportingPlugin[RI]) Query(ctx context.Context, outctx ocr3types.Outcom
}
func (p *reportingPlugin[RI]) Observation(ctx context.Context, outctx ocr3types.OutcomeContext, query ocrtypes.Query) (ocrtypes.Observation, error) {
- return withObservedExecution(p, observation, func() (ocrtypes.Observation, error) {
+ result, err := withObservedExecution(p, observation, func() (ocrtypes.Observation, error) {
return p.ReportingPlugin.Observation(ctx, outctx, query)
})
+ p.trackSize(observation, len(result), err)
+ return result, err
}
func (p *reportingPlugin[RI]) ValidateObservation(ctx context.Context, outctx ocr3types.OutcomeContext, query ocrtypes.Query, ao ocrtypes.AttributedObservation) error {
@@ -65,9 +70,11 @@ func (p *reportingPlugin[RI]) ValidateObservation(ctx context.Context, outctx oc
}
func (p *reportingPlugin[RI]) Outcome(ctx context.Context, outctx ocr3types.OutcomeContext, query ocrtypes.Query, aos []ocrtypes.AttributedObservation) (ocr3types.Outcome, error) {
- return withObservedExecution(p, outcome, func() (ocr3types.Outcome, error) {
+ result, err := withObservedExecution(p, outcome, func() (ocr3types.Outcome, error) {
return p.ReportingPlugin.Outcome(ctx, outctx, query, aos)
})
+ p.trackSize(outcome, len(result), err)
+ return result, err
}
func (p *reportingPlugin[RI]) Reports(ctx context.Context, seqNr uint64, outcome ocr3types.Outcome) ([]ocr3types.ReportPlus[RI], error) {
@@ -111,6 +118,15 @@ func (p *reportingPlugin[RI]) updateStatus(status bool) {
Set(float64(boolToInt(status)))
}
+func (p *reportingPlugin[RI]) trackSize(function functionType, size int, err error) {
+ if err != nil {
+ return
+ }
+ p.sizes.
+ WithLabelValues(p.chainID, p.plugin, string(function)).
+ Add(float64(size))
+}
+
func boolToInt(arg bool) int {
if arg {
return 1
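
Observation and Outcome now feed their encoded payload lengths into the sizes counter, and only on success, so error paths do not inflate the byte totals. A standalone sketch of the accumulation logic, with illustrative metric and label names:

```go
package example

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// sizes plays the role of promOCR3Sizes; the metric name and labels here are
// illustrative only.
var sizes = promauto.NewCounterVec(prometheus.CounterOpts{
	Name: "example_ocr3_payload_bytes_total",
	Help: "Total bytes produced per OCR3 phase (observation, outcome).",
}, []string{"chainID", "plugin", "function"})

// trackSize adds the payload length for a phase, skipping failed executions.
func trackSize(chainID, plugin, function string, size int, err error) {
	if err != nil {
		return
	}
	sizes.WithLabelValues(chainID, plugin, function).Add(float64(size))
}
```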
diff --git a/core/services/ocr3/promwrapper/plugin_test.go b/core/services/ocr3/promwrapper/plugin_test.go
index 9a7b6f2e648..a10a467799f 100644
--- a/core/services/ocr3/promwrapper/plugin_test.go
+++ b/core/services/ocr3/promwrapper/plugin_test.go
@@ -17,17 +17,20 @@ import (
)
func Test_ReportsGeneratedGauge(t *testing.T) {
+ pluginObservationSize := 5
+ pluginOutcomeSize := 3
+
plugin1 := newReportingPlugin(
fakePlugin[uint]{reports: make([]ocr3types.ReportPlus[uint], 2)},
- "123", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3PluginStatus,
+ "123", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3Sizes, promOCR3PluginStatus,
)
plugin2 := newReportingPlugin(
- fakePlugin[bool]{reports: make([]ocr3types.ReportPlus[bool], 10)},
- "solana", "different_plugin", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3PluginStatus,
+ fakePlugin[bool]{reports: make([]ocr3types.ReportPlus[bool], 10), observationSize: pluginObservationSize, outcomeSize: pluginOutcomeSize},
+ "solana", "different_plugin", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3Sizes, promOCR3PluginStatus,
)
plugin3 := newReportingPlugin(
fakePlugin[string]{err: errors.New("error")},
- "1234", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3PluginStatus,
+ "1234", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3Sizes, promOCR3PluginStatus,
)
r1, err := plugin1.Reports(tests.Context(t), 1, nil)
@@ -64,20 +67,33 @@ func Test_ReportsGeneratedGauge(t *testing.T) {
require.NoError(t, plugin1.Close())
pluginHealth = testutil.ToFloat64(promOCR3PluginStatus.WithLabelValues("123", "empty", "abc"))
require.Equal(t, 0, int(pluginHealth))
+
+ iterations := 10
+ for i := 0; i < iterations; i++ {
+ _, err1 := plugin2.Outcome(tests.Context(t), ocr3types.OutcomeContext{}, nil, nil)
+ require.NoError(t, err1)
+ }
+ _, err1 := plugin2.Observation(tests.Context(t), ocr3types.OutcomeContext{}, nil)
+ require.NoError(t, err1)
+
+ outcomesLen := testutil.ToFloat64(promOCR3Sizes.WithLabelValues("solana", "different_plugin", "outcome"))
+ require.Equal(t, pluginOutcomeSize*iterations, int(outcomesLen))
+ observationLen := testutil.ToFloat64(promOCR3Sizes.WithLabelValues("solana", "different_plugin", "observation"))
+ require.Equal(t, pluginObservationSize, int(observationLen))
}
func Test_DurationHistograms(t *testing.T) {
plugin1 := newReportingPlugin(
fakePlugin[uint]{},
- "123", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3PluginStatus,
+ "123", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3Sizes, promOCR3PluginStatus,
)
plugin2 := newReportingPlugin(
fakePlugin[uint]{err: errors.New("error")},
- "123", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3PluginStatus,
+ "123", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3Sizes, promOCR3PluginStatus,
)
plugin3 := newReportingPlugin(
fakePlugin[uint]{},
- "solana", "commit", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3PluginStatus,
+ "solana", "commit", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3Sizes, promOCR3PluginStatus,
)
for _, p := range []*reportingPlugin[uint]{plugin1, plugin2, plugin3} {
@@ -102,8 +118,10 @@ func Test_DurationHistograms(t *testing.T) {
}
type fakePlugin[RI any] struct {
- reports []ocr3types.ReportPlus[RI]
- err error
+ reports []ocr3types.ReportPlus[RI]
+ observationSize int
+ outcomeSize int
+ err error
}
func (f fakePlugin[RI]) Query(context.Context, ocr3types.OutcomeContext) (ocrtypes.Query, error) {
@@ -117,7 +135,7 @@ func (f fakePlugin[RI]) Observation(context.Context, ocr3types.OutcomeContext, o
if f.err != nil {
return nil, f.err
}
- return ocrtypes.Observation{}, nil
+ return make([]byte, f.observationSize), nil
}
func (f fakePlugin[RI]) ValidateObservation(context.Context, ocr3types.OutcomeContext, ocrtypes.Query, ocrtypes.AttributedObservation) error {
@@ -132,7 +150,7 @@ func (f fakePlugin[RI]) Outcome(context.Context, ocr3types.OutcomeContext, ocrty
if f.err != nil {
return nil, f.err
}
- return ocr3types.Outcome{}, nil
+ return make([]byte, f.outcomeSize), nil
}
func (f fakePlugin[RI]) Reports(context.Context, uint64, ocr3types.Outcome) ([]ocr3types.ReportPlus[RI], error) {
diff --git a/core/services/ocr3/promwrapper/types.go b/core/services/ocr3/promwrapper/types.go
index 2fa29dcdf20..59468358783 100644
--- a/core/services/ocr3/promwrapper/types.go
+++ b/core/services/ocr3/promwrapper/types.go
@@ -48,6 +48,13 @@ var (
},
[]string{"chainID", "plugin", "function", "success"},
)
+ promOCR3Sizes = promauto.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "ocr3_reporting_plugin_data_sizes",
+ Help: "Tracks the size of the data produced by OCR3 plugin in bytes (e.g. reports, observations etc.)",
+ },
+ []string{"chainID", "plugin", "function"},
+ )
promOCR3PluginStatus = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Name: "ocr3_reporting_plugin_status",
diff --git a/core/services/pg/connection.go b/core/services/pg/connection.go
index 64a137762fc..bf3663e82ce 100644
--- a/core/services/pg/connection.go
+++ b/core/services/pg/connection.go
@@ -19,7 +19,7 @@ import (
"go.opentelemetry.io/otel"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
+ pgcommon "github.com/smartcontractkit/chainlink-common/pkg/sqlutil/pg"
)
// NOTE: This is the default level in Postgres anyway, we just make it
@@ -51,7 +51,7 @@ type ConnectionConfig interface {
MaxIdleConns() int
}
-func NewConnection(ctx context.Context, uri string, dialect dialects.DialectName, config ConnectionConfig) (*sqlx.DB, error) {
+func NewConnection(ctx context.Context, uri string, dialect pgcommon.DialectName, config ConnectionConfig) (*sqlx.DB, error) {
opts := []otelsql.Option{otelsql.WithAttributes(semconv.DBSystemPostgreSQL),
otelsql.WithTracerProvider(otel.GetTracerProvider()),
otelsql.WithSQLCommenter(true),
@@ -70,7 +70,7 @@ func NewConnection(ctx context.Context, uri string, dialect dialects.DialectName
lockTimeout, idleInTxSessionTimeout, defaultIsolation.String())
var sqldb *sql.DB
- if dialect == dialects.TransactionWrappedPostgres {
+ if dialect == pgcommon.TransactionWrappedPostgres {
// Dbtx uses the uri as a unique identifier for each transaction. Each ORM
// should be encapsulated in its own transaction, and thus needs its own
// unique id.
@@ -78,7 +78,11 @@ func NewConnection(ctx context.Context, uri string, dialect dialects.DialectName
// We can happily throw away the original uri here because if we are using
// txdb it should have already been set at the point where we called
// txdb.Register
- var err error
+
+ err := pgcommon.RegisterTxDb(uri)
+ if err != nil {
+ return nil, fmt.Errorf("failed to register txdb: %w", err)
+ }
sqldb, err = otelsql.Open(string(dialect), uuid.New().String(), opts...)
if err != nil {
return nil, fmt.Errorf("failed to open txdb: %w", err)
diff --git a/core/services/pg/connection_test.go b/core/services/pg/connection_test.go
index c4314bfb309..3ae70d14637 100644
--- a/core/services/pg/connection_test.go
+++ b/core/services/pg/connection_test.go
@@ -4,15 +4,13 @@ import (
"testing"
"time"
- "github.com/google/uuid"
_ "github.com/jackc/pgx/v4/stdlib"
- "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
)
var _ Getter = &mockGetter{}
@@ -67,11 +65,9 @@ func Test_checkVersion(t *testing.T) {
func Test_disallowReplica(t *testing.T) {
testutils.SkipShortDB(t)
- db, err := sqlx.Open(string(dialects.TransactionWrappedPostgres), uuid.New().String())
- require.NoError(t, err)
- t.Cleanup(func() { require.NoError(t, db.Close()) })
+ db := pgtest.NewSqlxDB(t)
- _, err = db.Exec("SET session_replication_role= 'origin'")
+ _, err := db.Exec("SET session_replication_role= 'origin'")
require.NoError(t, err)
err = disallowReplica(db)
require.NoError(t, err)
diff --git a/core/services/pg/locked_db.go b/core/services/pg/locked_db.go
index 14ddb2317a5..baea01b43a5 100644
--- a/core/services/pg/locked_db.go
+++ b/core/services/pg/locked_db.go
@@ -11,10 +11,11 @@ import (
"github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink-common/pkg/services"
+ "github.com/smartcontractkit/chainlink-common/pkg/sqlutil/pg"
+
"github.com/smartcontractkit/chainlink/v2/core/config"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/static"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
)
// LockedDB bounds DB connection and DB locks.
@@ -28,7 +29,7 @@ type LockedDBConfig interface {
ConnectionConfig
URL() url.URL
DefaultQueryTimeout() time.Duration
- Dialect() dialects.DialectName
+ Dialect() pg.DialectName
}
type lockedDb struct {
diff --git a/core/services/relay/evm/cap_encoder.go b/core/services/relay/evm/cap_encoder.go
index 2a6f288a5de..713a9796dd2 100644
--- a/core/services/relay/evm/cap_encoder.go
+++ b/core/services/relay/evm/cap_encoder.go
@@ -8,6 +8,7 @@ import (
"fmt"
consensustypes "github.com/smartcontractkit/chainlink-common/pkg/capabilities/consensus/ocr3/types"
+ commoncodec "github.com/smartcontractkit/chainlink-common/pkg/codec"
commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
"github.com/smartcontractkit/chainlink-common/pkg/values"
@@ -17,8 +18,9 @@ import (
)
const (
- abiConfigFieldName = "abi"
- encoderName = "user"
+ abiConfigFieldName = "abi"
+ subabiConfigFieldName = "subabi"
+ encoderName = "user"
)
type capEncoder struct {
@@ -46,9 +48,33 @@ func NewEVMEncoder(config *values.Map) (consensustypes.Encoder, error) {
return nil, err
}
+ chainCodecConfig := types.ChainCodecConfig{
+ TypeABI: string(jsonSelector),
+ }
+
+ var subabi map[string]string
+ subabiConfig, ok := config.Underlying[subabiConfigFieldName]
+ if ok {
+ err2 := subabiConfig.UnwrapTo(&subabi)
+ if err2 != nil {
+ return nil, err2
+ }
+ codecs, err2 := makePreCodecModifierCodecs(subabi)
+ if err2 != nil {
+ return nil, err2
+ }
+ chainCodecConfig.ModifierConfigs = commoncodec.ModifiersConfig{
+ &commoncodec.PreCodecModifierConfig{
+ Fields: subabi,
+ Codecs: codecs,
+ },
+ }
+ }
+
codecConfig := types.CodecConfig{Configs: map[string]types.ChainCodecConfig{
- encoderName: {TypeABI: string(jsonSelector)},
+ encoderName: chainCodecConfig,
}}
+
c, err := codec.NewCodec(codecConfig)
if err != nil {
return nil, err
@@ -57,6 +83,32 @@ func NewEVMEncoder(config *values.Map) (consensustypes.Encoder, error) {
return &capEncoder{codec: c}, nil
}
+func makePreCodecModifierCodecs(subabi map[string]string) (map[string]commontypes.RemoteCodec, error) {
+ codecs := map[string]commontypes.RemoteCodec{}
+ for _, abi := range subabi {
+ selector, err := abiutil.ParseSelector("inner(" + abi + ")")
+ if err != nil {
+ return nil, err
+ }
+ jsonSelector, err := json.Marshal(selector.Inputs)
+ if err != nil {
+ return nil, err
+ }
+ emptyName := ""
+ codecConfig := types.CodecConfig{Configs: map[string]types.ChainCodecConfig{
+ emptyName: {
+ TypeABI: string(jsonSelector),
+ },
+ }}
+ codec, err := codec.NewCodec(codecConfig)
+ if err != nil {
+ return nil, err
+ }
+ codecs[abi] = codec
+ }
+ return codecs, nil
+}
+
func (c *capEncoder) Encode(ctx context.Context, input values.Map) ([]byte, error) {
unwrappedInput, err := input.Unwrap()
if err != nil {
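The subabi handling above lets a top-level field that is declared as raw bytes carry its own ABI-encoded tuple, decoded by a dedicated inner codec. The config shape accepted by NewEVMEncoder looks like this (mirrors the TestEVMEncoder_SubABI case added below; the map is wrapped with values.NewMap before being passed in):

    // Outer ABI plus a nested ABI for the Reports.Bundle bytes field.
    config := map[string]any{
        "abi": "(bytes32 FeedID, bytes Bundle, uint32 Timestamp)[] Reports",
        "subabi": map[string]string{
            "Reports.Bundle": "uint256 Ask, uint256 Bid",
        },
    }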
diff --git a/core/services/relay/evm/cap_encoder_test.go b/core/services/relay/evm/cap_encoder_test.go
index d290a7fd2b0..4c0285fc987 100644
--- a/core/services/relay/evm/cap_encoder_test.go
+++ b/core/services/relay/evm/cap_encoder_test.go
@@ -217,6 +217,78 @@ func TestEVMEncoder_InvalidIDs(t *testing.T) {
assert.ErrorContains(t, err, "incorrect length for id")
}
+func TestEVMEncoder_SubABI(t *testing.T) {
+ config := map[string]any{
+ "abi": "(bytes32 FeedID, bytes Bundle, uint32 Timestamp)[] Reports",
+ "subabi": map[string]string{
+ "Reports.Bundle": "uint256 Ask, uint256 Bid",
+ },
+ }
+ wrapped, err := values.NewMap(config)
+ require.NoError(t, err)
+ enc, err := evm.NewEVMEncoder(wrapped)
+ require.NoError(t, err)
+
+ type SubReport struct {
+ Ask int
+ Bid int
+ }
+ type ReportStruct struct {
+ FeedID [32]byte
+ Bundle SubReport
+ Timestamp uint32
+ }
+ reportOne := ReportStruct{
+ FeedID: [32]byte{1},
+ Bundle: SubReport{
+ Ask: 5,
+ Bid: 6,
+ },
+ Timestamp: 47890122,
+ }
+ reportTwo := ReportStruct{
+ FeedID: [32]byte{2},
+ Bundle: SubReport{
+ Ask: 7,
+ Bid: 8,
+ },
+ Timestamp: 47890122,
+ }
+
+ // output of a reduce aggregator + metadata fields appended by OCR
+ input := map[string]any{
+ "Reports": []any{reportOne, reportTwo},
+ consensustypes.MetadataFieldName: getMetadata(workflowID),
+ }
+ wrapped, err = values.NewMap(input)
+ require.NoError(t, err)
+ encoded, err := enc.Encode(testutils.Context(t), *wrapped)
+ require.NoError(t, err)
+
+ expected :=
+ // start of the outer tuple
+ getHexMetadata() +
+ // start of the inner tuple (user_fields)
+ "0000000000000000000000000000000000000000000000000000000000000020" + // offset of Reports array
+ "0000000000000000000000000000000000000000000000000000000000000002" + // length of Reports array
+ "0000000000000000000000000000000000000000000000000000000000000040" + // offset of ReportOne
+ "0000000000000000000000000000000000000000000000000000000000000100" + // offset of ReportTwo
+ "0100000000000000000000000000000000000000000000000000000000000000" + // ReportOne FeedID
+ "0000000000000000000000000000000000000000000000000000000000000060" + // offset of ReportOne Bundle
+ "0000000000000000000000000000000000000000000000000000000002dabeca" + // ReportOne Timestamp
+ "0000000000000000000000000000000000000000000000000000000000000040" + // length of ReportOne Bundle
+ "0000000000000000000000000000000000000000000000000000000000000005" + // ReportOne Ask
+ "0000000000000000000000000000000000000000000000000000000000000006" + // ReportOne Bid
+ "0200000000000000000000000000000000000000000000000000000000000000" + // ReportTwo FeedID
+ "0000000000000000000000000000000000000000000000000000000000000060" + // offset of ReportTwo Bundle
+ "0000000000000000000000000000000000000000000000000000000002dabeca" + // ReportTwo Timestamp
+ "0000000000000000000000000000000000000000000000000000000000000040" + // length of ReportTwo Bundle
+ "0000000000000000000000000000000000000000000000000000000000000007" + // ReportTwo Ask
+ "0000000000000000000000000000000000000000000000000000000000000008" // ReportTwo Bid
+
+ require.Equal(t, expected, hex.EncodeToString(encoded))
+}
+
func getHexMetadata() string {
return "01" + executionID + timestampHex + donIDHex + configVersionHex + workflowID + workflowName + workflowOwnerID + reportID
}
diff --git a/core/services/relay/evm/capabilities/workflows/syncer/workflow_syncer_test.go b/core/services/relay/evm/capabilities/workflows/syncer/workflow_syncer_test.go
index 3c6ee8a1d04..c7c164803cb 100644
--- a/core/services/relay/evm/capabilities/workflows/syncer/workflow_syncer_test.go
+++ b/core/services/relay/evm/capabilities/workflows/syncer/workflow_syncer_test.go
@@ -6,7 +6,9 @@ import (
"encoding/base64"
"encoding/hex"
"fmt"
+ rand2 "math/rand/v2"
"strings"
+ "sync"
"testing"
"time"
@@ -31,17 +33,38 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/utils/crypto"
"github.com/stretchr/testify/require"
+
+ crypto2 "github.com/ethereum/go-ethereum/crypto"
)
type testEvtHandler struct {
events []syncer.Event
+ mux sync.Mutex
}
func (m *testEvtHandler) Handle(ctx context.Context, event syncer.Event) error {
+ m.mux.Lock()
+ defer m.mux.Unlock()
m.events = append(m.events, event)
return nil
}
+func (m *testEvtHandler) ClearEvents() {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+ m.events = make([]syncer.Event, 0)
+}
+
+func (m *testEvtHandler) GetEvents() []syncer.Event {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ eventsCopy := make([]syncer.Event, len(m.events))
+ copy(eventsCopy, m.events)
+
+ return eventsCopy
+}
+
func newTestEvtHandler() *testEvtHandler {
return &testEvtHandler{
events: make([]syncer.Event, 0),
@@ -68,6 +91,138 @@ func (m *testWorkflowRegistryContractLoader) LoadWorkflows(ctx context.Context,
}, nil
}
+func Test_EventHandlerStateSync(t *testing.T) {
+ lggr := logger.TestLogger(t)
+ backendTH := testutils.NewEVMBackendTH(t)
+ donID := uint32(1)
+
+ eventPollTicker := time.NewTicker(50 * time.Millisecond)
+ defer eventPollTicker.Stop()
+
+ // Deploy a test workflow_registry
+ wfRegistryAddr, _, wfRegistryC, err := workflow_registry_wrapper.DeployWorkflowRegistry(backendTH.ContractsOwner, backendTH.Backend.Client())
+ backendTH.Backend.Commit()
+ require.NoError(t, err)
+
+ // setup contract state to allow the secrets to be updated
+ updateAllowedDONs(t, backendTH, wfRegistryC, []uint32{donID}, true)
+ updateAuthorizedAddress(t, backendTH, wfRegistryC, []common.Address{backendTH.ContractsOwner.From}, true)
+
+ // Create some initial static state
+ numberWorkflows := 20
+ for i := 0; i < numberWorkflows; i++ {
+ var workflowID [32]byte
+ _, err = rand.Read((workflowID)[:])
+ require.NoError(t, err)
+ workflow := RegisterWorkflowCMD{
+ Name: fmt.Sprintf("test-wf-%d", i),
+ DonID: donID,
+ Status: uint8(1),
+ SecretsURL: "someurl",
+ }
+ workflow.ID = workflowID
+ registerWorkflow(t, backendTH, wfRegistryC, workflow)
+ }
+
+ testEventHandler := newTestEvtHandler()
+ loader := syncer.NewWorkflowRegistryContractLoader(lggr, wfRegistryAddr.Hex(), func(ctx context.Context, bytes []byte) (syncer.ContractReader, error) {
+ return backendTH.NewContractReader(ctx, t, bytes)
+ }, testEventHandler)
+
+ // Create the registry
+ registry := syncer.NewWorkflowRegistry(
+ lggr,
+ func(ctx context.Context, bytes []byte) (syncer.ContractReader, error) {
+ return backendTH.NewContractReader(ctx, t, bytes)
+ },
+ wfRegistryAddr.Hex(),
+ syncer.WorkflowEventPollerConfig{
+ QueryCount: 20,
+ },
+ testEventHandler,
+ loader,
+ &testDonNotifier{
+ don: capabilities.DON{
+ ID: donID,
+ },
+ err: nil,
+ },
+ syncer.WithTicker(eventPollTicker.C),
+ )
+
+ servicetest.Run(t, registry)
+
+ require.Eventually(t, func() bool {
+ numEvents := len(testEventHandler.GetEvents())
+ return numEvents == numberWorkflows
+ }, 5*time.Second, time.Second)
+
+ for _, event := range testEventHandler.GetEvents() {
+ assert.Equal(t, syncer.WorkflowRegisteredEvent, event.GetEventType())
+ }
+
+ testEventHandler.ClearEvents()
+
+ // Create different event types for a number of workflows and confirm that the event handler processes them in order
+ numberOfEventCycles := 50
+ for i := 0; i < numberOfEventCycles; i++ {
+ var workflowID [32]byte
+ _, err = rand.Read((workflowID)[:])
+ require.NoError(t, err)
+ workflow := RegisterWorkflowCMD{
+ Name: "test-wf-register-event",
+ DonID: donID,
+ Status: uint8(1),
+ SecretsURL: "",
+ }
+ workflow.ID = workflowID
+
+ // Generate events of different types with some jitter
+ registerWorkflow(t, backendTH, wfRegistryC, workflow)
+ time.Sleep(time.Millisecond * time.Duration(rand2.IntN(10)))
+ data := append(backendTH.ContractsOwner.From.Bytes(), []byte(workflow.Name)...)
+ workflowKey := crypto2.Keccak256Hash(data)
+ activateWorkflow(t, backendTH, wfRegistryC, workflowKey)
+ time.Sleep(time.Millisecond * time.Duration(rand2.IntN(10)))
+ pauseWorkflow(t, backendTH, wfRegistryC, workflowKey)
+ time.Sleep(time.Millisecond * time.Duration(rand2.IntN(10)))
+ var newWorkflowID [32]byte
+ _, err = rand.Read((newWorkflowID)[:])
+ require.NoError(t, err)
+ updateWorkflow(t, backendTH, wfRegistryC, workflowKey, newWorkflowID, workflow.BinaryURL+"2", workflow.ConfigURL, workflow.SecretsURL)
+ time.Sleep(time.Millisecond * time.Duration(rand2.IntN(10)))
+ deleteWorkflow(t, backendTH, wfRegistryC, workflowKey)
+ }
+
+ // Confirm that the expected number of events is received in the correct order
+ require.Eventually(t, func() bool {
+ events := testEventHandler.GetEvents()
+ numEvents := len(events)
+ expectedNumEvents := 5 * numberOfEventCycles
+
+ if numEvents == expectedNumEvents {
+ // verify the events are the expected types in the expected order
+ for idx, event := range events {
+ switch idx % 5 {
+ case 0:
+ assert.Equal(t, syncer.WorkflowRegisteredEvent, event.GetEventType())
+ case 1:
+ assert.Equal(t, syncer.WorkflowActivatedEvent, event.GetEventType())
+ case 2:
+ assert.Equal(t, syncer.WorkflowPausedEvent, event.GetEventType())
+ case 3:
+ assert.Equal(t, syncer.WorkflowUpdatedEvent, event.GetEventType())
+ case 4:
+ assert.Equal(t, syncer.WorkflowDeletedEvent, event.GetEventType())
+ }
+ }
+ return true
+ }
+
+ return false
+ }, 50*time.Second, time.Second)
+}
+
func Test_InitialStateSync(t *testing.T) {
lggr := logger.TestLogger(t)
backendTH := testutils.NewEVMBackendTH(t)
@@ -128,10 +283,10 @@ func Test_InitialStateSync(t *testing.T) {
servicetest.Run(t, worker)
require.Eventually(t, func() bool {
- return len(testEventHandler.events) == numberWorkflows
+ return len(testEventHandler.GetEvents()) == numberWorkflows
}, 5*time.Second, time.Second)
- for _, event := range testEventHandler.events {
+ for _, event := range testEventHandler.GetEvents() {
assert.Equal(t, syncer.WorkflowRegisteredEvent, event.GetEventType())
}
}
@@ -263,7 +418,7 @@ func Test_RegistrySyncer_WorkflowRegistered_InitiallyPaused(t *testing.T) {
require.NoError(t, err)
from := [20]byte(backendTH.ContractsOwner.From)
- id, err := workflows.GenerateWorkflowID(from[:], []byte(wantContents), []byte(""), "")
+ id, err := workflows.GenerateWorkflowID(from[:], "test-wf", []byte(wantContents), []byte(""), "")
require.NoError(t, err)
giveWorkflow.ID = id
@@ -361,7 +516,7 @@ func Test_RegistrySyncer_WorkflowRegistered_InitiallyActivated(t *testing.T) {
require.NoError(t, err)
from := [20]byte(backendTH.ContractsOwner.From)
- id, err := workflows.GenerateWorkflowID(from[:], []byte(wantContents), []byte(""), "")
+ id, err := workflows.GenerateWorkflowID(from[:], "test-wf", []byte(wantContents), []byte(""), "")
require.NoError(t, err)
giveWorkflow.ID = id
@@ -497,3 +652,59 @@ func requestForceUpdateSecrets(
th.Backend.Commit()
th.Backend.Commit()
}
+
+func activateWorkflow(
+ t *testing.T,
+ th *testutils.EVMBackendTH,
+ wfRegC *workflow_registry_wrapper.WorkflowRegistry,
+ workflowKey [32]byte,
+) {
+ t.Helper()
+ _, err := wfRegC.ActivateWorkflow(th.ContractsOwner, workflowKey)
+ require.NoError(t, err, "failed to activate workflow")
+ th.Backend.Commit()
+ th.Backend.Commit()
+ th.Backend.Commit()
+}
+
+func pauseWorkflow(
+ t *testing.T,
+ th *testutils.EVMBackendTH,
+ wfRegC *workflow_registry_wrapper.WorkflowRegistry,
+ workflowKey [32]byte,
+) {
+ t.Helper()
+ _, err := wfRegC.PauseWorkflow(th.ContractsOwner, workflowKey)
+ require.NoError(t, err, "failed to pause workflow")
+ th.Backend.Commit()
+ th.Backend.Commit()
+ th.Backend.Commit()
+}
+
+func deleteWorkflow(
+ t *testing.T,
+ th *testutils.EVMBackendTH,
+ wfRegC *workflow_registry_wrapper.WorkflowRegistry,
+ workflowKey [32]byte,
+) {
+ t.Helper()
+ _, err := wfRegC.DeleteWorkflow(th.ContractsOwner, workflowKey)
+ require.NoError(t, err, "failed to delete workflow")
+ th.Backend.Commit()
+ th.Backend.Commit()
+ th.Backend.Commit()
+}
+
+func updateWorkflow(
+ t *testing.T,
+ th *testutils.EVMBackendTH,
+ wfRegC *workflow_registry_wrapper.WorkflowRegistry,
+ workflowKey [32]byte, newWorkflowID [32]byte, binaryURL string, configURL string, secretsURL string,
+) {
+ t.Helper()
+ _, err := wfRegC.UpdateWorkflow(th.ContractsOwner, workflowKey, newWorkflowID, binaryURL, configURL, secretsURL)
+ require.NoError(t, err, "failed to update workflow")
+ th.Backend.Commit()
+ th.Backend.Commit()
+ th.Backend.Commit()
+}
diff --git a/core/services/relay/evm/chain_components_test.go b/core/services/relay/evm/chain_components_test.go
index bc2703d9678..1e8c47c51ec 100644
--- a/core/services/relay/evm/chain_components_test.go
+++ b/core/services/relay/evm/chain_components_test.go
@@ -3,6 +3,7 @@ package evm_test
import (
"context"
"crypto/ecdsa"
+ "errors"
"fmt"
"math"
"math/big"
@@ -12,6 +13,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
evmtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient/simulated"
@@ -19,15 +21,21 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "github.com/smartcontractkit/chainlink-common/pkg/services"
+
commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
commontestutils "github.com/smartcontractkit/chainlink-common/pkg/loop/testutils"
clcommontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
"github.com/smartcontractkit/chainlink-common/pkg/types/interfacetests"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+ htMocks "github.com/smartcontractkit/chainlink/v2/common/headtracker/mocks"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ lpMocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
evmtxmgr "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
+ clevmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest"
@@ -206,8 +214,21 @@ func TestContractReaderEventsInitValidation(t *testing.T) {
}
}
+func TestChainReader_HealthReport(t *testing.T) {
+ lp := lpMocks.NewLogPoller(t)
+ lp.EXPECT().HealthReport().Return(map[string]error{"lp_name": clcommontypes.ErrFinalityViolated}).Once()
+ ht := htMocks.NewHeadTracker[*clevmtypes.Head, common.Hash](t)
+ htError := errors.New("head tracker error")
+ ht.EXPECT().HealthReport().Return(map[string]error{"ht_name": htError}).Once()
+ cr, err := evm.NewChainReaderService(testutils.Context(t), logger.NullLogger, lp, ht, nil, types.ChainReaderConfig{Contracts: nil})
+ require.NoError(t, err)
+ healthReport := cr.HealthReport()
+ require.True(t, services.ContainsError(healthReport, clcommontypes.ErrFinalityViolated), "expected chain reader to propagate logpoller's error")
+ require.True(t, services.ContainsError(healthReport, htError), "expected chain reader to propagate headtracker's error")
+}
+
func TestChainComponents(t *testing.T) {
- testutils.SkipFlakey(t, "https://smartcontract-it.atlassian.net/browse/BCFR-1083")
+ tests.SkipFlakey(t, "https://smartcontract-it.atlassian.net/browse/BCFR-1083")
t.Parallel()
it := &EVMChainComponentsInterfaceTester[*testing.T]{Helper: &helper{}}
// TODO, generated binding tests are broken
diff --git a/core/services/relay/evm/chain_reader.go b/core/services/relay/evm/chain_reader.go
index 99be89eae17..ffe9cd19aea 100644
--- a/core/services/relay/evm/chain_reader.go
+++ b/core/services/relay/evm/chain_reader.go
@@ -20,6 +20,7 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/types/query"
"github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives"
"github.com/smartcontractkit/chainlink-common/pkg/values"
+
evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
@@ -179,7 +180,13 @@ func (cr *chainReader) Close() error {
func (cr *chainReader) Ready() error { return nil }
func (cr *chainReader) HealthReport() map[string]error {
- return map[string]error{cr.Name(): nil}
+ report := map[string]error{
+ cr.Name(): cr.Healthy(),
+ }
+
+ commonservices.CopyHealth(report, cr.lp.HealthReport())
+ commonservices.CopyHealth(report, cr.ht.HealthReport())
+ return report
}
func (cr *chainReader) Bind(ctx context.Context, bindings []commontypes.BoundContract) error {
@@ -199,7 +206,11 @@ func (cr *chainReader) GetLatestValue(ctx context.Context, readName string, conf
ptrToValue, isValue := returnVal.(*values.Value)
if !isValue {
_, err = binding.GetLatestValueWithHeadData(ctx, common.HexToAddress(address), confidenceLevel, params, returnVal)
- return err
+ if err != nil {
+ return err
+ }
+
+ return nil
}
contractType, err := cr.CreateContractType(readName, false)
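The HealthReport change above folds the log poller's and head tracker's reports into the chain reader's report instead of always returning nil. The merge semantics assumed of commonservices.CopyHealth are simply "copy each sub-component entry into the parent map", as in this sketch (illustrative; not the library implementation):

    // copyHealth merges a sub-component's health report into the parent report,
    // keyed by sub-component name, so an unhealthy dependency surfaces at the top level.
    func copyHealth(parent, sub map[string]error) {
        for name, err := range sub {
            parent[name] = err
        }
    }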
diff --git a/core/services/relay/evm/read/batch.go b/core/services/relay/evm/read/batch.go
index 16333149f11..ce1c546ad73 100644
--- a/core/services/relay/evm/read/batch.go
+++ b/core/services/relay/evm/read/batch.go
@@ -241,32 +241,36 @@ func (c *defaultEvmBatchCaller) unpackBatchResults(
return nil, callErr
}
- if err = c.codec.Decode(
- ctx,
- packedBytes,
- call.ReturnVal,
- codec.WrapItemType(call.ContractName, call.ReadName, false),
- ); err != nil {
- if len(packedBytes) == 0 {
- callErr := newErrorFromCall(
- fmt.Errorf("%w: %w: %s", types.ErrInternal, errEmptyOutput, err.Error()),
- call, block, batchReadType,
- )
-
- callErr.Result = &hexEncodedOutputs[idx]
-
- results[idx].err = callErr
- } else {
- callErr := newErrorFromCall(
- fmt.Errorf("%w: codec decode result: %s", types.ErrInvalidType, err.Error()),
- call, block, batchReadType,
- )
-
- callErr.Result = &hexEncodedOutputs[idx]
- results[idx].err = callErr
- }
+ // the codec can't do anything with no bytes, so skip decoding and allow
+ // the result to be the empty struct or value
+ if len(packedBytes) > 0 {
+ if err = c.codec.Decode(
+ ctx,
+ packedBytes,
+ call.ReturnVal,
+ codec.WrapItemType(call.ContractName, call.ReadName, false),
+ ); err != nil {
+ if len(packedBytes) == 0 {
+ callErr := newErrorFromCall(
+ fmt.Errorf("%w: %w: %s", types.ErrInternal, errEmptyOutput, err.Error()),
+ call, block, batchReadType,
+ )
+
+ callErr.Result = &hexEncodedOutputs[idx]
+
+ results[idx].err = callErr
+ } else {
+ callErr := newErrorFromCall(
+ fmt.Errorf("%w: codec decode result: %s", types.ErrInvalidType, err.Error()),
+ call, block, batchReadType,
+ )
+
+ callErr.Result = &hexEncodedOutputs[idx]
+ results[idx].err = callErr
+ }
- continue
+ continue
+ }
}
results[idx].returnVal = call.ReturnVal
diff --git a/core/services/relay/evm/read/method.go b/core/services/relay/evm/read/method.go
index e988e4352f7..ed44e1aa9ca 100644
--- a/core/services/relay/evm/read/method.go
+++ b/core/services/relay/evm/read/method.go
@@ -2,6 +2,7 @@ package read
import (
"context"
+ "errors"
"fmt"
"math/big"
"sync"
@@ -22,6 +23,8 @@ import (
evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
)
+var ErrEmptyContractReturnValue = errors.New("the contract return value was empty")
+
type MethodBinding struct {
// read-only properties
contractName string
@@ -173,6 +176,12 @@ func (b *MethodBinding) GetLatestValueWithHeadData(ctx context.Context, addr com
return nil, callErr
}
+ // There may be cases where the contract value has not been set and the RPC returns 0x,
+ // i.e. an empty byte slice. There is no need to run the codec in this case.
+ if len(bytes) == 0 {
+ return block.ToChainAgnosticHead(), nil
+ }
+
if err = b.codec.Decode(ctx, bytes, returnVal, codec.WrapItemType(b.contractName, b.method, false)); err != nil {
callErr := newErrorFromCall(
fmt.Errorf("%w: decode return data: %s", commontypes.ErrInvalidType, err.Error()),
diff --git a/core/services/relay/evm/statuschecker/txm_status_checker_test.go b/core/services/relay/evm/statuschecker/txm_status_checker_test.go
index 456d07e7a7d..7a682d708e2 100644
--- a/core/services/relay/evm/statuschecker/txm_status_checker_test.go
+++ b/core/services/relay/evm/statuschecker/txm_status_checker_test.go
@@ -10,12 +10,13 @@ import (
"github.com/stretchr/testify/mock"
"github.com/smartcontractkit/chainlink-common/pkg/types"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr/mocks"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
)
func Test_CheckMessageStatus(t *testing.T) {
- testutils.SkipShort(t, "")
+ tests.SkipShort(t, "")
ctx := context.Background()
mockTxManager := mocks.NewMockEvmTxManager(t)
checker := NewTxmStatusChecker(mockTxManager.GetTransactionStatus)
diff --git a/core/services/vrf/v2/integration_v2_plus_test.go b/core/services/vrf/v2/integration_v2_plus_test.go
index 75cffe1057c..d1cc030043d 100644
--- a/core/services/vrf/v2/integration_v2_plus_test.go
+++ b/core/services/vrf/v2/integration_v2_plus_test.go
@@ -17,6 +17,7 @@ import (
"github.com/stretchr/testify/require"
commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml"
@@ -66,7 +67,7 @@ type coordinatorV2PlusUniverse struct {
}
func newVRFCoordinatorV2PlusUniverse(t *testing.T, key ethkey.KeyV2, numConsumers int, trusting bool) coordinatorV2PlusUniverse {
- testutils.SkipShort(t, "VRFCoordinatorV2Universe")
+ tests.SkipShort(t, "VRFCoordinatorV2Universe")
oracleTransactor, err := bind.NewKeyedTransactorWithChainID(key.ToEcdsaPrivKey(), testutils.SimulatedChainID)
require.NoError(t, err)
var (
diff --git a/core/services/vrf/v2/integration_v2_test.go b/core/services/vrf/v2/integration_v2_test.go
index d9086a52a33..6cbcc799e1b 100644
--- a/core/services/vrf/v2/integration_v2_test.go
+++ b/core/services/vrf/v2/integration_v2_test.go
@@ -31,6 +31,8 @@ import (
commonassets "github.com/smartcontractkit/chainlink-common/pkg/assets"
commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
"github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+
txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr"
txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
@@ -145,7 +147,7 @@ func makeTestTxm(t *testing.T, txStore txmgr.TestEvmTxStore, keyStore keystore.M
}
func newVRFCoordinatorV2Universe(t *testing.T, key ethkey.KeyV2, numConsumers int) coordinatorV2Universe {
- testutils.SkipShort(t, "VRFCoordinatorV2Universe")
+ tests.SkipShort(t, "VRFCoordinatorV2Universe")
oracleTransactor, err := bind.NewKeyedTransactorWithChainID(key.ToEcdsaPrivKey(), testutils.SimulatedChainID)
require.NoError(t, err)
var (
diff --git a/core/services/workflows/engine.go b/core/services/workflows/engine.go
index 943802d1962..d153e53bc07 100644
--- a/core/services/workflows/engine.go
+++ b/core/services/workflows/engine.go
@@ -28,7 +28,7 @@ import (
)
const (
- fifteenMinutesMs = 15 * 60 * 1000
+ fifteenMinutesSec = 15 * 60
reservedFieldNameStepTimeout = "cre_step_timeout"
maxStepTimeoutOverrideSec = 10 * 60 // 10 minutes
)
@@ -446,7 +446,7 @@ func (e *Engine) registerTrigger(ctx context.Context, t *triggerCapability, trig
}
eventsCh, err := t.trigger.RegisterTrigger(ctx, triggerRegRequest)
if err != nil {
- e.metrics.incrementRegisterTriggerFailureCounter(ctx)
+ e.metrics.with(platform.KeyTriggerID, triggerID).incrementRegisterTriggerFailureCounter(ctx)
// It's confusing that t.ID is different from triggerID, but
// t.ID is the capability ID, and triggerID is the trigger ID.
//
@@ -704,7 +704,7 @@ func (e *Engine) finishExecution(ctx context.Context, cma custmsg.MessageEmitter
e.metrics.updateWorkflowTimeoutDurationHistogram(ctx, executionDuration)
}
- if executionDuration > fifteenMinutesMs {
+ if executionDuration > fifteenMinutesSec {
logCustMsg(ctx, cma, fmt.Sprintf("execution duration exceeded 15 minutes: %d (seconds)", executionDuration), l)
l.Warnf("execution duration exceeded 15 minutes: %d (seconds)", executionDuration)
}
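The rename from fifteenMinutesMs to fifteenMinutesSec fixes a unit mismatch: executionDuration is tracked in seconds (as the log message notes), so the old millisecond-scale threshold of 900000 would effectively never trip. A trivial restatement of the corrected check (hedged; the duration's unit is inferred from the rename and the log text above):

    const fifteenMinutesSec = 15 * 60 // 900 seconds

    // exceeded reports whether a workflow execution ran longer than 15 minutes,
    // assuming the duration is expressed in seconds.
    func exceeded(executionDurationSec int64) bool {
        return executionDurationSec > fifteenMinutesSec
    }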
diff --git a/core/services/workflows/monitoring.go b/core/services/workflows/monitoring.go
index 8457dadeb60..b73ee6e5eda 100644
--- a/core/services/workflows/monitoring.go
+++ b/core/services/workflows/monitoring.go
@@ -143,19 +143,19 @@ func MetricViews() []sdkmetric.View {
sdkmetric.NewView(
sdkmetric.Instrument{Name: "platform_engine_workflow_earlyexit_time_seconds"},
sdkmetric.Stream{Aggregation: sdkmetric.AggregationExplicitBucketHistogram{
- Boundaries: []float64{0, 1, 10, 100},
+ Boundaries: []float64{0, 1, 10, 30, 120},
}},
),
sdkmetric.NewView(
sdkmetric.Instrument{Name: "platform_engine_workflow_completed_time_seconds"},
sdkmetric.Stream{Aggregation: sdkmetric.AggregationExplicitBucketHistogram{
- Boundaries: []float64{0, 100, 1000, 10_000, 50_000, 100_0000, 500_000},
+ Boundaries: []float64{0, 10, 30, 60, 120, 300, 600, 900, 1200},
}},
),
sdkmetric.NewView(
sdkmetric.Instrument{Name: "platform_engine_workflow_error_time_seconds"},
sdkmetric.Stream{Aggregation: sdkmetric.AggregationExplicitBucketHistogram{
- Boundaries: []float64{0, 20, 60, 120, 240},
+ Boundaries: []float64{0, 30, 60, 120, 240, 600},
}},
),
sdkmetric.NewView(
diff --git a/core/services/workflows/syncer/contract_reader_mock.go b/core/services/workflows/syncer/contract_reader_mock.go
deleted file mode 100644
index e6e7c8385f5..00000000000
--- a/core/services/workflows/syncer/contract_reader_mock.go
+++ /dev/null
@@ -1,302 +0,0 @@
-// Code generated by mockery v2.46.3. DO NOT EDIT.
-
-package syncer
-
-import (
- context "context"
-
- query "github.com/smartcontractkit/chainlink-common/pkg/types/query"
- primitives "github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives"
- mock "github.com/stretchr/testify/mock"
-
- types "github.com/smartcontractkit/chainlink-common/pkg/types"
-)
-
-// MockContractReader is an autogenerated mock type for the ContractReader type
-type MockContractReader struct {
- mock.Mock
-}
-
-type MockContractReader_Expecter struct {
- mock *mock.Mock
-}
-
-func (_m *MockContractReader) EXPECT() *MockContractReader_Expecter {
- return &MockContractReader_Expecter{mock: &_m.Mock}
-}
-
-// Bind provides a mock function with given fields: _a0, _a1
-func (_m *MockContractReader) Bind(_a0 context.Context, _a1 []types.BoundContract) error {
- ret := _m.Called(_a0, _a1)
-
- if len(ret) == 0 {
- panic("no return value specified for Bind")
- }
-
- var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, []types.BoundContract) error); ok {
- r0 = rf(_a0, _a1)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
-}
-
-// MockContractReader_Bind_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Bind'
-type MockContractReader_Bind_Call struct {
- *mock.Call
-}
-
-// Bind is a helper method to define mock.On call
-// - _a0 context.Context
-// - _a1 []types.BoundContract
-func (_e *MockContractReader_Expecter) Bind(_a0 interface{}, _a1 interface{}) *MockContractReader_Bind_Call {
- return &MockContractReader_Bind_Call{Call: _e.mock.On("Bind", _a0, _a1)}
-}
-
-func (_c *MockContractReader_Bind_Call) Run(run func(_a0 context.Context, _a1 []types.BoundContract)) *MockContractReader_Bind_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].([]types.BoundContract))
- })
- return _c
-}
-
-func (_c *MockContractReader_Bind_Call) Return(_a0 error) *MockContractReader_Bind_Call {
- _c.Call.Return(_a0)
- return _c
-}
-
-func (_c *MockContractReader_Bind_Call) RunAndReturn(run func(context.Context, []types.BoundContract) error) *MockContractReader_Bind_Call {
- _c.Call.Return(run)
- return _c
-}
-
-// Close provides a mock function with given fields:
-func (_m *MockContractReader) Close() error {
- ret := _m.Called()
-
- if len(ret) == 0 {
- panic("no return value specified for Close")
- }
-
- var r0 error
- if rf, ok := ret.Get(0).(func() error); ok {
- r0 = rf()
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
-}
-
-// MockContractReader_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
-type MockContractReader_Close_Call struct {
- *mock.Call
-}
-
-// Close is a helper method to define mock.On call
-func (_e *MockContractReader_Expecter) Close() *MockContractReader_Close_Call {
- return &MockContractReader_Close_Call{Call: _e.mock.On("Close")}
-}
-
-func (_c *MockContractReader_Close_Call) Run(run func()) *MockContractReader_Close_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run()
- })
- return _c
-}
-
-func (_c *MockContractReader_Close_Call) Return(_a0 error) *MockContractReader_Close_Call {
- _c.Call.Return(_a0)
- return _c
-}
-
-func (_c *MockContractReader_Close_Call) RunAndReturn(run func() error) *MockContractReader_Close_Call {
- _c.Call.Return(run)
- return _c
-}
-
-// GetLatestValueWithHeadData provides a mock function with given fields: ctx, readName, confidenceLevel, params, returnVal
-func (_m *MockContractReader) GetLatestValueWithHeadData(ctx context.Context, readName string, confidenceLevel primitives.ConfidenceLevel, params any, returnVal any) (*types.Head, error) {
- ret := _m.Called(ctx, readName, confidenceLevel, params, returnVal)
-
- if len(ret) == 0 {
- panic("no return value specified for GetLatestValueWithHeadData")
- }
-
- var r0 *types.Head
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, primitives.ConfidenceLevel, any, any) (*types.Head, error)); ok {
- return rf(ctx, readName, confidenceLevel, params, returnVal)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, primitives.ConfidenceLevel, any, any) *types.Head); ok {
- r0 = rf(ctx, readName, confidenceLevel, params, returnVal)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(*types.Head)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string, primitives.ConfidenceLevel, any, any) error); ok {
- r1 = rf(ctx, readName, confidenceLevel, params, returnVal)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
-// MockContractReader_GetLatestValueWithHeadData_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestValueWithHeadData'
-type MockContractReader_GetLatestValueWithHeadData_Call struct {
- *mock.Call
-}
-
-// GetLatestValueWithHeadData is a helper method to define mock.On call
-// - ctx context.Context
-// - readName string
-// - confidenceLevel primitives.ConfidenceLevel
-// - params any
-// - returnVal any
-func (_e *MockContractReader_Expecter) GetLatestValueWithHeadData(ctx interface{}, readName interface{}, confidenceLevel interface{}, params interface{}, returnVal interface{}) *MockContractReader_GetLatestValueWithHeadData_Call {
- return &MockContractReader_GetLatestValueWithHeadData_Call{Call: _e.mock.On("GetLatestValueWithHeadData", ctx, readName, confidenceLevel, params, returnVal)}
-}
-
-func (_c *MockContractReader_GetLatestValueWithHeadData_Call) Run(run func(ctx context.Context, readName string, confidenceLevel primitives.ConfidenceLevel, params any, returnVal any)) *MockContractReader_GetLatestValueWithHeadData_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(string), args[2].(primitives.ConfidenceLevel), args[3].(any), args[4].(any))
- })
- return _c
-}
-
-func (_c *MockContractReader_GetLatestValueWithHeadData_Call) Return(head *types.Head, err error) *MockContractReader_GetLatestValueWithHeadData_Call {
- _c.Call.Return(head, err)
- return _c
-}
-
-func (_c *MockContractReader_GetLatestValueWithHeadData_Call) RunAndReturn(run func(context.Context, string, primitives.ConfidenceLevel, any, any) (*types.Head, error)) *MockContractReader_GetLatestValueWithHeadData_Call {
- _c.Call.Return(run)
- return _c
-}
-
-// QueryKey provides a mock function with given fields: _a0, _a1, _a2, _a3, _a4
-func (_m *MockContractReader) QueryKey(_a0 context.Context, _a1 types.BoundContract, _a2 query.KeyFilter, _a3 query.LimitAndSort, _a4 any) ([]types.Sequence, error) {
- ret := _m.Called(_a0, _a1, _a2, _a3, _a4)
-
- if len(ret) == 0 {
- panic("no return value specified for QueryKey")
- }
-
- var r0 []types.Sequence
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, types.BoundContract, query.KeyFilter, query.LimitAndSort, any) ([]types.Sequence, error)); ok {
- return rf(_a0, _a1, _a2, _a3, _a4)
- }
- if rf, ok := ret.Get(0).(func(context.Context, types.BoundContract, query.KeyFilter, query.LimitAndSort, any) []types.Sequence); ok {
- r0 = rf(_a0, _a1, _a2, _a3, _a4)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]types.Sequence)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, types.BoundContract, query.KeyFilter, query.LimitAndSort, any) error); ok {
- r1 = rf(_a0, _a1, _a2, _a3, _a4)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
-// MockContractReader_QueryKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryKey'
-type MockContractReader_QueryKey_Call struct {
- *mock.Call
-}
-
-// QueryKey is a helper method to define mock.On call
-// - _a0 context.Context
-// - _a1 types.BoundContract
-// - _a2 query.KeyFilter
-// - _a3 query.LimitAndSort
-// - _a4 any
-func (_e *MockContractReader_Expecter) QueryKey(_a0 interface{}, _a1 interface{}, _a2 interface{}, _a3 interface{}, _a4 interface{}) *MockContractReader_QueryKey_Call {
- return &MockContractReader_QueryKey_Call{Call: _e.mock.On("QueryKey", _a0, _a1, _a2, _a3, _a4)}
-}
-
-func (_c *MockContractReader_QueryKey_Call) Run(run func(_a0 context.Context, _a1 types.BoundContract, _a2 query.KeyFilter, _a3 query.LimitAndSort, _a4 any)) *MockContractReader_QueryKey_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(types.BoundContract), args[2].(query.KeyFilter), args[3].(query.LimitAndSort), args[4].(any))
- })
- return _c
-}
-
-func (_c *MockContractReader_QueryKey_Call) Return(_a0 []types.Sequence, _a1 error) *MockContractReader_QueryKey_Call {
- _c.Call.Return(_a0, _a1)
- return _c
-}
-
-func (_c *MockContractReader_QueryKey_Call) RunAndReturn(run func(context.Context, types.BoundContract, query.KeyFilter, query.LimitAndSort, any) ([]types.Sequence, error)) *MockContractReader_QueryKey_Call {
- _c.Call.Return(run)
- return _c
-}
-
-// Start provides a mock function with given fields: ctx
-func (_m *MockContractReader) Start(ctx context.Context) error {
- ret := _m.Called(ctx)
-
- if len(ret) == 0 {
- panic("no return value specified for Start")
- }
-
- var r0 error
- if rf, ok := ret.Get(0).(func(context.Context) error); ok {
- r0 = rf(ctx)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
-}
-
-// MockContractReader_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'
-type MockContractReader_Start_Call struct {
- *mock.Call
-}
-
-// Start is a helper method to define mock.On call
-// - ctx context.Context
-func (_e *MockContractReader_Expecter) Start(ctx interface{}) *MockContractReader_Start_Call {
- return &MockContractReader_Start_Call{Call: _e.mock.On("Start", ctx)}
-}
-
-func (_c *MockContractReader_Start_Call) Run(run func(ctx context.Context)) *MockContractReader_Start_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context))
- })
- return _c
-}
-
-func (_c *MockContractReader_Start_Call) Return(_a0 error) *MockContractReader_Start_Call {
- _c.Call.Return(_a0)
- return _c
-}
-
-func (_c *MockContractReader_Start_Call) RunAndReturn(run func(context.Context) error) *MockContractReader_Start_Call {
- _c.Call.Return(run)
- return _c
-}
-
-// NewMockContractReader creates a new instance of MockContractReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-// The first argument is typically a *testing.T value.
-func NewMockContractReader(t interface {
- mock.TestingT
- Cleanup(func())
-}) *MockContractReader {
- mock := &MockContractReader{}
- mock.Mock.Test(t)
-
- t.Cleanup(func() { mock.AssertExpectations(t) })
-
- return mock
-}
diff --git a/core/services/workflows/syncer/fetcher.go b/core/services/workflows/syncer/fetcher.go
index fdd0134909d..6a80739bbfe 100644
--- a/core/services/workflows/syncer/fetcher.go
+++ b/core/services/workflows/syncer/fetcher.go
@@ -18,10 +18,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/gateway/handlers/common"
)
-const (
- defaultFetchTimeoutMs = 20_000
-)
-
type FetcherService struct {
services.StateMachine
lggr logger.Logger
@@ -88,17 +84,11 @@ func hash(url string) string {
}
func (s *FetcherService) Fetch(ctx context.Context, url string) ([]byte, error) {
- payloadBytes, err := json.Marshal(ghcapabilities.Request{
- URL: url,
- Method: http.MethodGet,
- TimeoutMs: defaultFetchTimeoutMs,
- })
- if err != nil {
- return nil, fmt.Errorf("failed to marshal fetch request: %w", err)
- }
-
messageID := strings.Join([]string{ghcapabilities.MethodWorkflowSyncer, hash(url)}, "/")
- resp, err := s.och.HandleSingleNodeRequest(ctx, messageID, payloadBytes)
+ resp, err := s.och.HandleSingleNodeRequest(ctx, messageID, ghcapabilities.Request{
+ URL: url,
+ Method: http.MethodGet,
+ })
if err != nil {
return nil, err
}
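Fetch above derives a deterministic gateway message ID from the target URL before handing the request to the handler, so repeated fetches of the same URL map onto the same gateway request. A rough sketch of that derivation (hedged: the exact hash helper and the value of ghcapabilities.MethodWorkflowSyncer are assumptions here):

    import (
        "crypto/sha256"
        "encoding/hex"
        "strings"
    )

    // methodWorkflowSyncer is a stand-in for ghcapabilities.MethodWorkflowSyncer.
    const methodWorkflowSyncer = "workflow_syncer"

    // messageIDFor builds a stable per-URL message ID.
    func messageIDFor(url string) string {
        sum := sha256.Sum256([]byte(url))
        return strings.Join([]string{methodWorkflowSyncer, hex.EncodeToString(sum[:])}, "/")
    }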
diff --git a/core/services/workflows/syncer/fetcher_test.go b/core/services/workflows/syncer/fetcher_test.go
index 8e3e58fba0d..ee59d22608a 100644
--- a/core/services/workflows/syncer/fetcher_test.go
+++ b/core/services/workflows/syncer/fetcher_test.go
@@ -15,6 +15,7 @@ import (
gcmocks "github.com/smartcontractkit/chainlink/v2/core/services/gateway/connector/mocks"
"github.com/smartcontractkit/chainlink/v2/core/services/gateway/handlers/capabilities"
ghcapabilities "github.com/smartcontractkit/chainlink/v2/core/services/gateway/handlers/capabilities"
+ "github.com/smartcontractkit/chainlink/v2/core/utils/matches"
)
type wrapper struct {
@@ -48,6 +49,7 @@ func TestNewFetcherService(t *testing.T) {
fetcher.och.HandleGatewayMessage(ctx, "gateway1", gatewayResp)
}).Return(nil).Times(1)
connector.EXPECT().DonID().Return("don-id")
+ connector.EXPECT().AwaitConnection(matches.AnyContext, "gateway1").Return(nil)
connector.EXPECT().GatewayIDs().Return([]string{"gateway1", "gateway2"})
payload, err := fetcher.Fetch(ctx, url)
diff --git a/core/services/workflows/syncer/handler.go b/core/services/workflows/syncer/handler.go
index b88527f905d..534dfd57e7b 100644
--- a/core/services/workflows/syncer/handler.go
+++ b/core/services/workflows/syncer/handler.go
@@ -400,25 +400,13 @@ func (h *eventHandler) workflowRegisteredEvent(
ctx context.Context,
payload WorkflowRegistryWorkflowRegisteredV1,
) error {
- // Download the contents of binaryURL, configURL and secretsURL and cache them locally.
- binary, err := h.fetcher(ctx, payload.BinaryURL)
+ // Fetch the workflow artifacts from the database or download them from the specified URLs
+ decodedBinary, config, err := h.getWorkflowArtifacts(ctx, payload)
if err != nil {
- return fmt.Errorf("failed to fetch binary from %s : %w", payload.BinaryURL, err)
- }
-
- decodedBinary, err := base64.StdEncoding.DecodeString(string(binary))
- if err != nil {
- return fmt.Errorf("failed to decode binary: %w", err)
- }
-
- var config []byte
- if payload.ConfigURL != "" {
- config, err = h.fetcher(ctx, payload.ConfigURL)
- if err != nil {
- return fmt.Errorf("failed to fetch config from %s : %w", payload.ConfigURL, err)
- }
+ return err
}
+ // Always fetch secrets from the SecretsURL
var secrets []byte
if payload.SecretsURL != "" {
secrets, err = h.fetcher(ctx, payload.SecretsURL)
@@ -428,7 +416,7 @@ func (h *eventHandler) workflowRegisteredEvent(
}
// Calculate the hash of the binary and config files
- hash, err := pkgworkflows.GenerateWorkflowID(payload.WorkflowOwner, decodedBinary, config, payload.SecretsURL)
+ hash, err := pkgworkflows.GenerateWorkflowID(payload.WorkflowOwner, payload.WorkflowName, decodedBinary, config, payload.SecretsURL)
if err != nil {
return fmt.Errorf("failed to generate workflow id: %w", err)
}
@@ -456,12 +444,13 @@ func (h *eventHandler) workflowRegisteredEvent(
}
wfID := hex.EncodeToString(payload.WorkflowID[:])
+ owner := hex.EncodeToString(payload.WorkflowOwner)
entry := &job.WorkflowSpec{
Workflow: hex.EncodeToString(decodedBinary),
Config: string(config),
WorkflowID: wfID,
Status: status,
- WorkflowOwner: hex.EncodeToString(payload.WorkflowOwner),
+ WorkflowOwner: owner,
WorkflowName: payload.WorkflowName,
SpecType: job.WASMFile,
BinaryURL: payload.BinaryURL,
@@ -480,7 +469,7 @@ func (h *eventHandler) workflowRegisteredEvent(
engine, err := h.engineFactory(
ctx,
wfID,
- string(payload.WorkflowOwner),
+ owner,
payload.WorkflowName,
config,
decodedBinary,
@@ -498,6 +487,42 @@ func (h *eventHandler) workflowRegisteredEvent(
return nil
}
+// getWorkflowArtifacts retrieves the workflow artifacts from the database if they exist,
+// or downloads them from the specified URLs if they are not found in the database.
+func (h *eventHandler) getWorkflowArtifacts(
+ ctx context.Context,
+ payload WorkflowRegistryWorkflowRegisteredV1,
+) ([]byte, []byte, error) {
+ spec, err := h.orm.GetWorkflowSpecByID(ctx, hex.EncodeToString(payload.WorkflowID[:]))
+ if err != nil {
+ binary, err2 := h.fetcher(ctx, payload.BinaryURL)
+ if err2 != nil {
+ return nil, nil, fmt.Errorf("failed to fetch binary from %s : %w", payload.BinaryURL, err)
+ }
+
+ decodedBinary, err2 := base64.StdEncoding.DecodeString(string(binary))
+ if err2 != nil {
+ return nil, nil, fmt.Errorf("failed to decode binary: %w", err)
+ }
+
+ var config []byte
+ if payload.ConfigURL != "" {
+ config, err2 = h.fetcher(ctx, payload.ConfigURL)
+ if err2 != nil {
+ return nil, nil, fmt.Errorf("failed to fetch config from %s : %w", payload.ConfigURL, err)
+ }
+ }
+ return decodedBinary, config, nil
+ }
+
+ // The spec is already stored and the BinaryURL/ConfigURL have not changed, so decode and reuse the stored artifacts.
+ decodedBinary, err := hex.DecodeString(spec.Workflow)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to decode stored workflow spec: %w", err)
+ }
+ return decodedBinary, []byte(spec.Config), nil
+}
+
func (h *eventHandler) engineFactoryFn(ctx context.Context, id string, owner string, name string, config []byte, binary []byte) (services.Service, error) {
moduleConfig := &host.ModuleConfig{Logger: h.lggr, Labeler: h.emitter}
sdkSpec, err := host.GetWorkflowSpec(ctx, moduleConfig, binary, config)
diff --git a/core/services/workflows/syncer/handler_test.go b/core/services/workflows/syncer/handler_test.go
index eb8b338158f..f205cbde1cd 100644
--- a/core/services/workflows/syncer/handler_test.go
+++ b/core/services/workflows/syncer/handler_test.go
@@ -444,7 +444,7 @@ func testRunningWorkflow(t *testing.T, tc testCase) {
fetcher = tc.fetcher
)
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, binary, config, secretsURL)
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, secretsURL)
require.NoError(t, err)
wfID := hex.EncodeToString(giveWFID[:])
@@ -492,7 +492,7 @@ func Test_workflowDeletedHandler(t *testing.T) {
})
)
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, binary, config, secretsURL)
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, secretsURL)
require.NoError(t, err)
wfIDs := hex.EncodeToString(giveWFID[:])
@@ -584,9 +584,9 @@ func Test_workflowPausedActivatedUpdatedHandler(t *testing.T) {
})
)
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, binary, config, secretsURL)
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, secretsURL)
require.NoError(t, err)
- updatedWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, binary, updateConfig, secretsURL)
+ updatedWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, updateConfig, secretsURL)
require.NoError(t, err)
require.NoError(t, err)
diff --git a/core/services/workflows/syncer/heap.go b/core/services/workflows/syncer/heap.go
deleted file mode 100644
index 061293928a3..00000000000
--- a/core/services/workflows/syncer/heap.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package syncer
-
-import "container/heap"
-
-type Heap interface {
- // Push adds a new item to the heap.
- Push(x WorkflowRegistryEventResponse)
-
- // Pop removes the smallest item from the heap and returns it.
- Pop() WorkflowRegistryEventResponse
-
- // Len returns the number of items in the heap.
- Len() int
-}
-
-// publicHeap is a wrapper around the heap.Interface that exposes the Push and Pop methods.
-type publicHeap[T any] struct {
- heap heap.Interface
-}
-
-func (h *publicHeap[T]) Push(x T) {
- heap.Push(h.heap, x)
-}
-
-func (h *publicHeap[T]) Pop() T {
- return heap.Pop(h.heap).(T)
-}
-
-func (h *publicHeap[T]) Len() int {
- return h.heap.Len()
-}
-
-// blockHeightHeap is a heap.Interface that sorts WorkflowRegistryEventResponses by block height.
-type blockHeightHeap []WorkflowRegistryEventResponse
-
-// newBlockHeightHeap returns an initialized heap that sorts WorkflowRegistryEventResponses by block height.
-func newBlockHeightHeap() Heap {
- h := blockHeightHeap(make([]WorkflowRegistryEventResponse, 0))
- heap.Init(&h)
- return &publicHeap[WorkflowRegistryEventResponse]{heap: &h}
-}
-
-func (h *blockHeightHeap) Len() int { return len(*h) }
-
-func (h *blockHeightHeap) Less(i, j int) bool {
- return (*h)[i].Event.Head.Height < (*h)[j].Event.Head.Height
-}
-
-func (h *blockHeightHeap) Swap(i, j int) {
- (*h)[i], (*h)[j] = (*h)[j], (*h)[i]
-}
-
-func (h *blockHeightHeap) Push(x any) {
- *h = append(*h, x.(WorkflowRegistryEventResponse))
-}
-
-func (h *blockHeightHeap) Pop() any {
- old := *h
- n := len(old)
- x := old[n-1]
- *h = old[0 : n-1]
- return x
-}
diff --git a/core/services/workflows/syncer/mocks/orm.go b/core/services/workflows/syncer/mocks/orm.go
index da96f422361..09a543d65e3 100644
--- a/core/services/workflows/syncer/mocks/orm.go
+++ b/core/services/workflows/syncer/mocks/orm.go
@@ -540,6 +540,65 @@ func (_c *ORM_GetWorkflowSpec_Call) RunAndReturn(run func(context.Context, strin
return _c
}
+// GetWorkflowSpecByID provides a mock function with given fields: ctx, id
+func (_m *ORM) GetWorkflowSpecByID(ctx context.Context, id string) (*job.WorkflowSpec, error) {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetWorkflowSpecByID")
+ }
+
+ var r0 *job.WorkflowSpec
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) (*job.WorkflowSpec, error)); ok {
+ return rf(ctx, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) *job.WorkflowSpec); ok {
+ r0 = rf(ctx, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*job.WorkflowSpec)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = rf(ctx, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// ORM_GetWorkflowSpecByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetWorkflowSpecByID'
+type ORM_GetWorkflowSpecByID_Call struct {
+ *mock.Call
+}
+
+// GetWorkflowSpecByID is a helper method to define mock.On call
+// - ctx context.Context
+// - id string
+func (_e *ORM_Expecter) GetWorkflowSpecByID(ctx interface{}, id interface{}) *ORM_GetWorkflowSpecByID_Call {
+ return &ORM_GetWorkflowSpecByID_Call{Call: _e.mock.On("GetWorkflowSpecByID", ctx, id)}
+}
+
+func (_c *ORM_GetWorkflowSpecByID_Call) Run(run func(ctx context.Context, id string)) *ORM_GetWorkflowSpecByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *ORM_GetWorkflowSpecByID_Call) Return(_a0 *job.WorkflowSpec, _a1 error) *ORM_GetWorkflowSpecByID_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *ORM_GetWorkflowSpecByID_Call) RunAndReturn(run func(context.Context, string) (*job.WorkflowSpec, error)) *ORM_GetWorkflowSpecByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// Update provides a mock function with given fields: ctx, secretsURL, contents
func (_m *ORM) Update(ctx context.Context, secretsURL string, contents string) (int64, error) {
ret := _m.Called(ctx, secretsURL, contents)
diff --git a/core/services/workflows/syncer/orm.go b/core/services/workflows/syncer/orm.go
index 97f2c834f36..ff9336a0893 100644
--- a/core/services/workflows/syncer/orm.go
+++ b/core/services/workflows/syncer/orm.go
@@ -52,6 +52,9 @@ type WorkflowSpecsDS interface {
// DeleteWorkflowSpec deletes the workflow spec for the given owner and name.
DeleteWorkflowSpec(ctx context.Context, owner, name string) error
+
+ // GetWorkflowSpecByID returns the workflow spec for the given workflowID.
+ GetWorkflowSpecByID(ctx context.Context, id string) (*job.WorkflowSpec, error)
}
type ORM interface {
@@ -161,6 +164,10 @@ func (orm *orm) GetContentsByWorkflowID(ctx context.Context, workflowID string)
return "", "", ErrEmptySecrets
}
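+ // A secrets row may exist with empty contents; treat that the same as missing secrets.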
+ if jr.Contents.String == "" {
+ return "", "", ErrEmptySecrets
+ }
+
return jr.SecretsURLHash.String, jr.Contents.String, nil
}
@@ -206,65 +213,73 @@ func (orm *orm) GetSecretsURLHash(owner, secretsURL []byte) ([]byte, error) {
func (orm *orm) UpsertWorkflowSpec(ctx context.Context, spec *job.WorkflowSpec) (int64, error) {
var id int64
+ err := sqlutil.TransactDataSource(ctx, orm.ds, nil, func(tx sqlutil.DataSource) error {
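+ // Remove any existing specs for this owner/name that carry a different workflow ID, so only the latest spec survives the upsert below.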
+ txErr := tx.QueryRowxContext(
+ ctx,
+ `DELETE FROM workflow_specs WHERE workflow_owner = $1 AND workflow_name = $2 AND workflow_id != $3`,
+ spec.WorkflowOwner,
+ spec.WorkflowName,
+ spec.WorkflowID,
+ ).Scan(nil)
+ if txErr != nil && !errors.Is(txErr, sql.ErrNoRows) {
+ return fmt.Errorf("failed to clean up previous workflow specs: %w", txErr)
+ }
- query := `
- INSERT INTO workflow_specs (
- workflow,
- config,
- workflow_id,
- workflow_owner,
- workflow_name,
- status,
- binary_url,
- config_url,
- secrets_id,
- created_at,
- updated_at,
- spec_type
- ) VALUES (
- :workflow,
- :config,
- :workflow_id,
- :workflow_owner,
- :workflow_name,
- :status,
- :binary_url,
- :config_url,
- :secrets_id,
- :created_at,
- :updated_at,
- :spec_type
- ) ON CONFLICT (workflow_owner, workflow_name) DO UPDATE
- SET
- workflow = EXCLUDED.workflow,
- config = EXCLUDED.config,
- workflow_id = EXCLUDED.workflow_id,
- workflow_owner = EXCLUDED.workflow_owner,
- workflow_name = EXCLUDED.workflow_name,
- status = EXCLUDED.status,
- binary_url = EXCLUDED.binary_url,
- config_url = EXCLUDED.config_url,
- secrets_id = EXCLUDED.secrets_id,
- created_at = EXCLUDED.created_at,
- updated_at = EXCLUDED.updated_at,
- spec_type = EXCLUDED.spec_type
- RETURNING id
- `
-
- stmt, err := orm.ds.PrepareNamedContext(ctx, query)
- if err != nil {
- return 0, err
- }
- defer stmt.Close()
+ query := `
+ INSERT INTO workflow_specs (
+ workflow,
+ config,
+ workflow_id,
+ workflow_owner,
+ workflow_name,
+ status,
+ binary_url,
+ config_url,
+ secrets_id,
+ created_at,
+ updated_at,
+ spec_type
+ ) VALUES (
+ :workflow,
+ :config,
+ :workflow_id,
+ :workflow_owner,
+ :workflow_name,
+ :status,
+ :binary_url,
+ :config_url,
+ :secrets_id,
+ :created_at,
+ :updated_at,
+ :spec_type
+ ) ON CONFLICT (workflow_owner, workflow_name) DO UPDATE
+ SET
+ workflow = EXCLUDED.workflow,
+ config = EXCLUDED.config,
+ workflow_id = EXCLUDED.workflow_id,
+ workflow_owner = EXCLUDED.workflow_owner,
+ workflow_name = EXCLUDED.workflow_name,
+ status = EXCLUDED.status,
+ binary_url = EXCLUDED.binary_url,
+ config_url = EXCLUDED.config_url,
+ secrets_id = EXCLUDED.secrets_id,
+ created_at = EXCLUDED.created_at,
+ updated_at = EXCLUDED.updated_at,
+ spec_type = EXCLUDED.spec_type
+ RETURNING id
+ `
- spec.UpdatedAt = time.Now()
- err = stmt.QueryRowxContext(ctx, spec).Scan(&id)
+ stmt, err := orm.ds.PrepareNamedContext(ctx, query)
+ if err != nil {
+ return err
+ }
+ defer stmt.Close()
- if err != nil {
- return 0, err
- }
+ spec.UpdatedAt = time.Now()
+ return stmt.QueryRowxContext(ctx, spec).Scan(&id)
+ })
- return id, nil
+ return id, err
}
func (orm *orm) UpsertWorkflowSpecWithSecrets(
@@ -289,6 +304,17 @@ func (orm *orm) UpsertWorkflowSpecWithSecrets(
return fmt.Errorf("failed to create workflow secrets: %w", txErr)
}
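+ // As in UpsertWorkflowSpec, drop any stale specs for this owner/name with a different workflow ID before upserting.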
+ txErr = tx.QueryRowxContext(
+ ctx,
+ `DELETE FROM workflow_specs WHERE workflow_owner = $1 AND workflow_name = $2 AND workflow_id != $3`,
+ spec.WorkflowOwner,
+ spec.WorkflowName,
+ spec.WorkflowID,
+ ).Scan(nil)
+ if txErr != nil && !errors.Is(txErr, sql.ErrNoRows) {
+ return fmt.Errorf("failed to clean up previous workflow specs: %w", txErr)
+ }
+
spec.SecretsID = sql.NullInt64{Int64: sid, Valid: true}
query := `
@@ -328,10 +354,10 @@ func (orm *orm) UpsertWorkflowSpecWithSecrets(
status = EXCLUDED.status,
binary_url = EXCLUDED.binary_url,
config_url = EXCLUDED.config_url,
- secrets_id = EXCLUDED.secrets_id,
created_at = EXCLUDED.created_at,
updated_at = EXCLUDED.updated_at,
- spec_type = EXCLUDED.spec_type
+ spec_type = EXCLUDED.spec_type,
+ secrets_id = EXCLUDED.secrets_id
RETURNING id
`
@@ -363,6 +389,22 @@ func (orm *orm) GetWorkflowSpec(ctx context.Context, owner, name string) (*job.W
return &spec, nil
}
+func (orm *orm) GetWorkflowSpecByID(ctx context.Context, id string) (*job.WorkflowSpec, error) {
+ query := `
+ SELECT *
+ FROM workflow_specs
+ WHERE workflow_id = $1
+ `
+
+ var spec job.WorkflowSpec
+ err := orm.ds.GetContext(ctx, &spec, query, id)
+ if err != nil {
+ return nil, err
+ }
+
+ return &spec, nil
+}
+
func (orm *orm) DeleteWorkflowSpec(ctx context.Context, owner, name string) error {
query := `
DELETE FROM workflow_specs
diff --git a/core/services/workflows/syncer/orm_test.go b/core/services/workflows/syncer/orm_test.go
index 08c60447498..f47bd6c3731 100644
--- a/core/services/workflows/syncer/orm_test.go
+++ b/core/services/workflows/syncer/orm_test.go
@@ -6,6 +6,8 @@ import (
"testing"
"time"
+ "github.com/google/uuid"
+
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -197,6 +199,45 @@ func Test_GetWorkflowSpec(t *testing.T) {
})
}
+func Test_GetWorkflowSpecByID(t *testing.T) {
+ db := pgtest.NewSqlxDB(t)
+ ctx := testutils.Context(t)
+ lggr := logger.TestLogger(t)
+ orm := &orm{ds: db, lggr: lggr}
+
+ t.Run("gets a workflow spec by ID", func(t *testing.T) {
+ spec := &job.WorkflowSpec{
+ Workflow: "test_workflow",
+ Config: "test_config",
+ WorkflowID: "cid-123",
+ WorkflowOwner: "owner-123",
+ WorkflowName: "Test Workflow",
+ Status: job.WorkflowSpecStatusActive,
+ BinaryURL: "http://example.com/binary",
+ ConfigURL: "http://example.com/config",
+ CreatedAt: time.Now(),
+ SpecType: job.WASMFile,
+ }
+
+ id, err := orm.UpsertWorkflowSpec(ctx, spec)
+ require.NoError(t, err)
+ require.NotZero(t, id)
+
+ dbSpec, err := orm.GetWorkflowSpecByID(ctx, spec.WorkflowID)
+ require.NoError(t, err)
+ require.Equal(t, spec.Workflow, dbSpec.Workflow)
+
+ err = orm.DeleteWorkflowSpec(ctx, spec.WorkflowOwner, spec.WorkflowName)
+ require.NoError(t, err)
+ })
+
+ t.Run("fails if no workflow spec exists", func(t *testing.T) {
+ dbSpec, err := orm.GetWorkflowSpecByID(ctx, "inexistent-workflow-id")
+ require.Error(t, err)
+ require.Nil(t, dbSpec)
+ })
+}
+
func Test_GetContentsByWorkflowID(t *testing.T) {
db := pgtest.NewSqlxDB(t)
ctx := testutils.Context(t)
@@ -256,3 +297,160 @@ func Test_GetContentsByWorkflowID(t *testing.T) {
assert.Equal(t, giveHash, gotHash)
assert.Equal(t, giveContent, gotContent)
}
+
+func Test_GetContentsByWorkflowID_SecretsProvidedButEmpty(t *testing.T) {
+ db := pgtest.NewSqlxDB(t)
+ ctx := testutils.Context(t)
+ lggr := logger.TestLogger(t)
+ orm := &orm{ds: db, lggr: lggr}
+
+ // workflow_id is missing
+ _, _, err := orm.GetContentsByWorkflowID(ctx, "doesnt-exist")
+ require.ErrorContains(t, err, "no rows in result set")
+
+ // secrets row exists but its contents are empty; should return ErrEmptySecrets
+ workflowID := "aWorkflowID"
+ giveURL := "https://example.com"
+ giveBytes, err := crypto.Keccak256([]byte(giveURL))
+ require.NoError(t, err)
+ giveHash := hex.EncodeToString(giveBytes)
+ giveContent := ""
+ _, err = orm.UpsertWorkflowSpecWithSecrets(ctx, &job.WorkflowSpec{
+ Workflow: "",
+ Config: "",
+ WorkflowID: workflowID,
+ WorkflowOwner: "aWorkflowOwner",
+ WorkflowName: "aWorkflowName",
+ BinaryURL: "",
+ ConfigURL: "",
+ CreatedAt: time.Now(),
+ SpecType: job.DefaultSpecType,
+ }, giveURL, giveHash, giveContent)
+ require.NoError(t, err)
+
+ _, _, err = orm.GetContentsByWorkflowID(ctx, workflowID)
+ require.ErrorIs(t, err, ErrEmptySecrets)
+}
+
+func Test_UpsertWorkflowSpecWithSecrets(t *testing.T) {
+ db := pgtest.NewSqlxDB(t)
+ ctx := testutils.Context(t)
+ lggr := logger.TestLogger(t)
+ orm := &orm{ds: db, lggr: lggr}
+
+ t.Run("inserts new spec and new secrets", func(t *testing.T) {
+ giveURL := "https://example.com"
+ giveBytes, err := crypto.Keccak256([]byte(giveURL))
+ require.NoError(t, err)
+ giveHash := hex.EncodeToString(giveBytes)
+ giveContent := "some contents"
+
+ spec := &job.WorkflowSpec{
+ Workflow: "test_workflow",
+ Config: "test_config",
+ WorkflowID: "cid-123",
+ WorkflowOwner: "owner-123",
+ WorkflowName: "Test Workflow",
+ Status: job.WorkflowSpecStatusActive,
+ BinaryURL: "http://example.com/binary",
+ ConfigURL: "http://example.com/config",
+ CreatedAt: time.Now(),
+ SpecType: job.WASMFile,
+ }
+
+ _, err = orm.UpsertWorkflowSpecWithSecrets(ctx, spec, giveURL, giveHash, giveContent)
+ require.NoError(t, err)
+
+ // Verify the record exists in the database
+ var dbSpec job.WorkflowSpec
+ err = db.Get(&dbSpec, `SELECT * FROM workflow_specs WHERE workflow_owner = $1 AND workflow_name = $2`, spec.WorkflowOwner, spec.WorkflowName)
+ require.NoError(t, err)
+ require.Equal(t, spec.Workflow, dbSpec.Workflow)
+
+ // Verify the secrets exist in the database
+ contents, err := orm.GetContents(ctx, giveURL)
+ require.NoError(t, err)
+ require.Equal(t, giveContent, contents)
+ })
+
+ t.Run("updates existing spec and secrets", func(t *testing.T) {
+ giveURL := "https://example.com"
+ giveBytes, err := crypto.Keccak256([]byte(giveURL))
+ require.NoError(t, err)
+ giveHash := hex.EncodeToString(giveBytes)
+ giveContent := "some contents"
+
+ spec := &job.WorkflowSpec{
+ Workflow: "test_workflow",
+ Config: "test_config",
+ WorkflowID: "cid-123",
+ WorkflowOwner: "owner-123",
+ WorkflowName: "Test Workflow",
+ Status: job.WorkflowSpecStatusActive,
+ BinaryURL: "http://example.com/binary",
+ ConfigURL: "http://example.com/config",
+ CreatedAt: time.Now(),
+ SpecType: job.WASMFile,
+ }
+
+ _, err = orm.UpsertWorkflowSpecWithSecrets(ctx, spec, giveURL, giveHash, giveContent)
+ require.NoError(t, err)
+
+ // Update the status
+ spec.Status = job.WorkflowSpecStatusPaused
+
+ _, err = orm.UpsertWorkflowSpecWithSecrets(ctx, spec, giveURL, giveHash, "new contents")
+ require.NoError(t, err)
+
+ // Verify the record is updated in the database
+ var dbSpec job.WorkflowSpec
+ err = db.Get(&dbSpec, `SELECT * FROM workflow_specs WHERE workflow_owner = $1 AND workflow_name = $2`, spec.WorkflowOwner, spec.WorkflowName)
+ require.NoError(t, err)
+ require.Equal(t, spec.Config, dbSpec.Config)
+
+ // Verify the secrets are updated in the database
+ contents, err := orm.GetContents(ctx, giveURL)
+ require.NoError(t, err)
+ require.Equal(t, "new contents", contents)
+ })
+
+ t.Run("updates existing spec and secrets if spec has executions", func(t *testing.T) {
+ giveURL := "https://example.com"
+ giveBytes, err := crypto.Keccak256([]byte(giveURL))
+ require.NoError(t, err)
+ giveHash := hex.EncodeToString(giveBytes)
+ giveContent := "some contents"
+
+ spec := &job.WorkflowSpec{
+ Workflow: "test_workflow",
+ Config: "test_config",
+ WorkflowID: "cid-123",
+ WorkflowOwner: "owner-123",
+ WorkflowName: "Test Workflow",
+ Status: job.WorkflowSpecStatusActive,
+ BinaryURL: "http://example.com/binary",
+ ConfigURL: "http://example.com/config",
+ CreatedAt: time.Now(),
+ SpecType: job.WASMFile,
+ }
+
+ _, err = orm.UpsertWorkflowSpecWithSecrets(ctx, spec, giveURL, giveHash, giveContent)
+ require.NoError(t, err)
+
+ _, err = db.ExecContext(
+ ctx,
+ `INSERT INTO workflow_executions (id, workflow_id, status, created_at) VALUES ($1, $2, $3, $4)`,
+ uuid.New().String(),
+ "cid-123",
+ "started",
+ time.Now(),
+ )
+ require.NoError(t, err)
+
+ // Update the workflow ID
+ spec.WorkflowID = "cid-456"
+
+ _, err = orm.UpsertWorkflowSpecWithSecrets(ctx, spec, giveURL, giveHash, "new contents")
+ require.NoError(t, err)
+ })
+}
diff --git a/core/services/workflows/syncer/workflow_registry.go b/core/services/workflows/syncer/workflow_registry.go
index 75fcc9735ad..4809f3563ca 100644
--- a/core/services/workflows/syncer/workflow_registry.go
+++ b/core/services/workflows/syncer/workflow_registry.go
@@ -5,13 +5,15 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
+ "iter"
+ "strings"
"sync"
"time"
"github.com/smartcontractkit/chainlink-common/pkg/capabilities"
"github.com/smartcontractkit/chainlink-common/pkg/services"
- types "github.com/smartcontractkit/chainlink-common/pkg/types"
- query "github.com/smartcontractkit/chainlink-common/pkg/types/query"
+ "github.com/smartcontractkit/chainlink-common/pkg/types"
+ "github.com/smartcontractkit/chainlink-common/pkg/types/query"
"github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives"
"github.com/smartcontractkit/chainlink-common/pkg/values"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/workflow/generated/workflow_registry_wrapper"
@@ -90,19 +92,19 @@ type WorkflowLoadConfig struct {
// FetcherFunc is an abstraction for fetching the contents stored at a URL.
type FetcherFunc func(ctx context.Context, url string) ([]byte, error)
-type ContractReaderFactory interface {
- NewContractReader(context.Context, []byte) (types.ContractReader, error)
-}
-
// ContractReader is a subset of types.ContractReader defined locally to enable mocking.
type ContractReader interface {
Start(ctx context.Context) error
Close() error
Bind(context.Context, []types.BoundContract) error
- QueryKey(context.Context, types.BoundContract, query.KeyFilter, query.LimitAndSort, any) ([]types.Sequence, error)
+ QueryKeys(ctx context.Context, keyQueries []types.ContractKeyFilter, limitAndSort query.LimitAndSort) (iter.Seq2[string, types.Sequence], error)
GetLatestValueWithHeadData(ctx context.Context, readName string, confidenceLevel primitives.ConfidenceLevel, params any, returnVal any) (head *types.Head, err error)
}
+type ContractReaderFactory interface {
+ NewContractReader(context.Context, []byte) (types.ContractReader, error)
+}
+
// WorkflowRegistrySyncer is the public interface of the package.
type WorkflowRegistrySyncer interface {
services.Service
@@ -128,21 +130,11 @@ type workflowRegistry struct {
newContractReaderFn newContractReaderFn
- eventPollerCfg WorkflowEventPollerConfig
- eventTypes []WorkflowRegistryEventType
-
- // eventsCh is read by the handler and each event is handled once received.
- eventsCh chan WorkflowRegistryEventResponse
+ eventPollerCfg WorkflowEventPollerConfig
+ eventTypes []WorkflowRegistryEventType
handler evtHandler
initialWorkflowsStateLoader initialWorkflowsStateLoader
- // batchCh is a channel that receives batches of events from the contract query goroutines.
- batchCh chan []WorkflowRegistryEventResponse
-
- // heap is a min heap that merges batches of events from the contract query goroutines. The
- // default min heap is sorted by block height.
- heap Heap
-
workflowDonNotifier donNotifier
reader ContractReader
@@ -197,11 +189,8 @@ func NewWorkflowRegistry(
newContractReaderFn: newContractReaderFn,
workflowRegistryAddress: addr,
eventPollerCfg: eventPollerConfig,
- heap: newBlockHeightHeap(),
stopCh: make(services.StopChan),
eventTypes: ets,
- eventsCh: make(chan WorkflowRegistryEventResponse),
- batchCh: make(chan []WorkflowRegistryEventResponse, len(ets)),
handler: handler,
initialWorkflowsStateLoader: initialWorkflowsStateLoader,
workflowDonNotifier: workflowDonNotifier,
@@ -234,19 +223,25 @@ func (w *workflowRegistry) Start(_ context.Context) error {
w.lggr.Debugw("Loading initial workflows for DON", "DON", don.ID)
loadWorkflowsHead, err := w.initialWorkflowsStateLoader.LoadWorkflows(ctx, don)
if err != nil {
- w.lggr.Errorw("failed to load workflows", "err", err)
- return
- }
+ // TODO - this is a temporary fix to handle the case where the chainreader errors because the contract
+ // contains no workflows. To track: https://smartcontract-it.atlassian.net/browse/CAPPL-393
+ if !strings.Contains(err.Error(), "attempting to unmarshal an empty string while arguments are expected") {
+ w.lggr.Errorw("failed to load workflows", "err", err)
+ return
+ }
- w.syncEventsLoop(ctx, loadWorkflowsHead.Height)
- }()
+ loadWorkflowsHead = &types.Head{
+ Height: "0",
+ }
+ }
- w.wg.Add(1)
- go func() {
- defer w.wg.Done()
- defer cancel()
+ reader, err := w.getContractReader(ctx)
+ if err != nil {
+ w.lggr.Criticalf("contract reader unavailable : %s", err)
+ return
+ }
- w.handlerLoop(ctx)
+ w.readRegistryEvents(ctx, reader, loadWorkflowsHead.Height)
}()
return nil
@@ -273,135 +268,82 @@ func (w *workflowRegistry) Name() string {
return name
}
-// handlerLoop handles the events that are emitted by the contract.
-func (w *workflowRegistry) handlerLoop(ctx context.Context) {
+// readRegistryEvents polls the contract for registry events and dispatches them to the event handler.
+func (w *workflowRegistry) readRegistryEvents(ctx context.Context, reader ContractReader, lastReadBlockNumber string) {
+ ticker := w.getTicker()
+
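+ // Build one key filter per registry event type; a single QueryKeys call then returns all finalized logs past the last read block as one ordered stream.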
+ var keyQueries = make([]types.ContractKeyFilter, 0, len(w.eventTypes))
+ for _, et := range w.eventTypes {
+ var logData values.Value
+ keyQueries = append(keyQueries, types.ContractKeyFilter{
+ KeyFilter: query.KeyFilter{
+ Key: string(et),
+ Expressions: []query.Expression{
+ query.Confidence(primitives.Finalized),
+ query.Block(lastReadBlockNumber, primitives.Gt),
+ },
+ },
+ Contract: types.BoundContract{
+ Name: WorkflowRegistryContractName,
+ Address: w.workflowRegistryAddress,
+ },
+ SequenceDataType: &logData,
+ })
+ }
+
+ cursor := ""
for {
select {
case <-ctx.Done():
return
- case resp, open := <-w.eventsCh:
- if !open {
- return
+ case <-ticker:
+ limitAndSort := query.LimitAndSort{
+ SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
+ Limit: query.Limit{Count: w.eventPollerCfg.QueryCount},
}
-
- if resp.Err != nil || resp.Event == nil {
- w.lggr.Errorw("failed to handle event", "err", resp.Err)
- continue
+ if cursor != "" {
+ limitAndSort.Limit = query.CursorLimit(cursor, query.CursorFollowing, w.eventPollerCfg.QueryCount)
}
- event := resp.Event
- w.lggr.Debugf("handling event: %+v", event)
- if err := w.handler.Handle(ctx, *event); err != nil {
- w.lggr.Errorw("failed to handle event", "event", event, "err", err)
+ logsIter, err := reader.QueryKeys(ctx, keyQueries, limitAndSort)
+ if err != nil {
+ w.lggr.Errorw("failed to query keys", "err", err)
continue
}
- }
- }
-}
-// syncEventsLoop polls the contract for events and passes them to a channel for handling.
-func (w *workflowRegistry) syncEventsLoop(ctx context.Context, lastReadBlockNumber string) {
- var (
- // sendLog is a helper that sends a WorkflowRegistryEventResponse to the eventsCh in a
- // blocking way that will send the response or be canceled.
- sendLog = func(resp WorkflowRegistryEventResponse) {
- select {
- case w.eventsCh <- resp:
- case <-ctx.Done():
+ var logs []sequenceWithEventType
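+ // QueryKeys yields (event type, sequence) pairs; collect each log together with its event type so it can be converted to the matching event response below.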
+ for eventType, log := range logsIter {
+ logs = append(logs, sequenceWithEventType{
+ Sequence: log,
+ EventType: WorkflowRegistryEventType(eventType),
+ })
}
- }
-
- ticker = w.getTicker()
+ w.lggr.Debugw("QueryKeys called", "logs", len(logs), "eventTypes", w.eventTypes, "lastReadBlockNumber", lastReadBlockNumber, "logCursor", cursor)
- signals = make(map[WorkflowRegistryEventType]chan struct{}, 0)
- )
-
- // critical failure if there is no reader, the loop will exit and the parent context will be
- // canceled.
- reader, err := w.getContractReader(ctx)
- if err != nil {
- w.lggr.Criticalf("contract reader unavailable : %s", err)
- return
- }
-
- // fan out and query for each event type
- for i := 0; i < len(w.eventTypes); i++ {
- signal := make(chan struct{}, 1)
- signals[w.eventTypes[i]] = signal
- w.wg.Add(1)
- go func() {
- defer w.wg.Done()
-
- queryEvent(
- ctx,
- signal,
- w.lggr,
- reader,
- lastReadBlockNumber,
- queryEventConfig{
- ContractName: WorkflowRegistryContractName,
- ContractAddress: w.workflowRegistryAddress,
- WorkflowEventPollerConfig: w.eventPollerCfg,
- },
- w.eventTypes[i],
- w.batchCh,
- )
- }()
- }
-
- // Periodically send a signal to all the queryEvent goroutines to query the contract
- for {
- select {
- case <-ctx.Done():
- return
- case <-ticker:
- w.lggr.Debugw("Syncing with WorkflowRegistry")
- // for each event type, send a signal for it to execute a query and produce a new
- // batch of event logs
- for i := 0; i < len(w.eventTypes); i++ {
- signal := signals[w.eventTypes[i]]
- select {
- case signal <- struct{}{}:
- case <-ctx.Done():
- return
- }
+ // The ChainReader QueryKeys API returns logs including the one at the cursor value,
+ // not only those after it. If the response consists solely of the log matching the
+ // cursor, there are no new logs to process.
+ if len(logs) == 1 && logs[0].Sequence.Cursor == cursor {
+ w.lggr.Infow("No new logs since", "cursor", cursor)
+ continue
}
- // block on fan-in until all fetched event logs are sent to the handlers
- w.orderAndSend(
- ctx,
- len(w.eventTypes),
- w.batchCh,
- sendLog,
- )
- }
- }
-}
+ var events []WorkflowRegistryEventResponse
+ for _, log := range logs {
+ if log.Sequence.Cursor == cursor {
+ continue
+ }
-// orderAndSend reads n batches from the batch channel, heapifies all the batches then dequeues
-// the min heap via the sendLog function.
-func (w *workflowRegistry) orderAndSend(
- ctx context.Context,
- batchCount int,
- batchCh <-chan []WorkflowRegistryEventResponse,
- sendLog func(WorkflowRegistryEventResponse),
-) {
- for {
- select {
- case <-ctx.Done():
- return
- case batch := <-batchCh:
- for _, response := range batch {
- w.heap.Push(response)
+ events = append(events, toWorkflowRegistryEventResponse(log.Sequence, log.EventType, w.lggr))
+ cursor = log.Sequence.Cursor
}
- batchCount--
- // If we have received responses for all the events, then we can drain the heap.
- if batchCount == 0 {
- for w.heap.Len() > 0 {
- sendLog(w.heap.Pop())
+ for _, event := range events {
+ err := w.handler.Handle(ctx, event.Event)
+ if err != nil {
+ w.lggr.Errorw("failed to handle event", "err", err, "type", event.Event.EventType)
}
- return
}
}
}
@@ -437,95 +379,9 @@ func (w *workflowRegistry) getContractReader(ctx context.Context) (ContractReade
return w.reader, nil
}
-type queryEventConfig struct {
- ContractName string
- ContractAddress string
- WorkflowEventPollerConfig
-}
-
-// queryEvent queries the contract for events of the given type on each tick from the ticker.
-// Sends a batch of event logs to the batch channel. The batch represents all the
-// event logs read since the last query. Loops until the context is canceled.
-func queryEvent(
- ctx context.Context,
- ticker <-chan struct{},
- lggr logger.Logger,
- reader ContractReader,
- lastReadBlockNumber string,
- cfg queryEventConfig,
- et WorkflowRegistryEventType,
- batchCh chan<- []WorkflowRegistryEventResponse,
-) {
- // create query
- var (
- logData values.Value
- cursor = ""
- limitAndSort = query.LimitAndSort{
- SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
- Limit: query.Limit{Count: cfg.QueryCount},
- }
- bc = types.BoundContract{
- Name: cfg.ContractName,
- Address: cfg.ContractAddress,
- }
- )
-
- // Loop until canceled
- for {
- select {
- case <-ctx.Done():
- return
- case <-ticker:
- responseBatch := []WorkflowRegistryEventResponse{}
-
- if cursor != "" {
- limitAndSort.Limit = query.CursorLimit(cursor, query.CursorFollowing, cfg.QueryCount)
- }
-
- logs, err := reader.QueryKey(
- ctx,
- bc,
- query.KeyFilter{
- Key: string(et),
- Expressions: []query.Expression{
- query.Confidence(primitives.Finalized),
- query.Block(lastReadBlockNumber, primitives.Gte),
- },
- },
- limitAndSort,
- &logData,
- )
- lcursor := cursor
- if lcursor == "" {
- lcursor = "empty"
- }
- lggr.Debugw("QueryKeys called", "logs", len(logs), "eventType", et, "lastReadBlockNumber", lastReadBlockNumber, "logCursor", lcursor)
-
- if err != nil {
- lggr.Errorw("QueryKey failure", "err", err)
- continue
- }
-
- // ChainReader QueryKey API provides logs including the cursor value and not
- // after the cursor value. If the response only consists of the log corresponding
- // to the cursor and no log after it, then we understand that there are no new
- // logs
- if len(logs) == 1 && logs[0].Cursor == cursor {
- lggr.Infow("No new logs since", "cursor", cursor)
- continue
- }
-
- for _, log := range logs {
- if log.Cursor == cursor {
- continue
- }
-
- responseBatch = append(responseBatch, toWorkflowRegistryEventResponse(log, et, lggr))
- cursor = log.Cursor
- }
- batchCh <- responseBatch
- }
- }
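+// sequenceWithEventType pairs a raw log sequence with the workflow registry event type it was queried under.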
+type sequenceWithEventType struct {
+ Sequence types.Sequence
+ EventType WorkflowRegistryEventType
}
func getWorkflowRegistryEventReader(
@@ -681,7 +537,7 @@ func (l *workflowRegistryContractLoader) LoadWorkflows(ctx context.Context, don
var workflows GetWorkflowMetadataListByDONReturnVal
headAtLastRead, err = contractReader.GetLatestValueWithHeadData(ctx, readIdentifier, primitives.Finalized, params, &workflows)
if err != nil {
- return nil, fmt.Errorf("failed to get workflow metadata for don %w", err)
+ return nil, fmt.Errorf("failed to get lastest value with head data %w", err)
}
l.lggr.Debugw("Rehydrating existing workflows", "len", len(workflows.WorkflowMetadataList))
diff --git a/core/services/workflows/syncer/workflow_registry_test.go b/core/services/workflows/syncer/workflow_registry_test.go
deleted file mode 100644
index 621d3d123d5..00000000000
--- a/core/services/workflows/syncer/workflow_registry_test.go
+++ /dev/null
@@ -1,234 +0,0 @@
-package syncer
-
-import (
- "context"
- "encoding/hex"
- "testing"
- "time"
-
- "github.com/stretchr/testify/mock"
-
- "github.com/jonboulle/clockwork"
-
- "github.com/smartcontractkit/chainlink-common/pkg/capabilities"
- "github.com/smartcontractkit/chainlink-common/pkg/custmsg"
- "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest"
- types "github.com/smartcontractkit/chainlink-common/pkg/types"
- query "github.com/smartcontractkit/chainlink-common/pkg/types/query"
- "github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives"
- "github.com/smartcontractkit/chainlink-common/pkg/values"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
- "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/workflowkey"
- "github.com/smartcontractkit/chainlink/v2/core/utils/crypto"
- "github.com/smartcontractkit/chainlink/v2/core/utils/matches"
-
- "github.com/stretchr/testify/require"
-)
-
-type testDonNotifier struct {
- don capabilities.DON
- err error
-}
-
-func (t *testDonNotifier) WaitForDon(ctx context.Context) (capabilities.DON, error) {
- return t.don, t.err
-}
-
-func Test_Workflow_Registry_Syncer(t *testing.T) {
- var (
- giveContents = "contents"
- wantContents = "updated contents"
- contractAddress = "0xdeadbeef"
- giveCfg = WorkflowEventPollerConfig{
- QueryCount: 20,
- }
- giveURL = "http://example.com"
- giveHash, err = crypto.Keccak256([]byte(giveURL))
-
- giveLog = types.Sequence{
- Data: map[string]any{
- "SecretsURLHash": giveHash,
- "Owner": "0xowneraddr",
- },
- Cursor: "cursor",
- }
- )
-
- require.NoError(t, err)
-
- var (
- lggr = logger.TestLogger(t)
- db = pgtest.NewSqlxDB(t)
- orm = &orm{ds: db, lggr: lggr}
- ctx, cancel = context.WithCancel(testutils.Context(t))
- reader = NewMockContractReader(t)
- emitter = custmsg.NewLabeler()
- gateway = func(_ context.Context, _ string) ([]byte, error) {
- return []byte(wantContents), nil
- }
- ticker = make(chan time.Time)
-
- handler = NewEventHandler(lggr, orm, gateway, nil, nil,
- emitter, clockwork.NewFakeClock(), workflowkey.Key{})
- loader = NewWorkflowRegistryContractLoader(lggr, contractAddress, func(ctx context.Context, bytes []byte) (ContractReader, error) {
- return reader, nil
- }, handler)
-
- worker = NewWorkflowRegistry(lggr, func(ctx context.Context, bytes []byte) (ContractReader, error) {
- return reader, nil
- }, contractAddress,
- WorkflowEventPollerConfig{
- QueryCount: 20,
- }, handler, loader,
- &testDonNotifier{
- don: capabilities.DON{
- ID: 1,
- },
- err: nil,
- },
- WithTicker(ticker))
- )
-
- // Cleanup the worker
- defer cancel()
-
- // Seed the DB with an original entry
- _, err = orm.Create(ctx, giveURL, hex.EncodeToString(giveHash), giveContents)
- require.NoError(t, err)
-
- // Mock out the contract reader query
- reader.EXPECT().QueryKey(
- matches.AnyContext,
- types.BoundContract{
- Name: WorkflowRegistryContractName,
- Address: contractAddress,
- },
- query.KeyFilter{
- Key: string(ForceUpdateSecretsEvent),
- Expressions: []query.Expression{
- query.Confidence(primitives.Finalized),
- query.Block("0", primitives.Gte),
- },
- },
- query.LimitAndSort{
- SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
- Limit: query.Limit{Count: giveCfg.QueryCount},
- },
- new(values.Value),
- ).Return([]types.Sequence{giveLog}, nil)
- reader.EXPECT().QueryKey(
- matches.AnyContext,
- types.BoundContract{
- Name: WorkflowRegistryContractName,
- Address: contractAddress,
- },
- query.KeyFilter{
- Key: string(WorkflowPausedEvent),
- Expressions: []query.Expression{
- query.Confidence(primitives.Finalized),
- query.Block("0", primitives.Gte),
- },
- },
- query.LimitAndSort{
- SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
- Limit: query.Limit{Count: giveCfg.QueryCount},
- },
- new(values.Value),
- ).Return([]types.Sequence{}, nil)
- reader.EXPECT().QueryKey(
- matches.AnyContext,
- types.BoundContract{
- Name: WorkflowRegistryContractName,
- Address: contractAddress,
- },
- query.KeyFilter{
- Key: string(WorkflowDeletedEvent),
- Expressions: []query.Expression{
- query.Confidence(primitives.Finalized),
- query.Block("0", primitives.Gte),
- },
- },
- query.LimitAndSort{
- SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
- Limit: query.Limit{Count: giveCfg.QueryCount},
- },
- new(values.Value),
- ).Return([]types.Sequence{}, nil)
- reader.EXPECT().QueryKey(
- matches.AnyContext,
- types.BoundContract{
- Name: WorkflowRegistryContractName,
- Address: contractAddress,
- },
- query.KeyFilter{
- Key: string(WorkflowActivatedEvent),
- Expressions: []query.Expression{
- query.Confidence(primitives.Finalized),
- query.Block("0", primitives.Gte),
- },
- },
- query.LimitAndSort{
- SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
- Limit: query.Limit{Count: giveCfg.QueryCount},
- },
- new(values.Value),
- ).Return([]types.Sequence{}, nil)
- reader.EXPECT().QueryKey(
- matches.AnyContext,
- types.BoundContract{
- Name: WorkflowRegistryContractName,
- Address: contractAddress,
- },
- query.KeyFilter{
- Key: string(WorkflowUpdatedEvent),
- Expressions: []query.Expression{
- query.Confidence(primitives.Finalized),
- query.Block("0", primitives.Gte),
- },
- },
- query.LimitAndSort{
- SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
- Limit: query.Limit{Count: giveCfg.QueryCount},
- },
- new(values.Value),
- ).Return([]types.Sequence{}, nil)
- reader.EXPECT().QueryKey(
- matches.AnyContext,
- types.BoundContract{
- Name: WorkflowRegistryContractName,
- Address: contractAddress,
- },
- query.KeyFilter{
- Key: string(WorkflowRegisteredEvent),
- Expressions: []query.Expression{
- query.Confidence(primitives.Finalized),
- query.Block("0", primitives.Gte),
- },
- },
- query.LimitAndSort{
- SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
- Limit: query.Limit{Count: giveCfg.QueryCount},
- },
- new(values.Value),
- ).Return([]types.Sequence{}, nil)
- reader.EXPECT().GetLatestValueWithHeadData(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&types.Head{
- Height: "0",
- }, nil)
- reader.EXPECT().Start(mock.Anything).Return(nil)
- reader.EXPECT().Bind(mock.Anything, mock.Anything).Return(nil)
-
- // Go run the worker
- servicetest.Run(t, worker)
-
- // Send a tick to start a query
- ticker <- time.Now()
-
- // Require the secrets contents to eventually be updated
- require.Eventually(t, func() bool {
- secrets, err := orm.GetContents(ctx, giveURL)
- require.NoError(t, err)
- return secrets == wantContents
- }, 5*time.Second, time.Second)
-}
diff --git a/core/store/dialects/dialects.go b/core/store/dialects/dialects.go
deleted file mode 100644
index d250fa1b99b..00000000000
--- a/core/store/dialects/dialects.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package dialects
-
-import (
- // need to make sure pgx driver is registered before opening connection
- _ "github.com/jackc/pgx/v4/stdlib"
-)
-
-// DialectName is a compiler enforced type used that maps to database dialect names
-type DialectName string
-
-const (
- // Postgres represents the postgres dialect.
- Postgres DialectName = "pgx"
- // TransactionWrappedPostgres is useful for tests.
- // When the connection is opened, it starts a transaction and all
- // operations performed on the DB will be within that transaction.
- TransactionWrappedPostgres DialectName = "txdb"
-)
diff --git a/core/store/migrate/migrations/0259_add_workflow_secrets.sql b/core/store/migrate/migrations/0259_add_workflow_secrets.sql
index fb76d945571..420f7ed6e49 100644
--- a/core/store/migrate/migrations/0259_add_workflow_secrets.sql
+++ b/core/store/migrate/migrations/0259_add_workflow_secrets.sql
@@ -38,4 +38,5 @@ DROP INDEX IF EXISTS idx_secrets_url_hash;
-- Drop the workflow_artifacts table
DROP TABLE IF EXISTS workflow_secrets;
--- +goose StatementEnd
\ No newline at end of file
+-- +goose StatementEnd
+
diff --git a/core/store/migrate/migrations/0261_remove_unique_constraint_secrets.sql b/core/store/migrate/migrations/0261_remove_unique_constraint_secrets.sql
new file mode 100644
index 00000000000..15f59d8ae28
--- /dev/null
+++ b/core/store/migrate/migrations/0261_remove_unique_constraint_secrets.sql
@@ -0,0 +1,10 @@
+-- +goose Up
+-- +goose StatementBegin
+-- Drop the unique constraint on secrets_id from workflow_specs
+ALTER TABLE workflow_specs DROP CONSTRAINT workflow_specs_secrets_id_key;
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+ALTER TABLE workflow_specs ADD CONSTRAINT workflow_specs_secrets_id_key unique (secrets_id);
+-- +goose StatementEnd
diff --git a/core/utils/testutils/heavyweight/orm.go b/core/utils/testutils/heavyweight/orm.go
index 536515e02e4..775eabab0c8 100644
--- a/core/utils/testutils/heavyweight/orm.go
+++ b/core/utils/testutils/heavyweight/orm.go
@@ -14,12 +14,13 @@ import (
"github.com/jmoiron/sqlx"
+ pgcommon "github.com/smartcontractkit/chainlink-common/pkg/sqlutil/pg"
"github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest"
"github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
"github.com/smartcontractkit/chainlink/v2/internal/testdb"
)
@@ -53,10 +54,10 @@ const (
)
func (c Kind) PrepareDB(t testing.TB, overrideFn func(c *chainlink.Config, s *chainlink.Secrets)) (chainlink.GeneralConfig, *sqlx.DB) {
- testutils.SkipShort(t, "FullTestDB")
+ tests.SkipShort(t, "FullTestDB")
gcfg := configtest.NewGeneralConfigSimulated(t, func(c *chainlink.Config, s *chainlink.Secrets) {
- c.Database.Dialect = dialects.Postgres
+ c.Database.Dialect = pgcommon.Postgres
if overrideFn != nil {
overrideFn(c, s)
}
@@ -65,7 +66,7 @@ func (c Kind) PrepareDB(t testing.TB, overrideFn func(c *chainlink.Config, s *ch
require.NoError(t, os.MkdirAll(gcfg.RootDir(), 0700))
migrationTestDBURL, err := testdb.CreateOrReplace(gcfg.Database().URL(), generateName(), c != KindEmpty)
require.NoError(t, err)
- db, err := pg.NewConnection(tests.Context(t), migrationTestDBURL, dialects.Postgres, gcfg.Database())
+ db, err := pg.NewConnection(tests.Context(t), migrationTestDBURL, pgcommon.Postgres, gcfg.Database())
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, db.Close()) // must close before dropping
@@ -74,7 +75,7 @@ func (c Kind) PrepareDB(t testing.TB, overrideFn func(c *chainlink.Config, s *ch
})
gcfg = configtest.NewGeneralConfigSimulated(t, func(c *chainlink.Config, s *chainlink.Secrets) {
- c.Database.Dialect = dialects.Postgres
+ c.Database.Dialect = pgcommon.Postgres
s.Database.URL = models.MustSecretURL(migrationTestDBURL)
if overrideFn != nil {
overrideFn(c, s)
diff --git a/deployment/README.md b/deployment/README.md
index c6579ca6205..f3bd29b768b 100644
--- a/deployment/README.md
+++ b/deployment/README.md
@@ -6,14 +6,14 @@ deployment/configuration logic to be tested against ephemeral environments
and then exposed for use in persistent environments like testnet/mainnet.
## Table of Contents
-- [Address Book](##Address-Book)
-- [View](##View)
-- [Environment](##Environment)
-- [Job Distributor](##Job-Distributor)
-- [Changesets](##Changesets)
-- [Directory Structure](##Directory-Structure)
-- [Integration Testing](##Integration-Testing)
-- [FAQ](##FAQ)
+- [Address Book](#address-book)
+- [View](#view)
+- [Environment](#environment)
+- [Job Distributor](#job-distributor)
+- [Changesets](#changesets)
+- [Directory Structure](#directory-structure)
+- [Integration Testing](#integration-testing)
+- [FAQ](#faq)
## Address Book
An [address book](https://github.com/smartcontractkit/chainlink/blob/develop/deployment/address_book.go#L79) represents
@@ -100,14 +100,14 @@ TODO: Add various examples in deployment/example.
contracts (like MCMS, LinkToken etc) which can be shared
by products.
-/deployment//internal
+/deployment/product/internal
- Internal building blocks for changesets
-/deployment//view
+/deployment/product/view
- Hold readonly mappings Go bindings to json marshallable objects.
- Used to generate a view of the system.
-/deployment//changeset
+/deployment/product/changeset
- Think of this as the public API for deployment and configuration
of your product.
- All the changesets should have an associated test using a memory or devenv
diff --git a/deployment/address_book.go b/deployment/address_book.go
index 6f605013011..3ce0332a4c3 100644
--- a/deployment/address_book.go
+++ b/deployment/address_book.go
@@ -89,8 +89,10 @@ type AddressBook interface {
Remove(ab AddressBook) error
}
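+// AddressesByChain maps a chain selector to the contract addresses deployed on that chain and their type-and-version metadata.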
+type AddressesByChain map[uint64]map[string]TypeAndVersion
+
type AddressBookMap struct {
- addressesByChain map[uint64]map[string]TypeAndVersion
+ addressesByChain AddressesByChain
mtx sync.RWMutex
}
diff --git a/deployment/ccip/changeset/accept_ownership_test.go b/deployment/ccip/changeset/accept_ownership_test.go
index 5580b31a85a..9b71e0ad5cb 100644
--- a/deployment/ccip/changeset/accept_ownership_test.go
+++ b/deployment/ccip/changeset/accept_ownership_test.go
@@ -9,9 +9,11 @@ import (
"golang.org/x/exp/maps"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
)
func Test_NewAcceptOwnershipChangeset(t *testing.T) {
+ t.Parallel()
e := NewMemoryEnvironment(t)
state, err := LoadOnchainState(e.Env)
require.NoError(t, err)
@@ -20,12 +22,12 @@ func Test_NewAcceptOwnershipChangeset(t *testing.T) {
source := allChains[0]
dest := allChains[1]
- timelockContracts := map[uint64]*commonchangeset.TimelockExecutionContracts{
- source: &commonchangeset.TimelockExecutionContracts{
+ timelockContracts := map[uint64]*proposalutils.TimelockExecutionContracts{
+ source: {
Timelock: state.Chains[source].Timelock,
CallProxy: state.Chains[source].CallProxy,
},
- dest: &commonchangeset.TimelockExecutionContracts{
+ dest: {
Timelock: state.Chains[dest].Timelock,
CallProxy: state.Chains[dest].CallProxy,
},
diff --git a/deployment/ccip/changeset/cs_add_chain.go b/deployment/ccip/changeset/cs_add_chain.go
index b3d0df04c93..ddb6e61d5ba 100644
--- a/deployment/ccip/changeset/cs_add_chain.go
+++ b/deployment/ccip/changeset/cs_add_chain.go
@@ -8,18 +8,14 @@ import (
"github.com/smartcontractkit/chainlink-ccip/chainconfig"
"github.com/smartcontractkit/chainlink-ccip/pkg/types/ccipocr3"
-
- "github.com/smartcontractkit/chainlink/deployment/ccip/changeset/internal"
"github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
- "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/ccip_home"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/gethwrappers"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/mcms"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/timelock"
"github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/ccip_home"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/fee_quoter"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/onramp"
)
@@ -136,135 +132,6 @@ func NewChainInboundChangeset(
}, nil
}
-type AddDonAndSetCandidateChangesetConfig struct {
- HomeChainSelector uint64
- FeedChainSelector uint64
- NewChainSelector uint64
- PluginType types.PluginType
- NodeIDs []string
- CCIPOCRParams CCIPOCRParams
-}
-
-func (a AddDonAndSetCandidateChangesetConfig) Validate(e deployment.Environment, state CCIPOnChainState) (deployment.Nodes, error) {
- if a.HomeChainSelector == 0 {
- return nil, fmt.Errorf("HomeChainSelector must be set")
- }
- if a.FeedChainSelector == 0 {
- return nil, fmt.Errorf("FeedChainSelector must be set")
- }
- if a.NewChainSelector == 0 {
- return nil, fmt.Errorf("ocr config chain selector must be set")
- }
- if a.PluginType != types.PluginTypeCCIPCommit &&
- a.PluginType != types.PluginTypeCCIPExec {
- return nil, fmt.Errorf("PluginType must be set to either CCIPCommit or CCIPExec")
- }
- // TODO: validate token config
- if len(a.NodeIDs) == 0 {
- return nil, fmt.Errorf("nodeIDs must be set")
- }
- nodes, err := deployment.NodeInfo(a.NodeIDs, e.Offchain)
- if err != nil {
- return nil, fmt.Errorf("get node info: %w", err)
- }
-
- // check that chain config is set up for the new chain
- chainConfig, err := state.Chains[a.HomeChainSelector].CCIPHome.GetChainConfig(nil, a.NewChainSelector)
- if err != nil {
- return nil, fmt.Errorf("get all chain configs: %w", err)
- }
-
- // FChain should never be zero if a chain config is set in CCIPHome
- if chainConfig.FChain == 0 {
- return nil, fmt.Errorf("chain config not set up for new chain %d", a.NewChainSelector)
- }
-
- err = a.CCIPOCRParams.Validate()
- if err != nil {
- return nil, fmt.Errorf("invalid ccip ocr params: %w", err)
- }
-
- if e.OCRSecrets.IsEmpty() {
- return nil, fmt.Errorf("OCR secrets must be set")
- }
-
- return nodes, nil
-}
-
-// AddDonAndSetCandidateChangeset adds new DON for destination to home chain
-// and sets the commit plugin config as candidateConfig for the don.
-func AddDonAndSetCandidateChangeset(
- e deployment.Environment,
- cfg AddDonAndSetCandidateChangesetConfig,
-) (deployment.ChangesetOutput, error) {
- state, err := LoadOnchainState(e)
- if err != nil {
- return deployment.ChangesetOutput{}, err
- }
-
- nodes, err := cfg.Validate(e, state)
- if err != nil {
- return deployment.ChangesetOutput{}, fmt.Errorf("%w: %w", deployment.ErrInvalidConfig, err)
- }
-
- newDONArgs, err := internal.BuildOCR3ConfigForCCIPHome(
- e.OCRSecrets,
- state.Chains[cfg.NewChainSelector].OffRamp,
- e.Chains[cfg.NewChainSelector],
- nodes.NonBootstraps(),
- state.Chains[cfg.HomeChainSelector].RMNHome.Address(),
- cfg.CCIPOCRParams.OCRParameters,
- cfg.CCIPOCRParams.CommitOffChainConfig,
- cfg.CCIPOCRParams.ExecuteOffChainConfig,
- )
- if err != nil {
- return deployment.ChangesetOutput{}, err
- }
- latestDon, err := internal.LatestCCIPDON(state.Chains[cfg.HomeChainSelector].CapabilityRegistry)
- if err != nil {
- return deployment.ChangesetOutput{}, err
- }
- commitConfig, ok := newDONArgs[cfg.PluginType]
- if !ok {
- return deployment.ChangesetOutput{}, fmt.Errorf("missing commit plugin in ocr3Configs")
- }
- donID := latestDon.Id + 1
- addDonOp, err := newDonWithCandidateOp(
- donID, commitConfig,
- state.Chains[cfg.HomeChainSelector].CapabilityRegistry,
- nodes.NonBootstraps(),
- )
- if err != nil {
- return deployment.ChangesetOutput{}, err
- }
-
- var (
- timelocksPerChain = map[uint64]common.Address{
- cfg.HomeChainSelector: state.Chains[cfg.HomeChainSelector].Timelock.Address(),
- }
- proposerMCMSes = map[uint64]*gethwrappers.ManyChainMultiSig{
- cfg.HomeChainSelector: state.Chains[cfg.HomeChainSelector].ProposerMcm,
- }
- )
- prop, err := proposalutils.BuildProposalFromBatches(
- timelocksPerChain,
- proposerMCMSes,
- []timelock.BatchChainOperation{{
- ChainIdentifier: mcms.ChainIdentifier(cfg.HomeChainSelector),
- Batch: []mcms.Operation{addDonOp},
- }},
- "setCandidate for commit and AddDon on new Chain",
- 0, // minDelay
- )
- if err != nil {
- return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal from batch: %w", err)
- }
-
- return deployment.ChangesetOutput{
- Proposals: []timelock.MCMSWithTimelockProposal{*prop},
- }, nil
-}
-
func applyChainConfigUpdatesOp(
e deployment.Environment,
state CCIPOnChainState,
@@ -304,38 +171,3 @@ func applyChainConfigUpdatesOp(
Value: big.NewInt(0),
}, nil
}
-
-// newDonWithCandidateOp sets the candidate commit config by calling setCandidate on CCIPHome contract through the AddDON call on CapReg contract
-// This should be done first before calling any other UpdateDON calls
-// This proposes to set up OCR3 config for the commit plugin for the DON
-func newDonWithCandidateOp(
- donID uint32,
- pluginConfig ccip_home.CCIPHomeOCR3Config,
- capReg *capabilities_registry.CapabilitiesRegistry,
- nodes deployment.Nodes,
-) (mcms.Operation, error) {
- encodedSetCandidateCall, err := internal.CCIPHomeABI.Pack(
- "setCandidate",
- donID,
- pluginConfig.PluginType,
- pluginConfig,
- [32]byte{},
- )
- if err != nil {
- return mcms.Operation{}, fmt.Errorf("pack set candidate call: %w", err)
- }
- addDonTx, err := capReg.AddDON(deployment.SimTransactOpts(), nodes.PeerIDs(), []capabilities_registry.CapabilitiesRegistryCapabilityConfiguration{
- {
- CapabilityId: internal.CCIPCapabilityID,
- Config: encodedSetCandidateCall,
- },
- }, false, false, nodes.DefaultF())
- if err != nil {
- return mcms.Operation{}, fmt.Errorf("could not generate add don tx w/ commit config: %w", err)
- }
- return mcms.Operation{
- To: capReg.Address(),
- Data: addDonTx.Data(),
- Value: big.NewInt(0),
- }, nil
-}
diff --git a/deployment/ccip/changeset/cs_add_chain_test.go b/deployment/ccip/changeset/cs_add_chain_test.go
index 8f9eef78a05..ab66b782b7f 100644
--- a/deployment/ccip/changeset/cs_add_chain_test.go
+++ b/deployment/ccip/changeset/cs_add_chain_test.go
@@ -1,12 +1,12 @@
package changeset
import (
- "math/big"
"testing"
"time"
"github.com/smartcontractkit/chainlink/deployment/ccip/changeset/internal"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
commontypes "github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
@@ -30,6 +30,8 @@ import (
)
func TestAddChainInbound(t *testing.T) {
+ t.Skipf("Skipping test as it is running into timeout issues, move the test into integration in-memory tests")
+ t.Parallel()
// 4 chains where the 4th is added after initial deployment.
e := NewMemoryEnvironment(t,
WithChains(4),
@@ -43,12 +45,7 @@ func TestAddChainInbound(t *testing.T) {
initialDeploy := e.Env.AllChainSelectorsExcluding([]uint64{newChain})
newAddresses := deployment.NewMemoryAddressBook()
- cfg := commontypes.MCMSWithTimelockConfig{
- Canceller: commonchangeset.SingleGroupMCMS(t),
- Bypasser: commonchangeset.SingleGroupMCMS(t),
- Proposer: commonchangeset.SingleGroupMCMS(t),
- TimelockMinDelay: big.NewInt(0),
- }
+ cfg := proposalutils.SingleGroupTimelockConfig(t)
e.Env, err = commonchangeset.ApplyChangesets(t, e.Env, nil, []commonchangeset.ChangesetApplication{
{
Changeset: commonchangeset.WrapChangeSet(commonchangeset.DeployLinkToken),
@@ -171,7 +168,7 @@ func TestAddChainInbound(t *testing.T) {
}
// transfer ownership to timelock
- _, err = commonchangeset.ApplyChangesets(t, e.Env, map[uint64]*commonchangeset.TimelockExecutionContracts{
+ _, err = commonchangeset.ApplyChangesets(t, e.Env, map[uint64]*proposalutils.TimelockExecutionContracts{
initialDeploy[0]: {
Timelock: state.Chains[initialDeploy[0]].Timelock,
CallProxy: state.Chains[initialDeploy[0]].CallProxy,
@@ -202,18 +199,11 @@ func TestAddChainInbound(t *testing.T) {
assertTimelockOwnership(t, e, initialDeploy, state)
- nodes, err := deployment.NodeInfo(e.Env.NodeIDs, e.Env.Offchain)
- require.NoError(t, err)
-
// TODO This currently is not working - Able to send the request here but request gets stuck in execution
// Send a new message and expect that this is delivered once the chain is completely set up as inbound
//TestSendRequest(t, e.Env, state, initialDeploy[0], newChain, true)
- var nodeIDs []string
- for _, node := range nodes {
- nodeIDs = append(nodeIDs, node.NodeID)
- }
- _, err = commonchangeset.ApplyChangesets(t, e.Env, map[uint64]*commonchangeset.TimelockExecutionContracts{
+ _, err = commonchangeset.ApplyChangesets(t, e.Env, map[uint64]*proposalutils.TimelockExecutionContracts{
e.HomeChainSel: {
Timelock: state.Chains[e.HomeChainSel].Timelock,
CallProxy: state.Chains[e.HomeChainSel].CallProxy,
@@ -226,31 +216,39 @@ func TestAddChainInbound(t *testing.T) {
{
Changeset: commonchangeset.WrapChangeSet(AddDonAndSetCandidateChangeset),
Config: AddDonAndSetCandidateChangesetConfig{
- HomeChainSelector: e.HomeChainSel,
- FeedChainSelector: e.FeedChainSel,
- NewChainSelector: newChain,
- PluginType: types.PluginTypeCCIPCommit,
- NodeIDs: nodeIDs,
- CCIPOCRParams: DefaultOCRParams(
- e.FeedChainSel,
- tokenConfig.GetTokenInfo(logger.TestLogger(t), state.Chains[newChain].LinkToken, state.Chains[newChain].Weth9),
- nil,
- ),
+ SetCandidateConfigBase: SetCandidateConfigBase{
+ HomeChainSelector: e.HomeChainSel,
+ FeedChainSelector: e.FeedChainSel,
+ DONChainSelector: newChain,
+ PluginType: types.PluginTypeCCIPCommit,
+ CCIPOCRParams: DefaultOCRParams(
+ e.FeedChainSel,
+ tokenConfig.GetTokenInfo(logger.TestLogger(t), state.Chains[newChain].LinkToken, state.Chains[newChain].Weth9),
+ nil,
+ ),
+ MCMS: &MCMSConfig{
+ MinDelay: 0,
+ },
+ },
},
},
{
- Changeset: commonchangeset.WrapChangeSet(SetCandidatePluginChangeset),
- Config: AddDonAndSetCandidateChangesetConfig{
- HomeChainSelector: e.HomeChainSel,
- FeedChainSelector: e.FeedChainSel,
- NewChainSelector: newChain,
- PluginType: types.PluginTypeCCIPExec,
- NodeIDs: nodeIDs,
- CCIPOCRParams: DefaultOCRParams(
- e.FeedChainSel,
- tokenConfig.GetTokenInfo(logger.TestLogger(t), state.Chains[newChain].LinkToken, state.Chains[newChain].Weth9),
- nil,
- ),
+ Changeset: commonchangeset.WrapChangeSet(SetCandidateChangeset),
+ Config: SetCandidateChangesetConfig{
+ SetCandidateConfigBase: SetCandidateConfigBase{
+ HomeChainSelector: e.HomeChainSel,
+ FeedChainSelector: e.FeedChainSel,
+ DONChainSelector: newChain,
+ PluginType: types.PluginTypeCCIPExec,
+ CCIPOCRParams: DefaultOCRParams(
+ e.FeedChainSel,
+ tokenConfig.GetTokenInfo(logger.TestLogger(t), state.Chains[newChain].LinkToken, state.Chains[newChain].Weth9),
+ nil,
+ ),
+ MCMS: &MCMSConfig{
+ MinDelay: 0,
+ },
+ },
},
},
{
@@ -258,13 +256,13 @@ func TestAddChainInbound(t *testing.T) {
Config: PromoteAllCandidatesChangesetConfig{
HomeChainSelector: e.HomeChainSel,
DONChainSelector: newChain,
- NodeIDs: nodeIDs,
MCMS: &MCMSConfig{
MinDelay: 0,
},
},
},
})
+ require.NoError(t, err)
// verify if the configs are updated
require.NoError(t, ValidateCCIPHomeConfigSetUp(
diff --git a/deployment/ccip/changeset/cs_add_lane_test.go b/deployment/ccip/changeset/cs_add_lane_test.go
index 7f1374a1725..5c324c975ef 100644
--- a/deployment/ccip/changeset/cs_add_lane_test.go
+++ b/deployment/ccip/changeset/cs_add_lane_test.go
@@ -16,6 +16,7 @@ import (
)
func TestAddLanesWithTestRouter(t *testing.T) {
+ t.Parallel()
e := NewMemoryEnvironment(t)
// Here we have CR + nodes set up, but no CCIP contracts deployed.
state, err := LoadOnchainState(e.Env)
diff --git a/deployment/ccip/changeset/cs_ccip_home.go b/deployment/ccip/changeset/cs_ccip_home.go
index 202d4216b60..f1e860d9d28 100644
--- a/deployment/ccip/changeset/cs_ccip_home.go
+++ b/deployment/ccip/changeset/cs_ccip_home.go
@@ -14,57 +14,63 @@ import (
"github.com/smartcontractkit/chainlink/deployment"
"github.com/smartcontractkit/chainlink/deployment/ccip/changeset/internal"
"github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
+ "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
cctypes "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/ccip_home"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry"
)
var (
+ _ deployment.ChangeSet[AddDonAndSetCandidateChangesetConfig] = AddDonAndSetCandidateChangeset
_ deployment.ChangeSet[PromoteAllCandidatesChangesetConfig] = PromoteAllCandidatesChangeset
- _ deployment.ChangeSet[AddDonAndSetCandidateChangesetConfig] = SetCandidatePluginChangeset
+ _ deployment.ChangeSet[SetCandidateChangesetConfig] = SetCandidateChangeset
+ _ deployment.ChangeSet[RevokeCandidateChangesetConfig] = RevokeCandidateChangeset
)
type PromoteAllCandidatesChangesetConfig struct {
HomeChainSelector uint64
+
// DONChainSelector is the chain selector of the DON that we want to promote the candidate config of.
// Note that each (chain, ccip capability version) pair has a unique DON ID.
DONChainSelector uint64
- NodeIDs []string
- MCMS *MCMSConfig
+
+ // MCMS is optional MCMS configuration; if provided, the changeset will generate an MCMS proposal.
+ // If nil, the changeset will execute the commands directly using the deployer key
+ // of the provided environment.
+ MCMS *MCMSConfig
}
-func (p PromoteAllCandidatesChangesetConfig) Validate(e deployment.Environment, state CCIPOnChainState) (deployment.Nodes, error) {
+func (p PromoteAllCandidatesChangesetConfig) Validate(e deployment.Environment, state CCIPOnChainState) (donID uint32, err error) {
if err := deployment.IsValidChainSelector(p.HomeChainSelector); err != nil {
- return nil, fmt.Errorf("home chain selector invalid: %w", err)
+ return 0, fmt.Errorf("home chain selector invalid: %w", err)
}
if err := deployment.IsValidChainSelector(p.DONChainSelector); err != nil {
- return nil, fmt.Errorf("don chain selector invalid: %w", err)
+ return 0, fmt.Errorf("don chain selector invalid: %w", err)
}
- if len(p.NodeIDs) == 0 {
- return nil, fmt.Errorf("NodeIDs must be set")
+ if len(e.NodeIDs) == 0 {
+ return 0, fmt.Errorf("NodeIDs must be set")
}
if state.Chains[p.HomeChainSelector].CCIPHome == nil {
- return nil, fmt.Errorf("CCIPHome contract does not exist")
+ return 0, fmt.Errorf("CCIPHome contract does not exist")
}
if state.Chains[p.HomeChainSelector].CapabilityRegistry == nil {
- return nil, fmt.Errorf("CapabilityRegistry contract does not exist")
+ return 0, fmt.Errorf("CapabilityRegistry contract does not exist")
}
-
- nodes, err := deployment.NodeInfo(p.NodeIDs, e.Offchain)
- if err != nil {
- return nil, fmt.Errorf("fetch node info: %w", err)
+ if state.Chains[p.DONChainSelector].OffRamp == nil {
+ // should not be possible, but a defensive check.
+ return 0, fmt.Errorf("OffRamp contract does not exist")
}
- donID, err := internal.DonIDForChain(
+ donID, err = internal.DonIDForChain(
state.Chains[p.HomeChainSelector].CapabilityRegistry,
state.Chains[p.HomeChainSelector].CCIPHome,
p.DONChainSelector,
)
if err != nil {
- return nil, fmt.Errorf("fetch don id for chain: %w", err)
+ return 0, fmt.Errorf("fetch don id for chain: %w", err)
}
if donID == 0 {
- return nil, fmt.Errorf("don doesn't exist in CR for chain %d", p.DONChainSelector)
+ return 0, fmt.Errorf("don doesn't exist in CR for chain %d", p.DONChainSelector)
}
// Check that candidate digest and active digest are not both zero - this is enforced onchain.
@@ -72,31 +78,34 @@ func (p PromoteAllCandidatesChangesetConfig) Validate(e deployment.Environment,
Context: context.Background(),
}, donID, uint8(cctypes.PluginTypeCCIPCommit))
if err != nil {
- return nil, fmt.Errorf("fetching commit configs from cciphome: %w", err)
+ return 0, fmt.Errorf("fetching commit configs from cciphome: %w", err)
}
execConfigs, err := state.Chains[p.HomeChainSelector].CCIPHome.GetAllConfigs(&bind.CallOpts{
Context: context.Background(),
}, donID, uint8(cctypes.PluginTypeCCIPExec))
if err != nil {
- return nil, fmt.Errorf("fetching exec configs from cciphome: %w", err)
+ return 0, fmt.Errorf("fetching exec configs from cciphome: %w", err)
}
if commitConfigs.ActiveConfig.ConfigDigest == [32]byte{} &&
commitConfigs.CandidateConfig.ConfigDigest == [32]byte{} {
- return nil, fmt.Errorf("commit active and candidate config digests are both zero")
+ return 0, fmt.Errorf("commit active and candidate config digests are both zero")
}
if execConfigs.ActiveConfig.ConfigDigest == [32]byte{} &&
execConfigs.CandidateConfig.ConfigDigest == [32]byte{} {
- return nil, fmt.Errorf("exec active and candidate config digests are both zero")
+ return 0, fmt.Errorf("exec active and candidate config digests are both zero")
}
- return nodes, nil
+ return donID, nil
}
// PromoteAllCandidatesChangeset generates a proposal to call promoteCandidate on the CCIPHome through CapReg.
-// This needs to be called after SetCandidateProposal is executed.
+// Note that a DON must already exist for this changeset to take effect,
+// i.e. AddDonAndSetCandidateChangeset must be called first.
+// This can also be used to promote a 0x0 candidate config to active, effectively shutting down the DON.
+// At that point you can call the RemoveDON changeset to remove the DON entirely from the capability registry.
func PromoteAllCandidatesChangeset(
e deployment.Environment,
cfg PromoteAllCandidatesChangesetConfig,
@@ -106,11 +115,16 @@ func PromoteAllCandidatesChangeset(
return deployment.ChangesetOutput{}, err
}
- nodes, err := cfg.Validate(e, state)
+ donID, err := cfg.Validate(e, state)
if err != nil {
return deployment.ChangesetOutput{}, fmt.Errorf("%w: %w", deployment.ErrInvalidConfig, err)
}
+ nodes, err := deployment.NodeInfo(e.NodeIDs, e.Offchain)
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("fetch node info: %w", err)
+ }
+
txOpts := e.Chains[cfg.HomeChainSelector].DeployerKey
if cfg.MCMS != nil {
txOpts = deployment.SimTransactOpts()
@@ -119,12 +133,12 @@ func PromoteAllCandidatesChangeset(
homeChain := e.Chains[cfg.HomeChainSelector]
promoteCandidateOps, err := promoteAllCandidatesForChainOps(
- homeChain,
txOpts,
+ homeChain,
state.Chains[cfg.HomeChainSelector].CapabilityRegistry,
state.Chains[cfg.HomeChainSelector].CCIPHome,
- cfg.DONChainSelector,
nodes.NonBootstraps(),
+ donID,
cfg.MCMS != nil,
)
if err != nil {
@@ -160,8 +174,124 @@ func PromoteAllCandidatesChangeset(
}, nil
}
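+
+// As an illustrative sketch (not part of this change), promoting all candidates for an
+// existing DON without MCMS mirrors the test usage in this package; env, homeSel and
+// destSel are placeholders for the caller's environment and chain selectors:
+//
+//	_, err := PromoteAllCandidatesChangeset(env, PromoteAllCandidatesChangesetConfig{
+//		HomeChainSelector: homeSel,
+//		DONChainSelector:  destSel,
+//		MCMS:              nil, // execute directly with the deployer key
+//	})
+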
-// SetCandidatePluginChangeset calls setCandidate on the CCIPHome for setting up OCR3 exec Plugin config for the new chain.
-func SetCandidatePluginChangeset(
+// SetCandidateConfigBase is a common base config struct for AddDonAndSetCandidateChangesetConfig and SetCandidateChangesetConfig.
+// This is extracted to deduplicate most of the validation logic.
+// Remaining validation logic is done in the specific config structs that embed it.
+type SetCandidateConfigBase struct {
+ HomeChainSelector uint64
+ FeedChainSelector uint64
+
+ // DONChainSelector is the chain selector of the chain where the DON will be added.
+ DONChainSelector uint64
+
+ PluginType types.PluginType
+ // Note that the PluginType field determines which field in CCIPOCRParams is used.
+ CCIPOCRParams CCIPOCRParams
+
+ // MCMS is optional MCMS configuration; if provided, the changeset will generate an MCMS proposal.
+ // If nil, the changeset will execute the commands directly using the deployer key
+ // of the provided environment.
+ MCMS *MCMSConfig
+}
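+
+// As an illustrative sketch (not part of this change): leaving MCMS nil makes the
+// changeset send transactions directly with the deployer key, while a non-nil value
+// produces a timelocked MCMS proposal instead, e.g.
+//
+//	cfg.MCMS = nil                      // direct execution with the deployer key
+//	cfg.MCMS = &MCMSConfig{MinDelay: 0} // generate an MCMS proposal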
+
+func (s SetCandidateConfigBase) Validate(e deployment.Environment, state CCIPOnChainState) error {
+ if err := deployment.IsValidChainSelector(s.HomeChainSelector); err != nil {
+ return fmt.Errorf("home chain selector invalid: %w", err)
+ }
+ if err := deployment.IsValidChainSelector(s.FeedChainSelector); err != nil {
+ return fmt.Errorf("feed chain selector invalid: %w", err)
+ }
+ if err := deployment.IsValidChainSelector(s.DONChainSelector); err != nil {
+ return fmt.Errorf("don chain selector invalid: %w", err)
+ }
+ if len(e.NodeIDs) == 0 {
+ return fmt.Errorf("nodeIDs must be set")
+ }
+ if state.Chains[s.HomeChainSelector].CCIPHome == nil {
+ return fmt.Errorf("CCIPHome contract does not exist")
+ }
+ if state.Chains[s.HomeChainSelector].CapabilityRegistry == nil {
+ return fmt.Errorf("CapabilityRegistry contract does not exist")
+ }
+ if state.Chains[s.DONChainSelector].OffRamp == nil {
+ // should not be possible, but a defensive check.
+ return fmt.Errorf("OffRamp contract does not exist on don chain selector %d", s.DONChainSelector)
+ }
+ if s.PluginType != types.PluginTypeCCIPCommit &&
+ s.PluginType != types.PluginTypeCCIPExec {
+ return fmt.Errorf("PluginType must be set to either CCIPCommit or CCIPExec")
+ }
+
+ // no donID check since this config is used for both adding a new DON and updating an existing one.
+ // see AddDonAndSetCandidateChangesetConfig.Validate and SetCandidateChangesetConfig.Validate
+ // for these checks.
+
+ // check that chain config is set up for the new chain
+ chainConfig, err := state.Chains[s.HomeChainSelector].CCIPHome.GetChainConfig(nil, s.DONChainSelector)
+ if err != nil {
+ return fmt.Errorf("get chain config: %w", err)
+ }
+
+ // FChain should never be zero if a chain config is set in CCIPHome
+ if chainConfig.FChain == 0 {
+ return fmt.Errorf("chain config not set up for new chain %d", s.DONChainSelector)
+ }
+
+ err = s.CCIPOCRParams.Validate()
+ if err != nil {
+ return fmt.Errorf("invalid ccip ocr params: %w", err)
+ }
+
+ // TODO: validate token config in the commit config, if commit is the plugin.
+ // TODO: validate gas config in the chain config in cciphome for this DONChainSelector.
+
+ if e.OCRSecrets.IsEmpty() {
+ return fmt.Errorf("OCR secrets must be set")
+ }
+
+ return nil
+}
+
+// AddDonAndSetCandidateChangesetConfig is a separate config struct
+// because the validation is slightly different from SetCandidateChangesetConfig.
+// In particular, we check to make sure we don't already have a DON for the chain.
+type AddDonAndSetCandidateChangesetConfig struct {
+ SetCandidateConfigBase
+}
+
+func (a AddDonAndSetCandidateChangesetConfig) Validate(e deployment.Environment, state CCIPOnChainState) error {
+ err := a.SetCandidateConfigBase.Validate(e, state)
+ if err != nil {
+ return err
+ }
+
+ // check if a DON already exists for this chain
+ donID, err := internal.DonIDForChain(
+ state.Chains[a.HomeChainSelector].CapabilityRegistry,
+ state.Chains[a.HomeChainSelector].CCIPHome,
+ a.DONChainSelector,
+ )
+ if err != nil {
+ return fmt.Errorf("fetch don id for chain: %w", err)
+ }
+ if donID != 0 {
+ return fmt.Errorf("don already exists in CR for chain %d, it has id %d", a.DONChainSelector, donID)
+ }
+
+ return nil
+}
+
+// AddDonAndSetCandidateChangeset adds a new DON for the destination chain to the home chain
+// and sets the plugin config as the candidateConfig for that DON.
+//
+// This is the first step to creating a CCIP DON and must be executed before any
+// other changesets (SetCandidateChangeset, PromoteAllCandidatesChangeset)
+// can be executed.
+//
+// Note that these operations must be done together because the createDON call
+// in the capability registry calls the capability config contract, so we must
+// provide suitable calldata for CCIPHome.
+func AddDonAndSetCandidateChangeset(
e deployment.Environment,
cfg AddDonAndSetCandidateChangesetConfig,
) (deployment.ChangesetOutput, error) {
@@ -170,15 +300,25 @@ func SetCandidatePluginChangeset(
return deployment.ChangesetOutput{}, err
}
- nodes, err := cfg.Validate(e, state)
+ err = cfg.Validate(e, state)
if err != nil {
return deployment.ChangesetOutput{}, fmt.Errorf("%w: %w", deployment.ErrInvalidConfig, err)
}
+ nodes, err := deployment.NodeInfo(e.NodeIDs, e.Offchain)
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("get node info: %w", err)
+ }
+
+ txOpts := e.Chains[cfg.HomeChainSelector].DeployerKey
+ if cfg.MCMS != nil {
+ txOpts = deployment.SimTransactOpts()
+ }
+
newDONArgs, err := internal.BuildOCR3ConfigForCCIPHome(
e.OCRSecrets,
- state.Chains[cfg.NewChainSelector].OffRamp,
- e.Chains[cfg.NewChainSelector],
+ state.Chains[cfg.DONChainSelector].OffRamp,
+ e.Chains[cfg.DONChainSelector],
nodes.NonBootstraps(),
state.Chains[cfg.HomeChainSelector].RMNHome.Address(),
cfg.CCIPOCRParams.OCRParameters,
@@ -189,39 +329,209 @@ func SetCandidatePluginChangeset(
return deployment.ChangesetOutput{}, err
}
- config, ok := newDONArgs[cfg.PluginType]
+ latestDon, err := internal.LatestCCIPDON(state.Chains[cfg.HomeChainSelector].CapabilityRegistry)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ pluginOCR3Config, ok := newDONArgs[cfg.PluginType]
if !ok {
- return deployment.ChangesetOutput{}, fmt.Errorf("missing %s plugin in ocr3Configs", cfg.PluginType.String())
+ return deployment.ChangesetOutput{}, fmt.Errorf("missing %s plugin in ocr3Configs", cfg.PluginType.String())
}
- setCandidateMCMSOps, err := setCandidateOnExistingDon(
- config,
+ expectedDonID := latestDon.Id + 1
+ addDonOp, err := newDonWithCandidateOp(
+ txOpts,
+ e.Chains[cfg.HomeChainSelector],
+ expectedDonID,
+ pluginOCR3Config,
state.Chains[cfg.HomeChainSelector].CapabilityRegistry,
- state.Chains[cfg.HomeChainSelector].CCIPHome,
- cfg.NewChainSelector,
nodes.NonBootstraps(),
+ cfg.MCMS != nil,
)
if err != nil {
return deployment.ChangesetOutput{}, err
}
+ if cfg.MCMS == nil {
+ return deployment.ChangesetOutput{}, nil
+ }
- var (
- timelocksPerChain = map[uint64]common.Address{
+ prop, err := proposalutils.BuildProposalFromBatches(
+ map[uint64]common.Address{
cfg.HomeChainSelector: state.Chains[cfg.HomeChainSelector].Timelock.Address(),
- }
- proposerMCMSes = map[uint64]*gethwrappers.ManyChainMultiSig{
+ },
+ map[uint64]*gethwrappers.ManyChainMultiSig{
cfg.HomeChainSelector: state.Chains[cfg.HomeChainSelector].ProposerMcm,
+ },
+ []timelock.BatchChainOperation{{
+ ChainIdentifier: mcms.ChainIdentifier(cfg.HomeChainSelector),
+ Batch: []mcms.Operation{addDonOp},
+ }},
+ fmt.Sprintf("addDON on new Chain && setCandidate for plugin %s", cfg.PluginType.String()),
+ cfg.MCMS.MinDelay,
+ )
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal from batch: %w", err)
+ }
+
+ return deployment.ChangesetOutput{
+ Proposals: []timelock.MCMSWithTimelockProposal{*prop},
+ }, nil
+}
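+
+// As an illustrative sketch (not part of this change), a full rollout for a new chain
+// applies the changesets in this order, mirroring the tests in this package; homeSel,
+// feedSel, newChainSel and ocrParams are placeholders for the caller's values:
+//
+//	// 1. create the DON with the commit plugin as its first candidate
+//	_, err := AddDonAndSetCandidateChangeset(env, AddDonAndSetCandidateChangesetConfig{
+//		SetCandidateConfigBase: SetCandidateConfigBase{
+//			HomeChainSelector: homeSel,
+//			FeedChainSelector: feedSel,
+//			DONChainSelector:  newChainSel,
+//			PluginType:        types.PluginTypeCCIPCommit,
+//			CCIPOCRParams:     ocrParams,
+//		},
+//	})
+//	// 2. set the exec plugin candidate with SetCandidateChangeset
+//	// 3. promote both candidates with PromoteAllCandidatesChangeset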
+
+// newDonWithCandidateOp sets the candidate config for the given plugin by calling setCandidate on the CCIPHome contract through the AddDON call on the CapReg contract.
+// This should be done before calling any other UpdateDON calls.
+// This proposes to set up the OCR3 config of the provided plugin for the new DON.
+func newDonWithCandidateOp(
+ txOpts *bind.TransactOpts,
+ homeChain deployment.Chain,
+ donID uint32,
+ pluginConfig ccip_home.CCIPHomeOCR3Config,
+ capReg *capabilities_registry.CapabilitiesRegistry,
+ nodes deployment.Nodes,
+ mcmsEnabled bool,
+) (mcms.Operation, error) {
+ encodedSetCandidateCall, err := internal.CCIPHomeABI.Pack(
+ "setCandidate",
+ donID,
+ pluginConfig.PluginType,
+ pluginConfig,
+ [32]byte{},
+ )
+ if err != nil {
+ return mcms.Operation{}, fmt.Errorf("pack set candidate call: %w", err)
+ }
+
+ addDonTx, err := capReg.AddDON(
+ txOpts,
+ nodes.PeerIDs(),
+ []capabilities_registry.CapabilitiesRegistryCapabilityConfiguration{
+ {
+ CapabilityId: internal.CCIPCapabilityID,
+ Config: encodedSetCandidateCall,
+ },
+ },
+ false, // isPublic
+ false, // acceptsWorkflows
+ nodes.DefaultF(),
+ )
+ if err != nil {
+ return mcms.Operation{}, fmt.Errorf("could not generate add don tx w/ commit config: %w", err)
+ }
+ if !mcmsEnabled {
+ _, err = deployment.ConfirmIfNoError(homeChain, addDonTx, err)
+ if err != nil {
+ return mcms.Operation{}, fmt.Errorf("error confirming addDon call: %w", err)
}
+ }
+
+ return mcms.Operation{
+ To: capReg.Address(),
+ Data: addDonTx.Data(),
+ Value: big.NewInt(0),
+ }, nil
+}
+
+type SetCandidateChangesetConfig struct {
+ SetCandidateConfigBase
+}
+
+func (s SetCandidateChangesetConfig) Validate(e deployment.Environment, state CCIPOnChainState) (donID uint32, err error) {
+ err = s.SetCandidateConfigBase.Validate(e, state)
+ if err != nil {
+ return 0, err
+ }
+
+ donID, err = internal.DonIDForChain(
+ state.Chains[s.HomeChainSelector].CapabilityRegistry,
+ state.Chains[s.HomeChainSelector].CCIPHome,
+ s.DONChainSelector,
)
+ if err != nil {
+ return 0, fmt.Errorf("fetch don id for chain: %w", err)
+ }
+ if donID == 0 {
+ return 0, fmt.Errorf("don doesn't exist in CR for chain %d", s.DONChainSelector)
+ }
+
+ return donID, nil
+}
+
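+// As an illustrative sketch (not part of this change), setting an exec candidate on an
+// existing DON mirrors the test usage in this package; homeSel, feedSel, destSel and
+// ocrParams are placeholders for the caller's values:
+//
+//	_, err := SetCandidateChangeset(env, SetCandidateChangesetConfig{
+//		SetCandidateConfigBase: SetCandidateConfigBase{
+//			HomeChainSelector: homeSel,
+//			FeedChainSelector: feedSel,
+//			DONChainSelector:  destSel,
+//			PluginType:        types.PluginTypeCCIPExec,
+//			CCIPOCRParams:     ocrParams,
+//		},
+//	})
+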
+// SetCandidateChangeset generates a proposal to call setCandidate on the CCIPHome through the capability registry.
+// A DON must already exist in order to use this changeset effectively, i.e. AddDonAndSetCandidateChangeset must be called first.
+func SetCandidateChangeset(
+ e deployment.Environment,
+ cfg SetCandidateChangesetConfig,
+) (deployment.ChangesetOutput, error) {
+ state, err := LoadOnchainState(e)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ donID, err := cfg.Validate(e, state)
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("%w: %w", deployment.ErrInvalidConfig, err)
+ }
+
+ nodes, err := deployment.NodeInfo(e.NodeIDs, e.Offchain)
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("get node info: %w", err)
+ }
+
+ txOpts := e.Chains[cfg.HomeChainSelector].DeployerKey
+ if cfg.MCMS != nil {
+ txOpts = deployment.SimTransactOpts()
+ }
+
+ newDONArgs, err := internal.BuildOCR3ConfigForCCIPHome(
+ e.OCRSecrets,
+ state.Chains[cfg.DONChainSelector].OffRamp,
+ e.Chains[cfg.DONChainSelector],
+ nodes.NonBootstraps(),
+ state.Chains[cfg.HomeChainSelector].RMNHome.Address(),
+ cfg.CCIPOCRParams.OCRParameters,
+ cfg.CCIPOCRParams.CommitOffChainConfig,
+ cfg.CCIPOCRParams.ExecuteOffChainConfig,
+ )
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ config, ok := newDONArgs[cfg.PluginType]
+ if !ok {
+ return deployment.ChangesetOutput{}, fmt.Errorf("missing %s plugin in ocr3Configs", cfg.PluginType.String())
+ }
+
+ setCandidateMCMSOps, err := setCandidateOnExistingDon(
+ txOpts,
+ e.Chains[cfg.HomeChainSelector],
+ state.Chains[cfg.HomeChainSelector].CapabilityRegistry,
+ nodes.NonBootstraps(),
+ donID,
+ config,
+ cfg.MCMS != nil,
+ )
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ if cfg.MCMS == nil {
+ return deployment.ChangesetOutput{}, nil
+ }
+
prop, err := proposalutils.BuildProposalFromBatches(
- timelocksPerChain,
- proposerMCMSes,
+ map[uint64]common.Address{
+ cfg.HomeChainSelector: state.Chains[cfg.HomeChainSelector].Timelock.Address(),
+ },
+ map[uint64]*gethwrappers.ManyChainMultiSig{
+ cfg.HomeChainSelector: state.Chains[cfg.HomeChainSelector].ProposerMcm,
+ },
[]timelock.BatchChainOperation{{
ChainIdentifier: mcms.ChainIdentifier(cfg.HomeChainSelector),
Batch: setCandidateMCMSOps,
}},
fmt.Sprintf("SetCandidate for %s plugin", cfg.PluginType.String()),
- 0, // minDelay
+ cfg.MCMS.MinDelay,
)
if err != nil {
return deployment.ChangesetOutput{}, err
@@ -236,22 +546,18 @@ func SetCandidatePluginChangeset(
// setCandidateOnExistingDon calls setCandidate on CCIPHome contract through the UpdateDON call on CapReg contract
// This proposes to set up OCR3 config for the provided plugin for the DON
func setCandidateOnExistingDon(
- pluginConfig ccip_home.CCIPHomeOCR3Config,
+ txOpts *bind.TransactOpts,
+ homeChain deployment.Chain,
capReg *capabilities_registry.CapabilitiesRegistry,
- ccipHome *ccip_home.CCIPHome,
- chainSelector uint64,
nodes deployment.Nodes,
+ donID uint32,
+ pluginConfig ccip_home.CCIPHomeOCR3Config,
+ mcmsEnabled bool,
) ([]mcms.Operation, error) {
- // fetch DON ID for the chain
- donID, err := internal.DonIDForChain(capReg, ccipHome, chainSelector)
- if err != nil {
- return nil, fmt.Errorf("fetch don id for chain: %w", err)
- }
if donID == 0 {
- return nil, fmt.Errorf("don doesn't exist in CR for chain %d", chainSelector)
+ return nil, fmt.Errorf("donID is zero")
}
- fmt.Printf("donID: %d", donID)
encodedSetCandidateCall, err := internal.CCIPHomeABI.Pack(
"setCandidate",
donID,
@@ -265,7 +571,7 @@ func setCandidateOnExistingDon(
// set candidate call
updateDonTx, err := capReg.UpdateDON(
- deployment.SimTransactOpts(),
+ txOpts,
donID,
nodes.PeerIDs(),
[]capabilities_registry.CapabilitiesRegistryCapabilityConfiguration{
@@ -278,7 +584,19 @@ func setCandidateOnExistingDon(
nodes.DefaultF(),
)
if err != nil {
- return nil, fmt.Errorf("update don w/ exec config: %w", err)
+ return nil, fmt.Errorf("update don w/ setCandidate call: %w", err)
+ }
+ if !mcmsEnabled {
+ _, err = deployment.ConfirmIfNoError(homeChain, updateDonTx, err)
+ if err != nil {
+ return nil, fmt.Errorf("error confirming updateDon call: %w", err)
+ }
return []mcms.Operation{{
@@ -290,13 +608,13 @@ func setCandidateOnExistingDon(
// promoteCandidateOp will create the MCMS Operation for `promoteCandidateAndRevokeActive` directed towards the capabilityRegistry
func promoteCandidateOp(
- homeChain deployment.Chain,
txOpts *bind.TransactOpts,
- donID uint32,
- pluginType uint8,
+ homeChain deployment.Chain,
capReg *capabilities_registry.CapabilitiesRegistry,
ccipHome *ccip_home.CCIPHome,
nodes deployment.Nodes,
+ donID uint32,
+ pluginType uint8,
mcmsEnabled bool,
) (mcms.Operation, error) {
allConfigs, err := ccipHome.GetAllConfigs(nil, donID, pluginType)
@@ -347,31 +665,44 @@ func promoteCandidateOp(
// promoteAllCandidatesForChainOps promotes the candidate commit and exec configs to active by calling promoteCandidateAndRevokeActive on CCIPHome through the UpdateDON call on CapReg contract
func promoteAllCandidatesForChainOps(
- homeChain deployment.Chain,
txOpts *bind.TransactOpts,
+ homeChain deployment.Chain,
capReg *capabilities_registry.CapabilitiesRegistry,
ccipHome *ccip_home.CCIPHome,
- chainSelector uint64,
nodes deployment.Nodes,
+ donID uint32,
mcmsEnabled bool,
) ([]mcms.Operation, error) {
- // fetch DON ID for the chain
- donID, err := internal.DonIDForChain(capReg, ccipHome, chainSelector)
- if err != nil {
- return nil, fmt.Errorf("fetch don id for chain: %w", err)
- }
if donID == 0 {
- return nil, fmt.Errorf("don doesn't exist in CR for chain %d", chainSelector)
+ return nil, fmt.Errorf("donID is zero")
}
var mcmsOps []mcms.Operation
- updateCommitOp, err := promoteCandidateOp(homeChain, txOpts, donID, uint8(cctypes.PluginTypeCCIPCommit), capReg, ccipHome, nodes, mcmsEnabled)
+ updateCommitOp, err := promoteCandidateOp(
+ txOpts,
+ homeChain,
+ capReg,
+ ccipHome,
+ nodes,
+ donID,
+ uint8(cctypes.PluginTypeCCIPCommit),
+ mcmsEnabled,
+ )
if err != nil {
return nil, fmt.Errorf("promote candidate op: %w", err)
}
mcmsOps = append(mcmsOps, updateCommitOp)
- updateExecOp, err := promoteCandidateOp(homeChain, txOpts, donID, uint8(cctypes.PluginTypeCCIPExec), capReg, ccipHome, nodes, mcmsEnabled)
+ updateExecOp, err := promoteCandidateOp(
+ txOpts,
+ homeChain,
+ capReg,
+ ccipHome,
+ nodes,
+ donID,
+ uint8(cctypes.PluginTypeCCIPExec),
+ mcmsEnabled,
+ )
if err != nil {
return nil, fmt.Errorf("promote candidate op: %w", err)
}
@@ -379,3 +710,181 @@ func promoteAllCandidatesForChainOps(
return mcmsOps, nil
}
+
+type RevokeCandidateChangesetConfig struct {
+ HomeChainSelector uint64
+
+ // DONChainSelector is the chain selector whose candidate config we want to revoke.
+ DONChainSelector uint64
+ PluginType types.PluginType
+
+ // MCMS is optional MCMS configuration; if provided, the changeset will generate an MCMS proposal.
+ // If nil, the changeset will execute the commands directly using the deployer key
+ // of the provided environment.
+ MCMS *MCMSConfig
+}
+
+func (r RevokeCandidateChangesetConfig) Validate(e deployment.Environment, state CCIPOnChainState) (donID uint32, err error) {
+ if err := deployment.IsValidChainSelector(r.HomeChainSelector); err != nil {
+ return 0, fmt.Errorf("home chain selector invalid: %w", err)
+ }
+ if err := deployment.IsValidChainSelector(r.DONChainSelector); err != nil {
+ return 0, fmt.Errorf("don chain selector invalid: %w", err)
+ }
+ if len(e.NodeIDs) == 0 {
+ return 0, fmt.Errorf("NodeIDs must be set")
+ }
+ if state.Chains[r.HomeChainSelector].CCIPHome == nil {
+ return 0, fmt.Errorf("CCIPHome contract does not exist")
+ }
+ if state.Chains[r.HomeChainSelector].CapabilityRegistry == nil {
+ return 0, fmt.Errorf("CapabilityRegistry contract does not exist")
+ }
+
+ // check that the don exists for this chain
+ donID, err = internal.DonIDForChain(
+ state.Chains[r.HomeChainSelector].CapabilityRegistry,
+ state.Chains[r.HomeChainSelector].CCIPHome,
+ r.DONChainSelector,
+ )
+ if err != nil {
+ return 0, fmt.Errorf("fetch don id for chain: %w", err)
+ }
+ if donID == 0 {
+ return 0, fmt.Errorf("don doesn't exist in CR for chain %d", r.DONChainSelector)
+ }
+
+ // check that candidate digest is not zero - this is enforced onchain.
+ candidateDigest, err := state.Chains[r.HomeChainSelector].CCIPHome.GetCandidateDigest(nil, donID, uint8(r.PluginType))
+ if err != nil {
+ return 0, fmt.Errorf("fetching candidate digest from cciphome: %w", err)
+ }
+ if candidateDigest == [32]byte{} {
+ return 0, fmt.Errorf("candidate config digest is zero, can't revoke it")
+ }
+
+ return donID, nil
+}
+
+func RevokeCandidateChangeset(e deployment.Environment, cfg RevokeCandidateChangesetConfig) (deployment.ChangesetOutput, error) {
+ state, err := LoadOnchainState(e)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ donID, err := cfg.Validate(e, state)
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("%w: %w", deployment.ErrInvalidConfig, err)
+ }
+
+ nodes, err := deployment.NodeInfo(e.NodeIDs, e.Offchain)
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("fetch nodes info: %w", err)
+ }
+
+ txOpts := e.Chains[cfg.HomeChainSelector].DeployerKey
+ if cfg.MCMS != nil {
+ txOpts = deployment.SimTransactOpts()
+ }
+
+ homeChain := e.Chains[cfg.HomeChainSelector]
+ ops, err := revokeCandidateOps(
+ txOpts,
+ homeChain,
+ state.Chains[cfg.HomeChainSelector].CapabilityRegistry,
+ state.Chains[cfg.HomeChainSelector].CCIPHome,
+ nodes.NonBootstraps(),
+ donID,
+ uint8(cfg.PluginType),
+ cfg.MCMS != nil,
+ )
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("revoke candidate ops: %w", err)
+ }
+ if cfg.MCMS == nil {
+ return deployment.ChangesetOutput{}, nil
+ }
+
+ prop, err := proposalutils.BuildProposalFromBatches(
+ map[uint64]common.Address{
+ cfg.HomeChainSelector: state.Chains[cfg.HomeChainSelector].Timelock.Address(),
+ },
+ map[uint64]*gethwrappers.ManyChainMultiSig{
+ cfg.HomeChainSelector: state.Chains[cfg.HomeChainSelector].ProposerMcm,
+ },
+ []timelock.BatchChainOperation{{
+ ChainIdentifier: mcms.ChainIdentifier(cfg.HomeChainSelector),
+ Batch: ops,
+ }},
+ fmt.Sprintf("revokeCandidate for don on chain %d", cfg.DONChainSelector),
+ cfg.MCMS.MinDelay,
+ )
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ return deployment.ChangesetOutput{
+ Proposals: []timelock.MCMSWithTimelockProposal{
+ *prop,
+ },
+ }, nil
+}
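+
+// As an illustrative sketch (not part of this change), revoking a pending exec candidate
+// without MCMS mirrors the test usage in this package; homeSel and destSel are
+// placeholders for the caller's chain selectors:
+//
+//	_, err := RevokeCandidateChangeset(env, RevokeCandidateChangesetConfig{
+//		HomeChainSelector: homeSel,
+//		DONChainSelector:  destSel,
+//		PluginType:        types.PluginTypeCCIPExec,
+//		MCMS:              nil, // execute directly with the deployer key
+//	})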
+
+func revokeCandidateOps(
+ txOpts *bind.TransactOpts,
+ homeChain deployment.Chain,
+ capReg *capabilities_registry.CapabilitiesRegistry,
+ ccipHome *ccip_home.CCIPHome,
+ nodes deployment.Nodes,
+ donID uint32,
+ pluginType uint8,
+ mcmsEnabled bool,
+) ([]mcms.Operation, error) {
+ if donID == 0 {
+ return nil, fmt.Errorf("donID is zero")
+ }
+
+ candidateDigest, err := ccipHome.GetCandidateDigest(nil, donID, pluginType)
+ if err != nil {
+ return nil, fmt.Errorf("fetching candidate digest from cciphome: %w", err)
+ }
+
+ encodedRevokeCandidateCall, err := internal.CCIPHomeABI.Pack(
+ "revokeCandidate",
+ donID,
+ pluginType,
+ candidateDigest,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("pack revoke candidate call: %w", err)
+ }
+
+ updateDonTx, err := capReg.UpdateDON(
+ txOpts,
+ donID,
+ nodes.PeerIDs(),
+ []capabilities_registry.CapabilitiesRegistryCapabilityConfiguration{
+ {
+ CapabilityId: internal.CCIPCapabilityID,
+ Config: encodedRevokeCandidateCall,
+ },
+ },
+ false, // isPublic
+ nodes.DefaultF(),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("update don w/ revokeCandidate call: %w", deployment.MaybeDataErr(err))
+ }
+ if !mcmsEnabled {
+ _, err = deployment.ConfirmIfNoError(homeChain, updateDonTx, err)
+ if err != nil {
+ return nil, fmt.Errorf("error confirming updateDon call: %w", err)
+ }
+ }
+
+ return []mcms.Operation{{
+ To: capReg.Address(),
+ Data: updateDonTx.Data(),
+ Value: big.NewInt(0),
+ }}, nil
+}
diff --git a/deployment/ccip/changeset/cs_ccip_home_test.go b/deployment/ccip/changeset/cs_ccip_home_test.go
index 92784551957..b728e7b0c1d 100644
--- a/deployment/ccip/changeset/cs_ccip_home_test.go
+++ b/deployment/ccip/changeset/cs_ccip_home_test.go
@@ -4,20 +4,13 @@ import (
"testing"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
- "github.com/ethereum/go-ethereum/common"
- "github.com/smartcontractkit/ccip-owner-contracts/pkg/gethwrappers"
- "github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/mcms"
- "github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/timelock"
"golang.org/x/exp/maps"
"github.com/smartcontractkit/chainlink-testing-framework/lib/utils/testcontext"
"github.com/smartcontractkit/chainlink/deployment/ccip/changeset/internal"
"github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
- cctypes "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router"
-
- "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/stretchr/testify/require"
@@ -25,243 +18,215 @@ import (
"github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
)
-func TestActiveCandidate(t *testing.T) {
- t.Skipf("to be enabled after latest cl-ccip is compatible")
+func Test_PromoteCandidate(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ mcmsEnabled bool
+ }{
+ {
+ name: "MCMS enabled",
+ mcmsEnabled: true,
+ },
+ {
+ name: "MCMS disabled",
+ mcmsEnabled: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx := testcontext.Get(t)
+ tenv := NewMemoryEnvironment(t,
+ WithChains(2),
+ WithNodes(4))
+ state, err := LoadOnchainState(tenv.Env)
+ require.NoError(t, err)
- tenv := NewMemoryEnvironment(t,
- WithChains(3),
- WithNodes(5))
- e := tenv.Env
- state, err := LoadOnchainState(tenv.Env)
- require.NoError(t, err)
- allChains := maps.Keys(e.Chains)
+ // Deploy to all chains.
+ allChains := maps.Keys(tenv.Env.Chains)
+ source := allChains[0]
+ dest := allChains[1]
- // Add all lanes
- require.NoError(t, AddLanesForAll(e, state))
- // Need to keep track of the block number for each chain so that event subscription can be done from that block.
- startBlocks := make(map[uint64]*uint64)
- // Send a message from each chain to every other chain.
- expectedSeqNum := make(map[SourceDestPair]uint64)
- expectedSeqNumExec := make(map[SourceDestPair][]uint64)
- for src := range e.Chains {
- for dest, destChain := range e.Chains {
- if src == dest {
- continue
+ if tc.mcmsEnabled {
+ // Transfer ownership to timelock so that we can promote the zero digest later down the line.
+ transferToTimelock(t, tenv, state, source, dest)
}
- latesthdr, err := destChain.Client.HeaderByNumber(testcontext.Get(t), nil)
- require.NoError(t, err)
- block := latesthdr.Number.Uint64()
- startBlocks[dest] = &block
- msgSentEvent := TestSendRequest(t, e, state, src, dest, false, router.ClientEVM2AnyMessage{
- Receiver: common.LeftPadBytes(state.Chains[dest].Receiver.Address().Bytes(), 32),
- Data: []byte("hello world"),
- TokenAmounts: nil,
- FeeToken: common.HexToAddress("0x0"),
- ExtraArgs: nil,
- })
- expectedSeqNum[SourceDestPair{
- SourceChainSelector: src,
- DestChainSelector: dest,
- }] = msgSentEvent.SequenceNumber
- expectedSeqNumExec[SourceDestPair{
- SourceChainSelector: src,
- DestChainSelector: dest,
- }] = []uint64{msgSentEvent.SequenceNumber}
- }
- }
- // Wait for all commit reports to land.
- ConfirmCommitForAllWithExpectedSeqNums(t, e, state, expectedSeqNum, startBlocks)
+ var (
+ capReg = state.Chains[tenv.HomeChainSel].CapabilityRegistry
+ ccipHome = state.Chains[tenv.HomeChainSel].CCIPHome
+ )
+ donID, err := internal.DonIDForChain(capReg, ccipHome, dest)
+ require.NoError(t, err)
+ require.NotEqual(t, uint32(0), donID)
+ candidateDigestCommitBefore, err := ccipHome.GetCandidateDigest(&bind.CallOpts{
+ Context: ctx,
+ }, donID, uint8(types.PluginTypeCCIPCommit))
+ require.NoError(t, err)
+ require.Equal(t, [32]byte{}, candidateDigestCommitBefore)
+ candidateDigestExecBefore, err := ccipHome.GetCandidateDigest(&bind.CallOpts{
+ Context: ctx,
+ }, donID, uint8(types.PluginTypeCCIPExec))
+ require.NoError(t, err)
+ require.Equal(t, [32]byte{}, candidateDigestExecBefore)
- //After commit is reported on all chains, token prices should be updated in FeeQuoter.
- for dest := range e.Chains {
- linkAddress := state.Chains[dest].LinkToken.Address()
- feeQuoter := state.Chains[dest].FeeQuoter
- timestampedPrice, err := feeQuoter.GetTokenPrice(nil, linkAddress)
- require.NoError(t, err)
- require.Equal(t, MockLinkPrice, timestampedPrice.Value)
- }
+ var mcmsConfig *MCMSConfig
+ if tc.mcmsEnabled {
+ mcmsConfig = &MCMSConfig{
+ MinDelay: 0,
+ }
+ }
+ _, err = commonchangeset.ApplyChangesets(t, tenv.Env, map[uint64]*proposalutils.TimelockExecutionContracts{
+ tenv.HomeChainSel: {
+ Timelock: state.Chains[tenv.HomeChainSel].Timelock,
+ CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
+ },
+ }, []commonchangeset.ChangesetApplication{
+ {
+ Changeset: commonchangeset.WrapChangeSet(PromoteAllCandidatesChangeset),
+ Config: PromoteAllCandidatesChangesetConfig{
+ HomeChainSelector: tenv.HomeChainSel,
+ DONChainSelector: dest,
+ MCMS: mcmsConfig,
+ },
+ },
+ })
+ require.NoError(t, err)
- //Wait for all exec reports to land
- ConfirmExecWithSeqNrsForAll(t, e, state, expectedSeqNumExec, startBlocks)
+ // after promoting the zero digest, active digest should also be zero
+ activeDigestCommit, err := ccipHome.GetActiveDigest(&bind.CallOpts{
+ Context: ctx,
+ }, donID, uint8(types.PluginTypeCCIPCommit))
+ require.NoError(t, err)
+ require.Equal(t, [32]byte{}, activeDigestCommit)
- // compose the transfer ownership and accept ownership changesets
- timelockContracts := make(map[uint64]*commonchangeset.TimelockExecutionContracts)
- for _, chain := range allChains {
- timelockContracts[chain] = &commonchangeset.TimelockExecutionContracts{
- Timelock: state.Chains[chain].Timelock,
- CallProxy: state.Chains[chain].CallProxy,
- }
+ activeDigestExec, err := ccipHome.GetActiveDigest(&bind.CallOpts{
+ Context: ctx,
+ }, donID, uint8(types.PluginTypeCCIPExec))
+ require.NoError(t, err)
+ require.Equal(t, [32]byte{}, activeDigestExec)
+ })
}
+}
- _, err = commonchangeset.ApplyChangesets(t, e, timelockContracts, []commonchangeset.ChangesetApplication{
- // note this doesn't have proposals.
+func Test_SetCandidate(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ mcmsEnabled bool
+ }{
{
- Changeset: commonchangeset.WrapChangeSet(commonchangeset.TransferToMCMSWithTimelock),
- Config: genTestTransferOwnershipConfig(tenv, allChains, state),
+ name: "MCMS enabled",
+ mcmsEnabled: true,
},
- })
- require.NoError(t, err)
- // Apply the accept ownership proposal to all the chains.
-
- err = ConfirmRequestOnSourceAndDest(t, e, state, tenv.HomeChainSel, tenv.FeedChainSel, 2)
- require.NoError(t, err)
-
- // [ACTIVE, CANDIDATE] setup by setting candidate through cap reg
- capReg, ccipHome := state.Chains[tenv.HomeChainSel].CapabilityRegistry, state.Chains[tenv.HomeChainSel].CCIPHome
- donID, err := internal.DonIDForChain(capReg, ccipHome, tenv.FeedChainSel)
- require.NoError(t, err)
- require.NotEqual(t, uint32(0), donID)
- donInfo, err := state.Chains[tenv.HomeChainSel].CapabilityRegistry.GetDON(nil, donID)
- require.NoError(t, err)
- require.Equal(t, 5, len(donInfo.NodeP2PIds))
- require.Equal(t, uint32(4), donInfo.ConfigCount)
-
- state, err = LoadOnchainState(e)
- require.NoError(t, err)
-
- // delete a non-bootstrap node
- nodes, err := deployment.NodeInfo(e.NodeIDs, e.Offchain)
- require.NoError(t, err)
- var newNodeIDs []string
- // make sure we delete a node that is NOT bootstrap.
- // we will remove bootstrap later by calling nodes.NonBootstrap()
- if nodes[0].IsBootstrap {
- newNodeIDs = e.NodeIDs[:len(e.NodeIDs)-1]
- } else {
- newNodeIDs = e.NodeIDs[1:]
- }
- nodes, err = deployment.NodeInfo(newNodeIDs, e.Offchain)
- require.NoError(t, err)
-
- // this will construct ocr3 configurations for the
- // commit and exec plugin we will be using
- rmnHomeAddress := state.Chains[tenv.HomeChainSel].RMNHome.Address()
- tokenConfig := NewTestTokenConfig(state.Chains[tenv.FeedChainSel].USDFeeds)
- ccipOCRParams := DefaultOCRParams(
- tenv.FeedChainSel,
- tokenConfig.GetTokenInfo(e.Logger, state.Chains[tenv.FeedChainSel].LinkToken, state.Chains[tenv.FeedChainSel].Weth9),
- nil,
- )
- ocr3ConfigMap, err := internal.BuildOCR3ConfigForCCIPHome(
- e.OCRSecrets,
- state.Chains[tenv.FeedChainSel].OffRamp,
- e.Chains[tenv.FeedChainSel],
- nodes.NonBootstraps(),
- rmnHomeAddress,
- ccipOCRParams.OCRParameters,
- ccipOCRParams.CommitOffChainConfig,
- ccipOCRParams.ExecuteOffChainConfig,
- )
- require.NoError(t, err)
-
- var (
- timelocksPerChain = map[uint64]common.Address{
- tenv.HomeChainSel: state.Chains[tenv.HomeChainSel].Timelock.Address(),
- }
- proposerMCMSes = map[uint64]*gethwrappers.ManyChainMultiSig{
- tenv.HomeChainSel: state.Chains[tenv.HomeChainSel].ProposerMcm,
- }
- )
- setCommitCandidateOp, err := setCandidateOnExistingDon(
- ocr3ConfigMap[cctypes.PluginTypeCCIPCommit],
- state.Chains[tenv.HomeChainSel].CapabilityRegistry,
- state.Chains[tenv.HomeChainSel].CCIPHome,
- tenv.FeedChainSel,
- nodes.NonBootstraps(),
- )
- require.NoError(t, err)
- setCommitCandidateProposal, err := proposalutils.BuildProposalFromBatches(timelocksPerChain, proposerMCMSes, []timelock.BatchChainOperation{{
- ChainIdentifier: mcms.ChainIdentifier(tenv.HomeChainSel),
- Batch: setCommitCandidateOp,
- }}, "set new candidates on commit plugin", 0)
- require.NoError(t, err)
- setCommitCandidateSigned := commonchangeset.SignProposal(t, e, setCommitCandidateProposal)
- commonchangeset.ExecuteProposal(t, e, setCommitCandidateSigned, &commonchangeset.TimelockExecutionContracts{
- Timelock: state.Chains[tenv.HomeChainSel].Timelock,
- CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
- }, tenv.HomeChainSel)
-
- // create the op for the commit plugin as well
- setExecCandidateOp, err := setCandidateOnExistingDon(
- ocr3ConfigMap[cctypes.PluginTypeCCIPExec],
- state.Chains[tenv.HomeChainSel].CapabilityRegistry,
- state.Chains[tenv.HomeChainSel].CCIPHome,
- tenv.FeedChainSel,
- nodes.NonBootstraps(),
- )
- require.NoError(t, err)
-
- setExecCandidateProposal, err := proposalutils.BuildProposalFromBatches(timelocksPerChain, proposerMCMSes, []timelock.BatchChainOperation{{
- ChainIdentifier: mcms.ChainIdentifier(tenv.HomeChainSel),
- Batch: setExecCandidateOp,
- }}, "set new candidates on commit and exec plugins", 0)
- require.NoError(t, err)
- setExecCandidateSigned := commonchangeset.SignProposal(t, e, setExecCandidateProposal)
- commonchangeset.ExecuteProposal(t, e, setExecCandidateSigned, &commonchangeset.TimelockExecutionContracts{
- Timelock: state.Chains[tenv.HomeChainSel].Timelock,
- CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
- }, tenv.HomeChainSel)
-
- // check setup was successful by confirming number of nodes from cap reg
- donInfo, err = state.Chains[tenv.HomeChainSel].CapabilityRegistry.GetDON(nil, donID)
- require.NoError(t, err)
- require.Equal(t, 4, len(donInfo.NodeP2PIds))
- require.Equal(t, uint32(6), donInfo.ConfigCount)
- // [ACTIVE, CANDIDATE] done setup
-
- // [ACTIVE, CANDIDATE] make sure we can still send successful transaction without updating job specs
- err = ConfirmRequestOnSourceAndDest(t, e, state, tenv.HomeChainSel, tenv.FeedChainSel, 3)
- require.NoError(t, err)
- // [ACTIVE, CANDIDATE] done send successful transaction on active
+ {
+ name: "MCMS disabled",
+ mcmsEnabled: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx := testcontext.Get(t)
+ tenv := NewMemoryEnvironment(t,
+ WithChains(2),
+ WithNodes(4))
+ state, err := LoadOnchainState(tenv.Env)
+ require.NoError(t, err)
- // [NEW ACTIVE, NO CANDIDATE] promote to active
- // confirm by getting old candidate digest and making sure new active matches
- oldCandidateDigest, err := state.Chains[tenv.HomeChainSel].CCIPHome.GetCandidateDigest(nil, donID, uint8(cctypes.PluginTypeCCIPExec))
- require.NoError(t, err)
+ // Deploy to all chains.
+ allChains := maps.Keys(tenv.Env.Chains)
+ source := allChains[0]
+ dest := allChains[1]
- promoteOps, err := promoteAllCandidatesForChainOps(
- tenv.Env.Chains[tenv.HomeChainSel],
- deployment.SimTransactOpts(),
- state.Chains[tenv.HomeChainSel].CapabilityRegistry,
- state.Chains[tenv.HomeChainSel].CCIPHome,
- tenv.FeedChainSel,
- nodes.NonBootstraps(),
- true)
- require.NoError(t, err)
- promoteProposal, err := proposalutils.BuildProposalFromBatches(timelocksPerChain, proposerMCMSes, []timelock.BatchChainOperation{{
- ChainIdentifier: mcms.ChainIdentifier(tenv.HomeChainSel),
- Batch: promoteOps,
- }}, "promote candidates and revoke actives", 0)
- require.NoError(t, err)
- promoteSigned := commonchangeset.SignProposal(t, e, promoteProposal)
- commonchangeset.ExecuteProposal(t, e, promoteSigned, &commonchangeset.TimelockExecutionContracts{
- Timelock: state.Chains[tenv.HomeChainSel].Timelock,
- CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
- }, tenv.HomeChainSel)
- // [NEW ACTIVE, NO CANDIDATE] done promoting
+ if tc.mcmsEnabled {
+ // Transfer ownership to timelock so that we can promote the zero digest later down the line.
+ transferToTimelock(t, tenv, state, source, dest)
+ }
- // [NEW ACTIVE, NO CANDIDATE] check onchain state
- newActiveDigest, err := state.Chains[tenv.HomeChainSel].CCIPHome.GetActiveDigest(nil, donID, uint8(cctypes.PluginTypeCCIPExec))
- require.NoError(t, err)
- require.Equal(t, oldCandidateDigest, newActiveDigest)
+ var (
+ capReg = state.Chains[tenv.HomeChainSel].CapabilityRegistry
+ ccipHome = state.Chains[tenv.HomeChainSel].CCIPHome
+ )
+ donID, err := internal.DonIDForChain(capReg, ccipHome, dest)
+ require.NoError(t, err)
+ require.NotEqual(t, uint32(0), donID)
+ candidateDigestCommitBefore, err := ccipHome.GetCandidateDigest(&bind.CallOpts{
+ Context: ctx,
+ }, donID, uint8(types.PluginTypeCCIPCommit))
+ require.NoError(t, err)
+ require.Equal(t, [32]byte{}, candidateDigestCommitBefore)
+ candidateDigestExecBefore, err := ccipHome.GetCandidateDigest(&bind.CallOpts{
+ Context: ctx,
+ }, donID, uint8(types.PluginTypeCCIPExec))
+ require.NoError(t, err)
+ require.Equal(t, [32]byte{}, candidateDigestExecBefore)
- newCandidateDigest, err := state.Chains[tenv.HomeChainSel].CCIPHome.GetCandidateDigest(nil, donID, uint8(cctypes.PluginTypeCCIPCommit))
- require.NoError(t, err)
- require.Equal(t, newCandidateDigest, [32]byte{})
- // [NEW ACTIVE, NO CANDIDATE] done checking on chain state
+ var mcmsConfig *MCMSConfig
+ if tc.mcmsEnabled {
+ mcmsConfig = &MCMSConfig{
+ MinDelay: 0,
+ }
+ }
+ tokenConfig := NewTestTokenConfig(state.Chains[tenv.FeedChainSel].USDFeeds)
+ _, err = commonchangeset.ApplyChangesets(t, tenv.Env, map[uint64]*proposalutils.TimelockExecutionContracts{
+ tenv.HomeChainSel: {
+ Timelock: state.Chains[tenv.HomeChainSel].Timelock,
+ CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
+ },
+ }, []commonchangeset.ChangesetApplication{
+ {
+ Changeset: commonchangeset.WrapChangeSet(SetCandidateChangeset),
+ Config: SetCandidateChangesetConfig{
+ SetCandidateConfigBase: SetCandidateConfigBase{
+ HomeChainSelector: tenv.HomeChainSel,
+ FeedChainSelector: tenv.FeedChainSel,
+ DONChainSelector: dest,
+ PluginType: types.PluginTypeCCIPCommit,
+ CCIPOCRParams: DefaultOCRParams(
+ tenv.FeedChainSel,
+ tokenConfig.GetTokenInfo(logger.TestLogger(t), state.Chains[dest].LinkToken, state.Chains[dest].Weth9),
+ nil,
+ ),
+ MCMS: mcmsConfig,
+ },
+ },
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(SetCandidateChangeset),
+ Config: SetCandidateChangesetConfig{
+ SetCandidateConfigBase: SetCandidateConfigBase{
+ HomeChainSelector: tenv.HomeChainSel,
+ FeedChainSelector: tenv.FeedChainSel,
+ DONChainSelector: dest,
+ PluginType: types.PluginTypeCCIPExec,
+ CCIPOCRParams: DefaultOCRParams(
+ tenv.FeedChainSel,
+ tokenConfig.GetTokenInfo(logger.TestLogger(t), state.Chains[dest].LinkToken, state.Chains[dest].Weth9),
+ nil,
+ ),
+ MCMS: mcmsConfig,
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
- // [NEW ACTIVE, NO CANDIDATE] send successful request on new active
- donInfo, err = state.Chains[tenv.HomeChainSel].CapabilityRegistry.GetDON(nil, donID)
- require.NoError(t, err)
- require.Equal(t, uint32(8), donInfo.ConfigCount)
+ // after setting a new candidate on both plugins, the candidate config digest
+ // should be nonzero.
+ candidateDigestCommitAfter, err := ccipHome.GetCandidateDigest(&bind.CallOpts{
+ Context: ctx,
+ }, donID, uint8(types.PluginTypeCCIPCommit))
+ require.NoError(t, err)
+ require.NotEqual(t, [32]byte{}, candidateDigestCommitAfter)
+ require.NotEqual(t, candidateDigestCommitBefore, candidateDigestCommitAfter)
- err = ConfirmRequestOnSourceAndDest(t, e, state, tenv.HomeChainSel, tenv.FeedChainSel, 4)
- require.NoError(t, err)
- // [NEW ACTIVE, NO CANDIDATE] done sending successful request
+ candidateDigestExecAfter, err := ccipHome.GetCandidateDigest(&bind.CallOpts{
+ Context: ctx,
+ }, donID, uint8(types.PluginTypeCCIPExec))
+ require.NoError(t, err)
+ require.NotEqual(t, [32]byte{}, candidateDigestExecAfter)
+ require.NotEqual(t, candidateDigestExecBefore, candidateDigestExecAfter)
+ })
+ }
}
-func Test_PromoteCandidate(t *testing.T) {
+func Test_RevokeCandidate(t *testing.T) {
for _, tc := range []struct {
name string
mcmsEnabled bool
@@ -288,37 +253,9 @@ func Test_PromoteCandidate(t *testing.T) {
source := allChains[0]
dest := allChains[1]
- nodes, err := deployment.NodeInfo(tenv.Env.NodeIDs, tenv.Env.Offchain)
- require.NoError(t, err)
-
- var nodeIDs []string
- for _, node := range nodes {
- nodeIDs = append(nodeIDs, node.NodeID)
- }
-
if tc.mcmsEnabled {
// Transfer ownership to timelock so that we can promote the zero digest later down the line.
- _, err = commonchangeset.ApplyChangesets(t, tenv.Env, map[uint64]*commonchangeset.TimelockExecutionContracts{
- source: {
- Timelock: state.Chains[source].Timelock,
- CallProxy: state.Chains[source].CallProxy,
- },
- dest: {
- Timelock: state.Chains[dest].Timelock,
- CallProxy: state.Chains[dest].CallProxy,
- },
- tenv.HomeChainSel: {
- Timelock: state.Chains[tenv.HomeChainSel].Timelock,
- CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
- },
- }, []commonchangeset.ChangesetApplication{
- {
- Changeset: commonchangeset.WrapChangeSet(commonchangeset.TransferToMCMSWithTimelock),
- Config: genTestTransferOwnershipConfig(tenv, allChains, state),
- },
- })
- require.NoError(t, err)
- assertTimelockOwnership(t, tenv, allChains, state)
+ transferToTimelock(t, tenv, state, source, dest)
}
var (
@@ -345,36 +282,136 @@ func Test_PromoteCandidate(t *testing.T) {
MinDelay: 0,
}
}
- _, err = commonchangeset.ApplyChangesets(t, tenv.Env, map[uint64]*commonchangeset.TimelockExecutionContracts{
+ tokenConfig := NewTestTokenConfig(state.Chains[tenv.FeedChainSel].USDFeeds)
+ _, err = commonchangeset.ApplyChangesets(t, tenv.Env, map[uint64]*proposalutils.TimelockExecutionContracts{
tenv.HomeChainSel: {
Timelock: state.Chains[tenv.HomeChainSel].Timelock,
CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
},
}, []commonchangeset.ChangesetApplication{
{
- Changeset: commonchangeset.WrapChangeSet(PromoteAllCandidatesChangeset),
- Config: PromoteAllCandidatesChangesetConfig{
+ Changeset: commonchangeset.WrapChangeSet(SetCandidateChangeset),
+ Config: SetCandidateChangesetConfig{
+ SetCandidateConfigBase: SetCandidateConfigBase{
+ HomeChainSelector: tenv.HomeChainSel,
+ FeedChainSelector: tenv.FeedChainSel,
+ DONChainSelector: dest,
+ PluginType: types.PluginTypeCCIPCommit,
+ CCIPOCRParams: DefaultOCRParams(
+ tenv.FeedChainSel,
+ tokenConfig.GetTokenInfo(logger.TestLogger(t), state.Chains[dest].LinkToken, state.Chains[dest].Weth9),
+ nil,
+ ),
+ MCMS: mcmsConfig,
+ },
+ },
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(SetCandidateChangeset),
+ Config: SetCandidateChangesetConfig{
+ SetCandidateConfigBase: SetCandidateConfigBase{
+ HomeChainSelector: tenv.HomeChainSel,
+ FeedChainSelector: tenv.FeedChainSel,
+ DONChainSelector: dest,
+ PluginType: types.PluginTypeCCIPExec,
+ CCIPOCRParams: DefaultOCRParams(
+ tenv.FeedChainSel,
+ tokenConfig.GetTokenInfo(logger.TestLogger(t), state.Chains[dest].LinkToken, state.Chains[dest].Weth9),
+ nil,
+ ),
+ MCMS: mcmsConfig,
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ // after setting a new candidate on both plugins, the candidate config digest
+ // should be nonzero.
+ candidateDigestCommitAfter, err := ccipHome.GetCandidateDigest(&bind.CallOpts{
+ Context: ctx,
+ }, donID, uint8(types.PluginTypeCCIPCommit))
+ require.NoError(t, err)
+ require.NotEqual(t, [32]byte{}, candidateDigestCommitAfter)
+ require.NotEqual(t, candidateDigestCommitBefore, candidateDigestCommitAfter)
+
+ candidateDigestExecAfter, err := ccipHome.GetCandidateDigest(&bind.CallOpts{
+ Context: ctx,
+ }, donID, uint8(types.PluginTypeCCIPExec))
+ require.NoError(t, err)
+ require.NotEqual(t, [32]byte{}, candidateDigestExecAfter)
+ require.NotEqual(t, candidateDigestExecBefore, candidateDigestExecAfter)
+
+ // next we can revoke candidate - this should set the candidate digest back to zero
+ _, err = commonchangeset.ApplyChangesets(t, tenv.Env, map[uint64]*proposalutils.TimelockExecutionContracts{
+ tenv.HomeChainSel: {
+ Timelock: state.Chains[tenv.HomeChainSel].Timelock,
+ CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
+ },
+ }, []commonchangeset.ChangesetApplication{
+ {
+ Changeset: commonchangeset.WrapChangeSet(RevokeCandidateChangeset),
+ Config: RevokeCandidateChangesetConfig{
+ HomeChainSelector: tenv.HomeChainSel,
+ DONChainSelector: dest,
+ PluginType: types.PluginTypeCCIPCommit,
+ MCMS: mcmsConfig,
+ },
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(RevokeCandidateChangeset),
+ Config: RevokeCandidateChangesetConfig{
HomeChainSelector: tenv.HomeChainSel,
DONChainSelector: dest,
- NodeIDs: nodeIDs,
+ PluginType: types.PluginTypeCCIPExec,
MCMS: mcmsConfig,
},
},
})
require.NoError(t, err)
- // after promoting the zero digest, active digest should also be zero
- activeDigestCommit, err := ccipHome.GetActiveDigest(&bind.CallOpts{
+ // after revoking the candidate, the candidate digest should be zero
+ candidateDigestCommitAfterRevoke, err := ccipHome.GetCandidateDigest(&bind.CallOpts{
Context: ctx,
}, donID, uint8(types.PluginTypeCCIPCommit))
require.NoError(t, err)
- require.Equal(t, [32]byte{}, activeDigestCommit)
+ require.Equal(t, [32]byte{}, candidateDigestCommitAfterRevoke)
- activeDigestExec, err := ccipHome.GetActiveDigest(&bind.CallOpts{
+ candidateDigestExecAfterRevoke, err := ccipHome.GetCandidateDigest(&bind.CallOpts{
Context: ctx,
}, donID, uint8(types.PluginTypeCCIPExec))
require.NoError(t, err)
- require.Equal(t, [32]byte{}, activeDigestExec)
+ require.Equal(t, [32]byte{}, candidateDigestExecAfterRevoke)
})
}
}
+
+func transferToTimelock(
+ t *testing.T,
+ tenv DeployedEnv,
+ state CCIPOnChainState,
+ source,
+ dest uint64) {
+ // Transfer ownership to timelock so that we can promote the zero digest later down the line.
+ _, err := commonchangeset.ApplyChangesets(t, tenv.Env, map[uint64]*proposalutils.TimelockExecutionContracts{
+ source: {
+ Timelock: state.Chains[source].Timelock,
+ CallProxy: state.Chains[source].CallProxy,
+ },
+ dest: {
+ Timelock: state.Chains[dest].Timelock,
+ CallProxy: state.Chains[dest].CallProxy,
+ },
+ tenv.HomeChainSel: {
+ Timelock: state.Chains[tenv.HomeChainSel].Timelock,
+ CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
+ },
+ }, []commonchangeset.ChangesetApplication{
+ {
+ Changeset: commonchangeset.WrapChangeSet(commonchangeset.TransferToMCMSWithTimelock),
+ Config: genTestTransferOwnershipConfig(tenv, []uint64{source, dest}, state),
+ },
+ })
+ require.NoError(t, err)
+ assertTimelockOwnership(t, tenv, []uint64{source, dest}, state)
+}
diff --git a/deployment/ccip/changeset/cs_deploy_chain.go b/deployment/ccip/changeset/cs_deploy_chain.go
index 5a8d8cf7843..e403481b001 100644
--- a/deployment/ccip/changeset/cs_deploy_chain.go
+++ b/deployment/ccip/changeset/cs_deploy_chain.go
@@ -181,17 +181,6 @@ func deployChainContracts(
e.Logger.Errorw("RMNProxy not found", "chain", chain.String())
return fmt.Errorf("rmn proxy not found for chain %s, deploy the prerequisites first", chain.String())
}
- var rmnLegacyAddr common.Address
- if chainState.MockRMN != nil {
- rmnLegacyAddr = chainState.MockRMN.Address()
- }
- // If RMN is deployed, set rmnLegacyAddr to the RMN address
- if chainState.RMN != nil {
- rmnLegacyAddr = chainState.RMN.Address()
- }
- if rmnLegacyAddr == (common.Address{}) {
- e.Logger.Warnf("No legacy RMN contract found for chain %s, will not setRMN in RMNRemote", chain.String())
- }
if chainState.Receiver == nil {
_, err := deployment.DeployContract(e.Logger, chain, ab,
func(chain deployment.Chain) deployment.ContractDeploy[*maybe_revert_message_receiver.MaybeRevertMessageReceiver] {
@@ -211,6 +200,17 @@ func deployChainContracts(
} else {
e.Logger.Infow("receiver already deployed", "addr", chainState.Receiver.Address, "chain", chain.String())
}
+ var rmnLegacyAddr common.Address
+ if chainState.MockRMN != nil {
+ rmnLegacyAddr = chainState.MockRMN.Address()
+ }
+ // If RMN is deployed, set rmnLegacyAddr to the RMN address
+ if chainState.RMN != nil {
+ rmnLegacyAddr = chainState.RMN.Address()
+ }
+ if rmnLegacyAddr == (common.Address{}) {
+ e.Logger.Warnf("No legacy RMN contract found for chain %s, will not setRMN in RMNRemote", chain.String())
+ }
rmnRemoteContract := chainState.RMNRemote
if chainState.RMNRemote == nil {
// TODO: Correctly configure RMN remote.
@@ -234,6 +234,7 @@ func deployChainContracts(
} else {
e.Logger.Infow("rmn remote already deployed", "chain", chain.String(), "addr", chainState.RMNRemote.Address)
}
+
activeDigest, err := rmnHome.GetActiveDigest(&bind.CallOpts{})
if err != nil {
e.Logger.Errorw("Failed to get active digest", "chain", chain.String(), "err", err)
@@ -252,7 +253,6 @@ func deployChainContracts(
e.Logger.Errorw("Failed to confirm RMNRemote config", "chain", chain.String(), "err", err)
return err
}
-
if chainState.TestRouter == nil {
_, err := deployment.DeployContract(e.Logger, chain, ab,
func(chain deployment.Chain) deployment.ContractDeploy[*router.Router] {
diff --git a/deployment/ccip/changeset/cs_deploy_chain_test.go b/deployment/ccip/changeset/cs_deploy_chain_test.go
index 9c977241ca1..a72b1b1568b 100644
--- a/deployment/ccip/changeset/cs_deploy_chain_test.go
+++ b/deployment/ccip/changeset/cs_deploy_chain_test.go
@@ -3,7 +3,6 @@ package changeset
import (
"encoding/json"
"fmt"
- "math/big"
"testing"
"github.com/stretchr/testify/require"
@@ -11,12 +10,14 @@ import (
"github.com/smartcontractkit/chainlink/deployment"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
commontypes "github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
"github.com/smartcontractkit/chainlink/v2/core/logger"
)
func TestDeployChainContractsChangeset(t *testing.T) {
+ t.Parallel()
lggr := logger.TestLogger(t)
e := memory.NewMemoryEnvironment(t, lggr, zapcore.InfoLevel, memory.MemoryEnvironmentConfig{
Bootstraps: 1,
@@ -30,12 +31,7 @@ func TestDeployChainContractsChangeset(t *testing.T) {
p2pIds := nodes.NonBootstraps().PeerIDs()
cfg := make(map[uint64]commontypes.MCMSWithTimelockConfig)
for _, chain := range e.AllChainSelectors() {
- cfg[chain] = commontypes.MCMSWithTimelockConfig{
- Canceller: commonchangeset.SingleGroupMCMS(t),
- Bypasser: commonchangeset.SingleGroupMCMS(t),
- Proposer: commonchangeset.SingleGroupMCMS(t),
- TimelockMinDelay: big.NewInt(0),
- }
+ cfg[chain] = proposalutils.SingleGroupTimelockConfig(t)
}
var prereqCfg []DeployPrerequisiteConfigPerChain
for _, chain := range e.AllChainSelectors() {
@@ -104,6 +100,7 @@ func TestDeployChainContractsChangeset(t *testing.T) {
}
func TestDeployCCIPContracts(t *testing.T) {
+ t.Parallel()
e := NewMemoryEnvironment(t)
// Deploy all the CCIP contracts.
state, err := LoadOnchainState(e.Env)
diff --git a/deployment/ccip/changeset/cs_home_chain_test.go b/deployment/ccip/changeset/cs_home_chain_test.go
index a06161f7086..eb620691db0 100644
--- a/deployment/ccip/changeset/cs_home_chain_test.go
+++ b/deployment/ccip/changeset/cs_home_chain_test.go
@@ -13,6 +13,7 @@ import (
)
func TestDeployHomeChain(t *testing.T) {
+ t.Parallel()
lggr := logger.TestLogger(t)
e := memory.NewMemoryEnvironment(t, lggr, zapcore.InfoLevel, memory.MemoryEnvironmentConfig{
Bootstraps: 1,
diff --git a/deployment/ccip/changeset/cs_initial_add_chain.go b/deployment/ccip/changeset/cs_initial_add_chain.go
index 5ba648d74b5..4f8b2ac2722 100644
--- a/deployment/ccip/changeset/cs_initial_add_chain.go
+++ b/deployment/ccip/changeset/cs_initial_add_chain.go
@@ -483,7 +483,7 @@ func ValidateCCIPHomeConfigSetUp(
return fmt.Errorf("fetch don id for chain: %w", err)
}
if donID == 0 {
- return fmt.Errorf("don id for chain(%d) does not exist", chainSel)
+ return fmt.Errorf("don id for chain (%d) does not exist", chainSel)
}
// final sanity checks on configs.
diff --git a/deployment/ccip/changeset/cs_initial_add_chain_test.go b/deployment/ccip/changeset/cs_initial_add_chain_test.go
index c1404eb7123..7e155b82ed1 100644
--- a/deployment/ccip/changeset/cs_initial_add_chain_test.go
+++ b/deployment/ccip/changeset/cs_initial_add_chain_test.go
@@ -9,10 +9,12 @@ import (
"github.com/stretchr/testify/require"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router"
)
func TestInitialAddChainAppliedTwice(t *testing.T) {
+ t.Parallel()
// This already applies the initial add chain changeset.
e := NewMemoryEnvironment(t)
@@ -24,10 +26,10 @@ func TestInitialAddChainAppliedTwice(t *testing.T) {
allChains := e.Env.AllChainSelectors()
tokenConfig := NewTestTokenConfig(state.Chains[e.FeedChainSel].USDFeeds)
chainConfigs := make(map[uint64]CCIPOCRParams)
- timelockContractsPerChain := make(map[uint64]*commonchangeset.TimelockExecutionContracts)
+ timelockContractsPerChain := make(map[uint64]*proposalutils.TimelockExecutionContracts)
for _, chain := range allChains {
- timelockContractsPerChain[chain] = &commonchangeset.TimelockExecutionContracts{
+ timelockContractsPerChain[chain] = &proposalutils.TimelockExecutionContracts{
Timelock: state.Chains[chain].Timelock,
CallProxy: state.Chains[chain].CallProxy,
}
@@ -48,7 +50,6 @@ func TestInitialAddChainAppliedTwice(t *testing.T) {
require.NoError(t, err)
// send requests
chain1, chain2 := allChains[0], allChains[1]
-
_, err = AddLanes(e.Env, AddLanesConfig{
LaneConfigs: []LaneConfig{
{
diff --git a/deployment/ccip/changeset/cs_jobspec_test.go b/deployment/ccip/changeset/cs_jobspec_test.go
index 21e80e85aa2..a0445b0d5ee 100644
--- a/deployment/ccip/changeset/cs_jobspec_test.go
+++ b/deployment/ccip/changeset/cs_jobspec_test.go
@@ -13,6 +13,7 @@ import (
)
func TestJobSpecChangeset(t *testing.T) {
+ t.Parallel()
lggr := logger.TestLogger(t)
e := memory.NewMemoryEnvironment(t, lggr, zapcore.InfoLevel, memory.MemoryEnvironmentConfig{
Chains: 1,
diff --git a/deployment/ccip/changeset/cs_prerequisites.go b/deployment/ccip/changeset/cs_prerequisites.go
index d611a9aae6b..919eb42f8ef 100644
--- a/deployment/ccip/changeset/cs_prerequisites.go
+++ b/deployment/ccip/changeset/cs_prerequisites.go
@@ -219,6 +219,37 @@ func deployPrerequisiteContracts(e deployment.Environment, ab deployment.Address
rmnProxy = rmnProxyContract.Contract
} else {
lggr.Infow("RMNProxy already deployed", "chain", chain.String(), "addr", rmnProxy.Address)
+ // check if the RMNProxy is pointing to the correct RMN contract
+ currentRMNAddr, err := rmnProxy.GetARM(nil)
+ if err != nil {
+ lggr.Errorw("Failed to get RMN from RMNProxy", "chain", chain.String(), "err", err)
+ return err
+ }
+ if currentRMNAddr != rmnAddr {
+ lggr.Infow("RMNProxy is not pointing to the correct RMN contract, updating RMN", "chain", chain.String(), "currentRMN", currentRMNAddr, "expectedRMN", rmnAddr)
+ rmnOwner, err := rmnProxy.Owner(nil)
+ if err != nil {
+ lggr.Errorw("Failed to get owner of RMNProxy", "chain", chain.String(), "err", err)
+ return err
+ }
+ if rmnOwner != chain.DeployerKey.From {
+ lggr.Warnw(
+ "RMNProxy is not owned by the deployer and RMNProxy is not pointing to the correct RMN contract, "+
+ "run SetRMNRemoteOnRMNProxy to update RMN with a proposal",
+ "chain", chain.String(), "owner", rmnOwner, "currentRMN", currentRMNAddr, "expectedRMN", rmnAddr)
+ } else {
+ tx, err := rmnProxy.SetARM(chain.DeployerKey, rmnAddr)
+ if err != nil {
+ lggr.Errorw("Failed to set RMN on RMNProxy", "chain", chain.String(), "err", err)
+ return err
+ }
+ _, err = chain.Confirm(tx)
+ if err != nil {
+ lggr.Errorw("Failed to confirm setRMN on RMNProxy", "chain", chain.String(), "err", err)
+ return err
+ }
+ }
+ }
}
if tokenAdminReg == nil {
tokenAdminRegistry, err := deployment.DeployContract(e.Logger, chain, ab,
diff --git a/deployment/ccip/changeset/cs_update_rmn_config.go b/deployment/ccip/changeset/cs_update_rmn_config.go
index b10991c977c..96f8eacb4cc 100644
--- a/deployment/ccip/changeset/cs_update_rmn_config.go
+++ b/deployment/ccip/changeset/cs_update_rmn_config.go
@@ -11,13 +11,146 @@ import (
"github.com/smartcontractkit/ccip-owner-contracts/pkg/gethwrappers"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/mcms"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/timelock"
+
"github.com/smartcontractkit/chainlink/deployment"
- commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
"github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/rmn_home"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/rmn_remote"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
)
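+// SetRMNRemoteOnRMNProxyConfig is the input for the SetRMNRemoteOnRMNProxy changeset:
+// the chains whose RMNProxy should be pointed at RMNRemote and, optionally, the MCMS
+// settings to use when the update must go through a timelock proposal.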
+type SetRMNRemoteOnRMNProxyConfig struct {
+ ChainSelectors []uint64
+ MCMSConfig *MCMSConfig
+}
+
+func (c SetRMNRemoteOnRMNProxyConfig) Validate(state CCIPOnChainState) error {
+ for _, chain := range c.ChainSelectors {
+ err := deployment.IsValidChainSelector(chain)
+ if err != nil {
+ return err
+ }
+ chainState, exists := state.Chains[chain]
+ if !exists {
+ return fmt.Errorf("chain %d not found in state", chain)
+ }
+ if chainState.RMNRemote == nil {
+ return fmt.Errorf("RMNRemote not found for chain %d", chain)
+ }
+ if chainState.RMNProxy == nil {
+ return fmt.Errorf("RMNProxy not found for chain %d", chain)
+ }
+ }
+ return nil
+}
+
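+// SetRMNRemoteOnRMNProxy points the RMNProxy on each configured chain at that chain's
+// RMNRemote contract. Without MCMSConfig the transactions are sent and confirmed with
+// the deployer key; with MCMSConfig the calls are returned as a timelock proposal.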
+func SetRMNRemoteOnRMNProxy(e deployment.Environment, cfg SetRMNRemoteOnRMNProxyConfig) (deployment.ChangesetOutput, error) {
+ state, err := LoadOnchainState(e)
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("failed to load onchain state: %w", err)
+ }
+ if err := cfg.Validate(state); err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ timelocks, err := state.GetAllTimeLocksForChains(cfg.ChainSelectors)
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("failed to get timelocks for chains %v: %w", cfg.ChainSelectors, err)
+ }
+ multiSigs, err := state.GetAllProposerMCMSForChains(cfg.ChainSelectors)
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("failed to get proposer MCMS for chains %v: %w", cfg.ChainSelectors, err)
+ }
+ var timelockBatch []timelock.BatchChainOperation
+ for _, sel := range cfg.ChainSelectors {
+ chain, exists := e.Chains[sel]
+ if !exists {
+ return deployment.ChangesetOutput{}, fmt.Errorf("chain %d not found", sel)
+ }
+ txOpts := chain.DeployerKey
+ if cfg.MCMSConfig != nil {
+ txOpts = deployment.SimTransactOpts()
+ }
+ mcmsOps, err := setRMNRemoteOnRMNProxyOp(txOpts, chain, state.Chains[sel], cfg.MCMSConfig != nil)
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("failed to set RMNRemote on RMNProxy for chain %s: %w", chain.String(), err)
+ }
+ if cfg.MCMSConfig != nil {
+ timelockBatch = append(timelockBatch, timelock.BatchChainOperation{
+ ChainIdentifier: mcms.ChainIdentifier(sel),
+ Batch: []mcms.Operation{mcmsOps},
+ })
+ }
+ }
+ // If we're not using MCMS, we can just return now as we've already confirmed the transactions
+ if len(timelockBatch) == 0 {
+ return deployment.ChangesetOutput{}, nil
+ }
+ prop, err := proposalutils.BuildProposalFromBatches(
+ timelocks,
+ multiSigs,
+ timelockBatch,
+ fmt.Sprintf("proposal to set RMNRemote on RMNProxy for chains %v", cfg.ChainSelectors),
+ cfg.MCMSConfig.MinDelay,
+ )
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ return deployment.ChangesetOutput{
+ Proposals: []timelock.MCMSWithTimelockProposal{
+ *prop,
+ },
+ }, nil
+}
+
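+// setRMNRemoteOnRMNProxyOp builds the setARM call on the RMNProxy and returns it as an
+// MCMS operation. When MCMS is not enabled, the transaction is also confirmed on chain.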
+func setRMNRemoteOnRMNProxyOp(txOpts *bind.TransactOpts, chain deployment.Chain, chainState CCIPChainState, mcmsEnabled bool) (mcms.Operation, error) {
+ rmnProxy := chainState.RMNProxy
+ rmnRemoteAddr := chainState.RMNRemote.Address()
+ setRMNTx, err := rmnProxy.SetARM(txOpts, rmnRemoteAddr)
+ if err != nil {
+ return mcms.Operation{}, fmt.Errorf("failed to build call data/transaction to set RMNRemote on RMNProxy for chain %s: %w", chain.String(), err)
+ }
+ if !mcmsEnabled {
+ _, err = deployment.ConfirmIfNoError(chain, setRMNTx, err)
+ if err != nil {
+ return mcms.Operation{}, fmt.Errorf("failed to confirm tx to set RMNRemote on RMNProxy for chain %s: %w", chain.String(), deployment.MaybeDataErr(err))
+ }
+ }
+ return mcms.Operation{
+ To: rmnProxy.Address(),
+ Data: setRMNTx.Data(),
+ Value: big.NewInt(0),
+ }, nil
+}
+
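+// RMNNopConfig describes a single RMN node operator and can be mapped to the
+// corresponding RMNHome node and RMNRemote signer entries.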
+type RMNNopConfig struct {
+ NodeIndex uint64
+ OffchainPublicKey [32]byte
+ EVMOnChainPublicKey common.Address
+ PeerId p2pkey.PeerID
+}
+
+func (c RMNNopConfig) ToRMNHomeNode() rmn_home.RMNHomeNode {
+ return rmn_home.RMNHomeNode{
+ PeerId: c.PeerId,
+ OffchainPublicKey: c.OffchainPublicKey,
+ }
+}
+
+func (c RMNNopConfig) ToRMNRemoteSigner() rmn_remote.RMNRemoteSigner {
+ return rmn_remote.RMNRemoteSigner{
+ OnchainPublicKey: c.EVMOnChainPublicKey,
+ NodeIndex: c.NodeIndex,
+ }
+}
+
+func (c RMNNopConfig) SetBit(bitmap *big.Int, value bool) {
+ if value {
+ bitmap.SetBit(bitmap, int(c.NodeIndex), 1)
+ } else {
+ bitmap.SetBit(bitmap, int(c.NodeIndex), 0)
+ }
+}
+
func getDeployer(e deployment.Environment, chain uint64, mcmConfig *MCMSConfig) *bind.TransactOpts {
if mcmConfig == nil {
return e.Chains[chain].DeployerKey
@@ -274,10 +407,10 @@ func NewPromoteCandidateConfigChangeset(e deployment.Environment, config Promote
}, nil
}
-func buildTimelockPerChain(e deployment.Environment, state CCIPOnChainState) map[uint64]*commonchangeset.TimelockExecutionContracts {
- timelocksPerChain := make(map[uint64]*commonchangeset.TimelockExecutionContracts)
+func buildTimelockPerChain(e deployment.Environment, state CCIPOnChainState) map[uint64]*proposalutils.TimelockExecutionContracts {
+ timelocksPerChain := make(map[uint64]*proposalutils.TimelockExecutionContracts)
for _, chain := range e.Chains {
- timelocksPerChain[chain.Selector] = &commonchangeset.TimelockExecutionContracts{
+ timelocksPerChain[chain.Selector] = &proposalutils.TimelockExecutionContracts{
Timelock: state.Chains[chain.Selector].Timelock,
CallProxy: state.Chains[chain.Selector].CallProxy,
}
@@ -310,10 +443,14 @@ func buildRMNRemotePerChain(e deployment.Environment, state CCIPOnChainState) ma
return timelocksPerChain
}
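+// RMNRemoteConfig holds the signer set and the fault tolerance parameter F to set on a chain's RMNRemote.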
+type RMNRemoteConfig struct {
+ Signers []rmn_remote.RMNRemoteSigner
+ F uint64
+}
+
type SetRMNRemoteConfig struct {
HomeChainSelector uint64
- Signers []rmn_remote.RMNRemoteSigner
- F uint64
+ RMNRemoteConfigs map[uint64]RMNRemoteConfig
MCMSConfig *MCMSConfig
}
@@ -323,14 +460,21 @@ func (c SetRMNRemoteConfig) Validate() error {
return err
}
- for i := 0; i < len(c.Signers)-1; i++ {
- if c.Signers[i].NodeIndex >= c.Signers[i+1].NodeIndex {
- return fmt.Errorf("signers must be in ascending order of nodeIndex")
+ for chain, config := range c.RMNRemoteConfigs {
+ err := deployment.IsValidChainSelector(chain)
+ if err != nil {
+ return err
+ }
+
+ for i := 0; i < len(config.Signers)-1; i++ {
+ if config.Signers[i].NodeIndex >= config.Signers[i+1].NodeIndex {
+ return fmt.Errorf("signers must be in ascending order of nodeIndex, but found %d >= %d", config.Signers[i].NodeIndex, config.Signers[i+1].NodeIndex)
+ }
}
- }
- if len(c.Signers) < 2*int(c.F)+1 {
- return fmt.Errorf("signers count must greater than or equal to %d", 2*c.F+1)
+ if len(config.Signers) < 2*int(config.F)+1 {
+ return fmt.Errorf("signers count (%d) must be greater than or equal to %d", len(config.Signers), 2*config.F+1)
+ }
}
return nil
@@ -367,9 +511,10 @@ func NewSetRMNRemoteConfigChangeset(e deployment.Environment, config SetRMNRemot
rmnRemotePerChain := buildRMNRemotePerChain(e, state)
batches := make([]timelock.BatchChainOperation, 0)
- for chain, remote := range rmnRemotePerChain {
- if remote == nil {
- continue
+ for chain, remoteConfig := range config.RMNRemoteConfigs {
+ remote, ok := rmnRemotePerChain[chain]
+ if !ok {
+ return deployment.ChangesetOutput{}, fmt.Errorf("RMNRemote contract not found for chain %d", chain)
}
currentVersionConfig, err := remote.GetVersionedConfig(nil)
@@ -379,8 +524,8 @@ func NewSetRMNRemoteConfigChangeset(e deployment.Environment, config SetRMNRemot
newConfig := rmn_remote.RMNRemoteConfig{
RmnHomeContractConfigDigest: activeConfig,
- Signers: config.Signers,
- F: config.F,
+ Signers: remoteConfig.Signers,
+ F: remoteConfig.F,
}
if reflect.DeepEqual(currentVersionConfig.Config, newConfig) {
diff --git a/deployment/ccip/changeset/cs_update_rmn_config_test.go b/deployment/ccip/changeset/cs_update_rmn_config_test.go
index e22b85cdf81..07bf22720c2 100644
--- a/deployment/ccip/changeset/cs_update_rmn_config_test.go
+++ b/deployment/ccip/changeset/cs_update_rmn_config_test.go
@@ -8,13 +8,37 @@ import (
"github.com/smartcontractkit/chainlink/deployment"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
+ commontypes "github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/rmn_home"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/rmn_remote"
)
+var (
+ rmn_staging_1 = RMNNopConfig{
+ NodeIndex: 0,
+ PeerId: deployment.MustPeerIDFromString("p2p_12D3KooWRXxZq3pd4a3ZGkKj7Nt1SQQrnB8CuvbPnnV9KVeMeWqg"),
+ OffchainPublicKey: [32]byte(common.FromHex("0xb34944857a42444d1b285d7940d6e06682309e0781e43a69676ee9f85c73c2d1")),
+ EVMOnChainPublicKey: common.HexToAddress("0x5af8ee32316a6427f169a45fdc1b3a91a85ac459e3c1cb91c69e1c51f0c1fc21"),
+ }
+ rmn_staging_2 = RMNNopConfig{
+ NodeIndex: 1,
+ PeerId: deployment.MustPeerIDFromString("p2p_12D3KooWEmdxYQFsRbD9aFczF32zA3CcUwuSiWCk2CrmACo4v9RL"),
+ OffchainPublicKey: [32]byte(common.FromHex("0x68d9f3f274e3985528a923a9bace3d39c55dd778b187b4120b384cc48c892859")),
+ EVMOnChainPublicKey: common.HexToAddress("0x858589216956f482a0f68b282a7050af4cd48ed2"),
+ }
+ rmn_staging_3 = RMNNopConfig{
+ NodeIndex: 2,
+ PeerId: deployment.MustPeerIDFromString("p2p_12D3KooWJS42cNXKJvj6DeZnxEX7aGxhEuap6uNFrz554AbUDw6Q"),
+ OffchainPublicKey: [32]byte(common.FromHex("0x5af8ee32316a6427f169a45fdc1b3a91a85ac459e3c1cb91c69e1c51f0c1fc21")),
+ EVMOnChainPublicKey: common.HexToAddress("0x7c5e94162c6fabbdeb3bfe83ae532846e337bfae"),
+ }
+)
+
type updateRMNConfigTestCase struct {
useMCMS bool
name string
+ nops []RMNNopConfig
}
func TestUpdateRMNConfig(t *testing.T) {
@@ -23,15 +47,18 @@ func TestUpdateRMNConfig(t *testing.T) {
{
useMCMS: true,
name: "with MCMS",
+ nops: []RMNNopConfig{rmn_staging_1, rmn_staging_2, rmn_staging_3},
},
{
useMCMS: false,
name: "without MCMS",
+ nops: []RMNNopConfig{rmn_staging_1, rmn_staging_2, rmn_staging_3},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
updateRMNConfig(t, tc)
})
}
@@ -80,10 +107,15 @@ func updateRMNConfig(t *testing.T, tc updateRMNConfigTestCase) {
}
}
+ nodes := make([]rmn_home.RMNHomeNode, 0, len(tc.nops))
+ for _, nop := range tc.nops {
+ nodes = append(nodes, nop.ToRMNHomeNode())
+ }
+
setRMNHomeCandidateConfig := SetRMNHomeCandidateConfig{
HomeChainSelector: e.HomeChainSel,
RMNStaticConfig: rmn_home.RMNHomeStaticConfig{
- Nodes: []rmn_home.RMNHomeNode{},
+ Nodes: nodes,
OffchainConfig: []byte(""),
},
RMNDynamicConfig: rmn_home.RMNHomeDynamicConfig{
@@ -132,16 +164,25 @@ func updateRMNConfig(t *testing.T, tc updateRMNConfigTestCase) {
require.NoError(t, err)
require.NotEqual(t, previousActiveDigest, currentActiveDigest)
+ signers := make([]rmn_remote.RMNRemoteSigner, 0, len(tc.nops))
+ for _, nop := range tc.nops {
+ signers = append(signers, nop.ToRMNRemoteSigner())
+ }
+
+ remoteConfigs := make(map[uint64]RMNRemoteConfig, len(e.Env.Chains))
+ for _, chain := range e.Env.Chains {
+ remoteConfig := RMNRemoteConfig{
+ Signers: signers,
+ F: 0,
+ }
+
+ remoteConfigs[chain.Selector] = remoteConfig
+ }
+
setRemoteConfig := SetRMNRemoteConfig{
HomeChainSelector: e.HomeChainSel,
- Signers: []rmn_remote.RMNRemoteSigner{
- {
- OnchainPublicKey: common.Address{},
- NodeIndex: 0,
- },
- },
- F: 0,
- MCMSConfig: mcmsConfig,
+ RMNRemoteConfigs: remoteConfigs,
+ MCMSConfig: mcmsConfig,
}
_, err = commonchangeset.ApplyChangesets(t, e.Env, timelocksPerChain, []commonchangeset.ChangesetApplication{
@@ -176,3 +217,86 @@ func buildRMNRemoteAddressPerChain(e deployment.Environment, state CCIPOnChainSt
}
return rmnRemoteAddressPerChain
}
+
+func TestSetRMNRemoteOnRMNProxy(t *testing.T) {
+ t.Parallel()
+ e := NewMemoryEnvironment(t, WithNoJobsAndContracts())
+ allChains := e.Env.AllChainSelectors()
+ mcmsCfg := make(map[uint64]commontypes.MCMSWithTimelockConfig)
+ var err error
+ for _, c := range e.Env.AllChainSelectors() {
+ mcmsCfg[c] = proposalutils.SingleGroupTimelockConfig(t)
+ }
+ // Deploy prerequisites first so that the RMNProxy contract exists on each chain.
+ // No proposals are made at this stage, so the timelock map can be passed as nil here.
+ e.Env, err = commonchangeset.ApplyChangesets(t, e.Env, nil, []commonchangeset.ChangesetApplication{
+ {
+ Changeset: commonchangeset.WrapChangeSet(commonchangeset.DeployLinkToken),
+ Config: allChains,
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(DeployPrerequisites),
+ Config: DeployPrerequisiteConfig{
+ ChainSelectors: allChains,
+ },
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(commonchangeset.DeployMCMSWithTimelock),
+ Config: mcmsCfg,
+ },
+ })
+ require.NoError(t, err)
+ contractsByChain := make(map[uint64][]common.Address)
+ state, err := LoadOnchainState(e.Env)
+ require.NoError(t, err)
+ for _, chain := range allChains {
+ rmnProxy := state.Chains[chain].RMNProxy
+ require.NotNil(t, rmnProxy)
+ contractsByChain[chain] = []common.Address{rmnProxy.Address()}
+ }
+ timelockContractsPerChain := make(map[uint64]*proposalutils.TimelockExecutionContracts)
+ for _, chain := range allChains {
+ timelockContractsPerChain[chain] = &proposalutils.TimelockExecutionContracts{
+ Timelock: state.Chains[chain].Timelock,
+ CallProxy: state.Chains[chain].CallProxy,
+ }
+ }
+ e.Env, err = commonchangeset.ApplyChangesets(t, e.Env, timelockContractsPerChain, []commonchangeset.ChangesetApplication{
+ // transfer ownership of RMNProxy to timelock
+ {
+ Changeset: commonchangeset.WrapChangeSet(commonchangeset.TransferToMCMSWithTimelock),
+ Config: commonchangeset.TransferToMCMSWithTimelockConfig{
+ ContractsByChain: contractsByChain,
+ MinDelay: 0,
+ },
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(DeployChainContracts),
+ Config: DeployChainContractsConfig{
+ ChainSelectors: allChains,
+ HomeChainSelector: e.HomeChainSel,
+ },
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(SetRMNRemoteOnRMNProxy),
+ Config: SetRMNRemoteOnRMNProxyConfig{
+ ChainSelectors: allChains,
+ MCMSConfig: &MCMSConfig{
+ MinDelay: 0,
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+ state, err = LoadOnchainState(e.Env)
+ require.NoError(t, err)
+ for _, chain := range allChains {
+ rmnProxy := state.Chains[chain].RMNProxy
+ proxyOwner, err := rmnProxy.Owner(nil)
+ require.NoError(t, err)
+ require.Equal(t, state.Chains[chain].Timelock.Address(), proxyOwner)
+ rmnAddr, err := rmnProxy.GetARM(nil)
+ require.NoError(t, err)
+ require.Equal(t, rmnAddr, state.Chains[chain].RMNRemote.Address())
+ }
+}
diff --git a/deployment/ccip/changeset/internal/deploy_home_chain.go b/deployment/ccip/changeset/internal/deploy_home_chain.go
index aa029fd4bec..5697c0e2f73 100644
--- a/deployment/ccip/changeset/internal/deploy_home_chain.go
+++ b/deployment/ccip/changeset/internal/deploy_home_chain.go
@@ -143,6 +143,8 @@ func DonIDForChain(registry *capabilities_registry.CapabilitiesRegistry, ccipHom
return donIDs[0], nil
}
+// BuildSetOCR3ConfigArgs builds the OCR3 config arguments for the OffRamp contract
+// using the donID's OCR3 configs from the CCIPHome contract.
func BuildSetOCR3ConfigArgs(
donID uint32,
ccipHome *ccip_home.CCIPHome,
diff --git a/deployment/ccip/changeset/solana_state.go b/deployment/ccip/changeset/solana_state.go
new file mode 100644
index 00000000000..4e5507cfcd3
--- /dev/null
+++ b/deployment/ccip/changeset/solana_state.go
@@ -0,0 +1,6 @@
+package changeset
+
+// SolCCIPChainState holds a Go binding for all the currently deployed CCIP programs
+// on a Solana chain. If a binding is nil, it means there is no such contract on the chain.
+type SolCCIPChainState struct {
+}
diff --git a/deployment/ccip/changeset/state.go b/deployment/ccip/changeset/state.go
index da24b7ac3cd..2490f621e35 100644
--- a/deployment/ccip/changeset/state.go
+++ b/deployment/ccip/changeset/state.go
@@ -3,6 +3,8 @@ package changeset
import (
"fmt"
+ "github.com/smartcontractkit/ccip-owner-contracts/pkg/gethwrappers"
+
burn_mint_token_pool "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/burn_mint_token_pool_1_4_0"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp"
@@ -166,6 +168,15 @@ func (c CCIPChainState) GenerateView() (view.ChainView, error) {
}
chainView.RMNRemote[c.RMNRemote.Address().Hex()] = rmnView
}
+
+ if c.RMNHome != nil {
+ rmnHomeView, err := v1_6.GenerateRMNHomeView(c.RMNHome)
+ if err != nil {
+ return chainView, errors.Wrapf(err, "failed to generate rmn home view for rmn home %s", c.RMNHome.Address().String())
+ }
+ chainView.RMNHome[c.RMNHome.Address().Hex()] = rmnHomeView
+ }
+
if c.FeeQuoter != nil && c.Router != nil && c.TokenAdminRegistry != nil {
fqView, err := v1_6.GenerateFeeQuoterView(c.FeeQuoter, c.Router, c.TokenAdminRegistry)
if err != nil {
@@ -296,7 +307,38 @@ type CCIPOnChainState struct {
// Populated go bindings for the appropriate version for all contracts.
// We would hold 2 versions of each contract here. Once we upgrade we can phase out the old one.
// When generating bindings, make sure the package name corresponds to the version.
- Chains map[uint64]CCIPChainState
+ Chains map[uint64]CCIPChainState
+ SolChains map[uint64]SolCCIPChainState
+}
+
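+// GetAllProposerMCMSForChains returns the proposer ManyChainMultiSig binding for each
+// requested chain, erroring if a chain or its proposer MCM is missing from state.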
+func (s CCIPOnChainState) GetAllProposerMCMSForChains(chains []uint64) (map[uint64]*gethwrappers.ManyChainMultiSig, error) {
+ multiSigs := make(map[uint64]*gethwrappers.ManyChainMultiSig)
+ for _, chain := range chains {
+ chainState, ok := s.Chains[chain]
+ if !ok {
+ return nil, fmt.Errorf("chain %d not found", chain)
+ }
+ if chainState.ProposerMcm == nil {
+ return nil, fmt.Errorf("proposer mcm not found for chain %d", chain)
+ }
+ multiSigs[chain] = chainState.ProposerMcm
+ }
+ return multiSigs, nil
+}
+
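+// GetAllTimeLocksForChains returns the timelock contract address for each requested
+// chain, erroring if a chain or its timelock is missing from state.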
+func (s CCIPOnChainState) GetAllTimeLocksForChains(chains []uint64) (map[uint64]common.Address, error) {
+ timelocks := make(map[uint64]common.Address)
+ for _, chain := range chains {
+ chainState, ok := s.Chains[chain]
+ if !ok {
+ return nil, fmt.Errorf("chain %d not found", chain)
+ }
+ if chainState.Timelock == nil {
+ return nil, fmt.Errorf("timelock not found for chain %d", chain)
+ }
+ timelocks[chain] = chainState.Timelock.Address()
+ }
+ return timelocks, nil
}
func (s CCIPOnChainState) View(chains []uint64) (map[string]view.ChainView, error) {
@@ -345,13 +387,13 @@ func LoadOnchainState(e deployment.Environment) (CCIPOnChainState, error) {
// LoadChainState Loads all state for a chain into state
func LoadChainState(chain deployment.Chain, addresses map[string]deployment.TypeAndVersion) (CCIPChainState, error) {
var state CCIPChainState
- mcmsWithTimelock, err := commoncs.MaybeLoadMCMSWithTimelockState(chain, addresses)
+ mcmsWithTimelock, err := commoncs.MaybeLoadMCMSWithTimelockChainState(chain, addresses)
if err != nil {
return state, err
}
state.MCMSWithTimelockState = *mcmsWithTimelock
- linkState, err := commoncs.MaybeLoadLinkTokenState(chain, addresses)
+ linkState, err := commoncs.MaybeLoadLinkTokenChainState(chain, addresses)
if err != nil {
return state, err
}
diff --git a/deployment/ccip/changeset/test_assertions.go b/deployment/ccip/changeset/test_assertions.go
index c0b510acc07..a114e52b361 100644
--- a/deployment/ccip/changeset/test_assertions.go
+++ b/deployment/ccip/changeset/test_assertions.go
@@ -221,8 +221,8 @@ func ConfirmCommitForAllWithExpectedSeqNums(
return false
}
},
- 3*time.Minute,
- 1*time.Second,
+ tests.WaitTimeout(t),
+ 2*time.Second,
"all commitments did not confirm",
)
}
diff --git a/deployment/ccip/changeset/test_environment.go b/deployment/ccip/changeset/test_environment.go
index 171190c7308..a7d8a923da5 100644
--- a/deployment/ccip/changeset/test_environment.go
+++ b/deployment/ccip/changeset/test_environment.go
@@ -20,6 +20,7 @@ import (
"github.com/smartcontractkit/chainlink/deployment"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
commontypes "github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
)
@@ -299,12 +300,7 @@ func NewEnvironmentWithJobsAndContracts(t *testing.T, tc *TestConfigs, tEnv Test
mcmsCfg := make(map[uint64]commontypes.MCMSWithTimelockConfig)
for _, c := range e.Env.AllChainSelectors() {
- mcmsCfg[c] = commontypes.MCMSWithTimelockConfig{
- Canceller: commonchangeset.SingleGroupMCMS(t),
- Bypasser: commonchangeset.SingleGroupMCMS(t),
- Proposer: commonchangeset.SingleGroupMCMS(t),
- TimelockMinDelay: big.NewInt(0),
- }
+ mcmsCfg[c] = proposalutils.SingleGroupTimelockConfig(t)
}
var prereqCfg []DeployPrerequisiteConfigPerChain
@@ -347,6 +343,12 @@ func NewEnvironmentWithJobsAndContracts(t *testing.T, tc *TestConfigs, tEnv Test
HomeChainSelector: e.HomeChainSel,
},
},
+ {
+ Changeset: commonchangeset.WrapChangeSet(SetRMNRemoteOnRMNProxy),
+ Config: SetRMNRemoteOnRMNProxyConfig{
+ ChainSelectors: allChains,
+ },
+ },
})
require.NoError(t, err)
@@ -382,9 +384,9 @@ func NewEnvironmentWithJobsAndContracts(t *testing.T, tc *TestConfigs, tEnv Test
}
// Build the per chain config.
chainConfigs := make(map[uint64]CCIPOCRParams)
- timelockContractsPerChain := make(map[uint64]*commonchangeset.TimelockExecutionContracts)
+ timelockContractsPerChain := make(map[uint64]*proposalutils.TimelockExecutionContracts)
for _, chain := range allChains {
- timelockContractsPerChain[chain] = &commonchangeset.TimelockExecutionContracts{
+ timelockContractsPerChain[chain] = &proposalutils.TimelockExecutionContracts{
Timelock: state.Chains[chain].Timelock,
CallProxy: state.Chains[chain].CallProxy,
}
diff --git a/deployment/ccip/changeset/view_test.go b/deployment/ccip/changeset/view_test.go
index 11430bfbddf..35193979849 100644
--- a/deployment/ccip/changeset/view_test.go
+++ b/deployment/ccip/changeset/view_test.go
@@ -7,6 +7,7 @@ import (
)
func TestSmokeView(t *testing.T) {
+ t.Parallel()
tenv := NewMemoryEnvironment(t, WithChains(3))
_, err := ViewCCIP(tenv.Env)
require.NoError(t, err)
diff --git a/deployment/ccip/view/v1_6/rmnhome.go b/deployment/ccip/view/v1_6/rmnhome.go
new file mode 100644
index 00000000000..82d39074d6f
--- /dev/null
+++ b/deployment/ccip/view/v1_6/rmnhome.go
@@ -0,0 +1,214 @@
+package v1_6
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/smartcontractkit/chainlink/deployment/common/view/types"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/rmn_home"
+)
+
+type RMNHomeView struct {
+ types.ContractMetaData
+ CandidateConfig *RMNHomeVersionedConfig `json:"candidateConfig,omitempty"`
+ ActiveConfig *RMNHomeVersionedConfig `json:"activeConfig,omitempty"`
+}
+
+type RMNHomeVersionedConfig struct {
+ Version uint32 `json:"version"`
+ StaticConfig RMNHomeStaticConfig `json:"staticConfig"`
+ DynamicConfig RMNHomeDynamicConfig `json:"dynamicConfig"`
+ Digest [32]byte `json:"digest"`
+}
+
+func decodeHexString(hexStr string, expectedLength int) ([]byte, error) {
+ bytes, err := hex.DecodeString(hexStr)
+ if err != nil {
+ return nil, err
+ }
+ if len(bytes) != expectedLength {
+ return nil, fmt.Errorf("invalid length: expected %d, got %d", expectedLength, len(bytes))
+ }
+ return bytes, nil
+}
+
+func (c RMNHomeVersionedConfig) MarshalJSON() ([]byte, error) {
+ type Alias RMNHomeVersionedConfig
+ return json.Marshal(&struct {
+ Digest string `json:"digest"`
+ *Alias
+ }{
+ Digest: hex.EncodeToString(c.Digest[:]),
+ Alias: (*Alias)(&c),
+ })
+}
+
+func (c *RMNHomeVersionedConfig) UnmarshalJSON(data []byte) error {
+ type Alias RMNHomeVersionedConfig
+ aux := &struct {
+ Digest string `json:"digest"`
+ *Alias
+ }{
+ Alias: (*Alias)(c),
+ }
+
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+
+ digestBytes, err := decodeHexString(aux.Digest, 32)
+ if err != nil {
+ return err
+ }
+ copy(c.Digest[:], digestBytes)
+ return nil
+}
+
+type RMNHomeStaticConfig struct {
+ Nodes []RMNHomeNode `json:"nodes"`
+}
+
+type RMNHomeDynamicConfig struct {
+ SourceChains []RMNHomeSourceChain `json:"sourceChains"`
+}
+
+type RMNHomeSourceChain struct {
+ ChainSelector uint64 `json:"selector"`
+ F uint64 `json:"f"`
+ ObserverNodesBitmap *big.Int `json:"observerNodesBitmap"`
+}
+
+type RMNHomeNode struct {
+ PeerId [32]byte `json:"peerId"`
+ OffchainPublicKey [32]byte `json:"offchainPublicKey"`
+}
+
+func (n RMNHomeNode) MarshalJSON() ([]byte, error) {
+ type Alias RMNHomeNode
+ return json.Marshal(&struct {
+ PeerId string `json:"peerId"`
+ OffchainPublicKey string `json:"offchainPublicKey"`
+ *Alias
+ }{
+ PeerId: hex.EncodeToString(n.PeerId[:]),
+ OffchainPublicKey: hex.EncodeToString(n.OffchainPublicKey[:]),
+ Alias: (*Alias)(&n),
+ })
+}
+
+func (n *RMNHomeNode) UnmarshalJSON(data []byte) error {
+ type Alias RMNHomeNode
+ aux := &struct {
+ PeerId string `json:"peerId"`
+ OffchainPublicKey string `json:"offchainPublicKey"`
+ *Alias
+ }{
+ Alias: (*Alias)(n),
+ }
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+
+ peerIdBytes, err := decodeHexString(aux.PeerId, 32)
+ if err != nil {
+ return err
+ }
+ copy(n.PeerId[:], peerIdBytes)
+
+ offchainPublicKeyBytes, err := decodeHexString(aux.OffchainPublicKey, 32)
+ if err != nil {
+ return err
+ }
+ copy(n.OffchainPublicKey[:], offchainPublicKeyBytes)
+
+ return nil
+}
+
+type DigestFunc func(*bind.CallOpts) ([32]byte, error)
+
+func mapNodes(nodes []rmn_home.RMNHomeNode) []RMNHomeNode {
+ result := make([]RMNHomeNode, len(nodes))
+ for i, node := range nodes {
+ result[i] = RMNHomeNode{
+ PeerId: node.PeerId,
+ OffchainPublicKey: node.OffchainPublicKey,
+ }
+ }
+ return result
+}
+
+func mapSourceChains(chains []rmn_home.RMNHomeSourceChain) []RMNHomeSourceChain {
+ result := make([]RMNHomeSourceChain, len(chains))
+ for i, chain := range chains {
+ result[i] = RMNHomeSourceChain{
+ ChainSelector: chain.ChainSelector,
+ F: chain.F,
+ ObserverNodesBitmap: chain.ObserverNodesBitmap,
+ }
+ }
+ return result
+}
+
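+// generateRmnHomeVersionedConfig reads the config identified by digestFunc (active or
+// candidate) and maps it into view types. A zero digest means no such config exists,
+// in which case nil is returned.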
+func generateRmnHomeVersionedConfig(reader *rmn_home.RMNHome, digestFunc DigestFunc) (*RMNHomeVersionedConfig, error) {
+ address := reader.Address()
+ digest, err := digestFunc(nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get digest for contract %s: %w", address, err)
+ }
+
+ if digest == [32]byte{} {
+ return nil, nil
+ }
+
+ config, err := reader.GetConfig(nil, digest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get config for contract %s: %w", address, err)
+ }
+
+ staticConfig := RMNHomeStaticConfig{
+ Nodes: mapNodes(config.VersionedConfig.StaticConfig.Nodes),
+ }
+
+ dynamicConfig := RMNHomeDynamicConfig{
+ SourceChains: mapSourceChains(config.VersionedConfig.DynamicConfig.SourceChains),
+ }
+
+ return &RMNHomeVersionedConfig{
+ Version: config.VersionedConfig.Version,
+ Digest: config.VersionedConfig.ConfigDigest,
+ StaticConfig: staticConfig,
+ DynamicConfig: dynamicConfig,
+ }, nil
+}
+
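+// GenerateRMNHomeView builds a view of the RMNHome contract with its metadata and,
+// when present, its active and candidate versioned configs.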
+func GenerateRMNHomeView(rmnReader *rmn_home.RMNHome) (RMNHomeView, error) {
+ if rmnReader == nil {
+ return RMNHomeView{}, nil
+ }
+
+ address := rmnReader.Address()
+
+ activeConfig, err := generateRmnHomeVersionedConfig(rmnReader, rmnReader.GetActiveDigest)
+ if err != nil {
+ return RMNHomeView{}, fmt.Errorf("failed to generate active config for contract %s: %w", address, err)
+ }
+
+ candidateConfig, err := generateRmnHomeVersionedConfig(rmnReader, rmnReader.GetCandidateDigest)
+ if err != nil {
+ return RMNHomeView{}, fmt.Errorf("failed to generate candidate config for contract %s: %w", address, err)
+ }
+
+ contractMetaData, err := types.NewContractMetaData(rmnReader, rmnReader.Address())
+ if err != nil {
+ return RMNHomeView{}, fmt.Errorf("failed to create contract metadata for contract %s: %w", address, err)
+ }
+
+ return RMNHomeView{
+ ContractMetaData: contractMetaData,
+ CandidateConfig: candidateConfig,
+ ActiveConfig: activeConfig,
+ }, nil
+}
diff --git a/deployment/ccip/view/view.go b/deployment/ccip/view/view.go
index 0abfb103c94..8f698ba2277 100644
--- a/deployment/ccip/view/view.go
+++ b/deployment/ccip/view/view.go
@@ -28,6 +28,7 @@ type ChainView struct {
FeeQuoter map[string]v1_6.FeeQuoterView `json:"feeQuoter,omitempty"`
NonceManager map[string]v1_6.NonceManagerView `json:"nonceManager,omitempty"`
RMNRemote map[string]v1_6.RMNRemoteView `json:"rmnRemote,omitempty"`
+ RMNHome map[string]v1_6.RMNHomeView `json:"rmnHome,omitempty"`
OnRamp map[string]v1_6.OnRampView `json:"onRamp,omitempty"`
OffRamp map[string]v1_6.OffRampView `json:"offRamp,omitempty"`
// TODO: Perhaps restrict to one CCIPHome/CR? Shouldn't
@@ -44,14 +45,19 @@ func NewChain() ChainView {
// v1.0
RMNProxy: make(map[string]v1_0.RMNProxyView),
// v1.2
- Router: make(map[string]v1_2.RouterView),
+ Router: make(map[string]v1_2.RouterView),
+ PriceRegistry: make(map[string]v1_2.PriceRegistryView),
// v1.5
TokenAdminRegistry: make(map[string]v1_5.TokenAdminRegistryView),
CommitStore: make(map[string]v1_5.CommitStoreView),
+ EVM2EVMOnRamp: make(map[string]v1_5.OnRampView),
+ EVM2EVMOffRamp: make(map[string]v1_5.OffRampView),
+ RMN: make(map[string]v1_5.RMNView),
// v1.6
FeeQuoter: make(map[string]v1_6.FeeQuoterView),
NonceManager: make(map[string]v1_6.NonceManagerView),
RMNRemote: make(map[string]v1_6.RMNRemoteView),
+ RMNHome: make(map[string]v1_6.RMNHomeView),
OnRamp: make(map[string]v1_6.OnRampView),
OffRamp: make(map[string]v1_6.OffRampView),
CapabilityRegistry: make(map[string]common_v1_0.CapabilityRegistryView),
diff --git a/deployment/common/changeset/deploy_link_token.go b/deployment/common/changeset/deploy_link_token.go
index 292c07c93df..c115a7ee083 100644
--- a/deployment/common/changeset/deploy_link_token.go
+++ b/deployment/common/changeset/deploy_link_token.go
@@ -12,7 +12,7 @@ import (
var _ deployment.ChangeSet[[]uint64] = DeployLinkToken
-// DeployLinkToken deploys a link token contract to the chain identified by the chainSelector.
+// DeployLinkToken deploys a link token contract to the chain identified by the ChainSelector.
func DeployLinkToken(e deployment.Environment, chains []uint64) (deployment.ChangesetOutput, error) {
for _, chain := range chains {
_, ok := e.Chains[chain]
diff --git a/deployment/common/changeset/deploy_link_token_test.go b/deployment/common/changeset/deploy_link_token_test.go
index a61743e9bf4..bc472d2a247 100644
--- a/deployment/common/changeset/deploy_link_token_test.go
+++ b/deployment/common/changeset/deploy_link_token_test.go
@@ -27,7 +27,7 @@ func TestDeployLinkToken(t *testing.T) {
require.NoError(t, err)
addrs, err := e.ExistingAddresses.AddressesForChain(chain1)
require.NoError(t, err)
- state, err := changeset.MaybeLoadLinkTokenState(e.Chains[chain1], addrs)
+ state, err := changeset.MaybeLoadLinkTokenChainState(e.Chains[chain1], addrs)
require.NoError(t, err)
// View itself already unit tested
_, err = state.GenerateLinkView()
diff --git a/deployment/common/changeset/example/add_mint_burners_link.go b/deployment/common/changeset/example/add_mint_burners_link.go
new file mode 100644
index 00000000000..7322f99dd60
--- /dev/null
+++ b/deployment/common/changeset/example/add_mint_burners_link.go
@@ -0,0 +1,70 @@
+package example
+
+import (
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+)
+
+type AddMintersBurnersLinkConfig struct {
+ ChainSelector uint64
+ Minters []common.Address
+ Burners []common.Address
+}
+
+var _ deployment.ChangeSet[*AddMintersBurnersLinkConfig] = AddMintersBurnersLink
+
+// AddMintersBurnersLink grants the minter / burner role to the provided addresses.
+func AddMintersBurnersLink(e deployment.Environment, cfg *AddMintersBurnersLinkConfig) (deployment.ChangesetOutput, error) {
+
+ chain := e.Chains[cfg.ChainSelector]
+ addresses, err := e.ExistingAddresses.AddressesForChain(cfg.ChainSelector)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addresses)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ for _, minter := range cfg.Minters {
+ // check if minter is already a minter
+ isMinter, err := linkState.LinkToken.IsMinter(&bind.CallOpts{Context: e.GetContext()}, minter)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ if isMinter {
+ continue
+ }
+ tx, err := linkState.LinkToken.GrantMintRole(chain.DeployerKey, minter)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ }
+ for _, burner := range cfg.Burners {
+ // check if burner is already a burner
+ isBurner, err := linkState.LinkToken.IsBurner(&bind.CallOpts{Context: e.GetContext()}, burner)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ if isBurner {
+ continue
+ }
+ tx, err := linkState.LinkToken.GrantBurnRole(chain.DeployerKey, burner)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ }
+ return deployment.ChangesetOutput{}, nil
+
+}
diff --git a/deployment/common/changeset/example/add_mint_burners_link_test.go b/deployment/common/changeset/example/add_mint_burners_link_test.go
new file mode 100644
index 00000000000..4dbfddc0b30
--- /dev/null
+++ b/deployment/common/changeset/example/add_mint_burners_link_test.go
@@ -0,0 +1,50 @@
+package example_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset/example"
+)
+
+// TestAddMintersBurnersLink tests the AddMintersBurnersLink changeset
+func TestAddMintersBurnersLink(t *testing.T) {
+ t.Parallel()
+ ctx := context.Background()
+ // Deploy Link Token and Timelock contracts and add addresses to environment
+ env := setupLinkTransferTestEnv(t)
+
+ chainSelector := env.AllChainSelectors()[0]
+ chain := env.Chains[chainSelector]
+ addrs, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ require.NoError(t, err)
+ require.Len(t, addrs, 6)
+
+ mcmsState, err := changeset.MaybeLoadMCMSWithTimelockChainState(chain, addrs)
+ require.NoError(t, err)
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addrs)
+ require.NoError(t, err)
+
+ timelockAddress := mcmsState.Timelock.Address()
+
+ // Grant the timelock the minter and burner roles
+ _, err = example.AddMintersBurnersLink(env, &example.AddMintersBurnersLinkConfig{
+ ChainSelector: chainSelector,
+ Minters: []common.Address{timelockAddress},
+ Burners: []common.Address{timelockAddress},
+ })
+ require.NoError(t, err)
+
+ // check that the timelock now has the minter and burner roles
+ isMinter, err := linkState.LinkToken.IsMinter(&bind.CallOpts{Context: ctx}, timelockAddress)
+ require.NoError(t, err)
+ require.True(t, isMinter)
+ isBurner, err := linkState.LinkToken.IsBurner(&bind.CallOpts{Context: ctx}, timelockAddress)
+ require.NoError(t, err)
+ require.True(t, isBurner)
+}
diff --git a/deployment/common/changeset/example/link_transfer.go b/deployment/common/changeset/example/link_transfer.go
new file mode 100644
index 00000000000..2e3be48a4d1
--- /dev/null
+++ b/deployment/common/changeset/example/link_transfer.go
@@ -0,0 +1,239 @@
+package example
+
+import (
+ "errors"
+ "fmt"
+ "math/big"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ ethTypes "github.com/ethereum/go-ethereum/core/types"
+ owner_helpers "github.com/smartcontractkit/ccip-owner-contracts/pkg/gethwrappers"
+ chain_selectors "github.com/smartcontractkit/chain-selectors"
+
+ "github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/mcms"
+ "github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/timelock"
+
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
+ "github.com/smartcontractkit/chainlink/deployment/common/types"
+)
+
+const MaxTimelockDelay = 24 * 7 * time.Hour
+
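+// TransferConfig is a single LINK transfer: the destination address and the amount to send.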
+type TransferConfig struct {
+ To common.Address
+ Value *big.Int
+}
+
+type MCMSConfig struct {
+ MinDelay time.Duration // delay for timelock worker to execute the transfers.
+ OverrideRoot bool
+}
+
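+// LinkTransferConfig describes the LINK transfers to perform per chain selector, the
+// sending address, and optional MCMS settings for executing them via a timelock proposal.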
+type LinkTransferConfig struct {
+ Transfers map[uint64][]TransferConfig
+ From common.Address
+ McmsConfig *MCMSConfig
+}
+
+var _ deployment.ChangeSet[*LinkTransferConfig] = LinkTransfer
+
+func getDeployer(e deployment.Environment, chain uint64, mcmConfig *MCMSConfig) *bind.TransactOpts {
+ if mcmConfig == nil {
+ return e.Chains[chain].DeployerKey
+ }
+
+ return deployment.SimTransactOpts()
+}
+
+// Validate checks that the LinkTransferConfig is valid.
+func (cfg LinkTransferConfig) Validate(e deployment.Environment) error {
+ ctx := e.GetContext()
+ // Check that Transfers map has at least one chainSel
+ if len(cfg.Transfers) == 0 {
+ return errors.New("transfers map must have at least one chainSel")
+ }
+
+ // Check transfers config values.
+ for chainSel, transfers := range cfg.Transfers {
+ selector, err := chain_selectors.GetSelectorFamily(chainSel)
+ if err != nil {
+ return fmt.Errorf("invalid chain selector: %w", err)
+ }
+ if selector != chain_selectors.FamilyEVM {
+ return fmt.Errorf("chain selector %d is not an EVM chain", chainSel)
+ }
+ chain, ok := e.Chains[chainSel]
+ if !ok {
+ return fmt.Errorf("chain with selector %d not found", chainSel)
+ }
+ addrs, err := e.ExistingAddresses.AddressesForChain(chainSel)
+ if err != nil {
+ return fmt.Errorf("error getting addresses for chain %d: %w", chainSel, err)
+ }
+ if len(transfers) == 0 {
+ return fmt.Errorf("transfers for chainSel %d must have at least one LinkTransfer", chainSel)
+ }
+ totalAmount := big.NewInt(0)
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addrs)
+ if err != nil {
+ return fmt.Errorf("error loading link token state during validation: %w", err)
+ }
+ for _, transfer := range transfers {
+ if transfer.To == (common.Address{}) {
+ return errors.New("'to' address for transfers must be set")
+ }
+ if transfer.Value == nil {
+ return errors.New("value for transfers must be set")
+ }
+ if transfer.Value.Cmp(big.NewInt(0)) == 0 {
+ return errors.New("value for transfers must be non-zero")
+ }
+ if transfer.Value.Cmp(big.NewInt(0)) == -1 {
+ return errors.New("value for transfers must be positive")
+ }
+ totalAmount.Add(totalAmount, transfer.Value)
+ }
+ // check that from address has enough funds for the transfers
+ balance, err := linkState.LinkToken.BalanceOf(&bind.CallOpts{Context: ctx}, cfg.From)
+ if err != nil {
+ return fmt.Errorf("error getting LINK balance of sender for chain selector %d: %w", chainSel, err)
+ }
+ if balance.Cmp(totalAmount) < 0 {
+ return fmt.Errorf("sender does not have enough funds for transfers for chain selector %d, required: %s, available: %s", chainSel, totalAmount.String(), balance.String())
+ }
+ }
+
+ if cfg.McmsConfig == nil {
+ return nil
+ }
+
+ // Upper bound for min delay (7 days)
+ if cfg.McmsConfig.MinDelay > MaxTimelockDelay {
+ return errors.New("minDelay must be less than 7 days")
+ }
+
+ return nil
+}
+
+// initStatePerChain loads the LINK token and MCMS-with-timelock state for each chain selector in the provided config
+func initStatePerChain(cfg *LinkTransferConfig, e deployment.Environment) (
+ linkStatePerChain map[uint64]*changeset.LinkTokenState,
+ mcmsStatePerChain map[uint64]*changeset.MCMSWithTimelockState,
+ err error) {
+ linkStatePerChain = map[uint64]*changeset.LinkTokenState{}
+ mcmsStatePerChain = map[uint64]*changeset.MCMSWithTimelockState{}
+ // Load state for each chain
+ chainSelectors := []uint64{}
+ for chainSelector := range cfg.Transfers {
+ chainSelectors = append(chainSelectors, chainSelector)
+ }
+ linkStatePerChain, err = changeset.MaybeLoadLinkTokenState(e, chainSelectors)
+ if err != nil {
+ return nil, nil, err
+ }
+ mcmsStatePerChain, err = changeset.MaybeLoadMCMSWithTimelockState(e, chainSelectors)
+ if err != nil {
+ return nil, nil, err
+ }
+ return linkStatePerChain, mcmsStatePerChain, nil
+}
+
+// transferOrBuildTx transfers the LINK tokens or builds the tx for the MCMS proposal
+func transferOrBuildTx(
+ e deployment.Environment,
+ linkState *changeset.LinkTokenState,
+ transfer TransferConfig,
+ opts *bind.TransactOpts,
+ chain deployment.Chain,
+ mcmsConfig *MCMSConfig) (*ethTypes.Transaction, error) {
+ tx, err := linkState.LinkToken.Transfer(opts, transfer.To, transfer.Value)
+ if err != nil {
+ return nil, fmt.Errorf("error packing transfer tx data: %w", err)
+ }
+ // only wait for tx if we are not using MCMS
+ if mcmsConfig == nil {
+ if _, err := deployment.ConfirmIfNoError(chain, tx, err); err != nil {
+ e.Logger.Errorw("Failed to confirm transfer tx", "chain", chain.String(), "err", err)
+ return nil, err
+ }
+ }
+ return tx, nil
+
+}
+
+// LinkTransfer takes the given link transfers and executes them or creates an MCMS proposal for them.
+func LinkTransfer(e deployment.Environment, cfg *LinkTransferConfig) (deployment.ChangesetOutput, error) {
+
+ err := cfg.Validate(e)
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("invalid LinkTransferConfig: %w", err)
+ }
+ chainSelectors := []uint64{}
+ for chainSelector := range cfg.Transfers {
+ chainSelectors = append(chainSelectors, chainSelector)
+ }
+ mcmsPerChain := map[uint64]*owner_helpers.ManyChainMultiSig{}
+
+ timelockAddresses := map[uint64]common.Address{}
+ // Initialize state for each chain
+ linkStatePerChain, mcmsStatePerChain, err := initStatePerChain(cfg, e)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ allBatches := []timelock.BatchChainOperation{}
+ for chainSelector := range cfg.Transfers {
+ chainID := mcms.ChainIdentifier(chainSelector)
+ chain := e.Chains[chainSelector]
+ linkAddress := linkStatePerChain[chainSelector].LinkToken.Address()
+ mcmsState := mcmsStatePerChain[chainSelector]
+ linkState := linkStatePerChain[chainSelector]
+
+ timelockAddress := mcmsState.Timelock.Address()
+
+ mcmsPerChain[uint64(chainID)] = mcmsState.ProposerMcm
+
+ timelockAddresses[chainSelector] = timelockAddress
+ batch := timelock.BatchChainOperation{
+ ChainIdentifier: chainID,
+ Batch: []mcms.Operation{},
+ }
+
+ opts := getDeployer(e, chainSelector, cfg.McmsConfig)
+ totalAmount := big.NewInt(0)
+ for _, transfer := range cfg.Transfers[chainSelector] {
+ tx, err := transferOrBuildTx(e, linkState, transfer, opts, chain, cfg.McmsConfig)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ op := mcms.Operation{
+ To: linkAddress,
+ Data: tx.Data(),
+ Value: big.NewInt(0),
+ ContractType: string(types.LinkToken),
+ }
+ batch.Batch = append(batch.Batch, op)
+ totalAmount.Add(totalAmount, transfer.Value)
+ }
+
+ allBatches = append(allBatches, batch)
+ }
+
+ if cfg.McmsConfig != nil {
+ proposal, err := proposalutils.BuildProposalFromBatches(
+ timelockAddresses,
+ mcmsPerChain,
+ allBatches,
+ "LINK Value transfer proposal",
+ cfg.McmsConfig.MinDelay,
+ )
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ return deployment.ChangesetOutput{
+ Proposals: []timelock.MCMSWithTimelockProposal{*proposal},
+ }, nil
+ }
+
+ return deployment.ChangesetOutput{}, nil
+}
diff --git a/deployment/common/changeset/example/link_transfer_test.go b/deployment/common/changeset/example/link_transfer_test.go
new file mode 100644
index 00000000000..eecfbd37c95
--- /dev/null
+++ b/deployment/common/changeset/example/link_transfer_test.go
@@ -0,0 +1,373 @@
+package example_test
+
+import (
+ "context"
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ chain_selectors "github.com/smartcontractkit/chain-selectors"
+
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset/example"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/types"
+ "github.com/smartcontractkit/chainlink/deployment/environment/memory"
+)
+
+// setupLinkTransferTestEnv deploys all required contracts for the link transfer tests and returns the updated env.
+func setupLinkTransferTestEnv(t *testing.T) deployment.Environment {
+
+ lggr := logger.TestLogger(t)
+ cfg := memory.MemoryEnvironmentConfig{
+ Nodes: 1,
+ Chains: 2,
+ }
+ env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg)
+ chainSelector := env.AllChainSelectors()[0]
+ config := proposalutils.SingleGroupMCMS(t)
+
+ // Deploy LINK token, MCMS and Timelock contracts
+ env, err := changeset.ApplyChangesets(t, env, nil, []changeset.ChangesetApplication{
+ {
+ Changeset: changeset.WrapChangeSet(changeset.DeployLinkToken),
+ Config: []uint64{chainSelector},
+ },
+ {
+ Changeset: changeset.WrapChangeSet(changeset.DeployMCMSWithTimelock),
+ Config: map[uint64]types.MCMSWithTimelockConfig{
+ chainSelector: {
+ Canceller: config,
+ Bypasser: config,
+ Proposer: config,
+ TimelockMinDelay: big.NewInt(0),
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+ return env
+}
+
+// TestLinkTransferMCMS tests the LinkTransfer changeset by sending LINK from a timelock contract
+// to the deployer key via an MCMS proposal.
+func TestLinkTransferMCMS(t *testing.T) {
+ t.Parallel()
+ ctx := context.Background()
+
+ env := setupLinkTransferTestEnv(t)
+ chainSelector := env.AllChainSelectors()[0]
+ chain := env.Chains[chainSelector]
+ addrs, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ require.NoError(t, err)
+ require.Len(t, addrs, 6)
+
+ mcmsState, err := changeset.MaybeLoadMCMSWithTimelockChainState(chain, addrs)
+ require.NoError(t, err)
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addrs)
+ require.NoError(t, err)
+ timelockAddress := mcmsState.Timelock.Address()
+
+ // Mint some funds
+ // grant minter permissions
+ tx, err := linkState.LinkToken.GrantMintRole(chain.DeployerKey, chain.DeployerKey.From)
+ require.NoError(t, err)
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ require.NoError(t, err)
+
+ tx, err = linkState.LinkToken.Mint(chain.DeployerKey, timelockAddress, big.NewInt(750))
+ require.NoError(t, err)
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ require.NoError(t, err)
+
+ timelocks := map[uint64]*proposalutils.TimelockExecutionContracts{
+ chainSelector: {
+ Timelock: mcmsState.Timelock,
+ CallProxy: mcmsState.CallProxy,
+ },
+ }
+ // Apply the changeset
+ _, err = changeset.ApplyChangesets(t, env, timelocks, []changeset.ChangesetApplication{
+ // the changeset produces proposals, ApplyChangesets will sign & execute them.
+ // in practice, signing and executing are separated processes.
+ {
+ Changeset: changeset.WrapChangeSet(example.LinkTransfer),
+ Config: &example.LinkTransferConfig{
+ From: timelockAddress,
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {
+ {
+ To: chain.DeployerKey.From,
+ Value: big.NewInt(500),
+ },
+ },
+ },
+ McmsConfig: &example.MCMSConfig{
+ MinDelay: 0,
+ OverrideRoot: true,
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ // Check new balances
+ endBalance, err := linkState.LinkToken.BalanceOf(&bind.CallOpts{Context: ctx}, chain.DeployerKey.From)
+ require.NoError(t, err)
+ expectedBalance := big.NewInt(500)
+ require.Equal(t, expectedBalance, endBalance)
+
+ // check timelock balance
+ endBalance, err = linkState.LinkToken.BalanceOf(&bind.CallOpts{Context: ctx}, timelockAddress)
+ require.NoError(t, err)
+ expectedBalance = big.NewInt(250)
+ require.Equal(t, expectedBalance, endBalance)
+}
+
+// TestLinkTransfer tests the LinkTransfer changeset by sending LINK from the deployer key to a timelock contract without MCMS.
+func TestLinkTransfer(t *testing.T) {
+ t.Parallel()
+ ctx := context.Background()
+
+ env := setupLinkTransferTestEnv(t)
+ chainSelector := env.AllChainSelectors()[0]
+ chain := env.Chains[chainSelector]
+ addrs, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ require.NoError(t, err)
+ require.Len(t, addrs, 6)
+
+ mcmsState, err := changeset.MaybeLoadMCMSWithTimelockChainState(chain, addrs)
+ require.NoError(t, err)
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addrs)
+ require.NoError(t, err)
+ timelockAddress := mcmsState.Timelock.Address()
+
+ // Mint some funds
+ // grant minter permissions
+ tx, err := linkState.LinkToken.GrantMintRole(chain.DeployerKey, chain.DeployerKey.From)
+ require.NoError(t, err)
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ require.NoError(t, err)
+
+ tx, err = linkState.LinkToken.Mint(chain.DeployerKey, chain.DeployerKey.From, big.NewInt(750))
+ require.NoError(t, err)
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ require.NoError(t, err)
+
+ timelocks := map[uint64]*proposalutils.TimelockExecutionContracts{
+ chainSelector: {
+ Timelock: mcmsState.Timelock,
+ CallProxy: mcmsState.CallProxy,
+ },
+ }
+
+ // Apply the changeset
+ _, err = changeset.ApplyChangesets(t, env, timelocks, []changeset.ChangesetApplication{
+ // without McmsConfig the changeset executes the transfers directly, so no proposals are produced.
+ {
+ Changeset: changeset.WrapChangeSet(example.LinkTransfer),
+ Config: &example.LinkTransferConfig{
+ From: chain.DeployerKey.From,
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {
+ {
+ To: timelockAddress,
+ Value: big.NewInt(500),
+ },
+ },
+ },
+ // No MCMSConfig here means we'll execute the txs directly.
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ // Check new balances
+ endBalance, err := linkState.LinkToken.BalanceOf(&bind.CallOpts{Context: ctx}, chain.DeployerKey.From)
+ require.NoError(t, err)
+ expectedBalance := big.NewInt(250)
+ require.Equal(t, expectedBalance, endBalance)
+
+ // check timelock balance
+ endBalance, err = linkState.LinkToken.BalanceOf(&bind.CallOpts{Context: ctx}, timelockAddress)
+ require.NoError(t, err)
+ expectedBalance = big.NewInt(500)
+ require.Equal(t, expectedBalance, endBalance)
+}
+
+func TestValidate(t *testing.T) {
+ env := setupLinkTransferTestEnv(t)
+ chainSelector := env.AllChainSelectors()[0]
+ chain := env.Chains[chainSelector]
+ addrs, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ require.NoError(t, err)
+ require.Len(t, addrs, 6)
+ mcmsState, err := changeset.MaybeLoadMCMSWithTimelockChainState(chain, addrs)
+ require.NoError(t, err)
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addrs)
+ require.NoError(t, err)
+ tx, err := linkState.LinkToken.GrantMintRole(chain.DeployerKey, chain.DeployerKey.From)
+ require.NoError(t, err)
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ require.NoError(t, err)
+ tx, err = linkState.LinkToken.Mint(chain.DeployerKey, chain.DeployerKey.From, big.NewInt(750))
+ require.NoError(t, err)
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ require.NoError(t, err)
+ tests := []struct {
+ name string
+ cfg example.LinkTransferConfig
+ errorMsg string
+ }{
+ {
+ name: "valid config",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {{To: mcmsState.Timelock.Address(), Value: big.NewInt(100)}}},
+ From: chain.DeployerKey.From,
+ McmsConfig: &example.MCMSConfig{
+ MinDelay: time.Hour,
+ },
+ },
+ },
+ {
+ name: "valid non mcms config",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {{To: mcmsState.Timelock.Address(), Value: big.NewInt(100)}}},
+ From: chain.DeployerKey.From,
+ },
+ },
+ {
+ name: "insufficient funds",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {
+ {To: chain.DeployerKey.From, Value: big.NewInt(100)},
+ {To: chain.DeployerKey.From, Value: big.NewInt(500)},
+ {To: chain.DeployerKey.From, Value: big.NewInt(1250)},
+ },
+ },
+ From: mcmsState.Timelock.Address(),
+ McmsConfig: &example.MCMSConfig{
+ MinDelay: time.Hour,
+ },
+ },
+ errorMsg: "sender does not have enough funds for transfers for chain selector 909606746561742123, required: 1850, available: 0",
+ },
+ {
+ name: "invalid config: empty transfers",
+ cfg: example.LinkTransferConfig{Transfers: map[uint64][]example.TransferConfig{}},
+ errorMsg: "transfers map must have at least one chainSel",
+ },
+ {
+ name: "invalid chain selector",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ 1: {{To: common.Address{}, Value: big.NewInt(100)}}},
+ },
+ errorMsg: "invalid chain selector: unknown chain selector 1",
+ },
+ {
+ name: "chain selector not found",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chain_selectors.ETHEREUM_TESTNET_GOERLI_ARBITRUM_1.Selector: {{To: common.Address{}, Value: big.NewInt(100)}}},
+ },
+ errorMsg: "chain with selector 6101244977088475029 not found",
+ },
+ {
+ name: "empty transfer list",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {},
+ },
+ },
+ errorMsg: "transfers for chainSel 909606746561742123 must have at least one LinkTransfer",
+ },
+ {
+ name: "empty value",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {
+ {To: chain.DeployerKey.From, Value: nil},
+ },
+ },
+ },
+ errorMsg: "value for transfers must be set",
+ },
+ {
+ name: "zero value",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {
+ {To: chain.DeployerKey.From, Value: big.NewInt(0)},
+ },
+ },
+ },
+ errorMsg: "value for transfers must be non-zero",
+ },
+ {
+ name: "negative value",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {
+ {To: chain.DeployerKey.From, Value: big.NewInt(-5)},
+ },
+ },
+ },
+ errorMsg: "value for transfers must be positive",
+ },
+ {
+ name: "non-evm-chain",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chain_selectors.APTOS_MAINNET.Selector: {{To: mcmsState.Timelock.Address(), Value: big.NewInt(100)}}},
+ From: chain.DeployerKey.From,
+ },
+ errorMsg: "chain selector 4741433654826277614 is not an EVM chain",
+ },
+ {
+ name: "delay greater than max allowed",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {{To: mcmsState.Timelock.Address(), Value: big.NewInt(100)}}},
+ From: chain.DeployerKey.From,
+ McmsConfig: &example.MCMSConfig{
+ MinDelay: time.Hour * 24 * 10,
+ },
+ },
+ errorMsg: "minDelay must be less than 7 days",
+ },
+ {
+ name: "invalid config: transfer to address missing",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {{To: common.Address{}, Value: big.NewInt(100)}}},
+ },
+ errorMsg: "'to' address for transfers must be set",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.cfg.Validate(env)
+ if tt.errorMsg != "" {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), tt.errorMsg)
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
+}
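The inline comment in TestLinkTransfer notes that signing and executing are separate processes in practice. Below is a minimal sketch of that split, assuming the same package, imports and helpers as the test file above, and assuming the timelock already holds enough LINK (e.g. minted via the MintLink changeset introduced below); the function and variable names are illustrative, not part of this change.

func linkTransferSignThenExecuteSketch(t *testing.T) {
	env := setupLinkTransferTestEnv(t)
	chainSelector := env.AllChainSelectors()[0]
	chain := env.Chains[chainSelector]
	addrs, err := env.ExistingAddresses.AddressesForChain(chainSelector)
	require.NoError(t, err)
	mcmsState, err := changeset.MaybeLoadMCMSWithTimelockChainState(chain, addrs)
	require.NoError(t, err)

	// Running the changeset directly with an MCMSConfig produces proposals
	// instead of sending transactions.
	out, err := example.LinkTransfer(env, &example.LinkTransferConfig{
		From: mcmsState.Timelock.Address(),
		Transfers: map[uint64][]example.TransferConfig{
			chainSelector: {{To: chain.DeployerKey.From, Value: big.NewInt(100)}},
		},
		McmsConfig: &example.MCMSConfig{MinDelay: 0},
	})
	require.NoError(t, err)

	for i := range out.Proposals {
		// Step 1: signing, normally done separately by the MCMS signers.
		signed := proposalutils.SignProposal(t, env, &out.Proposals[i])
		// Step 2: execution through the timelock and its call proxy.
		proposalutils.ExecuteProposal(t, env, signed, &proposalutils.TimelockExecutionContracts{
			Timelock:  mcmsState.Timelock,
			CallProxy: mcmsState.CallProxy,
		}, chainSelector)
	}
}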
diff --git a/deployment/common/changeset/example/mint_link.go b/deployment/common/changeset/example/mint_link.go
new file mode 100644
index 00000000000..dc50f8a1a27
--- /dev/null
+++ b/deployment/common/changeset/example/mint_link.go
@@ -0,0 +1,43 @@
+package example
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+)
+
+type MintLinkConfig struct {
+ Amount *big.Int
+ ChainSelector uint64
+ To common.Address
+}
+
+var _ deployment.ChangeSet[*MintLinkConfig] = MintLink
+
+// MintLink mints LINK to the provided address.
+func MintLink(e deployment.Environment, cfg *MintLinkConfig) (deployment.ChangesetOutput, error) {
+ chain := e.Chains[cfg.ChainSelector]
+ addresses, err := e.ExistingAddresses.AddressesForChain(cfg.ChainSelector)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addresses)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ tx, err := linkState.LinkToken.Mint(chain.DeployerKey, cfg.To, cfg.Amount)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ return deployment.ChangesetOutput{}, nil
+}
diff --git a/deployment/common/changeset/example/mint_link_test.go b/deployment/common/changeset/example/mint_link_test.go
new file mode 100644
index 00000000000..1c60c3221de
--- /dev/null
+++ b/deployment/common/changeset/example/mint_link_test.go
@@ -0,0 +1,58 @@
+package example_test
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset/example"
+)
+
+// TestMintLink tests the MintLink changeset.
+func TestMintLink(t *testing.T) {
+ t.Parallel()
+ env := setupLinkTransferTestEnv(t)
+ ctx := env.GetContext()
+ chainSelector := env.AllChainSelectors()[0]
+ chain := env.Chains[chainSelector]
+
+ addrs, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ require.NoError(t, err)
+ require.Len(t, addrs, 6)
+
+ mcmsState, err := changeset.MaybeLoadMCMSWithTimelockChainState(chain, addrs)
+ require.NoError(t, err)
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addrs)
+ require.NoError(t, err)
+
+ _, err = changeset.ApplyChangesets(t, env, nil, []changeset.ChangesetApplication{
+ {
+ Changeset: changeset.WrapChangeSet(example.AddMintersBurnersLink),
+ Config: &example.AddMintersBurnersLinkConfig{
+ ChainSelector: chainSelector,
+ Minters: []common.Address{chain.DeployerKey.From},
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ timelockAddress := mcmsState.Timelock.Address()
+
+ // Mint some funds
+ _, err = example.MintLink(env, &example.MintLinkConfig{
+ ChainSelector: chainSelector,
+ To: timelockAddress,
+ Amount: big.NewInt(7568),
+ })
+ require.NoError(t, err)
+
+ // check timelock balance
+ endBalance, err := linkState.LinkToken.BalanceOf(&bind.CallOpts{Context: ctx}, timelockAddress)
+ require.NoError(t, err)
+ expectedBalance := big.NewInt(7568)
+ require.Equal(t, expectedBalance, endBalance)
+}
diff --git a/deployment/common/changeset/internal/mcms_test.go b/deployment/common/changeset/internal/mcms_test.go
index 10fb1d980de..ff013717d30 100644
--- a/deployment/common/changeset/internal/mcms_test.go
+++ b/deployment/common/changeset/internal/mcms_test.go
@@ -2,7 +2,6 @@ package internal_test
import (
"encoding/json"
- "math/big"
"testing"
chainsel "github.com/smartcontractkit/chain-selectors"
@@ -11,6 +10,7 @@ import (
"github.com/smartcontractkit/chainlink/deployment"
"github.com/smartcontractkit/chainlink/deployment/common/changeset"
"github.com/smartcontractkit/chainlink/deployment/common/changeset/internal"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -23,7 +23,7 @@ func TestDeployMCMSWithConfig(t *testing.T) {
})
ab := deployment.NewMemoryAddressBook()
_, err := internal.DeployMCMSWithConfig(types.ProposerManyChainMultisig,
- lggr, chains[chainsel.TEST_90000001.Selector], ab, changeset.SingleGroupMCMS(t))
+ lggr, chains[chainsel.TEST_90000001.Selector], ab, proposalutils.SingleGroupMCMS(t))
require.NoError(t, err)
}
@@ -35,17 +35,12 @@ func TestDeployMCMSWithTimelockContracts(t *testing.T) {
ab := deployment.NewMemoryAddressBook()
_, err := internal.DeployMCMSWithTimelockContracts(lggr,
chains[chainsel.TEST_90000001.Selector],
- ab, types.MCMSWithTimelockConfig{
- Canceller: changeset.SingleGroupMCMS(t),
- Bypasser: changeset.SingleGroupMCMS(t),
- Proposer: changeset.SingleGroupMCMS(t),
- TimelockMinDelay: big.NewInt(0),
- })
+ ab, proposalutils.SingleGroupTimelockConfig(t))
require.NoError(t, err)
addresses, err := ab.AddressesForChain(chainsel.TEST_90000001.Selector)
require.NoError(t, err)
require.Len(t, addresses, 5)
- mcmsState, err := changeset.MaybeLoadMCMSWithTimelockState(chains[chainsel.TEST_90000001.Selector], addresses)
+ mcmsState, err := changeset.MaybeLoadMCMSWithTimelockChainState(chains[chainsel.TEST_90000001.Selector], addresses)
require.NoError(t, err)
v, err := mcmsState.GenerateMCMSWithTimelockView()
b, err := json.MarshalIndent(v, "", " ")
diff --git a/deployment/common/changeset/state.go b/deployment/common/changeset/state.go
index a580c13b40b..0db34abad71 100644
--- a/deployment/common/changeset/state.go
+++ b/deployment/common/changeset/state.go
@@ -8,6 +8,7 @@ import (
owner_helpers "github.com/smartcontractkit/ccip-owner-contracts/pkg/gethwrappers"
"github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/deployment/common/view/v1_0"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface"
@@ -19,32 +20,7 @@ import (
// It is public for use in product specific packages.
// Either all fields are nil or all fields are non-nil.
type MCMSWithTimelockState struct {
- CancellerMcm *owner_helpers.ManyChainMultiSig
- BypasserMcm *owner_helpers.ManyChainMultiSig
- ProposerMcm *owner_helpers.ManyChainMultiSig
- Timelock *owner_helpers.RBACTimelock
- CallProxy *owner_helpers.CallProxy
-}
-
-// Validate checks that all fields are non-nil, ensuring it's ready
-// for use generating views or interactions.
-func (state MCMSWithTimelockState) Validate() error {
- if state.Timelock == nil {
- return errors.New("timelock not found")
- }
- if state.CancellerMcm == nil {
- return errors.New("canceller not found")
- }
- if state.ProposerMcm == nil {
- return errors.New("proposer not found")
- }
- if state.BypasserMcm == nil {
- return errors.New("bypasser not found")
- }
- if state.CallProxy == nil {
- return errors.New("call proxy not found")
- }
- return nil
+ *proposalutils.MCMSWithTimelockContracts
}
func (state MCMSWithTimelockState) GenerateMCMSWithTimelockView() (v1_0.MCMSWithTimelockView, error) {
@@ -80,15 +56,38 @@ func (state MCMSWithTimelockState) GenerateMCMSWithTimelockView() (v1_0.MCMSWith
}, nil
}
-// MaybeLoadMCMSWithTimelockState looks for the addresses corresponding to
+// MaybeLoadMCMSWithTimelockState loads the MCMSWithTimelockState state for each chain in the given environment.
+func MaybeLoadMCMSWithTimelockState(env deployment.Environment, chainSelectors []uint64) (map[uint64]*MCMSWithTimelockState, error) {
+ result := map[uint64]*MCMSWithTimelockState{}
+ for _, chainSelector := range chainSelectors {
+ chain, ok := env.Chains[chainSelector]
+ if !ok {
+ return nil, fmt.Errorf("chain %d not found", chainSelector)
+ }
+ addressesChain, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ if err != nil {
+ return nil, err
+ }
+ state, err := MaybeLoadMCMSWithTimelockChainState(chain, addressesChain)
+ if err != nil {
+ return nil, err
+ }
+ result[chainSelector] = state
+ }
+ return result, nil
+}
+
+// MaybeLoadMCMSWithTimelockChainState looks for the addresses corresponding to
// contracts deployed with DeployMCMSWithTimelock and loads them into a
// MCMSWithTimelockState struct. If none of the contracts are found, the state struct will be nil.
// An error indicates:
// - Found but was unable to load a contract
// - It only found part of the bundle of contracts
// - If found more than one instance of a contract (we expect one bundle in the given addresses)
-func MaybeLoadMCMSWithTimelockState(chain deployment.Chain, addresses map[string]deployment.TypeAndVersion) (*MCMSWithTimelockState, error) {
- state := MCMSWithTimelockState{}
+func MaybeLoadMCMSWithTimelockChainState(chain deployment.Chain, addresses map[string]deployment.TypeAndVersion) (*MCMSWithTimelockState, error) {
+ state := MCMSWithTimelockState{
+ MCMSWithTimelockContracts: &proposalutils.MCMSWithTimelockContracts{},
+ }
// We expect one of each contract on the chain.
timelock := deployment.NewTypeAndVersion(types.RBACTimelock, deployment.Version1_0_0)
callProxy := deployment.NewTypeAndVersion(types.CallProxy, deployment.Version1_0_0)
@@ -153,7 +152,28 @@ func (s LinkTokenState) GenerateLinkView() (v1_0.LinkTokenView, error) {
return v1_0.GenerateLinkTokenView(s.LinkToken)
}
-func MaybeLoadLinkTokenState(chain deployment.Chain, addresses map[string]deployment.TypeAndVersion) (*LinkTokenState, error) {
+// MaybeLoadLinkTokenState loads the LinkTokenState state for each chain in the given environment.
+func MaybeLoadLinkTokenState(env deployment.Environment, chainSelectors []uint64) (map[uint64]*LinkTokenState, error) {
+ result := map[uint64]*LinkTokenState{}
+ for _, chainSelector := range chainSelectors {
+ chain, ok := env.Chains[chainSelector]
+ if !ok {
+ return nil, fmt.Errorf("chain %d not found", chainSelector)
+ }
+ addressesChain, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ if err != nil {
+ return nil, err
+ }
+ state, err := MaybeLoadLinkTokenChainState(chain, addressesChain)
+ if err != nil {
+ return nil, err
+ }
+ result[chainSelector] = state
+ }
+ return result, nil
+}
+
+func MaybeLoadLinkTokenChainState(chain deployment.Chain, addresses map[string]deployment.TypeAndVersion) (*LinkTokenState, error) {
state := LinkTokenState{}
linkToken := deployment.NewTypeAndVersion(types.LinkToken, deployment.Version1_0_0)
// Perhaps revisit if we have a use case for multiple.
diff --git a/deployment/common/changeset/test_helpers.go b/deployment/common/changeset/test_helpers.go
index 8fce5ea79f2..e92b36e5b55 100644
--- a/deployment/common/changeset/test_helpers.go
+++ b/deployment/common/changeset/test_helpers.go
@@ -9,6 +9,7 @@ import (
"github.com/smartcontractkit/chainlink-testing-framework/lib/utils/testcontext"
"github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
)
type ChangesetApplication struct {
@@ -32,7 +33,7 @@ func WrapChangeSet[C any](fn deployment.ChangeSet[C]) func(e deployment.Environm
}
// ApplyChangesets applies the changeset applications to the environment and returns the updated environment.
-func ApplyChangesets(t *testing.T, e deployment.Environment, timelockContractsPerChain map[uint64]*TimelockExecutionContracts, changesetApplications []ChangesetApplication) (deployment.Environment, error) {
+func ApplyChangesets(t *testing.T, e deployment.Environment, timelockContractsPerChain map[uint64]*proposalutils.TimelockExecutionContracts, changesetApplications []ChangesetApplication) (deployment.Environment, error) {
currentEnv := e
for i, csa := range changesetApplications {
out, err := csa.Changeset(currentEnv, csa.Config)
@@ -72,14 +73,14 @@ func ApplyChangesets(t *testing.T, e deployment.Environment, timelockContractsPe
chains.Add(uint64(op.ChainIdentifier))
}
- signed := SignProposal(t, e, &prop)
+ signed := proposalutils.SignProposal(t, e, &prop)
for _, sel := range chains.ToSlice() {
timelockContracts, ok := timelockContractsPerChain[sel]
if !ok || timelockContracts == nil {
return deployment.Environment{}, fmt.Errorf("timelock contracts not found for chain %d", sel)
}
- ExecuteProposal(t, e, signed, timelockContracts, sel)
+ proposalutils.ExecuteProposal(t, e, signed, timelockContracts, sel)
}
}
}
@@ -91,6 +92,7 @@ func ApplyChangesets(t *testing.T, e deployment.Environment, timelockContractsPe
NodeIDs: e.NodeIDs,
Offchain: e.Offchain,
OCRSecrets: e.OCRSecrets,
+ GetContext: e.GetContext,
}
}
return currentEnv, nil
diff --git a/deployment/common/changeset/transfer_to_mcms_with_timelock.go b/deployment/common/changeset/transfer_to_mcms_with_timelock.go
index e48d29af92b..1980dfaa378 100644
--- a/deployment/common/changeset/transfer_to_mcms_with_timelock.go
+++ b/deployment/common/changeset/transfer_to_mcms_with_timelock.go
@@ -1,6 +1,7 @@
package changeset
import (
+ "encoding/binary"
"fmt"
"math/big"
"time"
@@ -142,3 +143,71 @@ func TransferToMCMSWithTimelock(
return deployment.ChangesetOutput{Proposals: []timelock.MCMSWithTimelockProposal{*proposal}}, nil
}
+
+var _ deployment.ChangeSet[TransferToDeployerConfig] = TransferToDeployer
+
+type TransferToDeployerConfig struct {
+ ContractAddress common.Address
+ ChainSel uint64
+}
+
+// TransferToDeployer relies on the deployer key
+// still being a timelock admin and transfers the ownership of a contract
+// back to the deployer key. It's effectively the rollback function of transferring
+// to the timelock.
+func TransferToDeployer(e deployment.Environment, cfg TransferToDeployerConfig) (deployment.ChangesetOutput, error) {
+ owner, ownable, err := LoadOwnableContract(cfg.ContractAddress, e.Chains[cfg.ChainSel].Client)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ if owner == e.Chains[cfg.ChainSel].DeployerKey.From {
+ e.Logger.Infof("Contract %s already owned by deployer", cfg.ContractAddress)
+ return deployment.ChangesetOutput{}, nil
+ }
+ tx, err := ownable.TransferOwnership(deployment.SimTransactOpts(), e.Chains[cfg.ChainSel].DeployerKey.From)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ addrs, err := e.ExistingAddresses.AddressesForChain(cfg.ChainSel)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ tls, err := MaybeLoadMCMSWithTimelockChainState(e.Chains[cfg.ChainSel], addrs)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ calls := []owner_helpers.RBACTimelockCall{
+ {
+ Target: ownable.Address(),
+ Data: tx.Data(),
+ Value: big.NewInt(0),
+ },
+ }
+ var salt [32]byte
+ binary.BigEndian.PutUint32(salt[:], uint32(time.Now().Unix()))
+ tx, err = tls.Timelock.ScheduleBatch(e.Chains[cfg.ChainSel].DeployerKey, calls, [32]byte{}, salt, big.NewInt(0))
+ if _, err = deployment.ConfirmIfNoError(e.Chains[cfg.ChainSel], tx, err); err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ e.Logger.Infof("scheduled transfer ownership batch with tx %s", tx.Hash().Hex())
+ timelockExecutorProxy, err := owner_helpers.NewRBACTimelock(tls.CallProxy.Address(), e.Chains[cfg.ChainSel].Client)
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("error creating timelock executor proxy: %w", err)
+ }
+ tx, err = timelockExecutorProxy.ExecuteBatch(
+ e.Chains[cfg.ChainSel].DeployerKey, calls, [32]byte{}, salt)
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("error executing batch: %w", err)
+ }
+ if _, err = deployment.ConfirmIfNoError(e.Chains[cfg.ChainSel], tx, err); err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ e.Logger.Infof("executed transfer ownership to deployer key with tx %s", tx.Hash().Hex())
+
+ tx, err = ownable.AcceptOwnership(e.Chains[cfg.ChainSel].DeployerKey)
+ if _, err = deployment.ConfirmIfNoError(e.Chains[cfg.ChainSel], tx, err); err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ e.Logger.Infof("deployer key accepted ownership tx %s", tx.Hash().Hex())
+ return deployment.ChangesetOutput{}, nil
+}
diff --git a/deployment/common/changeset/transfer_to_mcms_with_timelock_test.go b/deployment/common/changeset/transfer_to_mcms_with_timelock_test.go
index 6c68924b35e..daf4309398f 100644
--- a/deployment/common/changeset/transfer_to_mcms_with_timelock_test.go
+++ b/deployment/common/changeset/transfer_to_mcms_with_timelock_test.go
@@ -6,8 +6,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
- "math/big"
-
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -28,23 +27,18 @@ func TestTransferToMCMSWithTimelock(t *testing.T) {
{
Changeset: WrapChangeSet(DeployMCMSWithTimelock),
Config: map[uint64]types.MCMSWithTimelockConfig{
- chain1: {
- Canceller: SingleGroupMCMS(t),
- Bypasser: SingleGroupMCMS(t),
- Proposer: SingleGroupMCMS(t),
- TimelockMinDelay: big.NewInt(0),
- },
+ chain1: proposalutils.SingleGroupTimelockConfig(t),
},
},
})
require.NoError(t, err)
addrs, err := e.ExistingAddresses.AddressesForChain(chain1)
require.NoError(t, err)
- state, err := MaybeLoadMCMSWithTimelockState(e.Chains[chain1], addrs)
+ state, err := MaybeLoadMCMSWithTimelockChainState(e.Chains[chain1], addrs)
require.NoError(t, err)
- link, err := MaybeLoadLinkTokenState(e.Chains[chain1], addrs)
+ link, err := MaybeLoadLinkTokenChainState(e.Chains[chain1], addrs)
require.NoError(t, err)
- e, err = ApplyChangesets(t, e, map[uint64]*TimelockExecutionContracts{
+ e, err = ApplyChangesets(t, e, map[uint64]*proposalutils.TimelockExecutionContracts{
chain1: {
Timelock: state.Timelock,
CallProxy: state.CallProxy,
@@ -62,9 +56,25 @@ func TestTransferToMCMSWithTimelock(t *testing.T) {
})
require.NoError(t, err)
// We expect now that the link token is owned by the MCMS timelock.
- link, err = MaybeLoadLinkTokenState(e.Chains[chain1], addrs)
+ link, err = MaybeLoadLinkTokenChainState(e.Chains[chain1], addrs)
require.NoError(t, err)
o, err := link.LinkToken.Owner(nil)
require.NoError(t, err)
require.Equal(t, state.Timelock.Address(), o)
+
+ // Try a rollback to the deployer.
+ e, err = ApplyChangesets(t, e, nil, []ChangesetApplication{
+ {
+ Changeset: WrapChangeSet(TransferToDeployer),
+ Config: TransferToDeployerConfig{
+ ContractAddress: link.LinkToken.Address(),
+ ChainSel: chain1,
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ o, err = link.LinkToken.Owner(nil)
+ require.NoError(t, err)
+ require.Equal(t, e.Chains[chain1].DeployerKey.From, o)
}
diff --git a/deployment/common/proposalutils/mcms_helpers.go b/deployment/common/proposalutils/mcms_helpers.go
new file mode 100644
index 00000000000..51a720a4389
--- /dev/null
+++ b/deployment/common/proposalutils/mcms_helpers.go
@@ -0,0 +1,275 @@
+package proposalutils
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ owner_helpers "github.com/smartcontractkit/ccip-owner-contracts/pkg/gethwrappers"
+ "github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/mcms"
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/common/types"
+)
+
+// TimelockExecutionContracts is a helper struct for executing timelock proposals. It contains
+// the timelock and call proxy contracts.
+type TimelockExecutionContracts struct {
+ Timelock *owner_helpers.RBACTimelock
+ CallProxy *owner_helpers.CallProxy
+}
+
+// NewTimelockExecutionContracts creates a new TimelockExecutionContracts struct.
+// If there are multiple timelocks or call proxies on the chain, an error is returned.
+// If either the timelock or the call proxy is missing on the chain, an error is returned.
+func NewTimelockExecutionContracts(env deployment.Environment, chainSelector uint64) (*TimelockExecutionContracts, error) {
+ addrTypeVer, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ if err != nil {
+ return nil, fmt.Errorf("error getting addresses for chain: %w", err)
+ }
+ var timelock *owner_helpers.RBACTimelock
+ var callProxy *owner_helpers.CallProxy
+ for addr, tv := range addrTypeVer {
+ if tv.Type == types.RBACTimelock {
+ if timelock != nil {
+ return nil, fmt.Errorf("multiple timelocks found on chain %d", chainSelector)
+ }
+ var err error
+ timelock, err = owner_helpers.NewRBACTimelock(common.HexToAddress(addr), env.Chains[chainSelector].Client)
+ if err != nil {
+ return nil, fmt.Errorf("error creating timelock: %w", err)
+ }
+ }
+ if tv.Type == types.CallProxy {
+ if callProxy != nil {
+ return nil, fmt.Errorf("multiple call proxies found on chain %d", chainSelector)
+ }
+ var err error
+ callProxy, err = owner_helpers.NewCallProxy(common.HexToAddress(addr), env.Chains[chainSelector].Client)
+ if err != nil {
+ return nil, fmt.Errorf("error creating call proxy: %w", err)
+ }
+ }
+ }
+ if timelock == nil || callProxy == nil {
+ return nil, fmt.Errorf("missing timelock (%T) or call proxy(%T) on chain %d", timelock == nil, callProxy == nil, chainSelector)
+ }
+ return &TimelockExecutionContracts{
+ Timelock: timelock,
+ CallProxy: callProxy,
+ }, nil
+}
+
+type RunTimelockExecutorConfig struct {
+ Executor *mcms.Executor
+ TimelockContracts *TimelockExecutionContracts
+ ChainSelector uint64
+ // BlockStart is optional. It filters the timelock CallScheduled events.
+ // If not provided, the executor assumes that the operations have not been executed yet
+ // and executes all the operations for the given chain.
+ BlockStart *uint64
+ BlockEnd *uint64
+}
+
+func (cfg RunTimelockExecutorConfig) Validate() error {
+ if cfg.Executor == nil {
+ return fmt.Errorf("executor is nil")
+ }
+ if cfg.TimelockContracts == nil {
+ return fmt.Errorf("timelock contracts is nil")
+ }
+ if cfg.ChainSelector == 0 {
+ return fmt.Errorf("chain selector is 0")
+ }
+ if cfg.BlockStart != nil && cfg.BlockEnd == nil {
+ if *cfg.BlockStart > *cfg.BlockEnd {
+ return fmt.Errorf("block start is greater than block end")
+ }
+ }
+ if cfg.BlockStart == nil && cfg.BlockEnd != nil {
+ return fmt.Errorf("block start must not be nil when block end is not nil")
+ }
+
+ if len(cfg.Executor.Operations[mcms.ChainIdentifier(cfg.ChainSelector)]) == 0 {
+ return fmt.Errorf("no operations for chain %d", cfg.ChainSelector)
+ }
+ return nil
+}
+
+// RunTimelockExecutor runs the scheduled operations for the given chain.
+// If the block start is not provided, it assumes that the operations have not been scheduled yet
+// and executes all the operations for the given chain.
+// It is an error if there are no operations for the given chain.
+func RunTimelockExecutor(env deployment.Environment, cfg RunTimelockExecutorConfig) error {
+ // TODO: This sort of helper probably should move to the MCMS lib.
+ // Execute all the transactions in the proposal which are for this chain.
+ if err := cfg.Validate(); err != nil {
+ return fmt.Errorf("error validating config: %w", err)
+ }
+ for _, chainOp := range cfg.Executor.Operations[mcms.ChainIdentifier(cfg.ChainSelector)] {
+ for idx, op := range cfg.Executor.ChainAgnosticOps {
+ start := cfg.BlockStart
+ end := cfg.BlockEnd
+ if bytes.Equal(op.Data, chainOp.Data) && op.To == chainOp.To {
+ if start == nil {
+ opTx, err2 := cfg.Executor.ExecuteOnChain(env.Chains[cfg.ChainSelector].Client, env.Chains[cfg.ChainSelector].DeployerKey, idx)
+ if err2 != nil {
+ return fmt.Errorf("error executing on chain: %w", err2)
+ }
+ block, err2 := env.Chains[cfg.ChainSelector].Confirm(opTx)
+ if err2 != nil {
+ return fmt.Errorf("error confirming on chain: %w", err2)
+ }
+ start = &block
+ end = &block
+ }
+
+ it, err2 := cfg.TimelockContracts.Timelock.FilterCallScheduled(&bind.FilterOpts{
+ Start: *start,
+ End: end,
+ Context: env.GetContext(),
+ }, nil, nil)
+ if err2 != nil {
+ return fmt.Errorf("error filtering call scheduled: %w", err2)
+ }
+ var calls []owner_helpers.RBACTimelockCall
+ var pred, salt [32]byte
+ for it.Next() {
+ // Note: these values are the same for the whole batch, so it is safe to overwrite them on each iteration.
+ pred = it.Event.Predecessor
+ salt = it.Event.Salt
+ verboseDebug(env.Logger, it.Event)
+ env.Logger.Info("scheduled", "event", it.Event)
+ calls = append(calls, owner_helpers.RBACTimelockCall{
+ Target: it.Event.Target,
+ Data: it.Event.Data,
+ Value: it.Event.Value,
+ })
+ }
+ if len(calls) == 0 {
+ return fmt.Errorf("no calls found for chain %d in blocks [%d, %d]", cfg.ChainSelector, *start, *end)
+ }
+ timelockExecutorProxy, err := owner_helpers.NewRBACTimelock(cfg.TimelockContracts.CallProxy.Address(), env.Chains[cfg.ChainSelector].Client)
+ if err != nil {
+ return fmt.Errorf("error creating timelock executor proxy: %w", err)
+ }
+ tx, err := timelockExecutorProxy.ExecuteBatch(
+ env.Chains[cfg.ChainSelector].DeployerKey, calls, pred, salt)
+ if err != nil {
+ return fmt.Errorf("error executing batch: %w", err)
+ }
+ _, err = env.Chains[cfg.ChainSelector].Confirm(tx)
+ if err != nil {
+ return fmt.Errorf("error confirming batch: %w", err)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func verboseDebug(lggr logger.Logger, event *owner_helpers.RBACTimelockCallScheduled) {
+ b, err := json.Marshal(event)
+ if err != nil {
+ panic(err)
+ }
+ lggr.Debug("scheduled", "event", string(b))
+}
+
+// MCMSWithTimelockContracts holds the Go bindings
+// for a MCMSWithTimelock contract deployment.
+// It is public for use in product specific packages.
+// Either all fields are nil or all fields are non-nil.
+type MCMSWithTimelockContracts struct {
+ CancellerMcm *owner_helpers.ManyChainMultiSig
+ BypasserMcm *owner_helpers.ManyChainMultiSig
+ ProposerMcm *owner_helpers.ManyChainMultiSig
+ Timelock *owner_helpers.RBACTimelock
+ CallProxy *owner_helpers.CallProxy
+}
+
+// Validate checks that all fields are non-nil, ensuring it's ready
+// for use generating views or interactions.
+func (state MCMSWithTimelockContracts) Validate() error {
+ if state.Timelock == nil {
+ return errors.New("timelock not found")
+ }
+ if state.CancellerMcm == nil {
+ return errors.New("canceller not found")
+ }
+ if state.ProposerMcm == nil {
+ return errors.New("proposer not found")
+ }
+ if state.BypasserMcm == nil {
+ return errors.New("bypasser not found")
+ }
+ if state.CallProxy == nil {
+ return errors.New("call proxy not found")
+ }
+ return nil
+}
+
+// MaybeLoadMCMSWithTimelockContracts looks for the addresses corresponding to
+// contracts deployed with DeployMCMSWithTimelock and loads them into a
+// MCMSWithTimelockContracts struct. If none of the contracts are found, the returned struct will be nil.
+// An error indicates:
+// - Found but was unable to load a contract
+// - It only found part of the bundle of contracts
+// - If found more than one instance of a contract (we expect one bundle in the given addresses)
+func MaybeLoadMCMSWithTimelockContracts(chain deployment.Chain, addresses map[string]deployment.TypeAndVersion) (*MCMSWithTimelockContracts, error) {
+ state := MCMSWithTimelockContracts{}
+ // We expect one of each contract on the chain.
+ timelock := deployment.NewTypeAndVersion(types.RBACTimelock, deployment.Version1_0_0)
+ callProxy := deployment.NewTypeAndVersion(types.CallProxy, deployment.Version1_0_0)
+ proposer := deployment.NewTypeAndVersion(types.ProposerManyChainMultisig, deployment.Version1_0_0)
+ canceller := deployment.NewTypeAndVersion(types.CancellerManyChainMultisig, deployment.Version1_0_0)
+ bypasser := deployment.NewTypeAndVersion(types.BypasserManyChainMultisig, deployment.Version1_0_0)
+
+ // Ensure we either have the bundle or not.
+ _, err := deployment.AddressesContainBundle(addresses,
+ map[deployment.TypeAndVersion]struct{}{
+ timelock: {}, proposer: {}, canceller: {}, bypasser: {}, callProxy: {},
+ })
+ if err != nil {
+ return nil, fmt.Errorf("unable to check MCMS contracts on chain %s error: %w", chain.Name(), err)
+ }
+
+ for address, tvStr := range addresses {
+ switch tvStr {
+ case timelock:
+ tl, err := owner_helpers.NewRBACTimelock(common.HexToAddress(address), chain.Client)
+ if err != nil {
+ return nil, err
+ }
+ state.Timelock = tl
+ case callProxy:
+ cp, err := owner_helpers.NewCallProxy(common.HexToAddress(address), chain.Client)
+ if err != nil {
+ return nil, err
+ }
+ state.CallProxy = cp
+ case proposer:
+ mcms, err := owner_helpers.NewManyChainMultiSig(common.HexToAddress(address), chain.Client)
+ if err != nil {
+ return nil, err
+ }
+ state.ProposerMcm = mcms
+ case bypasser:
+ mcms, err := owner_helpers.NewManyChainMultiSig(common.HexToAddress(address), chain.Client)
+ if err != nil {
+ return nil, err
+ }
+ state.BypasserMcm = mcms
+ case canceller:
+ mcms, err := owner_helpers.NewManyChainMultiSig(common.HexToAddress(address), chain.Client)
+ if err != nil {
+ return nil, err
+ }
+ state.CancellerMcm = mcms
+ }
+ }
+ return &state, nil
+}
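These helpers move the proposal-execution path out of test-only code so product packages can call it. A minimal sketch of executing a proposal on one chain follows, assuming the caller already holds the proposal's *mcms.Executor and that the proposal has been signed and its Merkle root set on the MCMS contracts; the package and function names are illustrative.

package exampleusage // illustrative package name

import (
	"fmt"

	"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/mcms"

	"github.com/smartcontractkit/chainlink/deployment"
	"github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
)

// executeProposalOnChain runs the timelock-scheduled operations of a signed proposal
// on a single chain. Leaving BlockStart/BlockEnd nil makes RunTimelockExecutor execute
// every operation of the proposal for that chain.
func executeProposalOnChain(env deployment.Environment, executor *mcms.Executor, chainSelector uint64) error {
	contracts, err := proposalutils.NewTimelockExecutionContracts(env, chainSelector)
	if err != nil {
		return fmt.Errorf("failed to load timelock execution contracts: %w", err)
	}
	return proposalutils.RunTimelockExecutor(env, proposalutils.RunTimelockExecutorConfig{
		Executor:          executor,
		TimelockContracts: contracts,
		ChainSelector:     chainSelector,
	})
}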
diff --git a/deployment/common/changeset/mcms_test_helpers.go b/deployment/common/proposalutils/mcms_test_helpers.go
similarity index 54%
rename from deployment/common/changeset/mcms_test_helpers.go
rename to deployment/common/proposalutils/mcms_test_helpers.go
index ffa99114d74..610fe84f34c 100644
--- a/deployment/common/changeset/mcms_test_helpers.go
+++ b/deployment/common/proposalutils/mcms_test_helpers.go
@@ -1,22 +1,21 @@
-package changeset
+package proposalutils
import (
- "bytes"
- "context"
"crypto/ecdsa"
+ "math/big"
"testing"
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/config"
- owner_helpers "github.com/smartcontractkit/ccip-owner-contracts/pkg/gethwrappers"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/mcms"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/timelock"
chainsel "github.com/smartcontractkit/chain-selectors"
"github.com/stretchr/testify/require"
"github.com/smartcontractkit/chainlink/deployment"
+ commontypes "github.com/smartcontractkit/chainlink/deployment/common/types"
+ // "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
)
var (
@@ -25,13 +24,6 @@ var (
TestXXXMCMSSigner *ecdsa.PrivateKey
)
-// TimelockExecutionContracts is a helper struct for executing timelock proposals. it contains
-// the timelock and call proxy contracts.
-type TimelockExecutionContracts struct {
- Timelock *owner_helpers.RBACTimelock
- CallProxy *owner_helpers.CallProxy
-}
-
func init() {
key, err := crypto.GenerateKey()
if err != nil {
@@ -79,45 +71,22 @@ func ExecuteProposal(t *testing.T, env deployment.Environment, executor *mcms.Ex
if err2 != nil {
require.NoError(t, deployment.MaybeDataErr(err2))
}
+
_, err2 = env.Chains[sel].Confirm(tx)
require.NoError(t, err2)
+ cfg := RunTimelockExecutorConfig{
+ Executor: executor,
+ TimelockContracts: timelockContracts,
+ ChainSelector: sel,
+ }
+ require.NoError(t, RunTimelockExecutor(env, cfg))
+}
- // TODO: This sort of helper probably should move to the MCMS lib.
- // Execute all the transactions in the proposal which are for this chain.
- for _, chainOp := range executor.Operations[mcms.ChainIdentifier(sel)] {
- for idx, op := range executor.ChainAgnosticOps {
- if bytes.Equal(op.Data, chainOp.Data) && op.To == chainOp.To {
- opTx, err3 := executor.ExecuteOnChain(env.Chains[sel].Client, env.Chains[sel].DeployerKey, idx)
- require.NoError(t, err3)
- block, err3 := env.Chains[sel].Confirm(opTx)
- require.NoError(t, err3)
- t.Log("executed", chainOp)
- it, err3 := timelockContracts.Timelock.FilterCallScheduled(&bind.FilterOpts{
- Start: block,
- End: &block,
- Context: context.Background(),
- }, nil, nil)
- require.NoError(t, err3)
- var calls []owner_helpers.RBACTimelockCall
- var pred, salt [32]byte
- for it.Next() {
- // Note these are the same for the whole batch, can overwrite
- pred = it.Event.Predecessor
- salt = it.Event.Salt
- t.Log("scheduled", it.Event)
- calls = append(calls, owner_helpers.RBACTimelockCall{
- Target: it.Event.Target,
- Data: it.Event.Data,
- Value: it.Event.Value,
- })
- }
- timelockExecutorProxy, err := owner_helpers.NewRBACTimelock(timelockContracts.CallProxy.Address(), env.Chains[sel].Client)
- tx, err := timelockExecutorProxy.ExecuteBatch(
- env.Chains[sel].DeployerKey, calls, pred, salt)
- require.NoError(t, err)
- _, err = env.Chains[sel].Confirm(tx)
- require.NoError(t, err)
- }
- }
+func SingleGroupTimelockConfig(t *testing.T) commontypes.MCMSWithTimelockConfig {
+ return commontypes.MCMSWithTimelockConfig{
+ Canceller: SingleGroupMCMS(t),
+ Bypasser: SingleGroupMCMS(t),
+ Proposer: SingleGroupMCMS(t),
+ TimelockMinDelay: big.NewInt(0),
}
}
diff --git a/deployment/common/proposalutils/propose.go b/deployment/common/proposalutils/propose.go
index f525c0b6643..32a5bcdfda2 100644
--- a/deployment/common/proposalutils/propose.go
+++ b/deployment/common/proposalutils/propose.go
@@ -11,7 +11,12 @@ import (
"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/timelock"
)
-func buildProposalMetadata(
+const (
+ DefaultValidUntil = 72 * time.Hour
+)
+
+func BuildProposalMetadata(
chainSelectors []uint64,
proposerMcmsesPerChain map[uint64]*gethwrappers.ManyChainMultiSig,
) (map[mcms.ChainIdentifier]mcms.ChainMetadata, error) {
@@ -52,7 +57,7 @@ func BuildProposalFromBatches(
chains.Add(uint64(op.ChainIdentifier))
}
- mcmsMd, err := buildProposalMetadata(chains.ToSlice(), proposerMcmsesPerChain)
+ mcmsMd, err := BuildProposalMetadata(chains.ToSlice(), proposerMcmsesPerChain)
if err != nil {
return nil, err
}
@@ -61,10 +66,10 @@ func BuildProposalFromBatches(
for chainId, tl := range timelocksPerChain {
tlsPerChainId[mcms.ChainIdentifier(chainId)] = tl
}
-
+ validUntil := time.Now().Unix() + int64(DefaultValidUntil.Seconds())
return timelock.NewMCMSWithTimelockProposal(
"1",
- 2004259681, // TODO: should be parameterized and based on current block timestamp.
+ uint32(validUntil),
[]mcms.Signature{},
false,
mcmsMd,
diff --git a/deployment/environment.go b/deployment/environment.go
index 3d120adbbf1..0823404da2d 100644
--- a/deployment/environment.go
+++ b/deployment/environment.go
@@ -95,6 +95,7 @@ type Environment struct {
Logger logger.Logger
ExistingAddresses AddressBook
Chains map[uint64]Chain
+ SolChains map[uint64]SolChain
NodeIDs []string
Offchain OffchainClient
GetContext func() context.Context
@@ -180,7 +181,7 @@ func MaybeDataErr(err error) error {
var d rpc.DataError
ok := errors.As(err, &d)
if ok {
- return d
+ return fmt.Errorf("%s: %v", d.Error(), d.ErrorData())
}
return err
}
@@ -331,7 +332,6 @@ func NodeInfo(nodeIDs []string, oc NodeChainConfigsLister) (Nodes, error) {
Enabled: 1,
Ids: nodeIDs,
}
-
}
nodesFromJD, err := oc.ListNodes(context.Background(), &nodev1.ListNodesRequest{
Filter: filter,
diff --git a/deployment/environment/crib/ccip_deployer.go b/deployment/environment/crib/ccip_deployer.go
new file mode 100644
index 00000000000..aea7ad0cb8f
--- /dev/null
+++ b/deployment/environment/crib/ccip_deployer.go
@@ -0,0 +1,136 @@
+package crib
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/smartcontractkit/ccip-owner-contracts/pkg/config"
+ commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ commontypes "github.com/smartcontractkit/chainlink/deployment/common/types"
+ "github.com/smartcontractkit/chainlink/deployment/environment/devenv"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay"
+ "math/big"
+
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/ccip/changeset"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+// DeployHomeChainContracts deploys the home chain contracts so that the chainlink nodes can be started with the
+// CapabilitiesRegistry address in Capabilities.ExternalRegistry. The intended flow is:
+//  1. Set up crib with chains and chainlink nodes (the capabilities registry is not deployed yet, so its address is not set in the node config).
+//  2. Call the DeployHomeChain changeset with the node info (peer IDs etc.).
+func DeployHomeChainContracts(ctx context.Context, lggr logger.Logger, envConfig devenv.EnvironmentConfig, homeChainSel uint64, feedChainSel uint64) (deployment.CapabilityRegistryConfig, deployment.AddressBook, error) {
+ e, _, err := devenv.NewEnvironment(func() context.Context { return ctx }, lggr, envConfig)
+ if err != nil {
+ return deployment.CapabilityRegistryConfig{}, nil, err
+ }
+ if e == nil {
+ return deployment.CapabilityRegistryConfig{}, nil, errors.New("environment is nil")
+ }
+
+ nodes, err := deployment.NodeInfo(e.NodeIDs, e.Offchain)
+ if err != nil {
+ return deployment.CapabilityRegistryConfig{}, e.ExistingAddresses, fmt.Errorf("failed to get node info from env: %w", err)
+ }
+ p2pIds := nodes.NonBootstraps().PeerIDs()
+ *e, err = commonchangeset.ApplyChangesets(nil, *e, nil, []commonchangeset.ChangesetApplication{
+ {
+ Changeset: commonchangeset.WrapChangeSet(changeset.DeployHomeChain),
+ Config: changeset.DeployHomeChainConfig{
+ HomeChainSel: homeChainSel,
+ RMNStaticConfig: changeset.NewTestRMNStaticConfig(),
+ RMNDynamicConfig: changeset.NewTestRMNDynamicConfig(),
+ NodeOperators: changeset.NewTestNodeOperator(e.Chains[homeChainSel].DeployerKey.From),
+ NodeP2PIDsPerNodeOpAdmin: map[string][][32]byte{
+ "NodeOperator": p2pIds,
+ },
+ },
+ },
+ })
+ if err != nil {
+ return deployment.CapabilityRegistryConfig{}, e.ExistingAddresses, fmt.Errorf("failed to apply changesets: %w", err)
+ }
+
+ state, err := changeset.LoadOnchainState(*e)
+ if err != nil {
+ return deployment.CapabilityRegistryConfig{}, e.ExistingAddresses, fmt.Errorf("failed to load on chain state: %w", err)
+ }
+ capRegAddr := state.Chains[homeChainSel].CapabilityRegistry.Address()
+ if capRegAddr == common.HexToAddress("0x") {
+ return deployment.CapabilityRegistryConfig{}, e.ExistingAddresses, fmt.Errorf("cap Reg address not found: %w", err)
+ }
+ capRegConfig := deployment.CapabilityRegistryConfig{
+ EVMChainID: homeChainSel,
+ Contract: state.Chains[homeChainSel].CapabilityRegistry.Address(),
+ NetworkType: relay.NetworkEVM,
+ }
+ return capRegConfig, e.ExistingAddresses, nil
+}
+
+func DeployCCIPAndAddLanes(ctx context.Context, lggr logger.Logger, envConfig devenv.EnvironmentConfig, homeChainSel, feedChainSel uint64, ab deployment.AddressBook) (DeployCCIPOutput, error) {
+ e, _, err := devenv.NewEnvironment(func() context.Context { return ctx }, lggr, envConfig)
+ if err != nil {
+ return DeployCCIPOutput{}, fmt.Errorf("failed to initiate new environment: %w", err)
+ }
+ e.ExistingAddresses = ab
+ allChainIds := e.AllChainSelectors()
+ cfg := make(map[uint64]commontypes.MCMSWithTimelockConfig)
+ for _, chain := range e.AllChainSelectors() {
+ mcmsConfig, err := config.NewConfig(1, []common.Address{e.Chains[chain].DeployerKey.From}, []config.Config{})
+ if err != nil {
+ return DeployCCIPOutput{}, fmt.Errorf("failed to create mcms config: %w", err)
+ }
+ cfg[chain] = commontypes.MCMSWithTimelockConfig{
+ Canceller: *mcmsConfig,
+ Bypasser: *mcmsConfig,
+ Proposer: *mcmsConfig,
+ TimelockMinDelay: big.NewInt(0),
+ }
+ }
+
+ // This will not apply any proposals because we pass a nil *testing.T.
+ // That is fine for setup, because we only need to deploy the contracts and distribute job specs.
+ *e, err = commonchangeset.ApplyChangesets(nil, *e, nil, []commonchangeset.ChangesetApplication{
+ {
+ Changeset: commonchangeset.WrapChangeSet(commonchangeset.DeployLinkToken),
+ Config: allChainIds,
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(changeset.DeployPrerequisites),
+ Config: changeset.DeployPrerequisiteConfig{
+ ChainSelectors: allChainIds,
+ },
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(commonchangeset.DeployMCMSWithTimelock),
+ Config: cfg,
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(changeset.DeployChainContracts),
+ Config: changeset.DeployChainContractsConfig{
+ ChainSelectors: allChainIds,
+ HomeChainSelector: homeChainSel,
+ },
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(changeset.CCIPCapabilityJobspec),
+ Config: struct{}{},
+ },
+ })
+ if err != nil {
+ return DeployCCIPOutput{}, fmt.Errorf("failed to apply changesets: %w", err)
+ }
+ state, err := changeset.LoadOnchainState(*e)
+ if err != nil {
+ return DeployCCIPOutput{}, fmt.Errorf("failed to load onchain state: %w", err)
+ }
+ // Add all lanes
+ err = changeset.AddLanesForAll(*e, state)
+ if err != nil {
+ return DeployCCIPOutput{}, fmt.Errorf("failed to add lanes: %w", err)
+ }
+
+ addresses, err := e.ExistingAddresses.Addresses()
+ if err != nil {
+ return DeployCCIPOutput{}, fmt.Errorf("failed to get convert address book to address book map: %w", err)
+ }
+ return DeployCCIPOutput{
+ AddressBook: *deployment.NewMemoryAddressBookFromMap(addresses),
+ NodeIDs: e.NodeIDs,
+ }, nil
+}
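The two entry points above are meant to be called in sequence by the CRIB tooling. A rough sketch of that flow, with placeholder names and the exact wiring left to CRIB, might look as follows.

package main // illustrative

import (
	"context"

	"github.com/smartcontractkit/chainlink/deployment/environment/crib"
	"github.com/smartcontractkit/chainlink/deployment/environment/devenv"
	"github.com/smartcontractkit/chainlink/v2/core/logger"
)

// deployFlow shows the intended call order of the two entry points above.
func deployFlow(ctx context.Context, lggr logger.Logger, envCfg devenv.EnvironmentConfig, homeSel, feedSel uint64) error {
	// 1. Deploy the home chain contracts; CRIB uses the returned capability registry
	//    config to render the node configs before (re)starting the nodes.
	capRegCfg, addressBook, err := crib.DeployHomeChainContracts(ctx, lggr, envCfg, homeSel, feedSel)
	if err != nil {
		return err
	}
	_ = capRegCfg // consumed by CRIB when rendering node configs

	// 2. Once the nodes run with the registry address, deploy the remaining CCIP
	//    contracts and connect all lanes.
	out, err := crib.DeployCCIPAndAddLanes(ctx, lggr, envCfg, homeSel, feedSel, addressBook)
	if err != nil {
		return err
	}
	lggr.Infow("CCIP deployed", "nodes", out.NodeIDs)
	return nil
}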
diff --git a/deployment/environment/crib/data.go b/deployment/environment/crib/data.go
new file mode 100644
index 00000000000..b9197691613
--- /dev/null
+++ b/deployment/environment/crib/data.go
@@ -0,0 +1,81 @@
+package crib
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/environment/devenv"
+)
+
+type OutputReader struct {
+ outputDir string
+}
+
+func NewOutputReader(outputDir string) *OutputReader {
+ return &OutputReader{outputDir: outputDir}
+}
+
+func (r *OutputReader) ReadNodesDetails() NodesDetails {
+ byteValue := r.readFile(NodesDetailsFileName)
+
+ var result NodesDetails
+
+ // Unmarshal the JSON into the NodesDetails struct
+ err := json.Unmarshal(byteValue, &result)
+ if err != nil {
+ fmt.Println("Error unmarshalling JSON:", err)
+ panic(err)
+ }
+
+ return result
+}
+
+func (r *OutputReader) ReadChainConfigs() []devenv.ChainConfig {
+ byteValue := r.readFile(ChainsConfigsFileName)
+
+ var result []devenv.ChainConfig
+
+ // Unmarshal the JSON into the slice of chain configs
+ err := json.Unmarshal(byteValue, &result)
+ if err != nil {
+ fmt.Println("Error unmarshalling JSON:", err)
+ panic(err)
+ }
+
+ return result
+}
+
+func (r *OutputReader) ReadAddressBook() *deployment.AddressBookMap {
+ byteValue := r.readFile(AddressBookFileName)
+
+ var result map[uint64]map[string]deployment.TypeAndVersion
+
+ // Unmarshal the JSON into the map
+ err := json.Unmarshal(byteValue, &result)
+ if err != nil {
+ fmt.Println("Error unmarshalling JSON:", err)
+ panic(err)
+ }
+
+ return deployment.NewMemoryAddressBookFromMap(result)
+}
+
+func (r *OutputReader) readFile(fileName string) []byte {
+ file, err := os.Open(fmt.Sprintf("%s/%s", r.outputDir, fileName))
+ if err != nil {
+ fmt.Println("Error opening file:", err)
+ panic(err)
+ }
+ defer file.Close()
+
+ // Read the file's content into a byte slice
+ byteValue, err := io.ReadAll(file)
+ if err != nil {
+ fmt.Println("Error reading file:", err)
+ panic(err)
+ }
+ return byteValue
+}
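A small usage sketch for OutputReader, assuming an output directory containing the three JSON artifacts named by the constants in env.go (the directory path below is hypothetical).

package main // illustrative

import (
	"fmt"

	"github.com/smartcontractkit/chainlink/deployment/environment/crib"
)

// readCribArtifacts reads the three JSON artifacts written by the CRIB scripts.
func readCribArtifacts(outputDir string) {
	reader := crib.NewOutputReader(outputDir)
	nodes := reader.ReadNodesDetails()  // node IDs registered with the job distributor
	chains := reader.ReadChainConfigs() // per-chain RPC endpoints
	book := reader.ReadAddressBook()    // deployed contract addresses
	fmt.Println(len(nodes.NodeIDs), len(chains), book != nil)
}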
diff --git a/deployment/environment/crib/env.go b/deployment/environment/crib/env.go
new file mode 100644
index 00000000000..3af1acaf754
--- /dev/null
+++ b/deployment/environment/crib/env.go
@@ -0,0 +1,45 @@
+package crib
+
+const (
+ AddressBookFileName = "ccip-v2-scripts-address-book.json"
+ NodesDetailsFileName = "ccip-v2-scripts-nodes-details.json"
+ ChainsConfigsFileName = "ccip-v2-scripts-chains-details.json"
+)
+
+type CRIBEnv struct {
+ envStateDir string
+}
+
+func NewDevspaceEnvFromStateDir(envStateDir string) CRIBEnv {
+ return CRIBEnv{
+ envStateDir: envStateDir,
+ }
+}
+
+func (c CRIBEnv) GetConfig() DeployOutput {
+ reader := NewOutputReader(c.envStateDir)
+ nodesDetails := reader.ReadNodesDetails()
+ chainConfigs := reader.ReadChainConfigs()
+ return DeployOutput{
+ AddressBook: reader.ReadAddressBook(),
+ NodeIDs: nodesDetails.NodeIDs,
+ Chains: chainConfigs,
+ }
+}
+
+type RPC struct {
+ External *string
+ Internal *string
+}
+
+type ChainConfig struct {
+ ChainID uint64 // chain id as per EIP-155, mainly applicable for EVM chains
+ ChainName string // name of the chain populated from chainselector repo
+ ChainType string // should denote the chain family. Acceptable values are EVM, COSMOS, SOLANA, STARKNET, APTOS etc
+ WSRPCs []RPC // websocket rpcs to connect to the chain
+ HTTPRPCs []RPC // http rpcs to connect to the chain
+}
+
+type NodesDetails struct {
+ NodeIDs []string
+}
diff --git a/deployment/environment/crib/env_test.go b/deployment/environment/crib/env_test.go
new file mode 100644
index 00000000000..262a2540923
--- /dev/null
+++ b/deployment/environment/crib/env_test.go
@@ -0,0 +1,18 @@
+package crib
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestShouldProvideEnvironmentConfig(t *testing.T) {
+ t.Parallel()
+ env := NewDevspaceEnvFromStateDir("testdata/lanes-deployed-state")
+ config := env.GetConfig()
+ require.NotNil(t, config)
+ assert.NotEmpty(t, config.NodeIDs)
+ assert.NotNil(t, config.AddressBook)
+ assert.NotEmpty(t, config.Chains)
+}
diff --git a/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-address-book.json b/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-address-book.json
new file mode 100644
index 00000000000..e4b2672cb5f
--- /dev/null
+++ b/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-address-book.json
@@ -0,0 +1 @@
+{"12922642891491394802":{"0x05Aa229Aec102f78CE0E852A812a388F076Aa555":{"Type":"CancellerManyChainMultiSig","Version":"1.0.0"},"0x0D4ff719551E23185Aeb16FFbF2ABEbB90635942":{"Type":"TestRouter","Version":"1.2.0"},"0x0f5D1ef48f12b6f691401bfe88c2037c690a6afe":{"Type":"ProposerManyChainMultiSig","Version":"1.0.0"},"0x2dE080e97B0caE9825375D31f5D0eD5751fDf16D":{"Type":"CCIPReceiver","Version":"1.0.0"},"0x2fc631e4B3018258759C52AF169200213e84ABab":{"Type":"OnRamp","Version":"1.6.0-dev"},"0x5C7c905B505f0Cf40Ab6600d05e677F717916F6B":{"Type":"Router","Version":"1.2.0"},"0x63cf2Cd54fE91e3545D1379abf5bfd194545259d":{"Type":"OffRamp","Version":"1.6.0-dev"},"0x712516e61C8B383dF4A63CFe83d7701Bce54B03e":{"Type":"LinkToken","Version":"1.0.0"},"0x71C95911E9a5D330f4D621842EC243EE1343292e":{"Type":"PriceFeed","Version":"1.0.0"},"0x73eccD6288e117cAcA738BDAD4FEC51312166C1A":{"Type":"RMNRemote","Version":"1.6.0-dev"},"0x8464135c8F25Da09e49BC8782676a84730C318bC":{"Type":"PriceFeed","Version":"1.0.0"},"0x85C5Dd61585773423e378146D4bEC6f8D149E248":{"Type":"TokenAdminRegistry","Version":"1.5.0"},"0x948B3c65b89DF0B4894ABE91E6D02FE579834F8F":{"Type":"WETH9","Version":"1.0.0"},"0xAfe1b5bdEbD4ae65AF2024738bf0735fbb65d44b":{"Type":"FeeQuoter","Version":"1.6.0-dev"},"0xC6bA8C3233eCF65B761049ef63466945c362EdD2":{"Type":"BypasserManyChainMultiSig","Version":"1.0.0"},"0xbCF26943C0197d2eE0E5D05c716Be60cc2761508":{"Type":"AdminManyChainMultiSig","Version":"1.0.0"},"0xcA03Dc4665A8C3603cb4Fd5Ce71Af9649dC00d44":{"Type":"RBACTimelock","Version":"1.0.0"},"0xe6b98F104c1BEf218F3893ADab4160Dc73Eb8367":{"Type":"ARMProxy","Version":"1.0.0"},"0xfbAb4aa40C202E4e80390171E82379824f7372dd":{"Type":"NonceManager","Version":"1.6.0-dev"}},"3379446385462418246":{"0x09635F643e140090A9A8Dcd712eD6285858ceBef":{"Type":"RMNRemote","Version":"1.6.0-dev"},"0x0B306BF915C4d645ff596e518fAf3F9669b97016":{"Type":"LinkToken","Version":"1.0.0"},"0x1613beB3B2C4f22Ee086B2b38C1476A3cE7f78E8":{"Type":"OnRamp","Version":"1.6.0-dev"},"0x2279B7A0a67DB372996a5FaB50D91eAA73d2eBe6":{"Type":"CCIPHome","Version":"1.6.0-dev"},"0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44":{"Type":"ProposerManyChainMultiSig","Version":"1.0.0"},"0x3Aa5ebB10DC797CAC828524e59A333d0A371443c":{"Type":"BypasserManyChainMultiSig","Version":"1.0.0"},"0x4A679253410272dd5232B3Ff7cF5dbB88f295319":{"Type":"RBACTimelock","Version":"1.0.0"},"0x59b670e9fA9D0A427751Af201D676719a970857b":{"Type":"CancellerManyChainMultiSig","Version":"1.0.0"},"0x67d269191c92Caf3cD7723F116c85e6E9bf55933":{"Type":"ARMProxy","Version":"1.0.0"},"0x7a2088a1bFc9d81c55368AE168C2C02570cB814F":{"Type":"CCIPReceiver","Version":"1.0.0"},"0x84eA74d481Ee0A5332c457a4d796187F6Ba67fEB":{"Type":"TokenAdminRegistry","Version":"1.5.0"},"0x851356ae760d987E095750cCeb3bC6014560891C":{"Type":"OffRamp","Version":"1.6.0-dev"},"0x8A791620dd6260079BF849Dc5567aDC3F2FdC318":{"Type":"RMNHome","Version":"1.6.0-dev"},"0x9A676e781A523b5d0C0e43731313A708CB607508":{"Type":"WETH9","Version":"1.0.0"},"0x9A9f2CCfdE556A7E9Ff0848998Aa4a0CFD8863AE":{"Type":"AdminManyChainMultiSig","Version":"1.0.0"},"0x9E545E3C0baAB3E08CdfD552C960A1050f373042":{"Type":"NonceManager","Version":"1.6.0-dev"},"0xE6E340D132b5f46d1e472DebcD681B2aBc16e57E":{"Type":"Router","Version":"1.2.0"},"0xa513E6E4b8f2a923D98304ec87F64353C4D5C853":{"Type":"CapabilitiesRegistry","Version":"1.0.0"},"0xa82fF9aFd8f496c3d6ac40E2a0F282E47488CFc9":{"Type":"FeeQuoter","Version":"1.6.0-dev"},"0xc3e53F4d16Ae77Db1c982e75a937B9f60FE63690":{"Type":"TestRouter","Version":"1.2.0"}}}
diff --git a/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-chains-details.json b/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-chains-details.json
new file mode 100644
index 00000000000..f93ea4ce231
--- /dev/null
+++ b/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-chains-details.json
@@ -0,0 +1,24 @@
+[
+ {
+ "ChainID": 1337,
+ "ChainName": "alpha",
+ "ChainType": "EVM",
+ "WSRPCs": [
+ "wss://crib-local-geth-1337-ws.local:443"
+ ],
+ "HTTPRPCs": [
+ "https://crib-local-geth-1337-ws.local:443"
+ ]
+ },
+ {
+ "ChainID": 2337,
+ "ChainName": "alpha",
+ "ChainType": "EVM",
+ "WSRPCs": [
+ "wss://crib-local-geth-2337-ws.local:443"
+ ],
+ "HTTPRPCs": [
+ "https://crib-local-geth-2337-ws.local:443"
+ ]
+ }
+]
diff --git a/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-nodes-details.json b/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-nodes-details.json
new file mode 100644
index 00000000000..477ae0527b1
--- /dev/null
+++ b/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-nodes-details.json
@@ -0,0 +1 @@
+{"NodeIDs":["node_2URuou3RXmtZu5gLQX8qd","node_m9TTQbUxBx3WjDEjmpVDL","node_4FiKVPtuQjCTvHnS7QpES","node_A4VTgecDwMoG2YYicyjuG","node_jQFpzXDadzaADq147nThS"]}
diff --git a/deployment/environment/crib/types.go b/deployment/environment/crib/types.go
new file mode 100644
index 00000000000..d19c8424443
--- /dev/null
+++ b/deployment/environment/crib/types.go
@@ -0,0 +1,39 @@
+package crib
+
+import (
+ "context"
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/environment/devenv"
+)
+
+const (
+ CRIB_ENV_NAME = "Crib Environment"
+)
+
+type DeployOutput struct {
+ NodeIDs []string
+ Chains []devenv.ChainConfig // configuration of each chain in the environment
+ AddressBook deployment.AddressBook // Addresses of all contracts
+}
+
+type DeployCCIPOutput struct {
+ AddressBook deployment.AddressBookMap
+ NodeIDs []string
+}
+
+func NewDeployEnvironmentFromCribOutput(lggr logger.Logger, output DeployOutput) (*deployment.Environment, error) {
+ chains, err := devenv.NewChains(lggr, output.Chains)
+ if err != nil {
+ return nil, err
+ }
+ return deployment.NewEnvironment(
+ CRIB_ENV_NAME,
+ lggr,
+ output.AddressBook,
+ chains,
+ output.NodeIDs,
+ nil, // todo: populate the offchain client using output.DON
+ func() context.Context { return context.Background() }, deployment.XXXGenerateTestOCRSecrets(),
+ ), nil
+}
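Together with env.go, this closes the loop from a CRIB state directory to a deployment.Environment. A minimal sketch, assuming lggr is a chainlink-common logger and the state directory was produced by the CRIB scripts; the wrapper function name is illustrative.

package main // illustrative

import (
	"github.com/smartcontractkit/chainlink-common/pkg/logger"

	"github.com/smartcontractkit/chainlink/deployment"
	"github.com/smartcontractkit/chainlink/deployment/environment/crib"
)

// environmentFromCribState builds a deployment.Environment from a CRIB state directory,
// chaining CRIBEnv.GetConfig (env.go) with NewDeployEnvironmentFromCribOutput (above).
func environmentFromCribState(lggr logger.Logger, stateDir string) (*deployment.Environment, error) {
	cribEnv := crib.NewDevspaceEnvFromStateDir(stateDir)
	return crib.NewDeployEnvironmentFromCribOutput(lggr, cribEnv.GetConfig())
}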
diff --git a/deployment/environment/devenv/don.go b/deployment/environment/devenv/don.go
index 05a3d5bea08..76f6ee92b68 100644
--- a/deployment/environment/devenv/don.go
+++ b/deployment/environment/devenv/don.go
@@ -2,7 +2,9 @@ package devenv
import (
"context"
+ "errors"
"fmt"
+ chainsel "github.com/smartcontractkit/chain-selectors"
"strconv"
"strings"
"time"
@@ -10,8 +12,6 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/rs/zerolog"
"github.com/sethvargo/go-retry"
- chainsel "github.com/smartcontractkit/chain-selectors"
-
nodev1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node"
clclient "github.com/smartcontractkit/chainlink/deployment/environment/nodeclient"
"github.com/smartcontractkit/chainlink/deployment/environment/web/sdk/client"
@@ -185,7 +185,7 @@ type JDChainConfigInput struct {
// It expects bootstrap nodes to have label with key "type" and value as "bootstrap".
// It fetches the account address, peer id, and OCR2 key bundle id and creates the JobDistributorChainConfig.
func (n *Node) CreateCCIPOCRSupportedChains(ctx context.Context, chains []JDChainConfigInput, jd JobDistributor) error {
- for i, chain := range chains {
+ for _, chain := range chains {
chainId := strconv.FormatUint(chain.ChainID, 10)
var account string
switch chain.ChainType {
@@ -239,35 +239,51 @@ func (n *Node) CreateCCIPOCRSupportedChains(ctx context.Context, chains []JDChai
break
}
}
- // JD silently fails to update nodeChainConfig. Therefore, we fetch the node config and
- // if it's not updated , throw an error
- _, err = n.gqlClient.CreateJobDistributorChainConfig(ctx, client.JobDistributorChainConfigInput{
- JobDistributorID: n.JDId,
- ChainID: chainId,
- ChainType: chain.ChainType,
- AccountAddr: account,
- AdminAddr: n.adminAddr,
- Ocr2Enabled: true,
- Ocr2IsBootstrap: isBootstrap,
- Ocr2Multiaddr: n.multiAddr,
- Ocr2P2PPeerID: value(peerID),
- Ocr2KeyBundleID: ocr2BundleId,
- Ocr2Plugins: `{"commit":true,"execute":true,"median":false,"mercury":false}`,
+
+		// retry for up to 10 seconds, at a 3-second constant interval, until the JobDistributorChainConfig exists
+ err = retry.Do(ctx, retry.WithMaxDuration(10*time.Second, retry.NewConstant(3*time.Second)), func(ctx context.Context) error {
+ // check the node chain config to see if this chain already exists
+ nodeChainConfigs, err := jd.ListNodeChainConfigs(context.Background(), &nodev1.ListNodeChainConfigsRequest{
+ Filter: &nodev1.ListNodeChainConfigsRequest_Filter{
+ NodeIds: []string{n.NodeId},
+ }})
+ if err != nil {
+ return retry.RetryableError(fmt.Errorf("failed to list node chain configs for node %s, retrying..: %w", n.Name, err))
+ }
+ if nodeChainConfigs != nil {
+ for _, chainConfig := range nodeChainConfigs.ChainConfigs {
+ if chainConfig.Chain.Id == chainId {
+ return nil
+ }
+ }
+ }
+
+			// JD silently fails to update nodeChainConfig. Therefore, we fetch the node config and,
+			// if it's not updated, return an error
+ _, err = n.gqlClient.CreateJobDistributorChainConfig(ctx, client.JobDistributorChainConfigInput{
+ JobDistributorID: n.JDId,
+ ChainID: chainId,
+ ChainType: chain.ChainType,
+ AccountAddr: account,
+ AdminAddr: n.adminAddr,
+ Ocr2Enabled: true,
+ Ocr2IsBootstrap: isBootstrap,
+ Ocr2Multiaddr: n.multiAddr,
+ Ocr2P2PPeerID: value(peerID),
+ Ocr2KeyBundleID: ocr2BundleId,
+ Ocr2Plugins: `{"commit":true,"execute":true,"median":false,"mercury":false}`,
+ })
+			// todo: add a check for whether the chain config creation failed because of a duplicate; in that case, should we update or return success?
+ if err != nil {
+ return fmt.Errorf("failed to create CCIPOCR2SupportedChains for node %s: %w", n.Name, err)
+ }
+
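+			// return a retryable error even after a successful create so the next attempt re-lists the chain configs and confirms JD actually persisted it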
+ return retry.RetryableError(errors.New("retrying CreateChainConfig in JD"))
})
+
if err != nil {
return fmt.Errorf("failed to create CCIPOCR2SupportedChains for node %s: %w", n.Name, err)
}
- // query the node chain config to check if it's created
- nodeChainConfigs, err := jd.ListNodeChainConfigs(context.Background(), &nodev1.ListNodeChainConfigsRequest{
- Filter: &nodev1.ListNodeChainConfigsRequest_Filter{
- NodeIds: []string{n.NodeId},
- }})
- if err != nil {
- return fmt.Errorf("failed to list node chain configs for node %s: %w", n.Name, err)
- }
- if nodeChainConfigs == nil || len(nodeChainConfigs.ChainConfigs) < i+1 {
- return fmt.Errorf("failed to create chain config for node %s", n.Name)
- }
}
return nil
}
@@ -377,6 +393,17 @@ func (n *Node) CreateJobDistributor(ctx context.Context, jd JobDistributor) (str
return "", err
}
// create the job distributor in the node with the csa key
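+	// if a feeds manager with this CSA key is already registered, reuse it so repeated calls stay idempotent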
+ resp, err := n.gqlClient.ListJobDistributors(ctx)
+ if err != nil {
+ return "", fmt.Errorf("could not list job distrubutors: %w", err)
+ }
+ if len(resp.FeedsManagers.Results) > 0 {
+ for _, fm := range resp.FeedsManagers.Results {
+ if fm.GetPublicKey() == csaKey {
+ return fm.GetId(), nil
+ }
+ }
+ }
return n.gqlClient.CreateJobDistributor(ctx, client.JobDistributorInput{
Name: "Job Distributor",
Uri: jd.WSRPC,
@@ -394,8 +421,9 @@ func (n *Node) SetUpAndLinkJobDistributor(ctx context.Context, jd JobDistributor
}
// now create the job distributor in the node
id, err := n.CreateJobDistributor(ctx, jd)
- if err != nil && !strings.Contains(err.Error(), "DuplicateFeedsManagerError") {
- return err
+	if err != nil &&
+		(!strings.Contains(err.Error(), "only a single feeds manager is supported") && !strings.Contains(err.Error(), "DuplicateFeedsManagerError")) {
+ return fmt.Errorf("failed to create job distributor in node %s: %w", n.Name, err)
}
// wait for the node to connect to the job distributor
err = retry.Do(ctx, retry.WithMaxDuration(1*time.Minute, retry.NewFibonacci(1*time.Second)), func(ctx context.Context) error {
diff --git a/deployment/environment/devenv/rmn.go b/deployment/environment/devenv/rmn.go
index 63f27f1e422..3e0c6efe0cd 100644
--- a/deployment/environment/devenv/rmn.go
+++ b/deployment/environment/devenv/rmn.go
@@ -22,7 +22,6 @@ import (
"github.com/smartcontractkit/chainlink-testing-framework/lib/docker"
"github.com/smartcontractkit/chainlink-testing-framework/lib/docker/test_env"
"github.com/smartcontractkit/chainlink-testing-framework/lib/logging"
- "github.com/smartcontractkit/chainlink-testing-framework/lib/logstream"
p2ptypes "github.com/smartcontractkit/chainlink/v2/core/services/p2p/types"
)
@@ -51,7 +50,6 @@ func NewRage2ProxyComponent(
imageVersion string,
local ProxyLocalConfig,
shared ProxySharedConfig,
- logStream *logstream.LogStream,
) (*RageProxy, error) {
rageName := fmt.Sprintf("%s-proxy-%s", name, uuid.NewString()[0:8])
@@ -71,7 +69,6 @@ func NewRage2ProxyComponent(
ContainerImage: imageName,
ContainerVersion: imageVersion,
Networks: networks,
- LogStream: logStream,
},
Passphrase: DefaultAFNPassphrase,
proxyListenerPort: listenPort,
@@ -193,8 +190,7 @@ func NewAFN2ProxyComponent(
imageName,
imageVersion string,
shared SharedConfig,
- local LocalConfig,
- logStream *logstream.LogStream) (*AFN2Proxy, error) {
+ local LocalConfig) (*AFN2Proxy, error) {
afnName := fmt.Sprintf("%s-%s", name, uuid.NewString()[0:8])
rmn := &AFN2Proxy{
EnvComponent: test_env.EnvComponent{
@@ -202,7 +198,6 @@ func NewAFN2ProxyComponent(
ContainerImage: imageName,
ContainerVersion: imageVersion,
Networks: networks,
- LogStream: logStream,
},
AFNPassphrase: DefaultAFNPassphrase,
Shared: shared,
@@ -343,7 +338,6 @@ func NewRMNCluster(
proxyVersion string,
rmnImage string,
rmnVersion string,
- logStream *logstream.LogStream,
) (*RMNCluster, error) {
rmn := &RMNCluster{
t: t,
@@ -351,7 +345,7 @@ func NewRMNCluster(
Nodes: make(map[string]RMNNode),
}
for name, rmnConfig := range config {
- proxy, err := NewRage2ProxyComponent(networks, name, proxyImage, proxyVersion, rmnConfig.ProxyLocal, rmnConfig.ProxyShared, logStream)
+ proxy, err := NewRage2ProxyComponent(networks, name, proxyImage, proxyVersion, rmnConfig.ProxyLocal, rmnConfig.ProxyShared)
if err != nil {
return nil, err
}
@@ -371,7 +365,7 @@ func NewRMNCluster(
return nil, err
}
rmnConfig.Local.Networking.RageProxy = strings.TrimPrefix(fmt.Sprintf("%s:%s", proxyName, port), "/")
- afn, err := NewAFN2ProxyComponent(networks, name, rmnImage, rmnVersion, rmnConfig.Shared, rmnConfig.Local, logStream)
+ afn, err := NewAFN2ProxyComponent(networks, name, rmnImage, rmnVersion, rmnConfig.Shared, rmnConfig.Local)
if err != nil {
return nil, err
}
diff --git a/deployment/environment/memory/node.go b/deployment/environment/memory/node.go
index fd08d3cf17b..84f0d2e443f 100644
--- a/deployment/environment/memory/node.go
+++ b/deployment/environment/memory/node.go
@@ -286,6 +286,8 @@ func CreateKeys(t *testing.T,
}
backend := chain.Client.(*Backend).Sim
fundAddress(t, chain.DeployerKey, transmitters[evmChainID], assets.Ether(1000).ToInt(), backend)
+		// TODO: look into this further; with simulated chains the nodes seem to send txs from the zero (0x0) address, so fund that address as well
+ fundAddress(t, chain.DeployerKey, common.Address{}, assets.Ether(1000).ToInt(), backend)
}
return Keys{
diff --git a/deployment/environment/web/sdk/client/client.go b/deployment/environment/web/sdk/client/client.go
index 5472591ef94..e0a56b9e642 100644
--- a/deployment/environment/web/sdk/client/client.go
+++ b/deployment/environment/web/sdk/client/client.go
@@ -4,10 +4,11 @@ import (
"context"
"encoding/json"
"fmt"
+ "github.com/Khan/genqlient/graphql"
+ "github.com/sethvargo/go-retry"
"net/http"
"strings"
-
- "github.com/Khan/genqlient/graphql"
+ "time"
"github.com/smartcontractkit/chainlink/deployment/environment/web/sdk/client/doer"
"github.com/smartcontractkit/chainlink/deployment/environment/web/sdk/internal/generated"
@@ -60,8 +61,15 @@ func New(baseURI string, creds Credentials) (Client, error) {
endpoints: ep,
credentials: creds,
}
-
- if err := c.login(); err != nil {
+
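+	// the node may still be starting up when the client is constructed, so retry the login for up to ~10 seconds before giving up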
+ err := retry.Do(context.Background(), retry.WithMaxDuration(10*time.Second, retry.NewFibonacci(2*time.Second)), func(ctx context.Context) error {
+ err := c.login()
+ if err != nil {
+ return retry.RetryableError(fmt.Errorf("retrying login to node: %w", err))
+ }
+ return nil
+ })
+ if err != nil {
return nil, fmt.Errorf("failed to login to node: %w", err)
}
diff --git a/deployment/go.mod b/deployment/go.mod
index 8c30d54bdff..5551034d579 100644
--- a/deployment/go.mod
+++ b/deployment/go.mod
@@ -26,10 +26,10 @@ require (
github.com/pkg/errors v0.9.1
github.com/rs/zerolog v1.33.0
github.com/sethvargo/go-retry v0.2.4
- github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86
+ github.com/smartcontractkit/ccip-owner-contracts v0.0.0-salt-fix
github.com/smartcontractkit/chain-selectors v1.0.34
- github.com/smartcontractkit/chainlink-ccip v0.0.0-20241211150100-7683331f64a0
- github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83
+ github.com/smartcontractkit/chainlink-ccip v0.0.0-20241213122413-5e8f65dd6b1b
+ github.com/smartcontractkit/chainlink-common v0.3.1-0.20241214155818-b403079b2805
github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0
github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.13
github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12
@@ -40,7 +40,7 @@ require (
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
golang.org/x/oauth2 v0.23.0
- golang.org/x/sync v0.8.0
+ golang.org/x/sync v0.10.0
google.golang.org/grpc v1.67.1
google.golang.org/protobuf v1.35.1
gopkg.in/guregu/null.v4 v4.0.0
@@ -82,7 +82,7 @@ require (
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect
github.com/alexflint/go-arg v1.4.2 // indirect
github.com/alexflint/go-scalar v1.0.0 // indirect
- github.com/andybalholm/brotli v1.1.0 // indirect
+ github.com/andybalholm/brotli v1.1.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/atombender/go-jsonschema v0.16.1-0.20240916205339-a74cd4e2851c // indirect
@@ -223,7 +223,7 @@ require (
github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
github.com/go-webauthn/webauthn v0.9.4 // indirect
github.com/go-webauthn/x v0.1.5 // indirect
- github.com/goccy/go-json v0.10.2 // indirect
+ github.com/goccy/go-json v0.10.3 // indirect
github.com/goccy/go-yaml v1.12.0 // indirect
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
github.com/gofrs/flock v0.8.1 // indirect
@@ -315,8 +315,8 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/kelseyhightower/envconfig v1.4.0 // indirect
- github.com/klauspost/compress v1.17.9 // indirect
- github.com/klauspost/cpuid/v2 v2.2.7 // indirect
+ github.com/klauspost/compress v1.17.11 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.8 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
@@ -409,7 +409,7 @@ require (
github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e // indirect
github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241202141438-a90db35252db // indirect
github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect
- github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.2 // indirect
+ github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 // indirect
github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241210172617-6fd1891d0fbc // indirect
github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241202202529-2033490e77b8 // indirect
github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.50.0 // indirect
@@ -492,12 +492,12 @@ require (
go.uber.org/ratelimit v0.3.1 // indirect
go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect
golang.org/x/arch v0.11.0 // indirect
- golang.org/x/crypto v0.28.0 // indirect
+ golang.org/x/crypto v0.31.0 // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.30.0 // indirect
- golang.org/x/sys v0.26.0 // indirect
- golang.org/x/term v0.25.0 // indirect
- golang.org/x/text v0.19.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/term v0.27.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.26.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
diff --git a/deployment/go.sum b/deployment/go.sum
index b1ce805ba28..66170b80629 100644
--- a/deployment/go.sum
+++ b/deployment/go.sum
@@ -184,9 +184,11 @@ github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2uc
github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
-github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
-github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
+github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
+github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow-go/v18 v18.0.0 h1:1dBDaSbH3LtulTyOVYaBCHO3yVRwjV+TZaqn3g6V7ZM=
+github.com/apache/arrow-go/v18 v18.0.0/go.mod h1:t6+cWRSmKgdQ6HsxisQjok+jBpKGhRDiqcf3p0p/F+A=
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -548,8 +550,8 @@ github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813/go.mod h1:P+oSoE9y
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gin-contrib/cors v1.5.0 h1:DgGKV7DDoOn36DFkNtbHrjoRiT5ExCe+PC9/xp7aKvk=
-github.com/gin-contrib/cors v1.5.0/go.mod h1:TvU7MAZ3EwrPLI2ztzTt3tqgvBCq+wn8WpZmfADjupI=
+github.com/gin-contrib/cors v1.7.2 h1:oLDHxdg8W/XDoN/8zamqk/Drgt4oVZDvaV0YmvVICQw=
+github.com/gin-contrib/cors v1.7.2/go.mod h1:SUJVARKgQ40dmrzgXEVxj2m7Ig1v1qIboQkPDTQ9t2E=
github.com/gin-contrib/expvar v0.0.1 h1:IuU5ArEgihz50vG8Onrwz22kJr7Mcvgv9xSSpfU5g+w=
github.com/gin-contrib/expvar v0.0.1/go.mod h1:8o2CznfQi1JjktORdHr2/abg3wSV6OCnXh0yGypvvVw=
github.com/gin-contrib/sessions v0.0.5 h1:CATtfHmLMQrMNpJRgzjWXD7worTh7g7ritsQfmF+0jE=
@@ -638,8 +640,8 @@ github.com/go-webauthn/x v0.1.5 h1:V2TCzDU2TGLd0kSZOXdrqDVV5JB9ILnKxA9S53CSBw0=
github.com/go-webauthn/x v0.1.5/go.mod h1:qbzWwcFcv4rTwtCLOZd+icnr6B7oSsAGZJqlt8cukqY=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
-github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
-github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
+github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.12.0 h1:/1WHjnMsI1dlIBQutrvSMGZRQufVO3asrHfTwfACoPM=
github.com/goccy/go-yaml v1.12.0/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0=
@@ -707,6 +709,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI=
+github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -1032,11 +1036,11 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
-github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
+github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
@@ -1091,6 +1095,8 @@ github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYt
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f h1:tVvGiZQFjOXP+9YyGqSA6jE55x1XVxmoPYudncxrZ8U=
github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f/go.mod h1:Z60vy0EZVSu0bOugCHdcN5ZxFMKSpjRgsnh0XKPFqqk=
+github.com/marcboeker/go-duckdb v1.8.3 h1:ZkYwiIZhbYsT6MmJsZ3UPTHrTZccDdM4ztoqSlEMXiQ=
+github.com/marcboeker/go-duckdb v1.8.3/go.mod h1:C9bYRE1dPYb1hhfu/SSomm78B0FXmNgRvv6YBW/Hooc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
@@ -1262,8 +1268,8 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU=
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
-github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
-github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
@@ -1403,16 +1409,16 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86 h1:qQH6fZZe31nBAG6INHph3z5ysDTPptyu0TR9uoJ1+ok=
-github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86/go.mod h1:WtWOoVQQEHxRHL2hNmuRrvDfYfQG/CioFNoa9Rr2mBE=
+github.com/smartcontractkit/ccip-owner-contracts v0.0.0-salt-fix h1:DPJD++yKLSx0EfT+U14P8vLVxjXFmoIETiCO9lVwQo8=
+github.com/smartcontractkit/ccip-owner-contracts v0.0.0-salt-fix/go.mod h1:NnT6w4Kj42OFFXhSx99LvJZWPpMjmo4+CpDEWfw61xY=
github.com/smartcontractkit/chain-selectors v1.0.34 h1:MJ17OGu8+jjl426pcKrJkCf3fePb3eCreuAnUA3RBj4=
github.com/smartcontractkit/chain-selectors v1.0.34/go.mod h1:xsKM0aN3YGcQKTPRPDDtPx2l4mlTN1Djmg0VVXV40b8=
github.com/smartcontractkit/chainlink-automation v0.8.1 h1:sTc9LKpBvcKPc1JDYAmgBc2xpDKBco/Q4h4ydl6+UUU=
github.com/smartcontractkit/chainlink-automation v0.8.1/go.mod h1:Iij36PvWZ6blrdC5A/nrQUBuf3MH3JvsBB9sSyc9W08=
-github.com/smartcontractkit/chainlink-ccip v0.0.0-20241211150100-7683331f64a0 h1:/1L+v4SxUD2K5RMRbfByyLfePMAgQKeD0onSetPnGmA=
-github.com/smartcontractkit/chainlink-ccip v0.0.0-20241211150100-7683331f64a0/go.mod h1:F8xQAIW0ymb2BZhqn89sWZLXreJhM5KDVF6Qb4y44N0=
-github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83 h1:NjrU7KOn3Tk+C6QFo9tQBqeotPKytpBwhn/J1s+yiiY=
-github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83/go.mod h1:bQktEJf7sJ0U3SmIcXvbGUox7SmXcnSEZ4kUbT8R5Nk=
+github.com/smartcontractkit/chainlink-ccip v0.0.0-20241213122413-5e8f65dd6b1b h1:iSQJ6ng4FhEswf8SXunGkaJlVP3E3JlgLB8Oo2f3Ud4=
+github.com/smartcontractkit/chainlink-ccip v0.0.0-20241213122413-5e8f65dd6b1b/go.mod h1:F8xQAIW0ymb2BZhqn89sWZLXreJhM5KDVF6Qb4y44N0=
+github.com/smartcontractkit/chainlink-common v0.3.1-0.20241214155818-b403079b2805 h1:Pz8jB/6qe10xT10h2S3LFYJrnebNpG5rJ/w16HZGwPQ=
+github.com/smartcontractkit/chainlink-common v0.3.1-0.20241214155818-b403079b2805/go.mod h1:yti7e1+G9hhkYhj+L5sVUULn9Bn3bBL5/AxaNqdJ5YQ=
github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e h1:PRoeby6ZlTuTkv2f+7tVU4+zboTfRzI+beECynF4JQ0=
github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e/go.mod h1:mUh5/woemsVaHgTorA080hrYmO3syBCmPdnWc/5dOqk=
github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241202141438-a90db35252db h1:N1RH1hSr2ACzOFc9hkCcjE8pRBTdcU3p8nsTJByaLes=
@@ -1421,8 +1427,8 @@ github.com/smartcontractkit/chainlink-feeds v0.1.1 h1:JzvUOM/OgGQA1sOqTXXl52R6An
github.com/smartcontractkit/chainlink-feeds v0.1.1/go.mod h1:55EZ94HlKCfAsUiKUTNI7QlE/3d3IwTlsU3YNa/nBb4=
github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0 h1:0ewLMbAz3rZrovdRUCgd028yOXX8KigB4FndAUdI2kM=
github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0/go.mod h1:/dVVLXrsp+V0AbcYGJo3XMzKg3CkELsweA/TTopCsKE=
-github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.2 h1:onBe3DqNrbtOAzKS4PrPIiJX65BGo1aYiYZxFVEW+jc=
-github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.2/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo=
+github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 h1:ZBat8EBvE2LpSQR9U1gEbRV6PfAkiFdINmQ8nVnXIAQ=
+github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo=
github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241210172617-6fd1891d0fbc h1:dssRwJhmzJkUN/OajaDj2GsxBn+Tupk3bI1BkPEoJg0=
github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241210172617-6fd1891d0fbc/go.mod h1:p8aUDfJeley6oer7y+Ucd3edOtRlMTnWg3mN6rhaLWo=
github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241202202529-2033490e77b8 h1:tNS7U9lrxkFvEuyxQv11HHOiV9LPDGC9wYEy+yM/Jv4=
@@ -1589,6 +1595,8 @@ github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
+github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
+github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -1600,6 +1608,8 @@ github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE
github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U=
github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM=
@@ -1746,8 +1756,8 @@ golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
-golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
-golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1877,8 +1887,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1972,8 +1982,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1984,8 +1994,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
-golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
-golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2000,8 +2010,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/deployment/keystone/changeset/accept_ownership_test.go b/deployment/keystone/changeset/accept_ownership_test.go
index b2aa1b20194..d949e63c7aa 100644
--- a/deployment/keystone/changeset/accept_ownership_test.go
+++ b/deployment/keystone/changeset/accept_ownership_test.go
@@ -1,7 +1,6 @@
package changeset_test
import (
- "math/big"
"testing"
"github.com/stretchr/testify/require"
@@ -10,6 +9,7 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/logger"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset"
@@ -44,23 +44,18 @@ func TestAcceptAllOwnership(t *testing.T) {
{
Changeset: commonchangeset.WrapChangeSet(commonchangeset.DeployMCMSWithTimelock),
Config: map[uint64]types.MCMSWithTimelockConfig{
- registrySel: {
- Canceller: commonchangeset.SingleGroupMCMS(t),
- Bypasser: commonchangeset.SingleGroupMCMS(t),
- Proposer: commonchangeset.SingleGroupMCMS(t),
- TimelockMinDelay: big.NewInt(0),
- },
+ registrySel: proposalutils.SingleGroupTimelockConfig(t),
},
},
})
require.NoError(t, err)
addrs, err := env.ExistingAddresses.AddressesForChain(registrySel)
require.NoError(t, err)
- timelock, err := commonchangeset.MaybeLoadMCMSWithTimelockState(env.Chains[registrySel], addrs)
+ timelock, err := commonchangeset.MaybeLoadMCMSWithTimelockChainState(env.Chains[registrySel], addrs)
require.NoError(t, err)
- _, err = commonchangeset.ApplyChangesets(t, env, map[uint64]*commonchangeset.TimelockExecutionContracts{
- registrySel: &commonchangeset.TimelockExecutionContracts{
+ _, err = commonchangeset.ApplyChangesets(t, env, map[uint64]*proposalutils.TimelockExecutionContracts{
+ registrySel: &proposalutils.TimelockExecutionContracts{
Timelock: timelock.Timelock,
CallProxy: timelock.CallProxy,
},
diff --git a/deployment/keystone/changeset/append_node_capabilities_test.go b/deployment/keystone/changeset/append_node_capabilities_test.go
index 159500ab5a7..bfc01b309f5 100644
--- a/deployment/keystone/changeset/append_node_capabilities_test.go
+++ b/deployment/keystone/changeset/append_node_capabilities_test.go
@@ -8,6 +8,7 @@ import (
"golang.org/x/exp/maps"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset"
kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
@@ -87,7 +88,7 @@ func TestAppendNodeCapabilities(t *testing.T) {
// now apply the changeset such that the proposal is signed and execed
contracts := te.ContractSets()[te.RegistrySelector]
- timelockContracts := map[uint64]*commonchangeset.TimelockExecutionContracts{
+ timelockContracts := map[uint64]*proposalutils.TimelockExecutionContracts{
te.RegistrySelector: {
Timelock: contracts.Timelock,
CallProxy: contracts.CallProxy,
diff --git a/deployment/keystone/changeset/deploy_forwarder_test.go b/deployment/keystone/changeset/deploy_forwarder_test.go
index dd894fde9d9..e04bac6d264 100644
--- a/deployment/keystone/changeset/deploy_forwarder_test.go
+++ b/deployment/keystone/changeset/deploy_forwarder_test.go
@@ -11,6 +11,7 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink/deployment"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset"
)
@@ -116,11 +117,11 @@ func TestConfigureForwarders(t *testing.T) {
require.Len(t, csOut.Proposals, nChains)
require.Nil(t, csOut.AddressBook)
- timelockContracts := make(map[uint64]*commonchangeset.TimelockExecutionContracts)
+ timelockContracts := make(map[uint64]*proposalutils.TimelockExecutionContracts)
for selector, contractSet := range te.ContractSets() {
require.NotNil(t, contractSet.Timelock)
require.NotNil(t, contractSet.CallProxy)
- timelockContracts[selector] = &commonchangeset.TimelockExecutionContracts{
+ timelockContracts[selector] = &proposalutils.TimelockExecutionContracts{
Timelock: contractSet.Timelock,
CallProxy: contractSet.CallProxy,
}
diff --git a/deployment/keystone/changeset/deploy_ocr3_test.go b/deployment/keystone/changeset/deploy_ocr3_test.go
index 5d02f83500d..7a276886242 100644
--- a/deployment/keystone/changeset/deploy_ocr3_test.go
+++ b/deployment/keystone/changeset/deploy_ocr3_test.go
@@ -13,6 +13,7 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/logger"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
kslib "github.com/smartcontractkit/chainlink/deployment/keystone"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset"
@@ -118,7 +119,7 @@ func TestConfigureOCR3(t *testing.T) {
contracts := te.ContractSets()[te.RegistrySelector]
require.NoError(t, err)
- var timelockContracts = map[uint64]*commonchangeset.TimelockExecutionContracts{
+ var timelockContracts = map[uint64]*proposalutils.TimelockExecutionContracts{
te.RegistrySelector: {
Timelock: contracts.Timelock,
CallProxy: contracts.CallProxy,
diff --git a/deployment/keystone/changeset/helpers_test.go b/deployment/keystone/changeset/helpers_test.go
index 4e7553d0b8e..f51a4ed610c 100644
--- a/deployment/keystone/changeset/helpers_test.go
+++ b/deployment/keystone/changeset/helpers_test.go
@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"math"
- "math/big"
"sort"
"testing"
@@ -21,6 +20,7 @@ import (
"github.com/smartcontractkit/chainlink/deployment"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
commontypes "github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
"github.com/smartcontractkit/chainlink/deployment/keystone"
@@ -41,7 +41,7 @@ func TestSetupTestEnv(t *testing.T) {
NumChains: 3,
UseMCMS: useMCMS,
})
- t.Run(fmt.Sprintf("set up test env using MCMS: %T", useMCMS), func(t *testing.T) {
+ t.Run(fmt.Sprintf("set up test env using MCMS: %t", useMCMS), func(t *testing.T) {
require.NotNil(t, te.Env.ExistingAddresses)
require.Len(t, te.Env.Chains, 3)
require.NotEmpty(t, te.RegistrySelector)
@@ -258,12 +258,7 @@ func SetupTestEnv(t *testing.T, c TestConfig) TestEnv {
timelockCfgs := make(map[uint64]commontypes.MCMSWithTimelockConfig)
for sel := range env.Chains {
t.Logf("Enabling MCMS on chain %d", sel)
- timelockCfgs[sel] = commontypes.MCMSWithTimelockConfig{
- Canceller: commonchangeset.SingleGroupMCMS(t),
- Bypasser: commonchangeset.SingleGroupMCMS(t),
- Proposer: commonchangeset.SingleGroupMCMS(t),
- TimelockMinDelay: big.NewInt(0),
- }
+ timelockCfgs[sel] = proposalutils.SingleGroupTimelockConfig(t)
}
env, err = commonchangeset.ApplyChangesets(t, env, nil, []commonchangeset.ChangesetApplication{
{
@@ -284,7 +279,7 @@ func SetupTestEnv(t *testing.T, c TestConfig) TestEnv {
require.NoError(t, mcms.Validate())
// transfer ownership of all contracts to the MCMS
- env, err = commonchangeset.ApplyChangesets(t, env, map[uint64]*commonchangeset.TimelockExecutionContracts{sel: {Timelock: mcms.Timelock, CallProxy: mcms.CallProxy}}, []commonchangeset.ChangesetApplication{
+ env, err = commonchangeset.ApplyChangesets(t, env, map[uint64]*proposalutils.TimelockExecutionContracts{sel: {Timelock: mcms.Timelock, CallProxy: mcms.CallProxy}}, []commonchangeset.ChangesetApplication{
{
Changeset: commonchangeset.WrapChangeSet(kschangeset.AcceptAllOwnershipsProposal),
Config: &kschangeset.AcceptAllOwnershipRequest{
diff --git a/deployment/keystone/changeset/update_don_test.go b/deployment/keystone/changeset/update_don_test.go
index 18287da6887..012111c4e62 100644
--- a/deployment/keystone/changeset/update_don_test.go
+++ b/deployment/keystone/changeset/update_don_test.go
@@ -7,6 +7,7 @@ import (
"github.com/stretchr/testify/require"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset/internal"
kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry"
@@ -103,22 +104,14 @@ func TestUpdateDon(t *testing.T) {
csOut, err := changeset.UpdateDon(te.Env, &cfg)
require.NoError(t, err)
- if true {
- require.Len(t, csOut.Proposals, 1)
- require.Len(t, csOut.Proposals[0].Transactions, 1) // append node capabilties cs, update don
- require.Len(t, csOut.Proposals[0].Transactions[0].Batch, 3) // add capabilities, update nodes, update don
- require.Nil(t, csOut.AddressBook)
- } else {
- require.Len(t, csOut.Proposals, 1)
- require.Len(t, csOut.Proposals[0].Transactions, 2) // append node capabilties cs, update don
- require.Len(t, csOut.Proposals[0].Transactions[0].Batch, 2) // add capabilities, update nodes
- require.Len(t, csOut.Proposals[0].Transactions[1].Batch, 1) // update don
- require.Nil(t, csOut.AddressBook)
- }
+ require.Len(t, csOut.Proposals, 1)
+	require.Len(t, csOut.Proposals[0].Transactions, 1)          // append node capabilities cs, update don
+ require.Len(t, csOut.Proposals[0].Transactions[0].Batch, 3) // add capabilities, update nodes, update don
+ require.Nil(t, csOut.AddressBook)
// now apply the changeset such that the proposal is signed and execed
contracts := te.ContractSets()[te.RegistrySelector]
- timelockContracts := map[uint64]*commonchangeset.TimelockExecutionContracts{
+ timelockContracts := map[uint64]*proposalutils.TimelockExecutionContracts{
te.RegistrySelector: {
Timelock: contracts.Timelock,
CallProxy: contracts.CallProxy,
diff --git a/deployment/keystone/changeset/update_node_capabilities_test.go b/deployment/keystone/changeset/update_node_capabilities_test.go
index cb5588ff3d1..87b49acf614 100644
--- a/deployment/keystone/changeset/update_node_capabilities_test.go
+++ b/deployment/keystone/changeset/update_node_capabilities_test.go
@@ -8,6 +8,7 @@ import (
"golang.org/x/exp/maps"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset"
kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
@@ -118,7 +119,7 @@ func TestUpdateNodeCapabilities(t *testing.T) {
// now apply the changeset such that the proposal is signed and execed
contracts := te.ContractSets()[te.RegistrySelector]
- timelockContracts := map[uint64]*commonchangeset.TimelockExecutionContracts{
+ timelockContracts := map[uint64]*proposalutils.TimelockExecutionContracts{
te.RegistrySelector: {
Timelock: contracts.Timelock,
CallProxy: contracts.CallProxy,
diff --git a/deployment/keystone/changeset/update_nodes_test.go b/deployment/keystone/changeset/update_nodes_test.go
index be3bfb12ee6..31f71cd9603 100644
--- a/deployment/keystone/changeset/update_nodes_test.go
+++ b/deployment/keystone/changeset/update_nodes_test.go
@@ -9,6 +9,7 @@ import (
"golang.org/x/exp/maps"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
)
@@ -89,7 +90,7 @@ func TestUpdateNodes(t *testing.T) {
// now apply the changeset such that the proposal is signed and execed
contracts := te.ContractSets()[te.RegistrySelector]
- timelockContracts := map[uint64]*commonchangeset.TimelockExecutionContracts{
+ timelockContracts := map[uint64]*proposalutils.TimelockExecutionContracts{
te.RegistrySelector: {
Timelock: contracts.Timelock,
CallProxy: contracts.CallProxy,
diff --git a/deployment/keystone/deploy.go b/deployment/keystone/deploy.go
index 7d3e5391219..cb7804d0051 100644
--- a/deployment/keystone/deploy.go
+++ b/deployment/keystone/deploy.go
@@ -483,6 +483,10 @@ func RegisterCapabilities(lggr logger.Logger, req RegisterCapabilitiesRequest) (
for cap := range uniqueCaps {
capabilities = append(capabilities, cap)
}
+ if len(capabilities) == 0 {
+ lggr.Warn("no new capabilities to register")
+ return &RegisterCapabilitiesResponse{}, nil
+ }
// not using mcms; ignore proposals
_, err = AddCapabilities(lggr, &contracts, registryChain, capabilities, false)
if err != nil {
diff --git a/deployment/keystone/ocr3config.go b/deployment/keystone/ocr3config.go
index aed142ea116..d80c4930df4 100644
--- a/deployment/keystone/ocr3config.go
+++ b/deployment/keystone/ocr3config.go
@@ -328,7 +328,7 @@ func configureOCR3contract(req configureOCR3Request) (*configureOCR3Response, er
)
if err != nil {
err = DecodeErr(kocr3.OCR3CapabilityABI, err)
- return nil, fmt.Errorf("failed to call SetConfig for OCR3 contract %s using mcms: %T: %w", req.contract.Address().String(), req.useMCMS, err)
+ return nil, fmt.Errorf("failed to call SetConfig for OCR3 contract %s using mcms: %t: %w", req.contract.Address().String(), req.useMCMS, err)
}
var ops *timelock.BatchChainOperation
diff --git a/deployment/keystone/state.go b/deployment/keystone/state.go
index cbf449c7f31..0ac7cdc89ed 100644
--- a/deployment/keystone/state.go
+++ b/deployment/keystone/state.go
@@ -78,7 +78,7 @@ func GetContractSets(lggr logger.Logger, req *GetContractSetsRequest) (*GetContr
func loadContractSet(lggr logger.Logger, chain deployment.Chain, addresses map[string]deployment.TypeAndVersion) (*ContractSet, error) {
var out ContractSet
- mcmsWithTimelock, err := commonchangeset.MaybeLoadMCMSWithTimelockState(chain, addresses)
+ mcmsWithTimelock, err := commonchangeset.MaybeLoadMCMSWithTimelockChainState(chain, addresses)
if err != nil {
return nil, fmt.Errorf("failed to load mcms contract: %w", err)
}
diff --git a/deployment/solana_chain.go b/deployment/solana_chain.go
new file mode 100644
index 00000000000..338642e3e32
--- /dev/null
+++ b/deployment/solana_chain.go
@@ -0,0 +1,5 @@
+package deployment
+
+// SolChain represents a Solana chain.
+type SolChain struct {
+}
diff --git a/go.mod b/go.mod
index 2149898f15b..7dcbbfd631b 100644
--- a/go.mod
+++ b/go.mod
@@ -11,7 +11,7 @@ require (
github.com/NethermindEth/juno v0.3.1
github.com/NethermindEth/starknet.go v0.7.1-0.20240401080518-34a506f3cfdb
github.com/XSAM/otelsql v0.27.0
- github.com/andybalholm/brotli v1.1.0
+ github.com/andybalholm/brotli v1.1.1
github.com/avast/retry-go/v4 v4.6.0
github.com/btcsuite/btcd/btcec/v2 v2.3.4
github.com/cometbft/cometbft v0.37.5
@@ -25,7 +25,7 @@ require (
github.com/fxamacker/cbor/v2 v2.7.0
github.com/gagliardetto/solana-go v1.8.4
github.com/getsentry/sentry-go v0.27.0
- github.com/gin-contrib/cors v1.5.0
+ github.com/gin-contrib/cors v1.7.2
github.com/gin-contrib/expvar v0.0.1
github.com/gin-contrib/sessions v0.0.5
github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4
@@ -78,12 +78,12 @@ require (
github.com/shopspring/decimal v1.4.0
github.com/smartcontractkit/chain-selectors v1.0.34
github.com/smartcontractkit/chainlink-automation v0.8.1
- github.com/smartcontractkit/chainlink-ccip v0.0.0-20241211150100-7683331f64a0
- github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83
+ github.com/smartcontractkit/chainlink-ccip v0.0.0-20241213122413-5e8f65dd6b1b
+ github.com/smartcontractkit/chainlink-common v0.3.1-0.20241214155818-b403079b2805
github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e
github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241202141438-a90db35252db
github.com/smartcontractkit/chainlink-feeds v0.1.1
- github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.2
+ github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0
github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241210172617-6fd1891d0fbc
github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241202202529-2033490e77b8
github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12
@@ -110,12 +110,12 @@ require (
go.opentelemetry.io/otel/trace v1.31.0
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
- golang.org/x/crypto v0.28.0
+ golang.org/x/crypto v0.31.0
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
golang.org/x/mod v0.21.0
- golang.org/x/sync v0.8.0
- golang.org/x/term v0.25.0
- golang.org/x/text v0.19.0
+ golang.org/x/sync v0.10.0
+ golang.org/x/term v0.27.0
+ golang.org/x/text v0.21.0
golang.org/x/time v0.7.0
golang.org/x/tools v0.26.0
gonum.org/v1/gonum v0.15.1
@@ -148,6 +148,7 @@ require (
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
+ github.com/apache/arrow-go/v18 v18.0.0 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/atombender/go-jsonschema v0.16.1-0.20240916205339-a74cd4e2851c // indirect
github.com/bahlo/generic-list-go v0.2.0 // indirect
@@ -158,13 +159,14 @@ require (
github.com/blendle/zapdriver v1.3.1 // indirect
github.com/buger/jsonparser v1.1.1 // indirect
github.com/bytecodealliance/wasmtime-go/v23 v23.0.0 // indirect
- github.com/bytedance/sonic v1.10.1 // indirect
+ github.com/bytedance/sonic v1.11.6 // indirect
+ github.com/bytedance/sonic/loader v0.1.1 // indirect
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
- github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
- github.com/chenzhuoyu/iasm v0.9.0 // indirect
+ github.com/cloudwego/base64x v0.1.4 // indirect
+ github.com/cloudwego/iasm v0.2.0 // indirect
github.com/cockroachdb/errors v1.11.3 // indirect
github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
@@ -219,7 +221,7 @@ require (
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.22.0 // indirect
github.com/go-webauthn/x v0.1.5 // indirect
- github.com/goccy/go-json v0.10.2 // indirect
+ github.com/goccy/go-json v0.10.3 // indirect
github.com/goccy/go-yaml v1.12.0 // indirect
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
github.com/gofrs/flock v0.8.1 // indirect
@@ -232,6 +234,7 @@ require (
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/btree v1.1.2 // indirect
+ github.com/google/flatbuffers v24.3.25+incompatible // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-tpm v0.9.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
@@ -269,8 +272,8 @@ require (
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jmhodges/levigo v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.9 // indirect
- github.com/klauspost/cpuid/v2 v2.2.5 // indirect
+ github.com/klauspost/compress v1.17.11 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.8 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
@@ -279,6 +282,7 @@ require (
github.com/logrusorgru/aurora v2.0.3+incompatible // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/marcboeker/go-duckdb v1.8.3 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
@@ -302,6 +306,7 @@ require (
github.com/opencontainers/runc v1.1.10 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect
+ github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/procfs v0.15.1 // indirect
@@ -343,6 +348,7 @@ require (
github.com/x448/float16 v0.8.4 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
+ github.com/zeebo/xxh3 v1.0.2 // indirect
github.com/zondax/hid v0.9.2 // indirect
github.com/zondax/ledger-go v0.14.3 // indirect
go.dedis.ch/protobuf v1.0.11 // indirect
@@ -369,7 +375,7 @@ require (
go.uber.org/ratelimit v0.3.1 // indirect
golang.org/x/arch v0.11.0 // indirect
golang.org/x/net v0.30.0 // indirect
- golang.org/x/sys v0.26.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
google.golang.org/api v0.202.0 // indirect
google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 // indirect
diff --git a/go.sum b/go.sum
index 45a2dfab4fe..bc6352c2ee0 100644
--- a/go.sum
+++ b/go.sum
@@ -144,9 +144,13 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax
github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=
github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg=
-github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
-github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
+github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
+github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow-go/v18 v18.0.0 h1:1dBDaSbH3LtulTyOVYaBCHO3yVRwjV+TZaqn3g6V7ZM=
+github.com/apache/arrow-go/v18 v18.0.0/go.mod h1:t6+cWRSmKgdQ6HsxisQjok+jBpKGhRDiqcf3p0p/F+A=
+github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE=
+github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw=
github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4=
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -196,10 +200,10 @@ github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMU
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/bytecodealliance/wasmtime-go/v23 v23.0.0 h1:NJvU4S8KEk1GnF6+FvlnzMD/8wXTj/mYJSG6Q4yu3Pw=
github.com/bytecodealliance/wasmtime-go/v23 v23.0.0/go.mod h1:5YIL+Ouiww2zpO7u+iZ1U1G5NvmwQYaXdmCZQGjQM0U=
-github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
-github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM=
-github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc=
-github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
+github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
+github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
+github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
+github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
@@ -215,12 +219,6 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
-github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
-github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0=
-github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
-github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
-github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=
@@ -229,6 +227,10 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
+github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
+github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
+github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -411,8 +413,8 @@ github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813/go.mod h1:P+oSoE9y
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gin-contrib/cors v1.5.0 h1:DgGKV7DDoOn36DFkNtbHrjoRiT5ExCe+PC9/xp7aKvk=
-github.com/gin-contrib/cors v1.5.0/go.mod h1:TvU7MAZ3EwrPLI2ztzTt3tqgvBCq+wn8WpZmfADjupI=
+github.com/gin-contrib/cors v1.7.2 h1:oLDHxdg8W/XDoN/8zamqk/Drgt4oVZDvaV0YmvVICQw=
+github.com/gin-contrib/cors v1.7.2/go.mod h1:SUJVARKgQ40dmrzgXEVxj2m7Ig1v1qIboQkPDTQ9t2E=
github.com/gin-contrib/expvar v0.0.1 h1:IuU5ArEgihz50vG8Onrwz22kJr7Mcvgv9xSSpfU5g+w=
github.com/gin-contrib/expvar v0.0.1/go.mod h1:8o2CznfQi1JjktORdHr2/abg3wSV6OCnXh0yGypvvVw=
github.com/gin-contrib/sessions v0.0.5 h1:CATtfHmLMQrMNpJRgzjWXD7worTh7g7ritsQfmF+0jE=
@@ -484,8 +486,8 @@ github.com/go-webauthn/webauthn v0.9.4/go.mod h1:LqupCtzSef38FcxzaklmOn7AykGKhAh
github.com/go-webauthn/x v0.1.5 h1:V2TCzDU2TGLd0kSZOXdrqDVV5JB9ILnKxA9S53CSBw0=
github.com/go-webauthn/x v0.1.5/go.mod h1:qbzWwcFcv4rTwtCLOZd+icnr6B7oSsAGZJqlt8cukqY=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
-github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
-github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
+github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.12.0 h1:/1WHjnMsI1dlIBQutrvSMGZRQufVO3asrHfTwfACoPM=
github.com/goccy/go-yaml v1.12.0/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0=
@@ -550,6 +552,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI=
+github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -814,14 +818,16 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4=
+github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
-github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
+github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -868,6 +874,8 @@ github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYt
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f h1:tVvGiZQFjOXP+9YyGqSA6jE55x1XVxmoPYudncxrZ8U=
github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f/go.mod h1:Z60vy0EZVSu0bOugCHdcN5ZxFMKSpjRgsnh0XKPFqqk=
+github.com/marcboeker/go-duckdb v1.8.3 h1:ZkYwiIZhbYsT6MmJsZ3UPTHrTZccDdM4ztoqSlEMXiQ=
+github.com/marcboeker/go-duckdb v1.8.3/go.mod h1:C9bYRE1dPYb1hhfu/SSomm78B0FXmNgRvv6YBW/Hooc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
@@ -900,6 +908,10 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N
github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM=
github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94=
github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@@ -1002,6 +1014,8 @@ github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xl
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU=
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
+github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
@@ -1123,18 +1137,18 @@ github.com/smartcontractkit/chain-selectors v1.0.34 h1:MJ17OGu8+jjl426pcKrJkCf3f
github.com/smartcontractkit/chain-selectors v1.0.34/go.mod h1:xsKM0aN3YGcQKTPRPDDtPx2l4mlTN1Djmg0VVXV40b8=
github.com/smartcontractkit/chainlink-automation v0.8.1 h1:sTc9LKpBvcKPc1JDYAmgBc2xpDKBco/Q4h4ydl6+UUU=
github.com/smartcontractkit/chainlink-automation v0.8.1/go.mod h1:Iij36PvWZ6blrdC5A/nrQUBuf3MH3JvsBB9sSyc9W08=
-github.com/smartcontractkit/chainlink-ccip v0.0.0-20241211150100-7683331f64a0 h1:/1L+v4SxUD2K5RMRbfByyLfePMAgQKeD0onSetPnGmA=
-github.com/smartcontractkit/chainlink-ccip v0.0.0-20241211150100-7683331f64a0/go.mod h1:F8xQAIW0ymb2BZhqn89sWZLXreJhM5KDVF6Qb4y44N0=
-github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83 h1:NjrU7KOn3Tk+C6QFo9tQBqeotPKytpBwhn/J1s+yiiY=
-github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83/go.mod h1:bQktEJf7sJ0U3SmIcXvbGUox7SmXcnSEZ4kUbT8R5Nk=
+github.com/smartcontractkit/chainlink-ccip v0.0.0-20241213122413-5e8f65dd6b1b h1:iSQJ6ng4FhEswf8SXunGkaJlVP3E3JlgLB8Oo2f3Ud4=
+github.com/smartcontractkit/chainlink-ccip v0.0.0-20241213122413-5e8f65dd6b1b/go.mod h1:F8xQAIW0ymb2BZhqn89sWZLXreJhM5KDVF6Qb4y44N0=
+github.com/smartcontractkit/chainlink-common v0.3.1-0.20241214155818-b403079b2805 h1:Pz8jB/6qe10xT10h2S3LFYJrnebNpG5rJ/w16HZGwPQ=
+github.com/smartcontractkit/chainlink-common v0.3.1-0.20241214155818-b403079b2805/go.mod h1:yti7e1+G9hhkYhj+L5sVUULn9Bn3bBL5/AxaNqdJ5YQ=
github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e h1:PRoeby6ZlTuTkv2f+7tVU4+zboTfRzI+beECynF4JQ0=
github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e/go.mod h1:mUh5/woemsVaHgTorA080hrYmO3syBCmPdnWc/5dOqk=
github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241202141438-a90db35252db h1:N1RH1hSr2ACzOFc9hkCcjE8pRBTdcU3p8nsTJByaLes=
github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241202141438-a90db35252db/go.mod h1:yjb9d4q7+m8aGbjfTbkNoNuA4PeSxcUszsSZHDrvS0E=
github.com/smartcontractkit/chainlink-feeds v0.1.1 h1:JzvUOM/OgGQA1sOqTXXl52R6AnNt+Wg64sVG+XSA49c=
github.com/smartcontractkit/chainlink-feeds v0.1.1/go.mod h1:55EZ94HlKCfAsUiKUTNI7QlE/3d3IwTlsU3YNa/nBb4=
-github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.2 h1:onBe3DqNrbtOAzKS4PrPIiJX65BGo1aYiYZxFVEW+jc=
-github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.2/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo=
+github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 h1:ZBat8EBvE2LpSQR9U1gEbRV6PfAkiFdINmQ8nVnXIAQ=
+github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo=
github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241210172617-6fd1891d0fbc h1:dssRwJhmzJkUN/OajaDj2GsxBn+Tupk3bI1BkPEoJg0=
github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241210172617-6fd1891d0fbc/go.mod h1:p8aUDfJeley6oer7y+Ucd3edOtRlMTnWg3mN6rhaLWo=
github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241202202529-2033490e77b8 h1:tNS7U9lrxkFvEuyxQv11HHOiV9LPDGC9wYEy+yM/Jv4=
@@ -1280,6 +1294,8 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
+github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
+github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -1289,6 +1305,10 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
+github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U=
github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM=
@@ -1425,8 +1445,8 @@ golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
-golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
-golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1551,8 +1571,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1636,8 +1656,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1647,8 +1667,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
-golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
-golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1663,8 +1683,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/integration-tests/ccip-tests/testconfig/README.md b/integration-tests/ccip-tests/testconfig/README.md
index ff57ecaa220..d614ed62ea4 100644
--- a/integration-tests/ccip-tests/testconfig/README.md
+++ b/integration-tests/ccip-tests/testconfig/README.md
@@ -430,32 +430,6 @@ Example usage:
TTL = "11h"
```
-### CCIP.Env.Logging
-
-Specifies the logging configuration for the test. Imported from [LoggingConfig](https://github.com/smartcontractkit/chainlink-testing-framework/blob/main/config/logging.go#L11) in chainlink-testing-framework.
-Example usage:
-
-```toml
-[CCIP.Env.Logging]
-test_log_collect = false # if set to true will save logs even if test did not fail
-
-[CCIP.Env.Logging.LogStream]
-# supported targets: file, loki, in-memory. if empty no logs will be persistet
-log_targets = ["file"]
-# context timeout for starting log producer and also time-frame for requesting logs
-log_producer_timeout = "10s"
-# number of retries before log producer gives up and stops listening to logs
-log_producer_retry_limit = 10
-
-[CCIP.Env.Logging.Loki]
-tenant_id = "..."
-endpoint = "https://loki...."
-
-[CCIP.Env.Logging.Grafana]
-base_url = "https://grafana..../"
-dashboard_url = "/d/6vjVx-1V8/ccip-long-running-tests"
-```
-
### CCIP.Env.Lane.LeaderLaneEnabled
Specifies whether to enable the leader lane feature. This setting is only applicable for new deployments.
diff --git a/integration-tests/ccip-tests/testconfig/global.go b/integration-tests/ccip-tests/testconfig/global.go
index 4caa8a9ac00..8866d31705a 100644
--- a/integration-tests/ccip-tests/testconfig/global.go
+++ b/integration-tests/ccip-tests/testconfig/global.go
@@ -175,120 +175,6 @@ type Common struct {
func (p *Common) ReadFromEnvVar() error {
logger := logging.GetTestLogger(nil)
- testLogCollect := ctfconfig.MustReadEnvVar_Boolean(ctfconfig.E2E_TEST_LOG_COLLECT_ENV)
- if testLogCollect != nil {
- if p.Logging == nil {
- p.Logging = &ctfconfig.LoggingConfig{}
- }
- logger.Debug().Msgf("Using %s env var to override Logging.TestLogCollect", ctfconfig.E2E_TEST_LOG_COLLECT_ENV)
- p.Logging.TestLogCollect = testLogCollect
- }
-
- loggingRunID := ctfconfig.MustReadEnvVar_String(ctfconfig.E2E_TEST_LOGGING_RUN_ID_ENV)
- if loggingRunID != "" {
- if p.Logging == nil {
- p.Logging = &ctfconfig.LoggingConfig{}
- }
- logger.Debug().Msgf("Using %s env var to override Logging.RunID", ctfconfig.E2E_TEST_LOGGING_RUN_ID_ENV)
- p.Logging.RunId = &loggingRunID
- }
-
- logstreamLogTargets := ctfconfig.MustReadEnvVar_Strings(ctfconfig.E2E_TEST_LOG_STREAM_LOG_TARGETS_ENV, ",")
- if len(logstreamLogTargets) > 0 {
- if p.Logging == nil {
- p.Logging = &ctfconfig.LoggingConfig{}
- }
- if p.Logging.LogStream == nil {
- p.Logging.LogStream = &ctfconfig.LogStreamConfig{}
- }
- logger.Debug().Msgf("Using %s env var to override Logging.LogStream.LogTargets", ctfconfig.E2E_TEST_LOG_STREAM_LOG_TARGETS_ENV)
- p.Logging.LogStream.LogTargets = logstreamLogTargets
- }
-
- lokiTenantID := ctfconfig.MustReadEnvVar_String(ctfconfig.E2E_TEST_LOKI_TENANT_ID_ENV)
- if lokiTenantID != "" {
- if p.Logging == nil {
- p.Logging = &ctfconfig.LoggingConfig{}
- }
- if p.Logging.Loki == nil {
- p.Logging.Loki = &ctfconfig.LokiConfig{}
- }
- logger.Debug().Msgf("Using %s env var to override Logging.Loki.TenantId", ctfconfig.E2E_TEST_LOKI_TENANT_ID_ENV)
- p.Logging.Loki.TenantId = &lokiTenantID
- }
-
- lokiEndpoint := ctfconfig.MustReadEnvVar_String(ctfconfig.E2E_TEST_LOKI_ENDPOINT_ENV)
- if lokiEndpoint != "" {
- if p.Logging == nil {
- p.Logging = &ctfconfig.LoggingConfig{}
- }
- if p.Logging.Loki == nil {
- p.Logging.Loki = &ctfconfig.LokiConfig{}
- }
- logger.Debug().Msgf("Using %s env var to override Logging.Loki.Endpoint", ctfconfig.E2E_TEST_LOKI_ENDPOINT_ENV)
- p.Logging.Loki.Endpoint = &lokiEndpoint
- }
-
- lokiBasicAuth := ctfconfig.MustReadEnvVar_String(ctfconfig.E2E_TEST_LOKI_BASIC_AUTH_ENV)
- if lokiBasicAuth != "" {
- if p.Logging == nil {
- p.Logging = &ctfconfig.LoggingConfig{}
- }
- if p.Logging.Loki == nil {
- p.Logging.Loki = &ctfconfig.LokiConfig{}
- }
- logger.Debug().Msgf("Using %s env var to override Logging.Loki.BasicAuth", ctfconfig.E2E_TEST_LOKI_BASIC_AUTH_ENV)
- p.Logging.Loki.BasicAuth = &lokiBasicAuth
- }
-
- lokiBearerToken := ctfconfig.MustReadEnvVar_String(ctfconfig.E2E_TEST_LOKI_BEARER_TOKEN_ENV)
- if lokiBearerToken != "" {
- if p.Logging == nil {
- p.Logging = &ctfconfig.LoggingConfig{}
- }
- if p.Logging.Loki == nil {
- p.Logging.Loki = &ctfconfig.LokiConfig{}
- }
- logger.Debug().Msgf("Using %s env var to override Logging.Loki.BearerToken", ctfconfig.E2E_TEST_LOKI_BEARER_TOKEN_ENV)
- p.Logging.Loki.BearerToken = &lokiBearerToken
- }
-
- grafanaBaseUrl := ctfconfig.MustReadEnvVar_String(ctfconfig.E2E_TEST_GRAFANA_BASE_URL_ENV)
- if grafanaBaseUrl != "" {
- if p.Logging == nil {
- p.Logging = &ctfconfig.LoggingConfig{}
- }
- if p.Logging.Grafana == nil {
- p.Logging.Grafana = &ctfconfig.GrafanaConfig{}
- }
- logger.Debug().Msgf("Using %s env var to override Logging.Grafana.BaseUrl", ctfconfig.E2E_TEST_GRAFANA_BASE_URL_ENV)
- p.Logging.Grafana.BaseUrl = &grafanaBaseUrl
- }
-
- grafanaDashboardUrl := ctfconfig.MustReadEnvVar_String(ctfconfig.E2E_TEST_GRAFANA_DASHBOARD_URL_ENV)
- if grafanaDashboardUrl != "" {
- if p.Logging == nil {
- p.Logging = &ctfconfig.LoggingConfig{}
- }
- if p.Logging.Grafana == nil {
- p.Logging.Grafana = &ctfconfig.GrafanaConfig{}
- }
- logger.Debug().Msgf("Using %s env var to override Logging.Grafana.DashboardUrl", ctfconfig.E2E_TEST_GRAFANA_DASHBOARD_URL_ENV)
- p.Logging.Grafana.DashboardUrl = &grafanaDashboardUrl
- }
-
- grafanaBearerToken := ctfconfig.MustReadEnvVar_String(ctfconfig.E2E_TEST_GRAFANA_BEARER_TOKEN_ENV)
- if grafanaBearerToken != "" {
- if p.Logging == nil {
- p.Logging = &ctfconfig.LoggingConfig{}
- }
- if p.Logging.Grafana == nil {
- p.Logging.Grafana = &ctfconfig.GrafanaConfig{}
- }
- logger.Debug().Msgf("Using %s env var to override Logging.Grafana.BearerToken", ctfconfig.E2E_TEST_GRAFANA_BEARER_TOKEN_ENV)
- p.Logging.Grafana.BearerToken = &grafanaBearerToken
- }
-
selectedNetworks := ctfconfig.MustReadEnvVar_Strings(ctfconfig.E2E_TEST_SELECTED_NETWORK_ENV, ",")
if len(selectedNetworks) > 0 {
if p.Network == nil {
@@ -421,9 +307,6 @@ func (p *Common) GetSethConfig() *seth.Config {
}
func (p *Common) Validate() error {
- if err := p.Logging.Validate(); err != nil {
- return fmt.Errorf("error validating logging config %w", err)
- }
if p.Network == nil {
return errors.New("no networks specified")
}
diff --git a/integration-tests/ccip-tests/testconfig/tomls/ccip-default.toml b/integration-tests/ccip-tests/testconfig/tomls/ccip-default.toml
index c82e2f930be..89858a94ddb 100644
--- a/integration-tests/ccip-tests/testconfig/tomls/ccip-default.toml
+++ b/integration-tests/ccip-tests/testconfig/tomls/ccip-default.toml
@@ -73,17 +73,6 @@ addresses_to_fund = [
[CCIP.Env.PrivateEthereumNetworks.SIMULATED_2.EthereumChainConfig.HardForkEpochs]
Deneb = 500
-[CCIP.Env.Logging]
-test_log_collect = false # if set to true will save logs even if test did not fail
-
-[CCIP.Env.Logging.LogStream]
-# supported targets: file, loki, in-memory. if empty no logs will be persistet
-log_targets = ["file"]
-# context timeout for starting log producer and also time-frame for requesting logs
-log_producer_timeout = "10s"
-# number of retries before log producer gives up and stops listening to logs
-log_producer_retry_limit = 10
-
# these values will be used to set up chainlink DON
# along with these values, the secrets needs to be specified as part of .env variables
#
diff --git a/integration-tests/ccip-tests/testsetups/test_env.go b/integration-tests/ccip-tests/testsetups/test_env.go
index 263d291453d..3c3406a3e5a 100644
--- a/integration-tests/ccip-tests/testsetups/test_env.go
+++ b/integration-tests/ccip-tests/testsetups/test_env.go
@@ -352,7 +352,6 @@ func DeployLocalCluster(
pointer.GetString(clNode.ChainlinkImage.Image),
pointer.GetString(clNode.ChainlinkImage.Version),
toml,
- env.LogStream,
test_env.WithPgDBOptions(
ctftestenv.WithPostgresImageName(clNode.DBImage),
ctftestenv.WithPostgresImageVersion(clNode.DBTag),
@@ -381,7 +380,6 @@ func DeployLocalCluster(
pointer.GetString(testInputs.EnvInput.NewCLCluster.Common.ChainlinkImage.Image),
pointer.GetString(testInputs.EnvInput.NewCLCluster.Common.ChainlinkImage.Version),
toml,
- env.LogStream,
test_env.WithPgDBOptions(
ctftestenv.WithPostgresImageName(testInputs.EnvInput.NewCLCluster.Common.DBImage),
ctftestenv.WithPostgresImageVersion(testInputs.EnvInput.NewCLCluster.Common.DBTag),
diff --git a/integration-tests/docker/test_env/cl_node.go b/integration-tests/docker/test_env/cl_node.go
index b5c2505b252..8ebaf579d0a 100644
--- a/integration-tests/docker/test_env/cl_node.go
+++ b/integration-tests/docker/test_env/cl_node.go
@@ -24,7 +24,6 @@ import (
"github.com/smartcontractkit/chainlink-testing-framework/lib/docker"
"github.com/smartcontractkit/chainlink-testing-framework/lib/docker/test_env"
"github.com/smartcontractkit/chainlink-testing-framework/lib/logging"
- "github.com/smartcontractkit/chainlink-testing-framework/lib/logstream"
"github.com/smartcontractkit/chainlink-testing-framework/lib/utils/testcontext"
"github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
@@ -126,11 +125,11 @@ func WithPgDBOptions(opts ...test_env.PostgresDbOption) ClNodeOption {
}
}
-func NewClNode(networks []string, imageName, imageVersion string, nodeConfig *chainlink.Config, logStream *logstream.LogStream, opts ...ClNodeOption) (*ClNode, error) {
+func NewClNode(networks []string, imageName, imageVersion string, nodeConfig *chainlink.Config, opts ...ClNodeOption) (*ClNode, error) {
nodeDefaultCName := fmt.Sprintf("%s-%s", "cl-node", uuid.NewString()[0:8])
pgDefaultCName := fmt.Sprintf("pg-%s", nodeDefaultCName)
- pgDb, err := test_env.NewPostgresDb(networks, test_env.WithPostgresDbContainerName(pgDefaultCName), test_env.WithPostgresDbLogStream(logStream))
+ pgDb, err := test_env.NewPostgresDb(networks, test_env.WithPostgresDbContainerName(pgDefaultCName))
if err != nil {
return nil, err
}
@@ -140,7 +139,6 @@ func NewClNode(networks []string, imageName, imageVersion string, nodeConfig *ch
ContainerImage: imageName,
ContainerVersion: imageVersion,
Networks: networks,
- LogStream: logStream,
StartupTimeout: 3 * time.Minute,
},
UserEmail: "local@local.com",
@@ -490,28 +488,6 @@ func (n *ClNode) getContainerRequest(secrets string) (
FileMode: 0644,
},
},
- LifecycleHooks: []tc.ContainerLifecycleHooks{
- {
- PostStarts: []tc.ContainerHook{
- func(ctx context.Context, c tc.Container) error {
- if n.LogStream != nil {
- return n.LogStream.ConnectContainer(ctx, c, "")
- }
- return nil
- },
- },
- PreStops: []tc.ContainerHook{
- func(ctx context.Context, c tc.Container) error {
- if n.LogStream != nil {
- return n.LogStream.DisconnectContainer(c)
- }
- return nil
- },
- },
- PostStops: n.PostStopsHooks,
- PreTerminates: n.PreTerminatesHooks,
- },
- },
}, nil
}
diff --git a/integration-tests/docker/test_env/test_env.go b/integration-tests/docker/test_env/test_env.go
index 1ca50760d17..a37b7f813a7 100644
--- a/integration-tests/docker/test_env/test_env.go
+++ b/integration-tests/docker/test_env/test_env.go
@@ -20,8 +20,6 @@ import (
"github.com/smartcontractkit/chainlink-testing-framework/lib/docker"
"github.com/smartcontractkit/chainlink-testing-framework/lib/docker/test_env"
"github.com/smartcontractkit/chainlink-testing-framework/lib/logging"
- "github.com/smartcontractkit/chainlink-testing-framework/lib/logstream"
- "github.com/smartcontractkit/chainlink-testing-framework/lib/utils/runid"
"github.com/smartcontractkit/chainlink/integration-tests/testconfig/ccip"
"github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
@@ -36,7 +34,6 @@ var (
type CLClusterTestEnv struct {
Cfg *TestEnvConfig
DockerNetwork *tc.DockerNetwork
- LogStream *logstream.LogStream
TestConfig ctf_config.GlobalTestConfig
/* components */
@@ -69,7 +66,7 @@ func (te *CLClusterTestEnv) WithTestEnvConfig(cfg *TestEnvConfig) *CLClusterTest
te.Cfg = cfg
if cfg.MockAdapter.ContainerName != "" {
n := []string{te.DockerNetwork.Name}
- te.MockAdapter = test_env.NewKillgrave(n, te.Cfg.MockAdapter.ImpostersPath, test_env.WithContainerName(te.Cfg.MockAdapter.ContainerName), test_env.WithLogStream(te.LogStream))
+ te.MockAdapter = test_env.NewKillgrave(n, te.Cfg.MockAdapter.ImpostersPath, test_env.WithContainerName(te.Cfg.MockAdapter.ContainerName))
}
return te
}
@@ -99,7 +96,6 @@ func (te *CLClusterTestEnv) StartEthereumNetwork(cfg *ctf_config.EthereumNetwork
builder := test_env.NewEthereumNetworkBuilder()
c, err := builder.WithExistingConfig(*cfg).
WithTest(te.t).
- WithLogStream(te.LogStream).
Build()
if err != nil {
return blockchain.EVMNetwork{}, test_env.RpcProvider{}, err
@@ -132,7 +128,6 @@ func (te *CLClusterTestEnv) StartJobDistributor(cfg *ccip.JDConfig) error {
job_distributor.WithVersion(cfg.GetJDVersion()),
job_distributor.WithDBURL(jdDB.InternalURL.String()),
)
- jd.LogStream = te.LogStream
err = jd.StartContainer()
if err != nil {
return fmt.Errorf("failed to start job-distributor: %w", err)
@@ -160,7 +155,7 @@ func (te *CLClusterTestEnv) StartClCluster(nodeConfig *chainlink.Config, count i
opts = append(opts, WithSecrets(secretsConfig))
te.ClCluster = &ClCluster{}
for i := 0; i < count; i++ {
- ocrNode, err := NewClNode([]string{te.DockerNetwork.Name}, *testconfig.GetChainlinkImageConfig().Image, *testconfig.GetChainlinkImageConfig().Version, nodeConfig, te.LogStream, opts...)
+ ocrNode, err := NewClNode([]string{te.DockerNetwork.Name}, *testconfig.GetChainlinkImageConfig().Image, *testconfig.GetChainlinkImageConfig().Version, nodeConfig, opts...)
if err != nil {
return err
}
@@ -193,11 +188,6 @@ type CleanupOpts struct {
func (te *CLClusterTestEnv) Cleanup(opts CleanupOpts) error {
te.l.Info().Msg("Cleaning up test environment")
- runIdErr := runid.RemoveLocalRunId(te.TestConfig.GetLoggingConfig().RunId)
- if runIdErr != nil {
- te.l.Warn().Msgf("Failed to remove .run.id file due to: %s (not a big deal, you can still remove it manually)", runIdErr.Error())
- }
-
if te.t == nil {
return fmt.Errorf("cannot cleanup test environment without a testing.T")
}
diff --git a/integration-tests/docker/test_env/test_env_builder.go b/integration-tests/docker/test_env/test_env_builder.go
index cdce826f2c2..e11a3c96095 100644
--- a/integration-tests/docker/test_env/test_env_builder.go
+++ b/integration-tests/docker/test_env/test_env_builder.go
@@ -2,28 +2,25 @@ package test_env
import (
"fmt"
- "math"
"os"
"path/filepath"
- "slices"
"strings"
+ "sync"
"testing"
"time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"go.uber.org/zap/zapcore"
-
- "github.com/smartcontractkit/chainlink-testing-framework/seth"
+ "golang.org/x/sync/errgroup"
"github.com/smartcontractkit/chainlink-testing-framework/lib/blockchain"
ctf_config "github.com/smartcontractkit/chainlink-testing-framework/lib/config"
+ ctf_docker "github.com/smartcontractkit/chainlink-testing-framework/lib/docker"
"github.com/smartcontractkit/chainlink-testing-framework/lib/docker/test_env"
"github.com/smartcontractkit/chainlink-testing-framework/lib/logging"
- "github.com/smartcontractkit/chainlink-testing-framework/lib/logstream"
"github.com/smartcontractkit/chainlink-testing-framework/lib/networks"
"github.com/smartcontractkit/chainlink-testing-framework/lib/testreporters"
- "github.com/smartcontractkit/chainlink-testing-framework/lib/testsummary"
"github.com/smartcontractkit/chainlink-testing-framework/lib/utils/osutil"
"github.com/smartcontractkit/chainlink/integration-tests/testconfig/ccip"
@@ -46,7 +43,6 @@ type ChainlinkNodeLogScannerSettings struct {
}
type CLTestEnvBuilder struct {
- hasLogStream bool
hasKillgrave bool
jdConfig *ccip.JDConfig
clNodeConfig *chainlink.Config
@@ -90,7 +86,6 @@ func GetDefaultChainlinkNodeLogScannerSettingsWithExtraAllowedMessages(extraAllo
func NewCLTestEnvBuilder() *CLTestEnvBuilder {
return &CLTestEnvBuilder{
l: log.Logger,
- hasLogStream: true,
isEVM: true,
chainlinkNodeLogScannerSettings: &DefaultChainlinkNodeLogScannerSettings,
}
@@ -134,12 +129,6 @@ func (b *CLTestEnvBuilder) WithTestInstance(t *testing.T) *CLTestEnvBuilder {
return b
}
-// WithoutLogStream disables LogStream logging component
-func (b *CLTestEnvBuilder) WithoutLogStream() *CLTestEnvBuilder {
- b.hasLogStream = false
- return b
-}
-
func (b *CLTestEnvBuilder) WithoutChainlinkNodeLogScanner() *CLTestEnvBuilder {
b.chainlinkNodeLogScannerSettings = &ChainlinkNodeLogScannerSettings{}
return b
@@ -250,102 +239,105 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
b.te.WithTestInstance(b.t)
}
- if b.hasLogStream {
- loggingConfig := b.testConfig.GetLoggingConfig()
- // we need to enable logging to file if we want to scan logs
- if b.chainlinkNodeLogScannerSettings != nil && !slices.Contains(loggingConfig.LogStream.LogTargets, string(logstream.File)) {
- b.l.Debug().Msg("Enabling logging to file in order to support Chainlink node log scanning")
- loggingConfig.LogStream.LogTargets = append(loggingConfig.LogStream.LogTargets, string(logstream.File))
- }
- b.te.LogStream, err = logstream.NewLogStream(b.te.t, b.testConfig.GetLoggingConfig())
- if err != nil {
- return nil, err
- }
-
- // this clean up has to be added as the FIRST one, because cleanup functions are executed in reverse order (LIFO)
- if b.t != nil && b.cleanUpType != CleanUpTypeNone {
- b.t.Cleanup(func() {
- b.l.Info().Msg("Shutting down LogStream")
- logPath, err := osutil.GetAbsoluteFolderPath("logs")
- if err == nil {
- b.l.Info().Str("Absolute path", logPath).Msg("LogStream logs folder location")
- }
-
- // flush logs when test failed or when we are explicitly told to collect logs
- flushLogStream := b.t.Failed() || *b.testConfig.GetLoggingConfig().TestLogCollect
+ // this clean up has to be added as the FIRST one, because cleanup functions are executed in reverse order (LIFO)
+ if b.t != nil && b.cleanUpType != CleanUpTypeNone {
+ b.t.Cleanup(func() {
+ logsDir := fmt.Sprintf("logs/%s-%s", b.t.Name(), time.Now().Format("2006-01-02T15-04-05"))
+ loggingErr := ctf_docker.WriteAllContainersLogs(b.l, logsDir)
+ if loggingErr != nil {
+ b.l.Error().Err(loggingErr).Msg("Error writing all Docker containers logs")
+ }
- // run even if test has failed, as we might be able to catch additional problems without running the test again
- if b.chainlinkNodeLogScannerSettings != nil {
- logProcessor := logstream.NewLogProcessor[int](b.te.LogStream)
+ if b == nil || b.te == nil || b.te.ClCluster == nil || b.te.ClCluster.Nodes == nil {
+ log.Warn().Msg("Won't dump container and postgres logs, because test environment doesn't have any nodes")
+ return
+ }
- processFn := func(log logstream.LogContent, count *int) error {
- countSoFar := count
- if *countSoFar < 0 {
- return fmt.Errorf("negative count: %d", *countSoFar)
- }
- newCount, err := testreporters.ScanLogLine(b.l, string(log.Content), b.chainlinkNodeLogScannerSettings.FailingLogLevel, uint(*countSoFar), b.chainlinkNodeLogScannerSettings.Threshold, b.chainlinkNodeLogScannerSettings.AllowedMessages)
- if err != nil {
- return err
- }
- if newCount > math.MaxInt {
- return fmt.Errorf("new count overflows int: %d", newCount)
- }
- *count = int(newCount)
- return nil
- }
+ if b.chainlinkNodeLogScannerSettings != nil {
+ var logFiles []*os.File
- // we cannot do parallel processing here, because ProcessContainerLogs() locks a mutex that controls whether
- // new logs can be added to the log stream, so parallel processing would get stuck on waiting for it to be unlocked
- LogScanningLoop:
- for i := 0; i < b.clNodesCount; i++ {
- // if something went wrong during environment setup we might not have all nodes, and we don't want an NPE
- if b == nil || b.te == nil || b.te.ClCluster == nil || b.te.ClCluster.Nodes == nil || len(b.te.ClCluster.Nodes)-1 < i || b.te.ClCluster.Nodes[i] == nil {
+ // when tests run in parallel, we need to make sure that we only process logs that belong to nodes created by the current test
+ // that is required, because some tests might have custom log messages that are allowed, but only for that test (e.g. because they restart the CL node)
+ var belongsToCurrentEnv = func(filePath string) bool {
+ for _, clNode := range b.te.ClCluster.Nodes {
+ if clNode == nil {
continue
}
- // ignore count return, because we are only interested in the error
- _, err := logProcessor.ProcessContainerLogs(b.te.ClCluster.Nodes[i].ContainerName, processFn)
- if err != nil && !strings.Contains(err.Error(), testreporters.MultipleLogsAtLogLevelErr) && !strings.Contains(err.Error(), testreporters.OneLogAtLogLevelErr) {
- b.l.Error().Err(err).Msg("Error processing CL node logs")
- continue
- } else if err != nil && (strings.Contains(err.Error(), testreporters.MultipleLogsAtLogLevelErr) || strings.Contains(err.Error(), testreporters.OneLogAtLogLevelErr)) {
- flushLogStream = true
- b.t.Errorf("Found a concerning log in Chainklink Node logs: %v", err)
- break LogScanningLoop
+ if strings.EqualFold(filePath, clNode.ContainerName+".log") {
+ return true
}
}
- b.l.Info().Msg("Finished scanning Chainlink Node logs for concerning errors")
+ return false
}
- if flushLogStream {
- b.l.Info().Msg("Flushing LogStream logs")
- // we can't do much if this fails, so we just log the error in LogStream
- if err := b.te.LogStream.FlushAndShutdown(); err != nil {
- b.l.Error().Err(err).Msg("Error flushing and shutting down LogStream")
+ fileWalkErr := filepath.Walk(logsDir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
}
- b.te.LogStream.PrintLogTargetsLocations()
- b.te.LogStream.SaveLogLocationInTestSummary()
- }
- b.l.Info().Msg("Finished shutting down LogStream")
+ if !info.IsDir() && belongsToCurrentEnv(info.Name()) {
+ file, fileErr := os.Open(path)
+ if fileErr != nil {
+ return fmt.Errorf("failed to open file %s: %w", path, fileErr)
+ }
+ logFiles = append(logFiles, file)
+ }
+ return nil
+ })
- if b.t.Failed() || *b.testConfig.GetLoggingConfig().TestLogCollect {
- b.l.Info().Msg("Dump state of all Postgres DBs used by Chainlink Nodes")
+ if len(logFiles) != len(b.te.ClCluster.Nodes) {
+ b.l.Warn().Int("Expected", len(b.te.ClCluster.Nodes)).Int("Got", len(logFiles)).Msg("Number of log files does not match number of nodes. Some logs might be missing.")
+ }
- dbDumpFolder := "db_dumps"
- dbDumpPath := fmt.Sprintf("%s/%s-%s", dbDumpFolder, b.t.Name(), time.Now().Format("2006-01-02T15-04-05"))
- if err := os.MkdirAll(dbDumpPath, os.ModePerm); err != nil {
- b.l.Error().Err(err).Msg("Error creating folder for Postgres DB dump")
- return
+ if fileWalkErr != nil {
+ b.l.Error().Err(fileWalkErr).Msg("Error walking through log files. Skipping log verification.")
+ } else {
+ verifyLogsGroup := &errgroup.Group{}
+ for _, f := range logFiles {
+ file := f
+ verifyLogsGroup.Go(func() error {
+ verifyErr := testreporters.VerifyLogFile(file, b.chainlinkNodeLogScannerSettings.FailingLogLevel, b.chainlinkNodeLogScannerSettings.Threshold, b.chainlinkNodeLogScannerSettings.AllowedMessages...)
+ _ = file.Close()
+ // ignore processing errors
+ if verifyErr != nil && !strings.Contains(verifyErr.Error(), testreporters.MultipleLogsAtLogLevelErr) && !strings.Contains(verifyErr.Error(), testreporters.OneLogAtLogLevelErr) {
+ b.l.Error().Err(verifyErr).Msg("Error processing CL node logs")
+
+ return nil
+
+                    // if it's not a processing error, we want to fail the test; we also can stop processing logs altogether at this point
+ } else if verifyErr != nil && (strings.Contains(verifyErr.Error(), testreporters.MultipleLogsAtLogLevelErr) || strings.Contains(verifyErr.Error(), testreporters.OneLogAtLogLevelErr)) {
+
+ return verifyErr
+ }
+ return nil
+ })
}
- absDbDumpPath, err := osutil.GetAbsoluteFolderPath(dbDumpFolder)
- if err == nil {
- b.l.Info().Str("Absolute path", absDbDumpPath).Msg("PostgresDB dump folder location")
+ if logVerificationErr := verifyLogsGroup.Wait(); logVerificationErr != nil {
+                b.t.Errorf("Found a concerning log in Chainlink Node logs: %v", logVerificationErr)
}
+ }
+ }
- for i := 0; i < b.clNodesCount; i++ {
+            b.l.Info().Msg("Starting to dump state of all Postgres DBs used by Chainlink Nodes")
+
+ dbDumpFolder := "db_dumps"
+ dbDumpPath := fmt.Sprintf("%s/%s-%s", dbDumpFolder, b.t.Name(), time.Now().Format("2006-01-02T15-04-05"))
+ if err := os.MkdirAll(dbDumpPath, os.ModePerm); err != nil {
+ b.l.Error().Err(err).Msg("Error creating folder for Postgres DB dump")
+ } else {
+ absDbDumpPath, err := osutil.GetAbsoluteFolderPath(dbDumpFolder)
+ if err == nil {
+ b.l.Info().Str("Absolute path", absDbDumpPath).Msg("PostgresDB dump folder location")
+ }
+
+ dbDumpGroup := sync.WaitGroup{}
+ for i := 0; i < b.clNodesCount; i++ {
+ dbDumpGroup.Add(1)
+ go func() {
+ defer dbDumpGroup.Done()
// if something went wrong during environment setup we might not have all nodes, and we don't want an NPE
if b == nil || b.te == nil || b.te.ClCluster == nil || b.te.ClCluster.Nodes == nil || len(b.te.ClCluster.Nodes)-1 < i || b.te.ClCluster.Nodes[i] == nil || b.te.ClCluster.Nodes[i].PostgresDb == nil {
- continue
+ return
}
filePath := filepath.Join(dbDumpPath, fmt.Sprintf("postgres_db_dump_%s.sql", b.te.ClCluster.Nodes[i].ContainerName))
@@ -353,24 +345,23 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
if err != nil {
b.l.Error().Err(err).Msg("Error creating localDbDumpFile for Postgres DB dump")
_ = localDbDumpFile.Close()
- continue
+ return
}
if err := b.te.ClCluster.Nodes[i].PostgresDb.ExecPgDumpFromContainer(localDbDumpFile); err != nil {
b.l.Error().Err(err).Msg("Error dumping Postgres DB")
}
_ = localDbDumpFile.Close()
- }
- b.l.Info().Msg("Finished dumping state of all Postgres DBs used by Chainlink Nodes")
+ }()
}
- if b.testConfig.GetSethConfig() != nil && ((b.t.Failed() && slices.Contains(b.testConfig.GetSethConfig().TraceOutputs, seth.TraceOutput_DOT) && b.testConfig.GetSethConfig().TracingLevel != seth.TracingLevel_None) || (!b.t.Failed() && slices.Contains(b.testConfig.GetSethConfig().TraceOutputs, seth.TraceOutput_DOT) && b.testConfig.GetSethConfig().TracingLevel == seth.TracingLevel_All)) {
- _ = testsummary.AddEntry(b.t.Name(), "dot_graphs", "true")
- }
- })
- } else {
- b.l.Warn().Msg("LogStream won't be cleaned up, because either test instance is not set or cleanup type is set to none")
- }
+ dbDumpGroup.Wait()
+
+ b.l.Info().Msg("Finished dumping state of all Postgres DBs used by Chainlink Nodes")
+ }
+ })
+ } else {
+ b.l.Warn().Msg("Won't dump container and postgres logs, because either test instance is not set or cleanup type is set to none")
}
if b.hasKillgrave {
@@ -378,7 +369,7 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
return nil, fmt.Errorf("test environment builder failed: %w", fmt.Errorf("cannot start mock adapter without a network"))
}
- b.te.MockAdapter = test_env.NewKillgrave([]string{b.te.DockerNetwork.Name}, "", test_env.WithLogStream(b.te.LogStream))
+ b.te.MockAdapter = test_env.NewKillgrave([]string{b.te.DockerNetwork.Name}, "")
err = b.te.StartMockAdapter()
if err != nil {
@@ -406,10 +397,6 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
return b.te, fmt.Errorf("test environment builder failed: %w", fmt.Errorf("explicit cleanup type must be set when building test environment"))
}
- if b.te.LogStream == nil && b.chainlinkNodeLogScannerSettings != nil {
- log.Warn().Msg("Chainlink node log scanner settings provided, but LogStream is not enabled. Ignoring Chainlink node log scanner settings, as no logs will be available.")
- }
-
if b.jdConfig != nil {
err := b.te.StartJobDistributor(b.jdConfig)
if err != nil {
diff --git a/integration-tests/go.mod b/integration-tests/go.mod
index d94c15de0cb..a29d07dec21 100644
--- a/integration-tests/go.mod
+++ b/integration-tests/go.mod
@@ -46,11 +46,11 @@ require (
github.com/slack-go/slack v0.15.0
github.com/smartcontractkit/chain-selectors v1.0.34
github.com/smartcontractkit/chainlink-automation v0.8.1
- github.com/smartcontractkit/chainlink-ccip v0.0.0-20241211150100-7683331f64a0
- github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83
+ github.com/smartcontractkit/chainlink-ccip v0.0.0-20241213122413-5e8f65dd6b1b
+ github.com/smartcontractkit/chainlink-common v0.3.1-0.20241214155818-b403079b2805
github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0
github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.2
- github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.18
+ github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.19
github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.50.0
github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.9
github.com/smartcontractkit/chainlink-testing-framework/wasp v1.50.2
@@ -64,10 +64,10 @@ require (
go.uber.org/atomic v1.11.0
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
- golang.org/x/crypto v0.28.0
+ golang.org/x/crypto v0.31.0
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
- golang.org/x/sync v0.8.0
- golang.org/x/text v0.19.0
+ golang.org/x/sync v0.10.0
+ golang.org/x/text v0.21.0
google.golang.org/grpc v1.67.1
gopkg.in/guregu/null.v4 v4.0.0
k8s.io/apimachinery v0.31.2
@@ -105,7 +105,7 @@ require (
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
github.com/XSAM/otelsql v0.27.0 // indirect
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect
- github.com/andybalholm/brotli v1.1.0 // indirect
+ github.com/andybalholm/brotli v1.1.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/atombender/go-jsonschema v0.16.1-0.20240916205339-a74cd4e2851c // indirect
@@ -245,7 +245,7 @@ require (
github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
github.com/go-webauthn/webauthn v0.9.4 // indirect
github.com/go-webauthn/x v0.1.5 // indirect
- github.com/goccy/go-json v0.10.2 // indirect
+ github.com/goccy/go-json v0.10.3 // indirect
github.com/goccy/go-yaml v1.12.0 // indirect
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
github.com/gofrs/flock v0.8.1 // indirect
@@ -337,8 +337,8 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/kelseyhightower/envconfig v1.4.0 // indirect
- github.com/klauspost/compress v1.17.9 // indirect
- github.com/klauspost/cpuid/v2 v2.2.7 // indirect
+ github.com/klauspost/compress v1.17.11 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.8 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
@@ -423,11 +423,11 @@ require (
github.com/shirou/gopsutil/v3 v3.24.3 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
- github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86 // indirect
+ github.com/smartcontractkit/ccip-owner-contracts v0.0.0-salt-fix // indirect
github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e // indirect
github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241202141438-a90db35252db // indirect
github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect
- github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.2 // indirect
+ github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 // indirect
github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241210172617-6fd1891d0fbc // indirect
github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241202202529-2033490e77b8 // indirect
github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 // indirect
@@ -509,8 +509,8 @@ require (
golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
- golang.org/x/sys v0.26.0 // indirect
- golang.org/x/term v0.25.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/term v0.27.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.26.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
diff --git a/integration-tests/go.sum b/integration-tests/go.sum
index 49e87a613fd..633e8e9b691 100644
--- a/integration-tests/go.sum
+++ b/integration-tests/go.sum
@@ -180,9 +180,11 @@ github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2uc
github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
-github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
-github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
+github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
+github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow-go/v18 v18.0.0 h1:1dBDaSbH3LtulTyOVYaBCHO3yVRwjV+TZaqn3g6V7ZM=
+github.com/apache/arrow-go/v18 v18.0.0/go.mod h1:t6+cWRSmKgdQ6HsxisQjok+jBpKGhRDiqcf3p0p/F+A=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -550,8 +552,8 @@ github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813/go.mod h1:P+oSoE9y
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gin-contrib/cors v1.5.0 h1:DgGKV7DDoOn36DFkNtbHrjoRiT5ExCe+PC9/xp7aKvk=
-github.com/gin-contrib/cors v1.5.0/go.mod h1:TvU7MAZ3EwrPLI2ztzTt3tqgvBCq+wn8WpZmfADjupI=
+github.com/gin-contrib/cors v1.7.2 h1:oLDHxdg8W/XDoN/8zamqk/Drgt4oVZDvaV0YmvVICQw=
+github.com/gin-contrib/cors v1.7.2/go.mod h1:SUJVARKgQ40dmrzgXEVxj2m7Ig1v1qIboQkPDTQ9t2E=
github.com/gin-contrib/expvar v0.0.1 h1:IuU5ArEgihz50vG8Onrwz22kJr7Mcvgv9xSSpfU5g+w=
github.com/gin-contrib/expvar v0.0.1/go.mod h1:8o2CznfQi1JjktORdHr2/abg3wSV6OCnXh0yGypvvVw=
github.com/gin-contrib/sessions v0.0.5 h1:CATtfHmLMQrMNpJRgzjWXD7worTh7g7ritsQfmF+0jE=
@@ -642,8 +644,8 @@ github.com/go-webauthn/x v0.1.5 h1:V2TCzDU2TGLd0kSZOXdrqDVV5JB9ILnKxA9S53CSBw0=
github.com/go-webauthn/x v0.1.5/go.mod h1:qbzWwcFcv4rTwtCLOZd+icnr6B7oSsAGZJqlt8cukqY=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
-github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
-github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
+github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.12.0 h1:/1WHjnMsI1dlIBQutrvSMGZRQufVO3asrHfTwfACoPM=
github.com/goccy/go-yaml v1.12.0/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0=
@@ -711,6 +713,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI=
+github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -1039,11 +1043,11 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
-github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
+github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
@@ -1100,6 +1104,8 @@ github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYt
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f h1:tVvGiZQFjOXP+9YyGqSA6jE55x1XVxmoPYudncxrZ8U=
github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f/go.mod h1:Z60vy0EZVSu0bOugCHdcN5ZxFMKSpjRgsnh0XKPFqqk=
+github.com/marcboeker/go-duckdb v1.8.3 h1:ZkYwiIZhbYsT6MmJsZ3UPTHrTZccDdM4ztoqSlEMXiQ=
+github.com/marcboeker/go-duckdb v1.8.3/go.mod h1:C9bYRE1dPYb1hhfu/SSomm78B0FXmNgRvv6YBW/Hooc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
@@ -1279,8 +1285,8 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU=
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
-github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
-github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
@@ -1424,16 +1430,16 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/slack-go/slack v0.15.0 h1:LE2lj2y9vqqiOf+qIIy0GvEoxgF1N5yLGZffmEZykt0=
github.com/slack-go/slack v0.15.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
-github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86 h1:qQH6fZZe31nBAG6INHph3z5ysDTPptyu0TR9uoJ1+ok=
-github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86/go.mod h1:WtWOoVQQEHxRHL2hNmuRrvDfYfQG/CioFNoa9Rr2mBE=
+github.com/smartcontractkit/ccip-owner-contracts v0.0.0-salt-fix h1:DPJD++yKLSx0EfT+U14P8vLVxjXFmoIETiCO9lVwQo8=
+github.com/smartcontractkit/ccip-owner-contracts v0.0.0-salt-fix/go.mod h1:NnT6w4Kj42OFFXhSx99LvJZWPpMjmo4+CpDEWfw61xY=
github.com/smartcontractkit/chain-selectors v1.0.34 h1:MJ17OGu8+jjl426pcKrJkCf3fePb3eCreuAnUA3RBj4=
github.com/smartcontractkit/chain-selectors v1.0.34/go.mod h1:xsKM0aN3YGcQKTPRPDDtPx2l4mlTN1Djmg0VVXV40b8=
github.com/smartcontractkit/chainlink-automation v0.8.1 h1:sTc9LKpBvcKPc1JDYAmgBc2xpDKBco/Q4h4ydl6+UUU=
github.com/smartcontractkit/chainlink-automation v0.8.1/go.mod h1:Iij36PvWZ6blrdC5A/nrQUBuf3MH3JvsBB9sSyc9W08=
-github.com/smartcontractkit/chainlink-ccip v0.0.0-20241211150100-7683331f64a0 h1:/1L+v4SxUD2K5RMRbfByyLfePMAgQKeD0onSetPnGmA=
-github.com/smartcontractkit/chainlink-ccip v0.0.0-20241211150100-7683331f64a0/go.mod h1:F8xQAIW0ymb2BZhqn89sWZLXreJhM5KDVF6Qb4y44N0=
-github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83 h1:NjrU7KOn3Tk+C6QFo9tQBqeotPKytpBwhn/J1s+yiiY=
-github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83/go.mod h1:bQktEJf7sJ0U3SmIcXvbGUox7SmXcnSEZ4kUbT8R5Nk=
+github.com/smartcontractkit/chainlink-ccip v0.0.0-20241213122413-5e8f65dd6b1b h1:iSQJ6ng4FhEswf8SXunGkaJlVP3E3JlgLB8Oo2f3Ud4=
+github.com/smartcontractkit/chainlink-ccip v0.0.0-20241213122413-5e8f65dd6b1b/go.mod h1:F8xQAIW0ymb2BZhqn89sWZLXreJhM5KDVF6Qb4y44N0=
+github.com/smartcontractkit/chainlink-common v0.3.1-0.20241214155818-b403079b2805 h1:Pz8jB/6qe10xT10h2S3LFYJrnebNpG5rJ/w16HZGwPQ=
+github.com/smartcontractkit/chainlink-common v0.3.1-0.20241214155818-b403079b2805/go.mod h1:yti7e1+G9hhkYhj+L5sVUULn9Bn3bBL5/AxaNqdJ5YQ=
github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e h1:PRoeby6ZlTuTkv2f+7tVU4+zboTfRzI+beECynF4JQ0=
github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e/go.mod h1:mUh5/woemsVaHgTorA080hrYmO3syBCmPdnWc/5dOqk=
github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241202141438-a90db35252db h1:N1RH1hSr2ACzOFc9hkCcjE8pRBTdcU3p8nsTJByaLes=
@@ -1442,16 +1448,16 @@ github.com/smartcontractkit/chainlink-feeds v0.1.1 h1:JzvUOM/OgGQA1sOqTXXl52R6An
github.com/smartcontractkit/chainlink-feeds v0.1.1/go.mod h1:55EZ94HlKCfAsUiKUTNI7QlE/3d3IwTlsU3YNa/nBb4=
github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0 h1:0ewLMbAz3rZrovdRUCgd028yOXX8KigB4FndAUdI2kM=
github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0/go.mod h1:/dVVLXrsp+V0AbcYGJo3XMzKg3CkELsweA/TTopCsKE=
-github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.2 h1:onBe3DqNrbtOAzKS4PrPIiJX65BGo1aYiYZxFVEW+jc=
-github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.2/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo=
+github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 h1:ZBat8EBvE2LpSQR9U1gEbRV6PfAkiFdINmQ8nVnXIAQ=
+github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo=
github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241210172617-6fd1891d0fbc h1:dssRwJhmzJkUN/OajaDj2GsxBn+Tupk3bI1BkPEoJg0=
github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241210172617-6fd1891d0fbc/go.mod h1:p8aUDfJeley6oer7y+Ucd3edOtRlMTnWg3mN6rhaLWo=
github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241202202529-2033490e77b8 h1:tNS7U9lrxkFvEuyxQv11HHOiV9LPDGC9wYEy+yM/Jv4=
github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241202202529-2033490e77b8/go.mod h1:EBrEgcdIbwepqguClkv8Ohy7CbyWSJaE4EC9aBJlQK0=
github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.2 h1:GDGrC5OGiV0RyM1znYWehSQXyZQWTOzrEeJRYmysPCE=
github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.2/go.mod h1:DsT43c1oTBmp3iQkMcoZOoKThwZvt8X3Pz6UmznJ4GY=
-github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.18 h1:a3xetGZh2nFO1iX5xd9OuqiCkgbWLvW6fTN6fgVubPo=
-github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.18/go.mod h1:NwmlNKqrb02v4Sci4b5KW644nfH2BW+FrKbWwTN5r6M=
+github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.19 h1:9PMwKNqFKc5FXf4VchyD3CGzZelnSgi13fgVdT2X7T4=
+github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.19/go.mod h1:ag7LEgejsVtPXaUNkcoFPpAoDkl1J8V2HSbqVUxfEtk=
github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.50.0 h1:VIxK8u0Jd0Q/VuhmsNm6Bls6Tb31H/sA3A/rbc5hnhg=
github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.50.0/go.mod h1:lyAu+oMXdNUzEDScj2DXB2IueY+SDXPPfyl/kb63tMM=
github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.9 h1:yB1x5UXvpZNka+5h57yo1/GrKfXKCqMzChCISpldZx4=
@@ -1614,6 +1620,8 @@ github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
+github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
+github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -1625,6 +1633,8 @@ github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE
github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U=
github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM=
@@ -1772,8 +1782,8 @@ golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
-golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
-golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1903,8 +1913,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -2000,8 +2010,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -2012,8 +2022,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
-golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
-golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2028,8 +2038,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod
index f73d84e3fc5..eb68f0722fb 100644
--- a/integration-tests/load/go.mod
+++ b/integration-tests/load/go.mod
@@ -27,8 +27,8 @@ require (
github.com/pkg/errors v0.9.1
github.com/rs/zerolog v1.33.0
github.com/slack-go/slack v0.15.0
- github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83
- github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.18
+ github.com/smartcontractkit/chainlink-common v0.3.1-0.20241214155818-b403079b2805
+ github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.19
github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.9
github.com/smartcontractkit/chainlink-testing-framework/wasp v1.50.2
github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20241009055228-33d0c0bf38de
@@ -72,7 +72,7 @@ require (
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
github.com/XSAM/otelsql v0.27.0 // indirect
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect
- github.com/andybalholm/brotli v1.1.0 // indirect
+ github.com/andybalholm/brotli v1.1.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/atombender/go-jsonschema v0.16.1-0.20240916205339-a74cd4e2851c // indirect
@@ -216,7 +216,7 @@ require (
github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
github.com/go-webauthn/webauthn v0.9.4 // indirect
github.com/go-webauthn/x v0.1.5 // indirect
- github.com/goccy/go-json v0.10.2 // indirect
+ github.com/goccy/go-json v0.10.3 // indirect
github.com/goccy/go-yaml v1.12.0 // indirect
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
github.com/gofrs/flock v0.8.1 // indirect
@@ -312,8 +312,8 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/kelseyhightower/envconfig v1.4.0 // indirect
- github.com/klauspost/compress v1.17.9 // indirect
- github.com/klauspost/cpuid/v2 v2.2.7 // indirect
+ github.com/klauspost/compress v1.17.11 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.8 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
@@ -398,6 +398,7 @@ require (
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/sercand/kuberesolver/v5 v5.1.1 // indirect
+ github.com/sethvargo/go-retry v0.2.4 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/shirou/gopsutil/v3 v3.24.3 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
@@ -406,11 +407,11 @@ require (
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/smartcontractkit/chain-selectors v1.0.34 // indirect
github.com/smartcontractkit/chainlink-automation v0.8.1 // indirect
- github.com/smartcontractkit/chainlink-ccip v0.0.0-20241211150100-7683331f64a0 // indirect
+ github.com/smartcontractkit/chainlink-ccip v0.0.0-20241213122413-5e8f65dd6b1b // indirect
github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e // indirect
github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241202141438-a90db35252db // indirect
github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect
- github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.2 // indirect
+ github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 // indirect
github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241210172617-6fd1891d0fbc // indirect
github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241202202529-2033490e77b8 // indirect
github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.2 // indirect
@@ -498,14 +499,14 @@ require (
go.uber.org/zap v1.27.0 // indirect
go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect
golang.org/x/arch v0.11.0 // indirect
- golang.org/x/crypto v0.28.0 // indirect
+ golang.org/x/crypto v0.31.0 // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
- golang.org/x/sync v0.8.0 // indirect
- golang.org/x/sys v0.26.0 // indirect
- golang.org/x/term v0.25.0 // indirect
- golang.org/x/text v0.19.0 // indirect
+ golang.org/x/sync v0.10.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/term v0.27.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.26.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum
index 3bc63a508ac..02c9357b14f 100644
--- a/integration-tests/load/go.sum
+++ b/integration-tests/load/go.sum
@@ -184,9 +184,11 @@ github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2uc
github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
-github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
-github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
+github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
+github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow-go/v18 v18.0.0 h1:1dBDaSbH3LtulTyOVYaBCHO3yVRwjV+TZaqn3g6V7ZM=
+github.com/apache/arrow-go/v18 v18.0.0/go.mod h1:t6+cWRSmKgdQ6HsxisQjok+jBpKGhRDiqcf3p0p/F+A=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -544,8 +546,8 @@ github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813/go.mod h1:P+oSoE9y
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gin-contrib/cors v1.5.0 h1:DgGKV7DDoOn36DFkNtbHrjoRiT5ExCe+PC9/xp7aKvk=
-github.com/gin-contrib/cors v1.5.0/go.mod h1:TvU7MAZ3EwrPLI2ztzTt3tqgvBCq+wn8WpZmfADjupI=
+github.com/gin-contrib/cors v1.7.2 h1:oLDHxdg8W/XDoN/8zamqk/Drgt4oVZDvaV0YmvVICQw=
+github.com/gin-contrib/cors v1.7.2/go.mod h1:SUJVARKgQ40dmrzgXEVxj2m7Ig1v1qIboQkPDTQ9t2E=
github.com/gin-contrib/expvar v0.0.1 h1:IuU5ArEgihz50vG8Onrwz22kJr7Mcvgv9xSSpfU5g+w=
github.com/gin-contrib/expvar v0.0.1/go.mod h1:8o2CznfQi1JjktORdHr2/abg3wSV6OCnXh0yGypvvVw=
github.com/gin-contrib/sessions v0.0.5 h1:CATtfHmLMQrMNpJRgzjWXD7worTh7g7ritsQfmF+0jE=
@@ -636,8 +638,8 @@ github.com/go-webauthn/x v0.1.5 h1:V2TCzDU2TGLd0kSZOXdrqDVV5JB9ILnKxA9S53CSBw0=
github.com/go-webauthn/x v0.1.5/go.mod h1:qbzWwcFcv4rTwtCLOZd+icnr6B7oSsAGZJqlt8cukqY=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
-github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
-github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
+github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.12.0 h1:/1WHjnMsI1dlIBQutrvSMGZRQufVO3asrHfTwfACoPM=
github.com/goccy/go-yaml v1.12.0/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0=
@@ -705,6 +707,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI=
+github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -1035,11 +1039,11 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
-github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
+github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
@@ -1094,6 +1098,8 @@ github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYt
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f h1:tVvGiZQFjOXP+9YyGqSA6jE55x1XVxmoPYudncxrZ8U=
github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f/go.mod h1:Z60vy0EZVSu0bOugCHdcN5ZxFMKSpjRgsnh0XKPFqqk=
+github.com/marcboeker/go-duckdb v1.8.3 h1:ZkYwiIZhbYsT6MmJsZ3UPTHrTZccDdM4ztoqSlEMXiQ=
+github.com/marcboeker/go-duckdb v1.8.3/go.mod h1:C9bYRE1dPYb1hhfu/SSomm78B0FXmNgRvv6YBW/Hooc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
@@ -1269,8 +1275,8 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU=
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
-github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
-github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
@@ -1415,16 +1421,16 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/slack-go/slack v0.15.0 h1:LE2lj2y9vqqiOf+qIIy0GvEoxgF1N5yLGZffmEZykt0=
github.com/slack-go/slack v0.15.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
-github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86 h1:qQH6fZZe31nBAG6INHph3z5ysDTPptyu0TR9uoJ1+ok=
-github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86/go.mod h1:WtWOoVQQEHxRHL2hNmuRrvDfYfQG/CioFNoa9Rr2mBE=
+github.com/smartcontractkit/ccip-owner-contracts v0.0.0-salt-fix h1:DPJD++yKLSx0EfT+U14P8vLVxjXFmoIETiCO9lVwQo8=
+github.com/smartcontractkit/ccip-owner-contracts v0.0.0-salt-fix/go.mod h1:NnT6w4Kj42OFFXhSx99LvJZWPpMjmo4+CpDEWfw61xY=
github.com/smartcontractkit/chain-selectors v1.0.34 h1:MJ17OGu8+jjl426pcKrJkCf3fePb3eCreuAnUA3RBj4=
github.com/smartcontractkit/chain-selectors v1.0.34/go.mod h1:xsKM0aN3YGcQKTPRPDDtPx2l4mlTN1Djmg0VVXV40b8=
github.com/smartcontractkit/chainlink-automation v0.8.1 h1:sTc9LKpBvcKPc1JDYAmgBc2xpDKBco/Q4h4ydl6+UUU=
github.com/smartcontractkit/chainlink-automation v0.8.1/go.mod h1:Iij36PvWZ6blrdC5A/nrQUBuf3MH3JvsBB9sSyc9W08=
-github.com/smartcontractkit/chainlink-ccip v0.0.0-20241211150100-7683331f64a0 h1:/1L+v4SxUD2K5RMRbfByyLfePMAgQKeD0onSetPnGmA=
-github.com/smartcontractkit/chainlink-ccip v0.0.0-20241211150100-7683331f64a0/go.mod h1:F8xQAIW0ymb2BZhqn89sWZLXreJhM5KDVF6Qb4y44N0=
-github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83 h1:NjrU7KOn3Tk+C6QFo9tQBqeotPKytpBwhn/J1s+yiiY=
-github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83/go.mod h1:bQktEJf7sJ0U3SmIcXvbGUox7SmXcnSEZ4kUbT8R5Nk=
+github.com/smartcontractkit/chainlink-ccip v0.0.0-20241213122413-5e8f65dd6b1b h1:iSQJ6ng4FhEswf8SXunGkaJlVP3E3JlgLB8Oo2f3Ud4=
+github.com/smartcontractkit/chainlink-ccip v0.0.0-20241213122413-5e8f65dd6b1b/go.mod h1:F8xQAIW0ymb2BZhqn89sWZLXreJhM5KDVF6Qb4y44N0=
+github.com/smartcontractkit/chainlink-common v0.3.1-0.20241214155818-b403079b2805 h1:Pz8jB/6qe10xT10h2S3LFYJrnebNpG5rJ/w16HZGwPQ=
+github.com/smartcontractkit/chainlink-common v0.3.1-0.20241214155818-b403079b2805/go.mod h1:yti7e1+G9hhkYhj+L5sVUULn9Bn3bBL5/AxaNqdJ5YQ=
github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e h1:PRoeby6ZlTuTkv2f+7tVU4+zboTfRzI+beECynF4JQ0=
github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e/go.mod h1:mUh5/woemsVaHgTorA080hrYmO3syBCmPdnWc/5dOqk=
github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241202141438-a90db35252db h1:N1RH1hSr2ACzOFc9hkCcjE8pRBTdcU3p8nsTJByaLes=
@@ -1433,16 +1439,16 @@ github.com/smartcontractkit/chainlink-feeds v0.1.1 h1:JzvUOM/OgGQA1sOqTXXl52R6An
github.com/smartcontractkit/chainlink-feeds v0.1.1/go.mod h1:55EZ94HlKCfAsUiKUTNI7QlE/3d3IwTlsU3YNa/nBb4=
github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0 h1:0ewLMbAz3rZrovdRUCgd028yOXX8KigB4FndAUdI2kM=
github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0/go.mod h1:/dVVLXrsp+V0AbcYGJo3XMzKg3CkELsweA/TTopCsKE=
-github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.2 h1:onBe3DqNrbtOAzKS4PrPIiJX65BGo1aYiYZxFVEW+jc=
-github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.2/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo=
+github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 h1:ZBat8EBvE2LpSQR9U1gEbRV6PfAkiFdINmQ8nVnXIAQ=
+github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo=
github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241210172617-6fd1891d0fbc h1:dssRwJhmzJkUN/OajaDj2GsxBn+Tupk3bI1BkPEoJg0=
github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241210172617-6fd1891d0fbc/go.mod h1:p8aUDfJeley6oer7y+Ucd3edOtRlMTnWg3mN6rhaLWo=
github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241202202529-2033490e77b8 h1:tNS7U9lrxkFvEuyxQv11HHOiV9LPDGC9wYEy+yM/Jv4=
github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241202202529-2033490e77b8/go.mod h1:EBrEgcdIbwepqguClkv8Ohy7CbyWSJaE4EC9aBJlQK0=
github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.2 h1:GDGrC5OGiV0RyM1znYWehSQXyZQWTOzrEeJRYmysPCE=
github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.2/go.mod h1:DsT43c1oTBmp3iQkMcoZOoKThwZvt8X3Pz6UmznJ4GY=
-github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.18 h1:a3xetGZh2nFO1iX5xd9OuqiCkgbWLvW6fTN6fgVubPo=
-github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.18/go.mod h1:NwmlNKqrb02v4Sci4b5KW644nfH2BW+FrKbWwTN5r6M=
+github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.19 h1:9PMwKNqFKc5FXf4VchyD3CGzZelnSgi13fgVdT2X7T4=
+github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.19/go.mod h1:ag7LEgejsVtPXaUNkcoFPpAoDkl1J8V2HSbqVUxfEtk=
github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.50.0 h1:VIxK8u0Jd0Q/VuhmsNm6Bls6Tb31H/sA3A/rbc5hnhg=
github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.50.0/go.mod h1:lyAu+oMXdNUzEDScj2DXB2IueY+SDXPPfyl/kb63tMM=
github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.9 h1:yB1x5UXvpZNka+5h57yo1/GrKfXKCqMzChCISpldZx4=
@@ -1605,6 +1611,8 @@ github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
+github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
+github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -1616,6 +1624,8 @@ github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE
github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U=
github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM=
@@ -1763,8 +1773,8 @@ golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
-golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
-golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1894,8 +1904,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1989,8 +1999,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -2001,8 +2011,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
-golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
-golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2017,8 +2027,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/integration-tests/smoke/ccip/ccip_rmn_test.go b/integration-tests/smoke/ccip/ccip_rmn_test.go
index adf07be290f..166f4422fe6 100644
--- a/integration-tests/smoke/ccip/ccip_rmn_test.go
+++ b/integration-tests/smoke/ccip/ccip_rmn_test.go
@@ -18,11 +18,12 @@ import (
"github.com/rs/zerolog"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/chainlink-ccip/pkg/reader"
"github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node"
"github.com/smartcontractkit/chainlink-testing-framework/lib/utils/osutil"
"github.com/smartcontractkit/chainlink-testing-framework/lib/utils/testcontext"
+ "github.com/smartcontractkit/chainlink-ccip/pkg/reader"
+
"github.com/smartcontractkit/chainlink/deployment/ccip/changeset"
"github.com/smartcontractkit/chainlink/deployment/environment/devenv"
@@ -178,11 +179,14 @@ func TestRMN_DifferentRmnNodesForDifferentChains(t *testing.T) {
func TestRMN_TwoMessagesOneSourceChainCursed(t *testing.T) {
runRmnTestCase(t, rmnTestCase{
- name: "two messages, one source chain is cursed",
+ name: "two messages, one source chain is cursed, the other chain was cursed but the curse is revoked",
passIfNoCommitAfter: 15 * time.Second,
cursedSubjectsPerChain: map[int][]int{
chain1: {chain0},
},
+ revokedCursedSubjectsPerChain: map[int]map[int]time.Duration{
+ chain0: {globalCurse: 5 * time.Second}, // chain0 will be globally cursed and the curse will be revoked later
+ },
homeChainConfig: homeChainConfig{
f: map[int]int{chain0: 1, chain1: 1},
},
@@ -254,9 +258,6 @@ func runRmnTestCase(t *testing.T, tc rmnTestCase) {
require.NoError(t, err)
t.Logf("onChainState: %#v", onChainState)
- homeChain, ok := envWithRMN.Env.Chains[envWithRMN.HomeChainSel]
- require.True(t, ok)
-
homeChainState, ok := onChainState.Chains[envWithRMN.HomeChainSel]
require.True(t, ok)
@@ -270,23 +271,28 @@ func runRmnTestCase(t *testing.T, tc rmnTestCase) {
dynamicConfig := rmn_home.RMNHomeDynamicConfig{SourceChains: tc.pf.rmnHomeSourceChains, OffchainConfig: []byte{}}
t.Logf("Setting RMNHome candidate with staticConfig: %+v, dynamicConfig: %+v, current candidateDigest: %x",
staticConfig, dynamicConfig, allDigests.CandidateConfigDigest[:])
- tx, err := homeChainState.RMNHome.SetCandidate(homeChain.DeployerKey, staticConfig, dynamicConfig, allDigests.CandidateConfigDigest)
+
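+ // the candidate config is now applied via the deployment changeset API instead of calling the
+ // RMNHome contract directly; the current candidate digest is read first so it can be overridden explicitly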
+ candidateDigest, err := homeChainState.RMNHome.GetCandidateDigest(&bind.CallOpts{Context: ctx})
require.NoError(t, err)
- _, err = deployment.ConfirmIfNoError(homeChain, tx, err)
+ _, err = changeset.NewSetRMNHomeCandidateConfigChangeset(envWithRMN.Env, changeset.SetRMNHomeCandidateConfig{
+ HomeChainSelector: envWithRMN.HomeChainSel,
+ RMNStaticConfig: staticConfig,
+ RMNDynamicConfig: dynamicConfig,
+ DigestToOverride: candidateDigest,
+ })
require.NoError(t, err)
- candidateDigest, err := homeChainState.RMNHome.GetCandidateDigest(&bind.CallOpts{Context: ctx})
+ candidateDigest, err = homeChainState.RMNHome.GetCandidateDigest(&bind.CallOpts{Context: ctx})
require.NoError(t, err)
t.Logf("RMNHome candidateDigest after setting new candidate: %x", candidateDigest[:])
t.Logf("Promoting RMNHome candidate with candidateDigest: %x", candidateDigest[:])
- tx, err = homeChainState.RMNHome.PromoteCandidateAndRevokeActive(
- homeChain.DeployerKey, candidateDigest, allDigests.ActiveConfigDigest)
- require.NoError(t, err)
-
- _, err = deployment.ConfirmIfNoError(homeChain, tx, err)
+ _, err = changeset.NewPromoteCandidateConfigChangeset(envWithRMN.Env, changeset.PromoteRMNHomeCandidateConfig{
+ HomeChainSelector: envWithRMN.HomeChainSel,
+ DigestToPromote: candidateDigest,
+ })
require.NoError(t, err)
// check the active digest is the same as the candidate digest
@@ -296,7 +302,23 @@ func runRmnTestCase(t *testing.T, tc rmnTestCase) {
"active digest should be the same as the previously candidate digest after promotion, previous candidate: %x, active: %x",
candidateDigest[:], activeDigest[:])
- tc.setRmnRemoteConfig(ctx, t, onChainState, activeDigest, envWithRMN)
+ rmnRemoteConfig := make(map[uint64]changeset.RMNRemoteConfig)
+ for _, remoteCfg := range tc.remoteChainsConfig {
+ selector := tc.pf.chainSelectors[remoteCfg.chainIdx]
+ if remoteCfg.f < 0 {
+ t.Fatalf("remoteCfg.f is negative: %d", remoteCfg.f)
+ }
+ rmnRemoteConfig[selector] = changeset.RMNRemoteConfig{
+ F: uint64(remoteCfg.f),
+ Signers: tc.pf.rmnRemoteSigners,
+ }
+ }
+
+ _, err = changeset.NewSetRMNRemoteConfigChangeset(envWithRMN.Env, changeset.SetRMNRemoteConfig{
+ HomeChainSelector: envWithRMN.HomeChainSel,
+ RMNRemoteConfigs: rmnRemoteConfig,
+ })
+ require.NoError(t, err)
tc.killMarkedRmnNodes(t, rmnCluster)
@@ -308,6 +330,7 @@ func runRmnTestCase(t *testing.T, tc rmnTestCase) {
t.Logf("Sent all messages, seqNumCommit: %v seqNumExec: %v", seqNumCommit, seqNumExec)
tc.callContractsToCurseChains(ctx, t, onChainState, envWithRMN)
+ tc.callContractsToCurseAndRevokeCurse(ctx, t, onChainState, envWithRMN)
tc.enableOracles(ctx, t, envWithRMN, disabledNodes)
@@ -420,22 +443,25 @@ type rmnTestCase struct {
// If set to a positive value, the test will wait for that duration and will assert that commit report was not delivered.
passIfNoCommitAfter time.Duration
cursedSubjectsPerChain map[int][]int
- waitForExec bool
- homeChainConfig homeChainConfig
- remoteChainsConfig []remoteChainConfig
- rmnNodes []rmnNode
- messagesToSend []messageToSend
+ // revokedCursedSubjectsPerChain is used to revoke these specific curses after a timer expires
+ revokedCursedSubjectsPerChain map[int]map[int]time.Duration // chainIdx -> subjectIdx -> timer to revoke
+ waitForExec bool
+ homeChainConfig homeChainConfig
+ remoteChainsConfig []remoteChainConfig
+ rmnNodes []rmnNode
+ messagesToSend []messageToSend
// populated fields after environment setup
pf testCasePopulatedFields
}
type testCasePopulatedFields struct {
- chainSelectors []uint64
- rmnHomeNodes []rmn_home.RMNHomeNode
- rmnRemoteSigners []rmn_remote.RMNRemoteSigner
- rmnHomeSourceChains []rmn_home.RMNHomeSourceChain
- cursedSubjectsPerChainSel map[uint64][]uint64
+ chainSelectors []uint64
+ rmnHomeNodes []rmn_home.RMNHomeNode
+ rmnRemoteSigners []rmn_remote.RMNRemoteSigner
+ rmnHomeSourceChains []rmn_home.RMNHomeSourceChain
+ cursedSubjectsPerChainSel map[uint64][]uint64
+ revokedCursedSubjectsPerChainSel map[uint64]map[uint64]time.Duration
}
func (tc *rmnTestCase) populateFields(t *testing.T, envWithRMN changeset.DeployedEnv, rmnCluster devenv.RMNCluster) {
@@ -490,6 +516,22 @@ func (tc *rmnTestCase) populateFields(t *testing.T, envWithRMN changeset.Deploye
tc.pf.cursedSubjectsPerChainSel[chainSel] = append(tc.pf.cursedSubjectsPerChainSel[chainSel], subjSel)
}
}
+
+ // populate revoked cursed subjects with actual chain selectors
+ tc.pf.revokedCursedSubjectsPerChainSel = make(map[uint64]map[uint64]time.Duration)
+ for chainIdx, subjects := range tc.revokedCursedSubjectsPerChain {
+ chainSel := tc.pf.chainSelectors[chainIdx]
+ for subject, revokeAfter := range subjects {
+ subjSel := uint64(globalCurse)
+ if subject != globalCurse {
+ subjSel = tc.pf.chainSelectors[subject]
+ }
+ if _, ok := tc.pf.revokedCursedSubjectsPerChainSel[chainSel]; !ok {
+ tc.pf.revokedCursedSubjectsPerChainSel[chainSel] = make(map[uint64]time.Duration)
+ }
+ tc.pf.revokedCursedSubjectsPerChainSel[chainSel][subjSel] = revokeAfter
+ }
+ }
}
func (tc rmnTestCase) validate() error {
@@ -500,46 +542,6 @@ func (tc rmnTestCase) validate() error {
return nil
}
-func (tc rmnTestCase) setRmnRemoteConfig(
- ctx context.Context,
- t *testing.T,
- onChainState changeset.CCIPOnChainState,
- activeDigest [32]byte,
- envWithRMN changeset.DeployedEnv) {
- for _, remoteCfg := range tc.remoteChainsConfig {
- remoteSel := tc.pf.chainSelectors[remoteCfg.chainIdx]
- chState, ok := onChainState.Chains[remoteSel]
- require.True(t, ok)
- if remoteCfg.f < 0 {
- t.Fatalf("negative F: %d", remoteCfg.f)
- }
- rmnRemoteConfig := rmn_remote.RMNRemoteConfig{
- RmnHomeContractConfigDigest: activeDigest,
- Signers: tc.pf.rmnRemoteSigners,
- F: uint64(remoteCfg.f),
- }
-
- chain := envWithRMN.Env.Chains[tc.pf.chainSelectors[remoteCfg.chainIdx]]
-
- t.Logf("Setting RMNRemote config with RMNHome active digest: %x, cfg: %+v", activeDigest[:], rmnRemoteConfig)
- tx2, err2 := chState.RMNRemote.SetConfig(chain.DeployerKey, rmnRemoteConfig)
- require.NoError(t, err2)
- _, err2 = deployment.ConfirmIfNoError(chain, tx2, err2)
- require.NoError(t, err2)
-
- // confirm the config is set correctly
- config, err2 := chState.RMNRemote.GetVersionedConfig(&bind.CallOpts{Context: ctx})
- require.NoError(t, err2)
- require.Equalf(t,
- activeDigest,
- config.Config.RmnHomeContractConfigDigest,
- "RMNRemote config digest should be the same as the active digest of RMNHome after setting, RMNHome active: %x, RMNRemote config: %x",
- activeDigest[:], config.Config.RmnHomeContractConfigDigest[:])
-
- t.Logf("RMNRemote config digest after setting: %x", config.Config.RmnHomeContractConfigDigest[:])
- }
-}
-
func (tc rmnTestCase) killMarkedRmnNodes(t *testing.T, rmnCluster devenv.RMNCluster) {
for _, n := range tc.rmnNodes {
if n.forceExit {
@@ -637,6 +639,44 @@ func (tc rmnTestCase) callContractsToCurseChains(ctx context.Context, t *testing
}
}
+func (tc rmnTestCase) callContractsToCurseAndRevokeCurse(ctx context.Context, t *testing.T, onChainState changeset.CCIPOnChainState, envWithRMN changeset.DeployedEnv) {
+ for _, remoteCfg := range tc.remoteChainsConfig {
+ remoteSel := tc.pf.chainSelectors[remoteCfg.chainIdx]
+ chState, ok := onChainState.Chains[remoteSel]
+ require.True(t, ok)
+ chain, ok := envWithRMN.Env.Chains[remoteSel]
+ require.True(t, ok)
+
+ cursedSubjects, ok := tc.revokedCursedSubjectsPerChain[remoteCfg.chainIdx]
+ if !ok {
+ continue // nothing to curse on this chain
+ }
+
+ for subjectDescription, revokeAfter := range cursedSubjects {
+ subj := reader.GlobalCurseSubject
+ if subjectDescription != globalCurse {
+ subj = chainSelectorToBytes16(tc.pf.chainSelectors[subjectDescription])
+ }
+ t.Logf("cursing subject %d (%d)", subj, subjectDescription)
+ txCurse, errCurse := chState.RMNRemote.Curse(chain.DeployerKey, subj)
+ _, errConfirm := deployment.ConfirmIfNoError(chain, txCurse, errCurse)
+ require.NoError(t, errConfirm)
+
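+ // revoke the curse in the background once revokeAfter elapses, so the test can observe
+ // behaviour both while the subject is cursed and after the curse has been lifted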
+ go func() {
+ <-time.NewTimer(revokeAfter).C
+ t.Logf("revoking curse on subject %d (%d)", subj, subjectDescription)
+ txUncurse, errUncurse := chState.RMNRemote.Uncurse(chain.DeployerKey, subj)
+ _, errRevoke := deployment.ConfirmIfNoError(chain, txUncurse, errUncurse)
+ require.NoError(t, errRevoke)
+ }()
+ }
+
+ cs, err := chState.RMNRemote.GetCursedSubjects(&bind.CallOpts{Context: ctx})
+ require.NoError(t, err)
+ t.Logf("Cursed subjects: %v", cs)
+ }
+}
+
func (tc rmnTestCase) enableOracles(ctx context.Context, t *testing.T, envWithRMN changeset.DeployedEnv, nodeIDs []string) {
for _, n := range nodeIDs {
_, err := envWithRMN.Env.Offchain.EnableNode(ctx, &node.EnableNodeRequest{Id: n})
diff --git a/integration-tests/smoke/ocr2_test.go b/integration-tests/smoke/ocr2_test.go
index a011dfdffc6..8416ec05c7e 100644
--- a/integration-tests/smoke/ocr2_test.go
+++ b/integration-tests/smoke/ocr2_test.go
@@ -1,14 +1,19 @@
package smoke
import (
+ "bufio"
"fmt"
"math/big"
"net/http"
+ "os"
+ "path/filepath"
+ "regexp"
"strings"
+ "sync"
"testing"
"time"
- "github.com/smartcontractkit/chainlink/integration-tests/utils"
+ "github.com/onsi/gomega"
"github.com/ethereum/go-ethereum/common"
"github.com/rs/zerolog"
@@ -16,8 +21,8 @@ import (
"github.com/smartcontractkit/chainlink-testing-framework/seth"
+ ctf_docker "github.com/smartcontractkit/chainlink-testing-framework/lib/docker"
"github.com/smartcontractkit/chainlink-testing-framework/lib/logging"
- "github.com/smartcontractkit/chainlink-testing-framework/lib/logstream"
"github.com/smartcontractkit/chainlink-testing-framework/lib/utils/testcontext"
"github.com/smartcontractkit/chainlink/v2/core/config/env"
@@ -26,6 +31,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
type ocr2test struct {
@@ -224,33 +230,150 @@ func prepareORCv2SmokeTestEnv(t *testing.T, testData ocr2test, l zerolog.Logger,
}
func assertCorrectNodeConfiguration(t *testing.T, l zerolog.Logger, totalNodeCount int, testData ocr2test, testEnv *test_env.CLClusterTestEnv) {
- expectedNodesWithConfiguration := totalNodeCount - 1 // minus bootstrap node
- var expectedPatterns []string
+ l.Info().Msg("Checking if all nodes have correct plugin configuration applied")
- if testData.env[string(env.MedianPlugin.Cmd)] != "" {
- expectedPatterns = append(expectedPatterns, "Registered loopp.*OCR2.*Median.*")
- }
+ // we have to use gomega here, because sometimes there's a delay in the logs being written (especially in CI),
+ // so this check can fail on the first execution, and we don't want to add any hardcoded sleeps
- if testData.chainReaderAndCodec {
- expectedPatterns = append(expectedPatterns, "relayConfig\\.chainReader")
- } else {
- expectedPatterns = append(expectedPatterns, "ChainReader missing from RelayConfig; falling back to internal MedianContract")
- }
+ gom := gomega.NewGomegaWithT(t)
+ gom.Eventually(func(g gomega.Gomega) {
+ allNodesHaveCorrectConfig := false
+
+ var expectedPatterns []string
+ expectedNodeCount := totalNodeCount - 1
+
+ if testData.env[string(env.MedianPlugin.Cmd)] != "" {
+ expectedPatterns = append(expectedPatterns, `Registered loopp.*OCR2.*Median.*`)
+ }
+
+ if testData.chainReaderAndCodec {
+ expectedPatterns = append(expectedPatterns, `relayConfig.chainReader`)
+ } else {
+ expectedPatterns = append(expectedPatterns, "ChainReader missing from RelayConfig; falling back to internal MedianContract")
+ }
+
+ logFilePaths := make(map[string]string)
+ tempLogsDir := os.TempDir()
+
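+ // the bootstrap node (index 0) is not expected to have the plugin configuration,
+ // so only the remaining nodes' container logs are inspected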
+ var nodesToInclude []string
+ for i := 1; i < totalNodeCount; i++ {
+ nodesToInclude = append(nodesToInclude, testEnv.ClCluster.Nodes[i].ContainerName+".log")
+ }
+
+ // save all log files in temp dir
+ loggingErr := ctf_docker.WriteAllContainersLogs(l, tempLogsDir)
+ if loggingErr != nil {
+ l.Debug().Err(loggingErr).Msg("Error writing all containers logs. Trying again...")
+
+ // try again
+ return
+ }
+
+ var fileNameIncludeFilter = func(name string) bool {
+ for _, n := range nodesToInclude {
+ if strings.EqualFold(name, n) {
+ return true
+ }
+ }
+ return false
+ }
+
+ // find log files for CL nodes
+ fileWalkErr := filepath.Walk(tempLogsDir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ if os.IsPermission(err) {
+ return nil
+ }
+ return err
+ }
+ if !info.IsDir() && fileNameIncludeFilter(info.Name()) {
+ absPath, err := filepath.Abs(path)
+ if err != nil {
+ return err
+ }
+ logFilePaths[strings.TrimSuffix(info.Name(), ".log")] = absPath
+ }
+ return nil
+ })
+
+ if fileWalkErr != nil {
+ l.Debug().Err(fileWalkErr).Msg("Error walking through log files. Trying again...")
+
+ return
+ }
+
+ if len(logFilePaths) != expectedNodeCount {
+ l.Debug().Msgf("Expected number of log files to match number of nodes (excluding bootstrap node). Expected: %d, Found: %d. Trying again...", expectedNodeCount, len(logFilePaths))
+
+ return
+ }
+
+ // search for expected pattern in log file
+ var searchForLineInFile = func(filePath string, pattern string) bool {
+ file, fileErr := os.Open(filePath)
+ if fileErr != nil {
+ return false
+ }
+
+ defer func(file *os.File) {
+ _ = file.Close()
+ }(file)
+
+ scanner := bufio.NewScanner(file)
+ scanner.Split(bufio.ScanLines)
+ pc := regexp.MustCompile(pattern)
+
+ for scanner.Scan() {
+ jsonLogLine := scanner.Text()
+ if pc.MatchString(jsonLogLine) {
+ return true
+ }
+
+ }
+ return false
+ }
+
+ wg := sync.WaitGroup{}
+ resultsCh := make(chan map[string][]string, len(logFilePaths))
+
+ // process all logs in parallel
+ for nodeName, logFilePath := range logFilePaths {
+ wg.Add(1)
+ filePath := logFilePath
+ go func() {
+ defer wg.Done()
+ var patternsFound []string
+ for _, pattern := range expectedPatterns {
+ found := searchForLineInFile(filePath, pattern)
+ if found {
+ patternsFound = append(patternsFound, pattern)
+ }
+ }
+ resultsCh <- map[string][]string{nodeName: patternsFound}
+ }()
+ }
+
+ wg.Wait()
+ close(resultsCh)
- // make sure that nodes are correctly configured by scanning the logs
- for _, pattern := range expectedPatterns {
- l.Info().Msgf("Checking for pattern: '%s' in CL node logs", pattern)
var correctlyConfiguredNodes []string
- for i := 1; i < len(testEnv.ClCluster.Nodes); i++ {
- logProcessor, processFn, err := logstream.GetRegexMatchingProcessor(testEnv.LogStream, pattern)
- require.NoError(t, err, "Error getting regex matching processor")
-
- count, err := logProcessor.ProcessContainerLogs(testEnv.ClCluster.Nodes[i].ContainerName, processFn)
- require.NoError(t, err, "Error processing container logs")
- if *count >= 1 {
- correctlyConfiguredNodes = append(correctlyConfiguredNodes, testEnv.ClCluster.Nodes[i].ContainerName)
+ var incorrectlyConfiguredNodes []string
+
+ // check results
+ for result := range resultsCh {
+ for nodeName, patternsFound := range result {
+ if len(patternsFound) == len(expectedPatterns) {
+ correctlyConfiguredNodes = append(correctlyConfiguredNodes, nodeName)
+ } else {
+ incorrectlyConfiguredNodes = append(incorrectlyConfiguredNodes, nodeName)
+ }
}
}
- require.Equal(t, expectedNodesWithConfiguration, len(correctlyConfiguredNodes), "expected correct plugin config to be applied to %d cl-nodes, but only following ones had it: %s; regexp used: %s", expectedNodesWithConfiguration, strings.Join(correctlyConfiguredNodes, ", "), string(pattern))
- }
+
+ allNodesHaveCorrectConfig = len(correctlyConfiguredNodes) == expectedNodeCount
+
+ g.Expect(allNodesHaveCorrectConfig).To(gomega.BeTrue(), "%d nodes' logs were missing expected plugin configuration entries. Correctly configured nodes: %s. Nodes with missing configuration: %s. Expected log patterns: %s", expectedNodeCount-len(correctlyConfiguredNodes), strings.Join(correctlyConfiguredNodes, ", "), strings.Join(incorrectlyConfiguredNodes, ", "), strings.Join(expectedPatterns, ", "))
+ }, "1m", "10s").Should(gomega.Succeed())
+
+ l.Info().Msg("All nodes have correct plugin configuration applied")
}
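
For context on the refactor above, here is a minimal standalone sketch of the fan-out/fan-in log scanning it introduces: each node's log file is scanned for a set of regex patterns in its own goroutine, and per-node results are collected over a buffered channel. Node names, file paths, and patterns below are hypothetical illustrations, not the test's actual values.

// Standalone sketch (not part of the diff): parallel regex scanning of log files.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"sync"
)

// fileContainsPattern reports whether any line of the file matches the regex.
func fileContainsPattern(path string, re *regexp.Regexp) bool {
	f, err := os.Open(path)
	if err != nil {
		return false
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		if re.MatchString(sc.Text()) {
			return true
		}
	}
	return false
}

func main() {
	logFiles := map[string]string{ // node name -> log file path (hypothetical)
		"node-1": "/tmp/logs/node-1.log",
		"node-2": "/tmp/logs/node-2.log",
	}
	patterns := []string{`plugin config applied`, `OCR2 job spec`} // hypothetical patterns

	var wg sync.WaitGroup
	results := make(chan map[string][]string, len(logFiles))
	for name, path := range logFiles {
		wg.Add(1)
		name, path := name, path // copy loop variables for the goroutine
		go func() {
			defer wg.Done()
			var found []string
			for _, p := range patterns {
				if fileContainsPattern(path, regexp.MustCompile(p)) {
					found = append(found, p)
				}
			}
			results <- map[string][]string{name: found}
		}()
	}
	wg.Wait()
	close(results)
	for r := range results {
		fmt.Println(r) // e.g. map[node-1:[plugin config applied OCR2 job spec]]
	}
}
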
diff --git a/integration-tests/testconfig/automation/example.toml b/integration-tests/testconfig/automation/example.toml
index 3bbe78d693d..c239e5a3966 100644
--- a/integration-tests/testconfig/automation/example.toml
+++ b/integration-tests/testconfig/automation/example.toml
@@ -7,14 +7,6 @@ version="2.7.0"
# if set to true will save logs even if test did not fail
test_log_collect=false
-[Logging.LogStream]
-# supported targets: file, loki, in-memory. if empty no logs will be persistet
-log_targets=["file"]
-# context timeout for starting log producer and also time-frame for requesting logs
-log_producer_timeout="10s"
-# number of retries before log producer gives up and stops listening to logs
-log_producer_retry_limit=10
-
# if you want to use polygon_mumbial
[Network]
selected_networks=["polygon_mumbai"]
diff --git a/integration-tests/testconfig/ccip/config.go b/integration-tests/testconfig/ccip/config.go
index 72c81f05f47..70c850fd591 100644
--- a/integration-tests/testconfig/ccip/config.go
+++ b/integration-tests/testconfig/ccip/config.go
@@ -147,6 +147,9 @@ func (o *JDConfig) GetJDDBVersion() string {
func (o *Config) Validate() error {
var chainIds []int64
for _, net := range o.PrivateEthereumNetworks {
+ if net.EthereumChainConfig.ChainID < 0 {
+ return fmt.Errorf("negative chain ID found for network %d", net.EthereumChainConfig.ChainID)
+ }
chainIds = append(chainIds, int64(net.EthereumChainConfig.ChainID))
}
homeChainSelector, err := strconv.ParseUint(pointer.GetString(o.HomeChainSelector), 10, 64)
@@ -189,14 +192,21 @@ func IsSelectorValid(selector uint64, chainIds []int64) (bool, error) {
if err != nil {
return false, err
}
- if chainId >= math.MaxInt64 {
- return false, fmt.Errorf("chain id overflows int64: %d", chainId)
- }
- expId := int64(chainId)
- for _, id := range chainIds {
- if id == expId {
+
+ for _, cID := range chainIds {
+ if isEqualUint64AndInt64(chainId, cID) {
return true, nil
}
}
return false, nil
}
+
+func isEqualUint64AndInt64(u uint64, i int64) bool {
+ if i < 0 {
+ return false // uint64 cannot be equal to a negative int64
+ }
+ if u > math.MaxInt64 {
+ return false // uint64 cannot be equal to an int64 if it exceeds the maximum int64 value
+ }
+ return u == uint64(i)
+}
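
A standalone sketch showing why the direct int64 cast was replaced by isEqualUint64AndInt64: casting a uint64 above math.MaxInt64 (or comparing against a negative int64) can claim equality for values that are not equal. The values below are illustrative only.

// Standalone sketch (not part of the diff) of the overflow-safe comparison.
package main

import (
	"fmt"
	"math"
)

// isEqualUint64AndInt64 mirrors the helper added above: equality is only
// possible when the int64 is non-negative and the uint64 fits in int64 range.
func isEqualUint64AndInt64(u uint64, i int64) bool {
	if i < 0 || u > math.MaxInt64 {
		return false
	}
	return u == uint64(i)
}

func main() {
	fmt.Println(isEqualUint64AndInt64(1337, 1337)) // true
	// A naive int64(u) == i comparison would report true here, because
	// int64(math.MaxUint64) wraps to -1.
	fmt.Println(isEqualUint64AndInt64(math.MaxUint64, -1)) // false
	// Likewise uint64(math.MaxInt64)+1 wraps to math.MinInt64 under a cast.
	fmt.Println(isEqualUint64AndInt64(uint64(math.MaxInt64)+1, math.MinInt64)) // false
}
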
diff --git a/integration-tests/testconfig/ccip/overrides/sepolia_avax_binance.toml b/integration-tests/testconfig/ccip/overrides/sepolia_avax_binance.toml
index 06af64d5d91..72c43b12da5 100644
--- a/integration-tests/testconfig/ccip/overrides/sepolia_avax_binance.toml
+++ b/integration-tests/testconfig/ccip/overrides/sepolia_avax_binance.toml
@@ -5,10 +5,6 @@ chainlink_node_funding = 2
[Logging]
test_log_collect = true
-[Logging.LogStream]
-# supported targets: file, loki, in-memory. if empty no logs will be persisted
-log_targets = ["loki"]
-
[Network]
selected_networks = ['SEPOLIA', 'AVALANCHE_FUJI', 'BSC_TESTNET']
diff --git a/integration-tests/testconfig/default.toml b/integration-tests/testconfig/default.toml
index b9987d4571d..8180b40ae21 100644
--- a/integration-tests/testconfig/default.toml
+++ b/integration-tests/testconfig/default.toml
@@ -2,19 +2,6 @@
# set to true to flush logs to selected target regardless of test result; otherwise logs are only flushed if test failed
test_log_collect = false
-[Logging.Grafana]
-base_url = "https://grafana.ops.prod.cldev.sh"
-base_url_github_ci = "http://localhost:8080/primary"
-dashboard_url = "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs"
-
-[Logging.LogStream]
-# supported targets: file, loki, in-memory. if empty no logs will be persisted
-log_targets = ["file"]
-# context timeout for starting log producer and also time-frame for requesting logs
-log_producer_timeout = "10s"
-# number of retries before log producer gives up and stops listening to logs
-log_producer_retry_limit = 10
-
[ChainlinkImage]
# postgres version to use
postgres_version = "12.0"
diff --git a/integration-tests/testconfig/forwarder_ocr/example.toml b/integration-tests/testconfig/forwarder_ocr/example.toml
index 517a341f803..6ca4b8bbcc3 100644
--- a/integration-tests/testconfig/forwarder_ocr/example.toml
+++ b/integration-tests/testconfig/forwarder_ocr/example.toml
@@ -7,33 +7,6 @@ version="2.7.0"
# if set to true will save logs even if test did not fail
test_log_collect=false
-[Logging.LogStream]
-# supported targets: file, loki, in-memory. if empty no logs will be persistet
-log_targets=["file"]
-# context timeout for starting log producer and also time-frame for requesting logs
-log_producer_timeout="10s"
-# number of retries before log producer gives up and stops listening to logs
-log_producer_retry_limit=10
-
-[Logging.Loki]
-tenant_id="tenant_id"
-# full URL of Loki ingest endpoint
-endpoint="https://loki.url/api/v3/push"
-# currently only needed when using public instance
-basic_auth_secret="loki-basic-auth"
-# only needed for cloud grafana
-bearer_token_secret="bearer_token"
-
-# LogStream will try to shorten Grafana URLs by default (if all 3 variables are set)
-[Logging.Grafana]
-# grafana url (trailing "/" will be stripped)
-base_url="http://grafana.url"
-# url of your grafana dashboard (prefix and suffix "/" are stirpped), example: /d/ad61652-2712-1722/my-dashboard
-dashboard_url="/d/your-dashboard"
-# Grafana dashboard uid to annotate. Find it in Dashboard Settings -> JSON Model
-dashboard_uid="dashboard-uid-to-annotate"
-bearer_token_secret="my-awesome-token"
-
# if you want to use polygon_mumbial
[Network]
selected_networks=["polygon_mumbai"]
diff --git a/integration-tests/testconfig/forwarder_ocr2/example.toml b/integration-tests/testconfig/forwarder_ocr2/example.toml
index 3ec3e4c690a..e3fb66a0f3a 100644
--- a/integration-tests/testconfig/forwarder_ocr2/example.toml
+++ b/integration-tests/testconfig/forwarder_ocr2/example.toml
@@ -8,33 +8,6 @@ version="2.7.0"
# if set to true will save logs even if test did not fail
test_log_collect=false
-[Logging.LogStream]
-# supported targets: file, loki, in-memory. if empty no logs will be persistet
-log_targets=["file"]
-# context timeout for starting log producer and also time-frame for requesting logs
-log_producer_timeout="10s"
-# number of retries before log producer gives up and stops listening to logs
-log_producer_retry_limit=10
-
-[Logging.Loki]
-tenant_id="tenant_id"
-# full URL of Loki ingest endpoint
-endpoint="https://loki.url/api/v3/push"
-# currently only needed when using public instance
-basic_auth_secret="loki-basic-auth"
-# only needed for cloud grafana
-bearer_token_secret="bearer_token"
-
-# LogStream will try to shorten Grafana URLs by default (if all 3 variables are set)
-[Logging.Grafana]
-# grafana url (trailing "/" will be stripped)
-base_url="http://grafana.url"
-# url of your grafana dashboard (prefix and suffix "/" are stirpped), example: /d/ad61652-2712-1722/my-dashboard
-dashboard_url="/d/your-dashboard"
-# Grafana dashboard uid to annotate. Find it in Dashboard Settings -> JSON Model
-dashboard_uid="dashboard-uid-to-annotate"
-bearer_token_secret="my-awesome-token"
-
# if you want to use polygon_mumbial
[Network]
selected_networks=["polygon_mumbai"]
diff --git a/integration-tests/testconfig/functions/example.toml b/integration-tests/testconfig/functions/example.toml
index 74d931632a8..ec7076fa9f9 100644
--- a/integration-tests/testconfig/functions/example.toml
+++ b/integration-tests/testconfig/functions/example.toml
@@ -7,14 +7,6 @@ version="2.7.0"
# if set to true will save logs even if test did not fail
test_log_collect=false
-[Logging.LogStream]
-# supported targets: file, loki, in-memory. if empty no logs will be persistet
-log_targets=["file"]
-# context timeout for starting log producer and also time-frame for requesting logs
-log_producer_timeout="10s"
-# number of retries before log producer gives up and stops listening to logs
-log_producer_retry_limit=10
-
# if you want to use simulated network
[Network]
selected_networks=["polygon_mumbai"]
diff --git a/integration-tests/testconfig/keeper/example.toml b/integration-tests/testconfig/keeper/example.toml
index 4efbf974827..7fe3bf26d0a 100644
--- a/integration-tests/testconfig/keeper/example.toml
+++ b/integration-tests/testconfig/keeper/example.toml
@@ -7,14 +7,6 @@ version="2.7.0"
# if set to true will save logs even if test did not fail
test_log_collect=false
-[Logging.LogStream]
-# supported targets: file, loki, in-memory. if empty no logs will be persistet
-log_targets=["file"]
-# context timeout for starting log producer and also time-frame for requesting logs
-log_producer_timeout="10s"
-# number of retries before log producer gives up and stops listening to logs
-log_producer_retry_limit=10
-
# if you want to use polygon_mumbial
[Network]
selected_networks=["polygon_mumbai"]
diff --git a/integration-tests/testconfig/log_poller/example.toml b/integration-tests/testconfig/log_poller/example.toml
index 78f3b5482d9..b94b6e0e202 100644
--- a/integration-tests/testconfig/log_poller/example.toml
+++ b/integration-tests/testconfig/log_poller/example.toml
@@ -7,14 +7,6 @@ version="2.7.0"
# if set to true will save logs even if test did not fail
test_log_collect=false
-[Logging.LogStream]
-# supported targets: file, loki, in-memory. if empty no logs will be persistet
-log_targets=["file"]
-# context timeout for starting log producer and also time-frame for requesting logs
-log_producer_timeout="10s"
-# number of retries before log producer gives up and stops listening to logs
-log_producer_retry_limit=10
-
# if you want to use polygon_mumbial
[Network]
selected_networks=["polygon_mumbai"]
diff --git a/integration-tests/testconfig/node/example.toml b/integration-tests/testconfig/node/example.toml
index bc5628e46b3..4635e40c037 100644
--- a/integration-tests/testconfig/node/example.toml
+++ b/integration-tests/testconfig/node/example.toml
@@ -7,14 +7,6 @@ version="2.7.0"
# if set to true will save logs even if test did not fail
test_log_collect=false
-[Logging.LogStream]
-# supported targets: file, loki, in-memory. if empty no logs will be persistet
-log_targets=["file"]
-# context timeout for starting log producer and also time-frame for requesting logs
-log_producer_timeout="10s"
-# number of retries before log producer gives up and stops listening to logs
-log_producer_retry_limit=10
-
# if you want to use polygon_mumbial
[Network]
selected_networks=["polygon_mumbai"]
diff --git a/integration-tests/testconfig/ocr/example.toml b/integration-tests/testconfig/ocr/example.toml
index 7c1c755567f..d1edd3a67fd 100644
--- a/integration-tests/testconfig/ocr/example.toml
+++ b/integration-tests/testconfig/ocr/example.toml
@@ -7,33 +7,6 @@ version="2.7.0"
# if set to true will save logs even if test did not fail
test_log_collect=false
-[Logging.LogStream]
-# supported targets: file, loki, in-memory. if empty no logs will be persistet
-log_targets=["file"]
-# context timeout for starting log producer and also time-frame for requesting logs
-log_producer_timeout="10s"
-# number of retries before log producer gives up and stops listening to logs
-log_producer_retry_limit=10
-
-[Logging.Loki]
-tenant_id="tenant_id"
-# full URL of Loki ingest endpoint
-endpoint="https://loki.url/api/v3/push"
-# currently only needed when using public instance
-basic_auth_secret="loki-basic-auth"
-# only needed for cloud grafana
-bearer_token_secret="bearer_token"
-
-# LogStream will try to shorten Grafana URLs by default (if all 3 variables are set)
-[Logging.Grafana]
-# grafana url (trailing "/" will be stripped)
-base_url="http://grafana.url"
-# url of your grafana dashboard (prefix and suffix "/" are stirpped), example: /d/ad61652-2712-1722/my-dashboard
-dashboard_url="/d/your-dashboard"
-# Grafana dashboard uid to annotate. Find it in Dashboard Settings -> JSON Model
-dashboard_uid="dashboard-uid-to-annotate"
-bearer_token_secret="my-awesome-token"
-
# if you want to use polygon_mumbial
[Network]
selected_networks=["polygon_mumbai"]
diff --git a/integration-tests/testconfig/ocr2/example.toml b/integration-tests/testconfig/ocr2/example.toml
index 319f64d2580..679e4527a31 100644
--- a/integration-tests/testconfig/ocr2/example.toml
+++ b/integration-tests/testconfig/ocr2/example.toml
@@ -7,33 +7,6 @@ version="2.7.0"
# if set to true will save logs even if test did not fail
test_log_collect=false
-[Logging.LogStream]
-# supported targets: file, loki, in-memory. if empty no logs will be persistet
-log_targets=["file"]
-# context timeout for starting log producer and also time-frame for requesting logs
-log_producer_timeout="10s"
-# number of retries before log producer gives up and stops listening to logs
-log_producer_retry_limit=10
-
-[Logging.Loki]
-tenant_id="tenant_id"
-# full URL of Loki ingest endpoint
-endpoint="https://loki.url/api/v3/push"
-# currently only needed when using public instance
-basic_auth_secret="loki-basic-auth"
-# only needed for cloud grafana
-bearer_token_secret="bearer_token"
-
-# LogStream will try to shorten Grafana URLs by default (if all 3 variables are set)
-[Logging.Grafana]
-# grafana url (trailing "/" will be stripped)
-base_url="http://grafana.url"
-# url of your grafana dashboard (prefix and suffix "/" are stirpped), example: /d/ad61652-2712-1722/my-dashboard
-dashboard_url="/d/your-dashboard"
-# Grafana dashboard uid to annotate. Find it in Dashboard Settings -> JSON Model
-dashboard_uid="dashboard-uid-to-annotate"
-bearer_token_secret="my-awesome-token"
-
# if you want to use polygon_mumbial
[Network]
selected_networks=["polygon_mumbai"]
diff --git a/integration-tests/testconfig/testconfig.go b/integration-tests/testconfig/testconfig.go
index 545818e3348..19e3f0b7ada 100644
--- a/integration-tests/testconfig/testconfig.go
+++ b/integration-tests/testconfig/testconfig.go
@@ -6,7 +6,6 @@ import (
"fmt"
"math/big"
"os"
- "slices"
"strings"
"github.com/barkimedes/go-deepcopy"
@@ -631,26 +630,6 @@ func (c *TestConfig) Validate() error {
return fmt.Errorf("logging config must be set")
}
- if err := c.Logging.Validate(); err != nil {
- return errors.Wrapf(err, "logging config validation failed")
- }
-
- if c.Logging.Loki != nil {
- if err := c.Logging.Loki.Validate(); err != nil {
- return errors.Wrapf(err, "loki config validation failed")
- }
- }
-
- if c.Logging.LogStream != nil && slices.Contains(c.Logging.LogStream.LogTargets, "loki") {
- if c.Logging.Loki == nil {
- return fmt.Errorf("in order to use Loki as logging target you must set Loki config in logging config")
- }
-
- if err := c.Logging.Loki.Validate(); err != nil {
- return errors.Wrapf(err, "loki config validation failed")
- }
- }
-
if c.Pyroscope != nil {
if err := c.Pyroscope.Validate(); err != nil {
return errors.Wrapf(err, "pyroscope config validation failed")
diff --git a/integration-tests/testconfig/vrfv2/example.toml b/integration-tests/testconfig/vrfv2/example.toml
index 13af6dee620..3665c2f43cf 100644
--- a/integration-tests/testconfig/vrfv2/example.toml
+++ b/integration-tests/testconfig/vrfv2/example.toml
@@ -7,14 +7,6 @@ version="2.7.0"
# if set to true will save logs even if test did not fail
test_log_collect=false
-[Logging.LogStream]
-# supported targets: file, loki, in-memory. if empty no logs will be persistet
-log_targets=["file"]
-# context timeout for starting log producer and also time-frame for requesting logs
-log_producer_timeout="10s"
-# number of retries before log producer gives up and stops listening to logs
-log_producer_retry_limit=10
-
# if you want to use polygon_mumbial
[Network]
selected_networks=["polygon_mumbai"]
diff --git a/integration-tests/testconfig/vrfv2plus/example.toml b/integration-tests/testconfig/vrfv2plus/example.toml
index 160e9ba03a9..a45d53f67b8 100644
--- a/integration-tests/testconfig/vrfv2plus/example.toml
+++ b/integration-tests/testconfig/vrfv2plus/example.toml
@@ -7,14 +7,6 @@ version="2.7.0"
# if set to true will save logs even if test did not fail
test_log_collect=false
-[Logging.LogStream]
-# supported targets: file, loki, in-memory. if empty no logs will be persistet
-log_targets=["file"]
-# context timeout for starting log producer and also time-frame for requesting logs
-log_producer_timeout="10s"
-# number of retries before log producer gives up and stops listening to logs
-log_producer_retry_limit=10
-
# if you want to use polygon_mumbial
[Network]
selected_networks=["polygon_mumbai"]
diff --git a/integration-tests/testsetups/ccip/test_helpers.go b/integration-tests/testsetups/ccip/test_helpers.go
index 937c2283421..56c6e7310ba 100644
--- a/integration-tests/testsetups/ccip/test_helpers.go
+++ b/integration-tests/testsetups/ccip/test_helpers.go
@@ -11,6 +11,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
chainsel "github.com/smartcontractkit/chain-selectors"
+ "go.uber.org/zap/zapcore"
commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
"github.com/smartcontractkit/chainlink-testing-framework/lib/blockchain"
@@ -18,6 +19,7 @@ import (
ctftestenv "github.com/smartcontractkit/chainlink-testing-framework/lib/docker/test_env"
"github.com/smartcontractkit/chainlink-testing-framework/lib/logging"
"github.com/smartcontractkit/chainlink-testing-framework/lib/networks"
+ "github.com/smartcontractkit/chainlink-testing-framework/lib/testreporters"
"github.com/smartcontractkit/chainlink-testing-framework/lib/utils/conversions"
"github.com/smartcontractkit/chainlink-testing-framework/lib/utils/ptr"
"github.com/smartcontractkit/chainlink-testing-framework/lib/utils/testcontext"
@@ -169,7 +171,6 @@ func NewIntegrationEnvironment(t *testing.T, opts ...changeset.TestOps) (changes
dockerEnv.devEnvTestCfg.CCIP.RMNConfig.GetProxyVersion(),
dockerEnv.devEnvTestCfg.CCIP.RMNConfig.GetAFN2ProxyImage(),
dockerEnv.devEnvTestCfg.CCIP.RMNConfig.GetAFN2ProxyVersion(),
- dockerEnv.testEnv.LogStream,
)
require.NoError(t, err)
return deployedEnv, *rmnCluster
@@ -323,11 +324,30 @@ func CreateDockerEnv(t *testing.T) (
}
}
+ // ignore critical CL node logs until they are fixed, as otherwise tests will fail
+ var logScannerSettings = test_env.GetDefaultChainlinkNodeLogScannerSettingsWithExtraAllowedMessages(testreporters.NewAllowedLogMessage(
+ "No live RPC nodes available",
+ "CL nodes are started before simulated chains, so this is expected",
+ zapcore.DPanicLevel,
+ testreporters.WarnAboutAllowedMsgs_No),
+ testreporters.NewAllowedLogMessage(
+ "Error stopping job service",
+ "Possible lifecycle bug in chainlink: failed to close RMN home reader: has already been stopped: already stopped",
+ zapcore.DPanicLevel,
+ testreporters.WarnAboutAllowedMsgs_No),
+ testreporters.NewAllowedLogMessage(
+ "Shutdown grace period of 5s exceeded, closing DB and exiting...",
+ "Possible lifecycle bug in chainlink.",
+ zapcore.DPanicLevel,
+ testreporters.WarnAboutAllowedMsgs_No),
+ )
+
builder := test_env.NewCLTestEnvBuilder().
WithTestConfig(&cfg).
WithTestInstance(t).
WithMockAdapter().
WithJobDistributor(cfg.CCIP.JobDistributorConfig).
+ WithChainlinkNodeLogScanner(logScannerSettings).
WithStandardCleanup()
// if private ethereum networks are provided, we will use them to create the test environment
@@ -434,7 +454,6 @@ func StartChainlinkNodes(
pointer.GetString(cfg.GetChainlinkImageConfig().Image),
pointer.GetString(cfg.GetChainlinkImageConfig().Version),
toml,
- env.LogStream,
test_env.WithPgDBOptions(
ctftestenv.WithPostgresImageVersion(pointer.GetString(cfg.GetChainlinkImageConfig().PostgresVersion)),
),
diff --git a/integration-tests/utils/pgtest/pgtest.go b/integration-tests/utils/pgtest/pgtest.go
index 8b11f9ef424..3baccc791b6 100644
--- a/integration-tests/utils/pgtest/pgtest.go
+++ b/integration-tests/utils/pgtest/pgtest.go
@@ -3,33 +3,18 @@ package pgtest
import (
"testing"
- "github.com/google/uuid"
"github.com/jmoiron/sqlx"
- "github.com/scylladb/go-reflectx"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
- "github.com/smartcontractkit/chainlink-common/pkg/utils"
- "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
+ "github.com/smartcontractkit/chainlink-common/pkg/sqlutil/pg"
+
+ "github.com/smartcontractkit/chainlink/v2/core/config/env"
)
func NewSqlxDB(t testing.TB) *sqlx.DB {
- db, err := sqlx.Open(string(dialects.TransactionWrappedPostgres), uuid.New().String())
- require.NoError(t, err)
- t.Cleanup(func() { assert.NoError(t, db.Close()) })
- db.MapperFunc(reflectx.CamelToSnakeASCII)
-
- return db
-}
-
-func MustExec(t *testing.T, ds sqlutil.DataSource, stmt string, args ...interface{}) {
- ctx := tests.Context(t)
- require.NoError(t, utils.JustError(ds.ExecContext(ctx, stmt, args...)))
-}
-
-func MustCount(t *testing.T, db *sqlx.DB, stmt string, args ...interface{}) (cnt int) {
- require.NoError(t, db.Get(&cnt, stmt, args...))
- return
+ dbURL := string(env.DatabaseURL.Get())
+ if dbURL == "" {
+ t.Errorf("you must provide a CL_DATABASE_URL environment variable")
+ return nil
+ }
+ return pg.NewTestDB(t, dbURL)
}
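
A minimal usage sketch for the rewritten helper, written as a test in the same package so no import path needs to be assumed; it presumes CL_DATABASE_URL points at a disposable *_test database.

// Standalone sketch (not part of the diff): exercising the rewritten helper.
package pgtest

import "testing"

func TestNewSqlxDBRoundTrip(t *testing.T) {
	db := NewSqlxDB(t) // marks the test failed and returns nil if CL_DATABASE_URL is unset
	if db == nil {
		return
	}
	var one int
	if err := db.Get(&one, "SELECT 1"); err != nil {
		t.Fatalf("query failed: %v", err)
	}
	if one != 1 {
		t.Fatalf("expected 1, got %d", one)
	}
}
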
diff --git a/integration-tests/utils/pgtest/txdb.go b/integration-tests/utils/pgtest/txdb.go
deleted file mode 100644
index f28b6f95f2b..00000000000
--- a/integration-tests/utils/pgtest/txdb.go
+++ /dev/null
@@ -1,510 +0,0 @@
-package pgtest
-
-import (
- "context"
- "database/sql"
- "database/sql/driver"
- "flag"
- "fmt"
- "io"
- "net/url"
- "strings"
- "sync"
- "testing"
-
- "github.com/jmoiron/sqlx"
- "go.uber.org/multierr"
-
- "github.com/smartcontractkit/chainlink/v2/core/config/env"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
-)
-
-// txdb is a simplified version of https://github.com/DATA-DOG/go-txdb
-//
-// The original lib has various problems and is hard to understand because it
-// tries to be more general. The version in this file is more tightly focused
-// to our needs and should be easier to reason about and less likely to have
-// subtle bugs/races.
-//
-// It doesn't currently support savepoints but could be made to if necessary.
-//
-// Transaction BEGIN/ROLLBACK effectively becomes a no-op, this should have no
-// negative impact on normal test operation.
-//
-// If you MUST test BEGIN/ROLLBACK behaviour, you will have to configure your
-// store to use the raw DialectPostgres dialect and setup a one-use database.
-// See heavyweight.FullTestDB() as a convenience function to help you do this,
-// but please use sparingly because as it's name implies, it is expensive.
-func init() {
- testing.Init()
- if !flag.Parsed() {
- flag.Parse()
- }
- if testing.Short() {
- // -short tests don't need a DB
- return
- }
- dbURL := string(env.DatabaseURL.Get())
- if dbURL == "" {
- panic("you must provide a CL_DATABASE_URL environment variable")
- }
-
- parsed, err := url.Parse(dbURL)
- if err != nil {
- panic(err)
- }
- if parsed.Path == "" {
- msg := fmt.Sprintf("invalid %[1]s: `%[2]s`. You must set %[1]s env var to point to your test database. Note that the test database MUST end in `_test` to differentiate from a possible production DB. HINT: Try %[1]s=postgresql://postgres@localhost:5432/chainlink_test?sslmode=disable", env.DatabaseURL, parsed.String())
- panic(msg)
- }
- if !strings.HasSuffix(parsed.Path, "_test") {
- msg := fmt.Sprintf("cannot run tests against database named `%s`. Note that the test database MUST end in `_test` to differentiate from a possible production DB. HINT: Try %s=postgresql://postgres@localhost:5432/chainlink_test?sslmode=disable", parsed.Path[1:], env.DatabaseURL)
- panic(msg)
- }
- name := string(dialects.TransactionWrappedPostgres)
- sql.Register(name, &txDriver{
- dbURL: dbURL,
- conns: make(map[string]*conn),
- })
- sqlx.BindDriver(name, sqlx.DOLLAR)
-}
-
-var _ driver.Conn = &conn{}
-
-var _ driver.Validator = &conn{}
-var _ driver.SessionResetter = &conn{}
-
-// txDriver is an sql driver which runs on a single transaction.
-// When `Close` is called, transaction is rolled back.
-type txDriver struct {
- sync.Mutex
- db *sql.DB
- conns map[string]*conn
-
- dbURL string
-}
-
-func (d *txDriver) Open(dsn string) (driver.Conn, error) {
- d.Lock()
- defer d.Unlock()
- // Open real db connection if its the first call
- if d.db == nil {
- db, err := sql.Open(string(dialects.Postgres), d.dbURL)
- if err != nil {
- return nil, err
- }
- d.db = db
- }
- c, exists := d.conns[dsn]
- if !exists || !c.tryOpen() {
- tx, err := d.db.Begin()
- if err != nil {
- return nil, err
- }
- c = &conn{tx: tx, opened: 1, dsn: dsn}
- c.removeSelf = func() error {
- return d.deleteConn(c)
- }
- d.conns[dsn] = c
- }
- return c, nil
-}
-
-// deleteConn is called by a connection when it is closed via the `close` method.
-// It also auto-closes the DB when the last checked out connection is closed.
-func (d *txDriver) deleteConn(c *conn) error {
- // must lock here to avoid racing with Open
- d.Lock()
- defer d.Unlock()
-
- if d.conns[c.dsn] != c {
- return nil // already been replaced
- }
- delete(d.conns, c.dsn)
- if len(d.conns) == 0 && d.db != nil {
- if err := d.db.Close(); err != nil {
- return err
- }
- d.db = nil
- }
- return nil
-}
-
-type conn struct {
- sync.Mutex
- dsn string
- tx *sql.Tx // tx may be shared by many conns, definitive one lives in the map keyed by DSN on the txDriver. Do not modify from conn
- closed bool
- opened int
- removeSelf func() error
-}
-
-func (c *conn) Begin() (driver.Tx, error) {
- c.Lock()
- defer c.Unlock()
- if c.closed {
- panic("conn is closed")
- }
- // Begin is a noop because the transaction was already opened
- return tx{c.tx}, nil
-}
-
-// Implement the "ConnBeginTx" interface
-func (c *conn) BeginTx(_ context.Context, opts driver.TxOptions) (driver.Tx, error) {
- // Context is ignored, because single transaction is shared by all callers, thus caller should not be able to
- // control it with local context
- return c.Begin()
-}
-
-// Prepare returns a prepared statement, bound to this connection.
-func (c *conn) Prepare(query string) (driver.Stmt, error) {
- return c.PrepareContext(context.Background(), query)
-}
-
-// Implement the "ConnPrepareContext" interface
-func (c *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
- c.Lock()
- defer c.Unlock()
- if c.closed {
- panic("conn is closed")
- }
-
- // TODO: Fix context handling
- // FIXME: It is not safe to give the passed in context to the tx directly
- // because the tx is shared by many conns and cancelling the context will
- // destroy the tx which can affect other conns
- st, err := c.tx.PrepareContext(context.Background(), query)
- if err != nil {
- return nil, err
- }
- return &stmt{st, c}, nil
-}
-
-// IsValid is called prior to placing the connection into the
-// connection pool by database/sql. The connection will be discarded if false is returned.
-func (c *conn) IsValid() bool {
- c.Lock()
- defer c.Unlock()
- return !c.closed
-}
-
-func (c *conn) ResetSession(ctx context.Context) error {
- // Ensure bad connections are reported: From database/sql/driver:
- // If a connection is never returned to the connection pool but immediately reused, then
- // ResetSession is called prior to reuse but IsValid is not called.
- c.Lock()
- defer c.Unlock()
- if c.closed {
- return driver.ErrBadConn
- }
-
- return nil
-}
-
-// pgx returns nil
-func (c *conn) CheckNamedValue(nv *driver.NamedValue) error {
- return nil
-}
-
-// Implement the "QueryerContext" interface
-func (c *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
- c.Lock()
- defer c.Unlock()
- if c.closed {
- panic("conn is closed")
- }
-
- // TODO: Fix context handling
- rs, err := c.tx.QueryContext(context.Background(), query, mapNamedArgs(args)...)
- if err != nil {
- return nil, err
- }
- defer rs.Close()
-
- return buildRows(rs)
-}
-
-// Implement the "ExecerContext" interface
-func (c *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
- c.Lock()
- defer c.Unlock()
- if c.closed {
- panic("conn is closed")
- }
- // TODO: Fix context handling
- return c.tx.ExecContext(context.Background(), query, mapNamedArgs(args)...)
-}
-
-// tryOpen attempts to increment the open count, but returns false if closed.
-func (c *conn) tryOpen() bool {
- c.Lock()
- defer c.Unlock()
- if c.closed {
- return false
- }
- c.opened++
- return true
-}
-
-// Close invalidates and potentially stops any current
-// prepared statements and transactions, marking this
-// connection as no longer in use.
-//
-// Because the sql package maintains a free pool of
-// connections and only calls Close when there's a surplus of
-// idle connections, it shouldn't be necessary for drivers to
-// do their own connection caching.
-//
-// Drivers must ensure all network calls made by Close
-// do not block indefinitely (e.g. apply a timeout).
-func (c *conn) Close() (err error) {
- if !c.close() {
- return
- }
- // Wait to remove self to avoid nesting locks.
- if err := c.removeSelf(); err != nil {
- panic(err)
- }
- return
-}
-
-//nolint:revive
-func (c *conn) close() bool {
- c.Lock()
- defer c.Unlock()
- if c.closed {
- // Double close, should be a safe to make this a noop
- // PGX allows double close
- // See: https://github.com/jackc/pgx/blob/a457da8bffa4f90ad672fa093ee87f20cf06687b/conn.go#L249
- return false
- }
-
- c.opened--
- if c.opened > 0 {
- return false
- }
- if c.tx != nil {
- if err := c.tx.Rollback(); err != nil {
- panic(err)
- }
- c.tx = nil
- }
- c.closed = true
- return true
-}
-
-type tx struct {
- tx *sql.Tx
-}
-
-func (tx tx) Commit() error {
- // Commit is a noop because the transaction will be rolled back at the end
- return nil
-}
-
-func (tx tx) Rollback() error {
- // Rollback is a noop because the transaction will be rolled back at the end
- return nil
-}
-
-type stmt struct {
- st *sql.Stmt
- conn *conn
-}
-
-func (s stmt) Exec(args []driver.Value) (driver.Result, error) {
- s.conn.Lock()
- defer s.conn.Unlock()
- if s.conn.closed {
- panic("conn is closed")
- }
- return s.st.Exec(mapArgs(args)...)
-}
-
-// Implement the "StmtExecContext" interface
-func (s *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
- s.conn.Lock()
- defer s.conn.Unlock()
- if s.conn.closed {
- panic("conn is closed")
- }
- // TODO: Fix context handling
- return s.st.ExecContext(context.Background(), mapNamedArgs(args)...)
-}
-
-func mapArgs(args []driver.Value) (res []interface{}) {
- res = make([]interface{}, len(args))
- for i := range args {
- res[i] = args[i]
- }
- return
-}
-
-func (s stmt) NumInput() int {
- return -1
-}
-
-func (s stmt) Query(args []driver.Value) (driver.Rows, error) {
- s.conn.Lock()
- defer s.conn.Unlock()
- if s.conn.closed {
- panic("conn is closed")
- }
- rows, err := s.st.Query(mapArgs(args)...)
- defer func() {
- err = multierr.Combine(err, rows.Close())
- }()
- if err != nil {
- return nil, err
- }
- return buildRows(rows)
-}
-
-// Implement the "StmtQueryContext" interface
-func (s *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
- s.conn.Lock()
- defer s.conn.Unlock()
- if s.conn.closed {
- panic("conn is closed")
- }
- // TODO: Fix context handling
- rows, err := s.st.QueryContext(context.Background(), mapNamedArgs(args)...)
- if err != nil {
- return nil, err
- }
- return buildRows(rows)
-}
-
-func (s stmt) Close() error {
- s.conn.Lock()
- defer s.conn.Unlock()
- return s.st.Close()
-}
-
-func buildRows(r *sql.Rows) (driver.Rows, error) {
- set := &rowSets{}
- rs := &rows{}
- if err := rs.read(r); err != nil {
- return set, err
- }
- set.sets = append(set.sets, rs)
- for r.NextResultSet() {
- rss := &rows{}
- if err := rss.read(r); err != nil {
- return set, err
- }
- set.sets = append(set.sets, rss)
- }
- return set, nil
-}
-
-// Implement the "RowsNextResultSet" interface
-func (rs *rowSets) HasNextResultSet() bool {
- return rs.pos+1 < len(rs.sets)
-}
-
-// Implement the "RowsNextResultSet" interface
-func (rs *rowSets) NextResultSet() error {
- if !rs.HasNextResultSet() {
- return io.EOF
- }
-
- rs.pos++
- return nil
-}
-
-type rows struct {
- rows [][]driver.Value
- pos int
- cols []string
- colTypes []*sql.ColumnType
-}
-
-func (r *rows) Columns() []string {
- return r.cols
-}
-
-func (r *rows) ColumnTypeDatabaseTypeName(index int) string {
- return r.colTypes[index].DatabaseTypeName()
-}
-
-func (r *rows) Next(dest []driver.Value) error {
- r.pos++
- if r.pos > len(r.rows) {
- return io.EOF
- }
-
- for i, val := range r.rows[r.pos-1] {
- dest[i] = *(val.(*interface{}))
- }
-
- return nil
-}
-
-func (r *rows) Close() error {
- return nil
-}
-
-func (r *rows) read(rs *sql.Rows) error {
- var err error
- r.cols, err = rs.Columns()
- if err != nil {
- return err
- }
-
- r.colTypes, err = rs.ColumnTypes()
- if err != nil {
- return err
- }
-
- for rs.Next() {
- values := make([]interface{}, len(r.cols))
- for i := range values {
- values[i] = new(interface{})
- }
- if err := rs.Scan(values...); err != nil {
- return err
- }
- row := make([]driver.Value, len(r.cols))
- for i, v := range values {
- row[i] = driver.Value(v)
- }
- r.rows = append(r.rows, row)
- }
- return rs.Err()
-}
-
-type rowSets struct {
- sets []*rows
- pos int
-}
-
-func (rs *rowSets) Columns() []string {
- return rs.sets[rs.pos].cols
-}
-
-func (rs *rowSets) ColumnTypeDatabaseTypeName(index int) string {
- return rs.sets[rs.pos].ColumnTypeDatabaseTypeName(index)
-}
-
-func (rs *rowSets) Close() error {
- return nil
-}
-
-// advances to next row
-func (rs *rowSets) Next(dest []driver.Value) error {
- return rs.sets[rs.pos].Next(dest)
-}
-
-func mapNamedArgs(args []driver.NamedValue) (res []interface{}) {
- res = make([]interface{}, len(args))
- for i := range args {
- name := args[i].Name
- if name != "" {
- res[i] = sql.Named(name, args[i].Value)
- } else {
- res[i] = args[i].Value
- }
- }
- return
-}
diff --git a/internal/testdb/testdb.go b/internal/testdb/testdb.go
index 88251ae2c6f..1a52b1173e3 100644
--- a/internal/testdb/testdb.go
+++ b/internal/testdb/testdb.go
@@ -7,7 +7,7 @@ import (
"net/url"
"strings"
- "github.com/smartcontractkit/chainlink/v2/core/store/dialects"
+ pgcommon "github.com/smartcontractkit/chainlink-common/pkg/sqlutil/pg"
)
const (
@@ -33,7 +33,7 @@ func CreateOrReplace(parsed url.URL, suffix string, withTemplate bool) (string,
// Cannot drop test database if we are connected to it, so we must connect
// to a different one. 'postgres' should be present on all postgres installations
parsed.Path = "/postgres"
- db, err := sql.Open(string(dialects.Postgres), parsed.String())
+ db, err := sql.Open(string(pgcommon.Postgres), parsed.String())
if err != nil {
return "", fmt.Errorf("in order to drop the test database, we need to connect to a separate database"+
" called 'postgres'. But we are unable to open 'postgres' database: %+v\n", err)
@@ -66,7 +66,7 @@ func Drop(dbURL url.URL) error {
// Cannot drop test database if we are connected to it, so we must connect
// to a different one. 'postgres' should be present on all postgres installations
dbURL.Path = "/postgres"
- db, err := sql.Open(string(dialects.Postgres), dbURL.String())
+ db, err := sql.Open(string(pgcommon.Postgres), dbURL.String())
if err != nil {
return fmt.Errorf("in order to drop the test database, we need to connect to a separate database"+
" called 'postgres'. But we are unable to open 'postgres' database: %+v\n", err)
diff --git a/plugins/registrar.go b/plugins/registrar.go
index 2a82f2a6204..8523d3980cc 100644
--- a/plugins/registrar.go
+++ b/plugins/registrar.go
@@ -6,7 +6,7 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/loop"
)
-// RegistrarConfig generates contains static configuration inher
+// RegistrarConfig contains static configuration for registering and unregistering LOOPs
type RegistrarConfig interface {
RegisterLOOP(config CmdConfig) (func() *exec.Cmd, loop.GRPCOpts, error)
UnregisterLOOP(ID string)