From c1bd103e9b134a90e0bd5f77b6e54797c7c881a8 Mon Sep 17 00:00:00 2001
From: Kodey Thomas
Date: Fri, 2 Aug 2024 12:50:23 +0100
Subject: [PATCH 01/52] Add L3X Config (#13987)
* Add L3X Config
* Changeset
* comments
* comments
---
.changeset/cool-mirrors-beg.md | 5 +
.../evm/config/toml/defaults/L3X_Mainnet.toml | 18 ++
.../evm/config/toml/defaults/L3X_Sepolia.toml | 18 ++
docs/CONFIG.md | 190 ++++++++++++++++++
4 files changed, 231 insertions(+)
create mode 100644 .changeset/cool-mirrors-beg.md
create mode 100644 core/chains/evm/config/toml/defaults/L3X_Mainnet.toml
create mode 100644 core/chains/evm/config/toml/defaults/L3X_Sepolia.toml
diff --git a/.changeset/cool-mirrors-beg.md b/.changeset/cool-mirrors-beg.md
new file mode 100644
index 00000000000..a030ac7e3a6
--- /dev/null
+++ b/.changeset/cool-mirrors-beg.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+#added L3X Config
diff --git a/core/chains/evm/config/toml/defaults/L3X_Mainnet.toml b/core/chains/evm/config/toml/defaults/L3X_Mainnet.toml
new file mode 100644
index 00000000000..1fbda42fd2a
--- /dev/null
+++ b/core/chains/evm/config/toml/defaults/L3X_Mainnet.toml
@@ -0,0 +1,18 @@
+ChainID = '12324'
+ChainType = 'arbitrum'
+FinalityTagEnabled = true
+FinalityDepth = 10
+LinkContractAddress = '0x79f531a3D07214304F259DC28c7191513223bcf3'
+# Produces blocks on-demand
+NoNewHeadsThreshold = '0'
+OCR.ContractConfirmations = 1
+LogPollInterval = '10s'
+
+[GasEstimator]
+Mode = 'Arbitrum'
+LimitMax = 1_000_000_000
+# Arbitrum-based chains use the suggested gas price, so we don't want to place any limits on the minimum
+PriceMin = '0'
+PriceDefault = '0.1 gwei'
+FeeCapDefault = '1000 gwei'
+BumpThreshold = 5
diff --git a/core/chains/evm/config/toml/defaults/L3X_Sepolia.toml b/core/chains/evm/config/toml/defaults/L3X_Sepolia.toml
new file mode 100644
index 00000000000..ee515bb72ba
--- /dev/null
+++ b/core/chains/evm/config/toml/defaults/L3X_Sepolia.toml
@@ -0,0 +1,18 @@
+ChainID = '12325'
+ChainType = 'arbitrum'
+FinalityTagEnabled = true
+FinalityDepth = 10
+LinkContractAddress = '0xa71848C99155DA0b245981E5ebD1C94C4be51c43'
+# Produces blocks on-demand
+NoNewHeadsThreshold = '0'
+OCR.ContractConfirmations = 1
+LogPollInterval = '10s'
+
+[GasEstimator]
+Mode = 'Arbitrum'
+LimitMax = 1_000_000_000
+# Arbitrum-based chains use the suggested gas price, so we don't want to place any limits on the minimum
+PriceMin = '0'
+PriceDefault = '0.1 gwei'
+FeeCapDefault = '1000 gwei'
+BumpThreshold = 5
diff --git a/docs/CONFIG.md b/docs/CONFIG.md
index 240ccf1bd42..5caab7614e8 100644
--- a/docs/CONFIG.md
+++ b/docs/CONFIG.md
@@ -5242,6 +5242,196 @@ GasLimit = 5400000
+Arbitrum Mainnet (42161)
```toml
From f5e0bd614a6c42d195c4ad74a10f7070970d01d5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Deividas=20Kar=C5=BEinauskas?=
Date: Fri, 2 Aug 2024 16:42:00 +0300
Subject: [PATCH 02/52] Disallow zero address signers + pin Solidity version
(#13993)
* Disallow zero address signer
* pragma ^0.8.19 => 0.8.24
* Changesets
* Update gethwrappers
---------
Co-authored-by: app-token-issuer-infra-releng[bot] <120227048+app-token-issuer-infra-releng[bot]@users.noreply.github.com>
---
.changeset/slimy-forks-wait.md | 5 +++
contracts/.changeset/silver-pots-cover.md | 5 +++
contracts/gas-snapshots/keystone.gas-snapshot | 40 ++++++++++---------
.../src/v0.8/keystone/KeystoneForwarder.sol | 3 +-
.../src/v0.8/keystone/OCR3Capability.sol | 2 +-
.../interfaces/ICapabilityConfiguration.sol | 2 +-
.../v0.8/keystone/interfaces/IReceiver.sol | 2 +-
.../src/v0.8/keystone/interfaces/IRouter.sol | 2 +-
.../src/v0.8/keystone/ocr/OCR2Abstract.sol | 2 +-
.../src/v0.8/keystone/test/BaseTest.t.sol | 2 +-
.../CapabilitiesRegistry_AddDONTest.t.sol | 2 +-
...esRegistry_DeprecateCapabilitiesTest.t.sol | 2 +-
...bilitiesRegistry_GetCapabilitiesTest.t.sol | 2 +-
.../CapabilitiesRegistry_GetDONsTest.t.sol | 2 +-
...esRegistry_GetHashedCapabilityIdTest.t.sol | 2 +-
...ilitiesRegistry_GetNodeOperatorsTest.t.sol | 2 +-
.../CapabilitiesRegistry_GetNodesTest.t.sol | 2 +-
...tiesRegistry_UpdateNodeOperatorsTest.t.sol | 2 +-
.../src/v0.8/keystone/test/Constants.t.sol | 2 +-
.../test/KeystoneForwarderBaseTest.t.sol | 2 +-
.../test/KeystoneForwarder_ReportTest.t.sol | 2 +-
.../KeystoneForwarder_SetConfigTest.t.sol | 10 ++++-
...KeystoneForwarder_TypeAndVersionTest.t.sol | 2 +-
.../src/v0.8/keystone/test/mocks/Receiver.sol | 2 +-
.../keystone/generated/forwarder/forwarder.go | 2 +-
...rapper-dependency-versions-do-not-edit.txt | 2 +-
26 files changed, 63 insertions(+), 42 deletions(-)
create mode 100644 .changeset/slimy-forks-wait.md
create mode 100644 contracts/.changeset/silver-pots-cover.md
diff --git a/.changeset/slimy-forks-wait.md b/.changeset/slimy-forks-wait.md
new file mode 100644
index 00000000000..0408383bd03
--- /dev/null
+++ b/.changeset/slimy-forks-wait.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+#internal
diff --git a/contracts/.changeset/silver-pots-cover.md b/contracts/.changeset/silver-pots-cover.md
new file mode 100644
index 00000000000..93fba83b558
--- /dev/null
+++ b/contracts/.changeset/silver-pots-cover.md
@@ -0,0 +1,5 @@
+---
+'@chainlink/contracts': patch
+---
+
+#internal
diff --git a/contracts/gas-snapshots/keystone.gas-snapshot b/contracts/gas-snapshots/keystone.gas-snapshot
index 2880e4c0e31..759e287b010 100644
--- a/contracts/gas-snapshots/keystone.gas-snapshot
+++ b/contracts/gas-snapshots/keystone.gas-snapshot
@@ -63,24 +63,25 @@ CapabilitiesRegistry_UpdateDONTest:test_RevertWhen_DuplicateCapabilityAdded() (g
CapabilitiesRegistry_UpdateDONTest:test_RevertWhen_DuplicateNodeAdded() (gas: 107643)
CapabilitiesRegistry_UpdateDONTest:test_RevertWhen_NodeDoesNotSupportCapability() (gas: 163357)
CapabilitiesRegistry_UpdateDONTest:test_UpdatesDON() (gas: 371909)
-CapabilitiesRegistry_UpdateNodeOperatorTest:test_RevertWhen_CalledByNonAdminAndNonOwner() (gas: 20631)
+CapabilitiesRegistry_UpdateNodeOperatorTest:test_RevertWhen_CalledByNonAdminAndNonOwner() (gas: 20728)
CapabilitiesRegistry_UpdateNodeOperatorTest:test_RevertWhen_NodeOperatorAdminIsZeroAddress() (gas: 20052)
CapabilitiesRegistry_UpdateNodeOperatorTest:test_RevertWhen_NodeOperatorDoesNotExist() (gas: 19790)
CapabilitiesRegistry_UpdateNodeOperatorTest:test_RevertWhen_NodeOperatorIdAndParamLengthsMismatch() (gas: 15430)
-CapabilitiesRegistry_UpdateNodeOperatorTest:test_UpdatesNodeOperator() (gas: 36937)
-CapabilitiesRegistry_UpdateNodesTest:test_CanUpdateParamsIfNodeSignerAddressNoLongerUsed() (gas: 256157)
-CapabilitiesRegistry_UpdateNodesTest:test_OwnerCanUpdateNodes() (gas: 162059)
-CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_AddingNodeWithInvalidCapability() (gas: 35766)
-CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_CalledByNonNodeOperatorAdminAndNonOwner() (gas: 25069)
-CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_NodeDoesNotExist() (gas: 27308)
-CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_NodeSignerAlreadyAssignedToAnotherNode() (gas: 29219)
-CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_P2PIDEmpty() (gas: 27296)
-CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_RemovingCapabilityRequiredByCapabilityDON() (gas: 470803)
-CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_RemovingCapabilityRequiredByWorkflowDON() (gas: 341084)
-CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_SignerAddressEmpty() (gas: 26951)
-CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_UpdatingNodeWithoutCapabilities() (gas: 25480)
-CapabilitiesRegistry_UpdateNodesTest:test_UpdatesNodeParams() (gas: 162113)
-KeystoneForwarder_ReportTest:test_Report_ConfigVersion() (gas: 1797755)
+CapabilitiesRegistry_UpdateNodeOperatorTest:test_UpdatesNodeOperator() (gas: 37034)
+CapabilitiesRegistry_UpdateNodesTest:test_CanUpdateParamsIfNodeSignerAddressNoLongerUsed() (gas: 256371)
+CapabilitiesRegistry_UpdateNodesTest:test_OwnerCanUpdateNodes() (gas: 162166)
+CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_AddingNodeWithInvalidCapability() (gas: 35873)
+CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_CalledByAnotherNodeOperatorAdmin() (gas: 29200)
+CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_CalledByNonNodeOperatorAdminAndNonOwner() (gas: 29377)
+CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_NodeDoesNotExist() (gas: 29199)
+CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_NodeSignerAlreadyAssignedToAnotherNode() (gas: 31326)
+CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_P2PIDEmpty() (gas: 29165)
+CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_RemovingCapabilityRequiredByCapabilityDON() (gas: 470910)
+CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_RemovingCapabilityRequiredByWorkflowDON() (gas: 341191)
+CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_SignerAddressEmpty() (gas: 29058)
+CapabilitiesRegistry_UpdateNodesTest:test_RevertWhen_UpdatingNodeWithoutCapabilities() (gas: 27587)
+CapabilitiesRegistry_UpdateNodesTest:test_UpdatesNodeParams() (gas: 162220)
+KeystoneForwarder_ReportTest:test_Report_ConfigVersion() (gas: 1798375)
KeystoneForwarder_ReportTest:test_Report_FailedDeliveryWhenReceiverInterfaceNotSupported() (gas: 125910)
KeystoneForwarder_ReportTest:test_Report_FailedDeliveryWhenReceiverNotContract() (gas: 127403)
KeystoneForwarder_ReportTest:test_Report_SuccessfulDelivery() (gas: 155928)
@@ -96,10 +97,11 @@ KeystoneForwarder_ReportTest:test_RevertWhen_TooManySignatures() (gas: 56050)
KeystoneForwarder_SetConfigTest:test_RevertWhen_ExcessSigners() (gas: 20184)
KeystoneForwarder_SetConfigTest:test_RevertWhen_FaultToleranceIsZero() (gas: 88057)
KeystoneForwarder_SetConfigTest:test_RevertWhen_InsufficientSigners() (gas: 14533)
-KeystoneForwarder_SetConfigTest:test_RevertWhen_NotOwner() (gas: 88788)
-KeystoneForwarder_SetConfigTest:test_RevertWhen_ProvidingDuplicateSigners() (gas: 114507)
-KeystoneForwarder_SetConfigTest:test_SetConfig_FirstTime() (gas: 1539921)
-KeystoneForwarder_SetConfigTest:test_SetConfig_WhenSignersAreRemoved() (gas: 1534476)
+KeystoneForwarder_SetConfigTest:test_RevertWhen_NotOwner() (gas: 88766)
+KeystoneForwarder_SetConfigTest:test_RevertWhen_ProvidingDuplicateSigners() (gas: 114570)
+KeystoneForwarder_SetConfigTest:test_RevertWhen_ProvidingZeroAddressSigner() (gas: 114225)
+KeystoneForwarder_SetConfigTest:test_SetConfig_FirstTime() (gas: 1540541)
+KeystoneForwarder_SetConfigTest:test_SetConfig_WhenSignersAreRemoved() (gas: 1535211)
KeystoneForwarder_TypeAndVersionTest:test_TypeAndVersion() (gas: 9641)
KeystoneRouter_SetConfigTest:test_AddForwarder_RevertWhen_NotOwner() (gas: 10978)
KeystoneRouter_SetConfigTest:test_RemoveForwarder_RevertWhen_NotOwner() (gas: 10923)
diff --git a/contracts/src/v0.8/keystone/KeystoneForwarder.sol b/contracts/src/v0.8/keystone/KeystoneForwarder.sol
index 4b44feccbfe..b18e381cc6f 100644
--- a/contracts/src/v0.8/keystone/KeystoneForwarder.sol
+++ b/contracts/src/v0.8/keystone/KeystoneForwarder.sol
@@ -49,7 +49,7 @@ contract KeystoneForwarder is OwnerIsCreator, ITypeAndVersion, IRouter {
error InvalidConfig(uint64 configId);
/// @notice This error is thrown whenever a signer address is not in the
- /// configuration.
+ /// configuration or when trying to set a zero address as a signer.
/// @param signer The signer address that was not in the configuration
error InvalidSigner(address signer);
@@ -187,6 +187,7 @@ contract KeystoneForwarder is OwnerIsCreator, ITypeAndVersion, IRouter {
for (uint256 i = 0; i < signers.length; ++i) {
// assign indices, detect duplicates
address signer = signers[i];
+ if (signer == address(0)) revert InvalidSigner(signer);
if (s_configs[configId]._positions[signer] != 0) revert DuplicateSigner(signer);
s_configs[configId]._positions[signer] = i + 1;
}
diff --git a/contracts/src/v0.8/keystone/OCR3Capability.sol b/contracts/src/v0.8/keystone/OCR3Capability.sol
index 8613a803b20..1ba934b1c40 100644
--- a/contracts/src/v0.8/keystone/OCR3Capability.sol
+++ b/contracts/src/v0.8/keystone/OCR3Capability.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {OCR2Base} from "./ocr/OCR2Base.sol";
diff --git a/contracts/src/v0.8/keystone/interfaces/ICapabilityConfiguration.sol b/contracts/src/v0.8/keystone/interfaces/ICapabilityConfiguration.sol
index 429c2a1d3aa..702d55dba9d 100644
--- a/contracts/src/v0.8/keystone/interfaces/ICapabilityConfiguration.sol
+++ b/contracts/src/v0.8/keystone/interfaces/ICapabilityConfiguration.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
/// @notice Interface for capability configuration contract. It MUST be
/// implemented for a contract to be used as a capability configuration.
diff --git a/contracts/src/v0.8/keystone/interfaces/IReceiver.sol b/contracts/src/v0.8/keystone/interfaces/IReceiver.sol
index f58c2da7ae1..3af340a1215 100644
--- a/contracts/src/v0.8/keystone/interfaces/IReceiver.sol
+++ b/contracts/src/v0.8/keystone/interfaces/IReceiver.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
/// @title IReceiver - receives keystone reports
interface IReceiver {
diff --git a/contracts/src/v0.8/keystone/interfaces/IRouter.sol b/contracts/src/v0.8/keystone/interfaces/IRouter.sol
index a36c17c14d6..95d11b0bb3a 100644
--- a/contracts/src/v0.8/keystone/interfaces/IRouter.sol
+++ b/contracts/src/v0.8/keystone/interfaces/IRouter.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
/// @title IRouter - delivers keystone reports to receiver
interface IRouter {
diff --git a/contracts/src/v0.8/keystone/ocr/OCR2Abstract.sol b/contracts/src/v0.8/keystone/ocr/OCR2Abstract.sol
index 083a4045344..3c1e304748f 100644
--- a/contracts/src/v0.8/keystone/ocr/OCR2Abstract.sol
+++ b/contracts/src/v0.8/keystone/ocr/OCR2Abstract.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {ITypeAndVersion} from "../../shared/interfaces/ITypeAndVersion.sol";
diff --git a/contracts/src/v0.8/keystone/test/BaseTest.t.sol b/contracts/src/v0.8/keystone/test/BaseTest.t.sol
index e637406c145..64dc018c3ac 100644
--- a/contracts/src/v0.8/keystone/test/BaseTest.t.sol
+++ b/contracts/src/v0.8/keystone/test/BaseTest.t.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {Test} from "forge-std/Test.sol";
import {Constants} from "./Constants.t.sol";
diff --git a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_AddDONTest.t.sol b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_AddDONTest.t.sol
index fff6623a59b..65c85e4f74c 100644
--- a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_AddDONTest.t.sol
+++ b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_AddDONTest.t.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {BaseTest} from "./BaseTest.t.sol";
import {ICapabilityConfiguration} from "../interfaces/ICapabilityConfiguration.sol";
diff --git a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_DeprecateCapabilitiesTest.t.sol b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_DeprecateCapabilitiesTest.t.sol
index 4d289e7c745..e06fa4a703a 100644
--- a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_DeprecateCapabilitiesTest.t.sol
+++ b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_DeprecateCapabilitiesTest.t.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {BaseTest} from "./BaseTest.t.sol";
import {CapabilitiesRegistry} from "../CapabilitiesRegistry.sol";
diff --git a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetCapabilitiesTest.t.sol b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetCapabilitiesTest.t.sol
index 9702c62b9c7..8f39183ee79 100644
--- a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetCapabilitiesTest.t.sol
+++ b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetCapabilitiesTest.t.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {BaseTest} from "./BaseTest.t.sol";
import {CapabilitiesRegistry} from "../CapabilitiesRegistry.sol";
diff --git a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetDONsTest.t.sol b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetDONsTest.t.sol
index a83b1421d3c..a79485abad1 100644
--- a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetDONsTest.t.sol
+++ b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetDONsTest.t.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {BaseTest} from "./BaseTest.t.sol";
diff --git a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetHashedCapabilityIdTest.t.sol b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetHashedCapabilityIdTest.t.sol
index b9a6e6dc97a..cdfb0eb6439 100644
--- a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetHashedCapabilityIdTest.t.sol
+++ b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetHashedCapabilityIdTest.t.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {BaseTest} from "./BaseTest.t.sol";
import {CapabilityConfigurationContract} from "./mocks/CapabilityConfigurationContract.sol";
diff --git a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetNodeOperatorsTest.t.sol b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetNodeOperatorsTest.t.sol
index 36ef201a998..471f4a86ade 100644
--- a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetNodeOperatorsTest.t.sol
+++ b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetNodeOperatorsTest.t.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {BaseTest} from "./BaseTest.t.sol";
import {CapabilitiesRegistry} from "../CapabilitiesRegistry.sol";
diff --git a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetNodesTest.t.sol b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetNodesTest.t.sol
index 901e7b92728..a5fe5fa1d1a 100644
--- a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetNodesTest.t.sol
+++ b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_GetNodesTest.t.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {BaseTest} from "./BaseTest.t.sol";
import {CapabilitiesRegistry} from "../CapabilitiesRegistry.sol";
diff --git a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_UpdateNodeOperatorsTest.t.sol b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_UpdateNodeOperatorsTest.t.sol
index 721fd35eae7..8f6be580f49 100644
--- a/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_UpdateNodeOperatorsTest.t.sol
+++ b/contracts/src/v0.8/keystone/test/CapabilitiesRegistry_UpdateNodeOperatorsTest.t.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {BaseTest} from "./BaseTest.t.sol";
import {CapabilitiesRegistry} from "../CapabilitiesRegistry.sol";
diff --git a/contracts/src/v0.8/keystone/test/Constants.t.sol b/contracts/src/v0.8/keystone/test/Constants.t.sol
index 23c80eea9f1..a540a255725 100644
--- a/contracts/src/v0.8/keystone/test/Constants.t.sol
+++ b/contracts/src/v0.8/keystone/test/Constants.t.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
contract Constants {
address internal constant ADMIN = address(1);
diff --git a/contracts/src/v0.8/keystone/test/KeystoneForwarderBaseTest.t.sol b/contracts/src/v0.8/keystone/test/KeystoneForwarderBaseTest.t.sol
index 3b3c4060780..c106c2b2b21 100644
--- a/contracts/src/v0.8/keystone/test/KeystoneForwarderBaseTest.t.sol
+++ b/contracts/src/v0.8/keystone/test/KeystoneForwarderBaseTest.t.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {Test} from "forge-std/Test.sol";
import {Receiver} from "./mocks/Receiver.sol";
diff --git a/contracts/src/v0.8/keystone/test/KeystoneForwarder_ReportTest.t.sol b/contracts/src/v0.8/keystone/test/KeystoneForwarder_ReportTest.t.sol
index ccb398fac5a..56e421a8c94 100644
--- a/contracts/src/v0.8/keystone/test/KeystoneForwarder_ReportTest.t.sol
+++ b/contracts/src/v0.8/keystone/test/KeystoneForwarder_ReportTest.t.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {BaseTest} from "./KeystoneForwarderBaseTest.t.sol";
import {IRouter} from "../interfaces/IRouter.sol";
diff --git a/contracts/src/v0.8/keystone/test/KeystoneForwarder_SetConfigTest.t.sol b/contracts/src/v0.8/keystone/test/KeystoneForwarder_SetConfigTest.t.sol
index 4b908bb702f..5dcf79b38ec 100644
--- a/contracts/src/v0.8/keystone/test/KeystoneForwarder_SetConfigTest.t.sol
+++ b/contracts/src/v0.8/keystone/test/KeystoneForwarder_SetConfigTest.t.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {BaseTest} from "./KeystoneForwarderBaseTest.t.sol";
import {KeystoneForwarder} from "../KeystoneForwarder.sol";
@@ -41,6 +41,14 @@ contract KeystoneForwarder_SetConfigTest is BaseTest {
s_forwarder.setConfig(DON_ID, CONFIG_VERSION, F, signers);
}
+ function test_RevertWhen_ProvidingZeroAddressSigner() public {
+ address[] memory signers = _getSignerAddresses();
+ signers[1] = address(0);
+
+ vm.expectRevert(abi.encodeWithSelector(KeystoneForwarder.InvalidSigner.selector, signers[1]));
+ s_forwarder.setConfig(DON_ID, CONFIG_VERSION, F, signers);
+ }
+
function test_SetConfig_FirstTime() public {
s_forwarder.setConfig(DON_ID, CONFIG_VERSION, F, _getSignerAddresses());
}
diff --git a/contracts/src/v0.8/keystone/test/KeystoneForwarder_TypeAndVersionTest.t.sol b/contracts/src/v0.8/keystone/test/KeystoneForwarder_TypeAndVersionTest.t.sol
index 8aad3766497..5a5cc70d2bb 100644
--- a/contracts/src/v0.8/keystone/test/KeystoneForwarder_TypeAndVersionTest.t.sol
+++ b/contracts/src/v0.8/keystone/test/KeystoneForwarder_TypeAndVersionTest.t.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {BaseTest} from "./KeystoneForwarderBaseTest.t.sol";
diff --git a/contracts/src/v0.8/keystone/test/mocks/Receiver.sol b/contracts/src/v0.8/keystone/test/mocks/Receiver.sol
index 25e8755641b..4d6bd2d3acf 100644
--- a/contracts/src/v0.8/keystone/test/mocks/Receiver.sol
+++ b/contracts/src/v0.8/keystone/test/mocks/Receiver.sol
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.19;
+pragma solidity 0.8.24;
import {IReceiver} from "../../interfaces/IReceiver.sol";
diff --git a/core/gethwrappers/keystone/generated/forwarder/forwarder.go b/core/gethwrappers/keystone/generated/forwarder/forwarder.go
index 0412241cf77..3b6fba5c7c2 100644
--- a/core/gethwrappers/keystone/generated/forwarder/forwarder.go
+++ b/core/gethwrappers/keystone/generated/forwarder/forwarder.go
@@ -32,7 +32,7 @@ var (
var KeystoneForwarderMetaData = &bind.MetaData{
ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"transmissionId\",\"type\":\"bytes32\"}],\"name\":\"AlreadyAttempted\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"signer\",\"type\":\"address\"}],\"name\":\"DuplicateSigner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"numSigners\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxSigners\",\"type\":\"uint256\"}],\"name\":\"ExcessSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"FaultToleranceMustBePositive\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"numSigners\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"minSigners\",\"type\":\"uint256\"}],\"name\":\"InsufficientSigners\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"configId\",\"type\":\"uint64\"}],\"name\":\"InvalidConfig\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidReport\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"signature\",\"type\":\"bytes\"}],\"name\":\"InvalidSignature\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"expected\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"received\",\"type\":\"uint256\"}],\"name\":\"InvalidSignatureCount\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"signer\",\"type\":\"address\"}],\"name\":\"InvalidSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnauthorizedForwarder\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint32\",\"name\":\"donId\",\"type\":\"uint32\"},{\"indexed\":true,\"internalType\":\"uint32\",\"name\":\"configVersion\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"add
ress[]\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"forwarder\",\"type\":\"address\"}],\"name\":\"ForwarderAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"forwarder\",\"type\":\"address\"}],\"name\":\"ForwarderRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"receiver\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"workflowExecutionId\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"bytes2\",\"name\":\"reportId\",\"type\":\"bytes2\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"result\",\"type\":\"bool\"}],\"name\":\"ReportProcessed\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"forwarder\",\"type\":\"address\"}],\"name\":\"addForwarder\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"donId\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"configVersion\",\"type\":\"uint32\"}],\"name\":\"clearConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"rece
iver\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"workflowExecutionId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes2\",\"name\":\"reportId\",\"type\":\"bytes2\"}],\"name\":\"getTransmissionId\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"receiver\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"workflowExecutionId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes2\",\"name\":\"reportId\",\"type\":\"bytes2\"}],\"name\":\"getTransmissionState\",\"outputs\":[{\"internalType\":\"enumIRouter.TransmissionState\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"receiver\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"workflowExecutionId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes2\",\"name\":\"reportId\",\"type\":\"bytes2\"}],\"name\":\"getTransmitter\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"forwarder\",\"type\":\"address\"}],\"name\":\"isForwarder\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"forwarder\",\"type\":\"address\"}],\"name\":\"removeForwarder\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"receiver\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"rawReport\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"reportContext\",\"ty
pe\":\"bytes\"},{\"internalType\":\"bytes[]\",\"name\":\"signatures\",\"type\":\"bytes[]\"}],\"name\":\"report\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"transmissionId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"receiver\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"validatedReport\",\"type\":\"bytes\"}],\"name\":\"route\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"donId\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"configVersion\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
- Bin: "0x608060405234801561001057600080fd5b5033806000816100675760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b038481169190911790915581161561009757610097816100b9565b5050306000908152600360205260409020805460ff1916600117905550610162565b336001600160a01b038216036101115760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161005e565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b611b2d80620001726000396000f3fe608060405234801561001057600080fd5b50600436106100ea5760003560e01c806379ba50971161008c578063abcef55411610066578063abcef5541461023e578063ee59d26c14610277578063ef6e17a01461028a578063f2fde38b1461029d57600080fd5b806379ba5097146101e05780638864b864146101e85780638da5cb5b1461022057600080fd5b8063354bdd66116100c8578063354bdd661461017957806343c164671461019a5780634d93172d146101ba5780635c41d2fe146101cd57600080fd5b806311289565146100ef578063181f5a7714610104578063233fd52d14610156575b600080fd5b6101026100fd366004611474565b6102b0565b005b6101406040518060400160405280601a81526020017f466f7277617264657220616e6420526f7574657220312e302e3000000000000081525081565b60405161014d919061151f565b60405180910390f35b61016961016436600461158c565b61080d565b604051901515815260200161014d565b61018c610187366004611614565b610a00565b60405190815260200161014d565b6101ad6101a8366004611614565b610a84565b60405161014d9190611679565b6101026101c83660046116ba565b610b09565b6101026101db3660046116ba565b610b85565b610102610c04565b6101fb6101f6366004611614565b610d01565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200161014d565b60005473ffffffffffffffffffffffffffffffffffffffff166101fb565b61016961024c3660046116ba565b73ffffffffffffffffffffffffffffffffffffffff166000908152600360205260409020
5460ff1690565b6101026102853660046116e9565b610d41565b610102610298366004611767565b6110ba565b6101026102ab3660046116ba565b61115a565b606d8510156102eb576040517fb55ac75400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600080600061032f89898080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061116e92505050565b67ffffffffffffffff8216600090815260026020526040812080549497509195509193509160ff16908190036103a2576040517fdf3b81ea00000000000000000000000000000000000000000000000000000000815267ffffffffffffffff841660048201526024015b60405180910390fd5b856103ae8260016117c9565b60ff1614610400576103c18160016117c9565b6040517fd6022e8e00000000000000000000000000000000000000000000000000000000815260ff909116600482015260248101879052604401610399565b60008b8b6040516104129291906117e8565b60405190819003812061042b918c908c906020016117f8565b60405160208183030381529060405280519060200120905061044b611301565b60005b888110156106cd573660008b8b8481811061046b5761046b611812565b905060200281019061047d9190611841565b9092509050604181146104c05781816040517f2adfdc300000000000000000000000000000000000000000000000000000000081526004016103999291906118ef565b6000600186848460408181106104d8576104d8611812565b6104ea92013560f81c9050601b6117c9565b6104f860206000878961190b565b61050191611935565b61050f60406020888a61190b565b61051891611935565b6040805160008152602081018083529590955260ff909316928401929092526060830152608082015260a0016020604051602081039080840390855afa158015610566573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0015173ffffffffffffffffffffffffffffffffffffffff8116600090815260028c0160205291822054909350915081900361060c576040517fbf18af4300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff83166004820152602401610399565b600086826020811061062057610620611812565b602002015173ffffffffffffffffffffffffffffffffffffffff161461068a576040517fe021c4f200000000000000000000000000000000
000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff83166004820152602401610399565b8186826020811061069d5761069d611812565b73ffffffffffffffffffffffffffffffffffffffff909216602092909202015250506001909201915061044e9050565b50505050505060003073ffffffffffffffffffffffffffffffffffffffff1663233fd52d6106fc8c8686610a00565b338d8d8d602d90606d926107129392919061190b565b8f8f606d9080926107259392919061190b565b6040518863ffffffff1660e01b81526004016107479796959493929190611971565b6020604051808303816000875af1158015610766573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061078a91906119d2565b9050817dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916838b73ffffffffffffffffffffffffffffffffffffffff167f3617b009e9785c42daebadb6d3fb553243a4bf586d07ea72d65d80013ce116b5846040516107f9911515815260200190565b60405180910390a450505050505050505050565b3360009081526003602052604081205460ff16610856576040517fd79e123d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008881526004602052604090205473ffffffffffffffffffffffffffffffffffffffff16156108b5576040517fa53dc8ca00000000000000000000000000000000000000000000000000000000815260048101899052602401610399565b600088815260046020526040812080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff8a81169190911790915587163b9003610917575060006109f5565b6040517f805f213200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff87169063805f21329061096f9088908890889088906004016119f4565b600060405180830381600087803b15801561098957600080fd5b505af192505050801561099a575060015b6109a6575060006109f5565b50600087815260046020526040902080547fffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff167401000000000000000000000000000000000000000017905560015b979650505050505050565b6040517fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606085901b166020820152603481018390527fffff00000000000000
0000000000000000000000000000000000000000000000821660548201526000906056016040516020818303038152906040528051906020012090505b9392505050565b600080610a92858585610a00565b60008181526004602052604090205490915073ffffffffffffffffffffffffffffffffffffffff16610ac8576000915050610a7d565b60008181526004602052604090205474010000000000000000000000000000000000000000900460ff16610afd576002610b00565b60015b95945050505050565b610b11611189565b73ffffffffffffffffffffffffffffffffffffffff811660008181526003602052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055517fb96d15bf9258c7b8df062753a6a262864611fc7b060a5ee2e57e79b85f898d389190a250565b610b8d611189565b73ffffffffffffffffffffffffffffffffffffffff811660008181526003602052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055517f0ea0ce2c048ff45a4a95f2947879de3fb94abec2f152190400cab2d1272a68e79190a250565b60015473ffffffffffffffffffffffffffffffffffffffff163314610c85576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000000000000000000006044820152606401610399565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b600060046000610d12868686610a00565b815260208101919091526040016000205473ffffffffffffffffffffffffffffffffffffffff16949350505050565b610d49611189565b8260ff16600003610d86576040517f0743bae600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b601f811115610dcb576040517f61750f4000000000000000000000000000000000000000000000000000000000815260048101829052601f6024820152604401610399565b610dd6836003611a1b565b60ff168111610e345780610deb846003611a1b565b610df69060016117c9565b6040517f9dd9e6d8000000000000000000000000000000000000000000000000000000008152600481019290925260ff1660
24820152604401610399565b67ffffffff00000000602086901b1663ffffffff85161760005b67ffffffffffffffff8216600090815260026020526040902060010154811015610ee45767ffffffffffffffff8216600090815260026020819052604082206001810180549190920192919084908110610eaa57610eaa611812565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff168352820192909252604001812055600101610e4e565b5060005b82811015610ffc576000848483818110610f0457610f04611812565b9050602002016020810190610f1991906116ba565b67ffffffffffffffff8416600090815260026020818152604080842073ffffffffffffffffffffffffffffffffffffffff86168552909201905290205490915015610fa8576040517fe021c4f200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff82166004820152602401610399565b610fb3826001611a3e565b67ffffffffffffffff8416600090815260026020818152604080842073ffffffffffffffffffffffffffffffffffffffff90961684529490910190529190912055600101610ee8565b5067ffffffffffffffff81166000908152600260205260409020611024906001018484611320565b5067ffffffffffffffff81166000908152600260205260409081902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff87161790555163ffffffff86811691908816907f4120bd3b23957dd423555817d55654d4481b438aa15485c21b4180c784f1a455906110aa90889088908890611a51565b60405180910390a3505050505050565b6110c2611189565b63ffffffff818116602084811b67ffffffff00000000168217600090815260028252604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690558051828152928301905291928516917f4120bd3b23957dd423555817d55654d4481b438aa15485c21b4180c784f1a4559160405161114e929190611ab7565b60405180910390a35050565b611162611189565b61116b8161120c565b50565b60218101516045820151608b90920151909260c09290921c91565b60005473ffffffffffffffffffffffffffffffffffffffff16331461120a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610399565b565b3373ffff
ffffffffffffffffffffffffffffffffffff82160361128b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610399565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6040518061040001604052806020906020820280368337509192915050565b828054828255906000526020600020908101928215611398579160200282015b828111156113985781547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff843516178255602090920191600190910190611340565b506113a49291506113a8565b5090565b5b808211156113a457600081556001016113a9565b803573ffffffffffffffffffffffffffffffffffffffff811681146113e157600080fd5b919050565b60008083601f8401126113f857600080fd5b50813567ffffffffffffffff81111561141057600080fd5b60208301915083602082850101111561142857600080fd5b9250929050565b60008083601f84011261144157600080fd5b50813567ffffffffffffffff81111561145957600080fd5b6020830191508360208260051b850101111561142857600080fd5b60008060008060008060006080888a03121561148f57600080fd5b611498886113bd565b9650602088013567ffffffffffffffff808211156114b557600080fd5b6114c18b838c016113e6565b909850965060408a01359150808211156114da57600080fd5b6114e68b838c016113e6565b909650945060608a01359150808211156114ff57600080fd5b5061150c8a828b0161142f565b989b979a50959850939692959293505050565b60006020808352835180602085015260005b8181101561154d57858101830151858201604001528201611531565b5060006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b600080600080600080600060a0888a0312156115a757600080fd5b873596506115b7602089016113bd565b95506115c5604089016113bd565b9450606088013567ffffffffffffffff808211156115e257600080fd5b6115ee8b838c016113e6565b909650945060808a0135
91508082111561160757600080fd5b5061150c8a828b016113e6565b60008060006060848603121561162957600080fd5b611632846113bd565b92506020840135915060408401357fffff0000000000000000000000000000000000000000000000000000000000008116811461166e57600080fd5b809150509250925092565b60208101600383106116b4577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b91905290565b6000602082840312156116cc57600080fd5b610a7d826113bd565b803563ffffffff811681146113e157600080fd5b60008060008060006080868803121561170157600080fd5b61170a866116d5565b9450611718602087016116d5565b9350604086013560ff8116811461172e57600080fd5b9250606086013567ffffffffffffffff81111561174a57600080fd5b6117568882890161142f565b969995985093965092949392505050565b6000806040838503121561177a57600080fd5b611783836116d5565b9150611791602084016116d5565b90509250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60ff81811683821601908111156117e2576117e261179a565b92915050565b8183823760009101908152919050565b838152818360208301376000910160200190815292915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe184360301811261187657600080fd5b83018035915067ffffffffffffffff82111561189157600080fd5b60200191503681900382131561142857600080fd5b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b6020815260006119036020830184866118a6565b949350505050565b6000808585111561191b57600080fd5b8386111561192857600080fd5b5050820193919092039150565b803560208310156117e2577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff602084900360031b1b1692915050565b878152600073ffffffffffffffffffffffffffffffffffffffff808916602084015280881660408401525060a060608301526119b160a0830186886118a6565b82810360808401526119c48185876118a6565b9a9950505050505050505050565b6000602082840312156119e45760
0080fd5b81518015158114610a7d57600080fd5b604081526000611a086040830186886118a6565b82810360208401526109f58185876118a6565b60ff8181168382160290811690818114611a3757611a3761179a565b5092915050565b808201808211156117e2576117e261179a565b60ff8416815260406020808301829052908201839052600090849060608401835b86811015611aab5773ffffffffffffffffffffffffffffffffffffffff611a98856113bd565b1682529282019290820190600101611a72565b50979650505050505050565b60006040820160ff8516835260206040602085015281855180845260608601915060208701935060005b81811015611b1357845173ffffffffffffffffffffffffffffffffffffffff1683529383019391830191600101611ae1565b509097965050505050505056fea164736f6c6343000818000a",
+ Bin: "0x608060405234801561001057600080fd5b5033806000816100675760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b038481169190911790915581161561009757610097816100b9565b5050306000908152600360205260409020805460ff1916600117905550610162565b336001600160a01b038216036101115760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161005e565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b611b9180620001726000396000f3fe608060405234801561001057600080fd5b50600436106100ea5760003560e01c806379ba50971161008c578063abcef55411610066578063abcef5541461023e578063ee59d26c14610277578063ef6e17a01461028a578063f2fde38b1461029d57600080fd5b806379ba5097146101e05780638864b864146101e85780638da5cb5b1461022057600080fd5b8063354bdd66116100c8578063354bdd661461017957806343c164671461019a5780634d93172d146101ba5780635c41d2fe146101cd57600080fd5b806311289565146100ef578063181f5a7714610104578063233fd52d14610156575b600080fd5b6101026100fd3660046114d8565b6102b0565b005b6101406040518060400160405280601a81526020017f466f7277617264657220616e6420526f7574657220312e302e3000000000000081525081565b60405161014d9190611583565b60405180910390f35b6101696101643660046115f0565b61080d565b604051901515815260200161014d565b61018c610187366004611678565b610a00565b60405190815260200161014d565b6101ad6101a8366004611678565b610a84565b60405161014d91906116dd565b6101026101c836600461171e565b610b09565b6101026101db36600461171e565b610b85565b610102610c04565b6101fb6101f6366004611678565b610d01565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200161014d565b60005473ffffffffffffffffffffffffffffffffffffffff166101fb565b61016961024c36600461171e565b73ffffffffffffffffffffffffffffffffffffffff166000908152600360205260409020
5460ff1690565b61010261028536600461174d565b610d41565b6101026102983660046117cb565b61111e565b6101026102ab36600461171e565b6111be565b606d8510156102eb576040517fb55ac75400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600080600061032f89898080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506111d292505050565b67ffffffffffffffff8216600090815260026020526040812080549497509195509193509160ff16908190036103a2576040517fdf3b81ea00000000000000000000000000000000000000000000000000000000815267ffffffffffffffff841660048201526024015b60405180910390fd5b856103ae82600161182d565b60ff1614610400576103c181600161182d565b6040517fd6022e8e00000000000000000000000000000000000000000000000000000000815260ff909116600482015260248101879052604401610399565b60008b8b60405161041292919061184c565b60405190819003812061042b918c908c9060200161185c565b60405160208183030381529060405280519060200120905061044b611365565b60005b888110156106cd573660008b8b8481811061046b5761046b611876565b905060200281019061047d91906118a5565b9092509050604181146104c05781816040517f2adfdc30000000000000000000000000000000000000000000000000000000008152600401610399929190611953565b6000600186848460408181106104d8576104d8611876565b6104ea92013560f81c9050601b61182d565b6104f860206000878961196f565b61050191611999565b61050f60406020888a61196f565b61051891611999565b6040805160008152602081018083529590955260ff909316928401929092526060830152608082015260a0016020604051602081039080840390855afa158015610566573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0015173ffffffffffffffffffffffffffffffffffffffff8116600090815260028c0160205291822054909350915081900361060c576040517fbf18af4300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff83166004820152602401610399565b600086826020811061062057610620611876565b602002015173ffffffffffffffffffffffffffffffffffffffff161461068a576040517fe021c4f200000000000000000000000000000000
000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff83166004820152602401610399565b8186826020811061069d5761069d611876565b73ffffffffffffffffffffffffffffffffffffffff909216602092909202015250506001909201915061044e9050565b50505050505060003073ffffffffffffffffffffffffffffffffffffffff1663233fd52d6106fc8c8686610a00565b338d8d8d602d90606d926107129392919061196f565b8f8f606d9080926107259392919061196f565b6040518863ffffffff1660e01b815260040161074797969594939291906119d5565b6020604051808303816000875af1158015610766573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061078a9190611a36565b9050817dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916838b73ffffffffffffffffffffffffffffffffffffffff167f3617b009e9785c42daebadb6d3fb553243a4bf586d07ea72d65d80013ce116b5846040516107f9911515815260200190565b60405180910390a450505050505050505050565b3360009081526003602052604081205460ff16610856576040517fd79e123d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008881526004602052604090205473ffffffffffffffffffffffffffffffffffffffff16156108b5576040517fa53dc8ca00000000000000000000000000000000000000000000000000000000815260048101899052602401610399565b600088815260046020526040812080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff8a81169190911790915587163b9003610917575060006109f5565b6040517f805f213200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff87169063805f21329061096f908890889088908890600401611a58565b600060405180830381600087803b15801561098957600080fd5b505af192505050801561099a575060015b6109a6575060006109f5565b50600087815260046020526040902080547fffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff167401000000000000000000000000000000000000000017905560015b979650505050505050565b6040517fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606085901b166020820152603481018390527fffff00000000000000
0000000000000000000000000000000000000000000000821660548201526000906056016040516020818303038152906040528051906020012090505b9392505050565b600080610a92858585610a00565b60008181526004602052604090205490915073ffffffffffffffffffffffffffffffffffffffff16610ac8576000915050610a7d565b60008181526004602052604090205474010000000000000000000000000000000000000000900460ff16610afd576002610b00565b60015b95945050505050565b610b116111ed565b73ffffffffffffffffffffffffffffffffffffffff811660008181526003602052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055517fb96d15bf9258c7b8df062753a6a262864611fc7b060a5ee2e57e79b85f898d389190a250565b610b8d6111ed565b73ffffffffffffffffffffffffffffffffffffffff811660008181526003602052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055517f0ea0ce2c048ff45a4a95f2947879de3fb94abec2f152190400cab2d1272a68e79190a250565b60015473ffffffffffffffffffffffffffffffffffffffff163314610c85576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000000000000000000006044820152606401610399565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b600060046000610d12868686610a00565b815260208101919091526040016000205473ffffffffffffffffffffffffffffffffffffffff16949350505050565b610d496111ed565b8260ff16600003610d86576040517f0743bae600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b601f811115610dcb576040517f61750f4000000000000000000000000000000000000000000000000000000000815260048101829052601f6024820152604401610399565b610dd6836003611a7f565b60ff168111610e345780610deb846003611a7f565b610df690600161182d565b6040517f9dd9e6d8000000000000000000000000000000000000000000000000000000008152600481019290925260ff1660
24820152604401610399565b67ffffffff00000000602086901b1663ffffffff85161760005b67ffffffffffffffff8216600090815260026020526040902060010154811015610ee45767ffffffffffffffff8216600090815260026020819052604082206001810180549190920192919084908110610eaa57610eaa611876565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff168352820192909252604001812055600101610e4e565b5060005b82811015611060576000848483818110610f0457610f04611876565b9050602002016020810190610f19919061171e565b905073ffffffffffffffffffffffffffffffffffffffff8116610f80576040517fbf18af4300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff82166004820152602401610399565b67ffffffffffffffff8316600090815260026020818152604080842073ffffffffffffffffffffffffffffffffffffffff8616855290920190529020541561100c576040517fe021c4f200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff82166004820152602401610399565b611017826001611aa2565b67ffffffffffffffff8416600090815260026020818152604080842073ffffffffffffffffffffffffffffffffffffffff90961684529490910190529190912055600101610ee8565b5067ffffffffffffffff81166000908152600260205260409020611088906001018484611384565b5067ffffffffffffffff81166000908152600260205260409081902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff87161790555163ffffffff86811691908816907f4120bd3b23957dd423555817d55654d4481b438aa15485c21b4180c784f1a4559061110e90889088908890611ab5565b60405180910390a3505050505050565b6111266111ed565b63ffffffff818116602084811b67ffffffff00000000168217600090815260028252604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690558051828152928301905291928516917f4120bd3b23957dd423555817d55654d4481b438aa15485c21b4180c784f1a455916040516111b2929190611b1b565b60405180910390a35050565b6111c66111ed565b6111cf81611270565b50565b60218101516045820151608b90920151909260c09290921c91565b60005473ffffffffffffffffffffffffffffffffffffffff16331461126e576040517f
08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610399565b565b3373ffffffffffffffffffffffffffffffffffffffff8216036112ef576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610399565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6040518061040001604052806020906020820280368337509192915050565b8280548282559060005260206000209081019282156113fc579160200282015b828111156113fc5781547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff8435161782556020909201916001909101906113a4565b5061140892915061140c565b5090565b5b80821115611408576000815560010161140d565b803573ffffffffffffffffffffffffffffffffffffffff8116811461144557600080fd5b919050565b60008083601f84011261145c57600080fd5b50813567ffffffffffffffff81111561147457600080fd5b60208301915083602082850101111561148c57600080fd5b9250929050565b60008083601f8401126114a557600080fd5b50813567ffffffffffffffff8111156114bd57600080fd5b6020830191508360208260051b850101111561148c57600080fd5b60008060008060008060006080888a0312156114f357600080fd5b6114fc88611421565b9650602088013567ffffffffffffffff8082111561151957600080fd5b6115258b838c0161144a565b909850965060408a013591508082111561153e57600080fd5b61154a8b838c0161144a565b909650945060608a013591508082111561156357600080fd5b506115708a828b01611493565b989b979a50959850939692959293505050565b60006020808352835180602085015260005b818110156115b157858101830151858201604001528201611595565b5060006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b6000806000806000
80600060a0888a03121561160b57600080fd5b8735965061161b60208901611421565b955061162960408901611421565b9450606088013567ffffffffffffffff8082111561164657600080fd5b6116528b838c0161144a565b909650945060808a013591508082111561166b57600080fd5b506115708a828b0161144a565b60008060006060848603121561168d57600080fd5b61169684611421565b92506020840135915060408401357fffff000000000000000000000000000000000000000000000000000000000000811681146116d257600080fd5b809150509250925092565b6020810160038310611718577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b91905290565b60006020828403121561173057600080fd5b610a7d82611421565b803563ffffffff8116811461144557600080fd5b60008060008060006080868803121561176557600080fd5b61176e86611739565b945061177c60208701611739565b9350604086013560ff8116811461179257600080fd5b9250606086013567ffffffffffffffff8111156117ae57600080fd5b6117ba88828901611493565b969995985093965092949392505050565b600080604083850312156117de57600080fd5b6117e783611739565b91506117f560208401611739565b90509250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60ff8181168382160190811115611846576118466117fe565b92915050565b8183823760009101908152919050565b838152818360208301376000910160200190815292915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18436030181126118da57600080fd5b83018035915067ffffffffffffffff8211156118f557600080fd5b60200191503681900382131561148c57600080fd5b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b60208152600061196760208301848661190a565b949350505050565b6000808585111561197f57600080fd5b8386111561198c57600080fd5b5050820193919092039150565b80356020831015611846577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff602084900360031b1b1692915050565b878152600073ffffffffff
ffffffffffffffffffffffffffffff808916602084015280881660408401525060a06060830152611a1560a08301868861190a565b8281036080840152611a2881858761190a565b9a9950505050505050505050565b600060208284031215611a4857600080fd5b81518015158114610a7d57600080fd5b604081526000611a6c60408301868861190a565b82810360208401526109f581858761190a565b60ff8181168382160290811690818114611a9b57611a9b6117fe565b5092915050565b80820180821115611846576118466117fe565b60ff8416815260406020808301829052908201839052600090849060608401835b86811015611b0f5773ffffffffffffffffffffffffffffffffffffffff611afc85611421565b1682529282019290820190600101611ad6565b50979650505050505050565b60006040820160ff8516835260206040602085015281855180845260608601915060208701935060005b81811015611b7757845173ffffffffffffffffffffffffffffffffffffffff1683529383019391830191600101611b45565b509097965050505050505056fea164736f6c6343000818000a",
}
var KeystoneForwarderABI = KeystoneForwarderMetaData.ABI
diff --git a/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt
index 7d25f651dda..98d0a4bd02c 100644
--- a/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt
+++ b/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt
@@ -1,5 +1,5 @@
GETH_VERSION: 1.13.8
capabilities_registry: ../../../contracts/solc/v0.8.24/CapabilitiesRegistry/CapabilitiesRegistry.abi ../../../contracts/solc/v0.8.24/CapabilitiesRegistry/CapabilitiesRegistry.bin 6d2e3aa3a6f3aed2cf24b613743bb9ae4b9558f48a6864dc03b8b0ebb37235e3
feeds_consumer: ../../../contracts/solc/v0.8.24/KeystoneFeedsConsumer/KeystoneFeedsConsumer.abi ../../../contracts/solc/v0.8.24/KeystoneFeedsConsumer/KeystoneFeedsConsumer.bin f098e25df6afc100425fcad7f5107aec0844cc98315117e49da139a179d0eead
-forwarder: ../../../contracts/solc/v0.8.24/KeystoneForwarder/KeystoneForwarder.abi ../../../contracts/solc/v0.8.24/KeystoneForwarder/KeystoneForwarder.bin dc98a86a3775ead987b79d5b6079ee0e26f31c0626032bdd6508f986e2423227
+forwarder: ../../../contracts/solc/v0.8.24/KeystoneForwarder/KeystoneForwarder.abi ../../../contracts/solc/v0.8.24/KeystoneForwarder/KeystoneForwarder.bin 21a203d62a69338a5ca260907a31727421114ca25679330ada5d68f0092725bf
ocr3_capability: ../../../contracts/solc/v0.8.24/OCR3Capability/OCR3Capability.abi ../../../contracts/solc/v0.8.24/OCR3Capability/OCR3Capability.bin 8bf0f53f222efce7143dea6134552eb26ea1eef845407b4475a0d79b7d7ba9f8
From 2a032e83a5e09ae128e8c751779a7d1eebb729ea Mon Sep 17 00:00:00 2001
From: amit-momin <108959691+amit-momin@users.noreply.github.com>
Date: Fri, 2 Aug 2024 10:35:40 -0500
Subject: [PATCH 03/52] Update AutoPurge config interface and add header for
Scroll API (#13999)
* Updated AutoPurge heuristic configs to be optional
* Added content-type header for Scroll stuck tx API call
* Fixed linting
* Added changeset
---
.changeset/violet-clouds-rhyme.md | 5 ++++
.../evm/config/chain_scoped_transactions.go | 8 +++----
core/chains/evm/config/config.go | 4 ++--
core/chains/evm/txmgr/stuck_tx_detector.go | 23 ++++++++++++++-----
.../evm/txmgr/stuck_tx_detector_test.go | 16 ++++++-------
5 files changed, 36 insertions(+), 20 deletions(-)
create mode 100644 .changeset/violet-clouds-rhyme.md
diff --git a/.changeset/violet-clouds-rhyme.md b/.changeset/violet-clouds-rhyme.md
new file mode 100644
index 00000000000..b6db0e85c4f
--- /dev/null
+++ b/.changeset/violet-clouds-rhyme.md
@@ -0,0 +1,5 @@
+---
+"chainlink": minor
+---
+
+Updated AutoPurge.Threshold and AutoPurge.MinAttempts configs to only be required for heuristic and added content-type header for Scroll API #internal
diff --git a/core/chains/evm/config/chain_scoped_transactions.go b/core/chains/evm/config/chain_scoped_transactions.go
index 87031a4c66e..27edb12648a 100644
--- a/core/chains/evm/config/chain_scoped_transactions.go
+++ b/core/chains/evm/config/chain_scoped_transactions.go
@@ -47,12 +47,12 @@ func (a *autoPurgeConfig) Enabled() bool {
return *a.c.Enabled
}
-func (a *autoPurgeConfig) Threshold() uint32 {
- return *a.c.Threshold
+func (a *autoPurgeConfig) Threshold() *uint32 {
+ return a.c.Threshold
}
-func (a *autoPurgeConfig) MinAttempts() uint32 {
- return *a.c.MinAttempts
+func (a *autoPurgeConfig) MinAttempts() *uint32 {
+ return a.c.MinAttempts
}
func (a *autoPurgeConfig) DetectionApiUrl() *url.URL {
diff --git a/core/chains/evm/config/config.go b/core/chains/evm/config/config.go
index b0a5772f739..3ccdfeea8b8 100644
--- a/core/chains/evm/config/config.go
+++ b/core/chains/evm/config/config.go
@@ -110,8 +110,8 @@ type Transactions interface {
type AutoPurgeConfig interface {
Enabled() bool
- Threshold() uint32
- MinAttempts() uint32
+ Threshold() *uint32
+ MinAttempts() *uint32
DetectionApiUrl() *url.URL
}
diff --git a/core/chains/evm/txmgr/stuck_tx_detector.go b/core/chains/evm/txmgr/stuck_tx_detector.go
index 1beb857af8f..5901be0b02d 100644
--- a/core/chains/evm/txmgr/stuck_tx_detector.go
+++ b/core/chains/evm/txmgr/stuck_tx_detector.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
"math/big"
"net/http"
@@ -37,8 +38,8 @@ type stuckTxDetectorTxStore interface {
type stuckTxDetectorConfig interface {
Enabled() bool
- Threshold() uint32
- MinAttempts() uint32
+ Threshold() *uint32
+ MinAttempts() *uint32
DetectionApiUrl() *url.URL
}
@@ -78,7 +79,7 @@ func NewStuckTxDetector(lggr logger.Logger, chainID *big.Int, chainType chaintyp
func (d *stuckTxDetector) LoadPurgeBlockNumMap(ctx context.Context, addresses []common.Address) error {
// Skip loading purge block num map if auto-purge feature disabled or Threshold is set to 0
- if !d.cfg.Enabled() || d.cfg.Threshold() == 0 {
+ if !d.cfg.Enabled() || d.cfg.Threshold() == nil || *d.cfg.Threshold() == 0 {
return nil
}
d.purgeBlockNumLock.Lock()
@@ -172,6 +173,11 @@ func (d *stuckTxDetector) FindUnconfirmedTxWithLowestNonce(ctx context.Context,
// 4. If 3 is true, check if the latest attempt's gas price is higher than what our gas estimator's GetFee method returns
// 5. If 4 is true, the transaction is likely stuck due to overflow
func (d *stuckTxDetector) detectStuckTransactionsHeuristic(ctx context.Context, txs []Tx, blockNum int64) ([]Tx, error) {
+ if d.cfg.Threshold() == nil || d.cfg.MinAttempts() == nil {
+ err := errors.New("missing required configs for the stuck transaction heuristic. Transactions.AutoPurge.Threshold and Transactions.AutoPurge.MinAttempts are required")
+ d.lggr.Error(err.Error())
+ return txs, err
+ }
d.purgeBlockNumLock.RLock()
defer d.purgeBlockNumLock.RUnlock()
// Get gas price from internal gas estimator
@@ -187,17 +193,17 @@ func (d *stuckTxDetector) detectStuckTransactionsHeuristic(ctx context.Context,
d.purgeBlockNumLock.RLock()
lastPurgeBlockNum := d.purgeBlockNumMap[tx.FromAddress]
d.purgeBlockNumLock.RUnlock()
- if lastPurgeBlockNum > blockNum-int64(d.cfg.Threshold()) {
+ if lastPurgeBlockNum > blockNum-int64(*d.cfg.Threshold()) {
continue
}
// Tx attempts are loaded from newest to oldest
oldestBroadcastAttempt, newestBroadcastAttempt, broadcastedAttemptsCount := findBroadcastedAttempts(tx)
// 2. Check if Threshold amount of blocks have passed since the oldest attempt's broadcast block num
- if *oldestBroadcastAttempt.BroadcastBeforeBlockNum > blockNum-int64(d.cfg.Threshold()) {
+ if *oldestBroadcastAttempt.BroadcastBeforeBlockNum > blockNum-int64(*d.cfg.Threshold()) {
continue
}
// 3. Check if the transaction has at least MinAttempts amount of broadcasted attempts
- if broadcastedAttemptsCount < d.cfg.MinAttempts() {
+ if broadcastedAttemptsCount < *d.cfg.MinAttempts() {
continue
}
// 4. Check if the newest broadcasted attempt's gas price is higher than what our gas estimator's GetFee method returns
@@ -278,6 +284,10 @@ func (d *stuckTxDetector) detectStuckTransactionsScroll(ctx context.Context, txs
if err != nil {
return nil, fmt.Errorf("failed to make new request with context: %w", err)
}
+
+ // Add Content-Type header
+ postReq.Header.Add("Content-Type", "application/json")
+
// Send request
resp, err := d.httpClient.Do(postReq)
if err != nil {
@@ -287,6 +297,7 @@ func (d *stuckTxDetector) detectStuckTransactionsScroll(ctx context.Context, txs
if resp.StatusCode != 200 {
return nil, fmt.Errorf("request failed with status %d", resp.StatusCode)
}
+
// Decode the response into expected type
scrollResp := new(scrollResponse)
err = json.NewDecoder(resp.Body).Decode(scrollResp)
diff --git a/core/chains/evm/txmgr/stuck_tx_detector_test.go b/core/chains/evm/txmgr/stuck_tx_detector_test.go
index e980527c989..5f0d73be184 100644
--- a/core/chains/evm/txmgr/stuck_tx_detector_test.go
+++ b/core/chains/evm/txmgr/stuck_tx_detector_test.go
@@ -78,8 +78,8 @@ func TestStuckTxDetector_LoadPurgeBlockNumMap(t *testing.T) {
autoPurgeMinAttempts := uint32(3)
autoPurgeCfg := testAutoPurgeConfig{
enabled: true, // Enable auto-purge feature for testing
- threshold: autoPurgeThreshold,
- minAttempts: autoPurgeMinAttempts,
+ threshold: &autoPurgeThreshold,
+ minAttempts: &autoPurgeMinAttempts,
}
stuckTxDetector := txmgr.NewStuckTxDetector(lggr, testutils.FixtureChainID, "", assets.NewWei(assets.NewEth(100).ToInt()), autoPurgeCfg, feeEstimator, txStore, ethClient)
@@ -176,8 +176,8 @@ func TestStuckTxDetector_DetectStuckTransactionsHeuristic(t *testing.T) {
autoPurgeMinAttempts := uint32(3)
autoPurgeCfg := testAutoPurgeConfig{
enabled: true, // Enable auto-purge feature for testing
- threshold: autoPurgeThreshold,
- minAttempts: autoPurgeMinAttempts,
+ threshold: &autoPurgeThreshold,
+ minAttempts: &autoPurgeMinAttempts,
}
blockNum := int64(100)
stuckTxDetector := txmgr.NewStuckTxDetector(lggr, testutils.FixtureChainID, "", assets.NewWei(assets.NewEth(100).ToInt()), autoPurgeCfg, feeEstimator, txStore, ethClient)
@@ -423,12 +423,12 @@ func mustInsertUnconfirmedEthTxWithBroadcastPurgeAttempt(t *testing.T, txStore t
type testAutoPurgeConfig struct {
enabled bool
- threshold uint32
- minAttempts uint32
+ threshold *uint32
+ minAttempts *uint32
detectionApiUrl *url.URL
}
func (t testAutoPurgeConfig) Enabled() bool { return t.enabled }
-func (t testAutoPurgeConfig) Threshold() uint32 { return t.threshold }
-func (t testAutoPurgeConfig) MinAttempts() uint32 { return t.minAttempts }
+func (t testAutoPurgeConfig) Threshold() *uint32 { return t.threshold }
+func (t testAutoPurgeConfig) MinAttempts() *uint32 { return t.minAttempts }
func (t testAutoPurgeConfig) DetectionApiUrl() *url.URL { return t.detectionApiUrl }
From 82accfff5c445fd1d29a26607234eba73e6b30fd Mon Sep 17 00:00:00 2001
From: Matthew Pendrey
Date: Fri, 2 Aug 2024 18:22:49 +0200
Subject: [PATCH 04/52] fix keystone e2e test dispatcher to correctly replicate
duplicate registration behaviour (#14018)
---
.changeset/happy-adults-wash.md | 5 +++++
.../integration_tests/mock_dispatcher.go | 19 +++++++++++++++++++
2 files changed, 24 insertions(+)
create mode 100644 .changeset/happy-adults-wash.md
diff --git a/.changeset/happy-adults-wash.md b/.changeset/happy-adults-wash.md
new file mode 100644
index 00000000000..738f8998b20
--- /dev/null
+++ b/.changeset/happy-adults-wash.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+#internal fix to keystone e2e test dispatcher to correctly mock duplicate registration error
diff --git a/core/capabilities/integration_tests/mock_dispatcher.go b/core/capabilities/integration_tests/mock_dispatcher.go
index f685f0ad2e9..1230e59427d 100644
--- a/core/capabilities/integration_tests/mock_dispatcher.go
+++ b/core/capabilities/integration_tests/mock_dispatcher.go
@@ -9,6 +9,7 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/services"
"github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+ "github.com/smartcontractkit/chainlink/v2/core/capabilities/remote"
remotetypes "github.com/smartcontractkit/chainlink/v2/core/capabilities/remote/types"
p2ptypes "github.com/smartcontractkit/chainlink/v2/core/services/p2p/types"
@@ -58,6 +59,7 @@ func (a *testAsyncMessageBroker) NewDispatcherForNode(nodePeerID p2ptypes.PeerID
return &brokerDispatcher{
callerPeerID: nodePeerID,
broker: a,
+ receivers: map[key]remotetypes.Receiver{},
}
}
@@ -158,6 +160,14 @@ type broker interface {
type brokerDispatcher struct {
callerPeerID p2ptypes.PeerID
broker broker
+
+ receivers map[key]remotetypes.Receiver
+ mu sync.Mutex
+}
+
+type key struct {
+ capId string
+ donId uint32
}
func (t *brokerDispatcher) Send(peerID p2ptypes.PeerID, msgBody *remotetypes.MessageBody) error {
@@ -171,6 +181,15 @@ func (t *brokerDispatcher) Send(peerID p2ptypes.PeerID, msgBody *remotetypes.Mes
}
func (t *brokerDispatcher) SetReceiver(capabilityId string, donId uint32, receiver remotetypes.Receiver) error {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ k := key{capabilityId, donId}
+ _, ok := t.receivers[k]
+ if ok {
+ return fmt.Errorf("%w: receiver already exists for capability %s and don %d", remote.ErrReceiverExists, capabilityId, donId)
+ }
+ t.receivers[k] = receiver
+
t.broker.(*testAsyncMessageBroker).registerReceiverNode(t.callerPeerID, capabilityId, donId, receiver)
return nil
}
From af7dab653b10d8c201a2e6a8b3a587e43abd220c Mon Sep 17 00:00:00 2001
From: Jordan Krage
Date: Fri, 2 Aug 2024 19:06:45 +0200
Subject: [PATCH 05/52] common/headtracker: improve health error (#13966)
---
common/headtracker/head_tracker.go | 8 ++++----
integration-tests/reorg/automation_reorg_test.go | 2 +-
integration-tests/smoke/vrfv2_test.go | 2 +-
integration-tests/smoke/vrfv2plus_test.go | 2 +-
4 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/common/headtracker/head_tracker.go b/common/headtracker/head_tracker.go
index afa5d931ee6..851458591b8 100644
--- a/common/headtracker/head_tracker.go
+++ b/common/headtracker/head_tracker.go
@@ -236,8 +236,7 @@ func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) handleNewHead(ctx context.Context
"blockDifficulty", head.BlockDifficulty(),
)
- err := ht.headSaver.Save(ctx, head)
- if ctx.Err() != nil {
+ if err := ht.headSaver.Save(ctx, head); ctx.Err() != nil {
return nil
} else if err != nil {
return fmt.Errorf("failed to save head: %#v: %w", head, err)
@@ -264,8 +263,9 @@ func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) handleNewHead(ctx context.Context
if prevLatestFinalized != nil && head.BlockNumber() <= prevLatestFinalized.BlockNumber() {
promOldHead.WithLabelValues(ht.chainID.String()).Inc()
- ht.log.Criticalf("Got very old block with number %d (highest seen was %d). This is a problem and either means a very deep re-org occurred, one of the RPC nodes has gotten far out of sync, or the chain went backwards in block numbers. This node may not function correctly without manual intervention.", head.BlockNumber(), prevHead.BlockNumber())
- ht.SvcErrBuffer.Append(errors.New("got very old block"))
+ err := fmt.Errorf("got very old block with number %d (highest seen was %d)", head.BlockNumber(), prevHead.BlockNumber())
+ ht.log.Critical("Got very old block. Either a very deep re-org occurred, one of the RPC nodes has gotten far out of sync, or the chain went backwards in block numbers. This node may not function correctly without manual intervention.", "err", err)
+ ht.SvcErrBuffer.Append(err)
}
}
return nil
diff --git a/integration-tests/reorg/automation_reorg_test.go b/integration-tests/reorg/automation_reorg_test.go
index 1b9cf5819b9..808e394d69b 100644
--- a/integration-tests/reorg/automation_reorg_test.go
+++ b/integration-tests/reorg/automation_reorg_test.go
@@ -43,7 +43,7 @@ var (
)
var logScannerSettings = test_env.GetDefaultChainlinkNodeLogScannerSettingsWithExtraAllowedMessages(testreporters.NewAllowedLogMessage(
- "Got very old block with number",
+ "Got very old block.",
"It is expected, because we are causing reorgs",
zapcore.DPanicLevel,
testreporters.WarnAboutAllowedMsgs_No,
diff --git a/integration-tests/smoke/vrfv2_test.go b/integration-tests/smoke/vrfv2_test.go
index 7a53d2c57c8..48fbc0071c5 100644
--- a/integration-tests/smoke/vrfv2_test.go
+++ b/integration-tests/smoke/vrfv2_test.go
@@ -1063,7 +1063,7 @@ func TestVRFV2NodeReorg(t *testing.T) {
chainlinkNodeLogScannerSettings := test_env.GetDefaultChainlinkNodeLogScannerSettingsWithExtraAllowedMessages(
testreporters.NewAllowedLogMessage(
- "This is a problem and either means a very deep re-org occurred",
+ "Got very old block.",
"Test is expecting a reorg to occur",
zapcore.DPanicLevel,
testreporters.WarnAboutAllowedMsgs_No),
diff --git a/integration-tests/smoke/vrfv2plus_test.go b/integration-tests/smoke/vrfv2plus_test.go
index f519aa6cd5f..da2989d8fc9 100644
--- a/integration-tests/smoke/vrfv2plus_test.go
+++ b/integration-tests/smoke/vrfv2plus_test.go
@@ -1959,7 +1959,7 @@ func TestVRFv2PlusNodeReorg(t *testing.T) {
}
chainlinkNodeLogScannerSettings := test_env.GetDefaultChainlinkNodeLogScannerSettingsWithExtraAllowedMessages(
testreporters.NewAllowedLogMessage(
- "This is a problem and either means a very deep re-org occurred",
+ "Got very old block.",
"Test is expecting a reorg to occur",
zapcore.DPanicLevel,
testreporters.WarnAboutAllowedMsgs_No),
From 05ef7fdbb115f55a85bcbbc5402350818501e1f5 Mon Sep 17 00:00:00 2001
From: martin-cll <121895364+martin-cll@users.noreply.github.com>
Date: Fri, 2 Aug 2024 19:17:10 +0200
Subject: [PATCH 06/52] MERC-6004 Add Mercury v4 schema (#13862)
* Add Mercury v4
* Uncomment out test
* Add v4 telemetry
* MERC-6004 Fix build
* Update chainlink-common
* Add changeset
* Update market status proto enum values
* Add changeset hashtag
---
.changeset/eight-rocks-notice.md | 5 +
core/scripts/go.mod | 4 +-
core/scripts/go.sum | 8 +-
.../ocr2/plugins/mercury/config/config.go | 4 +-
.../ocr2/plugins/mercury/helpers_test.go | 95 +++++
.../ocr2/plugins/mercury/integration_test.go | 323 +++++++++++++++-
core/services/ocr2/plugins/mercury/plugin.go | 46 +++
.../ocr2/plugins/mercury/plugin_test.go | 32 ++
core/services/ocrcommon/telemetry.go | 29 ++
core/services/relay/evm/evm.go | 6 +-
.../services/relay/evm/mercury/utils/feeds.go | 2 +
.../relay/evm/mercury/v4/data_source.go | 290 +++++++++++++++
.../relay/evm/mercury/v4/data_source_test.go | 349 ++++++++++++++++++
.../mercury/v4/reportcodec/report_codec.go | 82 ++++
.../v4/reportcodec/report_codec_test.go | 163 ++++++++
.../relay/evm/mercury/v4/types/types.go | 58 +++
core/services/relay/evm/mercury_provider.go | 10 +-
.../synchronization/telem/telem.pb.go | 10 +-
.../telem/telem_automation_custom.pb.go | 12 +-
.../telem/telem_enhanced_ea.pb.go | 6 +-
.../telem/telem_enhanced_ea_mercury.pb.go | 128 +++++--
.../telem/telem_enhanced_ea_mercury.proto | 9 +
.../telem/telem_functions_request.pb.go | 6 +-
go.mod | 4 +-
go.sum | 8 +-
integration-tests/go.mod | 4 +-
integration-tests/go.sum | 8 +-
integration-tests/load/go.mod | 4 +-
integration-tests/load/go.sum | 8 +-
29 files changed, 1630 insertions(+), 83 deletions(-)
create mode 100644 .changeset/eight-rocks-notice.md
create mode 100644 core/services/relay/evm/mercury/v4/data_source.go
create mode 100644 core/services/relay/evm/mercury/v4/data_source_test.go
create mode 100644 core/services/relay/evm/mercury/v4/reportcodec/report_codec.go
create mode 100644 core/services/relay/evm/mercury/v4/reportcodec/report_codec_test.go
create mode 100644 core/services/relay/evm/mercury/v4/types/types.go
diff --git a/.changeset/eight-rocks-notice.md b/.changeset/eight-rocks-notice.md
new file mode 100644
index 00000000000..230abaec481
--- /dev/null
+++ b/.changeset/eight-rocks-notice.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+New Mercury v4 report schema #added
diff --git a/core/scripts/go.mod b/core/scripts/go.mod
index 5cd4aaf63c0..4ee443d46f8 100644
--- a/core/scripts/go.mod
+++ b/core/scripts/go.mod
@@ -22,7 +22,7 @@ require (
github.com/prometheus/client_golang v1.17.0
github.com/shopspring/decimal v1.4.0
github.com/smartcontractkit/chainlink-automation v1.0.4
- github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731121127-5ae22cf04996
+ github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc
github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000
github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7
github.com/spf13/cobra v1.8.0
@@ -271,7 +271,7 @@ require (
github.com/shirou/gopsutil/v3 v3.24.3 // indirect
github.com/smartcontractkit/chain-selectors v1.0.10 // indirect
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 // indirect
- github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240718160222-2dc0c8136bfa // indirect
+ github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f // indirect
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 // indirect
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e // indirect
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 // indirect
diff --git a/core/scripts/go.sum b/core/scripts/go.sum
index c383b6bf81e..3ae26beb633 100644
--- a/core/scripts/go.sum
+++ b/core/scripts/go.sum
@@ -1184,12 +1184,12 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq
github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE=
github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8=
github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731121127-5ae22cf04996 h1:6s4cTIE3NbATxWLrD5JLCq097PC5Y4GKK/Kk4fhURpY=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731121127-5ae22cf04996/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc h1:nNZqLasN8y5huDKX76JUZtni7WkUI36J61//czbJpDM=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs=
-github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240718160222-2dc0c8136bfa h1:g75H8oh2ws52s8BekwvGQ9XvBVu3E7WM1rfiA0PN0zk=
-github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240718160222-2dc0c8136bfa/go.mod h1:wZvLHX/Sd9hskN51016cTFcT3G62KXVa6xbVDS7tRjc=
+github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM=
+github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f/go.mod h1:V/86loaFSH0dqqUEHqyXVbyNqDRSjvcf9BRomWFTljU=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 h1:BCHu4pNP6arrcHLEWx61XjLaonOd2coQNyL0NTUcaMc=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827/go.mod h1:OPX+wC2TWQsyLNpR7daMt2vMpmsNcoBxbZyGTHr6tiA=
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e h1:PzwzlHNv1YbJ6ZIdl/pIFRoOuOS4V4WLvjZvFUnZFL4=
diff --git a/core/services/ocr2/plugins/mercury/config/config.go b/core/services/ocr2/plugins/mercury/config/config.go
index 5763b883ac0..40854bd8c0a 100644
--- a/core/services/ocr2/plugins/mercury/config/config.go
+++ b/core/services/ocr2/plugins/mercury/config/config.go
@@ -108,7 +108,7 @@ func ValidatePluginConfig(config PluginConfig, feedID mercuryutils.FeedID) (merr
if config.NativeFeedID != nil {
merr = errors.Join(merr, errors.New("nativeFeedID may not be specified for v1 jobs"))
}
- case 2, 3:
+ case 2, 3, 4:
if config.LinkFeedID == nil {
merr = errors.Join(merr, fmt.Errorf("linkFeedID must be specified for v%d jobs", feedID.Version()))
}
@@ -119,7 +119,7 @@ func ValidatePluginConfig(config PluginConfig, feedID mercuryutils.FeedID) (merr
merr = errors.Join(merr, fmt.Errorf("initialBlockNumber may not be specified for v%d jobs", feedID.Version()))
}
default:
- merr = errors.Join(merr, fmt.Errorf("got unsupported schema version %d; supported versions are 1,2,3", feedID.Version()))
+ merr = errors.Join(merr, fmt.Errorf("got unsupported schema version %d; supported versions are 1,2,3,4", feedID.Version()))
}
return merr
diff --git a/core/services/ocr2/plugins/mercury/helpers_test.go b/core/services/ocr2/plugins/mercury/helpers_test.go
index 43d709453b7..9691e8d4fab 100644
--- a/core/services/ocr2/plugins/mercury/helpers_test.go
+++ b/core/services/ocr2/plugins/mercury/helpers_test.go
@@ -121,6 +121,7 @@ type Feed struct {
baseBenchmarkPrice *big.Int
baseBid *big.Int
baseAsk *big.Int
+ baseMarketStatus uint32
}
func randomFeedID(version uint16) [32]byte {
@@ -467,3 +468,97 @@ chainID = 1337
nativeFeedID,
))
}
+
+func addV4MercuryJob(
+ t *testing.T,
+ node Node,
+ i int,
+ verifierAddress common.Address,
+ bootstrapPeerID string,
+ bootstrapNodePort int,
+ bmBridge,
+ bidBridge,
+ askBridge,
+ marketStatusBridge string,
+ servers map[string]string,
+ clientPubKey ed25519.PublicKey,
+ feedName string,
+ feedID [32]byte,
+ linkFeedID [32]byte,
+ nativeFeedID [32]byte,
+) {
+ srvs := make([]string, 0, len(servers))
+ for u, k := range servers {
+ srvs = append(srvs, fmt.Sprintf("%q = %q", u, k))
+ }
+ serversStr := fmt.Sprintf("{ %s }", strings.Join(srvs, ", "))
+
+ node.AddJob(t, fmt.Sprintf(`
+type = "offchainreporting2"
+schemaVersion = 1
+name = "mercury-%[1]d-%[11]s"
+forwardingAllowed = false
+maxTaskDuration = "1s"
+contractID = "%[2]s"
+feedID = "0x%[10]x"
+contractConfigTrackerPollInterval = "1s"
+ocrKeyBundleID = "%[3]s"
+p2pv2Bootstrappers = [
+ "%[4]s"
+]
+relay = "evm"
+pluginType = "mercury"
+transmitterID = "%[9]x"
+observationSource = """
+ // Benchmark Price
+ price1 [type=bridge name="%[5]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"];
+ price1_parse [type=jsonparse path="result"];
+ price1_multiply [type=multiply times=100000000 index=0];
+
+ price1 -> price1_parse -> price1_multiply;
+
+ // Bid
+ bid [type=bridge name="%[6]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"];
+ bid_parse [type=jsonparse path="result"];
+ bid_multiply [type=multiply times=100000000 index=1];
+
+ bid -> bid_parse -> bid_multiply;
+
+ // Ask
+ ask [type=bridge name="%[7]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"];
+ ask_parse [type=jsonparse path="result"];
+ ask_multiply [type=multiply times=100000000 index=2];
+
+ ask -> ask_parse -> ask_multiply;
+
+ // Market Status
+ marketstatus [type=bridge name="%[14]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"];
+ marketstatus_parse [type=jsonparse path="result" index=3];
+
+ marketstatus -> marketstatus_parse;
+"""
+
+[pluginConfig]
+servers = %[8]s
+linkFeedID = "0x%[12]x"
+nativeFeedID = "0x%[13]x"
+
+[relayConfig]
+chainID = 1337
+ `,
+ i,
+ verifierAddress,
+ node.KeyBundle.ID(),
+ fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort),
+ bmBridge,
+ bidBridge,
+ askBridge,
+ serversStr,
+ clientPubKey,
+ feedID,
+ feedName,
+ linkFeedID,
+ nativeFeedID,
+ marketStatusBridge,
+ ))
+}
diff --git a/core/services/ocr2/plugins/mercury/integration_test.go b/core/services/ocr2/plugins/mercury/integration_test.go
index 832a39237ee..9e34e9da8b4 100644
--- a/core/services/ocr2/plugins/mercury/integration_test.go
+++ b/core/services/ocr2/plugins/mercury/integration_test.go
@@ -24,22 +24,21 @@ import (
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/hashicorp/consul/sdk/freeport"
"github.com/shopspring/decimal"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "go.uber.org/zap/zapcore"
- "go.uber.org/zap/zaptest/observer"
-
"github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper"
"github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3confighelper"
ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
"github.com/smartcontractkit/wsrpc/credentials"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zaptest/observer"
mercurytypes "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
v1 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v1"
v2 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2"
v3 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3"
+ v4 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
datastreamsmercury "github.com/smartcontractkit/chainlink-data-streams/mercury"
-
"github.com/smartcontractkit/chainlink/v2/core/bridges"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
token "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface"
@@ -56,6 +55,7 @@ import (
reportcodecv1 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v1/reportcodec"
reportcodecv2 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v2/reportcodec"
reportcodecv3 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v3/reportcodec"
+ reportcodecv4 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v4/reportcodec"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
)
@@ -146,9 +146,9 @@ func integration_MercuryV1(t *testing.T) {
pError := atomic.Int64{}
// feeds
- btcFeed := Feed{"BTC/USD", randomFeedID(1), big.NewInt(20_000 * multiplier), big.NewInt(19_997 * multiplier), big.NewInt(20_004 * multiplier)}
- ethFeed := Feed{"ETH/USD", randomFeedID(1), big.NewInt(1_568 * multiplier), big.NewInt(1_566 * multiplier), big.NewInt(1_569 * multiplier)}
- linkFeed := Feed{"LINK/USD", randomFeedID(1), big.NewInt(7150 * multiplier / 1000), big.NewInt(7123 * multiplier / 1000), big.NewInt(7177 * multiplier / 1000)}
+ btcFeed := Feed{"BTC/USD", randomFeedID(1), big.NewInt(20_000 * multiplier), big.NewInt(19_997 * multiplier), big.NewInt(20_004 * multiplier), 0}
+ ethFeed := Feed{"ETH/USD", randomFeedID(1), big.NewInt(1_568 * multiplier), big.NewInt(1_566 * multiplier), big.NewInt(1_569 * multiplier), 0}
+ linkFeed := Feed{"LINK/USD", randomFeedID(1), big.NewInt(7150 * multiplier / 1000), big.NewInt(7123 * multiplier / 1000), big.NewInt(7177 * multiplier / 1000), 0}
feeds := []Feed{btcFeed, ethFeed, linkFeed}
feedM := make(map[[32]byte]Feed, len(feeds))
for i := range feeds {
@@ -1036,3 +1036,308 @@ func integration_MercuryV3(t *testing.T) {
}
})
}
+
+func TestIntegration_MercuryV4(t *testing.T) {
+ t.Parallel()
+
+ integration_MercuryV4(t)
+}
+
+func integration_MercuryV4(t *testing.T) {
+ ctx := testutils.Context(t)
+ var logObservers []*observer.ObservedLogs
+ t.Cleanup(func() {
+ detectPanicLogs(t, logObservers)
+ })
+
+ testStartTimeStamp := uint32(time.Now().Unix())
+
+ // test vars
+	// pError is the probability that an EA will return an error instead of a result, as an integer percentage
+ // pError = 0 means it will never return error
+ pError := atomic.Int64{}
+
+ // feeds
+ btcFeed := Feed{
+ name: "BTC/USD",
+ id: randomFeedID(4),
+ baseBenchmarkPrice: big.NewInt(20_000 * multiplier),
+ baseBid: big.NewInt(19_997 * multiplier),
+ baseAsk: big.NewInt(20_004 * multiplier),
+ baseMarketStatus: 1,
+ }
+ ethFeed := Feed{
+ name: "ETH/USD",
+ id: randomFeedID(4),
+ baseBenchmarkPrice: big.NewInt(1_568 * multiplier),
+ baseBid: big.NewInt(1_566 * multiplier),
+ baseAsk: big.NewInt(1_569 * multiplier),
+ baseMarketStatus: 2,
+ }
+ linkFeed := Feed{
+ name: "LINK/USD",
+ id: randomFeedID(4),
+ baseBenchmarkPrice: big.NewInt(7150 * multiplier / 1000),
+ baseBid: big.NewInt(7123 * multiplier / 1000),
+ baseAsk: big.NewInt(7177 * multiplier / 1000),
+ baseMarketStatus: 3,
+ }
+ feeds := []Feed{btcFeed, ethFeed, linkFeed}
+ feedM := make(map[[32]byte]Feed, len(feeds))
+ for i := range feeds {
+ feedM[feeds[i].id] = feeds[i]
+ }
+
+ clientCSAKeys := make([]csakey.KeyV2, n+1)
+ clientPubKeys := make([]ed25519.PublicKey, n+1)
+ for i := 0; i < n+1; i++ {
+ k := big.NewInt(int64(i))
+ key := csakey.MustNewV2XXXTestingOnly(k)
+ clientCSAKeys[i] = key
+ clientPubKeys[i] = key.PublicKey
+ }
+
+ // Test multi-send to three servers
+ const nSrvs = 3
+ reqChs := make([]chan request, nSrvs)
+ servers := make(map[string]string)
+ for i := 0; i < nSrvs; i++ {
+ k := csakey.MustNewV2XXXTestingOnly(big.NewInt(int64(-(i + 1))))
+ reqs := make(chan request, 100)
+ srv := NewMercuryServer(t, ed25519.PrivateKey(k.Raw()), reqs, func() []byte {
+ report, err := (&reportcodecv4.ReportCodec{}).BuildReport(v4.ReportFields{BenchmarkPrice: big.NewInt(234567), Bid: big.NewInt(1), Ask: big.NewInt(1), LinkFee: big.NewInt(1), NativeFee: big.NewInt(1), MarketStatus: 1})
+ if err != nil {
+ panic(err)
+ }
+ return report
+ })
+ serverURL := startMercuryServer(t, srv, clientPubKeys)
+ reqChs[i] = reqs
+ servers[serverURL] = fmt.Sprintf("%x", k.PublicKey)
+ }
+ chainID := testutils.SimulatedChainID
+
+ steve, backend, verifier, verifierAddress := setupBlockchain(t)
+
+ // Setup bootstrap + oracle nodes
+ bootstrapNodePort := freeport.GetOne(t)
+ appBootstrap, bootstrapPeerID, _, bootstrapKb, observedLogs := setupNode(t, bootstrapNodePort, "bootstrap_mercury", backend, clientCSAKeys[n])
+ bootstrapNode := Node{App: appBootstrap, KeyBundle: bootstrapKb}
+ logObservers = append(logObservers, observedLogs)
+
+ // Commit blocks to finality depth to ensure LogPoller has finalized blocks to read from
+ ch, err := bootstrapNode.App.GetRelayers().LegacyEVMChains().Get(testutils.SimulatedChainID.String())
+ require.NoError(t, err)
+ finalityDepth := ch.Config().EVM().FinalityDepth()
+ for i := 0; i < int(finalityDepth); i++ {
+ backend.Commit()
+ }
+
+ // Set up n oracles
+ var (
+ oracles []confighelper.OracleIdentityExtra
+ nodes []Node
+ )
+ ports := freeport.GetN(t, n)
+ for i := 0; i < n; i++ {
+ app, peerID, transmitter, kb, observedLogs := setupNode(t, ports[i], fmt.Sprintf("oracle_mercury%d", i), backend, clientCSAKeys[i])
+
+ nodes = append(nodes, Node{
+ app, transmitter, kb,
+ })
+
+ offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x"))
+ oracles = append(oracles, confighelper.OracleIdentityExtra{
+ OracleIdentity: confighelper.OracleIdentity{
+ OnchainPublicKey: offchainPublicKey,
+ TransmitAccount: ocr2types.Account(fmt.Sprintf("%x", transmitter[:])),
+ OffchainPublicKey: kb.OffchainPublicKey(),
+ PeerID: peerID,
+ },
+ ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(),
+ })
+ logObservers = append(logObservers, observedLogs)
+ }
+
+ for _, feed := range feeds {
+ addBootstrapJob(t, bootstrapNode, chainID, verifierAddress, feed.name, feed.id)
+ }
+
+ createBridge := func(name string, i int, p *big.Int, marketStatus uint32, borm bridges.ORM) (bridgeName string) {
+ bridge := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
+ b, herr := io.ReadAll(req.Body)
+ require.NoError(t, herr)
+ require.Equal(t, `{"data":{"from":"ETH","to":"USD"}}`, string(b))
+
+ r := rand.Int63n(101)
+ if r > pError.Load() {
+ res.WriteHeader(http.StatusOK)
+
+ var val string
+ if p != nil {
+ val = decimal.NewFromBigInt(p, 0).Div(decimal.NewFromInt(multiplier)).Add(decimal.NewFromInt(int64(i)).Div(decimal.NewFromInt(100))).String()
+ } else {
+ val = fmt.Sprintf("%d", marketStatus)
+ }
+
+ resp := fmt.Sprintf(`{"result": %s}`, val)
+ _, herr = res.Write([]byte(resp))
+ require.NoError(t, herr)
+ } else {
+ res.WriteHeader(http.StatusInternalServerError)
+ resp := `{"error": "pError test error"}`
+ _, herr = res.Write([]byte(resp))
+ require.NoError(t, herr)
+ }
+ }))
+ t.Cleanup(bridge.Close)
+ u, _ := url.Parse(bridge.URL)
+ bridgeName = fmt.Sprintf("bridge-%s-%d", name, i)
+ require.NoError(t, borm.CreateBridgeType(ctx, &bridges.BridgeType{
+ Name: bridges.BridgeName(bridgeName),
+ URL: models.WebURL(*u),
+ }))
+
+ return bridgeName
+ }
+
+ // Add OCR jobs - one per feed on each node
+ for i, node := range nodes {
+ for j, feed := range feeds {
+ bmBridge := createBridge(fmt.Sprintf("benchmarkprice-%d", j), i, feed.baseBenchmarkPrice, 0, node.App.BridgeORM())
+ bidBridge := createBridge(fmt.Sprintf("bid-%d", j), i, feed.baseBid, 0, node.App.BridgeORM())
+ askBridge := createBridge(fmt.Sprintf("ask-%d", j), i, feed.baseAsk, 0, node.App.BridgeORM())
+ marketStatusBridge := createBridge(fmt.Sprintf("marketstatus-%d", j), i, nil, feed.baseMarketStatus, node.App.BridgeORM())
+
+ addV4MercuryJob(
+ t,
+ node,
+ i,
+ verifierAddress,
+ bootstrapPeerID,
+ bootstrapNodePort,
+ bmBridge,
+ bidBridge,
+ askBridge,
+ marketStatusBridge,
+ servers,
+ clientPubKeys[i],
+ feed.name,
+ feed.id,
+ randomFeedID(2),
+ randomFeedID(2),
+ )
+ }
+ }
+
+ // Setup config on contract
+ onchainConfig, err := (datastreamsmercury.StandardOnchainConfigCodec{}).Encode(rawOnchainConfig)
+ require.NoError(t, err)
+
+ reportingPluginConfig, err := json.Marshal(rawReportingPluginConfig)
+ require.NoError(t, err)
+
+ signers, _, _, onchainConfig, offchainConfigVersion, offchainConfig, err := ocr3confighelper.ContractSetConfigArgsForTestsMercuryV02(
+ 2*time.Second, // DeltaProgress
+ 20*time.Second, // DeltaResend
+ 400*time.Millisecond, // DeltaInitial
+ 100*time.Millisecond, // DeltaRound
+ 0, // DeltaGrace
+ 300*time.Millisecond, // DeltaCertifiedCommitRequest
+ 1*time.Minute, // DeltaStage
+ 100, // rMax
+ []int{len(nodes)}, // S
+ oracles,
+ reportingPluginConfig, // reportingPluginConfig []byte,
+ 250*time.Millisecond, // Max duration observation
+ int(f), // f
+ onchainConfig,
+ )
+
+ require.NoError(t, err)
+ signerAddresses, err := evm.OnchainPublicKeyToAddress(signers)
+ require.NoError(t, err)
+
+ offchainTransmitters := make([][32]byte, n)
+ for i := 0; i < n; i++ {
+ offchainTransmitters[i] = nodes[i].ClientPubKey
+ }
+
+ for _, feed := range feeds {
+ _, ferr := verifier.SetConfig(
+ steve,
+ feed.id,
+ signerAddresses,
+ offchainTransmitters,
+ f,
+ onchainConfig,
+ offchainConfigVersion,
+ offchainConfig,
+ nil,
+ )
+ require.NoError(t, ferr)
+ backend.Commit()
+ }
+
+ runTestSetup := func(reqs chan request) {
+ // Expect at least one report per feed from each oracle, per server
+ seen := make(map[[32]byte]map[credentials.StaticSizedPublicKey]struct{})
+ for i := range feeds {
+ // feedID will be deleted when all n oracles have reported
+ seen[feeds[i].id] = make(map[credentials.StaticSizedPublicKey]struct{}, n)
+ }
+
+ for req := range reqs {
+ v := make(map[string]interface{})
+ err := mercury.PayloadTypes.UnpackIntoMap(v, req.req.Payload)
+ require.NoError(t, err)
+ report, exists := v["report"]
+ if !exists {
+ t.Fatalf("expected payload %#v to contain 'report'", v)
+ }
+ reportElems := make(map[string]interface{})
+ err = reportcodecv4.ReportTypes.UnpackIntoMap(reportElems, report.([]byte))
+ require.NoError(t, err)
+
+ feedID := reportElems["feedId"].([32]uint8)
+ feed, exists := feedM[feedID]
+ require.True(t, exists)
+
+ if _, exists := seen[feedID]; !exists {
+ continue // already saw all oracles for this feed
+ }
+
+ expectedFee := datastreamsmercury.CalculateFee(big.NewInt(234567), rawReportingPluginConfig.BaseUSDFee)
+ expectedExpiresAt := reportElems["observationsTimestamp"].(uint32) + rawReportingPluginConfig.ExpirationWindow
+
+ assert.GreaterOrEqual(t, int(reportElems["observationsTimestamp"].(uint32)), int(testStartTimeStamp))
+ assert.InDelta(t, feed.baseBenchmarkPrice.Int64(), reportElems["benchmarkPrice"].(*big.Int).Int64(), 5000000)
+ assert.InDelta(t, feed.baseBid.Int64(), reportElems["bid"].(*big.Int).Int64(), 5000000)
+ assert.InDelta(t, feed.baseAsk.Int64(), reportElems["ask"].(*big.Int).Int64(), 5000000)
+ assert.NotZero(t, reportElems["validFromTimestamp"].(uint32))
+ assert.GreaterOrEqual(t, reportElems["observationsTimestamp"].(uint32), reportElems["validFromTimestamp"].(uint32))
+ assert.Equal(t, expectedExpiresAt, reportElems["expiresAt"].(uint32))
+ assert.Equal(t, expectedFee, reportElems["linkFee"].(*big.Int))
+ assert.Equal(t, expectedFee, reportElems["nativeFee"].(*big.Int))
+ assert.Equal(t, feed.baseMarketStatus, reportElems["marketStatus"].(uint32))
+
+ t.Logf("oracle %x reported for feed %s (0x%x)", req.pk, feed.name, feed.id)
+
+ seen[feedID][req.pk] = struct{}{}
+ if len(seen[feedID]) == n {
+ t.Logf("all oracles reported for feed %s (0x%x)", feed.name, feed.id)
+ delete(seen, feedID)
+ if len(seen) == 0 {
+ break // saw all oracles; success!
+ }
+ }
+ }
+ }
+
+ t.Run("receives at least one report per feed for every server from each oracle when EAs are at 100% reliability", func(t *testing.T) {
+ for i := 0; i < nSrvs; i++ {
+ reqs := reqChs[i]
+ runTestSetup(reqs)
+ }
+ })
+}
diff --git a/core/services/ocr2/plugins/mercury/plugin.go b/core/services/ocr2/plugins/mercury/plugin.go
index c5eba78b0d8..0898c1821ec 100644
--- a/core/services/ocr2/plugins/mercury/plugin.go
+++ b/core/services/ocr2/plugins/mercury/plugin.go
@@ -13,6 +13,7 @@ import (
relaymercuryv1 "github.com/smartcontractkit/chainlink-data-streams/mercury/v1"
relaymercuryv2 "github.com/smartcontractkit/chainlink-data-streams/mercury/v2"
relaymercuryv3 "github.com/smartcontractkit/chainlink-data-streams/mercury/v3"
+ relaymercuryv4 "github.com/smartcontractkit/chainlink-data-streams/mercury/v4"
"github.com/smartcontractkit/chainlink-common/pkg/loop"
commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
@@ -29,6 +30,7 @@ import (
mercuryv1 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v1"
mercuryv2 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v2"
mercuryv3 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v3"
+ mercuryv4 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v4"
"github.com/smartcontractkit/chainlink/v2/plugins"
)
@@ -136,6 +138,13 @@ func NewServices(
return nil, fmt.Errorf("failed to create mercury v3 factory: %w", err)
}
srvs = append(srvs, factoryServices...)
+ case 4:
+ factory, factoryServices, err = newv4factory(fCfg)
+ if err != nil {
+ abort()
+ return nil, fmt.Errorf("failed to create mercury v4 factory: %w", err)
+ }
+ srvs = append(srvs, factoryServices...)
default:
return nil, errors.Errorf("unknown Mercury report schema version: %d", feedID.Version())
}
@@ -162,6 +171,43 @@ type factoryCfg struct {
feedID utils.FeedID
}
+func newv4factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.ServiceCtx, error) {
+ var factory ocr3types.MercuryPluginFactory
+ srvs := make([]job.ServiceCtx, 0)
+
+ ds := mercuryv4.NewDataSource(
+ factoryCfg.orm,
+ factoryCfg.pipelineRunner,
+ factoryCfg.jb,
+ *factoryCfg.jb.PipelineSpec,
+ factoryCfg.feedID,
+ factoryCfg.lggr,
+ factoryCfg.saver,
+ factoryCfg.chEnhancedTelem,
+ factoryCfg.ocr2Provider.MercuryServerFetcher(),
+ *factoryCfg.reportingPluginConfig.LinkFeedID,
+ *factoryCfg.reportingPluginConfig.NativeFeedID,
+ )
+
+ loopCmd := env.MercuryPlugin.Cmd.Get()
+ loopEnabled := loopCmd != ""
+
+ if loopEnabled {
+ cmdFn, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err)
+ }
+		// in loop mode, the factory is a grpc server, and we need to handle the server lifecycle
+ factoryServer := loop.NewMercuryV4Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds)
+ srvs = append(srvs, factoryServer)
+ // adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle
+ factory = factoryServer
+ } else {
+ factory = relaymercuryv4.NewFactory(ds, factoryCfg.lggr, factoryCfg.ocr2Provider.OnchainConfigCodec(), factoryCfg.ocr2Provider.ReportCodecV4())
+ }
+ return factory, srvs, nil
+}
+
func newv3factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.ServiceCtx, error) {
var factory ocr3types.MercuryPluginFactory
srvs := make([]job.ServiceCtx, 0)
diff --git a/core/services/ocr2/plugins/mercury/plugin_test.go b/core/services/ocr2/plugins/mercury/plugin_test.go
index 95aaabec142..f9bef4a3f1a 100644
--- a/core/services/ocr2/plugins/mercury/plugin_test.go
+++ b/core/services/ocr2/plugins/mercury/plugin_test.go
@@ -21,6 +21,7 @@ import (
v1 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v1"
v2 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2"
v3 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3"
+ v4 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
mercuryocr2 "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury"
@@ -37,6 +38,7 @@ var (
v1FeedId = [32]uint8{00, 01, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
v2FeedId = [32]uint8{00, 02, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
v3FeedId = [32]uint8{00, 03, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
+ v4FeedId = [32]uint8{00, 04, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
testArgsNoPlugin = libocr2.MercuryOracleArgs{
LocalConfig: libocr2types.LocalConfig{
@@ -66,6 +68,13 @@ var (
"nativeFeedID": "0x00036b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472",
}
+ v4jsonCfg = job.JSONConfig{
+ "serverURL": "example.com:80",
+ "serverPubKey": "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93",
+ "linkFeedID": "0x00026b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472",
+ "nativeFeedID": "0x00036b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472",
+ }
+
testJob = job.Job{
ID: 1,
ExternalJobID: uuid.Must(uuid.NewRandom()),
@@ -135,6 +144,15 @@ func TestNewServices(t *testing.T) {
wantServiceCnt: expectedEmbeddedServiceCnt,
wantErr: false,
},
+ {
+ name: "v4 legacy",
+ args: args{
+ pluginConfig: v4jsonCfg,
+ feedID: v4FeedId,
+ },
+ wantServiceCnt: expectedEmbeddedServiceCnt,
+ wantErr: false,
+ },
{
name: "v1 loop",
loopMode: true,
@@ -168,6 +186,17 @@ func TestNewServices(t *testing.T) {
wantErr: false,
wantLoopFactory: &loop.MercuryV3Service{},
},
+ {
+ name: "v4 loop",
+ loopMode: true,
+ args: args{
+ pluginConfig: v4jsonCfg,
+ feedID: v4FeedId,
+ },
+ wantServiceCnt: expectedLoopServiceCnt,
+ wantErr: false,
+ wantLoopFactory: &loop.MercuryV4Service{},
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -259,6 +288,9 @@ func (*testProvider) ReportCodecV2() v2.ReportCodec { return nil }
// ReportCodecV3 implements types.MercuryProvider.
func (*testProvider) ReportCodecV3() v3.ReportCodec { return nil }
+// ReportCodecV4 implements types.MercuryProvider.
+func (*testProvider) ReportCodecV4() v4.ReportCodec { return nil }
+
// Start implements types.MercuryProvider.
func (*testProvider) Start(context.Context) error { panic("unimplemented") }
diff --git a/core/services/ocrcommon/telemetry.go b/core/services/ocrcommon/telemetry.go
index 2ef76800a42..2cb4fda9105 100644
--- a/core/services/ocrcommon/telemetry.go
+++ b/core/services/ocrcommon/telemetry.go
@@ -15,6 +15,8 @@ import (
v1types "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v1"
v2types "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2"
v3types "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3"
+ v4types "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
+
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
@@ -41,6 +43,7 @@ type EnhancedTelemetryMercuryData struct {
V1Observation *v1types.Observation
V2Observation *v2types.Observation
V3Observation *v3types.Observation
+ V4Observation *v4types.Observation
TaskRunResults pipeline.TaskRunResults
RepTimestamp ocrtypes.ReportTimestamp
FeedVersion mercuryutils.FeedVersion
@@ -298,6 +301,8 @@ func (e *EnhancedTelemetryService[T]) collectMercuryEnhancedTelemetry(d Enhanced
ask := big.NewInt(0)
// v2+v3 fields
var mfts, lp, np int64
+ // v4 fields
+ var marketStatus telem.MarketStatus
switch {
case d.V1Observation != nil:
@@ -354,6 +359,29 @@ func (e *EnhancedTelemetryService[T]) collectMercuryEnhancedTelemetry(d Enhanced
if obs.Ask.Err == nil && obs.Ask.Val != nil {
ask = obs.Ask.Val
}
+ case d.V4Observation != nil:
+ obs := *d.V4Observation
+ if obs.MaxFinalizedTimestamp.Err == nil {
+ mfts = obs.MaxFinalizedTimestamp.Val
+ }
+ if obs.LinkPrice.Err == nil && obs.LinkPrice.Val != nil {
+ lp = obs.LinkPrice.Val.Int64()
+ }
+ if obs.NativePrice.Err == nil && obs.NativePrice.Val != nil {
+ np = obs.NativePrice.Val.Int64()
+ }
+ if obs.BenchmarkPrice.Err == nil && obs.BenchmarkPrice.Val != nil {
+ bp = obs.BenchmarkPrice.Val
+ }
+ if obs.Bid.Err == nil && obs.Bid.Val != nil {
+ bid = obs.Bid.Val
+ }
+ if obs.Ask.Err == nil && obs.Ask.Val != nil {
+ ask = obs.Ask.Val
+ }
+ if obs.MarketStatus.Err == nil {
+ marketStatus = telem.MarketStatus(obs.MarketStatus.Val)
+ }
}
for _, trr := range d.TaskRunResults {
@@ -401,6 +429,7 @@ func (e *EnhancedTelemetryService[T]) collectMercuryEnhancedTelemetry(d Enhanced
ObservationBenchmarkPriceString: stringOrEmpty(bp),
ObservationBidString: stringOrEmpty(bid),
ObservationAskString: stringOrEmpty(ask),
+ ObservationMarketStatus: marketStatus,
IsLinkFeed: d.IsLinkFeed,
LinkPrice: lp,
IsNativeFeed: d.IsNativeFeed,
diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go
index 3b3393441a2..a0782380b5b 100644
--- a/core/services/relay/evm/evm.go
+++ b/core/services/relay/evm/evm.go
@@ -25,6 +25,7 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
coretypes "github.com/smartcontractkit/chainlink-common/pkg/types/core"
+ reportcodecv4 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v4/reportcodec"
txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr"
txm "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
@@ -296,6 +297,7 @@ func (r *Relayer) NewMercuryProvider(rargs commontypes.RelayArgs, pargs commonty
reportCodecV1 := reportcodecv1.NewReportCodec(*relayConfig.FeedID, lggr.Named("ReportCodecV1"))
reportCodecV2 := reportcodecv2.NewReportCodec(*relayConfig.FeedID, lggr.Named("ReportCodecV2"))
reportCodecV3 := reportcodecv3.NewReportCodec(*relayConfig.FeedID, lggr.Named("ReportCodecV3"))
+ reportCodecV4 := reportcodecv4.NewReportCodec(*relayConfig.FeedID, lggr.Named("ReportCodecV4"))
var transmitterCodec mercury.TransmitterReportDecoder
switch feedID.Version() {
@@ -305,12 +307,14 @@ func (r *Relayer) NewMercuryProvider(rargs commontypes.RelayArgs, pargs commonty
transmitterCodec = reportCodecV2
case 3:
transmitterCodec = reportCodecV3
+ case 4:
+ transmitterCodec = reportCodecV4
default:
return nil, fmt.Errorf("invalid feed version %d", feedID.Version())
}
transmitter := mercury.NewTransmitter(lggr, r.transmitterCfg, clients, privKey.PublicKey, rargs.JobID, *relayConfig.FeedID, r.mercuryORM, transmitterCodec, r.triggerCapability)
- return NewMercuryProvider(cp, r.chainReader, r.codec, NewMercuryChainReader(r.chain.HeadTracker()), transmitter, reportCodecV1, reportCodecV2, reportCodecV3, lggr), nil
+ return NewMercuryProvider(cp, r.chainReader, r.codec, NewMercuryChainReader(r.chain.HeadTracker()), transmitter, reportCodecV1, reportCodecV2, reportCodecV3, reportCodecV4, lggr), nil
}
func (r *Relayer) NewLLOProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.LLOProvider, error) {
diff --git a/core/services/relay/evm/mercury/utils/feeds.go b/core/services/relay/evm/mercury/utils/feeds.go
index 6f8978bbf0d..36d6bc60f58 100644
--- a/core/services/relay/evm/mercury/utils/feeds.go
+++ b/core/services/relay/evm/mercury/utils/feeds.go
@@ -83,6 +83,7 @@ const (
REPORT_V1
REPORT_V2
REPORT_V3
+ REPORT_V4
_
)
@@ -110,3 +111,4 @@ func (f FeedID) Version() FeedVersion {
func (f FeedID) IsV1() bool { return f.Version() == REPORT_V1 }
func (f FeedID) IsV2() bool { return f.Version() == REPORT_V2 }
func (f FeedID) IsV3() bool { return f.Version() == REPORT_V3 }
+func (f FeedID) IsV4() bool { return f.Version() == REPORT_V4 }
diff --git a/core/services/relay/evm/mercury/v4/data_source.go b/core/services/relay/evm/mercury/v4/data_source.go
new file mode 100644
index 00000000000..f9c2c2d5de0
--- /dev/null
+++ b/core/services/relay/evm/mercury/v4/data_source.go
@@ -0,0 +1,290 @@
+package v4
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+ "sync"
+
+ pkgerrors "github.com/pkg/errors"
+ ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
+ v4types "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
+ v4 "github.com/smartcontractkit/chainlink-data-streams/mercury/v4"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
+ "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/types"
+ mercurytypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/types"
+ mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v4/reportcodec"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+type Runner interface {
+ ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error)
+}
+
+type LatestReportFetcher interface {
+ LatestPrice(ctx context.Context, feedID [32]byte) (*big.Int, error)
+ LatestTimestamp(context.Context) (int64, error)
+}
+
+type datasource struct {
+ pipelineRunner Runner
+ jb job.Job
+ spec pipeline.Spec
+ feedID mercuryutils.FeedID
+ lggr logger.Logger
+ saver ocrcommon.Saver
+ orm types.DataSourceORM
+ codec reportcodec.ReportCodec
+
+ fetcher LatestReportFetcher
+ linkFeedID mercuryutils.FeedID
+ nativeFeedID mercuryutils.FeedID
+
+ mu sync.RWMutex
+
+ chEnhancedTelem chan<- ocrcommon.EnhancedTelemetryMercuryData
+}
+
+var _ v4.DataSource = &datasource{}
+
+func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, feedID mercuryutils.FeedID, lggr logger.Logger, s ocrcommon.Saver, enhancedTelemChan chan ocrcommon.EnhancedTelemetryMercuryData, fetcher LatestReportFetcher, linkFeedID, nativeFeedID mercuryutils.FeedID) *datasource {
+ return &datasource{pr, jb, spec, feedID, lggr, s, orm, reportcodec.ReportCodec{}, fetcher, linkFeedID, nativeFeedID, sync.RWMutex{}, enhancedTelemChan}
+}
+
+func (ds *datasource) Observe(ctx context.Context, repts ocrtypes.ReportTimestamp, fetchMaxFinalizedTimestamp bool) (obs v4types.Observation, pipelineExecutionErr error) {
+ var wg sync.WaitGroup
+ ctx, cancel := context.WithCancel(ctx)
+
+ if fetchMaxFinalizedTimestamp {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ latest, dbErr := ds.orm.LatestReport(ctx, ds.feedID)
+ if dbErr != nil {
+ obs.MaxFinalizedTimestamp.Err = dbErr
+ return
+ }
+ if latest != nil {
+ maxFinalizedBlockNumber, decodeErr := ds.codec.ObservationTimestampFromReport(latest)
+ obs.MaxFinalizedTimestamp.Val, obs.MaxFinalizedTimestamp.Err = int64(maxFinalizedBlockNumber), decodeErr
+ return
+ }
+ obs.MaxFinalizedTimestamp.Val, obs.MaxFinalizedTimestamp.Err = ds.fetcher.LatestTimestamp(ctx)
+ }()
+ }
+
+ var trrs pipeline.TaskRunResults
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ var run *pipeline.Run
+ run, trrs, pipelineExecutionErr = ds.executeRun(ctx)
+ if pipelineExecutionErr != nil {
+ cancel()
+ pipelineExecutionErr = fmt.Errorf("Observe failed while executing run: %w", pipelineExecutionErr)
+ return
+ }
+
+ ds.saver.Save(run)
+
+ var parsed parseOutput
+ parsed, pipelineExecutionErr = ds.parse(trrs)
+ if pipelineExecutionErr != nil {
+ cancel()
+ // This is not expected under normal circumstances
+ ds.lggr.Errorw("Observe failed while parsing run results", "err", pipelineExecutionErr)
+ pipelineExecutionErr = fmt.Errorf("Observe failed while parsing run results: %w", pipelineExecutionErr)
+ return
+ }
+ obs.BenchmarkPrice = parsed.benchmarkPrice
+ obs.Bid = parsed.bid
+ obs.Ask = parsed.ask
+ obs.MarketStatus = parsed.marketStatus
+ }()
+
+ var isLink, isNative bool
+ if ds.feedID == ds.linkFeedID {
+ isLink = true
+ } else {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ obs.LinkPrice.Val, obs.LinkPrice.Err = ds.fetcher.LatestPrice(ctx, ds.linkFeedID)
+ if obs.LinkPrice.Val == nil && obs.LinkPrice.Err == nil {
+ mercurytypes.PriceFeedMissingCount.WithLabelValues(ds.linkFeedID.String()).Inc()
+ ds.lggr.Warnw(fmt.Sprintf("Mercury server was missing LINK feed, using sentinel value of %s", v4.MissingPrice), "linkFeedID", ds.linkFeedID)
+ obs.LinkPrice.Val = v4.MissingPrice
+ } else if obs.LinkPrice.Err != nil {
+ mercurytypes.PriceFeedErrorCount.WithLabelValues(ds.linkFeedID.String()).Inc()
+ ds.lggr.Errorw("Mercury server returned error querying LINK price feed", "err", obs.LinkPrice.Err, "linkFeedID", ds.linkFeedID)
+ }
+ }()
+ }
+
+ if ds.feedID == ds.nativeFeedID {
+ isNative = true
+ } else {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ obs.NativePrice.Val, obs.NativePrice.Err = ds.fetcher.LatestPrice(ctx, ds.nativeFeedID)
+ if obs.NativePrice.Val == nil && obs.NativePrice.Err == nil {
+ mercurytypes.PriceFeedMissingCount.WithLabelValues(ds.nativeFeedID.String()).Inc()
+ ds.lggr.Warnw(fmt.Sprintf("Mercury server was missing native feed, using sentinel value of %s", v4.MissingPrice), "nativeFeedID", ds.nativeFeedID)
+ obs.NativePrice.Val = v4.MissingPrice
+ } else if obs.NativePrice.Err != nil {
+ mercurytypes.PriceFeedErrorCount.WithLabelValues(ds.nativeFeedID.String()).Inc()
+ ds.lggr.Errorw("Mercury server returned error querying native price feed", "err", obs.NativePrice.Err, "nativeFeedID", ds.nativeFeedID)
+ }
+ }()
+ }
+
+ wg.Wait()
+ cancel()
+
+ if pipelineExecutionErr != nil {
+ return
+ }
+
+ if isLink || isNative {
+ // run has now completed so it is safe to use benchmark price
+ if isLink {
+ // This IS the LINK feed, use our observed price
+ obs.LinkPrice.Val, obs.LinkPrice.Err = obs.BenchmarkPrice.Val, obs.BenchmarkPrice.Err
+ }
+ if isNative {
+ // This IS the native feed, use our observed price
+ obs.NativePrice.Val, obs.NativePrice.Err = obs.BenchmarkPrice.Val, obs.BenchmarkPrice.Err
+ }
+ }
+
+ ocrcommon.MaybeEnqueueEnhancedTelem(ds.jb, ds.chEnhancedTelem, ocrcommon.EnhancedTelemetryMercuryData{
+ V4Observation: &obs,
+ TaskRunResults: trrs,
+ RepTimestamp: repts,
+ FeedVersion: mercuryutils.REPORT_V4,
+ FetchMaxFinalizedTimestamp: fetchMaxFinalizedTimestamp,
+ IsLinkFeed: isLink,
+ IsNativeFeed: isNative,
+ })
+
+ return obs, nil
+}
+
+func toBigInt(val interface{}) (*big.Int, error) {
+ dec, err := utils.ToDecimal(val)
+ if err != nil {
+ return nil, err
+ }
+ return dec.BigInt(), nil
+}
+
+type parseOutput struct {
+ benchmarkPrice mercury.ObsResult[*big.Int]
+ bid mercury.ObsResult[*big.Int]
+ ask mercury.ObsResult[*big.Int]
+ marketStatus mercury.ObsResult[uint32]
+}
+
+func (ds *datasource) parse(trrs pipeline.TaskRunResults) (o parseOutput, merr error) {
+ var finaltrrs []pipeline.TaskRunResult
+ for _, trr := range trrs {
+ // only return terminal trrs from executeRun
+ if trr.IsTerminal() {
+ finaltrrs = append(finaltrrs, trr)
+ }
+ }
+
+	// pipeline.TaskRunResults comes ordered asc by index; this is guaranteed
+	// by the pipeline executor
+ if len(finaltrrs) != 4 {
+ return o, fmt.Errorf("invalid number of results, expected: 4, got: %d", len(finaltrrs))
+ }
+
+ merr = errors.Join(
+ setBenchmarkPrice(&o, finaltrrs[0].Result),
+ setBid(&o, finaltrrs[1].Result),
+ setAsk(&o, finaltrrs[2].Result),
+ setMarketStatus(&o, finaltrrs[3].Result),
+ )
+
+ return o, merr
+}
+
+func setBenchmarkPrice(o *parseOutput, res pipeline.Result) error {
+ if res.Error != nil {
+ o.benchmarkPrice.Err = res.Error
+ return res.Error
+ }
+ val, err := toBigInt(res.Value)
+ if err != nil {
+ return fmt.Errorf("failed to parse BenchmarkPrice: %w", err)
+ }
+ o.benchmarkPrice.Val = val
+ return nil
+}
+
+func setBid(o *parseOutput, res pipeline.Result) error {
+ if res.Error != nil {
+ o.bid.Err = res.Error
+ return res.Error
+ }
+ val, err := toBigInt(res.Value)
+ if err != nil {
+ return fmt.Errorf("failed to parse Bid: %w", err)
+ }
+ o.bid.Val = val
+ return nil
+}
+
+func setAsk(o *parseOutput, res pipeline.Result) error {
+ if res.Error != nil {
+ o.ask.Err = res.Error
+ return res.Error
+ }
+ val, err := toBigInt(res.Value)
+ if err != nil {
+ return fmt.Errorf("failed to parse Ask: %w", err)
+ }
+ o.ask.Val = val
+ return nil
+}
+
+func setMarketStatus(o *parseOutput, res pipeline.Result) error {
+ if res.Error != nil {
+ o.marketStatus.Err = res.Error
+ return res.Error
+ }
+ val, err := toBigInt(res.Value)
+ if err != nil {
+ return fmt.Errorf("failed to parse MarketStatus: %w", err)
+ }
+ o.marketStatus.Val = uint32(val.Int64())
+ return nil
+}
+
+// The context passed in here has a timeout of (ObservationTimeout + ObservationGracePeriod).
+// Upon context cancellation, it's expected that we return any usable values within ObservationGracePeriod.
+func (ds *datasource) executeRun(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) {
+ vars := pipeline.NewVarsFrom(map[string]interface{}{
+ "jb": map[string]interface{}{
+ "databaseID": ds.jb.ID,
+ "externalJobID": ds.jb.ExternalJobID,
+ "name": ds.jb.Name.ValueOrZero(),
+ },
+ })
+
+ run, trrs, err := ds.pipelineRunner.ExecuteRun(ctx, ds.spec, vars)
+ if err != nil {
+ return nil, nil, pkgerrors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID)
+ }
+
+ return run, trrs, err
+}
diff --git a/core/services/relay/evm/mercury/v4/data_source_test.go b/core/services/relay/evm/mercury/v4/data_source_test.go
new file mode 100644
index 00000000000..bce9c3c6088
--- /dev/null
+++ b/core/services/relay/evm/mercury/v4/data_source_test.go
@@ -0,0 +1,349 @@
+package v4
+
+import (
+ "context"
+ "math/big"
+ "testing"
+
+ "github.com/pkg/errors"
+ ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+ "github.com/stretchr/testify/assert"
+
+ mercurytypes "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
+ relaymercuryv4 "github.com/smartcontractkit/chainlink-data-streams/mercury/v4"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
+ mercurymocks "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
+ reportcodecv4 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v4/reportcodec"
+)
+
+var _ mercurytypes.ServerFetcher = &mockFetcher{}
+
+type mockFetcher struct {
+ ts int64
+ tsErr error
+ linkPrice *big.Int
+ linkPriceErr error
+ nativePrice *big.Int
+ nativePriceErr error
+}
+
+var feedId utils.FeedID = [32]byte{1}
+var linkFeedId utils.FeedID = [32]byte{2}
+var nativeFeedId utils.FeedID = [32]byte{3}
+
+func (m *mockFetcher) FetchInitialMaxFinalizedBlockNumber(context.Context) (*int64, error) {
+ return nil, nil
+}
+
+func (m *mockFetcher) LatestPrice(ctx context.Context, fId [32]byte) (*big.Int, error) {
+ if fId == linkFeedId {
+ return m.linkPrice, m.linkPriceErr
+ } else if fId == nativeFeedId {
+ return m.nativePrice, m.nativePriceErr
+ }
+ return nil, nil
+}
+
+func (m *mockFetcher) LatestTimestamp(context.Context) (int64, error) {
+ return m.ts, m.tsErr
+}
+
+type mockORM struct {
+ report []byte
+ err error
+}
+
+func (m *mockORM) LatestReport(ctx context.Context, feedID [32]byte) (report []byte, err error) {
+ return m.report, m.err
+}
+
+type mockSaver struct {
+ r *pipeline.Run
+}
+
+func (ms *mockSaver) Save(r *pipeline.Run) {
+ ms.r = r
+}
+
+func Test_Datasource(t *testing.T) {
+ orm := &mockORM{}
+ ds := &datasource{orm: orm, lggr: logger.TestLogger(t)}
+ ctx := testutils.Context(t)
+ repts := ocrtypes.ReportTimestamp{}
+
+ fetcher := &mockFetcher{}
+ ds.fetcher = fetcher
+
+ saver := &mockSaver{}
+ ds.saver = saver
+
+ goodTrrs := []pipeline.TaskRunResult{
+ {
+			// benchmark price
+ Result: pipeline.Result{Value: "122.345"},
+ Task: &mercurymocks.MockTask{},
+ },
+ {
+ // bid
+ Result: pipeline.Result{Value: "121.993"},
+ Task: &mercurymocks.MockTask{},
+ },
+ {
+ // ask
+ Result: pipeline.Result{Value: "123.111"},
+ Task: &mercurymocks.MockTask{},
+ },
+ {
+ // marketStatus
+ Result: pipeline.Result{Value: "1"},
+ Task: &mercurymocks.MockTask{},
+ },
+ }
+
+ ds.pipelineRunner = &mercurymocks.MockRunner{
+ Trrs: goodTrrs,
+ }
+
+ spec := pipeline.Spec{}
+ ds.spec = spec
+
+ t.Run("when fetchMaxFinalizedTimestamp=true", func(t *testing.T) {
+ t.Run("with latest report in database", func(t *testing.T) {
+ orm.report = buildSamplev4Report()
+ orm.err = nil
+
+ obs, err := ds.Observe(ctx, repts, true)
+ assert.NoError(t, err)
+
+ assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
+ assert.Equal(t, int64(124), obs.MaxFinalizedTimestamp.Val)
+ })
+ t.Run("if querying latest report fails", func(t *testing.T) {
+ orm.report = nil
+ orm.err = errors.New("something exploded")
+
+ obs, err := ds.Observe(ctx, repts, true)
+ assert.NoError(t, err)
+
+ assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "something exploded")
+ assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
+ })
+ t.Run("if codec fails to decode", func(t *testing.T) {
+ orm.report = []byte{1, 2, 3}
+ orm.err = nil
+
+ obs, err := ds.Observe(ctx, repts, true)
+ assert.NoError(t, err)
+
+ assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
+ assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
+ })
+
+ orm.report = nil
+ orm.err = nil
+
+ t.Run("if LatestTimestamp returns error", func(t *testing.T) {
+ fetcher.tsErr = errors.New("some error")
+
+ obs, err := ds.Observe(ctx, repts, true)
+ assert.NoError(t, err)
+
+ assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "some error")
+ assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
+ })
+
+ t.Run("if LatestTimestamp succeeds", func(t *testing.T) {
+ fetcher.tsErr = nil
+ fetcher.ts = 123
+
+ obs, err := ds.Observe(ctx, repts, true)
+ assert.NoError(t, err)
+
+ assert.Equal(t, int64(123), obs.MaxFinalizedTimestamp.Val)
+ assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
+ })
+
+ t.Run("if LatestTimestamp succeeds but ts=0 (new feed)", func(t *testing.T) {
+ fetcher.tsErr = nil
+ fetcher.ts = 0
+
+ obs, err := ds.Observe(ctx, repts, true)
+ assert.NoError(t, err)
+
+ assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
+ assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
+ })
+
+ t.Run("when run execution succeeded", func(t *testing.T) {
+ t.Run("when feedId=linkFeedID=nativeFeedId", func(t *testing.T) {
+ t.Cleanup(func() {
+ ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, linkFeedId, nativeFeedId
+ })
+
+ ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, feedId, feedId
+
+ fetcher.ts = 123123
+ fetcher.tsErr = nil
+
+ obs, err := ds.Observe(ctx, repts, true)
+ assert.NoError(t, err)
+
+ assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val)
+ assert.NoError(t, obs.BenchmarkPrice.Err)
+ assert.Equal(t, big.NewInt(121), obs.Bid.Val)
+ assert.NoError(t, obs.Bid.Err)
+ assert.Equal(t, big.NewInt(123), obs.Ask.Val)
+ assert.NoError(t, obs.Ask.Err)
+ assert.Equal(t, int64(123123), obs.MaxFinalizedTimestamp.Val)
+ assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
+ assert.Equal(t, big.NewInt(122), obs.LinkPrice.Val)
+ assert.NoError(t, obs.LinkPrice.Err)
+ assert.Equal(t, big.NewInt(122), obs.NativePrice.Val)
+ assert.NoError(t, obs.NativePrice.Err)
+ assert.Equal(t, uint32(1), obs.MarketStatus.Val)
+ assert.NoError(t, obs.MarketStatus.Err)
+ })
+ })
+ })
+
+ t.Run("when fetchMaxFinalizedTimestamp=false", func(t *testing.T) {
+ t.Run("when run execution fails, returns error", func(t *testing.T) {
+ t.Cleanup(func() {
+ ds.pipelineRunner = &mercurymocks.MockRunner{
+ Trrs: goodTrrs,
+ Err: nil,
+ }
+ })
+
+ ds.pipelineRunner = &mercurymocks.MockRunner{
+ Trrs: goodTrrs,
+ Err: errors.New("run execution failed"),
+ }
+
+ _, err := ds.Observe(ctx, repts, false)
+ assert.EqualError(t, err, "Observe failed while executing run: error executing run for spec ID 0: run execution failed")
+ })
+
+ t.Run("when parsing run results fails, return error", func(t *testing.T) {
+ t.Cleanup(func() {
+ runner := &mercurymocks.MockRunner{
+ Trrs: goodTrrs,
+ Err: nil,
+ }
+ ds.pipelineRunner = runner
+ })
+
+ badTrrs := []pipeline.TaskRunResult{
+ {
+ // benchmark price
+ Result: pipeline.Result{Value: "122.345"},
+ Task: &mercurymocks.MockTask{},
+ },
+ {
+ // bid
+ Result: pipeline.Result{Value: "121.993"},
+ Task: &mercurymocks.MockTask{},
+ },
+ {
+ // ask
+ Result: pipeline.Result{Error: errors.New("some error with ask")},
+ Task: &mercurymocks.MockTask{},
+ },
+ {
+ // marketStatus
+ Result: pipeline.Result{Value: "1"},
+ Task: &mercurymocks.MockTask{},
+ },
+ }
+
+ ds.pipelineRunner = &mercurymocks.MockRunner{
+ Trrs: badTrrs,
+ Err: nil,
+ }
+
+ _, err := ds.Observe(ctx, repts, false)
+ assert.EqualError(t, err, "Observe failed while parsing run results: some error with ask")
+ })
+
+ t.Run("when run execution succeeded", func(t *testing.T) {
+ t.Run("when feedId=linkFeedID=nativeFeedId", func(t *testing.T) {
+ t.Cleanup(func() {
+ ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, linkFeedId, nativeFeedId
+ })
+
+ var feedId utils.FeedID = [32]byte{1}
+ ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, feedId, feedId
+
+ obs, err := ds.Observe(ctx, repts, false)
+ assert.NoError(t, err)
+
+ assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val)
+ assert.NoError(t, obs.BenchmarkPrice.Err)
+ assert.Equal(t, big.NewInt(121), obs.Bid.Val)
+ assert.NoError(t, obs.Bid.Err)
+ assert.Equal(t, big.NewInt(123), obs.Ask.Val)
+ assert.NoError(t, obs.Ask.Err)
+ assert.Equal(t, int64(0), obs.MaxFinalizedTimestamp.Val)
+ assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
+ assert.Equal(t, big.NewInt(122), obs.LinkPrice.Val)
+ assert.NoError(t, obs.LinkPrice.Err)
+ assert.Equal(t, big.NewInt(122), obs.NativePrice.Val)
+ assert.NoError(t, obs.NativePrice.Err)
+ assert.Equal(t, uint32(1), obs.MarketStatus.Val)
+ assert.NoError(t, obs.MarketStatus.Err)
+ })
+
+ t.Run("when fails to fetch linkPrice or nativePrice", func(t *testing.T) {
+ t.Cleanup(func() {
+ fetcher.linkPriceErr = nil
+ fetcher.nativePriceErr = nil
+ })
+
+ fetcher.linkPriceErr = errors.New("some error fetching link price")
+ fetcher.nativePriceErr = errors.New("some error fetching native price")
+
+ obs, err := ds.Observe(ctx, repts, false)
+ assert.NoError(t, err)
+
+ assert.Nil(t, obs.LinkPrice.Val)
+ assert.EqualError(t, obs.LinkPrice.Err, "some error fetching link price")
+ assert.Nil(t, obs.NativePrice.Val)
+ assert.EqualError(t, obs.NativePrice.Err, "some error fetching native price")
+ })
+
+ t.Run("when succeeds to fetch linkPrice or nativePrice but got nil (new feed)", func(t *testing.T) {
+ obs, err := ds.Observe(ctx, repts, false)
+ assert.NoError(t, err)
+
+ assert.Equal(t, obs.LinkPrice.Val, relaymercuryv4.MissingPrice)
+ assert.Nil(t, obs.LinkPrice.Err)
+ assert.Equal(t, obs.NativePrice.Val, relaymercuryv4.MissingPrice)
+ assert.Nil(t, obs.NativePrice.Err)
+ })
+ })
+ })
+}
+
+var sampleFeedID = [32]uint8{28, 145, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
+
+func buildSamplev4Report() []byte {
+ feedID := sampleFeedID
+ timestamp := uint32(124)
+ bp := big.NewInt(242)
+ bid := big.NewInt(243)
+ ask := big.NewInt(244)
+ validFromTimestamp := uint32(123)
+ expiresAt := uint32(456)
+ linkFee := big.NewInt(3334455)
+ nativeFee := big.NewInt(556677)
+ marketStatus := uint32(1)
+
+ b, err := reportcodecv4.ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp, bid, ask, marketStatus)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
diff --git a/core/services/relay/evm/mercury/v4/reportcodec/report_codec.go b/core/services/relay/evm/mercury/v4/reportcodec/report_codec.go
new file mode 100644
index 00000000000..12f3d88e733
--- /dev/null
+++ b/core/services/relay/evm/mercury/v4/reportcodec/report_codec.go
@@ -0,0 +1,82 @@
+package reportcodec
+
+import (
+ "errors"
+ "fmt"
+ "math/big"
+
+ pkgerrors "github.com/pkg/errors"
+ ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ v4 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
+ reporttypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v4/types"
+)
+
+var ReportTypes = reporttypes.GetSchema()
+var maxReportLength = 32 * len(ReportTypes) // each arg is 256 bit EVM word
+var zero = big.NewInt(0)
+
+var _ v4.ReportCodec = &ReportCodec{}
+
+type ReportCodec struct {
+ logger logger.Logger
+ feedID utils.FeedID
+}
+
+func NewReportCodec(feedID [32]byte, lggr logger.Logger) *ReportCodec {
+ return &ReportCodec{lggr, feedID}
+}
+
+func (r *ReportCodec) BuildReport(rf v4.ReportFields) (ocrtypes.Report, error) {
+ var merr error
+ if rf.BenchmarkPrice == nil {
+ merr = errors.Join(merr, errors.New("benchmarkPrice may not be nil"))
+ }
+ if rf.Bid == nil {
+ merr = errors.Join(merr, errors.New("bid may not be nil"))
+ }
+ if rf.Ask == nil {
+ merr = errors.Join(merr, errors.New("ask may not be nil"))
+ }
+ if rf.LinkFee == nil {
+ merr = errors.Join(merr, errors.New("linkFee may not be nil"))
+ } else if rf.LinkFee.Cmp(zero) < 0 {
+ merr = errors.Join(merr, fmt.Errorf("linkFee may not be negative (got: %s)", rf.LinkFee))
+ }
+ if rf.NativeFee == nil {
+ merr = errors.Join(merr, errors.New("nativeFee may not be nil"))
+ } else if rf.NativeFee.Cmp(zero) < 0 {
+ merr = errors.Join(merr, fmt.Errorf("nativeFee may not be negative (got: %s)", rf.NativeFee))
+ }
+ if merr != nil {
+ return nil, merr
+ }
+ reportBytes, err := ReportTypes.Pack(r.feedID, rf.ValidFromTimestamp, rf.Timestamp, rf.NativeFee, rf.LinkFee, rf.ExpiresAt, rf.BenchmarkPrice, rf.Bid, rf.Ask, rf.MarketStatus)
+ return ocrtypes.Report(reportBytes), pkgerrors.Wrap(err, "failed to pack report blob")
+}
+
+func (r *ReportCodec) MaxReportLength(n int) (int, error) {
+ return maxReportLength, nil
+}
+
+func (r *ReportCodec) ObservationTimestampFromReport(report ocrtypes.Report) (uint32, error) {
+ decoded, err := r.Decode(report)
+ if err != nil {
+ return 0, err
+ }
+ return decoded.ObservationsTimestamp, nil
+}
+
+func (r *ReportCodec) Decode(report ocrtypes.Report) (*reporttypes.Report, error) {
+ return reporttypes.Decode(report)
+}
+
+func (r *ReportCodec) BenchmarkPriceFromReport(report ocrtypes.Report) (*big.Int, error) {
+ decoded, err := r.Decode(report)
+ if err != nil {
+ return nil, err
+ }
+ return decoded.BenchmarkPrice, nil
+}
diff --git a/core/services/relay/evm/mercury/v4/reportcodec/report_codec_test.go b/core/services/relay/evm/mercury/v4/reportcodec/report_codec_test.go
new file mode 100644
index 00000000000..b62f42ef575
--- /dev/null
+++ b/core/services/relay/evm/mercury/v4/reportcodec/report_codec_test.go
@@ -0,0 +1,163 @@
+package reportcodec
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ v4 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
+)
+
+func newValidReportFields() v4.ReportFields {
+ return v4.ReportFields{
+ Timestamp: 242,
+ BenchmarkPrice: big.NewInt(243),
+ Bid: big.NewInt(244),
+ Ask: big.NewInt(245),
+ ValidFromTimestamp: 123,
+ ExpiresAt: 20,
+ LinkFee: big.NewInt(456),
+ NativeFee: big.NewInt(457),
+ MarketStatus: 1,
+ }
+}
+
+func Test_ReportCodec_BuildReport(t *testing.T) {
+ r := ReportCodec{}
+
+ t.Run("BuildReport errors on zero values", func(t *testing.T) {
+ _, err := r.BuildReport(v4.ReportFields{})
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "benchmarkPrice may not be nil")
+ assert.Contains(t, err.Error(), "linkFee may not be nil")
+ assert.Contains(t, err.Error(), "nativeFee may not be nil")
+ })
+
+ t.Run("BuildReport constructs a report from observations", func(t *testing.T) {
+ rf := newValidReportFields()
+ // only need to test happy path since validations are done in relaymercury
+
+ report, err := r.BuildReport(rf)
+ require.NoError(t, err)
+
+ reportElems := make(map[string]interface{})
+ err = ReportTypes.UnpackIntoMap(reportElems, report)
+ require.NoError(t, err)
+
+ assert.Equal(t, int(reportElems["observationsTimestamp"].(uint32)), 242)
+ assert.Equal(t, reportElems["benchmarkPrice"].(*big.Int).Int64(), int64(243))
+ assert.Equal(t, reportElems["bid"].(*big.Int).Int64(), int64(244))
+ assert.Equal(t, reportElems["ask"].(*big.Int).Int64(), int64(245))
+ assert.Equal(t, reportElems["validFromTimestamp"].(uint32), uint32(123))
+ assert.Equal(t, reportElems["expiresAt"].(uint32), uint32(20))
+ assert.Equal(t, reportElems["linkFee"].(*big.Int).Int64(), int64(456))
+ assert.Equal(t, reportElems["nativeFee"].(*big.Int).Int64(), int64(457))
+ assert.Equal(t, reportElems["marketStatus"].(uint32), uint32(1))
+
+ assert.Equal(t, types.Report{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, report)
+ max, err := r.MaxReportLength(4)
+ require.NoError(t, err)
+ assert.LessOrEqual(t, len(report), max)
+
+ t.Run("Decode decodes the report", func(t *testing.T) {
+ decoded, err := r.Decode(report)
+ require.NoError(t, err)
+
+ require.NotNil(t, decoded)
+
+ assert.Equal(t, uint32(242), decoded.ObservationsTimestamp)
+ assert.Equal(t, big.NewInt(243), decoded.BenchmarkPrice)
+ assert.Equal(t, big.NewInt(244), decoded.Bid)
+ assert.Equal(t, big.NewInt(245), decoded.Ask)
+ assert.Equal(t, uint32(123), decoded.ValidFromTimestamp)
+ assert.Equal(t, uint32(20), decoded.ExpiresAt)
+ assert.Equal(t, big.NewInt(456), decoded.LinkFee)
+ assert.Equal(t, big.NewInt(457), decoded.NativeFee)
+ assert.Equal(t, uint32(1), decoded.MarketStatus)
+ })
+ })
+
+ t.Run("errors on negative fee", func(t *testing.T) {
+ rf := newValidReportFields()
+ rf.LinkFee = big.NewInt(-1)
+ rf.NativeFee = big.NewInt(-1)
+ _, err := r.BuildReport(rf)
+ require.Error(t, err)
+
+ assert.Contains(t, err.Error(), "linkFee may not be negative (got: -1)")
+ assert.Contains(t, err.Error(), "nativeFee may not be negative (got: -1)")
+ })
+
+ t.Run("Decode errors on invalid report", func(t *testing.T) {
+ _, err := r.Decode([]byte{1, 2, 3})
+ assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
+
+ longBad := make([]byte, 64)
+ for i := 0; i < len(longBad); i++ {
+ longBad[i] = byte(i)
+ }
+ _, err = r.Decode(longBad)
+ assert.EqualError(t, err, "failed to decode report: abi: improperly encoded uint32 value")
+ })
+}
+
+func buildSampleReport(ts int64) []byte {
+ feedID := [32]byte{'f', 'o', 'o'}
+ timestamp := uint32(ts)
+ bp := big.NewInt(242)
+ bid := big.NewInt(243)
+ ask := big.NewInt(244)
+ validFromTimestamp := uint32(123)
+ expiresAt := uint32(456)
+ linkFee := big.NewInt(3334455)
+ nativeFee := big.NewInt(556677)
+ marketStatus := uint32(1)
+
+ b, err := ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp, bid, ask, marketStatus)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
+
+func Test_ReportCodec_ObservationTimestampFromReport(t *testing.T) {
+ r := ReportCodec{}
+
+ t.Run("ObservationTimestampFromReport extracts observation timestamp from a valid report", func(t *testing.T) {
+ report := buildSampleReport(123)
+
+ ts, err := r.ObservationTimestampFromReport(report)
+ require.NoError(t, err)
+
+ assert.Equal(t, ts, uint32(123))
+ })
+ t.Run("ObservationTimestampFromReport returns error when report is invalid", func(t *testing.T) {
+ report := []byte{1, 2, 3}
+
+ _, err := r.ObservationTimestampFromReport(report)
+ require.Error(t, err)
+
+ assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
+ })
+}
+
+func Test_ReportCodec_BenchmarkPriceFromReport(t *testing.T) {
+ r := ReportCodec{}
+
+ t.Run("BenchmarkPriceFromReport extracts the benchmark price from valid report", func(t *testing.T) {
+ report := buildSampleReport(123)
+
+ bp, err := r.BenchmarkPriceFromReport(report)
+ require.NoError(t, err)
+
+ assert.Equal(t, big.NewInt(242), bp)
+ })
+ t.Run("BenchmarkPriceFromReport errors on invalid report", func(t *testing.T) {
+ _, err := r.BenchmarkPriceFromReport([]byte{1, 2, 3})
+ require.Error(t, err)
+ assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
+ })
+}
diff --git a/core/services/relay/evm/mercury/v4/types/types.go b/core/services/relay/evm/mercury/v4/types/types.go
new file mode 100644
index 00000000000..3abdd262a65
--- /dev/null
+++ b/core/services/relay/evm/mercury/v4/types/types.go
@@ -0,0 +1,58 @@
+package reporttypes
+
+import (
+ "fmt"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+)
+
+var schema = GetSchema()
+
+func GetSchema() abi.Arguments {
+ mustNewType := func(t string) abi.Type {
+ result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{})
+ if err != nil {
+ panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err))
+ }
+ return result
+ }
+ return abi.Arguments([]abi.Argument{
+ {Name: "feedId", Type: mustNewType("bytes32")},
+ {Name: "validFromTimestamp", Type: mustNewType("uint32")},
+ {Name: "observationsTimestamp", Type: mustNewType("uint32")},
+ {Name: "nativeFee", Type: mustNewType("uint192")},
+ {Name: "linkFee", Type: mustNewType("uint192")},
+ {Name: "expiresAt", Type: mustNewType("uint32")},
+ {Name: "benchmarkPrice", Type: mustNewType("int192")},
+ {Name: "bid", Type: mustNewType("int192")},
+ {Name: "ask", Type: mustNewType("int192")},
+ {Name: "marketStatus", Type: mustNewType("uint32")},
+ })
+}
+
+type Report struct {
+ FeedId [32]byte
+ ObservationsTimestamp uint32
+ BenchmarkPrice *big.Int
+ Bid *big.Int
+ Ask *big.Int
+ ValidFromTimestamp uint32
+ ExpiresAt uint32
+ LinkFee *big.Int
+ NativeFee *big.Int
+ MarketStatus uint32
+}
+
+// Decode is made available to external users (e.g. the mercury server)
+func Decode(report []byte) (*Report, error) {
+ values, err := schema.Unpack(report)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode report: %w", err)
+ }
+ decoded := new(Report)
+ if err = schema.Copy(decoded, values); err != nil {
+ return nil, fmt.Errorf("failed to copy report values to struct: %w", err)
+ }
+ return decoded, nil
+}
diff --git a/core/services/relay/evm/mercury_provider.go b/core/services/relay/evm/mercury_provider.go
index 48882b701c9..9393f66b0dd 100644
--- a/core/services/relay/evm/mercury_provider.go
+++ b/core/services/relay/evm/mercury_provider.go
@@ -8,13 +8,12 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/services"
commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
-
mercurytypes "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
v1 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v1"
v2 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2"
v3 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3"
+ v4 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
"github.com/smartcontractkit/chainlink-data-streams/mercury"
-
httypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker/types"
"github.com/smartcontractkit/chainlink/v2/core/logger"
evmmercury "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury"
@@ -30,6 +29,7 @@ type mercuryProvider struct {
reportCodecV1 v1.ReportCodec
reportCodecV2 v2.ReportCodec
reportCodecV3 v3.ReportCodec
+ reportCodecV4 v4.ReportCodec
mercuryChainReader mercurytypes.ChainReader
logger logger.Logger
ms services.MultiStart
@@ -44,6 +44,7 @@ func NewMercuryProvider(
reportCodecV1 v1.ReportCodec,
reportCodecV2 v2.ReportCodec,
reportCodecV3 v3.ReportCodec,
+ reportCodecV4 v4.ReportCodec,
lggr logger.Logger,
) *mercuryProvider {
return &mercuryProvider{
@@ -54,6 +55,7 @@ func NewMercuryProvider(
reportCodecV1,
reportCodecV2,
reportCodecV3,
+ reportCodecV4,
mercuryChainReader,
lggr,
services.MultiStart{},
@@ -115,6 +117,10 @@ func (p *mercuryProvider) ReportCodecV3() v3.ReportCodec {
return p.reportCodecV3
}
+func (p *mercuryProvider) ReportCodecV4() v4.ReportCodec {
+ return p.reportCodecV4
+}
+
func (p *mercuryProvider) ContractTransmitter() ocrtypes.ContractTransmitter {
return p.transmitter
}
diff --git a/core/services/synchronization/telem/telem.pb.go b/core/services/synchronization/telem/telem.pb.go
index e1945bc26d3..d51b9628e22 100644
--- a/core/services/synchronization/telem/telem.pb.go
+++ b/core/services/synchronization/telem/telem.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc v4.25.1
// source: core/services/synchronization/telem/telem.proto
@@ -264,7 +264,7 @@ func file_core_services_synchronization_telem_telem_proto_rawDescGZIP() []byte {
}
var file_core_services_synchronization_telem_telem_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_core_services_synchronization_telem_telem_proto_goTypes = []interface{}{
+var file_core_services_synchronization_telem_telem_proto_goTypes = []any{
(*TelemRequest)(nil), // 0: telem.TelemRequest
(*TelemBatchRequest)(nil), // 1: telem.TelemBatchRequest
(*TelemResponse)(nil), // 2: telem.TelemResponse
@@ -287,7 +287,7 @@ func file_core_services_synchronization_telem_telem_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_core_services_synchronization_telem_telem_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_core_services_synchronization_telem_telem_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*TelemRequest); i {
case 0:
return &v.state
@@ -299,7 +299,7 @@ func file_core_services_synchronization_telem_telem_proto_init() {
return nil
}
}
- file_core_services_synchronization_telem_telem_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_core_services_synchronization_telem_telem_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*TelemBatchRequest); i {
case 0:
return &v.state
@@ -311,7 +311,7 @@ func file_core_services_synchronization_telem_telem_proto_init() {
return nil
}
}
- file_core_services_synchronization_telem_telem_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_core_services_synchronization_telem_telem_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*TelemResponse); i {
case 0:
return &v.state
diff --git a/core/services/synchronization/telem/telem_automation_custom.pb.go b/core/services/synchronization/telem/telem_automation_custom.pb.go
index a53339eda05..30ddce6f790 100644
--- a/core/services/synchronization/telem/telem_automation_custom.pb.go
+++ b/core/services/synchronization/telem/telem_automation_custom.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc v4.25.1
// source: core/services/synchronization/telem/telem_automation_custom.proto
@@ -289,7 +289,7 @@ func file_core_services_synchronization_telem_telem_automation_custom_proto_rawD
}
var file_core_services_synchronization_telem_telem_automation_custom_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_core_services_synchronization_telem_telem_automation_custom_proto_goTypes = []interface{}{
+var file_core_services_synchronization_telem_telem_automation_custom_proto_goTypes = []any{
(*BlockNumber)(nil), // 0: telem.BlockNumber
(*NodeVersion)(nil), // 1: telem.NodeVersion
(*AutomationTelemWrapper)(nil), // 2: telem.AutomationTelemWrapper
@@ -310,7 +310,7 @@ func file_core_services_synchronization_telem_telem_automation_custom_proto_init
return
}
if !protoimpl.UnsafeEnabled {
- file_core_services_synchronization_telem_telem_automation_custom_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_core_services_synchronization_telem_telem_automation_custom_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*BlockNumber); i {
case 0:
return &v.state
@@ -322,7 +322,7 @@ func file_core_services_synchronization_telem_telem_automation_custom_proto_init
return nil
}
}
- file_core_services_synchronization_telem_telem_automation_custom_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_core_services_synchronization_telem_telem_automation_custom_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*NodeVersion); i {
case 0:
return &v.state
@@ -334,7 +334,7 @@ func file_core_services_synchronization_telem_telem_automation_custom_proto_init
return nil
}
}
- file_core_services_synchronization_telem_telem_automation_custom_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_core_services_synchronization_telem_telem_automation_custom_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*AutomationTelemWrapper); i {
case 0:
return &v.state
@@ -347,7 +347,7 @@ func file_core_services_synchronization_telem_telem_automation_custom_proto_init
}
}
}
- file_core_services_synchronization_telem_telem_automation_custom_proto_msgTypes[2].OneofWrappers = []interface{}{
+ file_core_services_synchronization_telem_telem_automation_custom_proto_msgTypes[2].OneofWrappers = []any{
(*AutomationTelemWrapper_BlockNumber)(nil),
(*AutomationTelemWrapper_NodeVersion)(nil),
}
diff --git a/core/services/synchronization/telem/telem_enhanced_ea.pb.go b/core/services/synchronization/telem/telem_enhanced_ea.pb.go
index a9a81dabfcc..c8983a06fea 100644
--- a/core/services/synchronization/telem/telem_enhanced_ea.pb.go
+++ b/core/services/synchronization/telem/telem_enhanced_ea.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc v4.25.1
// source: core/services/synchronization/telem/telem_enhanced_ea.proto
@@ -239,7 +239,7 @@ func file_core_services_synchronization_telem_telem_enhanced_ea_proto_rawDescGZI
}
var file_core_services_synchronization_telem_telem_enhanced_ea_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_core_services_synchronization_telem_telem_enhanced_ea_proto_goTypes = []interface{}{
+var file_core_services_synchronization_telem_telem_enhanced_ea_proto_goTypes = []any{
(*EnhancedEA)(nil), // 0: telem.EnhancedEA
}
var file_core_services_synchronization_telem_telem_enhanced_ea_proto_depIdxs = []int32{
@@ -256,7 +256,7 @@ func file_core_services_synchronization_telem_telem_enhanced_ea_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_core_services_synchronization_telem_telem_enhanced_ea_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_core_services_synchronization_telem_telem_enhanced_ea_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*EnhancedEA); i {
case 0:
return &v.state
diff --git a/core/services/synchronization/telem/telem_enhanced_ea_mercury.pb.go b/core/services/synchronization/telem/telem_enhanced_ea_mercury.pb.go
index e152cb4b152..856619e1931 100644
--- a/core/services/synchronization/telem/telem_enhanced_ea_mercury.pb.go
+++ b/core/services/synchronization/telem/telem_enhanced_ea_mercury.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc v4.25.1
// source: core/services/synchronization/telem/telem_enhanced_ea_mercury.proto
@@ -20,6 +20,56 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
+type MarketStatus int32
+
+const (
+ // Same values as those used by OCR.
+ MarketStatus_UNKNOWN MarketStatus = 0
+ MarketStatus_CLOSED MarketStatus = 1
+ MarketStatus_OPEN MarketStatus = 2
+)
+
+// Enum value maps for MarketStatus.
+var (
+ MarketStatus_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "CLOSED",
+ 2: "OPEN",
+ }
+ MarketStatus_value = map[string]int32{
+ "UNKNOWN": 0,
+ "CLOSED": 1,
+ "OPEN": 2,
+ }
+)
+
+func (x MarketStatus) Enum() *MarketStatus {
+ p := new(MarketStatus)
+ *p = x
+ return p
+}
+
+func (x MarketStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (MarketStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_enumTypes[0].Descriptor()
+}
+
+func (MarketStatus) Type() protoreflect.EnumType {
+ return &file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_enumTypes[0]
+}
+
+func (x MarketStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use MarketStatus.Descriptor instead.
+func (MarketStatus) EnumDescriptor() ([]byte, []int) {
+ return file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDescGZIP(), []int{0}
+}
+
type EnhancedEAMercury struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -58,10 +108,12 @@ type EnhancedEAMercury struct {
ObservationAsk int64 `protobuf:"varint,17,opt,name=observation_ask,json=observationAsk,proto3" json:"observation_ask,omitempty"` // This value overflows, will be reserved and removed in future versions
ObservationBidString string `protobuf:"bytes,23,opt,name=observation_bid_string,json=observationBidString,proto3" json:"observation_bid_string,omitempty"`
ObservationAskString string `protobuf:"bytes,24,opt,name=observation_ask_string,json=observationAskString,proto3" json:"observation_ask_string,omitempty"`
- ConfigDigest string `protobuf:"bytes,18,opt,name=config_digest,json=configDigest,proto3" json:"config_digest,omitempty"`
- Round int64 `protobuf:"varint,19,opt,name=round,proto3" json:"round,omitempty"`
- Epoch int64 `protobuf:"varint,20,opt,name=epoch,proto3" json:"epoch,omitempty"`
- AssetSymbol string `protobuf:"bytes,21,opt,name=asset_symbol,json=assetSymbol,proto3" json:"asset_symbol,omitempty"`
+ // v4
+ ObservationMarketStatus MarketStatus `protobuf:"varint,34,opt,name=observation_market_status,json=observationMarketStatus,proto3,enum=telem.MarketStatus" json:"observation_market_status,omitempty"`
+ ConfigDigest string `protobuf:"bytes,18,opt,name=config_digest,json=configDigest,proto3" json:"config_digest,omitempty"`
+ Round int64 `protobuf:"varint,19,opt,name=round,proto3" json:"round,omitempty"`
+ Epoch int64 `protobuf:"varint,20,opt,name=epoch,proto3" json:"epoch,omitempty"`
+ AssetSymbol string `protobuf:"bytes,21,opt,name=asset_symbol,json=assetSymbol,proto3" json:"asset_symbol,omitempty"`
}
func (x *EnhancedEAMercury) Reset() {
@@ -299,6 +351,13 @@ func (x *EnhancedEAMercury) GetObservationAskString() string {
return ""
}
+func (x *EnhancedEAMercury) GetObservationMarketStatus() MarketStatus {
+ if x != nil {
+ return x.ObservationMarketStatus
+ }
+ return MarketStatus_UNKNOWN
+}
+
func (x *EnhancedEAMercury) GetConfigDigest() string {
if x != nil {
return x.ConfigDigest
@@ -334,7 +393,7 @@ var file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_raw
0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f,
0x74, 0x65, 0x6c, 0x65, 0x6d, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x65, 0x6e, 0x68, 0x61,
0x6e, 0x63, 0x65, 0x64, 0x5f, 0x65, 0x61, 0x5f, 0x6d, 0x65, 0x72, 0x63, 0x75, 0x72, 0x79, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x22, 0xa9, 0x0c, 0x0a,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x22, 0xfa, 0x0c, 0x0a,
0x11, 0x45, 0x6e, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x64, 0x45, 0x41, 0x4d, 0x65, 0x72, 0x63, 0x75,
0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x20, 0x20,
0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b,
@@ -426,19 +485,28 @@ var file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_raw
0x0a, 0x16, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x73,
0x6b, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14,
0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x73, 0x6b, 0x53, 0x74,
- 0x72, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64,
- 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x6f, 0x75,
- 0x6e, 0x64, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x12,
- 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05,
- 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x73,
- 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x73, 0x73,
- 0x65, 0x74, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x42, 0x4e, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68,
- 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x61, 0x63, 0x74, 0x6b, 0x69, 0x74, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x69, 0x6e,
- 0x6b, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x72, 0x69, 0x6e, 0x67, 0x12, 0x4f, 0x0a, 0x19, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x2e,
+ 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x17, 0x6f, 0x62,
+ 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x74, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f,
+ 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x6f,
+ 0x75, 0x6e, 0x64, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x6f, 0x75, 0x6e, 0x64,
+ 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f,
+ 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x73,
+ 0x73, 0x65, 0x74, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x2a, 0x31, 0x0a, 0x0c, 0x4d, 0x61, 0x72,
+ 0x6b, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b,
+ 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44,
+ 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x02, 0x42, 0x4e, 0x5a, 0x4c,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6d, 0x61, 0x72, 0x74,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6b, 0x69, 0x74, 0x2f, 0x63, 0x68, 0x61, 0x69,
+ 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69,
+ 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -453,16 +521,19 @@ func file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_ra
return file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDescData
}
+var file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_goTypes = []interface{}{
- (*EnhancedEAMercury)(nil), // 0: telem.EnhancedEAMercury
+var file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_goTypes = []any{
+ (MarketStatus)(0), // 0: telem.MarketStatus
+ (*EnhancedEAMercury)(nil), // 1: telem.EnhancedEAMercury
}
var file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
+ 0, // 0: telem.EnhancedEAMercury.observation_market_status:type_name -> telem.MarketStatus
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
}
func init() { file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_init() }
@@ -471,7 +542,7 @@ func file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_in
return
}
if !protoimpl.UnsafeEnabled {
- file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*EnhancedEAMercury); i {
case 0:
return &v.state
@@ -489,13 +560,14 @@ func file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_in
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDesc,
- NumEnums: 0,
+ NumEnums: 1,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_goTypes,
DependencyIndexes: file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_depIdxs,
+ EnumInfos: file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_enumTypes,
MessageInfos: file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_msgTypes,
}.Build()
File_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto = out.File
diff --git a/core/services/synchronization/telem/telem_enhanced_ea_mercury.proto b/core/services/synchronization/telem/telem_enhanced_ea_mercury.proto
index 8488eb1d509..bb41ff86ee3 100644
--- a/core/services/synchronization/telem/telem_enhanced_ea_mercury.proto
+++ b/core/services/synchronization/telem/telem_enhanced_ea_mercury.proto
@@ -4,6 +4,13 @@ option go_package = "github.com/smartcontractkit/chainlink/v2/core/services/sync
package telem;
+enum MarketStatus {
+ // Same values as those used by OCR.
+ UNKNOWN = 0;
+ CLOSED = 1;
+ OPEN = 2;
+}
+
message EnhancedEAMercury {
uint32 version = 32;
@@ -44,6 +51,8 @@ message EnhancedEAMercury {
int64 observation_ask=17; // This value overflows, will be reserved and removed in future versions
string observation_bid_string = 23;
string observation_ask_string = 24;
+ // v4
+ MarketStatus observation_market_status=34;
string config_digest = 18;
int64 round=19;
diff --git a/core/services/synchronization/telem/telem_functions_request.pb.go b/core/services/synchronization/telem/telem_functions_request.pb.go
index 0a4a2649b4e..89aa9e3fe37 100644
--- a/core/services/synchronization/telem/telem_functions_request.pb.go
+++ b/core/services/synchronization/telem/telem_functions_request.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc v4.25.1
// source: core/services/synchronization/telem/telem_functions_request.proto
@@ -119,7 +119,7 @@ func file_core_services_synchronization_telem_telem_functions_request_proto_rawD
}
var file_core_services_synchronization_telem_telem_functions_request_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_core_services_synchronization_telem_telem_functions_request_proto_goTypes = []interface{}{
+var file_core_services_synchronization_telem_telem_functions_request_proto_goTypes = []any{
(*FunctionsRequest)(nil), // 0: telem.FunctionsRequest
}
var file_core_services_synchronization_telem_telem_functions_request_proto_depIdxs = []int32{
@@ -136,7 +136,7 @@ func file_core_services_synchronization_telem_telem_functions_request_proto_init
return
}
if !protoimpl.UnsafeEnabled {
- file_core_services_synchronization_telem_telem_functions_request_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_core_services_synchronization_telem_telem_functions_request_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*FunctionsRequest); i {
case 0:
return &v.state
diff --git a/go.mod b/go.mod
index 326c06396da..8e2103eb246 100644
--- a/go.mod
+++ b/go.mod
@@ -72,9 +72,9 @@ require (
github.com/shopspring/decimal v1.4.0
github.com/smartcontractkit/chain-selectors v1.0.10
github.com/smartcontractkit/chainlink-automation v1.0.4
- github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731121127-5ae22cf04996
+ github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45
- github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240718160222-2dc0c8136bfa
+ github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799
diff --git a/go.sum b/go.sum
index 2ec1753593f..73d6d5b227a 100644
--- a/go.sum
+++ b/go.sum
@@ -1136,12 +1136,12 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq
github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE=
github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8=
github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731121127-5ae22cf04996 h1:6s4cTIE3NbATxWLrD5JLCq097PC5Y4GKK/Kk4fhURpY=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731121127-5ae22cf04996/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc h1:nNZqLasN8y5huDKX76JUZtni7WkUI36J61//czbJpDM=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs=
-github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240718160222-2dc0c8136bfa h1:g75H8oh2ws52s8BekwvGQ9XvBVu3E7WM1rfiA0PN0zk=
-github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240718160222-2dc0c8136bfa/go.mod h1:wZvLHX/Sd9hskN51016cTFcT3G62KXVa6xbVDS7tRjc=
+github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM=
+github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f/go.mod h1:V/86loaFSH0dqqUEHqyXVbyNqDRSjvcf9BRomWFTljU=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 h1:BCHu4pNP6arrcHLEWx61XjLaonOd2coQNyL0NTUcaMc=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827/go.mod h1:OPX+wC2TWQsyLNpR7daMt2vMpmsNcoBxbZyGTHr6tiA=
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e h1:PzwzlHNv1YbJ6ZIdl/pIFRoOuOS4V4WLvjZvFUnZFL4=
diff --git a/integration-tests/go.mod b/integration-tests/go.mod
index 29af8b4c217..ed693f4fccc 100644
--- a/integration-tests/go.mod
+++ b/integration-tests/go.mod
@@ -28,7 +28,7 @@ require (
github.com/shopspring/decimal v1.4.0
github.com/slack-go/slack v0.12.2
github.com/smartcontractkit/chainlink-automation v1.0.4
- github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731121127-5ae22cf04996
+ github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc
github.com/smartcontractkit/chainlink-testing-framework v1.33.0
github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240405215812-5a72bc9af239
github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000
@@ -377,7 +377,7 @@ require (
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/smartcontractkit/chain-selectors v1.0.10 // indirect
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 // indirect
- github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240718160222-2dc0c8136bfa // indirect
+ github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f // indirect
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 // indirect
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e // indirect
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 // indirect
diff --git a/integration-tests/go.sum b/integration-tests/go.sum
index 57b55a8b017..ca3ce8d903e 100644
--- a/integration-tests/go.sum
+++ b/integration-tests/go.sum
@@ -1486,12 +1486,12 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq
github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE=
github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8=
github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731121127-5ae22cf04996 h1:6s4cTIE3NbATxWLrD5JLCq097PC5Y4GKK/Kk4fhURpY=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731121127-5ae22cf04996/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc h1:nNZqLasN8y5huDKX76JUZtni7WkUI36J61//czbJpDM=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs=
-github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240718160222-2dc0c8136bfa h1:g75H8oh2ws52s8BekwvGQ9XvBVu3E7WM1rfiA0PN0zk=
-github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240718160222-2dc0c8136bfa/go.mod h1:wZvLHX/Sd9hskN51016cTFcT3G62KXVa6xbVDS7tRjc=
+github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM=
+github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f/go.mod h1:V/86loaFSH0dqqUEHqyXVbyNqDRSjvcf9BRomWFTljU=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 h1:BCHu4pNP6arrcHLEWx61XjLaonOd2coQNyL0NTUcaMc=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827/go.mod h1:OPX+wC2TWQsyLNpR7daMt2vMpmsNcoBxbZyGTHr6tiA=
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e h1:PzwzlHNv1YbJ6ZIdl/pIFRoOuOS4V4WLvjZvFUnZFL4=
diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod
index 093a6ac6c58..3d1ae6c7a98 100644
--- a/integration-tests/load/go.mod
+++ b/integration-tests/load/go.mod
@@ -16,7 +16,7 @@ require (
github.com/rs/zerolog v1.31.0
github.com/slack-go/slack v0.12.2
github.com/smartcontractkit/chainlink-automation v1.0.4
- github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731121127-5ae22cf04996
+ github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc
github.com/smartcontractkit/chainlink-testing-framework v1.33.0
github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20240214231432-4ad5eb95178c
github.com/smartcontractkit/chainlink/v2 v2.9.0-beta0.0.20240216210048-da02459ddad8
@@ -369,7 +369,7 @@ require (
github.com/shopspring/decimal v1.4.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/smartcontractkit/chain-selectors v1.0.10 // indirect
- github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240718160222-2dc0c8136bfa // indirect
+ github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f // indirect
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 // indirect
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e // indirect
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 // indirect
diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum
index 0db884f178e..2a54ec9254f 100644
--- a/integration-tests/load/go.sum
+++ b/integration-tests/load/go.sum
@@ -1468,12 +1468,12 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq
github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE=
github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8=
github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731121127-5ae22cf04996 h1:6s4cTIE3NbATxWLrD5JLCq097PC5Y4GKK/Kk4fhURpY=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731121127-5ae22cf04996/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc h1:nNZqLasN8y5huDKX76JUZtni7WkUI36J61//czbJpDM=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs=
-github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240718160222-2dc0c8136bfa h1:g75H8oh2ws52s8BekwvGQ9XvBVu3E7WM1rfiA0PN0zk=
-github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240718160222-2dc0c8136bfa/go.mod h1:wZvLHX/Sd9hskN51016cTFcT3G62KXVa6xbVDS7tRjc=
+github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM=
+github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f/go.mod h1:V/86loaFSH0dqqUEHqyXVbyNqDRSjvcf9BRomWFTljU=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 h1:BCHu4pNP6arrcHLEWx61XjLaonOd2coQNyL0NTUcaMc=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827/go.mod h1:OPX+wC2TWQsyLNpR7daMt2vMpmsNcoBxbZyGTHr6tiA=
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e h1:PzwzlHNv1YbJ6ZIdl/pIFRoOuOS4V4WLvjZvFUnZFL4=
From 1ac2902c6c514c00341568b8c6dfd799cd262222 Mon Sep 17 00:00:00 2001
From: Ilja Pavlovs <5300706+iljapavlovs@users.noreply.github.com>
Date: Mon, 5 Aug 2024 15:00:39 +0300
Subject: [PATCH 07/52] VRF-1138: Make CTF tests reuse existing VRF Wrapper
(#13854)
* VRF-1138: Make CTF tests reuse existing VRF Wrapper
* VRF-1138: remove old code
* VRF-1138: remove comments
* VRF-1138: refactoring
* VRF-1138: pr comments
* VRF-1138: pr comments
* VRF-1138: fixing lint issues
* VRF-1138: PR comments
---
integration-tests/actions/actions.go | 6 +-
.../actions/vrf/common/actions.go | 30 ++
.../actions/vrf/common/models.go | 1 +
.../actions/vrf/vrfv2/contract_steps.go | 4 +-
.../actions/vrf/vrfv2/setup_steps.go | 40 +--
.../actions/vrf/vrfv2plus/contract_steps.go | 68 ++++-
.../actions/vrf/vrfv2plus/models.go | 4 +-
.../actions/vrf/vrfv2plus/setup_steps.go | 270 +++++++++++++-----
.../contracts/contract_vrf_models.go | 4 +
.../contracts/ethereum_contracts.go | 4 +-
.../contracts/ethereum_vrf_contracts.go | 20 ++
.../contracts/ethereum_vrfv2_contracts.go | 24 ++
.../contracts/ethereum_vrfv2plus_contracts.go | 24 ++
integration-tests/smoke/vrfv2plus_test.go | 45 ++-
.../testconfig/common/vrf/common.go | 36 ++-
integration-tests/testconfig/default.toml | 152 ++++++++++
integration-tests/testconfig/vrfv2/config.go | 6 -
integration-tests/testconfig/vrfv2/vrfv2.toml | 12 +-
.../testconfig/vrfv2plus/vrfv2plus.toml | 23 +-
19 files changed, 599 insertions(+), 174 deletions(-)
diff --git a/integration-tests/actions/actions.go b/integration-tests/actions/actions.go
index 65db18ad6f7..8487e3a264e 100644
--- a/integration-tests/actions/actions.go
+++ b/integration-tests/actions/actions.go
@@ -1216,7 +1216,7 @@ func RandBool() bool {
return rand.Intn(2) == 1
}
-func ContinuouslyGenerateTXsOnChain(sethClient *seth.Client, stopChannel chan bool, l zerolog.Logger) (bool, error) {
+func ContinuouslyGenerateTXsOnChain(sethClient *seth.Client, stopChannel chan bool, wg *sync.WaitGroup, l zerolog.Logger) (bool, error) {
counterContract, err := contracts.DeployCounterContract(sethClient)
if err != nil {
return false, err
@@ -1230,6 +1230,10 @@ func ContinuouslyGenerateTXsOnChain(sethClient *seth.Client, stopChannel chan bo
select {
case <-stopChannel:
l.Info().Str("Number of generated transactions on chain", count.String()).Msg("Stopping generating txs on chain. Desired block number reached.")
+ sleepDuration := time.Second * 10
+ l.Info().Str("Waiting for", sleepDuration.String()).Msg("Waiting for transactions to be mined and avoid nonce issues")
+ time.Sleep(sleepDuration)
+ wg.Done()
return true, nil
default:
err = counterContract.Increment()
diff --git a/integration-tests/actions/vrf/common/actions.go b/integration-tests/actions/vrf/common/actions.go
index e599c705ef0..1300ac8b726 100644
--- a/integration-tests/actions/vrf/common/actions.go
+++ b/integration-tests/actions/vrf/common/actions.go
@@ -20,6 +20,7 @@ import (
ctf_test_env "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env"
"github.com/smartcontractkit/chainlink-testing-framework/utils/conversions"
seth_utils "github.com/smartcontractkit/chainlink-testing-framework/utils/seth"
+ "github.com/smartcontractkit/chainlink-testing-framework/utils/testcontext"
"github.com/smartcontractkit/chainlink/integration-tests/actions"
"github.com/smartcontractkit/chainlink/integration-tests/client"
@@ -384,6 +385,35 @@ func BuildNewCLEnvForVRF(l zerolog.Logger, t *testing.T, envConfig VRFEnvConfig,
return env, sethClient, nil
}
+func LoadExistingCLEnvForVRF(
+ t *testing.T,
+ envConfig VRFEnvConfig,
+ commonExistingEnvConfig *vrf_common_config.ExistingEnvConfig,
+ l zerolog.Logger,
+) (*test_env.CLClusterTestEnv, *seth.Client, error) {
+ env, err := test_env.NewCLTestEnvBuilder().
+ WithTestInstance(t).
+ WithTestConfig(&envConfig.TestConfig).
+ WithCustomCleanup(envConfig.CleanupFn).
+ Build()
+ if err != nil {
+ return nil, nil, fmt.Errorf("%s, err: %w", "error creating test env", err)
+ }
+ evmNetwork, err := env.GetFirstEvmNetwork()
+ if err != nil {
+ return nil, nil, err
+ }
+ sethClient, err := seth_utils.GetChainClient(envConfig.TestConfig, *evmNetwork)
+ if err != nil {
+ return nil, nil, err
+ }
+ err = FundNodesIfNeeded(testcontext.Get(t), commonExistingEnvConfig, sethClient, l)
+ if err != nil {
+ return nil, nil, err
+ }
+ return env, sethClient, nil
+}
+
func GetRPCUrl(env *test_env.CLClusterTestEnv, chainID int64) (string, error) {
provider, err := env.GetRpcProvider(chainID)
if err != nil {
diff --git a/integration-tests/actions/vrf/common/models.go b/integration-tests/actions/vrf/common/models.go
index 9baa5c96e1d..f51fd84ba07 100644
--- a/integration-tests/actions/vrf/common/models.go
+++ b/integration-tests/actions/vrf/common/models.go
@@ -55,6 +55,7 @@ type VRFContracts struct {
VRFV2PlusConsumer []contracts.VRFv2PlusLoadTestConsumer
LinkToken contracts.LinkToken
MockETHLINKFeed contracts.VRFMockETHLINKFeed
+ LinkNativeFeedAddress string
}
type VRFOwnerConfig struct {
diff --git a/integration-tests/actions/vrf/vrfv2/contract_steps.go b/integration-tests/actions/vrf/vrfv2/contract_steps.go
index 324b65b5d6c..1b909be9b83 100644
--- a/integration-tests/actions/vrf/vrfv2/contract_steps.go
+++ b/integration-tests/actions/vrf/vrfv2/contract_steps.go
@@ -635,7 +635,7 @@ func SetupNewConsumersAndSubs(
) ([]contracts.VRFv2LoadTestConsumer, []uint64, error) {
consumers, err := DeployVRFV2Consumers(sethClient, coordinator.Address(), numberOfConsumerContractsToDeployAndAddToSub)
if err != nil {
- return nil, nil, fmt.Errorf("err: %w", err)
+ return nil, nil, err
}
l.Info().
Str("Coordinator", *testConfig.VRFv2.ExistingEnvConfig.ExistingEnvConfig.CoordinatorAddress).
@@ -649,7 +649,7 @@ func SetupNewConsumersAndSubs(
numberOfSubToCreate,
)
if err != nil {
- return nil, nil, fmt.Errorf("err: %w", err)
+ return nil, nil, err
}
return consumers, subIDs, nil
}
diff --git a/integration-tests/actions/vrf/vrfv2/setup_steps.go b/integration-tests/actions/vrf/vrfv2/setup_steps.go
index c13aed807a9..b5852f82815 100644
--- a/integration-tests/actions/vrf/vrfv2/setup_steps.go
+++ b/integration-tests/actions/vrf/vrfv2/setup_steps.go
@@ -8,8 +8,6 @@ import (
"github.com/smartcontractkit/seth"
- seth_utils "github.com/smartcontractkit/chainlink-testing-framework/utils/seth"
-
"github.com/ethereum/go-ethereum/common"
"github.com/rs/zerolog"
"golang.org/x/sync/errgroup"
@@ -371,38 +369,30 @@ func SetupVRFV2ForNewEnv(
func SetupVRFV2ForExistingEnv(t *testing.T, envConfig vrfcommon.VRFEnvConfig, l zerolog.Logger) (*vrfcommon.VRFContracts, *vrfcommon.VRFKeyData, *test_env.CLClusterTestEnv, *seth.Client, error) {
commonExistingEnvConfig := envConfig.TestConfig.VRFv2.ExistingEnvConfig.ExistingEnvConfig
- env, err := test_env.NewCLTestEnvBuilder().
- WithTestInstance(t).
- WithTestConfig(&envConfig.TestConfig).
- WithCustomCleanup(envConfig.CleanupFn).
- Build()
- if err != nil {
- return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error creating test env", err)
- }
- evmNetwork, err := env.GetFirstEvmNetwork()
- if err != nil {
- return nil, nil, nil, nil, err
- }
- sethClient, err := seth_utils.GetChainClient(envConfig.TestConfig, *evmNetwork)
+ env, sethClient, err := vrfcommon.LoadExistingCLEnvForVRF(
+ t,
+ envConfig,
+ commonExistingEnvConfig,
+ l,
+ )
if err != nil {
- return nil, nil, nil, nil, err
+ return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading existing CL env", err)
}
coordinator, err := contracts.LoadVRFCoordinatorV2(sethClient, *commonExistingEnvConfig.ConsumerAddress)
if err != nil {
return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading VRFCoordinator2", err)
}
- linkAddr := common.HexToAddress(*commonExistingEnvConfig.LinkAddress)
- linkToken, err := contracts.LoadLinkTokenContract(l, sethClient, linkAddr)
+ linkAddress, err := coordinator.GetLinkAddress(testcontext.Get(t))
if err != nil {
- return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading LinkToken", err)
+ return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error getting Link address from Coordinator", err)
}
- err = vrfcommon.FundNodesIfNeeded(testcontext.Get(t), commonExistingEnvConfig, sethClient, l)
+ linkToken, err := contracts.LoadLinkTokenContract(l, sethClient, common.HexToAddress(linkAddress.String()))
if err != nil {
- return nil, nil, nil, nil, fmt.Errorf("err: %w", err)
+ return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading LinkToken", err)
}
blockHashStoreAddress, err := coordinator.GetBlockHashStoreAddress(testcontext.Get(t))
if err != nil {
- return nil, nil, nil, nil, fmt.Errorf("err: %w", err)
+ return nil, nil, nil, nil, err
}
blockHashStore, err := contracts.LoadBlockHashStore(sethClient, blockHashStoreAddress.String())
if err != nil {
@@ -449,13 +439,13 @@ func SetupSubsAndConsumersForExistingEnv(
l,
)
if err != nil {
- return nil, nil, fmt.Errorf("err: %w", err)
+ return nil, nil, err
}
} else {
addr := common.HexToAddress(*commonExistingEnvConfig.ConsumerAddress)
consumer, err := contracts.LoadVRFv2LoadTestConsumer(sethClient, addr)
if err != nil {
- return nil, nil, fmt.Errorf("err: %w", err)
+ return nil, nil, err
}
consumers = append(consumers, consumer)
subIDs = append(subIDs, *testConfig.VRFv2.ExistingEnvConfig.SubID)
@@ -471,7 +461,7 @@ func SetupSubsAndConsumersForExistingEnv(
l,
)
if err != nil {
- return nil, nil, fmt.Errorf("err: %w", err)
+ return nil, nil, err
}
}
return subIDs, consumers, nil
diff --git a/integration-tests/actions/vrf/vrfv2plus/contract_steps.go b/integration-tests/actions/vrf/vrfv2plus/contract_steps.go
index 479b00d952e..5a4ec9ba11a 100644
--- a/integration-tests/actions/vrf/vrfv2plus/contract_steps.go
+++ b/integration-tests/actions/vrf/vrfv2plus/contract_steps.go
@@ -56,7 +56,7 @@ func DeployVRFV2_5Contracts(
}
batchCoordinator, err := contracts.DeployBatchVRFCoordinatorV2Plus(chainClient, coordinator.Address())
if err != nil {
- return nil, fmt.Errorf("%s, err %w", ErrDeployBatchCoordinatorV2Plus, err)
+ return nil, fmt.Errorf(vrfcommon.ErrGenericFormat, ErrDeployBatchCoordinatorV2Plus, err)
}
return &vrfcommon.VRFContracts{
CoordinatorV2Plus: coordinator,
@@ -407,7 +407,7 @@ func DeployVRFV2PlusDirectFundingContracts(
linkTokenAddress string,
linkEthFeedAddress string,
coordinator contracts.VRFCoordinatorV2_5,
- consumerContractsAmount int,
+ numberOfConsumerContracts int,
wrapperSubId *big.Int,
configGeneral *vrfv2plusconfig.General,
) (*VRFV2PlusWrapperContracts, error) {
@@ -432,7 +432,7 @@ func DeployVRFV2PlusDirectFundingContracts(
return nil, fmt.Errorf(vrfcommon.ErrGenericFormat, ErrDeployWrapper, err)
}
}
- consumers, err := DeployVRFV2PlusWrapperConsumers(sethClient, vrfv2PlusWrapper, consumerContractsAmount)
+ consumers, err := DeployVRFV2PlusWrapperConsumers(sethClient, vrfv2PlusWrapper, numberOfConsumerContracts)
if err != nil {
return nil, err
}
@@ -545,9 +545,9 @@ func WaitRandomWordsFulfilledEvent(
return randomWordsFulfilledEvent, err
}
-func DeployVRFV2PlusWrapperConsumers(client *seth.Client, vrfV2PlusWrapper contracts.VRFV2PlusWrapper, consumerContractsAmount int) ([]contracts.VRFv2PlusWrapperLoadTestConsumer, error) {
+func DeployVRFV2PlusWrapperConsumers(client *seth.Client, vrfV2PlusWrapper contracts.VRFV2PlusWrapper, numberOfConsumerContracts int) ([]contracts.VRFv2PlusWrapperLoadTestConsumer, error) {
var consumers []contracts.VRFv2PlusWrapperLoadTestConsumer
- for i := 1; i <= consumerContractsAmount; i++ {
+ for i := 1; i <= numberOfConsumerContracts; i++ {
loadTestConsumer, err := contracts.DeployVRFV2PlusWrapperLoadTestConsumer(client, vrfV2PlusWrapper.Address())
if err != nil {
return nil, fmt.Errorf(vrfcommon.ErrGenericFormat, ErrAdvancedConsumer, err)
@@ -609,7 +609,7 @@ func SetupNewConsumersAndSubs(
) ([]contracts.VRFv2PlusLoadTestConsumer, []*big.Int, error) {
consumers, err := DeployVRFV2PlusConsumers(sethClient, coordinator, consumerContractsAmount)
if err != nil {
- return nil, nil, fmt.Errorf("err: %w", err)
+ return nil, nil, err
}
l.Info().
Str("Coordinator", *testConfig.VRFv2Plus.ExistingEnvConfig.ExistingEnvConfig.CoordinatorAddress).
@@ -627,7 +627,7 @@ func SetupNewConsumersAndSubs(
*testConfig.VRFv2Plus.General.SubscriptionBillingType,
)
if err != nil {
- return nil, nil, fmt.Errorf("err: %w", err)
+ return nil, nil, err
}
return consumers, subIDs, nil
}
@@ -652,3 +652,57 @@ func CancelSubsAndReturnFunds(ctx context.Context, vrfContracts *vrfcommon.VRFCo
}
}
}
+
+func FundWrapperConsumer(
+ sethClient *seth.Client,
+ subFundingType string,
+ linkToken contracts.LinkToken,
+ wrapperConsumer contracts.VRFv2PlusWrapperLoadTestConsumer,
+ vrfv2PlusConfig *vrfv2plusconfig.General,
+ l zerolog.Logger,
+) error {
+ fundConsumerWithLink := func() error {
+ //fund consumer with Link
+ linkAmount := big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(*vrfv2PlusConfig.WrapperConsumerFundingAmountLink))
+ l.Info().
+ Str("Link Amount", linkAmount.String()).
+ Str("WrapperConsumerAddress", wrapperConsumer.Address()).Msg("Funding WrapperConsumer with Link")
+ return linkToken.Transfer(
+ wrapperConsumer.Address(),
+ linkAmount,
+ )
+ }
+ fundConsumerWithNative := func() error {
+ //fund consumer with Eth (native token)
+ _, err := actions.SendFunds(l, sethClient, actions.FundsToSendPayload{
+ ToAddress: common.HexToAddress(wrapperConsumer.Address()),
+ Amount: conversions.EtherToWei(big.NewFloat(*vrfv2PlusConfig.WrapperConsumerFundingAmountNativeToken)),
+ PrivateKey: sethClient.PrivateKeys[0],
+ })
+ return err
+ }
+ switch vrfv2plusconfig.BillingType(subFundingType) {
+ case vrfv2plusconfig.BillingType_Link:
+ err := fundConsumerWithLink()
+ if err != nil {
+ return err
+ }
+ case vrfv2plusconfig.BillingType_Native:
+ err := fundConsumerWithNative()
+ if err != nil {
+ return err
+ }
+ case vrfv2plusconfig.BillingType_Link_and_Native:
+ err := fundConsumerWithLink()
+ if err != nil {
+ return err
+ }
+ err = fundConsumerWithNative()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("invalid billing type: %s", subFundingType)
+ }
+ return nil
+}
diff --git a/integration-tests/actions/vrf/vrfv2plus/models.go b/integration-tests/actions/vrf/vrfv2plus/models.go
index a2ca8ec582b..5198439c050 100644
--- a/integration-tests/actions/vrf/vrfv2plus/models.go
+++ b/integration-tests/actions/vrf/vrfv2plus/models.go
@@ -5,6 +5,6 @@ import (
)
type VRFV2PlusWrapperContracts struct {
- VRFV2PlusWrapper contracts.VRFV2PlusWrapper
- LoadTestConsumers []contracts.VRFv2PlusWrapperLoadTestConsumer
+ VRFV2PlusWrapper contracts.VRFV2PlusWrapper
+ WrapperConsumers []contracts.VRFv2PlusWrapperLoadTestConsumer
}
diff --git a/integration-tests/actions/vrf/vrfv2plus/setup_steps.go b/integration-tests/actions/vrf/vrfv2plus/setup_steps.go
index f3c7d53d6ee..4833afb9fef 100644
--- a/integration-tests/actions/vrf/vrfv2plus/setup_steps.go
+++ b/integration-tests/actions/vrf/vrfv2plus/setup_steps.go
@@ -8,8 +8,6 @@ import (
"github.com/smartcontractkit/seth"
- seth_utils "github.com/smartcontractkit/chainlink-testing-framework/utils/seth"
-
"github.com/shopspring/decimal"
"golang.org/x/sync/errgroup"
@@ -17,7 +15,6 @@ import (
"github.com/google/uuid"
"github.com/rs/zerolog"
- "github.com/smartcontractkit/chainlink-testing-framework/utils/conversions"
"github.com/smartcontractkit/chainlink-testing-framework/utils/testcontext"
"github.com/smartcontractkit/chainlink/integration-tests/actions"
vrfcommon "github.com/smartcontractkit/chainlink/integration-tests/actions/vrf/common"
@@ -28,7 +25,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
- vrfv2plus_config "github.com/smartcontractkit/chainlink/integration-tests/testconfig/vrfv2plus"
+ vrfv2plusconfig "github.com/smartcontractkit/chainlink/integration-tests/testconfig/vrfv2plus"
"github.com/smartcontractkit/chainlink/integration-tests/types"
)
@@ -201,7 +198,7 @@ func SetupVRFV2_5Environment(
return vrfContracts, &vrfKeyData, nodeTypeToNodeMap, nil
}
-func setupVRFNode(contracts *vrfcommon.VRFContracts, chainID *big.Int, config *vrfv2plus_config.General, pubKeyCompressed string, l zerolog.Logger, vrfNode *vrfcommon.VRFNode) error {
+func setupVRFNode(contracts *vrfcommon.VRFContracts, chainID *big.Int, config *vrfv2plusconfig.General, pubKeyCompressed string, l zerolog.Logger, vrfNode *vrfcommon.VRFNode) error {
vrfJobSpecConfig := vrfcommon.VRFJobSpecConfig{
ForwardingAllowed: *config.VRFJobForwardingAllowed,
CoordinatorAddress: contracts.CoordinatorV2Plus.Address(),
@@ -235,7 +232,10 @@ func setupVRFNode(contracts *vrfcommon.VRFContracts, chainID *big.Int, config *v
nodeConfig := node.NewConfig(vrfNode.CLNode.NodeConfig,
node.WithKeySpecificMaxGasPrice(vrfNode.TXKeyAddressStrings, *config.CLNodeMaxGasPriceGWei),
)
- l.Info().Msg("Restarting Node with new sending key PriceMax configuration")
+ l.Info().
+ Strs("Sending Keys", vrfNode.TXKeyAddressStrings).
+ Int64("Price Max Setting", *config.CLNodeMaxGasPriceGWei).
+ Msg("Restarting Node with new sending key PriceMax configuration")
err = vrfNode.CLNode.Restart(nodeConfig)
if err != nil {
return fmt.Errorf(vrfcommon.ErrGenericFormat, vrfcommon.ErrRestartCLNode, err)
@@ -243,29 +243,119 @@ func setupVRFNode(contracts *vrfcommon.VRFContracts, chainID *big.Int, config *v
return nil
}
-func SetupVRFV2PlusWrapperEnvironment(
+func SetupVRFV2PlusWrapperForExistingEnv(
ctx context.Context,
+ sethClient *seth.Client,
+ vrfContracts *vrfcommon.VRFContracts,
+ keyHash [32]byte,
+ vrfv2PlusTestConfig types.VRFv2PlusTestConfig,
+ numberOfConsumerContracts int,
l zerolog.Logger,
+) (*VRFV2PlusWrapperContracts, *big.Int, error) {
+ config := *vrfv2PlusTestConfig.GetVRFv2PlusConfig()
+ var wrapper contracts.VRFV2PlusWrapper
+ var err error
+ if *config.ExistingEnvConfig.UseExistingWrapper {
+ wrapper, err = contracts.LoadVRFV2PlusWrapper(sethClient, *config.ExistingEnvConfig.WrapperAddress)
+ if err != nil {
+ return nil, nil, fmt.Errorf(vrfcommon.ErrGenericFormat, "error loading VRFV2PlusWrapper", err)
+ }
+ } else {
+ wrapperSubId, err := CreateSubAndFindSubID(ctx, sethClient, vrfContracts.CoordinatorV2Plus)
+ if err != nil {
+ return nil, nil, err
+ }
+ wrapper, err = contracts.DeployVRFV2PlusWrapper(sethClient, vrfContracts.LinkToken.Address(), vrfContracts.LinkNativeFeedAddress, vrfContracts.CoordinatorV2Plus.Address(), wrapperSubId)
+ if err != nil {
+ return nil, nil, fmt.Errorf(vrfcommon.ErrGenericFormat, ErrDeployWrapper, err)
+ }
+ err = FundSubscriptions(
+ big.NewFloat(*config.General.SubscriptionFundingAmountNative),
+ big.NewFloat(*config.General.SubscriptionFundingAmountLink),
+ vrfContracts.LinkToken,
+ vrfContracts.CoordinatorV2Plus,
+ []*big.Int{wrapperSubId},
+ *config.General.SubscriptionBillingType,
+ )
+ if err != nil {
+ return nil, nil, err
+ }
+ err = vrfContracts.CoordinatorV2Plus.AddConsumer(wrapperSubId, wrapper.Address())
+ if err != nil {
+ return nil, nil, err
+ }
+ err = wrapper.SetConfig(
+ *config.General.WrapperGasOverhead,
+ *config.General.CoordinatorGasOverheadNative,
+ *config.General.CoordinatorGasOverheadLink,
+ *config.General.CoordinatorGasOverheadPerWord,
+ *config.General.CoordinatorNativePremiumPercentage,
+ *config.General.CoordinatorLinkPremiumPercentage,
+ keyHash,
+ *config.General.WrapperMaxNumberOfWords,
+ *config.General.StalenessSeconds,
+ decimal.RequireFromString(*config.General.FallbackWeiPerUnitLink).BigInt(),
+ *config.General.FulfillmentFlatFeeNativePPM,
+ *config.General.FulfillmentFlatFeeLinkDiscountPPM,
+ )
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ wrapperSubID, err := wrapper.GetSubID(ctx)
+ if err != nil {
+ return nil, nil, fmt.Errorf(vrfcommon.ErrGenericFormat, "error getting subID", err)
+ }
+ var wrapperConsumers []contracts.VRFv2PlusWrapperLoadTestConsumer
+ if *config.ExistingEnvConfig.CreateFundAddWrapperConsumers {
+ wrapperConsumers, err = DeployVRFV2PlusWrapperConsumers(sethClient, wrapper, numberOfConsumerContracts)
+ if err != nil {
+ return nil, nil, err
+ }
+ } else {
+ wrapperConsumer, err := contracts.LoadVRFV2WrapperLoadTestConsumer(sethClient, *config.ExistingEnvConfig.WrapperConsumerAddress)
+ if err != nil {
+ return nil, nil, fmt.Errorf(vrfcommon.ErrGenericFormat, "error loading VRFV2WrapperLoadTestConsumer", err)
+ }
+ wrapperConsumers = append(wrapperConsumers, wrapperConsumer)
+ }
+ wrapperContracts := &VRFV2PlusWrapperContracts{wrapper, wrapperConsumers}
+ for _, consumer := range wrapperConsumers {
+ err = FundWrapperConsumer(
+ sethClient,
+ *config.General.SubscriptionBillingType,
+ vrfContracts.LinkToken,
+ consumer,
+ config.General,
+ l,
+ )
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ return wrapperContracts, wrapperSubID, nil
+}
+
+func SetupVRFV2PlusWrapperForNewEnv(
+ ctx context.Context,
sethClient *seth.Client,
vrfv2PlusTestConfig types.VRFv2PlusTestConfig,
- linkToken contracts.LinkToken,
- mockNativeLINKFeed contracts.MockETHLINKFeed,
- coordinator contracts.VRFCoordinatorV2_5,
+ vrfContracts *vrfcommon.VRFContracts,
keyHash [32]byte,
wrapperConsumerContractsAmount int,
+ l zerolog.Logger,
) (*VRFV2PlusWrapperContracts, *big.Int, error) {
// external EOA has to create a subscription for the wrapper first
- wrapperSubId, err := CreateSubAndFindSubID(ctx, sethClient, coordinator)
+ wrapperSubId, err := CreateSubAndFindSubID(ctx, sethClient, vrfContracts.CoordinatorV2Plus)
if err != nil {
return nil, nil, err
}
-
vrfv2PlusConfig := vrfv2PlusTestConfig.GetVRFv2PlusConfig().General
wrapperContracts, err := DeployVRFV2PlusDirectFundingContracts(
sethClient,
- linkToken.Address(),
- mockNativeLINKFeed.Address(),
- coordinator,
+ vrfContracts.LinkToken.Address(),
+ vrfContracts.MockETHLINKFeed.Address(),
+ vrfContracts.CoordinatorV2Plus,
wrapperConsumerContractsAmount,
wrapperSubId,
vrfv2PlusConfig,
@@ -273,13 +363,11 @@ func SetupVRFV2PlusWrapperEnvironment(
if err != nil {
return nil, nil, fmt.Errorf(vrfcommon.ErrGenericFormat, vrfcommon.ErrWaitTXsComplete, err)
}
-
// once the wrapper is deployed, wrapper address will become consumer of external EOA subscription
- err = coordinator.AddConsumer(wrapperSubId, wrapperContracts.VRFV2PlusWrapper.Address())
+ err = vrfContracts.CoordinatorV2Plus.AddConsumer(wrapperSubId, wrapperContracts.VRFV2PlusWrapper.Address())
if err != nil {
return nil, nil, err
}
-
err = wrapperContracts.VRFV2PlusWrapper.SetConfig(
*vrfv2PlusConfig.WrapperGasOverhead,
*vrfv2PlusConfig.CoordinatorGasOverheadNative,
@@ -297,53 +385,35 @@ func SetupVRFV2PlusWrapperEnvironment(
if err != nil {
return nil, nil, err
}
-
//fund sub
wrapperSubID, err := wrapperContracts.VRFV2PlusWrapper.GetSubID(ctx)
if err != nil {
return nil, nil, err
}
-
err = FundSubscriptions(
big.NewFloat(*vrfv2PlusTestConfig.GetVRFv2PlusConfig().General.SubscriptionFundingAmountNative),
big.NewFloat(*vrfv2PlusTestConfig.GetVRFv2PlusConfig().General.SubscriptionFundingAmountLink),
- linkToken,
- coordinator,
+ vrfContracts.LinkToken,
+ vrfContracts.CoordinatorV2Plus,
[]*big.Int{wrapperSubID},
*vrfv2PlusConfig.SubscriptionBillingType,
)
if err != nil {
return nil, nil, err
}
-
- //fund consumer with Link
- err = linkToken.Transfer(
- wrapperContracts.LoadTestConsumers[0].Address(),
- big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(*vrfv2PlusConfig.WrapperConsumerFundingAmountLink)),
- )
- if err != nil {
- return nil, nil, err
- }
-
- //fund consumer with Eth (native token)
- _, err = actions.SendFunds(l, sethClient, actions.FundsToSendPayload{
- ToAddress: common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address()),
- Amount: conversions.EtherToWei(big.NewFloat(*vrfv2PlusConfig.WrapperConsumerFundingAmountNativeToken)),
- PrivateKey: sethClient.PrivateKeys[0],
- })
- if err != nil {
- return nil, nil, err
- }
-
- wrapperConsumerBalanceBeforeRequestWei, err := sethClient.Client.BalanceAt(ctx, common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address()), nil)
- if err != nil {
- return nil, nil, err
+ for _, consumer := range wrapperContracts.WrapperConsumers {
+ err = FundWrapperConsumer(
+ sethClient,
+ *vrfv2PlusConfig.SubscriptionBillingType,
+ vrfContracts.LinkToken,
+ consumer,
+ vrfv2PlusConfig,
+ l,
+ )
+ if err != nil {
+ return nil, nil, err
+ }
}
- l.Info().
- Str("WrapperConsumerBalanceBeforeRequestWei", wrapperConsumerBalanceBeforeRequestWei.String()).
- Str("WrapperConsumerAddress", wrapperContracts.LoadTestConsumers[0].Address()).
- Msg("WrapperConsumerBalanceBeforeRequestWei")
-
return wrapperContracts, wrapperSubID, nil
}
@@ -421,47 +491,45 @@ func SetupVRFV2PlusForNewEnv(
func SetupVRFV2PlusForExistingEnv(t *testing.T, envConfig vrfcommon.VRFEnvConfig, l zerolog.Logger) (*vrfcommon.VRFContracts, *vrfcommon.VRFKeyData, *test_env.CLClusterTestEnv, *seth.Client, error) {
commonExistingEnvConfig := envConfig.TestConfig.VRFv2Plus.ExistingEnvConfig.ExistingEnvConfig
- env, err := test_env.NewCLTestEnvBuilder().
- WithTestInstance(t).
- WithTestConfig(&envConfig.TestConfig).
- WithCustomCleanup(envConfig.CleanupFn).
- Build()
- if err != nil {
- return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error creating test env", err)
- }
- evmNetwork, err := env.GetFirstEvmNetwork()
- if err != nil {
- return nil, nil, nil, nil, err
- }
- sethClient, err := seth_utils.GetChainClient(envConfig.TestConfig, *evmNetwork)
+ env, sethClient, err := vrfcommon.LoadExistingCLEnvForVRF(
+ t,
+ envConfig,
+ commonExistingEnvConfig,
+ l,
+ )
if err != nil {
- return nil, nil, nil, nil, err
+ return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading existing CL env", err)
}
coordinator, err := contracts.LoadVRFCoordinatorV2_5(sethClient, *commonExistingEnvConfig.CoordinatorAddress)
if err != nil {
return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading VRFCoordinator2_5", err)
}
- linkToken, err := contracts.LoadLinkTokenContract(l, sethClient, common.HexToAddress(*commonExistingEnvConfig.LinkAddress))
+ linkAddress, err := coordinator.GetLinkAddress(testcontext.Get(t))
+ if err != nil {
+ return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error getting Link address from Coordinator", err)
+ }
+ linkToken, err := contracts.LoadLinkTokenContract(l, sethClient, common.HexToAddress(linkAddress.String()))
if err != nil {
return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading LinkToken", err)
}
- err = vrfcommon.FundNodesIfNeeded(testcontext.Get(t), commonExistingEnvConfig, sethClient, l)
+ linkNativeFeedAddress, err := coordinator.GetLinkNativeFeed(testcontext.Get(t))
if err != nil {
- return nil, nil, nil, nil, fmt.Errorf("err: %w", err)
+		return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error getting LinkNativeFeed address from Coordinator", err)
}
blockHashStoreAddress, err := coordinator.GetBlockHashStoreAddress(testcontext.Get(t))
if err != nil {
- return nil, nil, nil, nil, fmt.Errorf("err: %w", err)
+ return nil, nil, nil, nil, err
}
blockHashStore, err := contracts.LoadBlockHashStore(sethClient, blockHashStoreAddress.String())
if err != nil {
return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading BlockHashStore", err)
}
vrfContracts := &vrfcommon.VRFContracts{
- CoordinatorV2Plus: coordinator,
- VRFV2PlusConsumer: nil,
- LinkToken: linkToken,
- BHS: blockHashStore,
+ CoordinatorV2Plus: coordinator,
+ VRFV2PlusConsumer: nil,
+ LinkToken: linkToken,
+ BHS: blockHashStore,
+ LinkNativeFeedAddress: linkNativeFeedAddress.String(),
}
vrfKey := &vrfcommon.VRFKeyData{
VRFKey: nil,
@@ -500,12 +568,12 @@ func SetupSubsAndConsumersForExistingEnv(
l,
)
if err != nil {
- return nil, nil, fmt.Errorf("err: %w", err)
+ return nil, nil, err
}
} else {
consumer, err := contracts.LoadVRFv2PlusLoadTestConsumer(sethClient, *commonExistingEnvConfig.ConsumerAddress)
if err != nil {
- return nil, nil, fmt.Errorf("err: %w", err)
+ return nil, nil, err
}
consumers = append(consumers, consumer)
var ok bool
@@ -527,21 +595,65 @@ func SetupSubsAndConsumersForExistingEnv(
l,
)
if err != nil {
- return nil, nil, fmt.Errorf("err: %w", err)
+ return nil, nil, err
}
}
return subIDs, consumers, nil
}
func SelectBillingTypeWithDistribution(billingType string, distributionFn func() bool) (bool, error) {
- switch vrfv2plus_config.BillingType(billingType) {
- case vrfv2plus_config.BillingType_Link:
+ switch vrfv2plusconfig.BillingType(billingType) {
+ case vrfv2plusconfig.BillingType_Link:
return false, nil
- case vrfv2plus_config.BillingType_Native:
+ case vrfv2plusconfig.BillingType_Native:
return true, nil
- case vrfv2plus_config.BillingType_Link_and_Native:
+ case vrfv2plusconfig.BillingType_Link_and_Native:
return distributionFn(), nil
default:
return false, fmt.Errorf("invalid billing type: %s", billingType)
}
}
+
+func SetupVRFV2PlusWrapperUniverse(
+ ctx context.Context,
+ sethClient *seth.Client,
+ vrfContracts *vrfcommon.VRFContracts,
+ config *tc.TestConfig,
+ keyHash [32]byte,
+ numberOfConsumerContracts int,
+ l zerolog.Logger,
+) (*VRFV2PlusWrapperContracts, *big.Int, error) {
+ var (
+ wrapperContracts *VRFV2PlusWrapperContracts
+ wrapperSubID *big.Int
+ err error
+ )
+ if *config.VRFv2Plus.General.UseExistingEnv {
+ wrapperContracts, wrapperSubID, err = SetupVRFV2PlusWrapperForExistingEnv(
+ ctx,
+ sethClient,
+ vrfContracts,
+ keyHash,
+ config,
+ numberOfConsumerContracts,
+ l,
+ )
+ if err != nil {
+ return nil, nil, err
+ }
+ } else {
+ wrapperContracts, wrapperSubID, err = SetupVRFV2PlusWrapperForNewEnv(
+ ctx,
+ sethClient,
+ config,
+ vrfContracts,
+ keyHash,
+ numberOfConsumerContracts,
+ l,
+ )
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ return wrapperContracts, wrapperSubID, nil
+}
diff --git a/integration-tests/contracts/contract_vrf_models.go b/integration-tests/contracts/contract_vrf_models.go
index 45825a18ff3..c798c4921c6 100644
--- a/integration-tests/contracts/contract_vrf_models.go
+++ b/integration-tests/contracts/contract_vrf_models.go
@@ -76,6 +76,8 @@ type VRFCoordinatorV2 interface {
WaitForConfigSetEvent(timeout time.Duration) (*CoordinatorConfigSet, error)
OracleWithdraw(recipient common.Address, amount *big.Int) error
GetBlockHashStoreAddress(ctx context.Context) (common.Address, error)
+ GetLinkAddress(ctx context.Context) (common.Address, error)
+ GetLinkNativeFeed(ctx context.Context) (common.Address, error)
}
type VRFCoordinatorV2_5 interface {
@@ -121,6 +123,8 @@ type VRFCoordinatorV2_5 interface {
ParseRandomWordsFulfilled(log types.Log) (*CoordinatorRandomWordsFulfilled, error)
WaitForConfigSetEvent(timeout time.Duration) (*CoordinatorConfigSet, error)
GetBlockHashStoreAddress(ctx context.Context) (common.Address, error)
+ GetLinkAddress(ctx context.Context) (common.Address, error)
+ GetLinkNativeFeed(ctx context.Context) (common.Address, error)
}
type VRFCoordinatorV2PlusUpgradedVersion interface {
diff --git a/integration-tests/contracts/ethereum_contracts.go b/integration-tests/contracts/ethereum_contracts.go
index 2db6aeb4637..5b08c9a9fbf 100644
--- a/integration-tests/contracts/ethereum_contracts.go
+++ b/integration-tests/contracts/ethereum_contracts.go
@@ -770,12 +770,12 @@ func DeployLinkTokenContract(l zerolog.Logger, client *seth.Client) (*EthereumLi
}
func LoadLinkTokenContract(l zerolog.Logger, client *seth.Client, address common.Address) (*EthereumLinkToken, error) {
- abi, err := link_token_interface.LinkTokenMetaData.GetAbi()
+ linkABI, err := link_token_interface.LinkTokenMetaData.GetAbi()
if err != nil {
return &EthereumLinkToken{}, fmt.Errorf("failed to get LinkToken ABI: %w", err)
}
- client.ContractStore.AddABI("LinkToken", *abi)
+ client.ContractStore.AddABI("LinkToken", *linkABI)
client.ContractStore.AddBIN("LinkToken", common.FromHex(link_token_interface.LinkTokenMetaData.Bin))
linkToken, err := link_token_interface.NewLinkToken(address, wrappers.MustNewWrappedContractBackend(nil, client))
diff --git a/integration-tests/contracts/ethereum_vrf_contracts.go b/integration-tests/contracts/ethereum_vrf_contracts.go
index e4dbb87d0b2..a09cc809c63 100644
--- a/integration-tests/contracts/ethereum_vrf_contracts.go
+++ b/integration-tests/contracts/ethereum_vrf_contracts.go
@@ -13,6 +13,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_coordinator_v2_5"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_v2plus_load_test_with_metrics"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrfv2plus_wrapper"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrfv2plus_wrapper_load_test_consumer"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrfv2plus_wrapper_optimism"
"github.com/smartcontractkit/seth"
@@ -546,3 +547,22 @@ func LoadVRFV2PlusWrapperOptimism(seth *seth.Client, addr string) (*EthereumVRFV
wrapper: contract,
}, nil
}
+
+func LoadVRFV2WrapperLoadTestConsumer(seth *seth.Client, addr string) (*EthereumVRFV2PlusWrapperLoadTestConsumer, error) {
+ address := common.HexToAddress(addr)
+ abi, err := vrfv2plus_wrapper_load_test_consumer.VRFV2PlusWrapperLoadTestConsumerMetaData.GetAbi()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get VRFV2PlusWrapperLoadTestConsumer ABI: %w", err)
+ }
+ seth.ContractStore.AddABI("VRFV2PlusWrapperLoadTestConsumer", *abi)
+ seth.ContractStore.AddBIN("VRFV2PlusWrapperLoadTestConsumer", common.FromHex(vrfv2plus_wrapper_load_test_consumer.VRFV2PlusWrapperLoadTestConsumerMetaData.Bin))
+ contract, err := vrfv2plus_wrapper_load_test_consumer.NewVRFV2PlusWrapperLoadTestConsumer(address, wrappers.MustNewWrappedContractBackend(nil, seth))
+ if err != nil {
+ return nil, fmt.Errorf("failed to instantiate VRFV2PlusWrapperLoadTestConsumer instance: %w", err)
+ }
+ return &EthereumVRFV2PlusWrapperLoadTestConsumer{
+ client: seth,
+ address: address,
+ consumer: contract,
+ }, nil
+}
diff --git a/integration-tests/contracts/ethereum_vrfv2_contracts.go b/integration-tests/contracts/ethereum_vrfv2_contracts.go
index a9d1a93769d..df4a6fb9fbe 100644
--- a/integration-tests/contracts/ethereum_vrfv2_contracts.go
+++ b/integration-tests/contracts/ethereum_vrfv2_contracts.go
@@ -594,6 +594,30 @@ func (v *EthereumVRFCoordinatorV2) ParseLog(log types.Log) (generated.AbigenLog,
return v.coordinator.ParseLog(log)
}
+func (v *EthereumVRFCoordinatorV2) GetLinkAddress(ctx context.Context) (common.Address, error) {
+ opts := &bind.CallOpts{
+ From: v.client.MustGetRootKeyAddress(),
+ Context: ctx,
+ }
+ address, err := v.coordinator.LINK(opts)
+ if err != nil {
+ return common.Address{}, err
+ }
+ return address, nil
+}
+
+func (v *EthereumVRFCoordinatorV2) GetLinkNativeFeed(ctx context.Context) (common.Address, error) {
+ opts := &bind.CallOpts{
+ From: v.client.MustGetRootKeyAddress(),
+ Context: ctx,
+ }
+ address, err := v.coordinator.LINKETHFEED(opts)
+ if err != nil {
+ return common.Address{}, err
+ }
+ return address, nil
+}
+
// CancelSubscription cancels subscription by Sub owner,
// return funds to specified address,
// checks if pending requests for a sub exist
diff --git a/integration-tests/contracts/ethereum_vrfv2plus_contracts.go b/integration-tests/contracts/ethereum_vrfv2plus_contracts.go
index 8e099b4f6bc..9b286a1d057 100644
--- a/integration-tests/contracts/ethereum_vrfv2plus_contracts.go
+++ b/integration-tests/contracts/ethereum_vrfv2plus_contracts.go
@@ -340,6 +340,30 @@ func (v *EthereumVRFCoordinatorV2_5) GetBlockHashStoreAddress(ctx context.Contex
return blockHashStoreAddress, nil
}
+func (v *EthereumVRFCoordinatorV2_5) GetLinkAddress(ctx context.Context) (common.Address, error) {
+ opts := &bind.CallOpts{
+ From: v.client.MustGetRootKeyAddress(),
+ Context: ctx,
+ }
+ address, err := v.coordinator.LINK(opts)
+ if err != nil {
+ return common.Address{}, err
+ }
+ return address, nil
+}
+
+func (v *EthereumVRFCoordinatorV2_5) GetLinkNativeFeed(ctx context.Context) (common.Address, error) {
+ opts := &bind.CallOpts{
+ From: v.client.MustGetRootKeyAddress(),
+ Context: ctx,
+ }
+ address, err := v.coordinator.LINKNATIVEFEED(opts)
+ if err != nil {
+ return common.Address{}, err
+ }
+ return address, nil
+}
+
// OwnerCancelSubscription cancels subscription by Coordinator owner
// return funds to sub owner,
// does not check if pending requests for a sub exist
diff --git a/integration-tests/smoke/vrfv2plus_test.go b/integration-tests/smoke/vrfv2plus_test.go
index da2989d8fc9..a1ac5fd5544 100644
--- a/integration-tests/smoke/vrfv2plus_test.go
+++ b/integration-tests/smoke/vrfv2plus_test.go
@@ -289,16 +289,14 @@ func TestVRFv2Plus(t *testing.T) {
t.Run("Direct Funding", func(t *testing.T) {
configCopy := config.MustCopy().(tc.TestConfig)
- wrapperContracts, wrapperSubID, err := vrfv2plus.SetupVRFV2PlusWrapperEnvironment(
+ wrapperContracts, wrapperSubID, err := vrfv2plus.SetupVRFV2PlusWrapperUniverse(
testcontext.Get(t),
- l,
sethClient,
+ vrfContracts,
&configCopy,
- vrfContracts.LinkToken,
- vrfContracts.MockETHLINKFeed,
- vrfContracts.CoordinatorV2Plus,
vrfKey.KeyHash,
1,
+ l,
)
require.NoError(t, err)
@@ -307,7 +305,7 @@ func TestVRFv2Plus(t *testing.T) {
testConfig := configCopy.VRFv2Plus.General
var isNativeBilling = false
- wrapperConsumerJuelsBalanceBeforeRequest, err := vrfContracts.LinkToken.BalanceOf(testcontext.Get(t), wrapperContracts.LoadTestConsumers[0].Address())
+ wrapperConsumerJuelsBalanceBeforeRequest, err := vrfContracts.LinkToken.BalanceOf(testcontext.Get(t), wrapperContracts.WrapperConsumers[0].Address())
require.NoError(t, err, "error getting wrapper consumer balance")
wrapperSubscription, err := vrfContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), wrapperSubID)
@@ -315,7 +313,7 @@ func TestVRFv2Plus(t *testing.T) {
subBalanceBeforeRequest := wrapperSubscription.Balance
randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment(
- wrapperContracts.LoadTestConsumers[0],
+ wrapperContracts.WrapperConsumers[0],
vrfContracts.CoordinatorV2Plus,
vrfKey,
wrapperSubID,
@@ -331,13 +329,13 @@ func TestVRFv2Plus(t *testing.T) {
subBalanceAfterRequest := wrapperSubscription.Balance
require.Equal(t, expectedSubBalanceJuels, subBalanceAfterRequest)
- consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId)
+ consumerStatus, err := wrapperContracts.WrapperConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId)
require.NoError(t, err, "error getting rand request status")
require.True(t, consumerStatus.Fulfilled)
expectedWrapperConsumerJuelsBalance := new(big.Int).Sub(wrapperConsumerJuelsBalanceBeforeRequest, consumerStatus.Paid)
- wrapperConsumerJuelsBalanceAfterRequest, err := vrfContracts.LinkToken.BalanceOf(testcontext.Get(t), wrapperContracts.LoadTestConsumers[0].Address())
+ wrapperConsumerJuelsBalanceAfterRequest, err := vrfContracts.LinkToken.BalanceOf(testcontext.Get(t), wrapperContracts.WrapperConsumers[0].Address())
require.NoError(t, err, "error getting wrapper consumer balance")
require.Equal(t, expectedWrapperConsumerJuelsBalance, wrapperConsumerJuelsBalanceAfterRequest)
@@ -356,7 +354,7 @@ func TestVRFv2Plus(t *testing.T) {
testConfig := configCopy.VRFv2Plus.General
var isNativeBilling = true
- wrapperConsumerBalanceBeforeRequestWei, err := sethClient.Client.BalanceAt(testcontext.Get(t), common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address()), nil)
+ wrapperConsumerBalanceBeforeRequestWei, err := sethClient.Client.BalanceAt(testcontext.Get(t), common.HexToAddress(wrapperContracts.WrapperConsumers[0].Address()), nil)
require.NoError(t, err, "error getting wrapper consumer balance")
wrapperSubscription, err := vrfContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), wrapperSubID)
@@ -364,7 +362,7 @@ func TestVRFv2Plus(t *testing.T) {
subBalanceBeforeRequest := wrapperSubscription.NativeBalance
randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment(
- wrapperContracts.LoadTestConsumers[0],
+ wrapperContracts.WrapperConsumers[0],
vrfContracts.CoordinatorV2Plus,
vrfKey,
wrapperSubID,
@@ -380,13 +378,13 @@ func TestVRFv2Plus(t *testing.T) {
subBalanceAfterRequest := wrapperSubscription.NativeBalance
require.Equal(t, expectedSubBalanceWei, subBalanceAfterRequest)
- consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId)
+ consumerStatus, err := wrapperContracts.WrapperConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId)
require.NoError(t, err, "error getting rand request status")
require.True(t, consumerStatus.Fulfilled)
expectedWrapperConsumerWeiBalance := new(big.Int).Sub(wrapperConsumerBalanceBeforeRequestWei, consumerStatus.Paid)
- wrapperConsumerBalanceAfterRequestWei, err := sethClient.Client.BalanceAt(testcontext.Get(t), common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address()), nil)
+ wrapperConsumerBalanceAfterRequestWei, err := sethClient.Client.BalanceAt(testcontext.Get(t), common.HexToAddress(wrapperContracts.WrapperConsumers[0].Address()), nil)
require.NoError(t, err, "error getting wrapper consumer balance")
require.Equal(t, expectedWrapperConsumerWeiBalance, wrapperConsumerBalanceAfterRequestWei)
@@ -1063,16 +1061,14 @@ func TestVRFv2PlusMigration(t *testing.T) {
t.Run("Test migration of direct billing using VRFV2PlusWrapper subID", func(t *testing.T) {
configCopy := config.MustCopy().(tc.TestConfig)
- wrapperContracts, wrapperSubID, err := vrfv2plus.SetupVRFV2PlusWrapperEnvironment(
+ wrapperContracts, wrapperSubID, err := vrfv2plus.SetupVRFV2PlusWrapperUniverse(
testcontext.Get(t),
- l,
sethClient,
+ vrfContracts,
&configCopy,
- vrfContracts.LinkToken,
- vrfContracts.MockETHLINKFeed,
- vrfContracts.CoordinatorV2Plus,
vrfKey.KeyHash,
1,
+ l,
)
require.NoError(t, err)
subID := wrapperSubID
@@ -1203,7 +1199,7 @@ func TestVRFv2PlusMigration(t *testing.T) {
// Verify rand requests fulfills with Link Token billing
isNativeBilling := false
randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment(
- wrapperContracts.LoadTestConsumers[0],
+ wrapperContracts.WrapperConsumers[0],
newCoordinator,
vrfKey,
subID,
@@ -1212,14 +1208,14 @@ func TestVRFv2PlusMigration(t *testing.T) {
l,
)
require.NoError(t, err, "error requesting randomness and waiting for fulfilment")
- consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId)
+ consumerStatus, err := wrapperContracts.WrapperConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId)
require.NoError(t, err, "error getting rand request status")
require.True(t, consumerStatus.Fulfilled)
// Verify rand requests fulfills with Native Token billing
isNativeBilling = true
randomWordsFulfilledEvent, err = vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment(
- wrapperContracts.LoadTestConsumers[0],
+ wrapperContracts.WrapperConsumers[0],
newCoordinator,
vrfKey,
subID,
@@ -1228,7 +1224,7 @@ func TestVRFv2PlusMigration(t *testing.T) {
l,
)
require.NoError(t, err, "error requesting randomness and waiting for fulfilment")
- consumerStatus, err = wrapperContracts.LoadTestConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId)
+ consumerStatus, err = wrapperContracts.WrapperConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId)
require.NoError(t, err, "error getting rand request status")
require.True(t, consumerStatus.Fulfilled)
})
@@ -1347,11 +1343,10 @@ func TestVRFV2PlusWithBHS(t *testing.T) {
}()
if *configCopy.VRFv2Plus.General.GenerateTXsOnChain {
+ wg.Add(1)
go func() {
- _, err := actions.ContinuouslyGenerateTXsOnChain(sethClient, desiredBlockNumberReached, l)
+ _, err := actions.ContinuouslyGenerateTXsOnChain(sethClient, desiredBlockNumberReached, &wg, l)
require.NoError(t, err)
- // Wait to let the transactions be mined and avoid nonce issues
- time.Sleep(time.Second * 5)
}()
}
wg.Wait()
diff --git a/integration-tests/testconfig/common/vrf/common.go b/integration-tests/testconfig/common/vrf/common.go
index e213191075f..326f7c98c76 100644
--- a/integration-tests/testconfig/common/vrf/common.go
+++ b/integration-tests/testconfig/common/vrf/common.go
@@ -71,10 +71,13 @@ func (c *PerformanceConfig) Validate() error {
type ExistingEnvConfig struct {
CoordinatorAddress *string `toml:"coordinator_address"`
+ UseExistingWrapper *bool `toml:"use_existing_wrapper"`
+ WrapperAddress *string `toml:"wrapper_address"`
ConsumerAddress *string `toml:"consumer_address"`
- LinkAddress *string `toml:"link_address"`
+ WrapperConsumerAddress *string `toml:"wrapper_consumer_address"`
KeyHash *string `toml:"key_hash"`
CreateFundSubsAndAddConsumers *bool `toml:"create_fund_subs_and_add_consumers"`
+ CreateFundAddWrapperConsumers *bool `toml:"create_fund_add_wrapper_consumers"`
NodeSendingKeys []string `toml:"node_sending_keys"`
Funding
}
@@ -83,23 +86,33 @@ func (c *ExistingEnvConfig) Validate() error {
if c.CreateFundSubsAndAddConsumers == nil {
return errors.New("create_fund_subs_and_add_consumers must be set ")
}
+ if c.CreateFundAddWrapperConsumers == nil {
+ return errors.New("create_fund_add_wrapper_consumers must be set ")
+ }
if c.CoordinatorAddress == nil {
return errors.New("coordinator_address must be set when using existing environment")
}
if !common.IsHexAddress(*c.CoordinatorAddress) {
return errors.New("coordinator_address must be a valid hex address")
}
+ if c.UseExistingWrapper == nil {
+ return errors.New("use_existing_wrapper must be set ")
+ }
+ if *c.UseExistingWrapper {
+ if c.WrapperAddress == nil {
+ return errors.New("wrapper_address must be set when using `use_existing_wrapper=true`")
+ }
+ if !common.IsHexAddress(*c.WrapperAddress) {
+ return errors.New("wrapper_address must be a valid hex address")
+ }
+ }
if c.KeyHash == nil {
return errors.New("key_hash must be set when using existing environment")
}
if *c.KeyHash == "" {
return errors.New("key_hash must be a non-empty string")
}
- if *c.CreateFundSubsAndAddConsumers {
- if err := c.Funding.Validate(); err != nil {
- return err
- }
- } else {
+ if !*c.CreateFundSubsAndAddConsumers {
if c.ConsumerAddress == nil || *c.ConsumerAddress == "" {
return errors.New("consumer_address must be set when using existing environment")
}
@@ -107,7 +120,14 @@ func (c *ExistingEnvConfig) Validate() error {
return errors.New("consumer_address must be a valid hex address")
}
}
-
+ if !*c.CreateFundAddWrapperConsumers {
+ if c.WrapperConsumerAddress == nil || *c.WrapperConsumerAddress == "" {
+ return errors.New("wrapper_consumer_address must be set when using existing environment")
+ }
+ if !common.IsHexAddress(*c.WrapperConsumerAddress) {
+ return errors.New("wrapper_consumer_address must be a valid hex address")
+ }
+ }
if c.NodeSendingKeys != nil {
for _, key := range c.NodeSendingKeys {
if !common.IsHexAddress(key) {
@@ -115,7 +135,6 @@ func (c *ExistingEnvConfig) Validate() error {
}
}
}
-
return nil
}
@@ -127,7 +146,6 @@ func (c *Funding) Validate() error {
if c.NodeSendingKeyFundingMin != nil && *c.NodeSendingKeyFundingMin <= 0 {
return errors.New("when set node_sending_key_funding_min must be a positive value")
}
-
return nil
}
diff --git a/integration-tests/testconfig/default.toml b/integration-tests/testconfig/default.toml
index d317d05bc49..e4e216cf4a8 100644
--- a/integration-tests/testconfig/default.toml
+++ b/integration-tests/testconfig/default.toml
@@ -413,3 +413,155 @@ gas_price_estimation_enabled = true
gas_price_estimation_blocks = 100
# priority of the transaction, can be "fast", "standard" or "slow" (the higher the priority, the higher adjustment factor will be used for gas estimation) [default: "standard"]
gas_price_estimation_tx_priority = "standard"
+
+
+[[Seth.networks]]
+name = "Nexon Mainnet"
+transaction_timeout = "3m"
+eip_1559_dynamic_fees = true
+transfer_gas_fee = 21_000
+
+# manual settings, used when gas_price_estimation_enabled is false or when it fails
+# legacy transactions
+gas_price = 30_000_000_000
+
+# EIP-1559 transactions
+gas_fee_cap = 30_000_000_000
+gas_tip_cap = 1_800_000_000
+
+
+[Network.EVMNetworks.NEXON_MAINNET]
+evm_name = "NEXON_MAINNET"
+#evm_urls = ["rpc ws endpoint"]
+#evm_http_urls = ["rpc http endpoint"]
+client_implementation = "Ethereum"
+#evm_keys = ["private keys you want to use"]
+evm_simulated = false
+evm_chainlink_transaction_limit = 5000
+evm_minimum_confirmations = 1
+evm_gas_estimation_buffer = 10000
+evm_supports_eip1559 = true
+evm_default_gas_limit = 6000000
+evm_chain_id = 60118
+
+[[Seth.networks]]
+name = "Nexon Stage"
+transaction_timeout = "3m"
+eip_1559_dynamic_fees = true
+transfer_gas_fee = 21_000
+
+# manual settings, used when gas_price_estimation_enabled is false or when it fails
+# legacy transactions
+gas_price = 30_000_000_000
+
+# EIP-1559 transactions
+gas_fee_cap = 30_000_000_000
+gas_tip_cap = 1_800_000_000
+
+
+[Network.EVMNetworks.NEXON_STAGE]
+evm_name = "NEXON_STAGE"
+#evm_urls = ["rpc ws endpoint"]
+#evm_http_urls = ["rpc http endpoint"]
+client_implementation = "Ethereum"
+#evm_keys = ["private keys you want to use"]
+evm_simulated = false
+evm_chainlink_transaction_limit = 5000
+evm_minimum_confirmations = 1
+evm_gas_estimation_buffer = 10000
+evm_supports_eip1559 = true
+evm_default_gas_limit = 6000000
+evm_chain_id = 847799
+
+
+####
+
+[[Seth.networks]]
+name = "Nexon QA"
+transaction_timeout = "3m"
+eip_1559_dynamic_fees = true
+transfer_gas_fee = 21_000
+
+# manual settings, used when gas_price_estimation_enabled is false or when it fails
+# legacy transactions
+gas_price = 30_000_000_000
+
+# EIP-1559 transactions
+gas_fee_cap = 30_000_000_000
+gas_tip_cap = 1_800_000_000
+
+
+[Network.EVMNetworks.NEXON_QA]
+evm_name = "NEXON_QA"
+#evm_urls = ["rpc ws endpoint"]
+#evm_http_urls = ["rpc http endpoint"]
+client_implementation = "Ethereum"
+#evm_keys = ["private keys you want to use"]
+evm_simulated = false
+evm_chainlink_transaction_limit = 5000
+evm_minimum_confirmations = 1
+evm_gas_estimation_buffer = 10000
+evm_supports_eip1559 = true
+evm_default_gas_limit = 6000000
+evm_chain_id = 807424
+
+#####
+
+[[Seth.networks]]
+name = "Nexon Test"
+transaction_timeout = "3m"
+eip_1559_dynamic_fees = true
+transfer_gas_fee = 21_000
+
+# manual settings, used when gas_price_estimation_enabled is false or when it fails
+# legacy transactions
+gas_price = 30_000_000_000
+
+# EIP-1559 transactions
+gas_fee_cap = 30_000_000_000
+gas_tip_cap = 1_800_000_000
+
+
+[Network.EVMNetworks.NEXON_TEST]
+evm_name = "NEXON_TEST"
+#evm_urls = ["rpc ws endpoint"]
+#evm_http_urls = ["rpc http endpoint"]
+client_implementation = "Ethereum"
+#evm_keys = ["private keys you want to use"]
+evm_simulated = false
+evm_chainlink_transaction_limit = 5000
+evm_minimum_confirmations = 1
+evm_gas_estimation_buffer = 10000
+evm_supports_eip1559 = true
+evm_default_gas_limit = 6000000
+evm_chain_id = 595581
+
+#####
+[[Seth.networks]]
+name = "Nexon Dev"
+transaction_timeout = "3m"
+eip_1559_dynamic_fees = true
+transfer_gas_fee = 21_000
+
+# manual settings, used when gas_price_estimation_enabled is false or when it fails
+# legacy transactions
+gas_price = 30_000_000_000
+
+# EIP-1559 transactions
+gas_fee_cap = 30_000_000_000
+gas_tip_cap = 1_800_000_000
+
+
+[Network.EVMNetworks.NEXON_DEV]
+evm_name = "NEXON_DEV"
+#evm_urls = ["rpc ws endpoint"]
+#evm_http_urls = ["rpc http endpoint"]
+client_implementation = "Ethereum"
+#evm_keys = ["private keys you want to use"]
+evm_simulated = false
+evm_chainlink_transaction_limit = 5000
+evm_minimum_confirmations = 1
+evm_gas_estimation_buffer = 10000
+evm_supports_eip1559 = true
+evm_default_gas_limit = 6000000
+evm_chain_id = 5668
diff --git a/integration-tests/testconfig/vrfv2/config.go b/integration-tests/testconfig/vrfv2/config.go
index 76d54a45d5b..5e940403961 100644
--- a/integration-tests/testconfig/vrfv2/config.go
+++ b/integration-tests/testconfig/vrfv2/config.go
@@ -3,8 +3,6 @@ package testconfig
import (
"errors"
- "github.com/ethereum/go-ethereum/common"
-
vrf_common_config "github.com/smartcontractkit/chainlink/integration-tests/testconfig/common/vrf"
)
@@ -52,10 +50,6 @@ func (c *ExistingEnvConfig) Validate() error {
if *c.SubID == 0 {
return errors.New("sub_id must be positive value")
}
-
- if c.LinkAddress != nil && !common.IsHexAddress(*c.LinkAddress) {
- return errors.New("link_address must be a valid hex address")
- }
}
return c.Funding.Validate()
diff --git a/integration-tests/testconfig/vrfv2/vrfv2.toml b/integration-tests/testconfig/vrfv2/vrfv2.toml
index 011e90c15fd..de7200b1e79 100644
--- a/integration-tests/testconfig/vrfv2/vrfv2.toml
+++ b/integration-tests/testconfig/vrfv2/vrfv2.toml
@@ -113,11 +113,17 @@ bhf_job_run_timeout = "1h"
[VRFv2.ExistingEnv]
coordinator_address = ""
-consumer_address = ""
-sub_id = 1
key_hash = ""
+
+use_existing_wrapper = false
+wrapper_address = ""
create_fund_subs_and_add_consumers = true
-link_address = ""
+sub_id = 1
+consumer_address = ""
+
+create_fund_add_wrapper_consumers = true
+wrapper_consumer_address = ""
+
node_sending_key_funding_min = 10
node_sending_keys = [
"",
diff --git a/integration-tests/testconfig/vrfv2plus/vrfv2plus.toml b/integration-tests/testconfig/vrfv2plus/vrfv2plus.toml
index 8f8aa9530e7..88ca12975f6 100644
--- a/integration-tests/testconfig/vrfv2plus/vrfv2plus.toml
+++ b/integration-tests/testconfig/vrfv2plus/vrfv2plus.toml
@@ -57,7 +57,7 @@ BatchSize = 100
"""
[Common]
-chainlink_node_funding = 0.5
+chainlink_node_funding = 0.7
[VRFv2Plus]
[VRFv2Plus.General]
@@ -138,11 +138,17 @@ bhf_job_run_timeout = "1h"
[VRFv2Plus.ExistingEnv]
coordinator_address = ""
-consumer_address = ""
-sub_id = ""
key_hash = ""
+
+use_existing_wrapper = false
+wrapper_address = ""
create_fund_subs_and_add_consumers = true
-link_address = ""
+sub_id = ""
+consumer_address = ""
+
+create_fund_add_wrapper_consumers = true
+wrapper_consumer_address = ""
+
node_sending_key_funding_min = 1
node_sending_keys = []
@@ -272,7 +278,6 @@ key_hash = "0xd360445bacd26df47086ccf255c4f932d297ed8d5c7334b51eed32f61c541601"
#key_hash = "0x2328cbee29e32d0b6662d6df82ff0fea7be300bd310561c92f515c9ee19464f1"
#key_hash = "0x25f4e2d0509f42ec77db5380f3433a89fe623fa75f65d5b398d5f498327be4dd"
create_fund_subs_and_add_consumers = true
-link_address = "0x0Fd9e8d3aF1aaee056EB9e802c3A762a667b1904"
node_sending_key_funding_min = 10
node_sending_keys = [
"0xD96013C241f1741C35a135321969f92Aae02A12F",
@@ -407,7 +412,6 @@ consumer_address = ""
sub_id = ""
key_hash = "0xe13aa26fe94bfcd2ae055911f4d3bf1aed54ca6cf77af34e17f918802fd69ba1"
create_fund_subs_and_add_consumers = true
-link_address = "0xb1D4538B4571d411F07960EF2838Ce337FE1E80E"
node_sending_key_funding_min = 20
node_sending_keys = [
"0xbE21ae371FcA1aC2d8A152e707D21e68d7d99252",
@@ -551,7 +555,6 @@ consumer_address = ""
sub_id = ""
key_hash = "0x5b03254a80ea3eb72139ff0423cb88be42612780c3dd25f1d95a5ba7708a4be1"
create_fund_subs_and_add_consumers = true
-link_address = "0x0b9d5D9136855f6FEc3c0993feE6E9CE8a297846"
node_sending_key_funding_min = 50
node_sending_keys = [
"0x3D7Da5D6A23CA2240CE576C8638C1798a023920a",
@@ -676,7 +679,6 @@ consumer_address = ""
sub_id = ""
key_hash = "0xf5b4a359df0598eef89872ea2170f2afa844dbf74b417e6d44d4bda9420aceb2"
create_fund_subs_and_add_consumers = true
-link_address = "0x779877A7B0D9E8603169DdbD7836e478b4624789"
node_sending_key_funding_min = 50
node_sending_keys = [
"0x0c0DC7f33A1256f0247c5ea75861d385fa5FED31",
@@ -799,7 +801,6 @@ consumer_address = ""
sub_id = ""
key_hash = "0x4d43763d3eff849a89cf578a42787baa32132d7a80032125710e95b3972cd214"
create_fund_subs_and_add_consumers = true
-link_address = "0x84b9B910527Ad5C03A9Ca831909E21e236EA7b06"
node_sending_key_funding_min = 150
node_sending_keys = [
"0x4EE2Cc6D50E8acb6BaEf673B03559525a6c92fB8",
@@ -878,7 +879,6 @@ consumer_address = ""
sub_id = ""
key_hash = "0x7d5692e71807c4c02f5a109627a9ad2b12a361a346790a306983af9a5e3a186f"
create_fund_subs_and_add_consumers = true
-link_address = "0x92Bd61014c5BDc4A43BBbaAEa63d0694BE43ECDd"
node_sending_key_funding_min = 30
node_sending_keys = [
"0xB97c0C52A2B957b45DA213e652c76090DDd0FEc6",
@@ -965,7 +965,6 @@ consumer_address = ""
sub_id = ""
key_hash = "0xdc023892a41e5fe74ec7c4c2e8c0a808b01aea7acaf2b2ae30f4e08df877c48b"
create_fund_subs_and_add_consumers = true
-link_address = "0xE4DDEDb5A220eC218791dC35b1b4D737ba813EE7"
node_sending_key_funding_min = 30
node_sending_keys = [
"0xF3d9879a75BBD85890056D7c6cB37C555F9b41A3",
@@ -1051,7 +1050,6 @@ consumer_address = ""
sub_id = ""
key_hash = "0x0cb2a18e8b762cb4c8f7b17a6cc02ac7b9d2a3346f048cfd2f5d37677f8747d8"
create_fund_subs_and_add_consumers = true
-link_address = "0xD694472F1CD02E1f3fc3534386bda6802fCFe0f7"
node_sending_key_funding_min = 30
node_sending_keys = [
"0xBFD780Af421e98C35918e10B9d6da7389C3e1D10",
@@ -1138,7 +1136,6 @@ consumer_address = ""
sub_id = ""
key_hash = "0xbc9f525e3e1d9e2336f7c77d5f33f5b60aab3765944617fed7f66a6afecac616"
create_fund_subs_and_add_consumers = true
-link_address = "0x8E3f5E6dFeb4498437149b0d347ef51427dB1DE2"
node_sending_key_funding_min = 30
node_sending_keys = [
]
From 7ec99efc64832750825f8bc6711fb9794d6e40df Mon Sep 17 00:00:00 2001
From: Matthew Pendrey
Date: Mon, 5 Aug 2024 14:29:14 +0100
Subject: [PATCH 08/52] changes to support deterministic message hash in the
remote target (#13935)
---
.changeset/polite-crabs-pretend.md | 5 ++
.../keystone_contracts_setup.go | 60 +++++++++++--------
.../integration_tests/mock_libocr.go | 15 +++--
core/capabilities/integration_tests/setup.go | 11 ++--
.../integration_tests/streams_test.go | 6 +-
core/capabilities/launcher.go | 2 +
core/capabilities/launcher_test.go | 2 +-
.../remote/target/endtoend_test.go | 2 +-
core/capabilities/remote/target/server.go | 44 ++++++++++++--
.../capabilities/remote/target/server_test.go | 47 +++++++++++++--
core/capabilities/remote/trigger_publisher.go | 8 ++-
.../remote/trigger_publisher_test.go | 2 +-
.../capabilities/remote/trigger_subscriber.go | 8 ++-
.../remote/trigger_subscriber_test.go | 2 +-
core/capabilities/streams/trigger_test.go | 2 +-
core/scripts/go.mod | 2 +-
core/scripts/go.sum | 4 +-
...deploy_initialize_capabilities_registry.go | 15 +++--
core/services/registrysyncer/syncer.go | 26 +++++---
core/services/registrysyncer/syncer_test.go | 3 +-
go.mod | 2 +-
go.sum | 4 +-
integration-tests/go.mod | 2 +-
integration-tests/go.sum | 4 +-
integration-tests/load/go.mod | 2 +-
integration-tests/load/go.sum | 4 +-
26 files changed, 202 insertions(+), 82 deletions(-)
create mode 100644 .changeset/polite-crabs-pretend.md
diff --git a/.changeset/polite-crabs-pretend.md b/.changeset/polite-crabs-pretend.md
new file mode 100644
index 00000000000..f8ea63b45c1
--- /dev/null
+++ b/.changeset/polite-crabs-pretend.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+#internal ensure remote target request hash is deterministic
diff --git a/core/capabilities/integration_tests/keystone_contracts_setup.go b/core/capabilities/integration_tests/keystone_contracts_setup.go
index 42269d1bd45..004a4c32a3a 100644
--- a/core/capabilities/integration_tests/keystone_contracts_setup.go
+++ b/core/capabilities/integration_tests/keystone_contracts_setup.go
@@ -91,8 +91,8 @@ func peerToNode(nopID uint32, p peer) (kcr.CapabilitiesRegistryNodeParams, error
}, nil
}
-func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workflowDonPeers []peer, triggerDonPeers []peer,
- targetDonPeerIDs []peer,
+func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workflowDon donInfo, triggerDon donInfo,
+ targetDon donInfo,
transactOpts *bind.TransactOpts, backend *ethBackend) common.Address {
addr, _, reg, err := kcr.DeployCapabilitiesRegistry(transactOpts, backend)
require.NoError(t, err)
@@ -157,7 +157,7 @@ func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workfl
nopID := recLog.NodeOperatorId
nodes := []kcr.CapabilitiesRegistryNodeParams{}
- for _, wfPeer := range workflowDonPeers {
+ for _, wfPeer := range workflowDon.peerIDs {
n, innerErr := peerToNode(nopID, wfPeer)
require.NoError(t, innerErr)
@@ -165,7 +165,7 @@ func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workfl
nodes = append(nodes, n)
}
- for _, triggerPeer := range triggerDonPeers {
+ for _, triggerPeer := range triggerDon.peerIDs {
n, innerErr := peerToNode(nopID, triggerPeer)
require.NoError(t, innerErr)
@@ -173,7 +173,7 @@ func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workfl
nodes = append(nodes, n)
}
- for _, targetPeer := range targetDonPeerIDs {
+ for _, targetPeer := range targetDon.peerIDs {
n, innerErr := peerToNode(nopID, targetPeer)
require.NoError(t, innerErr)
@@ -185,7 +185,7 @@ func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workfl
require.NoError(t, err)
// workflow DON
- ps, err := peers(workflowDonPeers)
+ ps, err := peers(workflowDon.peerIDs)
require.NoError(t, err)
cc := newCapabilityConfig()
@@ -199,22 +199,24 @@ func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workfl
},
}
- workflowDonF := uint8(2)
- _, err = reg.AddDON(transactOpts, ps, cfgs, false, true, workflowDonF)
+ _, err = reg.AddDON(transactOpts, ps, cfgs, false, true, workflowDon.F)
require.NoError(t, err)
// trigger DON
- ps, err = peers(triggerDonPeers)
+ ps, err = peers(triggerDon.peerIDs)
require.NoError(t, err)
- triggerDonF := 1
- config := &pb.RemoteTriggerConfig{
- RegistrationRefresh: durationpb.New(20000 * time.Millisecond),
- RegistrationExpiry: durationpb.New(60000 * time.Millisecond),
- // F + 1
- MinResponsesToAggregate: uint32(triggerDonF) + 1,
+ triggerCapabilityConfig := newCapabilityConfig()
+ triggerCapabilityConfig.RemoteConfig = &pb.CapabilityConfig_RemoteTriggerConfig{
+ RemoteTriggerConfig: &pb.RemoteTriggerConfig{
+ RegistrationRefresh: durationpb.New(60000 * time.Millisecond),
+ RegistrationExpiry: durationpb.New(60000 * time.Millisecond),
+ // F + 1
+ MinResponsesToAggregate: uint32(triggerDon.F) + 1,
+ },
}
- configb, err := proto.Marshal(config)
+
+ configb, err := proto.Marshal(triggerCapabilityConfig)
require.NoError(t, err)
cfgs = []kcr.CapabilitiesRegistryCapabilityConfiguration{
@@ -224,22 +226,31 @@ func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workfl
},
}
- _, err = reg.AddDON(transactOpts, ps, cfgs, true, false, uint8(triggerDonF))
+ _, err = reg.AddDON(transactOpts, ps, cfgs, true, false, triggerDon.F)
require.NoError(t, err)
// target DON
- ps, err = peers(targetDonPeerIDs)
+ ps, err = peers(targetDon.peerIDs)
+ require.NoError(t, err)
+
+ targetCapabilityConfig := newCapabilityConfig()
+ targetCapabilityConfig.RemoteConfig = &pb.CapabilityConfig_RemoteTargetConfig{
+ RemoteTargetConfig: &pb.RemoteTargetConfig{
+ RequestHashExcludedAttributes: []string{"signed_report.Signatures"},
+ },
+ }
+
+ remoteTargetConfigBytes, err := proto.Marshal(targetCapabilityConfig)
require.NoError(t, err)
cfgs = []kcr.CapabilitiesRegistryCapabilityConfiguration{
{
CapabilityId: wid,
- Config: ccb,
+ Config: remoteTargetConfigBytes,
},
}
- targetDonF := uint8(1)
- _, err = reg.AddDON(transactOpts, ps, cfgs, true, false, targetDonF)
+ _, err = reg.AddDON(transactOpts, ps, cfgs, true, false, targetDon.F)
require.NoError(t, err)
backend.Commit()
@@ -253,19 +264,18 @@ func newCapabilityConfig() *pb.CapabilityConfig {
}
}
-func setupForwarderContract(t *testing.T, workflowDonPeers []peer, workflowDonId uint32,
- configVersion uint32, f uint8,
+func setupForwarderContract(t *testing.T, workflowDon donInfo,
transactOpts *bind.TransactOpts, backend *ethBackend) (common.Address, *forwarder.KeystoneForwarder) {
addr, _, fwd, err := forwarder.DeployKeystoneForwarder(transactOpts, backend)
require.NoError(t, err)
backend.Commit()
var signers []common.Address
- for _, p := range workflowDonPeers {
+ for _, p := range workflowDon.peerIDs {
signers = append(signers, common.HexToAddress(p.Signer))
}
- _, err = fwd.SetConfig(transactOpts, workflowDonId, configVersion, f, signers)
+ _, err = fwd.SetConfig(transactOpts, workflowDon.ID, workflowDon.ConfigVersion, workflowDon.F, signers)
require.NoError(t, err)
backend.Commit()
diff --git a/core/capabilities/integration_tests/mock_libocr.go b/core/capabilities/integration_tests/mock_libocr.go
index 39c53d48aff..14ccdce6000 100644
--- a/core/capabilities/integration_tests/mock_libocr.go
+++ b/core/capabilities/integration_tests/mock_libocr.go
@@ -157,10 +157,6 @@ func (m *mockLibOCR) simulateProtocolRound(ctx context.Context) error {
Signer: commontypes.OracleID(i),
Signature: sig,
})
-
- if uint8(len(signatures)) == m.f+1 {
- break
- }
}
for _, node := range m.nodes {
@@ -181,7 +177,16 @@ func (m *mockLibOCR) simulateProtocolRound(ctx context.Context) error {
continue
}
- err = node.Transmit(ctx, types.ConfigDigest{}, 0, report, signatures)
+ // For each node select a random set of f+1 signatures to mimic libocr behaviour
+ s := rand.NewSource(time.Now().UnixNano())
+ r := rand.New(s)
+ indices := r.Perm(len(signatures))
+ selectedSignatures := make([]types.AttributedOnchainSignature, m.f+1)
+ for i := 0; i < int(m.f+1); i++ {
+ selectedSignatures[i] = signatures[indices[i]]
+ }
+
+ err = node.Transmit(ctx, types.ConfigDigest{}, 0, report, selectedSignatures)
if err != nil {
return fmt.Errorf("failed to transmit report: %w", err)
}
diff --git a/core/capabilities/integration_tests/setup.go b/core/capabilities/integration_tests/setup.go
index 0095d2fd9de..69b8c3eaa0a 100644
--- a/core/capabilities/integration_tests/setup.go
+++ b/core/capabilities/integration_tests/setup.go
@@ -68,8 +68,8 @@ func setupStreamDonsWithTransmissionSchedule(ctx context.Context, t *testing.T,
lggr.SetLogLevel(TestLogLevel)
ethBlockchain, transactor := setupBlockchain(t, 1000, 1*time.Second)
- capabilitiesRegistryAddr := setupCapabilitiesRegistryContract(ctx, t, workflowDonInfo.peerIDs, triggerDonInfo.peerIDs, targetDonInfo.peerIDs, transactor, ethBlockchain)
- forwarderAddr, _ := setupForwarderContract(t, workflowDonInfo.peerIDs, workflowDonInfo.ID, 1, workflowDonInfo.F, transactor, ethBlockchain)
+ capabilitiesRegistryAddr := setupCapabilitiesRegistryContract(ctx, t, workflowDonInfo, triggerDonInfo, targetDonInfo, transactor, ethBlockchain)
+ forwarderAddr, _ := setupForwarderContract(t, workflowDonInfo, transactor, ethBlockchain)
consumerAddr, consumer := setupConsumerContract(t, transactor, ethBlockchain, forwarderAddr, workflowOwnerID, workflowName)
var feedIDs []string
@@ -259,9 +259,10 @@ func createDonInfo(t *testing.T, don don) donInfo {
triggerDonInfo := donInfo{
DON: commoncap.DON{
- ID: don.id,
- Members: donPeers,
- F: don.f,
+ ID: don.id,
+ Members: donPeers,
+ F: don.f,
+ ConfigVersion: 1,
},
peerIDs: peerIDs,
keys: donKeys,
diff --git a/core/capabilities/integration_tests/streams_test.go b/core/capabilities/integration_tests/streams_test.go
index 6216e36c856..7be392932f8 100644
--- a/core/capabilities/integration_tests/streams_test.go
+++ b/core/capabilities/integration_tests/streams_test.go
@@ -22,9 +22,9 @@ func Test_AllAtOnceTransmissionSchedule(t *testing.T) {
// The don IDs set in the below calls are inferred from the order in which the dons are added to the capabilities registry
// in the setupCapabilitiesRegistryContract function, should this order change the don IDs will need updating.
- workflowDonInfo := createDonInfo(t, don{id: 1, numNodes: 5, f: 1})
- triggerDonInfo := createDonInfo(t, don{id: 2, numNodes: 7, f: 1})
- targetDonInfo := createDonInfo(t, don{id: 3, numNodes: 4, f: 1})
+ workflowDonInfo := createDonInfo(t, don{id: 1, numNodes: 7, f: 2})
+ triggerDonInfo := createDonInfo(t, don{id: 2, numNodes: 7, f: 2})
+ targetDonInfo := createDonInfo(t, don{id: 3, numNodes: 4, f: 2})
consumer, feedIDs, triggerSink := setupStreamDonsWithTransmissionSchedule(ctx, t, workflowDonInfo, triggerDonInfo, targetDonInfo, 3,
"2s", "allAtOnce")
diff --git a/core/capabilities/launcher.go b/core/capabilities/launcher.go
index b4ade04127b..b30477e4c83 100644
--- a/core/capabilities/launcher.go
+++ b/core/capabilities/launcher.go
@@ -216,6 +216,7 @@ func (w *launcher) addRemoteCapabilities(ctx context.Context, myDON registrysync
int(remoteDON.F+1),
w.lggr,
)
+
// TODO: We need to implement a custom, Mercury-specific
// aggregator here, because there is no guarantee that
// all trigger events in the workflow will have the same
@@ -358,6 +359,7 @@ func (w *launcher) exposeCapabilities(ctx context.Context, myPeerID p2ptypes.Pee
case capabilities.CapabilityTypeTarget:
newTargetServer := func(capability capabilities.BaseCapability, info capabilities.CapabilityInfo) (receiverService, error) {
return target.NewServer(
+ c.RemoteTargetConfig,
myPeerID,
capability.(capabilities.TargetCapability),
info,
diff --git a/core/capabilities/launcher_test.go b/core/capabilities/launcher_test.go
index fb3e6837d00..82b03edcecb 100644
--- a/core/capabilities/launcher_test.go
+++ b/core/capabilities/launcher_test.go
@@ -323,7 +323,7 @@ func TestLauncher_WiresUpClientsForPublicWorkflowDON(t *testing.T) {
// The below state describes a Workflow DON (AcceptsWorkflows = true),
// which exposes the streams-trigger and write_chain capabilities.
// We expect receivers to be wired up and both capabilities to be added to the registry.
- var rtc capabilities.RemoteTriggerConfig
+ rtc := &capabilities.RemoteTriggerConfig{}
rtc.ApplyDefaults()
state := ®istrysyncer.LocalRegistry{
diff --git a/core/capabilities/remote/target/endtoend_test.go b/core/capabilities/remote/target/endtoend_test.go
index 9bbb53d4f66..cfab50f0fe7 100644
--- a/core/capabilities/remote/target/endtoend_test.go
+++ b/core/capabilities/remote/target/endtoend_test.go
@@ -226,7 +226,7 @@ func testRemoteTarget(ctx context.Context, t *testing.T, underlying commoncap.Ta
for i := 0; i < numCapabilityPeers; i++ {
capabilityPeer := capabilityPeers[i]
capabilityDispatcher := broker.NewDispatcherForNode(capabilityPeer)
- capabilityNode := target.NewServer(capabilityPeer, underlying, capInfo, capDonInfo, workflowDONs, capabilityDispatcher,
+ capabilityNode := target.NewServer(&commoncap.RemoteTargetConfig{RequestHashExcludedAttributes: []string{}}, capabilityPeer, underlying, capInfo, capDonInfo, workflowDONs, capabilityDispatcher,
capabilityNodeResponseTimeout, lggr)
servicetest.Run(t, capabilityNode)
broker.RegisterReceiverNode(capabilityPeer, capabilityNode)
diff --git a/core/capabilities/remote/target/server.go b/core/capabilities/remote/target/server.go
index ea9caf81eff..39023ffb3fa 100644
--- a/core/capabilities/remote/target/server.go
+++ b/core/capabilities/remote/target/server.go
@@ -4,10 +4,12 @@ import (
"context"
"crypto/sha256"
"encoding/hex"
+ "fmt"
"sync"
"time"
commoncap "github.com/smartcontractkit/chainlink-common/pkg/capabilities"
+ "github.com/smartcontractkit/chainlink-common/pkg/capabilities/pb"
"github.com/smartcontractkit/chainlink-common/pkg/services"
"github.com/smartcontractkit/chainlink/v2/core/capabilities/remote/target/request"
"github.com/smartcontractkit/chainlink/v2/core/capabilities/remote/types"
@@ -24,7 +26,9 @@ import (
// server communicates with corresponding client on remote nodes.
type server struct {
services.StateMachine
- lggr logger.Logger
+ lggr logger.Logger
+
+ config *commoncap.RemoteTargetConfig
peerID p2ptypes.PeerID
underlying commoncap.TargetCapability
capInfo commoncap.CapabilityInfo
@@ -51,9 +55,14 @@ type requestAndMsgID struct {
messageID string
}
-func NewServer(peerID p2ptypes.PeerID, underlying commoncap.TargetCapability, capInfo commoncap.CapabilityInfo, localDonInfo commoncap.DON,
+func NewServer(config *commoncap.RemoteTargetConfig, peerID p2ptypes.PeerID, underlying commoncap.TargetCapability, capInfo commoncap.CapabilityInfo, localDonInfo commoncap.DON,
workflowDONs map[uint32]commoncap.DON, dispatcher types.Dispatcher, requestTimeout time.Duration, lggr logger.Logger) *server {
+ if config == nil {
+ lggr.Info("no config provided, using default values")
+ config = &commoncap.RemoteTargetConfig{}
+ }
return &server{
+ config: config,
underlying: underlying,
peerID: peerID,
capInfo: capInfo,
@@ -126,11 +135,16 @@ func (r *server) Receive(ctx context.Context, msg *types.MessageBody) {
return
}
+ msgHash, err := r.getMessageHash(msg)
+ if err != nil {
+ r.lggr.Errorw("failed to get message hash", "err", err)
+ return
+ }
+
// A request is uniquely identified by the message id and the hash of the payload to prevent a malicious
// actor from sending a different payload with the same message id
messageId := GetMessageID(msg)
- hash := sha256.Sum256(msg.Payload)
- requestID := messageId + hex.EncodeToString(hash[:])
+ requestID := messageId + hex.EncodeToString(msgHash[:])
if requestIDs, ok := r.messageIDToRequestIDsCount[messageId]; ok {
requestIDs[requestID] = requestIDs[requestID] + 1
@@ -161,12 +175,32 @@ func (r *server) Receive(ctx context.Context, msg *types.MessageBody) {
reqAndMsgID := r.requestIDToRequest[requestID]
- err := reqAndMsgID.request.OnMessage(ctx, msg)
+ err = reqAndMsgID.request.OnMessage(ctx, msg)
if err != nil {
r.lggr.Errorw("request failed to OnMessage new message", "request", reqAndMsgID, "err", err)
}
}
+func (r *server) getMessageHash(msg *types.MessageBody) ([32]byte, error) {
+ req, err := pb.UnmarshalCapabilityRequest(msg.Payload)
+ if err != nil {
+ return [32]byte{}, fmt.Errorf("failed to unmarshal capability request: %w", err)
+ }
+
+ for _, path := range r.config.RequestHashExcludedAttributes {
+ if !req.Inputs.DeleteAtPath(path) {
+ return [32]byte{}, fmt.Errorf("failed to delete attribute from map at path: %s", path)
+ }
+ }
+
+ reqBytes, err := pb.MarshalCapabilityRequest(req)
+ if err != nil {
+ return [32]byte{}, fmt.Errorf("failed to marshal capability request: %w", err)
+ }
+ hash := sha256.Sum256(reqBytes)
+ return hash, nil
+}
+
func GetMessageID(msg *types.MessageBody) string {
return string(msg.MessageId)
}
diff --git a/core/capabilities/remote/target/server_test.go b/core/capabilities/remote/target/server_test.go
index a5aa45efd06..2460a2dd0f7 100644
--- a/core/capabilities/remote/target/server_test.go
+++ b/core/capabilities/remote/target/server_test.go
@@ -2,6 +2,7 @@ package target_test
import (
"context"
+ "strconv"
"testing"
"time"
@@ -11,6 +12,7 @@ import (
commoncap "github.com/smartcontractkit/chainlink-common/pkg/capabilities"
"github.com/smartcontractkit/chainlink-common/pkg/capabilities/pb"
"github.com/smartcontractkit/chainlink-common/pkg/services"
+ "github.com/smartcontractkit/chainlink-common/pkg/values"
"github.com/smartcontractkit/chainlink/v2/core/capabilities/remote/target"
remotetypes "github.com/smartcontractkit/chainlink/v2/core/capabilities/remote/types"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
@@ -18,12 +20,48 @@ import (
p2ptypes "github.com/smartcontractkit/chainlink/v2/core/services/p2p/types"
)
+func Test_Server_ExcludesNonDeterministicInputAttributes(t *testing.T) {
+ ctx := testutils.Context(t)
+
+ numCapabilityPeers := 4
+
+ callers, srvcs := testRemoteTargetServer(ctx, t, &commoncap.RemoteTargetConfig{RequestHashExcludedAttributes: []string{"signed_report.Signatures"}},
+ &TestCapability{}, 10, 9, numCapabilityPeers, 3, 10*time.Minute)
+
+ for idx, caller := range callers {
+ rawInputs := map[string]any{
+ "signed_report": map[string]any{"Signatures": "sig" + strconv.Itoa(idx), "Price": 20},
+ }
+
+ inputs, err := values.NewMap(rawInputs)
+ require.NoError(t, err)
+
+ _, err = caller.Execute(context.Background(),
+ commoncap.CapabilityRequest{
+ Metadata: commoncap.RequestMetadata{
+ WorkflowID: "workflowID",
+ WorkflowExecutionID: "workflowExecutionID",
+ },
+ Inputs: inputs,
+ })
+ require.NoError(t, err)
+ }
+
+ for _, caller := range callers {
+ for i := 0; i < numCapabilityPeers; i++ {
+ msg := <-caller.receivedMessages
+ assert.Equal(t, remotetypes.Error_OK, msg.Error)
+ }
+ }
+ closeServices(t, srvcs)
+}
+
func Test_Server_RespondsAfterSufficientRequests(t *testing.T) {
ctx := testutils.Context(t)
numCapabilityPeers := 4
- callers, srvcs := testRemoteTargetServer(ctx, t, &TestCapability{}, 10, 9, numCapabilityPeers, 3, 10*time.Minute)
+ callers, srvcs := testRemoteTargetServer(ctx, t, &commoncap.RemoteTargetConfig{}, &TestCapability{}, 10, 9, numCapabilityPeers, 3, 10*time.Minute)
for _, caller := range callers {
_, err := caller.Execute(context.Background(),
@@ -50,7 +88,7 @@ func Test_Server_InsufficientCallers(t *testing.T) {
numCapabilityPeers := 4
- callers, srvcs := testRemoteTargetServer(ctx, t, &TestCapability{}, 10, 10, numCapabilityPeers, 3, 100*time.Millisecond)
+ callers, srvcs := testRemoteTargetServer(ctx, t, &commoncap.RemoteTargetConfig{}, &TestCapability{}, 10, 10, numCapabilityPeers, 3, 100*time.Millisecond)
for _, caller := range callers {
_, err := caller.Execute(context.Background(),
@@ -77,7 +115,7 @@ func Test_Server_CapabilityError(t *testing.T) {
numCapabilityPeers := 4
- callers, srvcs := testRemoteTargetServer(ctx, t, &TestErrorCapability{}, 10, 9, numCapabilityPeers, 3, 100*time.Millisecond)
+ callers, srvcs := testRemoteTargetServer(ctx, t, &commoncap.RemoteTargetConfig{}, &TestErrorCapability{}, 10, 9, numCapabilityPeers, 3, 100*time.Millisecond)
for _, caller := range callers {
_, err := caller.Execute(context.Background(),
@@ -100,6 +138,7 @@ func Test_Server_CapabilityError(t *testing.T) {
}
func testRemoteTargetServer(ctx context.Context, t *testing.T,
+ config *commoncap.RemoteTargetConfig,
underlying commoncap.TargetCapability,
numWorkflowPeers int, workflowDonF uint8,
numCapabilityPeers int, capabilityDonF uint8, capabilityNodeResponseTimeout time.Duration) ([]*serverTestClient, []services.Service) {
@@ -150,7 +189,7 @@ func testRemoteTargetServer(ctx context.Context, t *testing.T,
for i := 0; i < numCapabilityPeers; i++ {
capabilityPeer := capabilityPeers[i]
capabilityDispatcher := broker.NewDispatcherForNode(capabilityPeer)
- capabilityNode := target.NewServer(capabilityPeer, underlying, capInfo, capDonInfo, workflowDONs, capabilityDispatcher,
+ capabilityNode := target.NewServer(config, capabilityPeer, underlying, capInfo, capDonInfo, workflowDONs, capabilityDispatcher,
capabilityNodeResponseTimeout, lggr)
require.NoError(t, capabilityNode.Start(ctx))
broker.RegisterReceiverNode(capabilityPeer, capabilityNode)
diff --git a/core/capabilities/remote/trigger_publisher.go b/core/capabilities/remote/trigger_publisher.go
index 35ce41118f5..c1f2fb32c5a 100644
--- a/core/capabilities/remote/trigger_publisher.go
+++ b/core/capabilities/remote/trigger_publisher.go
@@ -21,7 +21,7 @@ import (
//
// TriggerPublisher communicates with corresponding TriggerSubscribers on remote nodes.
type triggerPublisher struct {
- config capabilities.RemoteTriggerConfig
+ config *capabilities.RemoteTriggerConfig
underlying commoncap.TriggerCapability
capInfo commoncap.CapabilityInfo
capDonInfo commoncap.DON
@@ -48,7 +48,11 @@ type pubRegState struct {
var _ types.Receiver = &triggerPublisher{}
var _ services.Service = &triggerPublisher{}
-func NewTriggerPublisher(config capabilities.RemoteTriggerConfig, underlying commoncap.TriggerCapability, capInfo commoncap.CapabilityInfo, capDonInfo commoncap.DON, workflowDONs map[uint32]commoncap.DON, dispatcher types.Dispatcher, lggr logger.Logger) *triggerPublisher {
+func NewTriggerPublisher(config *capabilities.RemoteTriggerConfig, underlying commoncap.TriggerCapability, capInfo commoncap.CapabilityInfo, capDonInfo commoncap.DON, workflowDONs map[uint32]commoncap.DON, dispatcher types.Dispatcher, lggr logger.Logger) *triggerPublisher {
+ if config == nil {
+ lggr.Info("no config provided, using default values")
+ config = &capabilities.RemoteTriggerConfig{}
+ }
config.ApplyDefaults()
return &triggerPublisher{
config: config,
diff --git a/core/capabilities/remote/trigger_publisher_test.go b/core/capabilities/remote/trigger_publisher_test.go
index 1e3000d20ca..2c4a8518965 100644
--- a/core/capabilities/remote/trigger_publisher_test.go
+++ b/core/capabilities/remote/trigger_publisher_test.go
@@ -42,7 +42,7 @@ func TestTriggerPublisher_Register(t *testing.T) {
}
dispatcher := remoteMocks.NewDispatcher(t)
- config := capabilities.RemoteTriggerConfig{
+ config := &capabilities.RemoteTriggerConfig{
RegistrationRefresh: 100 * time.Millisecond,
RegistrationExpiry: 100 * time.Second,
MinResponsesToAggregate: 1,
diff --git a/core/capabilities/remote/trigger_subscriber.go b/core/capabilities/remote/trigger_subscriber.go
index 0ccbf37c61a..2d038e45c08 100644
--- a/core/capabilities/remote/trigger_subscriber.go
+++ b/core/capabilities/remote/trigger_subscriber.go
@@ -23,7 +23,7 @@ import (
//
// TriggerSubscriber communicates with corresponding TriggerReceivers on remote nodes.
type triggerSubscriber struct {
- config capabilities.RemoteTriggerConfig
+ config *capabilities.RemoteTriggerConfig
capInfo commoncap.CapabilityInfo
capDonInfo capabilities.DON
capDonMembers map[p2ptypes.PeerID]struct{}
@@ -55,11 +55,15 @@ var _ services.Service = &triggerSubscriber{}
// TODO makes this configurable with a default
const defaultSendChannelBufferSize = 1000
-func NewTriggerSubscriber(config capabilities.RemoteTriggerConfig, capInfo commoncap.CapabilityInfo, capDonInfo capabilities.DON, localDonInfo capabilities.DON, dispatcher types.Dispatcher, aggregator types.Aggregator, lggr logger.Logger) *triggerSubscriber {
+func NewTriggerSubscriber(config *capabilities.RemoteTriggerConfig, capInfo commoncap.CapabilityInfo, capDonInfo capabilities.DON, localDonInfo capabilities.DON, dispatcher types.Dispatcher, aggregator types.Aggregator, lggr logger.Logger) *triggerSubscriber {
if aggregator == nil {
lggr.Warnw("no aggregator provided, using default MODE aggregator", "capabilityId", capInfo.ID)
aggregator = NewDefaultModeAggregator(uint32(capDonInfo.F + 1))
}
+ if config == nil {
+ lggr.Info("no config provided, using default values")
+ config = &capabilities.RemoteTriggerConfig{}
+ }
config.ApplyDefaults()
capDonMembers := make(map[p2ptypes.PeerID]struct{})
for _, member := range capDonInfo.Members {
diff --git a/core/capabilities/remote/trigger_subscriber_test.go b/core/capabilities/remote/trigger_subscriber_test.go
index 93e962215ab..2e34b03ec5c 100644
--- a/core/capabilities/remote/trigger_subscriber_test.go
+++ b/core/capabilities/remote/trigger_subscriber_test.go
@@ -63,7 +63,7 @@ func TestTriggerSubscriber_RegisterAndReceive(t *testing.T) {
})
// register trigger
- config := capabilities.RemoteTriggerConfig{
+ config := &capabilities.RemoteTriggerConfig{
RegistrationRefresh: 100 * time.Millisecond,
RegistrationExpiry: 100 * time.Second,
MinResponsesToAggregate: 1,
diff --git a/core/capabilities/streams/trigger_test.go b/core/capabilities/streams/trigger_test.go
index cb4cfaa36bc..853f07f2aae 100644
--- a/core/capabilities/streams/trigger_test.go
+++ b/core/capabilities/streams/trigger_test.go
@@ -87,7 +87,7 @@ func TestStreamsTrigger(t *testing.T) {
Members: capMembers,
F: uint8(F),
}
- config := capabilities.RemoteTriggerConfig{
+ config := &capabilities.RemoteTriggerConfig{
MinResponsesToAggregate: uint32(F + 1),
}
subscriber := remote.NewTriggerSubscriber(config, capInfo, capDonInfo, capabilities.DON{}, nil, agg, lggr)
diff --git a/core/scripts/go.mod b/core/scripts/go.mod
index 4ee443d46f8..4e250038710 100644
--- a/core/scripts/go.mod
+++ b/core/scripts/go.mod
@@ -22,7 +22,7 @@ require (
github.com/prometheus/client_golang v1.17.0
github.com/shopspring/decimal v1.4.0
github.com/smartcontractkit/chainlink-automation v1.0.4
- github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc
+ github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409
github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000
github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7
github.com/spf13/cobra v1.8.0
diff --git a/core/scripts/go.sum b/core/scripts/go.sum
index 3ae26beb633..4c8eee4a1db 100644
--- a/core/scripts/go.sum
+++ b/core/scripts/go.sum
@@ -1184,8 +1184,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq
github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE=
github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8=
github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc h1:nNZqLasN8y5huDKX76JUZtni7WkUI36J61//czbJpDM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs=
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM=
diff --git a/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go b/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go
index 87622415430..3352267d149 100644
--- a/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go
+++ b/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go
@@ -11,10 +11,11 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
- ragetypes "github.com/smartcontractkit/libocr/ragep2p/types"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/durationpb"
+ ragetypes "github.com/smartcontractkit/libocr/ragep2p/types"
+
capabilitiespb "github.com/smartcontractkit/chainlink-common/pkg/capabilities/pb"
"github.com/smartcontractkit/chainlink-common/pkg/values"
@@ -373,8 +374,14 @@ func (c *deployAndInitializeCapabilitiesRegistryCommand) Run(args []string) {
panic(err)
}
- cc = newCapabilityConfig()
- ccb, err = proto.Marshal(cc)
+ targetCapabilityConfig := newCapabilityConfig()
+ targetCapabilityConfig.RemoteConfig = &capabilitiespb.CapabilityConfig_RemoteTargetConfig{
+ RemoteTargetConfig: &capabilitiespb.RemoteTargetConfig{
+ RequestHashExcludedAttributes: []string{"signed_report.Signatures"},
+ },
+ }
+
+ remoteTargetConfigBytes, err := proto.Marshal(targetCapabilityConfig)
if err != nil {
panic(err)
}
@@ -382,7 +389,7 @@ func (c *deployAndInitializeCapabilitiesRegistryCommand) Run(args []string) {
cfgs = []kcr.CapabilitiesRegistryCapabilityConfiguration{
{
CapabilityId: wid,
- Config: ccb,
+ Config: remoteTargetConfigBytes,
},
}
_, err = reg.AddDON(env.Owner, ps, cfgs, true, false, 1)
diff --git a/core/services/registrysyncer/syncer.go b/core/services/registrysyncer/syncer.go
index 4bbfaef5040..9675d86dc86 100644
--- a/core/services/registrysyncer/syncer.go
+++ b/core/services/registrysyncer/syncer.go
@@ -165,12 +165,21 @@ func unmarshalCapabilityConfig(data []byte) (capabilities.CapabilityConfiguratio
return capabilities.CapabilityConfiguration{}, err
}
- var rtc capabilities.RemoteTriggerConfig
- if prtc := cconf.GetRemoteTriggerConfig(); prtc != nil {
- rtc.RegistrationRefresh = prtc.RegistrationRefresh.AsDuration()
- rtc.RegistrationExpiry = prtc.RegistrationExpiry.AsDuration()
- rtc.MinResponsesToAggregate = prtc.MinResponsesToAggregate
- rtc.MessageExpiry = prtc.MessageExpiry.AsDuration()
+ var remoteTriggerConfig *capabilities.RemoteTriggerConfig
+ var remoteTargetConfig *capabilities.RemoteTargetConfig
+
+ switch cconf.GetRemoteConfig().(type) {
+ case *capabilitiespb.CapabilityConfig_RemoteTriggerConfig:
+ prtc := cconf.GetRemoteTriggerConfig()
+ remoteTriggerConfig = &capabilities.RemoteTriggerConfig{}
+ remoteTriggerConfig.RegistrationRefresh = prtc.RegistrationRefresh.AsDuration()
+ remoteTriggerConfig.RegistrationExpiry = prtc.RegistrationExpiry.AsDuration()
+ remoteTriggerConfig.MinResponsesToAggregate = prtc.MinResponsesToAggregate
+ remoteTriggerConfig.MessageExpiry = prtc.MessageExpiry.AsDuration()
+ case *capabilitiespb.CapabilityConfig_RemoteTargetConfig:
+ prtc := cconf.GetRemoteTargetConfig()
+ remoteTargetConfig = &capabilities.RemoteTargetConfig{}
+ remoteTargetConfig.RequestHashExcludedAttributes = prtc.RequestHashExcludedAttributes
}
dc, err := values.FromMapValueProto(cconf.DefaultConfig)
@@ -180,7 +189,8 @@ func unmarshalCapabilityConfig(data []byte) (capabilities.CapabilityConfiguratio
return capabilities.CapabilityConfiguration{
DefaultConfig: dc,
- RemoteTriggerConfig: rtc,
+ RemoteTriggerConfig: remoteTriggerConfig,
+ RemoteTargetConfig: remoteTargetConfig,
}, nil
}
@@ -223,8 +233,6 @@ func (s *registrySyncer) localRegistry(ctx context.Context) (*LocalRegistry, err
return nil, innerErr
}
- cconf.RemoteTriggerConfig.ApplyDefaults()
-
cc[cid] = cconf
}
diff --git a/core/services/registrysyncer/syncer_test.go b/core/services/registrysyncer/syncer_test.go
index b926183394e..c13cc904909 100644
--- a/core/services/registrysyncer/syncer_test.go
+++ b/core/services/registrysyncer/syncer_test.go
@@ -210,6 +210,7 @@ func TestReader_Integration(t *testing.T) {
RegistrationExpiry: durationpb.New(60 * time.Second),
// F + 1
MinResponsesToAggregate: uint32(1) + 1,
+ MessageExpiry: durationpb.New(120 * time.Second),
},
},
}
@@ -256,7 +257,7 @@ func TestReader_Integration(t *testing.T) {
}, gotCap)
assert.Len(t, s.IDsToDONs, 1)
- rtc := capabilities.RemoteTriggerConfig{
+ rtc := &capabilities.RemoteTriggerConfig{
RegistrationRefresh: 20 * time.Second,
MinResponsesToAggregate: 2,
RegistrationExpiry: 60 * time.Second,
diff --git a/go.mod b/go.mod
index 8e2103eb246..3aa878d1ab2 100644
--- a/go.mod
+++ b/go.mod
@@ -72,7 +72,7 @@ require (
github.com/shopspring/decimal v1.4.0
github.com/smartcontractkit/chain-selectors v1.0.10
github.com/smartcontractkit/chainlink-automation v1.0.4
- github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc
+ github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827
diff --git a/go.sum b/go.sum
index 73d6d5b227a..0264dcc01c3 100644
--- a/go.sum
+++ b/go.sum
@@ -1136,8 +1136,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq
github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE=
github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8=
github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc h1:nNZqLasN8y5huDKX76JUZtni7WkUI36J61//czbJpDM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs=
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM=
diff --git a/integration-tests/go.mod b/integration-tests/go.mod
index ed693f4fccc..d7c1918c927 100644
--- a/integration-tests/go.mod
+++ b/integration-tests/go.mod
@@ -28,7 +28,7 @@ require (
github.com/shopspring/decimal v1.4.0
github.com/slack-go/slack v0.12.2
github.com/smartcontractkit/chainlink-automation v1.0.4
- github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc
+ github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409
github.com/smartcontractkit/chainlink-testing-framework v1.33.0
github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240405215812-5a72bc9af239
github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000
diff --git a/integration-tests/go.sum b/integration-tests/go.sum
index ca3ce8d903e..2854b2d599b 100644
--- a/integration-tests/go.sum
+++ b/integration-tests/go.sum
@@ -1486,8 +1486,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq
github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE=
github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8=
github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc h1:nNZqLasN8y5huDKX76JUZtni7WkUI36J61//czbJpDM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs=
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM=
diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod
index 3d1ae6c7a98..f0485082ecb 100644
--- a/integration-tests/load/go.mod
+++ b/integration-tests/load/go.mod
@@ -16,7 +16,7 @@ require (
github.com/rs/zerolog v1.31.0
github.com/slack-go/slack v0.12.2
github.com/smartcontractkit/chainlink-automation v1.0.4
- github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc
+ github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409
github.com/smartcontractkit/chainlink-testing-framework v1.33.0
github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20240214231432-4ad5eb95178c
github.com/smartcontractkit/chainlink/v2 v2.9.0-beta0.0.20240216210048-da02459ddad8
diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum
index 2a54ec9254f..647a02b9b0e 100644
--- a/integration-tests/load/go.sum
+++ b/integration-tests/load/go.sum
@@ -1468,8 +1468,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq
github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE=
github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8=
github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc h1:nNZqLasN8y5huDKX76JUZtni7WkUI36J61//czbJpDM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs=
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM=
From d90bb66934a46bb1c6d376b000d860e1588d91c7 Mon Sep 17 00:00:00 2001
From: Matthew Pendrey
Date: Mon, 5 Aug 2024 17:46:31 +0100
Subject: [PATCH 09/52] common version update to head of develop (#14030)
---
.changeset/ninety-cougars-tease.md | 5 +++++
core/scripts/go.mod | 2 +-
core/scripts/go.sum | 4 ++--
go.mod | 2 +-
go.sum | 4 ++--
integration-tests/go.mod | 2 +-
integration-tests/go.sum | 4 ++--
integration-tests/load/go.mod | 2 +-
integration-tests/load/go.sum | 4 ++--
9 files changed, 17 insertions(+), 12 deletions(-)
create mode 100644 .changeset/ninety-cougars-tease.md
diff --git a/.changeset/ninety-cougars-tease.md b/.changeset/ninety-cougars-tease.md
new file mode 100644
index 00000000000..ab12a571914
--- /dev/null
+++ b/.changeset/ninety-cougars-tease.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+#internal restore common version to head of develop
diff --git a/core/scripts/go.mod b/core/scripts/go.mod
index 4e250038710..fe4ee2c9748 100644
--- a/core/scripts/go.mod
+++ b/core/scripts/go.mod
@@ -22,7 +22,7 @@ require (
github.com/prometheus/client_golang v1.17.0
github.com/shopspring/decimal v1.4.0
github.com/smartcontractkit/chainlink-automation v1.0.4
- github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409
+ github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c
github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000
github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7
github.com/spf13/cobra v1.8.0
diff --git a/core/scripts/go.sum b/core/scripts/go.sum
index 4c8eee4a1db..76eaf615279 100644
--- a/core/scripts/go.sum
+++ b/core/scripts/go.sum
@@ -1184,8 +1184,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq
github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE=
github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8=
github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c h1:3apUsez/6Pkp1ckXzSwIhzPRuWjDGjzMjKapEKi0Fcw=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs=
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM=
diff --git a/go.mod b/go.mod
index 3aa878d1ab2..45e0b62d52e 100644
--- a/go.mod
+++ b/go.mod
@@ -72,7 +72,7 @@ require (
github.com/shopspring/decimal v1.4.0
github.com/smartcontractkit/chain-selectors v1.0.10
github.com/smartcontractkit/chainlink-automation v1.0.4
- github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409
+ github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827
diff --git a/go.sum b/go.sum
index 0264dcc01c3..4a6b294c122 100644
--- a/go.sum
+++ b/go.sum
@@ -1136,8 +1136,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq
github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE=
github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8=
github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c h1:3apUsez/6Pkp1ckXzSwIhzPRuWjDGjzMjKapEKi0Fcw=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs=
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM=
diff --git a/integration-tests/go.mod b/integration-tests/go.mod
index d7c1918c927..8f9652099b1 100644
--- a/integration-tests/go.mod
+++ b/integration-tests/go.mod
@@ -28,7 +28,7 @@ require (
github.com/shopspring/decimal v1.4.0
github.com/slack-go/slack v0.12.2
github.com/smartcontractkit/chainlink-automation v1.0.4
- github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409
+ github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c
github.com/smartcontractkit/chainlink-testing-framework v1.33.0
github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240405215812-5a72bc9af239
github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000
diff --git a/integration-tests/go.sum b/integration-tests/go.sum
index 2854b2d599b..bca92f4a97c 100644
--- a/integration-tests/go.sum
+++ b/integration-tests/go.sum
@@ -1486,8 +1486,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq
github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE=
github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8=
github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c h1:3apUsez/6Pkp1ckXzSwIhzPRuWjDGjzMjKapEKi0Fcw=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs=
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM=
diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod
index f0485082ecb..f0554f2c725 100644
--- a/integration-tests/load/go.mod
+++ b/integration-tests/load/go.mod
@@ -16,7 +16,7 @@ require (
github.com/rs/zerolog v1.31.0
github.com/slack-go/slack v0.12.2
github.com/smartcontractkit/chainlink-automation v1.0.4
- github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409
+ github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c
github.com/smartcontractkit/chainlink-testing-framework v1.33.0
github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20240214231432-4ad5eb95178c
github.com/smartcontractkit/chainlink/v2 v2.9.0-beta0.0.20240216210048-da02459ddad8
diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum
index 647a02b9b0e..f31a11d389d 100644
--- a/integration-tests/load/go.sum
+++ b/integration-tests/load/go.sum
@@ -1468,8 +1468,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq
github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE=
github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8=
github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc=
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c h1:3apUsez/6Pkp1ckXzSwIhzPRuWjDGjzMjKapEKi0Fcw=
+github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0=
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs=
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM=
From 8b9f2b6b9098e8ec2368773368239106d066e4e3 Mon Sep 17 00:00:00 2001
From: ilija42 <57732589+ilija42@users.noreply.github.com>
Date: Tue, 6 Aug 2024 12:52:10 +0200
Subject: [PATCH 10/52] [BCF - 3339] - Codec and CR hashed topics support
(#14016)
* Add better handling for CR EVM filtering by hashed indexed topics
* Add comments for CR event codec usage
* Improve err handling in CR init
* Add a TODO for CR QueryKey primitive remapping handling
* Update codec test to match most recent changes
* Add changeset and run solidity prettier
* Add contracts changeset for ChainReaderTester contract changes
* simplify getNativeAndCheckedTypesForArg FixedBytesTy case
---
.changeset/thin-rings-count.md | 5 +
contracts/.changeset/itchy-turtles-agree.md | 5 +
.../shared/test/helpers/ChainReaderTester.sol | 8 +
.../chain_reader_tester.go | 175 +++++++++++++++++-
...rapper-dependency-versions-do-not-edit.txt | 2 +-
core/services/relay/evm/chain_reader.go | 10 +-
core/services/relay/evm/event_binding.go | 67 ++++---
.../chain_reader_interface_tester.go | 16 +-
.../relay/evm/evmtesting/run_tests.go | 37 +++-
core/services/relay/evm/types/codec_entry.go | 2 +-
.../relay/evm/types/codec_entry_test.go | 28 ++-
11 files changed, 314 insertions(+), 41 deletions(-)
create mode 100644 .changeset/thin-rings-count.md
create mode 100644 contracts/.changeset/itchy-turtles-agree.md
diff --git a/.changeset/thin-rings-count.md b/.changeset/thin-rings-count.md
new file mode 100644
index 00000000000..20f4b54311e
--- /dev/null
+++ b/.changeset/thin-rings-count.md
@@ -0,0 +1,5 @@
+---
+"chainlink": minor
+---
+
+#internal Add evm Chain Reader GetLatestValue support for filtering on indexed topic types that get hashed.
diff --git a/contracts/.changeset/itchy-turtles-agree.md b/contracts/.changeset/itchy-turtles-agree.md
new file mode 100644
index 00000000000..930ab850d9b
--- /dev/null
+++ b/contracts/.changeset/itchy-turtles-agree.md
@@ -0,0 +1,5 @@
+---
+'@chainlink/contracts': minor
+---
+
+#internal Add an event with indexed topics that get hashed to Chain Reader Tester contract.
diff --git a/contracts/src/v0.8/shared/test/helpers/ChainReaderTester.sol b/contracts/src/v0.8/shared/test/helpers/ChainReaderTester.sol
index 58a4b9a25c5..709d00cc382 100644
--- a/contracts/src/v0.8/shared/test/helpers/ChainReaderTester.sol
+++ b/contracts/src/v0.8/shared/test/helpers/ChainReaderTester.sol
@@ -40,6 +40,9 @@ contract ChainReaderTester {
// First topic is event hash
event TriggeredWithFourTopics(int32 indexed field1, int32 indexed field2, int32 indexed field3);
+ // first topic is event hash, second and third topics get hashed before getting stored
+ event TriggeredWithFourTopicsWithHashed(string indexed field1, uint8[32] indexed field2, bytes32 indexed field3);
+
TestStruct[] private s_seen;
uint64[] private s_arr;
uint64 private s_value;
@@ -125,4 +128,9 @@ contract ChainReaderTester {
function triggerWithFourTopics(int32 field1, int32 field2, int32 field3) public {
emit TriggeredWithFourTopics(field1, field2, field3);
}
+
+ // first topic is event hash, second and third topics get hashed before getting stored
+ function triggerWithFourTopicsWithHashed(string memory field1, uint8[32] memory field2, bytes32 field3) public {
+ emit TriggeredWithFourTopicsWithHashed(field1, field2, field3);
+ }
}
diff --git a/core/gethwrappers/generated/chain_reader_tester/chain_reader_tester.go b/core/gethwrappers/generated/chain_reader_tester/chain_reader_tester.go
index 751df822696..c59a6f0f0d1 100644
--- a/core/gethwrappers/generated/chain_reader_tester/chain_reader_tester.go
+++ b/core/gethwrappers/generated/chain_reader_tester/chain_reader_tester.go
@@ -52,8 +52,8 @@ type TestStruct struct {
}
var ChainReaderTesterMetaData = &bind.MetaData{
- ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"Account\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"Accounts\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"indexed\":false,\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"Triggered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"string\",\"name\":\"fieldHash\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"field\",\"type\":\"string\"}],\"name\":\"TriggeredEventWithDynamicTopic\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field1\",\"type\":\"int32\"},{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field2\",\"type\":\"int32\"},{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field3\",\"type\":\"int32\"}],\"name\":\"TriggeredWithFourTopics\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"differentField\",\"t
ype\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"addTestStruct\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAlterablePrimitiveValue\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDifferentPrimitiveValue\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"i\",\"type\":\"uint256\"}],\"name\":\"getElementAtIndex\",\"outputs\":[{\"components\":[{\"internalType\":\"int32\",\"name\":\"Field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"DifferentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"OracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"OracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"Account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"Accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"BigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedB
ytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"NestedStruct\",\"type\":\"tuple\"}],\"internalType\":\"structTestStruct\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPrimitiveValue\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getSliceValue\",\"outputs\":[{\"internalType\":\"uint64[]\",\"name\":\"\",\"type\":\"uint64[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"returnSeen\",\"outputs\":[{\"components\":[{\"internalType\":\"int32\",\"name\":\"Field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"DifferentField\",\"type\":\"string\"},{\"internalType\":\"u
int8\",\"name\":\"OracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"OracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"Account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"Accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"BigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"NestedStruct\",\"type\":\"tuple\"}],\"internalType\":\"structTestStruct\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"value\",\"type\":\"uint64\"}],\"name\":\"setAlterablePrimitiveValue\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":
\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"triggerEvent\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"field\",\"type\":\"string\"}],\"name\":\"triggerEventWithDynamicTopic\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field1\",\"type\":\"int32\"},{\"internalType\":\"int32\",\"name\":\"field2\",\"type\":\"int32\"},{\"internalType\":\"int32\",\"name\":\"field3\",\"type\":\"int32\"}],\"name\":\"triggerWithFourTopics\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
- Bin: "0x608060405234801561001057600080fd5b50600180548082018255600082905260048082047fb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6908101805460086003958616810261010090810a8088026001600160401b0391820219909416939093179093558654808801909755848704909301805496909516909202900a91820291021990921691909117905561181e806100a96000396000f3fe608060405234801561001057600080fd5b50600436106100c95760003560e01c80637f002d6711610081578063ef4e1ced1161005b578063ef4e1ced146101c0578063f6f871c8146101c7578063fbe9fbf6146101da57600080fd5b80637f002d671461017d578063ab5e0b3814610190578063dbfd7332146101ad57600080fd5b806349eac2ac116100b257806349eac2ac1461010c578063679004a41461011f5780636c9a43b61461013457600080fd5b80632c45576f146100ce5780633272b66c146100f7575b600080fd5b6100e16100dc366004610c2b565b6101ec565b6040516100ee9190610d8a565b60405180910390f35b61010a610105366004610ec9565b6104c7565b005b61010a61011a366004610fde565b61051c565b61012761081f565b6040516100ee91906110d0565b61010a61014236600461111e565b600280547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff92909216919091179055565b61010a61018b366004610fde565b6108ab565b6107c65b60405167ffffffffffffffff90911681526020016100ee565b61010a6101bb36600461114f565b610902565b6003610194565b6100e16101d5366004610fde565b61093f565b60025467ffffffffffffffff16610194565b6101f4610a48565b6000610201600184611192565b81548110610211576102116111cc565b6000918252602091829020604080516101008101909152600a90920201805460030b8252600181018054929391929184019161024c906111fb565b80601f0160208091040260200160405190810160405280929190818152602001828054610278906111fb565b80156102c55780601f1061029a576101008083540402835291602001916102c5565b820191906000526020600020905b8154815290600101906020018083116102a857829003601f168201915b5050509183525050600282015460ff166020808301919091526040805161040081018083529190930192916003850191826000855b825461010083900a900460ff168152602060019283018181049485019490930390920291018084116102fa57505050928452505050600482015473ffffffff
ffffffffffffffffffffffffffffffff1660208083019190915260058301805460408051828502810185018252828152940193928301828280156103b357602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610388575b5050509183525050600682015460170b6020808301919091526040805180820182526007808601805460f01b7fffff0000000000000000000000000000000000000000000000000000000000001683528351808501855260088801805490930b81526009880180549590970196939591948683019491939284019190610438906111fb565b80601f0160208091040260200160405190810160405280929190818152602001828054610464906111fb565b80156104b15780601f10610486576101008083540402835291602001916104b1565b820191906000526020600020905b81548152906001019060200180831161049457829003601f168201915b5050509190925250505090525090525092915050565b81816040516104d7929190611248565b60405180910390207f3d969732b1bbbb9f1d7eb9f3f14e4cb50a74d950b3ef916a397b85dfbab93c6783836040516105109291906112a1565b60405180910390a25050565b60006040518061010001604052808c60030b81526020018b8b8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509082525060ff8a166020808301919091526040805161040081810183529190930192918b9183908390808284376000920191909152505050815273ffffffffffffffffffffffffffffffffffffffff8816602080830191909152604080518883028181018401835289825291909301929189918991829190850190849080828437600092019190915250505090825250601785900b602082015260400161060e8461139e565b905281546001808201845560009384526020938490208351600a9093020180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001663ffffffff90931692909217825592820151919290919082019061067490826114f8565b5060408201516002820180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff90921691909117905560608201516106c29060038301906020610a97565b5060808201516004820180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff90921691909117905560a08201518051610729916005840191602090
910190610b2a565b5060c08201516006820180547fffffffffffffffff0000000000000000000000000000000000000000000000001677ffffffffffffffffffffffffffffffffffffffffffffffff90921691909117905560e082015180516007830180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001660f09290921c91909117815560208083015180516008860180547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff90921691909117815591810151909190600986019061080c90826114f8565b5050505050505050505050505050505050565b606060018054806020026020016040519081016040528092919081815260200182805480156108a157602002820191906000526020600020906000905b82829054906101000a900467ffffffffffffffff1667ffffffffffffffff168152602001906008019060208260070104928301926001038202915080841161085c5790505b5050505050905090565b8960030b7f7188419dcd8b51877b71766f075f3626586c0ff190e7d056aa65ce9acb649a3d8a8a8a8a8a8a8a8a8a6040516108ee99989796959493929190611757565b60405180910390a250505050505050505050565b8060030b8260030b8460030b7f91c80dc390f3d041b3a04b0099b19634499541ea26972250986ee4b24a12fac560405160405180910390a4505050565b610947610a48565b6040518061010001604052808c60030b81526020018b8b8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509082525060ff8a166020808301919091526040805161040081810183529190930192918b9183908390808284376000920191909152505050815273ffffffffffffffffffffffffffffffffffffffff8816602080830191909152604080518883028181018401835289825291909301929189918991829190850190849080828437600092019190915250505090825250601785900b6020820152604001610a378461139e565b90529b9a5050505050505050505050565b6040805161010081018252600080825260606020830181905292820152908101610a70610ba4565b8152600060208201819052606060408301819052820152608001610a92610bc3565b905290565b600183019183908215610b1a5791602002820160005b83821115610aeb57835183826101000a81548160ff021916908360ff1602179055509260200192600101602081600001049283019260010302610aad565b8015610b185782816101000a81549060ff02191690556001016020816000
01049283019260010302610aeb565b505b50610b26929150610c16565b5090565b828054828255906000526020600020908101928215610b1a579160200282015b82811115610b1a57825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909116178255602090920191600190910190610b4a565b6040518061040001604052806020906020820280368337509192915050565b604051806040016040528060007dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff19168152602001610a926040518060400160405280600060070b8152602001606081525090565b5b80821115610b265760008155600101610c17565b600060208284031215610c3d57600080fd5b5035919050565b6000815180845260005b81811015610c6a57602081850181015186830182015201610c4e565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b8060005b6020808210610cbb5750610cd2565b825160ff1685529384019390910190600101610cac565b50505050565b600081518084526020808501945080840160005b83811015610d1e57815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101610cec565b509495945050505050565b7fffff00000000000000000000000000000000000000000000000000000000000081511682526000602082015160406020850152805160070b60408501526020810151905060406060850152610d826080850182610c44565b949350505050565b60208152610d9e60208201835160030b9052565b600060208301516104e0806040850152610dbc610500850183610c44565b91506040850151610dd2606086018260ff169052565b506060850151610de56080860182610ca8565b50608085015173ffffffffffffffffffffffffffffffffffffffff1661048085015260a08501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe085840381016104a0870152610e428483610cd8565b935060c08701519150610e5b6104c087018360170b9052565b60e0870151915080868503018387015250610e768382610d29565b9695505050505050565b60008083601f840112610e9257600080fd5b50813567ffffffffffffffff811115610eaa57600080fd5b602083019150836020828501011115610ec257600080fd5b9250929050565b60008060208385031215610edc57600080fd5b823567ffffffffffffffff811115610ef357600080
fd5b610eff85828601610e80565b90969095509350505050565b8035600381900b8114610f1d57600080fd5b919050565b803560ff81168114610f1d57600080fd5b806104008101831015610f4557600080fd5b92915050565b803573ffffffffffffffffffffffffffffffffffffffff81168114610f1d57600080fd5b60008083601f840112610f8157600080fd5b50813567ffffffffffffffff811115610f9957600080fd5b6020830191508360208260051b8501011115610ec257600080fd5b8035601781900b8114610f1d57600080fd5b600060408284031215610fd857600080fd5b50919050565b6000806000806000806000806000806104e08b8d031215610ffe57600080fd5b6110078b610f0b565b995060208b013567ffffffffffffffff8082111561102457600080fd5b6110308e838f01610e80565b909b50995089915061104460408e01610f22565b98506110538e60608f01610f33565b97506110626104608e01610f4b565b96506104808d013591508082111561107957600080fd5b6110858e838f01610f6f565b909650945084915061109a6104a08e01610fb4565b93506104c08d01359150808211156110b157600080fd5b506110be8d828e01610fc6565b9150509295989b9194979a5092959850565b6020808252825182820181905260009190848201906040850190845b8181101561111257835167ffffffffffffffff16835292840192918401916001016110ec565b50909695505050505050565b60006020828403121561113057600080fd5b813567ffffffffffffffff8116811461114857600080fd5b9392505050565b60008060006060848603121561116457600080fd5b61116d84610f0b565b925061117b60208501610f0b565b915061118960408501610f0b565b90509250925092565b81810381811115610f45577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600181811c9082168061120f57607f821691505b602082108103610fd8577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b8183823760009101908152919050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b602081526000610d82602083018486611258565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052
604160045260246000fd5b6040805190810167ffffffffffffffff81118282101715611307576113076112b5565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715611354576113546112b5565b604052919050565b80357fffff00000000000000000000000000000000000000000000000000000000000081168114610f1d57600080fd5b8035600781900b8114610f1d57600080fd5b6000604082360312156113b057600080fd5b6113b86112e4565b6113c18361135c565b815260208084013567ffffffffffffffff808211156113df57600080fd5b8186019150604082360312156113f457600080fd5b6113fc6112e4565b6114058361138c565b8152838301358281111561141857600080fd5b929092019136601f84011261142c57600080fd5b82358281111561143e5761143e6112b5565b61146e857fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8401160161130d565b9250808352368582860101111561148457600080fd5b8085850186850137600090830185015280840191909152918301919091525092915050565b601f8211156114f357600081815260208120601f850160051c810160208610156114d05750805b601f850160051c820191505b818110156114ef578281556001016114dc565b5050505b505050565b815167ffffffffffffffff811115611512576115126112b5565b6115268161152084546111fb565b846114a9565b602080601f83116001811461157957600084156115435750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b1785556114ef565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b828110156115c6578886015182559484019460019091019084016115a7565b508582101561160257878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b8183526000602080850194508260005b85811015610d1e5773ffffffffffffffffffffffffffffffffffffffff61164883610f4b565b1687529582019590820190600101611622565b7fffff0000000000000000000000000000000000000000000000000000000000006116858261135c565b168252600060208201357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc18336030181126116bf57600080fd5b6040
602085015282016116d18161138c565b60070b604085015260208101357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe182360301811261170e57600080fd5b0160208101903567ffffffffffffffff81111561172a57600080fd5b80360382131561173957600080fd5b6040606086015261174e608086018284611258565b95945050505050565b60006104c080835261176c8184018c8e611258565b9050602060ff808c1682860152604085018b60005b848110156117a6578361179383610f22565b1683529184019190840190600101611781565b505050505073ffffffffffffffffffffffffffffffffffffffff88166104408401528281036104608401526117dc818789611612565b90506117ee61048084018660170b9052565b8281036104a0840152611801818561165b565b9c9b50505050505050505050505056fea164736f6c6343000813000a",
+ ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"Account\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"Accounts\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"indexed\":false,\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"Triggered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"string\",\"name\":\"fieldHash\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"field\",\"type\":\"string\"}],\"name\":\"TriggeredEventWithDynamicTopic\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field1\",\"type\":\"int32\"},{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field2\",\"type\":\"int32\"},{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field3\",\"type\":\"int32\"}],\"name\":\"TriggeredWithFourTopics\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"string\",\"name\":\"field1\",\"type\":\"string\"},{\"indexed\":true,\"
internalType\":\"uint8[32]\",\"name\":\"field2\",\"type\":\"uint8[32]\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"field3\",\"type\":\"bytes32\"}],\"name\":\"TriggeredWithFourTopicsWithHashed\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"addTestStruct\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAlterablePrimitiveValue\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDifferentPrimitiveValue\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"i\",\"type\":\"uint256\"}],\"name\":\"getElementAtIndex\",\"outputs\":[{\"components\":[{\"internalType\":\"int32\",\"name\":\"Field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"DifferentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"OracleId\",\"type\":\"uint
8\"},{\"internalType\":\"uint8[32]\",\"name\":\"OracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"Account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"Accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"BigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"NestedStruct\",\"type\":\"tuple\"}],\"internalType\":\"structTestStruct\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPrimitiveValue\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getSliceValue\",\"outputs\":[{\"internalType\":\"uint64[]\",\"name\":\"\",\"type\":\"uint64[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structI
nnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"returnSeen\",\"outputs\":[{\"components\":[{\"internalType\":\"int32\",\"name\":\"Field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"DifferentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"OracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"OracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"Account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"Accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"BigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"NestedStruct\",\"type\":\"tuple\"}],\"internalType\":\"structTestStruct\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"value\",\"type\":\"uint64\"}],\"name\":\"setAlterablePrimitiveValue\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"compone
nts\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"triggerEvent\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"field\",\"type\":\"string\"}],\"name\":\"triggerEventWithDynamicTopic\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field1\",\"type\":\"int32\"},{\"internalType\":\"int32\",\"name\":\"field2\",\"type\":\"int32\"},{\"internalType\":\"int32\",\"name\":\"field3\",\"type\":\"int32\"}],\"name\":\"triggerWithFourTopics\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"field1\",\"type\":\"string\"},{\"internalType\":\"uint8[32]\",\"name\":\"field2\",\"type\":\"uint8[32]\"},{\"internalType\":\"bytes32\",\"name\":\"field3\",\"type\":\"bytes32\"}],\"name\":\"triggerWithFourTopicsWithHashed\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
+ Bin: "0x608060405234801561001057600080fd5b50600180548082018255600082905260048082047fb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6908101805460086003958616810261010090810a8088026001600160401b0391820219909416939093179093558654808801909755848704909301805496909516909202900a91820291021990921691909117905561199c806100a96000396000f3fe608060405234801561001057600080fd5b50600436106100d45760003560e01c8063a90e199811610081578063ef4e1ced1161005b578063ef4e1ced146101de578063f6f871c8146101e5578063fbe9fbf6146101f857600080fd5b8063a90e19981461019b578063ab5e0b38146101ae578063dbfd7332146101cb57600080fd5b8063679004a4116100b2578063679004a41461012a5780636c9a43b61461013f5780637f002d671461018857600080fd5b80632c45576f146100d95780633272b66c1461010257806349eac2ac14610117575b600080fd5b6100ec6100e7366004610ca3565b61020a565b6040516100f99190610e0c565b60405180910390f35b610115610110366004610f4b565b6104e5565b005b610115610125366004611060565b61053a565b61013261083d565b6040516100f99190611152565b61011561014d3660046111a0565b600280547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff92909216919091179055565b610115610196366004611060565b6108c9565b6101156101a93660046112d4565b610920565b6107c65b60405167ffffffffffffffff90911681526020016100f9565b6101156101d9366004611389565b61097a565b60036101b2565b6100ec6101f3366004611060565b6109b7565b60025467ffffffffffffffff166101b2565b610212610ac0565b600061021f6001846113cc565b8154811061022f5761022f611406565b6000918252602091829020604080516101008101909152600a90920201805460030b8252600181018054929391929184019161026a90611435565b80601f016020809104026020016040519081016040528092919081815260200182805461029690611435565b80156102e35780601f106102b8576101008083540402835291602001916102e3565b820191906000526020600020905b8154815290600101906020018083116102c657829003601f168201915b5050509183525050600282015460ff166020808301919091526040805161040081018083529190930192916003850191826000855b825461010083900a900460ff168152602060019283018181049485019490930390
9202910180841161031857505050928452505050600482015473ffffffffffffffffffffffffffffffffffffffff1660208083019190915260058301805460408051828502810185018252828152940193928301828280156103d157602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff1681526001909101906020018083116103a6575b5050509183525050600682015460170b6020808301919091526040805180820182526007808601805460f01b7fffff0000000000000000000000000000000000000000000000000000000000001683528351808501855260088801805490930b8152600988018054959097019693959194868301949193928401919061045690611435565b80601f016020809104026020016040519081016040528092919081815260200182805461048290611435565b80156104cf5780601f106104a4576101008083540402835291602001916104cf565b820191906000526020600020905b8154815290600101906020018083116104b257829003601f168201915b5050509190925250505090525090525092915050565b81816040516104f5929190611482565b60405180910390207f3d969732b1bbbb9f1d7eb9f3f14e4cb50a74d950b3ef916a397b85dfbab93c67838360405161052e9291906114db565b60405180910390a25050565b60006040518061010001604052808c60030b81526020018b8b8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509082525060ff8a166020808301919091526040805161040081810183529190930192918b9183908390808284376000920191909152505050815273ffffffffffffffffffffffffffffffffffffffff8816602080830191909152604080518883028181018401835289825291909301929189918991829190850190849080828437600092019190915250505090825250601785900b602082015260400161062c84611531565b905281546001808201845560009384526020938490208351600a9093020180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001663ffffffff909316929092178255928201519192909190820190610692908261161e565b5060408201516002820180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff90921691909117905560608201516106e09060038301906020610b0f565b5060808201516004820180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffff
ffff90921691909117905560a08201518051610747916005840191602090910190610ba2565b5060c08201516006820180547fffffffffffffffff0000000000000000000000000000000000000000000000001677ffffffffffffffffffffffffffffffffffffffffffffffff90921691909117905560e082015180516007830180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001660f09290921c91909117815560208083015180516008860180547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff90921691909117815591810151909190600986019061082a908261161e565b5050505050505050505050505050505050565b606060018054806020026020016040519081016040528092919081815260200182805480156108bf57602002820191906000526020600020906000905b82829054906101000a900467ffffffffffffffff1667ffffffffffffffff168152602001906008019060208260070104928301926001038202915080841161087a5790505b5050505050905090565b8960030b7f7188419dcd8b51877b71766f075f3626586c0ff190e7d056aa65ce9acb649a3d8a8a8a8a8a8a8a8a8a60405161090c9998979695949392919061187d565b60405180910390a250505050505050505050565b808260405161092f9190611937565b6040518091039020846040516109459190611973565b604051908190038120907f7220e4dbe4e9d0ed5f71acd022bc89c26748ac6784f2c548bc17bb8e52af34b090600090a4505050565b8060030b8260030b8460030b7f91c80dc390f3d041b3a04b0099b19634499541ea26972250986ee4b24a12fac560405160405180910390a4505050565b6109bf610ac0565b6040518061010001604052808c60030b81526020018b8b8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509082525060ff8a166020808301919091526040805161040081810183529190930192918b9183908390808284376000920191909152505050815273ffffffffffffffffffffffffffffffffffffffff8816602080830191909152604080518883028181018401835289825291909301929189918991829190850190849080828437600092019190915250505090825250601785900b6020820152604001610aaf84611531565b90529b9a5050505050505050505050565b6040805161010081018252600080825260606020830181905292820152908101610ae8610c1c565b8152600060208201819052606060408301819052820152608001610b0a610c3b56
5b905290565b600183019183908215610b925791602002820160005b83821115610b6357835183826101000a81548160ff021916908360ff1602179055509260200192600101602081600001049283019260010302610b25565b8015610b905782816101000a81549060ff0219169055600101602081600001049283019260010302610b63565b505b50610b9e929150610c8e565b5090565b828054828255906000526020600020908101928215610b92579160200282015b82811115610b9257825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909116178255602090920191600190910190610bc2565b6040518061040001604052806020906020820280368337509192915050565b604051806040016040528060007dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff19168152602001610b0a6040518060400160405280600060070b8152602001606081525090565b5b80821115610b9e5760008155600101610c8f565b600060208284031215610cb557600080fd5b5035919050565b60005b83811015610cd7578181015183820152602001610cbf565b50506000910152565b60008151808452610cf8816020860160208601610cbc565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b8060005b6020808210610d3d5750610d54565b825160ff1685529384019390910190600101610d2e565b50505050565b600081518084526020808501945080840160005b83811015610da057815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101610d6e565b509495945050505050565b7fffff00000000000000000000000000000000000000000000000000000000000081511682526000602082015160406020850152805160070b60408501526020810151905060406060850152610e046080850182610ce0565b949350505050565b60208152610e2060208201835160030b9052565b600060208301516104e0806040850152610e3e610500850183610ce0565b91506040850151610e54606086018260ff169052565b506060850151610e676080860182610d2a565b50608085015173ffffffffffffffffffffffffffffffffffffffff1661048085015260a08501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe085840381016104a0870152610ec48483610d5a565b935060c08701519150610edd6104c087018360170b9052565b60e0870151915080868503018387015250610ef8
8382610dab565b9695505050505050565b60008083601f840112610f1457600080fd5b50813567ffffffffffffffff811115610f2c57600080fd5b602083019150836020828501011115610f4457600080fd5b9250929050565b60008060208385031215610f5e57600080fd5b823567ffffffffffffffff811115610f7557600080fd5b610f8185828601610f02565b90969095509350505050565b8035600381900b8114610f9f57600080fd5b919050565b803560ff81168114610f9f57600080fd5b806104008101831015610fc757600080fd5b92915050565b803573ffffffffffffffffffffffffffffffffffffffff81168114610f9f57600080fd5b60008083601f84011261100357600080fd5b50813567ffffffffffffffff81111561101b57600080fd5b6020830191508360208260051b8501011115610f4457600080fd5b8035601781900b8114610f9f57600080fd5b60006040828403121561105a57600080fd5b50919050565b6000806000806000806000806000806104e08b8d03121561108057600080fd5b6110898b610f8d565b995060208b013567ffffffffffffffff808211156110a657600080fd5b6110b28e838f01610f02565b909b5099508991506110c660408e01610fa4565b98506110d58e60608f01610fb5565b97506110e46104608e01610fcd565b96506104808d01359150808211156110fb57600080fd5b6111078e838f01610ff1565b909650945084915061111c6104a08e01611036565b93506104c08d013591508082111561113357600080fd5b506111408d828e01611048565b9150509295989b9194979a5092959850565b6020808252825182820181905260009190848201906040850190845b8181101561119457835167ffffffffffffffff168352928401929184019160010161116e565b50909695505050505050565b6000602082840312156111b257600080fd5b813567ffffffffffffffff811681146111ca57600080fd5b9392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040805190810167ffffffffffffffff81118282101715611223576112236111d1565b60405290565b600082601f83011261123a57600080fd5b813567ffffffffffffffff80821115611255576112556111d1565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f0116810190828211818310171561129b5761129b6111d1565b816040528381528660208588010111156112b457600080fd5b836020870160208301376000602085830101528094505050505092915050565b6000806000
61044084860312156112ea57600080fd5b833567ffffffffffffffff8082111561130257600080fd5b61130e87838801611229565b94506020915086603f87011261132357600080fd5b6040516104008101818110838211171561133f5761133f6111d1565b60405290508061042087018881111561135757600080fd5b8388015b818110156113795761136c81610fa4565b845292840192840161135b565b5095989097509435955050505050565b60008060006060848603121561139e57600080fd5b6113a784610f8d565b92506113b560208501610f8d565b91506113c360408501610f8d565b90509250925092565b81810381811115610fc7577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600181811c9082168061144957607f821691505b60208210810361105a577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b8183823760009101908152919050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b602081526000610e04602083018486611492565b80357fffff00000000000000000000000000000000000000000000000000000000000081168114610f9f57600080fd5b8035600781900b8114610f9f57600080fd5b60006040823603121561154357600080fd5b61154b611200565b611554836114ef565b8152602083013567ffffffffffffffff8082111561157157600080fd5b81850191506040823603121561158657600080fd5b61158e611200565b6115978361151f565b81526020830135828111156115ab57600080fd5b6115b736828601611229565b60208301525080602085015250505080915050919050565b601f82111561161957600081815260208120601f850160051c810160208610156115f65750805b601f850160051c820191505b8181101561161557828155600101611602565b5050505b505050565b815167ffffffffffffffff811115611638576116386111d1565b61164c816116468454611435565b846115cf565b602080601f83116001811461169f57600084156116695750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b178555611615565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffe08616915b828110156116ec578886015182559484019460019091019084016116cd565b508582101561172857878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b8183526000602080850194508260005b85811015610da05773ffffffffffffffffffffffffffffffffffffffff61176e83610fcd565b1687529582019590820190600101611748565b7fffff0000000000000000000000000000000000000000000000000000000000006117ab826114ef565b168252600060208201357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc18336030181126117e557600080fd5b6040602085015282016117f78161151f565b60070b604085015260208101357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe182360301811261183457600080fd5b0160208101903567ffffffffffffffff81111561185057600080fd5b80360382131561185f57600080fd5b60406060860152611874608086018284611492565b95945050505050565b60006104c08083526118928184018c8e611492565b9050602060ff808c1682860152604085018b60005b848110156118cc57836118b983610fa4565b16835291840191908401906001016118a7565b505050505073ffffffffffffffffffffffffffffffffffffffff8816610440840152828103610460840152611902818789611738565b905061191461048084018660170b9052565b8281036104a08401526119278185611781565b9c9b505050505050505050505050565b60008183825b602080821061194c5750611963565b825160ff168452928301929091019060010161193d565b5050506104008201905092915050565b60008251611985818460208701610cbc565b919091019291505056fea164736f6c6343000813000a",
}
var ChainReaderTesterABI = ChainReaderTesterMetaData.ABI
@@ -384,6 +384,18 @@ func (_ChainReaderTester *ChainReaderTesterTransactorSession) TriggerWithFourTop
return _ChainReaderTester.Contract.TriggerWithFourTopics(&_ChainReaderTester.TransactOpts, field1, field2, field3)
}
+func (_ChainReaderTester *ChainReaderTesterTransactor) TriggerWithFourTopicsWithHashed(opts *bind.TransactOpts, field1 string, field2 [32]uint8, field3 [32]byte) (*types.Transaction, error) {
+ return _ChainReaderTester.contract.Transact(opts, "triggerWithFourTopicsWithHashed", field1, field2, field3)
+}
+
+func (_ChainReaderTester *ChainReaderTesterSession) TriggerWithFourTopicsWithHashed(field1 string, field2 [32]uint8, field3 [32]byte) (*types.Transaction, error) {
+ return _ChainReaderTester.Contract.TriggerWithFourTopicsWithHashed(&_ChainReaderTester.TransactOpts, field1, field2, field3)
+}
+
+func (_ChainReaderTester *ChainReaderTesterTransactorSession) TriggerWithFourTopicsWithHashed(field1 string, field2 [32]uint8, field3 [32]byte) (*types.Transaction, error) {
+ return _ChainReaderTester.Contract.TriggerWithFourTopicsWithHashed(&_ChainReaderTester.TransactOpts, field1, field2, field3)
+}
+
type ChainReaderTesterTriggeredIterator struct {
Event *ChainReaderTesterTriggered
@@ -791,6 +803,151 @@ func (_ChainReaderTester *ChainReaderTesterFilterer) ParseTriggeredWithFourTopic
return event, nil
}
+type ChainReaderTesterTriggeredWithFourTopicsWithHashedIterator struct {
+ Event *ChainReaderTesterTriggeredWithFourTopicsWithHashed
+
+ contract *bind.BoundContract
+ event string
+
+ logs chan types.Log
+ sub ethereum.Subscription
+ done bool
+ fail error
+}
+
+func (it *ChainReaderTesterTriggeredWithFourTopicsWithHashedIterator) Next() bool {
+
+ if it.fail != nil {
+ return false
+ }
+
+ if it.done {
+ select {
+ case log := <-it.logs:
+ it.Event = new(ChainReaderTesterTriggeredWithFourTopicsWithHashed)
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+ it.fail = err
+ return false
+ }
+ it.Event.Raw = log
+ return true
+
+ default:
+ return false
+ }
+ }
+
+ select {
+ case log := <-it.logs:
+ it.Event = new(ChainReaderTesterTriggeredWithFourTopicsWithHashed)
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+ it.fail = err
+ return false
+ }
+ it.Event.Raw = log
+ return true
+
+ case err := <-it.sub.Err():
+ it.done = true
+ it.fail = err
+ return it.Next()
+ }
+}
+
+func (it *ChainReaderTesterTriggeredWithFourTopicsWithHashedIterator) Error() error {
+ return it.fail
+}
+
+func (it *ChainReaderTesterTriggeredWithFourTopicsWithHashedIterator) Close() error {
+ it.sub.Unsubscribe()
+ return nil
+}
+
+type ChainReaderTesterTriggeredWithFourTopicsWithHashed struct {
+ Field1 common.Hash
+ Field2 [32]uint8
+ Field3 [32]byte
+ Raw types.Log
+}
+
+func (_ChainReaderTester *ChainReaderTesterFilterer) FilterTriggeredWithFourTopicsWithHashed(opts *bind.FilterOpts, field1 []string, field2 [][32]uint8, field3 [][32]byte) (*ChainReaderTesterTriggeredWithFourTopicsWithHashedIterator, error) {
+
+ var field1Rule []interface{}
+ for _, field1Item := range field1 {
+ field1Rule = append(field1Rule, field1Item)
+ }
+ var field2Rule []interface{}
+ for _, field2Item := range field2 {
+ field2Rule = append(field2Rule, field2Item)
+ }
+ var field3Rule []interface{}
+ for _, field3Item := range field3 {
+ field3Rule = append(field3Rule, field3Item)
+ }
+
+ logs, sub, err := _ChainReaderTester.contract.FilterLogs(opts, "TriggeredWithFourTopicsWithHashed", field1Rule, field2Rule, field3Rule)
+ if err != nil {
+ return nil, err
+ }
+ return &ChainReaderTesterTriggeredWithFourTopicsWithHashedIterator{contract: _ChainReaderTester.contract, event: "TriggeredWithFourTopicsWithHashed", logs: logs, sub: sub}, nil
+}
+
+func (_ChainReaderTester *ChainReaderTesterFilterer) WatchTriggeredWithFourTopicsWithHashed(opts *bind.WatchOpts, sink chan<- *ChainReaderTesterTriggeredWithFourTopicsWithHashed, field1 []string, field2 [][32]uint8, field3 [][32]byte) (event.Subscription, error) {
+
+ var field1Rule []interface{}
+ for _, field1Item := range field1 {
+ field1Rule = append(field1Rule, field1Item)
+ }
+ var field2Rule []interface{}
+ for _, field2Item := range field2 {
+ field2Rule = append(field2Rule, field2Item)
+ }
+ var field3Rule []interface{}
+ for _, field3Item := range field3 {
+ field3Rule = append(field3Rule, field3Item)
+ }
+
+ logs, sub, err := _ChainReaderTester.contract.WatchLogs(opts, "TriggeredWithFourTopicsWithHashed", field1Rule, field2Rule, field3Rule)
+ if err != nil {
+ return nil, err
+ }
+ return event.NewSubscription(func(quit <-chan struct{}) error {
+ defer sub.Unsubscribe()
+ for {
+ select {
+ case log := <-logs:
+
+ event := new(ChainReaderTesterTriggeredWithFourTopicsWithHashed)
+ if err := _ChainReaderTester.contract.UnpackLog(event, "TriggeredWithFourTopicsWithHashed", log); err != nil {
+ return err
+ }
+ event.Raw = log
+
+ select {
+ case sink <- event:
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ }
+ }), nil
+}
+
+func (_ChainReaderTester *ChainReaderTesterFilterer) ParseTriggeredWithFourTopicsWithHashed(log types.Log) (*ChainReaderTesterTriggeredWithFourTopicsWithHashed, error) {
+ event := new(ChainReaderTesterTriggeredWithFourTopicsWithHashed)
+ if err := _ChainReaderTester.contract.UnpackLog(event, "TriggeredWithFourTopicsWithHashed", log); err != nil {
+ return nil, err
+ }
+ event.Raw = log
+ return event, nil
+}
+
func (_ChainReaderTester *ChainReaderTester) ParseLog(log types.Log) (generated.AbigenLog, error) {
switch log.Topics[0] {
case _ChainReaderTester.abi.Events["Triggered"].ID:
@@ -799,6 +956,8 @@ func (_ChainReaderTester *ChainReaderTester) ParseLog(log types.Log) (generated.
return _ChainReaderTester.ParseTriggeredEventWithDynamicTopic(log)
case _ChainReaderTester.abi.Events["TriggeredWithFourTopics"].ID:
return _ChainReaderTester.ParseTriggeredWithFourTopics(log)
+ case _ChainReaderTester.abi.Events["TriggeredWithFourTopicsWithHashed"].ID:
+ return _ChainReaderTester.ParseTriggeredWithFourTopicsWithHashed(log)
default:
return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0])
@@ -817,6 +976,10 @@ func (ChainReaderTesterTriggeredWithFourTopics) Topic() common.Hash {
return common.HexToHash("0x91c80dc390f3d041b3a04b0099b19634499541ea26972250986ee4b24a12fac5")
}
+func (ChainReaderTesterTriggeredWithFourTopicsWithHashed) Topic() common.Hash {
+ return common.HexToHash("0x7220e4dbe4e9d0ed5f71acd022bc89c26748ac6784f2c548bc17bb8e52af34b0")
+}
+
func (_ChainReaderTester *ChainReaderTester) Address() common.Address {
return _ChainReaderTester.address
}
@@ -844,6 +1007,8 @@ type ChainReaderTesterInterface interface {
TriggerWithFourTopics(opts *bind.TransactOpts, field1 int32, field2 int32, field3 int32) (*types.Transaction, error)
+ TriggerWithFourTopicsWithHashed(opts *bind.TransactOpts, field1 string, field2 [32]uint8, field3 [32]byte) (*types.Transaction, error)
+
FilterTriggered(opts *bind.FilterOpts, field []int32) (*ChainReaderTesterTriggeredIterator, error)
WatchTriggered(opts *bind.WatchOpts, sink chan<- *ChainReaderTesterTriggered, field []int32) (event.Subscription, error)
@@ -862,6 +1027,12 @@ type ChainReaderTesterInterface interface {
ParseTriggeredWithFourTopics(log types.Log) (*ChainReaderTesterTriggeredWithFourTopics, error)
+ FilterTriggeredWithFourTopicsWithHashed(opts *bind.FilterOpts, field1 []string, field2 [][32]uint8, field3 [][32]byte) (*ChainReaderTesterTriggeredWithFourTopicsWithHashedIterator, error)
+
+ WatchTriggeredWithFourTopicsWithHashed(opts *bind.WatchOpts, sink chan<- *ChainReaderTesterTriggeredWithFourTopicsWithHashed, field1 []string, field2 [][32]uint8, field3 [][32]byte) (event.Subscription, error)
+
+ ParseTriggeredWithFourTopicsWithHashed(log types.Log) (*ChainReaderTesterTriggeredWithFourTopicsWithHashed, error)
+
ParseLog(log types.Log) (generated.AbigenLog, error)
Address() common.Address
diff --git a/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt
index 41c270d61c0..3299989c582 100644
--- a/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt
+++ b/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt
@@ -24,7 +24,7 @@ batch_vrf_coordinator_v2: ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2/Batc
batch_vrf_coordinator_v2plus: ../../contracts/solc/v0.8.19/BatchVRFCoordinatorV2Plus/BatchVRFCoordinatorV2Plus.abi ../../contracts/solc/v0.8.19/BatchVRFCoordinatorV2Plus/BatchVRFCoordinatorV2Plus.bin f13715b38b5b9084b08bffa571fb1c8ef686001535902e1255052f074b31ad4e
blockhash_store: ../../contracts/solc/v0.8.19/BlockhashStore/BlockhashStore.abi ../../contracts/solc/v0.8.19/BlockhashStore/BlockhashStore.bin 31b118f9577240c8834c35f8b5a1440e82a6ca8aea702970de2601824b6ab0e1
chain_module_base: ../../contracts/solc/v0.8.19/ChainModuleBase/ChainModuleBase.abi ../../contracts/solc/v0.8.19/ChainModuleBase/ChainModuleBase.bin 39dfce79330e921e5c169051b11c6e5ea15cd4db5a7b09c06aabbe9658148915
-chain_reader_tester: ../../contracts/solc/v0.8.19/ChainReaderTester/ChainReaderTester.abi ../../contracts/solc/v0.8.19/ChainReaderTester/ChainReaderTester.bin b3718dad488f54de97d124221d96b867c81e11210084a1fad379cb8385d37ffe
+chain_reader_tester: ../../contracts/solc/v0.8.19/ChainReaderTester/ChainReaderTester.abi ../../contracts/solc/v0.8.19/ChainReaderTester/ChainReaderTester.bin b207f9e6bf71e445a2664a602677011b87b80bf95c6352fd7869f1a9ddb08a5b
chain_specific_util_helper: ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper/ChainSpecificUtilHelper.abi ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper/ChainSpecificUtilHelper.bin 66eb30b0717fefe05672df5ec863c0b9a5a654623c4757307a2726d8f31e26b1
counter: ../../contracts/solc/v0.8.6/Counter/Counter.abi ../../contracts/solc/v0.8.6/Counter/Counter.bin 6ca06e000e8423573ffa0bdfda749d88236ab3da2a4cbb4a868c706da90488c9
cron_upkeep_factory_wrapper: ../../contracts/solc/v0.8.6/CronUpkeepFactory/CronUpkeepFactory.abi - dacb0f8cdf54ae9d2781c5e720fc314b32ed5e58eddccff512c75d6067292cd7
diff --git a/core/services/relay/evm/chain_reader.go b/core/services/relay/evm/chain_reader.go
index d84c2f00a9c..205fcbbcf07 100644
--- a/core/services/relay/evm/chain_reader.go
+++ b/core/services/relay/evm/chain_reader.go
@@ -128,6 +128,10 @@ func (cr *chainReader) init(chainContractReaders map[string]types.ChainContractR
return err
}
}
+
+ if cr.bindings.contractBindings[contractName] == nil {
+ return fmt.Errorf("%w: no read bindings added for contract: %s", commontypes.ErrInvalidConfig, contractName)
+ }
cr.bindings.contractBindings[contractName].pollingFilter = chainContractReader.PollingFilter.ToLPFilter(eventSigsForContractFilter)
}
return nil
@@ -259,7 +263,7 @@ func (cr *chainReader) addEvent(contractName, eventName string, a abi.ABI, chain
return err
}
- // Encoder def's codec won't be used to encode, only for its type as input for GetLatestValue
+ // Encoder defs codec won't be used for encoding, but for storing caller filtering params which won't be hashed.
if err := cr.addEncoderDef(contractName, eventName, filterArgs, nil, chainReaderDefinition.InputModifications); err != nil {
return err
}
@@ -327,9 +331,11 @@ func (cr *chainReader) addQueryingReadBindings(contractName string, genericTopic
}
}
+// getEventInput returns codec entry for expected incoming event params and the modifier to be applied to the params.
func (cr *chainReader) getEventInput(def types.ChainReaderDefinition, contractName, eventName string) (
types.CodecEntry, codec.Modifier, error) {
inputInfo := cr.parsed.EncoderDefs[WrapItemType(contractName, eventName, true)]
+ // TODO can this be simplified? Isn't this same as inputInfo.Modifier()? BCI-3909
inMod, err := def.InputModifications.ToModifier(DecoderHooks...)
if err != nil {
return nil, nil, err
@@ -378,6 +384,8 @@ func (cr *chainReader) addDecoderDef(contractName, itemType string, outputs abi.
return output.Init()
}
+// setupEventInput returns abi args where indexed flag is set to false because we expect caller to filter with params that aren't hashed.
+// codecEntry has expected onchain types set, for e.g. indexed topics of type string or uint8[32] array are expected as common.Hash onchain.
func setupEventInput(event abi.Event, inputFields []string) ([]abi.Argument, types.CodecEntry, map[string]bool) {
topicFieldDefs := map[string]bool{}
for _, value := range inputFields {
diff --git a/core/services/relay/evm/event_binding.go b/core/services/relay/evm/event_binding.go
index acfb1aa6300..97ddc99a107 100644
--- a/core/services/relay/evm/event_binding.go
+++ b/core/services/relay/evm/event_binding.go
@@ -9,6 +9,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/google/uuid"
"github.com/smartcontractkit/chainlink-common/pkg/codec"
@@ -209,11 +210,13 @@ func (e *eventBinding) getLatestValueWithFilters(
return err
}
+ // convert caller chain agnostic params types to types representing onchain abi types, for e.g. bytes32.
checkedParams, err := e.inputModifier.TransformToOnChain(offChain, "" /* unused */)
if err != nil {
return err
}
+ // convert onchain params to native types similarly to generated abi wrappers, for e.g. fixed bytes32 abi type to [32]uint8.
nativeParams, err := e.inputInfo.ToNative(reflect.ValueOf(checkedParams))
if err != nil {
return err
@@ -252,6 +255,8 @@ func (e *eventBinding) getLatestValueWithFilters(
return e.decodeLog(ctx, logToUse, into)
}
+// convertToOffChainType creates a struct based on contract abi with applied codec modifiers.
+// Created type shouldn't have hashed types for indexed topics since incoming params wouldn't be hashed.
func (e *eventBinding) convertToOffChainType(params any) (any, error) {
offChain, err := e.codec.CreateType(WrapItemType(e.contractName, e.eventName, true), true)
if err != nil {
@@ -287,43 +292,35 @@ func matchesRemainingFilters(log *logpoller.Log, filters []common.Hash) bool {
return true
}
-func (e *eventBinding) encodeParams(item reflect.Value) ([]common.Hash, error) {
- for item.Kind() == reflect.Pointer {
- item = reflect.Indirect(item)
+// encodeParams accepts nativeParams and encodes them to match onchain topics.
+func (e *eventBinding) encodeParams(nativeParams reflect.Value) ([]common.Hash, error) {
+ for nativeParams.Kind() == reflect.Pointer {
+ nativeParams = reflect.Indirect(nativeParams)
}
- var topics []any
- switch item.Kind() {
+ var params []any
+ switch nativeParams.Kind() {
case reflect.Array, reflect.Slice:
- native, err := representArray(item, e.inputInfo)
+ native, err := representArray(nativeParams, e.inputInfo)
if err != nil {
return nil, err
}
- topics = []any{native}
+ params = []any{native}
case reflect.Struct, reflect.Map:
var err error
- if topics, err = unrollItem(item, e.inputInfo); err != nil {
+ if params, err = unrollItem(nativeParams, e.inputInfo); err != nil {
return nil, err
}
default:
- return nil, fmt.Errorf("%w: cannot encode kind %v", commontypes.ErrInvalidType, item.Kind())
+ return nil, fmt.Errorf("%w: cannot encode kind %v", commontypes.ErrInvalidType, nativeParams.Kind())
}
- // abi params allow you to Pack a pointers, but MakeTopics doesn't work with pointers.
- if err := e.derefTopics(topics); err != nil {
+ // abi params allow you to Pack a pointers, but makeTopics doesn't work with pointers.
+ if err := e.derefTopics(params); err != nil {
return nil, err
}
- hashes, err := abi.MakeTopics(topics)
- if err != nil {
- return nil, wrapInternalErr(err)
- }
-
- if len(hashes) != 1 {
- return nil, fmt.Errorf("%w: expected 1 filter set, got %d", commontypes.ErrInternal, len(hashes))
- }
-
- return hashes[0], nil
+ return e.makeTopics(params)
}
func (e *eventBinding) derefTopics(topics []any) error {
@@ -340,11 +337,38 @@ func (e *eventBinding) derefTopics(topics []any) error {
return nil
}
+// makeTopics encodes and hashes params filtering values to match onchain indexed topics.
+func (e *eventBinding) makeTopics(params []any) ([]common.Hash, error) {
+ // make topic value for non-fixed bytes array manually because geth MakeTopics doesn't support it
+ for i, topic := range params {
+ if abiArg := e.inputInfo.Args()[i]; abiArg.Type.T == abi.ArrayTy && (abiArg.Type.Elem != nil && abiArg.Type.Elem.T == abi.UintTy) {
+ packed, err := abi.Arguments{abiArg}.Pack(topic)
+ if err != nil {
+ return nil, err
+ }
+ params[i] = crypto.Keccak256Hash(packed)
+ }
+ }
+
+ hashes, err := abi.MakeTopics(params)
+ if err != nil {
+ return nil, wrapInternalErr(err)
+ }
+
+ if len(hashes) != 1 {
+ return nil, fmt.Errorf("%w: expected 1 filter set, got %d", commontypes.ErrInternal, len(hashes))
+ }
+
+ return hashes[0], nil
+}
+
func (e *eventBinding) decodeLog(ctx context.Context, log *logpoller.Log, into any) error {
+ // decode non indexed topics and apply output modifiers
if err := e.codec.Decode(ctx, log.Data, into, WrapItemType(e.contractName, e.eventName, false)); err != nil {
return err
}
+ // decode indexed topics which is rarely useful since most indexed topic types get Keccak256 hashed and should be just used for log filtering.
topics := make([]common.Hash, len(e.codecTopicInfo.Args()))
if len(log.Topics) < len(topics)+1 {
return fmt.Errorf("%w: not enough topics to decode", commontypes.ErrInvalidType)
@@ -436,6 +460,7 @@ func (e *eventBinding) remapExpression(key string, expression query.Expression)
// remap chain agnostic primitives to chain specific
func (e *eventBinding) remapPrimitive(key string, expression query.Expression) (query.Expression, error) {
switch primitive := expression.Primitive.(type) {
+ // TODO comparator primitive should undergo codec transformations and do hashed types handling similarly to how GetLatestValue handles it BCI-3910
case *primitives.Comparator:
if val, ok := e.eventDataWords[primitive.Name]; ok {
return logpoller.NewEventByWordFilter(e.hash, val, primitive.ValueComparators), nil
diff --git a/core/services/relay/evm/evmtesting/chain_reader_interface_tester.go b/core/services/relay/evm/evmtesting/chain_reader_interface_tester.go
index 4474f054dbc..7812ab202b1 100644
--- a/core/services/relay/evm/evmtesting/chain_reader_interface_tester.go
+++ b/core/services/relay/evm/evmtesting/chain_reader_interface_tester.go
@@ -32,9 +32,10 @@ import (
)
const (
- triggerWithDynamicTopic = "TriggeredEventWithDynamicTopic"
- triggerWithAllTopics = "TriggeredWithFourTopics"
- finalityDepth = 4
+ triggerWithDynamicTopic = "TriggeredEventWithDynamicTopic"
+ triggerWithAllTopics = "TriggeredWithFourTopics"
+ triggerWithAllTopicsWithHashed = "TriggeredWithFourTopicsWithHashed"
+ finalityDepth = 4
)
type EVMChainReaderInterfaceTesterHelper[T TestingT[T]] interface {
@@ -96,7 +97,7 @@ func (it *EVMChainReaderInterfaceTester[T]) Setup(t T) {
AnyContractName: {
ContractABI: chain_reader_tester.ChainReaderTesterMetaData.ABI,
ContractPollingFilter: types.ContractPollingFilter{
- GenericEventNames: []string{EventName, EventWithFilterName},
+ GenericEventNames: []string{EventName, EventWithFilterName, triggerWithAllTopicsWithHashed},
},
Configs: map[string]*types.ChainReaderDefinition{
MethodTakingLatestParamsReturningTestStruct: &methodTakingLatestParamsReturningTestStructConfig,
@@ -145,6 +146,13 @@ func (it *EVMChainReaderInterfaceTester[T]) Setup(t T) {
// These float values can map to different finality concepts across chains.
ConfidenceConfirmations: map[string]int{"0.0": int(evmtypes.Unconfirmed), "1.0": int(evmtypes.Finalized)},
},
+ triggerWithAllTopicsWithHashed: {
+ ChainSpecificName: triggerWithAllTopicsWithHashed,
+ ReadType: types.Event,
+ EventDefinitions: &types.EventDefinitions{
+ InputFields: []string{"Field1", "Field2", "Field3"},
+ },
+ },
MethodReturningSeenStruct: {
ChainSpecificName: "returnSeen",
InputModifications: codec.ModifiersConfig{
diff --git a/core/services/relay/evm/evmtesting/run_tests.go b/core/services/relay/evm/evmtesting/run_tests.go
index f958c055ca7..caa24e8ae2c 100644
--- a/core/services/relay/evm/evmtesting/run_tests.go
+++ b/core/services/relay/evm/evmtesting/run_tests.go
@@ -12,10 +12,9 @@ import (
clcommontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
"github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
. "github.com/smartcontractkit/chainlink-common/pkg/types/interfacetests" //nolint common practice to import test mods with .
-
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
)
func RunChainReaderEvmTests[T TestingT[T]](t T, it *EVMChainReaderInterfaceTester[T]) {
@@ -74,6 +73,31 @@ func RunChainReaderEvmTests[T TestingT[T]](t T, it *EVMChainReaderInterfaceTeste
assert.Equal(t, int32(3), latest.Field3)
})
+ t.Run("Filtering can be done on indexed topics that get hashed", func(t T) {
+ it.Setup(t)
+ it.dirtyContracts = true
+ triggerFourTopicsWithHashed(t, it, "1", [32]uint8{2}, [32]byte{5})
+ triggerFourTopicsWithHashed(t, it, "2", [32]uint8{2}, [32]byte{3})
+ triggerFourTopicsWithHashed(t, it, "1", [32]uint8{3}, [32]byte{3})
+
+ ctx := it.Helper.Context(t)
+ cr := it.GetChainReader(t)
+ require.NoError(t, cr.Bind(ctx, it.GetBindings(t)))
+ var latest struct {
+ Field3 [32]byte
+ }
+ params := struct {
+ Field1 string
+ Field2 [32]uint8
+ Field3 [32]byte
+ }{Field1: "1", Field2: [32]uint8{2}, Field3: [32]byte{5}}
+
+ time.Sleep(it.MaxWaitTimeForEvents())
+ require.NoError(t, cr.GetLatestValue(ctx, AnyContractName, triggerWithAllTopicsWithHashed, primitives.Unconfirmed, params, &latest))
+ // only checking Field3 topic makes sense since it isn't hashed, to check other fields we'd have to replicate solidity encoding and hashing
+ assert.Equal(t, [32]uint8{5}, latest.Field3)
+ })
+
t.Run("Bind returns error on missing contract at address", func(t T) {
it.Setup(t)
@@ -95,3 +119,12 @@ func triggerFourTopics[T TestingT[T]](t T, it *EVMChainReaderInterfaceTester[T],
it.IncNonce()
it.AwaitTx(t, tx)
}
+
+func triggerFourTopicsWithHashed[T TestingT[T]](t T, it *EVMChainReaderInterfaceTester[T], i1 string, i2 [32]uint8, i3 [32]byte) {
+ tx, err := it.contractTesters[it.address].ChainReaderTesterTransactor.TriggerWithFourTopicsWithHashed(it.GetAuthWithGasSet(t), i1, i2, i3)
+ require.NoError(t, err)
+ require.NoError(t, err)
+ it.Helper.Commit()
+ it.IncNonce()
+ it.AwaitTx(t, tx)
+}
diff --git a/core/services/relay/evm/types/codec_entry.go b/core/services/relay/evm/types/codec_entry.go
index 38242c43a2d..9a8103cf7f9 100644
--- a/core/services/relay/evm/types/codec_entry.go
+++ b/core/services/relay/evm/types/codec_entry.go
@@ -200,7 +200,7 @@ func getNativeAndCheckedTypesForArg(arg *abi.Argument) (reflect.Type, reflect.Ty
return reflect.TypeOf(common.Hash{}), reflect.TypeOf(common.Hash{}), nil
}
fallthrough
- case abi.SliceTy, abi.TupleTy, abi.FixedBytesTy, abi.FixedPointTy, abi.FunctionTy:
+ case abi.SliceTy, abi.TupleTy, abi.FixedPointTy, abi.FunctionTy:
// https://github.com/ethereum/go-ethereum/blob/release/1.12/accounts/abi/topics.go#L78
return nil, nil, fmt.Errorf("%w: unsupported indexed type: %v", commontypes.ErrInvalidConfig, arg.Type)
default:
diff --git a/core/services/relay/evm/types/codec_entry_test.go b/core/services/relay/evm/types/codec_entry_test.go
index 06b08fcecf2..64e0998716a 100644
--- a/core/services/relay/evm/types/codec_entry_test.go
+++ b/core/services/relay/evm/types/codec_entry_test.go
@@ -273,17 +273,27 @@ func TestCodecEntry(t *testing.T) {
assertHaveSameStructureAndNames(t, iNative.Type(), entry.CheckedType())
})
- t.Run("Indexed non basic types change to hash", func(t *testing.T) {
- anyType, err := abi.NewType("string", "", []abi.ArgumentMarshaling{})
+ t.Run("Indexed string and bytes array change to hash", func(t *testing.T) {
+ stringType, err := abi.NewType("string", "", []abi.ArgumentMarshaling{})
require.NoError(t, err)
- entry := NewCodecEntry(abi.Arguments{{Name: "Name", Type: anyType, Indexed: true}}, nil, nil)
- require.NoError(t, entry.Init())
- nativeField, ok := entry.CheckedType().FieldByName("Name")
- require.True(t, ok)
- assert.Equal(t, reflect.TypeOf(&common.Hash{}), nativeField.Type)
- native, err := entry.ToNative(reflect.New(entry.CheckedType()))
+ arrayType, err := abi.NewType("uint8[32]", "", []abi.ArgumentMarshaling{})
require.NoError(t, err)
- assertHaveSameStructureAndNames(t, native.Type().Elem(), entry.CheckedType())
+
+ abiArgs := abi.Arguments{
+ {Name: "String", Type: stringType, Indexed: true},
+ {Name: "Array", Type: arrayType, Indexed: true},
+ }
+
+ for i := 0; i < len(abiArgs); i++ {
+ entry := NewCodecEntry(abi.Arguments{abiArgs[i]}, nil, nil)
+ require.NoError(t, entry.Init())
+ nativeField, ok := entry.CheckedType().FieldByName(abiArgs[i].Name)
+ require.True(t, ok)
+ assert.Equal(t, reflect.TypeOf(&common.Hash{}), nativeField.Type)
+ native, err := entry.ToNative(reflect.New(entry.CheckedType()))
+ require.NoError(t, err)
+ assertHaveSameStructureAndNames(t, native.Type().Elem(), entry.CheckedType())
+ }
})
t.Run("Too many indexed items returns an error", func(t *testing.T) {
From 55e7c8b5055c975665a59199d5eda9fa21801a07 Mon Sep 17 00:00:00 2001
From: "Abdelrahman Soliman (Boda)"
<2677789+asoliman92@users.noreply.github.com>
Date: Tue, 6 Aug 2024 15:15:37 +0400
Subject: [PATCH 11/52] [CCIP-Merge] OCR2 plugins [CCIP-2942] (#14043)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* Copy over core/services/ocr2/plugins/ccip from ccip repo (#14024)
This is the first part of merging offchain code from the ccip repo (https://github.com/smartcontractkit/ccip) into chainlink
Maintaining history across repos for specific directories was complicated so we chose to copy the directory right away.
-----------------
Co-authored-by: Abdelrahman Soliman (Boda) <2677789+asoliman92@users.noreply.github.com>
Co-authored-by: Agustina Aldasoro
Co-authored-by: Amir Y <83904651+amirylm@users.noreply.github.com>
Co-authored-by: André Vitor de Lima Matos
Co-authored-by: AnieeG
Co-authored-by: Anindita Ghosh <88458927+AnieeG@users.noreply.github.com>
Co-authored-by: Chunkai Yang
Co-authored-by: Connor Stein
Co-authored-by: Evaldas Latoškinas <34982762+elatoskinas@users.noreply.github.com>
Co-authored-by: Jean Arnaud
Co-authored-by: Justin Kaseman
Co-authored-by: Makram
Co-authored-by: Makram Kamaleddine
Co-authored-by: Mateusz Sekara
Co-authored-by: Matt Yang
Co-authored-by: Patrick
Co-authored-by: Rens Rooimans
Co-authored-by: Roman Kashitsyn
Co-authored-by: Roman Kashitsyn
Co-authored-by: Ryan Stout
Co-authored-by: Will Winder
Co-authored-by: connorwstein
Co-authored-by: dimitris
Co-authored-by: dimkouv
Co-authored-by: nogo <0xnogo@gmail.com>
Co-authored-by: nogo <110664798+0xnogo@users.noreply.github.com>
Co-authored-by: valerii-kabisov-cll <172247313+valerii-kabisov-cll@users.noreply.github.com>
* [CCIP-2942] OCR2 plugins merge fixes [CCIP-2942] (#14026)
* Add status checker
original commit:
https://github.com/smartcontractkit/ccip/commit/451984ad0469c7d685e305dba0a0d94eb0c9a053
* Add CCIP to types.go
* Add Unsafe_SetConnectionsManager to feeds service for testing
* Add CCIP feature to core.toml
* Rebuilding config
* Wiring up relayer and ocr2 delegates - this commit touches shared code !
* Adding mockery configuration for ccip specific code
* Setting CCIP feature flag to true - it's no longer used anywhere
---------
Co-authored-by: Mateusz Sekara
* VRF-1138: Make CTF tests to reuse existing VRF Wrapper (#13854)
* VRF-1138: Make CTF tests to reuse existing VRF Wrapper
* VRF-1138: remove old code
* VRF-1138: remove comments
* VRF-1138: refactoring
* VRF-1138: pr comments
* VRF-1138: pr comments
* VRF-1138: fixing lint issues
* VRF-1138: PR comments
* changes to support deterministic message hash in the remote target (#13935)
* common version update to head of develop (#14030)
* Add changeset
---------
Co-authored-by: Agustina Aldasoro
Co-authored-by: Amir Y <83904651+amirylm@users.noreply.github.com>
Co-authored-by: André Vitor de Lima Matos
Co-authored-by: AnieeG
Co-authored-by: Anindita Ghosh <88458927+AnieeG@users.noreply.github.com>
Co-authored-by: Chunkai Yang
Co-authored-by: Connor Stein
Co-authored-by: Evaldas Latoškinas <34982762+elatoskinas@users.noreply.github.com>
Co-authored-by: Jean Arnaud
Co-authored-by: Justin Kaseman
Co-authored-by: Makram
Co-authored-by: Mateusz Sekara
Co-authored-by: Matt Yang
Co-authored-by: Patrick
Co-authored-by: Rens Rooimans
Co-authored-by: Roman Kashitsyn
Co-authored-by: Roman Kashitsyn
Co-authored-by: Ryan Stout
Co-authored-by: Will Winder
Co-authored-by: dimitris
Co-authored-by: nogo <0xnogo@gmail.com>
Co-authored-by: nogo <110664798+0xnogo@users.noreply.github.com>
Co-authored-by: valerii-kabisov-cll <172247313+valerii-kabisov-cll@users.noreply.github.com>
Co-authored-by: Ilja Pavlovs <5300706+iljapavlovs@users.noreply.github.com>
Co-authored-by: Matthew Pendrey
---
.changeset/young-mice-invent.md | 5 +
.mockery.yaml | 93 +-
core/config/docs/core.toml | 2 +
core/config/toml/types.go | 4 +
core/scripts/go.mod | 2 +-
core/scripts/go.sum | 4 +-
core/services/chainlink/config_test.go | 2 +
.../testdata/config-empty-effective.toml | 1 +
.../chainlink/testdata/config-full.toml | 1 +
.../config-multi-chain-effective.toml | 1 +
core/services/feeds/mocks/service.go | 33 +
core/services/feeds/service.go | 14 +
core/services/ocr2/delegate.go | 401 +++-
core/services/ocr2/plugins/ccip/LICENSE.md | 55 +
.../plugins/ccip/abihelpers/abi_helpers.go | 187 ++
.../ccip/abihelpers/abi_helpers_test.go | 147 ++
.../ocr2/plugins/ccip/ccipcommit/factory.go | 150 ++
.../plugins/ccip/ccipcommit/factory_test.go | 100 +
.../plugins/ccip/ccipcommit/initializers.go | 241 +++
.../ocr2/plugins/ccip/ccipcommit/ocr2.go | 753 +++++++
.../ocr2/plugins/ccip/ccipcommit/ocr2_test.go | 1861 +++++++++++++++++
.../ocr2/plugins/ccip/ccipexec/batching.go | 540 +++++
.../plugins/ccip/ccipexec/batching_test.go | 910 ++++++++
.../ocr2/plugins/ccip/ccipexec/factory.go | 164 ++
.../plugins/ccip/ccipexec/factory_test.go | 67 +
.../ocr2/plugins/ccip/ccipexec/gashelpers.go | 83 +
.../plugins/ccip/ccipexec/gashelpers_test.go | 179 ++
.../ocr2/plugins/ccip/ccipexec/helpers.go | 53 +
.../plugins/ccip/ccipexec/helpers_test.go | 96 +
.../ocr2/plugins/ccip/ccipexec/inflight.go | 82 +
.../plugins/ccip/ccipexec/inflight_test.go | 42 +
.../plugins/ccip/ccipexec/initializers.go | 228 ++
.../ocr2/plugins/ccip/ccipexec/ocr2.go | 845 ++++++++
.../ocr2/plugins/ccip/ccipexec/ocr2_test.go | 1421 +++++++++++++
.../plugins/ccip/clo_ccip_integration_test.go | 137 ++
.../ocr2/plugins/ccip/config/chain_config.go | 48 +
.../plugins/ccip/config/chain_config_test.go | 135 ++
.../ocr2/plugins/ccip/config/config.go | 152 ++
.../ocr2/plugins/ccip/config/config_test.go | 234 +++
.../plugins/ccip/config/offchain_config.go | 26 +
.../plugins/ccip/config/type_and_version.go | 73 +
.../ocr2/plugins/ccip/exportinternal.go | 135 ++
.../plugins/ccip/integration_legacy_test.go | 599 ++++++
.../ocr2/plugins/ccip/integration_test.go | 644 ++++++
.../plugins/ccip/internal/cache/autosync.go | 141 ++
.../ccip/internal/cache/autosync_test.go | 128 ++
.../ccip/internal/cache/chain_health.go | 273 +++
.../ccip/internal/cache/chain_health_test.go | 303 +++
.../ccip/internal/cache/commit_roots.go | 243 +++
.../ccip/internal/cache/commit_roots_test.go | 297 +++
.../internal/cache/commit_roots_unit_test.go | 212 ++
.../ocr2/plugins/ccip/internal/cache/lazy.go | 20 +
.../plugins/ccip/internal/cache/lazy_test.go | 71 +
.../internal/cache/mocks/chain_health_mock.go | 183 ++
.../internal/cache/observed_chain_health.go | 70 +
.../cache/observed_chain_health_test.go | 62 +
.../ocr2/plugins/ccip/internal/cache/once.go | 38 +
.../plugins/ccip/internal/cache/once_test.go | 83 +
.../plugins/ccip/internal/ccipcalc/addr.go | 44 +
.../plugins/ccip/internal/ccipcalc/calc.go | 69 +
.../ccip/internal/ccipcalc/calc_test.go | 220 ++
.../ccip/internal/ccipcommon/shortcuts.go | 140 ++
.../internal/ccipcommon/shortcuts_test.go | 196 ++
.../mocks/token_pool_batched_reader_mock.go | 142 ++
.../batchreader/token_pool_batch_reader.go | 192 ++
.../token_pool_batch_reader_test.go | 86 +
.../mocks/price_registry_mock.go | 97 +
.../ccipdata/ccipdataprovider/provider.go | 40 +
.../internal/ccipdata/commit_store_reader.go | 81 +
.../ccipdata/commit_store_reader_test.go | 423 ++++
.../internal/ccipdata/factory/commit_store.go | 121 ++
.../ccipdata/factory/commit_store_test.go | 37 +
.../ccip/internal/ccipdata/factory/offramp.go | 125 ++
.../internal/ccipdata/factory/offramp_test.go | 44 +
.../ccip/internal/ccipdata/factory/onramp.go | 88 +
.../internal/ccipdata/factory/onramp_test.go | 45 +
.../ccipdata/factory/price_registry.go | 82 +
.../ccipdata/factory/price_registry_test.go | 46 +
.../ccipdata/factory/versionfinder.go | 44 +
.../mocks/commit_store_reader_mock.go | 985 +++++++++
.../ccipdata/mocks/offramp_reader_mock.go | 949 +++++++++
.../ccipdata/mocks/onramp_reader_mock.go | 480 +++++
.../mocks/price_registry_reader_mock.go | 498 +++++
.../ccipdata/mocks/token_pool_reader_mock.go | 127 ++
.../ccipdata/mocks/usdc_reader_mock.go | 97 +
.../ccip/internal/ccipdata/offramp_reader.go | 13 +
.../internal/ccipdata/offramp_reader_test.go | 416 ++++
.../ccip/internal/ccipdata/onramp_reader.go | 21 +
.../internal/ccipdata/onramp_reader_test.go | 479 +++++
.../ccipdata/price_registry_reader.go | 14 +
.../ccipdata/price_registry_reader_test.go | 296 +++
.../plugins/ccip/internal/ccipdata/reader.go | 78 +
.../ccip/internal/ccipdata/reader_test.go | 72 +
.../ccip/internal/ccipdata/retry_config.go | 9 +
.../ccip/internal/ccipdata/test_utils.go | 36 +
.../internal/ccipdata/token_pool_reader.go | 10 +
.../ccip/internal/ccipdata/usdc_reader.go | 169 ++
.../ccipdata/usdc_reader_internal_test.go | 178 ++
.../internal/ccipdata/v1_0_0/commit_store.go | 456 ++++
.../ccipdata/v1_0_0/commit_store_test.go | 49 +
.../ccip/internal/ccipdata/v1_0_0/hasher.go | 85 +
.../internal/ccipdata/v1_0_0/hasher_test.go | 84 +
.../ccip/internal/ccipdata/v1_0_0/offramp.go | 689 ++++++
.../ccipdata/v1_0_0/offramp_reader_test.go | 38 +
.../v1_0_0/offramp_reader_unit_test.go | 231 ++
.../internal/ccipdata/v1_0_0/offramp_test.go | 232 ++
.../ccip/internal/ccipdata/v1_0_0/onramp.go | 240 +++
.../ccipdata/v1_0_0/price_registry.go | 310 +++
.../internal/ccipdata/v1_0_0/test_helpers.go | 90 +
.../ccip/internal/ccipdata/v1_1_0/onramp.go | 70 +
.../internal/ccipdata/v1_2_0/commit_store.go | 469 +++++
.../ccipdata/v1_2_0/commit_store_test.go | 224 ++
.../ccip/internal/ccipdata/v1_2_0/hasher.go | 101 +
.../internal/ccipdata/v1_2_0/hasher_test.go | 78 +
.../ccip/internal/ccipdata/v1_2_0/offramp.go | 340 +++
.../ccipdata/v1_2_0/offramp_reader_test.go | 38 +
.../v1_2_0/offramp_reader_unit_test.go | 36 +
.../internal/ccipdata/v1_2_0/offramp_test.go | 173 ++
.../ccip/internal/ccipdata/v1_2_0/onramp.go | 255 +++
.../internal/ccipdata/v1_2_0/onramp_test.go | 57 +
.../ccipdata/v1_2_0/price_registry.go | 68 +
.../internal/ccipdata/v1_2_0/test_helpers.go | 48 +
.../internal/ccipdata/v1_2_0/token_pool.go | 48 +
.../ccipdata/v1_2_0/token_pool_test.go | 24 +
.../internal/ccipdata/v1_4_0/token_pool.go | 48 +
.../ccipdata/v1_4_0/token_pool_test.go | 24 +
.../internal/ccipdata/v1_5_0/commit_store.go | 59 +
.../ccip/internal/ccipdata/v1_5_0/hasher.go | 101 +
.../internal/ccipdata/v1_5_0/hasher_test.go | 78 +
.../ccip/internal/ccipdata/v1_5_0/offramp.go | 199 ++
.../internal/ccipdata/v1_5_0/offramp_test.go | 1 +
.../ccip/internal/ccipdata/v1_5_0/onramp.go | 259 +++
.../internal/ccipdata/v1_5_0/onramp_test.go | 210 ++
.../ccipdb/mocks/price_service_mock.go | 250 +++
.../ccip/internal/ccipdb/price_service.go | 400 ++++
.../internal/ccipdb/price_service_test.go | 755 +++++++
.../ccip/internal/logpollerutil/filters.go | 73 +
.../internal/logpollerutil/filters_test.go | 156 ++
.../internal/observability/commit_store.go | 75 +
.../ccip/internal/observability/metrics.go | 75 +
.../internal/observability/metrics_test.go | 87 +
.../ccip/internal/observability/offramp.go | 69 +
.../ccip/internal/observability/onramp.go | 63 +
.../observability/onramp_observed_test.go | 155 ++
.../internal/observability/price_registry.go | 64 +
.../internal/oraclelib/backfilled_oracle.go | 218 ++
.../oraclelib/backfilled_oracle_test.go | 56 +
.../plugins/ccip/internal/parseutil/bigint.go | 44 +
.../ccip/internal/parseutil/bigint_test.go | 42 +
.../plugins/ccip/internal/pricegetter/evm.go | 239 +++
.../ccip/internal/pricegetter/evm_test.go | 546 +++++
.../plugins/ccip/internal/pricegetter/mock.go | 211 ++
.../ccip/internal/pricegetter/pipeline.go | 114 +
.../internal/pricegetter/pipeline_test.go | 178 ++
.../ccip/internal/pricegetter/pricegetter.go | 7 +
.../ocr2/plugins/ccip/internal/rpclib/evm.go | 337 +++
.../plugins/ccip/internal/rpclib/evm_test.go | 223 ++
.../internal/rpclib/rpclibmocks/evm_mock.go | 97 +
core/services/ocr2/plugins/ccip/metrics.go | 99 +
.../ocr2/plugins/ccip/metrics_test.go | 47 +
.../ocr2/plugins/ccip/observations.go | 149 ++
.../ocr2/plugins/ccip/observations_test.go | 305 +++
.../ocr2/plugins/ccip/pkg/leafer/leafer.go | 61 +
.../plugins/ccip/prices/da_price_estimator.go | 176 ++
.../ccip/prices/da_price_estimator_test.go | 440 ++++
.../ccip/prices/exec_price_estimator.go | 65 +
.../ccip/prices/exec_price_estimator_test.go | 351 ++++
.../ccip/prices/gas_price_estimator.go | 59 +
.../prices/gas_price_estimator_commit_mock.go | 269 +++
.../prices/gas_price_estimator_exec_mock.go | 274 +++
.../ccip/prices/gas_price_estimator_mock.go | 331 +++
.../ocr2/plugins/ccip/proxycommitstore.go | 135 ++
.../ccip/testhelpers/ccip_contracts.go | 1580 ++++++++++++++
.../ocr2/plugins/ccip/testhelpers/config.go | 73 +
.../ccip/testhelpers/integration/chainlink.go | 1035 +++++++++
.../ccip/testhelpers/integration/jobspec.go | 334 +++
.../ocr2/plugins/ccip/testhelpers/offramp.go | 119 ++
.../ccip/testhelpers/simulated_backend.go | 75 +
.../plugins/ccip/testhelpers/structfields.go | 44 +
.../testhelpers_1_4_0/ccip_contracts_1_4_0.go | 1585 ++++++++++++++
.../testhelpers_1_4_0/chainlink.go | 1045 +++++++++
.../testhelpers_1_4_0/config_1_4_0.go | 73 +
.../ocr2/plugins/ccip/tokendata/bgworker.go | 213 ++
.../plugins/ccip/tokendata/bgworker_test.go | 188 ++
.../ccip/tokendata/http/http_client.go | 48 +
.../tokendata/http/observed_http_client.go | 69 +
.../observability/usdc_client_test.go | 151 ++
.../ocr2/plugins/ccip/tokendata/reader.go | 19 +
.../plugins/ccip/tokendata/reader_mock.go | 143 ++
.../ocr2/plugins/ccip/tokendata/usdc/usdc.go | 339 +++
.../ccip/tokendata/usdc/usdc_blackbox_test.go | 119 ++
.../plugins/ccip/tokendata/usdc/usdc_test.go | 423 ++++
.../ocr2/plugins/ccip/transactions.rlp | Bin 0 -> 115794 bytes
.../plugins/ccip/transmitter/transmitter.go | 143 ++
.../ccip/transmitter/transmitter_test.go | 282 +++
core/services/ocr2/plugins/ccip/vars.go | 14 +
core/services/ocr2/validate/validate.go | 59 +-
core/services/relay/evm/ccip.go | 205 ++
core/services/relay/evm/ccip_test.go | 18 +
core/services/relay/evm/commit_provider.go | 309 +++
core/services/relay/evm/evm.go | 227 +-
core/services/relay/evm/exec_provider.go | 391 ++++
.../mocks/ccip_transaction_status_checker.go | 104 +
.../evm/statuschecker/txm_status_checker.go | 54 +
.../statuschecker/txm_status_checker_test.go | 103 +
core/services/synchronization/common.go | 2 +
.../testdata/config-empty-effective.toml | 1 +
core/web/resolver/testdata/config-full.toml | 1 +
.../config-multi-chain-effective.toml | 1 +
docs/CONFIG.md | 7 +
go.mod | 8 +-
go.sum | 9 +-
integration-tests/go.mod | 2 +-
integration-tests/go.sum | 4 +-
integration-tests/load/go.mod | 2 +-
integration-tests/load/go.sum | 4 +-
testdata/scripts/node/validate/default.txtar | 1 +
.../disk-based-logging-disabled.txtar | 1 +
.../validate/disk-based-logging-no-dir.txtar | 1 +
.../node/validate/disk-based-logging.txtar | 1 +
.../node/validate/invalid-ocr-p2p.txtar | 1 +
testdata/scripts/node/validate/invalid.txtar | 1 +
testdata/scripts/node/validate/valid.txtar | 1 +
testdata/scripts/node/validate/warnings.txtar | 1 +
224 files changed, 42778 insertions(+), 25 deletions(-)
create mode 100644 .changeset/young-mice-invent.md
create mode 100644 core/services/ocr2/plugins/ccip/LICENSE.md
create mode 100644 core/services/ocr2/plugins/ccip/abihelpers/abi_helpers.go
create mode 100644 core/services/ocr2/plugins/ccip/abihelpers/abi_helpers_test.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipcommit/factory.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipcommit/factory_test.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipcommit/initializers.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipcommit/ocr2.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipcommit/ocr2_test.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/batching.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/batching_test.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/factory.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/factory_test.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/gashelpers.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/gashelpers_test.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/helpers.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/helpers_test.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/inflight.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/inflight_test.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/initializers.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/ocr2.go
create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/ocr2_test.go
create mode 100644 core/services/ocr2/plugins/ccip/clo_ccip_integration_test.go
create mode 100644 core/services/ocr2/plugins/ccip/config/chain_config.go
create mode 100644 core/services/ocr2/plugins/ccip/config/chain_config_test.go
create mode 100644 core/services/ocr2/plugins/ccip/config/config.go
create mode 100644 core/services/ocr2/plugins/ccip/config/config_test.go
create mode 100644 core/services/ocr2/plugins/ccip/config/offchain_config.go
create mode 100644 core/services/ocr2/plugins/ccip/config/type_and_version.go
create mode 100644 core/services/ocr2/plugins/ccip/exportinternal.go
create mode 100644 core/services/ocr2/plugins/ccip/integration_legacy_test.go
create mode 100644 core/services/ocr2/plugins/ccip/integration_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/autosync.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/autosync_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/chain_health.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/chain_health_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/commit_roots.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/commit_roots_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/commit_roots_unit_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/lazy.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/lazy_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/mocks/chain_health_mock.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/once.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/once_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipcalc/addr.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipcalc/calc.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipcalc/calc_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/mocks/token_pool_batched_reader_mock.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/mocks/price_registry_mock.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/provider.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/versionfinder.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/commit_store_reader_mock.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/offramp_reader_mock.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/onramp_reader_mock.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/price_registry_reader_mock.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/token_pool_reader_mock.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/usdc_reader_mock.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/reader.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/reader_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/retry_config.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/test_utils.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/token_pool_reader.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader_internal_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_unit_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/onramp.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/price_registry.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/test_helpers.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_1_0/onramp.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_unit_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/price_registry.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/test_helpers.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/commit_store.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdb/mocks/price_service_mock.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdb/price_service.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdb/price_service_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/logpollerutil/filters.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/logpollerutil/filters_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/observability/commit_store.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/observability/metrics.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/observability/metrics_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/observability/offramp.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/observability/onramp.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/observability/onramp_observed_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/observability/price_registry.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/parseutil/bigint.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/parseutil/bigint_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/pricegetter/evm.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/pricegetter/evm_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/pricegetter/mock.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/pricegetter/pricegetter.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/rpclib/evm.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/rpclib/evm_test.go
create mode 100644 core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks/evm_mock.go
create mode 100644 core/services/ocr2/plugins/ccip/metrics.go
create mode 100644 core/services/ocr2/plugins/ccip/metrics_test.go
create mode 100644 core/services/ocr2/plugins/ccip/observations.go
create mode 100644 core/services/ocr2/plugins/ccip/observations_test.go
create mode 100644 core/services/ocr2/plugins/ccip/pkg/leafer/leafer.go
create mode 100644 core/services/ocr2/plugins/ccip/prices/da_price_estimator.go
create mode 100644 core/services/ocr2/plugins/ccip/prices/da_price_estimator_test.go
create mode 100644 core/services/ocr2/plugins/ccip/prices/exec_price_estimator.go
create mode 100644 core/services/ocr2/plugins/ccip/prices/exec_price_estimator_test.go
create mode 100644 core/services/ocr2/plugins/ccip/prices/gas_price_estimator.go
create mode 100644 core/services/ocr2/plugins/ccip/prices/gas_price_estimator_commit_mock.go
create mode 100644 core/services/ocr2/plugins/ccip/prices/gas_price_estimator_exec_mock.go
create mode 100644 core/services/ocr2/plugins/ccip/prices/gas_price_estimator_mock.go
create mode 100644 core/services/ocr2/plugins/ccip/proxycommitstore.go
create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/ccip_contracts.go
create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/config.go
create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/integration/chainlink.go
create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/integration/jobspec.go
create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/offramp.go
create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/simulated_backend.go
create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/structfields.go
create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/ccip_contracts_1_4_0.go
create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/chainlink.go
create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/config_1_4_0.go
create mode 100644 core/services/ocr2/plugins/ccip/tokendata/bgworker.go
create mode 100644 core/services/ocr2/plugins/ccip/tokendata/bgworker_test.go
create mode 100644 core/services/ocr2/plugins/ccip/tokendata/http/http_client.go
create mode 100644 core/services/ocr2/plugins/ccip/tokendata/http/observed_http_client.go
create mode 100644 core/services/ocr2/plugins/ccip/tokendata/observability/usdc_client_test.go
create mode 100644 core/services/ocr2/plugins/ccip/tokendata/reader.go
create mode 100644 core/services/ocr2/plugins/ccip/tokendata/reader_mock.go
create mode 100644 core/services/ocr2/plugins/ccip/tokendata/usdc/usdc.go
create mode 100644 core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_blackbox_test.go
create mode 100644 core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_test.go
create mode 100644 core/services/ocr2/plugins/ccip/transactions.rlp
create mode 100644 core/services/ocr2/plugins/ccip/transmitter/transmitter.go
create mode 100644 core/services/ocr2/plugins/ccip/transmitter/transmitter_test.go
create mode 100644 core/services/ocr2/plugins/ccip/vars.go
create mode 100644 core/services/relay/evm/ccip.go
create mode 100644 core/services/relay/evm/ccip_test.go
create mode 100644 core/services/relay/evm/commit_provider.go
create mode 100644 core/services/relay/evm/exec_provider.go
create mode 100644 core/services/relay/evm/statuschecker/mocks/ccip_transaction_status_checker.go
create mode 100644 core/services/relay/evm/statuschecker/txm_status_checker.go
create mode 100644 core/services/relay/evm/statuschecker/txm_status_checker_test.go
diff --git a/.changeset/young-mice-invent.md b/.changeset/young-mice-invent.md
new file mode 100644
index 00000000000..ba9c67198aa
--- /dev/null
+++ b/.changeset/young-mice-invent.md
@@ -0,0 +1,5 @@
+---
+"chainlink": minor
+---
+
+Added CCIP plugins code from https://github.com/smartcontractkit/ccip/ #added
diff --git a/.mockery.yaml b/.mockery.yaml
index 77d2145a461..8fab61a5b9d 100644
--- a/.mockery.yaml
+++ b/.mockery.yaml
@@ -457,4 +457,95 @@ packages:
filename: optimism_dispute_game_factory_interface.go
outpkg: mock_optimism_dispute_game_factory
interfaces:
- OptimismDisputeGameFactoryInterface:
+ OptimismDisputeGameFactoryInterface:
+ github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache:
+ config:
+ filename: chain_health_mock.go
+ interfaces:
+ ChainHealthcheck:
+ github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata:
+ interfaces:
+ CommitStoreReader:
+ config:
+ filename: commit_store_reader_mock.go
+ OffRampReader:
+ config:
+ filename: offramp_reader_mock.go
+ OnRampReader:
+ config:
+ filename: onramp_reader_mock.go
+ PriceRegistryReader:
+ config:
+ filename: price_registry_reader_mock.go
+ TokenPoolReader:
+ config:
+ filename: token_pool_reader_mock.go
+ USDCReader:
+ config:
+ filename: usdc_reader_mock.go
+ github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader:
+ config:
+ filename: token_pool_batched_reader_mock.go
+ interfaces:
+ TokenPoolBatchedReader:
+ github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider:
+ config:
+ filename: price_registry_mock.go
+ interfaces:
+ PriceRegistry:
+ github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdb:
+ config:
+ filename: price_service_mock.go
+ interfaces:
+ PriceService:
+ github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter:
+ config:
+ filename: mock.go
+ dir: "{{ .InterfaceDir }}/"
+ outpkg: pricegetter
+ interfaces:
+ PriceGetter:
+ config:
+ mockname: "Mock{{ .InterfaceName }}"
+ github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/statuschecker:
+ interfaces:
+ CCIPTransactionStatusChecker:
+ github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib:
+ config:
+ outpkg: rpclibmocks
+ interfaces:
+ BatchCaller:
+ config:
+ filename: batch_caller.go
+ dir: core/services/relay/evm/rpclibmocks
+ EvmBatchCaller:
+ config:
+ filename: evm_mock.go
+ dir: "{{ .InterfaceDir }}/rpclibmocks"
+
+ github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices:
+ config:
+ dir: "{{ .InterfaceDir }}/"
+ outpkg: prices
+ interfaces:
+ GasPriceEstimatorCommit:
+ config:
+ mockname: "Mock{{ .InterfaceName }}"
+ filename: gas_price_estimator_commit_mock.go
+ GasPriceEstimatorExec:
+ config:
+ mockname: "Mock{{ .InterfaceName }}"
+ filename: gas_price_estimator_exec_mock.go
+ GasPriceEstimator:
+ config:
+ mockname: "Mock{{ .InterfaceName }}"
+ filename: gas_price_estimator_mock.go
+ github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata:
+ config:
+ filename: reader_mock.go
+ dir: "{{ .InterfaceDir }}/"
+ outpkg: tokendata
+ interfaces:
+ Reader:
+ config:
+ mockname: "Mock{{ .InterfaceName }}"
\ No newline at end of file
diff --git a/core/config/docs/core.toml b/core/config/docs/core.toml
index d1b922cf291..d0960779c6c 100644
--- a/core/config/docs/core.toml
+++ b/core/config/docs/core.toml
@@ -13,6 +13,8 @@ FeedsManager = true # Default
LogPoller = false # Default
# UICSAKeys enables CSA Keys in the UI.
UICSAKeys = false # Default
+# CCIP enables the CCIP service.
+CCIP = true # Default
[Database]
# DefaultIdleInTxSessionTimeout is the maximum time allowed for a transaction to be open and idle before timing out. See Postgres `idle_in_transaction_session_timeout` for more details.
diff --git a/core/config/toml/types.go b/core/config/toml/types.go
index f827f086225..0c91ddd81a9 100644
--- a/core/config/toml/types.go
+++ b/core/config/toml/types.go
@@ -303,6 +303,7 @@ type Feature struct {
FeedsManager *bool
LogPoller *bool
UICSAKeys *bool
+ CCIP *bool
}
func (f *Feature) setFrom(f2 *Feature) {
@@ -315,6 +316,9 @@ func (f *Feature) setFrom(f2 *Feature) {
if v := f2.UICSAKeys; v != nil {
f.UICSAKeys = v
}
+ if v := f2.CCIP; v != nil {
+ f.CCIP = v
+ }
}
type Database struct {
diff --git a/core/scripts/go.mod b/core/scripts/go.mod
index fe4ee2c9748..0b7f510bcd8 100644
--- a/core/scripts/go.mod
+++ b/core/scripts/go.mod
@@ -60,7 +60,7 @@ require (
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/XSAM/otelsql v0.27.0 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
- github.com/avast/retry-go/v4 v4.5.1 // indirect
+ github.com/avast/retry-go/v4 v4.6.0 // indirect
github.com/bahlo/generic-list-go v0.2.0 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
diff --git a/core/scripts/go.sum b/core/scripts/go.sum
index 76eaf615279..6abc303888f 100644
--- a/core/scripts/go.sum
+++ b/core/scripts/go.sum
@@ -147,8 +147,8 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/avast/retry-go/v4 v4.5.1 h1:AxIx0HGi4VZ3I02jr78j5lZ3M6x1E0Ivxa6b0pUUh7o=
-github.com/avast/retry-go/v4 v4.5.1/go.mod h1:/sipNsvNB3RRuT5iNcb6h73nw3IBmXJ/H3XrCQYSOpc=
+github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA=
+github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE=
github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4=
diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go
index 9b40e4dfce4..0038be8a979 100644
--- a/core/services/chainlink/config_test.go
+++ b/core/services/chainlink/config_test.go
@@ -261,6 +261,7 @@ func TestConfig_Marshal(t *testing.T) {
FeedsManager: ptr(true),
LogPoller: ptr(true),
UICSAKeys: ptr(true),
+ CCIP: ptr(true),
}
full.Database = toml.Database{
DefaultIdleInTxSessionTimeout: commoncfg.MustNewDuration(time.Minute),
@@ -771,6 +772,7 @@ Headers = ['Authorization: token', 'X-SomeOther-Header: value with spaces | and
FeedsManager = true
LogPoller = true
UICSAKeys = true
+CCIP = true
`},
{"Database", Config{Core: toml.Core{Database: full.Database}}, `[Database]
DefaultIdleInTxSessionTimeout = '1m0s'
diff --git a/core/services/chainlink/testdata/config-empty-effective.toml b/core/services/chainlink/testdata/config-empty-effective.toml
index 1bad3fd91c6..f1325d824ea 100644
--- a/core/services/chainlink/testdata/config-empty-effective.toml
+++ b/core/services/chainlink/testdata/config-empty-effective.toml
@@ -6,6 +6,7 @@ ShutdownGracePeriod = '5s'
FeedsManager = true
LogPoller = false
UICSAKeys = false
+CCIP = true
[Database]
DefaultIdleInTxSessionTimeout = '1h0m0s'
diff --git a/core/services/chainlink/testdata/config-full.toml b/core/services/chainlink/testdata/config-full.toml
index 21d68c23ada..d752398f039 100644
--- a/core/services/chainlink/testdata/config-full.toml
+++ b/core/services/chainlink/testdata/config-full.toml
@@ -6,6 +6,7 @@ ShutdownGracePeriod = '10s'
FeedsManager = true
LogPoller = true
UICSAKeys = true
+CCIP = true
[Database]
DefaultIdleInTxSessionTimeout = '1m0s'
diff --git a/core/services/chainlink/testdata/config-multi-chain-effective.toml b/core/services/chainlink/testdata/config-multi-chain-effective.toml
index c56e755d360..12427650f42 100644
--- a/core/services/chainlink/testdata/config-multi-chain-effective.toml
+++ b/core/services/chainlink/testdata/config-multi-chain-effective.toml
@@ -6,6 +6,7 @@ ShutdownGracePeriod = '5s'
FeedsManager = true
LogPoller = false
UICSAKeys = false
+CCIP = true
[Database]
DefaultIdleInTxSessionTimeout = '1h0m0s'
diff --git a/core/services/feeds/mocks/service.go b/core/services/feeds/mocks/service.go
index a660420759e..d37c327850d 100644
--- a/core/services/feeds/mocks/service.go
+++ b/core/services/feeds/mocks/service.go
@@ -1403,6 +1403,39 @@ func (_c *Service_SyncNodeInfo_Call) RunAndReturn(run func(context.Context, int6
return _c
}
+// Unsafe_SetConnectionsManager provides a mock function with given fields: _a0
+func (_m *Service) Unsafe_SetConnectionsManager(_a0 feeds.ConnectionsManager) {
+ _m.Called(_a0)
+}
+
+// Service_Unsafe_SetConnectionsManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Unsafe_SetConnectionsManager'
+type Service_Unsafe_SetConnectionsManager_Call struct {
+ *mock.Call
+}
+
+// Unsafe_SetConnectionsManager is a helper method to define mock.On call
+// - _a0 feeds.ConnectionsManager
+func (_e *Service_Expecter) Unsafe_SetConnectionsManager(_a0 interface{}) *Service_Unsafe_SetConnectionsManager_Call {
+ return &Service_Unsafe_SetConnectionsManager_Call{Call: _e.mock.On("Unsafe_SetConnectionsManager", _a0)}
+}
+
+func (_c *Service_Unsafe_SetConnectionsManager_Call) Run(run func(_a0 feeds.ConnectionsManager)) *Service_Unsafe_SetConnectionsManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(feeds.ConnectionsManager))
+ })
+ return _c
+}
+
+func (_c *Service_Unsafe_SetConnectionsManager_Call) Return() *Service_Unsafe_SetConnectionsManager_Call {
+ _c.Call.Return()
+ return _c
+}
+
+func (_c *Service_Unsafe_SetConnectionsManager_Call) RunAndReturn(run func(feeds.ConnectionsManager)) *Service_Unsafe_SetConnectionsManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// UpdateChainConfig provides a mock function with given fields: ctx, cfg
func (_m *Service) UpdateChainConfig(ctx context.Context, cfg feeds.ChainConfig) (int64, error) {
ret := _m.Called(ctx, cfg)
diff --git a/core/services/feeds/service.go b/core/services/feeds/service.go
index b11b2b0167a..1733d4a7582 100644
--- a/core/services/feeds/service.go
+++ b/core/services/feeds/service.go
@@ -107,6 +107,9 @@ type Service interface {
ListSpecsByJobProposalIDs(ctx context.Context, ids []int64) ([]JobProposalSpec, error)
RejectSpec(ctx context.Context, id int64) error
UpdateSpecDefinition(ctx context.Context, id int64, spec string) error
+
+ // Unsafe_SetConnectionsManager Only for testing
+ Unsafe_SetConnectionsManager(ConnectionsManager)
}
type service struct {
@@ -1105,6 +1108,16 @@ func (s *service) observeJobProposalCounts(ctx context.Context) error {
return nil
}
+// Unsafe_SetConnectionsManager sets the ConnectionsManager on the service.
+//
+// We need to be able to inject a mock for the client to facilitate integration
+// tests.
+//
+// ONLY TO BE USED FOR TESTING.
+func (s *service) Unsafe_SetConnectionsManager(connMgr ConnectionsManager) {
+ s.connMgr = connMgr
+}
+
// findExistingJobForOCR2 looks for existing job for OCR2
func findExistingJobForOCR2(ctx context.Context, j *job.Job, tx job.ORM) (int32, error) {
var contractID string
@@ -1501,5 +1514,6 @@ func (ns NullService) IsJobManaged(ctx context.Context, jobID int64) (bool, erro
func (ns NullService) UpdateSpecDefinition(ctx context.Context, id int64, spec string) error {
return ErrFeedsManagerDisabled
}
+func (ns NullService) Unsafe_SetConnectionsManager(_ ConnectionsManager) {}
//revive:enable
diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go
index db0f4e9725e..5c44825ca2a 100644
--- a/core/services/ocr2/delegate.go
+++ b/core/services/ocr2/delegate.go
@@ -6,18 +6,25 @@ import (
"encoding/json"
"fmt"
"log"
+ "strconv"
"time"
+ "gopkg.in/guregu/null.v4"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink-common/pkg/types/core"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
- "google.golang.org/grpc"
- "gopkg.in/guregu/null.v4"
-
+ chainselectors "github.com/smartcontractkit/chain-selectors"
"github.com/smartcontractkit/libocr/commontypes"
libocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus"
"github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+ "google.golang.org/grpc"
ocr2keepers20 "github.com/smartcontractkit/chainlink-automation/pkg/v2"
ocr2keepers20config "github.com/smartcontractkit/chainlink-automation/pkg/v2/config"
@@ -26,13 +33,11 @@ import (
ocr2keepers20runner "github.com/smartcontractkit/chainlink-automation/pkg/v2/runner"
ocr2keepers21config "github.com/smartcontractkit/chainlink-automation/pkg/v3/config"
ocr2keepers21 "github.com/smartcontractkit/chainlink-automation/pkg/v3/plugin"
-
"github.com/smartcontractkit/chainlink-common/pkg/loop"
"github.com/smartcontractkit/chainlink-common/pkg/loop/reportingplugins"
"github.com/smartcontractkit/chainlink-common/pkg/loop/reportingplugins/ocr3"
"github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
"github.com/smartcontractkit/chainlink-common/pkg/types"
- "github.com/smartcontractkit/chainlink-common/pkg/types/core"
llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo"
"github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox"
datastreamsllo "github.com/smartcontractkit/chainlink-data-streams/llo"
@@ -47,12 +52,15 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key"
"github.com/smartcontractkit/chainlink/v2/core/services/llo"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/ccipcommit"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/ccipexec"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/functions"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/generic"
lloconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/llo/config"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/median"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper"
+
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/autotelemetry21"
ocr2keeper21core "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/validate"
@@ -68,6 +76,8 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/synchronization"
"github.com/smartcontractkit/chainlink/v2/core/services/telemetry"
"github.com/smartcontractkit/chainlink/v2/plugins"
+
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
)
type ErrJobSpecNoRelayer struct {
@@ -284,6 +294,7 @@ func (d *Delegate) cleanupEVM(ctx context.Context, jb job.Job, relayID types.Rel
// an inconsistent state. This assumes UnregisterFilter will return nil if the filter wasn't found
// at all (no rows deleted).
spec := jb.OCR2OracleSpec
+ transmitterID := spec.TransmitterID.String
chain, err := d.legacyChains.Get(relayID.ChainID)
if err != nil {
d.lggr.Errorw("cleanupEVM: failed to get chain id", "chainId", relayID.ChainID, "err", err)
@@ -305,6 +316,51 @@ func (d *Delegate) cleanupEVM(ctx context.Context, jb job.Job, relayID types.Rel
d.lggr.Errorw("failed to derive ocr2keeper filter names from spec", "err", err, "spec", spec)
}
filters = append(filters, filters21...)
+ case types.CCIPCommit:
+ // Write PluginConfig bytes to send source/dest relayer provider + info outside of top level rargs/pargs over the wire
+ var pluginJobSpecConfig ccipconfig.CommitPluginJobSpecConfig
+ err = json.Unmarshal(spec.PluginConfig.Bytes(), &pluginJobSpecConfig)
+ if err != nil {
+ return err
+ }
+
+ dstProvider, err2 := d.ccipCommitGetDstProvider(ctx, jb, pluginJobSpecConfig, transmitterID)
+ if err2 != nil {
+ return err2
+ }
+
+ srcProvider, _, err2 := d.ccipCommitGetSrcProvider(ctx, jb, pluginJobSpecConfig, transmitterID, dstProvider)
+ if err2 != nil {
+ return err2
+ }
+ err2 = ccipcommit.UnregisterCommitPluginLpFilters(srcProvider, dstProvider)
+ if err2 != nil {
+ d.lggr.Errorw("failed to unregister ccip commit plugin filters", "err", err2, "spec", spec)
+ }
+ return nil
+ case types.CCIPExecution:
+ // PROVIDER BASED ARG CONSTRUCTION
+ // Write PluginConfig bytes to send source/dest relayer provider + info outside of top level rargs/pargs over the wire
+ var pluginJobSpecConfig ccipconfig.ExecPluginJobSpecConfig
+ err = json.Unmarshal(spec.PluginConfig.Bytes(), &pluginJobSpecConfig)
+ if err != nil {
+ return err
+ }
+
+ dstProvider, err2 := d.ccipExecGetDstProvider(ctx, jb, pluginJobSpecConfig, transmitterID)
+ if err2 != nil {
+ return err2
+ }
+
+ srcProvider, _, err2 := d.ccipExecGetSrcProvider(ctx, jb, pluginJobSpecConfig, transmitterID, dstProvider)
+ if err2 != nil {
+ return err2
+ }
+ err2 = ccipexec.UnregisterExecPluginLpFilters(srcProvider, dstProvider)
+ if err2 != nil {
+ d.lggr.Errorw("failed to unregister ccip exec plugin filters", "err", err2, "spec", spec)
+ }
+ return nil
default:
return nil
}
@@ -448,6 +504,10 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi
return d.newServicesGenericPlugin(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, d.capabilitiesRegistry,
kvStore)
+ case types.CCIPCommit:
+ return d.newServicesCCIPCommit(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, transmitterID)
+ case types.CCIPExecution:
+ return d.newServicesCCIPExecution(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, transmitterID)
default:
return nil, errors.Errorf("plugin type %s not supported", spec.PluginType)
}
@@ -1498,6 +1558,337 @@ func (d *Delegate) newServicesOCR2Functions(
return append([]job.ServiceCtx{functionsProvider, thresholdProvider, s4Provider, ocrLogger}, functionsServices...), nil
}
+// newServicesCCIPCommit builds the job services for a CCIP commit OCR2 plugin.
+// It resolves the destination relayer from the job spec, constructs the
+// destination and source chain providers, assembles the libocr oracle
+// arguments, and delegates service construction to ccipcommit.NewCommitServices.
+// Only EVM relays are supported.
+func (d *Delegate) newServicesCCIPCommit(ctx context.Context, lggr logger.SugaredLogger, jb job.Job, bootstrapPeers []commontypes.BootstrapperLocator, kb ocr2key.KeyBundle, ocrDB *db, lc ocrtypes.LocalConfig, transmitterID string) ([]job.ServiceCtx, error) {
+	spec := jb.OCR2OracleSpec
+	if spec.Relay != relay.NetworkEVM {
+		return nil, fmt.Errorf("non evm chains are not supported for CCIP commit")
+	}
+	dstRid, err := spec.RelayID()
+	if err != nil {
+		return nil, ErrJobSpecNoRelayer{Err: err, PluginName: string(spec.PluginType)}
+	}
+
+	// Persist plugin errors against the job so operators can see them.
+	logError := func(msg string) {
+		lggr.ErrorIf(d.jobORM.RecordError(context.Background(), jb.ID, msg), "unable to record error")
+	}
+
+	// Write PluginConfig bytes to send source/dest relayer provider + info outside of top level rargs/pargs over the wire
+	var pluginJobSpecConfig ccipconfig.CommitPluginJobSpecConfig
+	err = json.Unmarshal(spec.PluginConfig.Bytes(), &pluginJobSpecConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	dstChainID, err := strconv.ParseInt(dstRid.ChainID, 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	dstProvider, err := d.ccipCommitGetDstProvider(ctx, jb, pluginJobSpecConfig, transmitterID)
+	if err != nil {
+		return nil, err
+	}
+
+	// The source chain ID is discovered via the dst provider's OffRamp reader.
+	srcProvider, srcChainID, err := d.ccipCommitGetSrcProvider(ctx, jb, pluginJobSpecConfig, transmitterID, dstProvider)
+	if err != nil {
+		return nil, err
+	}
+
+	// Transmission and config tracking happen on the destination chain; the
+	// reporting-plugin factory itself is attached later by NewCommitServices.
+	oracleArgsNoPlugin := libocr2.OCR2OracleArgs{
+		BinaryNetworkEndpointFactory: d.peerWrapper.Peer2,
+		V2Bootstrappers:              bootstrapPeers,
+		ContractTransmitter:          dstProvider.ContractTransmitter(),
+		ContractConfigTracker:        dstProvider.ContractConfigTracker(),
+		Database:                     ocrDB,
+		LocalConfig:                  lc,
+		MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(
+			dstRid.Network,
+			dstRid.ChainID,
+			spec.ContractID,
+			synchronization.OCR2CCIPCommit,
+		),
+		OffchainConfigDigester: dstProvider.OffchainConfigDigester(),
+		OffchainKeyring:        kb,
+		OnchainKeyring:         kb,
+		MetricsRegisterer:      prometheus.WrapRegistererWith(map[string]string{"job_name": jb.Name.ValueOrZero()}, prometheus.DefaultRegisterer),
+	}
+
+	return ccipcommit.NewCommitServices(ctx, d.ds, srcProvider, dstProvider, d.legacyChains, jb, lggr, d.pipelineRunner, oracleArgsNoPlugin, d.isNewlyCreatedJob, int64(srcChainID), dstChainID, logError)
+}
+
+func newCCIPCommitPluginBytes(isSourceProvider bool, sourceStartBlock uint64, destStartBlock uint64) config.CommitPluginConfig {
+ return config.CommitPluginConfig{
+ IsSourceProvider: isSourceProvider,
+ SourceStartBlock: sourceStartBlock,
+ DestStartBlock: destStartBlock,
+ }
+}
+
+// ccipCommitGetDstProvider constructs the destination-chain CCIPCommitProvider
+// for a commit job: it encodes the (non-source) plugin config, resolves the
+// destination relayer from the spec's relay ID, and requests a plugin provider
+// of type CCIPCommit from it. Only EVM relays are supported.
+func (d *Delegate) ccipCommitGetDstProvider(ctx context.Context, jb job.Job, pluginJobSpecConfig ccipconfig.CommitPluginJobSpecConfig, transmitterID string) (types.CCIPCommitProvider, error) {
+	spec := jb.OCR2OracleSpec
+	if spec.Relay != relay.NetworkEVM {
+		return nil, fmt.Errorf("non evm chains are not supported for CCIP commit")
+	}
+
+	dstRid, err := spec.RelayID()
+	if err != nil {
+		return nil, ErrJobSpecNoRelayer{Err: err, PluginName: string(spec.PluginType)}
+	}
+
+	// Write PluginConfig bytes to send source/dest relayer provider + info outside of top level rargs/pargs over the wire
+	dstConfigBytes, err := newCCIPCommitPluginBytes(false, pluginJobSpecConfig.SourceStartBlock, pluginJobSpecConfig.DestStartBlock).Encode()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get provider from dest chain
+	dstRelayer, err := d.RelayGetter.Get(dstRid)
+	if err != nil {
+		return nil, err
+	}
+
+	provider, err := dstRelayer.NewPluginProvider(ctx,
+		types.RelayArgs{
+			ContractID:   spec.ContractID,
+			RelayConfig:  spec.RelayConfig.Bytes(),
+			ProviderType: string(types.CCIPCommit),
+		},
+		types.PluginArgs{
+			TransmitterID: transmitterID,
+			PluginConfig:  dstConfigBytes,
+		})
+	if err != nil {
+		return nil, fmt.Errorf("unable to create ccip commit provider: %w", err)
+	}
+	// The relayer returns a generic PluginProvider; narrow it to the commit interface.
+	dstProvider, ok := provider.(types.CCIPCommitProvider)
+	if !ok {
+		return nil, fmt.Errorf("could not coerce PluginProvider to CCIPCommitProvider")
+	}
+
+	return dstProvider, nil
+}
+
+func (d *Delegate) ccipCommitGetSrcProvider(ctx context.Context, jb job.Job, pluginJobSpecConfig ccipconfig.CommitPluginJobSpecConfig, transmitterID string, dstProvider types.CCIPCommitProvider) (srcProvider types.CCIPCommitProvider, srcChainID uint64, err error) {
+ spec := jb.OCR2OracleSpec
+ srcConfigBytes, err := newCCIPCommitPluginBytes(true, pluginJobSpecConfig.SourceStartBlock, pluginJobSpecConfig.DestStartBlock).Encode()
+ if err != nil {
+ return nil, 0, err
+ }
+ // Use OffRampReader to get src chain ID and fetch the src relayer
+
+ var pluginConfig ccipconfig.CommitPluginJobSpecConfig
+ err = json.Unmarshal(spec.PluginConfig.Bytes(), &pluginConfig)
+ if err != nil {
+ return nil, 0, err
+ }
+ offRampAddress := pluginConfig.OffRamp
+ offRampReader, err := dstProvider.NewOffRampReader(ctx, offRampAddress)
+ if err != nil {
+ return nil, 0, fmt.Errorf("create offRampReader: %w", err)
+ }
+
+ offRampConfig, err := offRampReader.GetStaticConfig(ctx)
+ if err != nil {
+ return nil, 0, fmt.Errorf("get offRamp static config: %w", err)
+ }
+
+ srcChainID, err = chainselectors.ChainIdFromSelector(offRampConfig.SourceChainSelector)
+ if err != nil {
+ return nil, 0, err
+ }
+ srcChainIDstr := strconv.FormatUint(srcChainID, 10)
+
+ // Get provider from source chain
+ srcRelayer, err := d.RelayGetter.Get(types.RelayID{Network: spec.Relay, ChainID: srcChainIDstr})
+ if err != nil {
+ return nil, 0, err
+ }
+ provider, err := srcRelayer.NewPluginProvider(ctx,
+ types.RelayArgs{
+ ContractID: "", // Contract address only valid for dst chain
+ RelayConfig: spec.RelayConfig.Bytes(),
+ ProviderType: string(types.CCIPCommit),
+ },
+ types.PluginArgs{
+ TransmitterID: transmitterID,
+ PluginConfig: srcConfigBytes,
+ })
+ if err != nil {
+ return nil, 0, fmt.Errorf("srcRelayer.NewPluginProvider: %w", err)
+ }
+ srcProvider, ok := provider.(types.CCIPCommitProvider)
+ if !ok {
+ return nil, 0, fmt.Errorf("could not coerce PluginProvider to CCIPCommitProvider")
+ }
+
+ return
+}
+
+// newServicesCCIPExecution builds the job services for a CCIP execution OCR2
+// plugin, mirroring newServicesCCIPCommit: destination relayer resolution,
+// provider construction for both chains, then ccipexec.NewExecServices.
+// Only EVM relays are supported.
+func (d *Delegate) newServicesCCIPExecution(ctx context.Context, lggr logger.SugaredLogger, jb job.Job, bootstrapPeers []commontypes.BootstrapperLocator, kb ocr2key.KeyBundle, ocrDB *db, lc ocrtypes.LocalConfig, transmitterID string) ([]job.ServiceCtx, error) {
+	spec := jb.OCR2OracleSpec
+	if spec.Relay != relay.NetworkEVM {
+		return nil, fmt.Errorf("non evm chains are not supported for CCIP execution")
+	}
+	dstRid, err := spec.RelayID()
+
+	if err != nil {
+		return nil, ErrJobSpecNoRelayer{Err: err, PluginName: string(spec.PluginType)}
+	}
+
+	// Persist plugin errors against the job so operators can see them.
+	logError := func(msg string) {
+		lggr.ErrorIf(d.jobORM.RecordError(context.Background(), jb.ID, msg), "unable to record error")
+	}
+
+	// PROVIDER BASED ARG CONSTRUCTION
+	// Write PluginConfig bytes to send source/dest relayer provider + info outside of top level rargs/pargs over the wire
+	var pluginJobSpecConfig ccipconfig.ExecPluginJobSpecConfig
+	err = json.Unmarshal(spec.PluginConfig.Bytes(), &pluginJobSpecConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	dstChainID, err := strconv.ParseInt(dstRid.ChainID, 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	dstProvider, err := d.ccipExecGetDstProvider(ctx, jb, pluginJobSpecConfig, transmitterID)
+	if err != nil {
+		return nil, err
+	}
+
+	// The source chain ID is discovered via the dst provider's OffRamp reader.
+	srcProvider, srcChainID, err := d.ccipExecGetSrcProvider(ctx, jb, pluginJobSpecConfig, transmitterID, dstProvider)
+	if err != nil {
+		return nil, err
+	}
+
+	// Transmission and config tracking happen on the destination chain; the
+	// reporting-plugin factory itself is attached later by NewExecServices.
+	oracleArgsNoPlugin2 := libocr2.OCR2OracleArgs{
+		BinaryNetworkEndpointFactory: d.peerWrapper.Peer2,
+		V2Bootstrappers:              bootstrapPeers,
+		ContractTransmitter:          dstProvider.ContractTransmitter(),
+		ContractConfigTracker:        dstProvider.ContractConfigTracker(),
+		Database:                     ocrDB,
+		LocalConfig:                  lc,
+		MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(
+			dstRid.Network,
+			dstRid.ChainID,
+			spec.ContractID,
+			synchronization.OCR2CCIPExec,
+		),
+		OffchainConfigDigester: dstProvider.OffchainConfigDigester(),
+		OffchainKeyring:        kb,
+		OnchainKeyring:         kb,
+		MetricsRegisterer:      prometheus.WrapRegistererWith(map[string]string{"job_name": jb.Name.ValueOrZero()}, prometheus.DefaultRegisterer),
+	}
+
+	return ccipexec.NewExecServices(ctx, lggr, jb, srcProvider, dstProvider, int64(srcChainID), dstChainID, d.isNewlyCreatedJob, oracleArgsNoPlugin2, logError)
+}
+
+// ccipExecGetDstProvider constructs the destination-chain CCIPExecProvider for
+// an exec job: it encodes the (non-source) exec plugin config and requests a
+// plugin provider of type CCIPExecution from the destination relayer.
+// Only EVM relays are supported.
+func (d *Delegate) ccipExecGetDstProvider(ctx context.Context, jb job.Job, pluginJobSpecConfig ccipconfig.ExecPluginJobSpecConfig, transmitterID string) (types.CCIPExecProvider, error) {
+	spec := jb.OCR2OracleSpec
+	if spec.Relay != relay.NetworkEVM {
+		return nil, fmt.Errorf("non evm chains are not supported for CCIP execution")
+	}
+	dstRid, err := spec.RelayID()
+
+	if err != nil {
+		return nil, ErrJobSpecNoRelayer{Err: err, PluginName: string(spec.PluginType)}
+	}
+
+	// PROVIDER BASED ARG CONSTRUCTION
+	// Write PluginConfig bytes to send source/dest relayer provider + info outside of top level rargs/pargs over the wire.
+	// NOTE: jb.ID is numeric; string(jb.ID) would interpret the ID as a rune
+	// code point (go vet stringintconv), so format it as a decimal string.
+	dstConfigBytes, err := newExecPluginConfig(false, pluginJobSpecConfig.SourceStartBlock, pluginJobSpecConfig.DestStartBlock, pluginJobSpecConfig.USDCConfig, strconv.FormatInt(int64(jb.ID), 10)).Encode()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get provider from dest chain
+	dstRelayer, err := d.RelayGetter.Get(dstRid)
+	if err != nil {
+		return nil, err
+	}
+	provider, err := dstRelayer.NewPluginProvider(ctx,
+		types.RelayArgs{
+			ContractID:   spec.ContractID,
+			RelayConfig:  spec.RelayConfig.Bytes(),
+			ProviderType: string(types.CCIPExecution),
+		},
+		types.PluginArgs{
+			TransmitterID: transmitterID,
+			PluginConfig:  dstConfigBytes,
+		})
+	if err != nil {
+		return nil, fmt.Errorf("NewPluginProvider failed on dstRelayer: %w", err)
+	}
+	// The relayer returns a generic PluginProvider; narrow it to the exec interface.
+	dstProvider, ok := provider.(types.CCIPExecProvider)
+	if !ok {
+		return nil, fmt.Errorf("could not coerce PluginProvider to CCIPExecProvider")
+	}
+
+	return dstProvider, nil
+}
+
+// ccipExecGetSrcProvider constructs the source-chain CCIPExecProvider for an
+// exec job. The source chain is discovered dynamically: the destination
+// provider's OffRampReader (at the spec's contract address) exposes the static
+// config whose SourceChainSelector is mapped back to a chain ID, which is then
+// used to look up the source relayer.
+func (d *Delegate) ccipExecGetSrcProvider(ctx context.Context, jb job.Job, pluginJobSpecConfig ccipconfig.ExecPluginJobSpecConfig, transmitterID string, dstProvider types.CCIPExecProvider) (srcProvider types.CCIPExecProvider, srcChainID uint64, err error) {
+	spec := jb.OCR2OracleSpec
+	// NOTE: jb.ID is numeric; string(jb.ID) would interpret the ID as a rune
+	// code point (go vet stringintconv), so format it as a decimal string.
+	srcConfigBytes, err := newExecPluginConfig(true, pluginJobSpecConfig.SourceStartBlock, pluginJobSpecConfig.DestStartBlock, pluginJobSpecConfig.USDCConfig, strconv.FormatInt(int64(jb.ID), 10)).Encode()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// Use OffRampReader to get src chain ID and fetch the src relayer
+	offRampAddress := cciptypes.Address(common.HexToAddress(spec.ContractID).String())
+	offRampReader, err := dstProvider.NewOffRampReader(ctx, offRampAddress)
+	if err != nil {
+		return nil, 0, fmt.Errorf("create offRampReader: %w", err)
+	}
+
+	offRampConfig, err := offRampReader.GetStaticConfig(ctx)
+	if err != nil {
+		return nil, 0, fmt.Errorf("get offRamp static config: %w", err)
+	}
+
+	srcChainID, err = chainselectors.ChainIdFromSelector(offRampConfig.SourceChainSelector)
+	if err != nil {
+		return nil, 0, err
+	}
+	srcChainIDstr := strconv.FormatUint(srcChainID, 10)
+
+	// Get provider from source chain
+	srcRelayer, err := d.RelayGetter.Get(types.RelayID{Network: spec.Relay, ChainID: srcChainIDstr})
+	if err != nil {
+		return nil, 0, fmt.Errorf("failed to get relayer: %w", err)
+	}
+	provider, err := srcRelayer.NewPluginProvider(ctx,
+		types.RelayArgs{
+			ContractID:   "",
+			RelayConfig:  spec.RelayConfig.Bytes(),
+			ProviderType: string(types.CCIPExecution),
+		},
+		types.PluginArgs{
+			TransmitterID: transmitterID,
+			PluginConfig:  srcConfigBytes,
+		})
+	if err != nil {
+		return nil, 0, err
+	}
+	srcProvider, ok := provider.(types.CCIPExecProvider)
+	if !ok {
+		// err is guaranteed nil here, so do not wrap it with %w.
+		return nil, 0, fmt.Errorf("could not coerce PluginProvider to CCIPExecProvider")
+	}
+
+	return
+}
+
+func newExecPluginConfig(isSourceProvider bool, srcStartBlock uint64, dstStartBlock uint64, usdcConfig ccipconfig.USDCConfig, jobID string) config.ExecPluginConfig {
+ return config.ExecPluginConfig{
+ IsSourceProvider: isSourceProvider,
+ SourceStartBlock: srcStartBlock,
+ DestStartBlock: dstStartBlock,
+ USDCConfig: usdcConfig,
+ JobID: jobID,
+ }
+}
+
// errorLog implements [loop.ErrorLog]
type errorLog struct {
jobID int32
diff --git a/core/services/ocr2/plugins/ccip/LICENSE.md b/core/services/ocr2/plugins/ccip/LICENSE.md
new file mode 100644
index 00000000000..b127e1a823a
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/LICENSE.md
@@ -0,0 +1,55 @@
+Business Source License 1.1
+
+License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
+"Business Source License" is a trademark of MariaDB Corporation Ab.
+
+-----------------------------------------------------------------------------
+
+Parameters
+
+Licensor: SmartContract Chainlink Limited SEZC
+
+Licensed Work: Cross-Chain Interoperability Protocol v1.4
+The Licensed Work is (c) 2023 SmartContract Chainlink Limited SEZC
+
+Additional Use Grant: Any uses listed and defined at [v1.4-CCIP-License-grants](../../../../../contracts/src/v0.8/ccip/v1.4-CCIP-License-grants)
+
+Change Date: May 23, 2027
+
+Change License: MIT
+
+-----------------------------------------------------------------------------
+
+Terms
+
+The Licensor hereby grants you the right to copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work. The Licensor may make an Additional Use Grant, above, permitting limited production use.
+
+Effective on the Change Date, or the fourth anniversary of the first publicly available distribution of a specific version of the Licensed Work under this License, whichever comes first, the Licensor hereby grants you rights under the terms of the Change License, and the rights granted in the paragraph above terminate.
+
+If your use of the Licensed Work does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or authorized resellers, or you must refrain from using the Licensed Work.
+
+All copies of the original and modified Licensed Work, and derivative works of the Licensed Work, are subject to this License. This License applies separately for each version of the Licensed Work and the Change Date may vary for each version of the Licensed Work released by Licensor.
+
+You must conspicuously display this License on each original or modified copy of the Licensed Work. If you receive the Licensed Work in original or modified form from a third party, the terms and conditions set forth in this License apply to your use of that work.
+
+Any use of the Licensed Work in violation of this License will automatically terminate your rights under this License for the current and all other versions of the Licensed Work.
+
+This License does not grant you any right in any trademark or logo of Licensor or its affiliates (provided that you may use a trademark or logo of Licensor as expressly required by this License).
+
+TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE.
+
+MariaDB hereby grants you permission to use this License’s text to license your works, and to refer to it using the trademark "Business Source License", as long as you comply with the Covenants of Licensor below.
+
+-----------------------------------------------------------------------------
+
+Covenants of Licensor
+
+In consideration of the right to use this License’s text and the "Business Source License" name and trademark, Licensor covenants to MariaDB, and to all other recipients of the licensed work to be provided by Licensor:
+
+1. To specify as the Change License the GPL Version 2.0 or any later version, or a license that is compatible with GPL Version 2.0 or a later version, where "compatible" means that software provided under the Change License can be included in a program with software provided under GPL Version 2.0 or a later version. Licensor may specify additional Change Licenses without limitation.
+
+2. To either: (a) specify an additional grant of rights to use that does not impose any additional restriction on the right granted in this License, as the Additional Use Grant; or (b) insert the text "None".
+
+3. To specify a Change Date.
+
+4. Not to modify this License in any other way.
\ No newline at end of file
diff --git a/core/services/ocr2/plugins/ccip/abihelpers/abi_helpers.go b/core/services/ocr2/plugins/ccip/abihelpers/abi_helpers.go
new file mode 100644
index 00000000000..d0ad5642d94
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/abihelpers/abi_helpers.go
@@ -0,0 +1,187 @@
+package abihelpers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math/big"
+ "strings"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/pkg/errors"
+ "github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator"
+)
+
+// MustGetEventID returns the topic hash (event ID) of the named event,
+// panicking if the ABI does not define it.
+func MustGetEventID(name string, abi2 abi.ABI) common.Hash {
+	if ev, ok := abi2.Events[name]; ok {
+		return ev.ID
+	}
+	panic(fmt.Sprintf("missing event %s", name))
+}
+
+// MustGetEventInputs returns the input arguments of the named event,
+// panicking if the ABI does not define it.
+func MustGetEventInputs(name string, abi2 abi.ABI) abi.Arguments {
+	if ev, ok := abi2.Events[name]; ok {
+		return ev.Inputs
+	}
+	panic(fmt.Sprintf("missing event %s", name))
+}
+
+// MustGetMethodInputs returns the input arguments of the named method,
+// panicking if the ABI does not define it.
+func MustGetMethodInputs(name string, abi2 abi.ABI) abi.Arguments {
+	if method, ok := abi2.Methods[name]; ok {
+		return method.Inputs
+	}
+	panic(fmt.Sprintf("missing method %s", name))
+}
+
+// MustParseABI parses a JSON ABI string, panicking on malformed input.
+func MustParseABI(abiStr string) abi.ABI {
+	parsed, err := abi.JSON(strings.NewReader(abiStr))
+	if err != nil {
+		panic(err)
+	}
+	return parsed
+}
+
+// ProofFlagsToBits packs a slice of boolean proof flags into a *big.Int bit
+// set, where flag i maps to bit i (least-significant bit first).
+func ProofFlagsToBits(proofFlags []bool) *big.Int {
+	bits := new(big.Int)
+	for i, flag := range proofFlags {
+		if flag {
+			bits.SetBit(bits, i, 1)
+		}
+	}
+	return bits
+}
+
+// AbiDefined is implemented by types that carry their own ABI definition
+// (a JSON fragment consumed by ABIEncode/ABIDecode).
+type AbiDefined interface {
+	AbiString() string
+}
+
+// AbiDefinedValid is an AbiDefined that can additionally validate itself
+// after decoding (see DecodeAbiStruct).
+type AbiDefinedValid interface {
+	AbiDefined
+	Validate() error
+}
+
+// ABIEncode packs values according to abiStr (a JSON list of input argument
+// definitions) and returns the encoded arguments without the 4-byte method
+// selector. Parsed ABIs are cached (see getABI).
+func ABIEncode(abiStr string, values ...interface{}) ([]byte, error) {
+	parsedABI, err := getABI(abiStr, ENCODE)
+	if err != nil {
+		return nil, err
+	}
+	packed, err := parsedABI.Pack("method", values...)
+	if err != nil {
+		return nil, err
+	}
+	// Drop the synthetic "method" selector; callers want raw argument data.
+	return packed[4:], nil
+}
+
+// ABIDecode unpacks data according to abiStr (a JSON list of output argument
+// definitions). Parsed ABIs are cached (see getABI).
+func ABIDecode(abiStr string, data []byte) ([]interface{}, error) {
+	parsedABI, err := getABI(abiStr, DECODE)
+	if err != nil {
+		return nil, err
+	}
+	return parsedABI.Unpack("method", data)
+}
+
+// EncodeAbiStruct ABI-encodes a value using its own AbiString definition.
+func EncodeAbiStruct[T AbiDefined](decoded T) ([]byte, error) {
+	return ABIEncode(decoded.AbiString(), decoded)
+}
+
+// EncodeAddress ABI-encodes a single EVM address (left-padded to 32 bytes).
+func EncodeAddress(address common.Address) ([]byte, error) {
+	return ABIEncode(`[{"type":"address"}]`, address)
+}
+
+// DecodeAbiStruct ABI-decodes encoded into a value of type T using T's own
+// AbiString definition, then validates it via T.Validate. Returns the zero
+// value of T together with an error if decoding, conversion, or validation
+// fails.
+func DecodeAbiStruct[T AbiDefinedValid](encoded []byte) (T, error) {
+	var empty T
+
+	decoded, err := ABIDecode(empty.AbiString(), encoded)
+	if err != nil {
+		return empty, err
+	}
+
+	// ConvertType copies the decoded anonymous struct into the concrete T.
+	converted := abi.ConvertType(decoded[0], &empty)
+	if casted, ok := converted.(*T); ok {
+		return *casted, (*casted).Validate()
+	}
+	return empty, fmt.Errorf("can't cast from %T to %T", converted, empty)
+}
+
+func EvmWord(i uint64) common.Hash {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, i)
+ return common.BigToHash(big.NewInt(0).SetBytes(b))
+}
+
+// DecodeOCR2Config unpacks a raw ConfigSet log payload into the generated
+// OCR2Aggregator ConfigSet struct.
+func DecodeOCR2Config(encoded []byte) (*ocr2aggregator.OCR2AggregatorConfigSet, error) {
+	out := new(ocr2aggregator.OCR2AggregatorConfigSet)
+	abiPointer, err := ocr2aggregator.OCR2AggregatorMetaData.GetAbi()
+	if err != nil {
+		return out, err
+	}
+	aggregatorABI := *abiPointer
+	if err := aggregatorABI.UnpackIntoInterface(out, "ConfigSet", encoded); err != nil {
+		return out, errors.Wrap(err, "failed to unpack log data")
+	}
+	return out, nil
+}
+
+// Operation selectors for getABI: ENCODE wraps the fragment as the method's
+// "inputs", DECODE as its "outputs".
+const (
+	ENCODE = iota
+	DECODE
+)
+
+// abiCache memoizes parsed ABIs keyed by their full JSON definition string.
+// Access is guarded by mu for concurrent use.
+type abiCache struct {
+	cache map[string]*abi.ABI
+	mu    *sync.RWMutex
+}
+
+// newAbiCache constructs an empty, lock-protected ABI cache.
+func newAbiCache() *abiCache {
+	c := new(abiCache)
+	c.cache = map[string]*abi.ABI{}
+	c.mu = new(sync.RWMutex)
+	return c
+}
+
+// Global cache for ABIs to avoid parsing the same ABI multiple times.
+// As the module is already a helper module and not a service, we can keep the cache global.
+// It's private to the package and can't be accessed from outside.
+var myAbiCache = newAbiCache()
+
+// getABI returns the parsed ABI for abiStr from the package cache, parsing and
+// caching it on a miss. operationType differentiates encoding and decoding:
+// encoding wraps the fragment as the synthetic method's "inputs", decoding as
+// its "outputs" (see inDef below).
+func getABI(abiStr string, operationType uint8) (*abi.ABI, error) {
+	var operationStr string
+	switch operationType {
+	case ENCODE:
+		operationStr = "inputs"
+	case DECODE:
+		operationStr = "outputs"
+	default:
+		return nil, fmt.Errorf("invalid operation type")
+	}
+
+	inDef := fmt.Sprintf(`[{ "name" : "method", "type": "function", "%s": %s}]`, operationStr, abiStr)
+
+	// Fast path: read lock only.
+	myAbiCache.mu.RLock()
+	if cachedAbi, found := myAbiCache.cache[inDef]; found {
+		myAbiCache.mu.RUnlock() // unlocking before returning
+		return cachedAbi, nil
+	}
+	myAbiCache.mu.RUnlock()
+
+	res, err := abi.JSON(strings.NewReader(inDef))
+	if err != nil {
+		return nil, err
+	}
+
+	myAbiCache.mu.Lock()
+	defer myAbiCache.mu.Unlock()
+	// Re-check under the write lock: another goroutine may have parsed and
+	// cached the same definition between RUnlock and Lock above. Returning the
+	// existing entry keeps a single canonical *abi.ABI per definition instead
+	// of silently replacing it.
+	if cachedAbi, found := myAbiCache.cache[inDef]; found {
+		return cachedAbi, nil
+	}
+	myAbiCache.cache[inDef] = &res
+	return &res, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/abihelpers/abi_helpers_test.go b/core/services/ocr2/plugins/ccip/abihelpers/abi_helpers_test.go
new file mode 100644
index 00000000000..4890aeb1188
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/abihelpers/abi_helpers_test.go
@@ -0,0 +1,147 @@
+package abihelpers
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/assert"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+)
+
+// TestProofFlagToBits checks ProofFlagsToBits over small literal flag slices
+// and one large sparse slice (bit 266 of 300), asserting the resulting
+// big.Int bit set. Note the bit order: flag i maps to bit i (LSB first).
+func TestProofFlagToBits(t *testing.T) {
+	// genFlags builds a flag slice of the given size with only the listed
+	// indexes set to true.
+	genFlags := func(indexesSet []int, size int) []bool {
+		bools := make([]bool, size)
+		for _, indexSet := range indexesSet {
+			bools[indexSet] = true
+		}
+		return bools
+	}
+	tt := []struct {
+		flags    []bool
+		expected *big.Int
+	}{
+		{
+			[]bool{true, false, true},
+			big.NewInt(5),
+		},
+		{
+			[]bool{true, true, false}, // Note the bits are reversed, slightly easier to implement.
+			big.NewInt(3),
+		},
+		{
+			[]bool{false, true, true},
+			big.NewInt(6),
+		},
+		{
+			[]bool{false, false, false},
+			big.NewInt(0),
+		},
+		{
+			[]bool{true, true, true},
+			big.NewInt(7),
+		},
+		{
+			genFlags([]int{266}, 300),
+			big.NewInt(0).SetBit(big.NewInt(0), 266, 1),
+		},
+	}
+	for _, tc := range tt {
+		tc := tc
+		a := ProofFlagsToBits(tc.flags)
+		assert.Equal(t, tc.expected.String(), a.String())
+	}
+}
+
+// TestEvmWord checks that EvmWord left-pads a uint64 into a 32-byte word,
+// including the max-uint64 boundary.
+func TestEvmWord(t *testing.T) {
+	testCases := []struct {
+		inp uint64
+		exp common.Hash
+	}{
+		{inp: 1, exp: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001")},
+		{inp: math.MaxUint64, exp: common.HexToHash("0x000000000000000000000000000000000000000000000000ffffffffffffffff")},
+	}
+
+	for _, tc := range testCases {
+		t.Run(fmt.Sprintf("test %d", tc.inp), func(t *testing.T) {
+			h := EvmWord(tc.inp)
+			assert.Equal(t, tc.exp, h)
+		})
+	}
+}
+
+// TestABIEncodeDecode round-trips a tuple through ABIEncode/ABIDecode and
+// verifies the cached second encode matches the first.
+func TestABIEncodeDecode(t *testing.T) {
+	abiStr := `[{"components": [{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}], "type":"tuple"}]`
+	values := []interface{}{struct {
+		Int1 *big.Int `json:"int1"`
+		Int2 *big.Int `json:"int2"`
+	}{big.NewInt(10), big.NewInt(12)}}
+
+	// First encoding, should call the underlying utils.ABIEncode
+	encoded, err := ABIEncode(abiStr, values...)
+	assert.NoError(t, err)
+	assert.NotNil(t, encoded)
+
+	// Second encoding, should retrieve from cache
+	// we're just testing here that it returns same result
+	encodedAgain, err := ABIEncode(abiStr, values...)
+
+	assert.NoError(t, err)
+	assert.True(t, bytes.Equal(encoded, encodedAgain))
+
+	// Should be able to decode it back to the original values.
+	// assert.Equal takes (expected, actual) — values is the expectation.
+	decoded, err := ABIDecode(abiStr, encoded)
+	assert.NoError(t, err)
+	assert.Equal(t, values, decoded)
+}
+
+// BenchmarkComparisonEncode compares the cached ABIEncode against the uncached
+// utils.ABIEncode for the same tuple definition.
+func BenchmarkComparisonEncode(b *testing.B) {
+	abiStr := `[{"components": [{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}], "type":"tuple"}]`
+	values := []interface{}{struct {
+		Int1 *big.Int `json:"int1"`
+		Int2 *big.Int `json:"int2"`
+	}{big.NewInt(10), big.NewInt(12)}}
+
+	b.Run("WithoutCache", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			_, _ = utils.ABIEncode(abiStr, values...)
+		}
+	})
+
+	// Warm up the cache
+	_, _ = ABIEncode(abiStr, values...)
+
+	b.Run("WithCache", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			_, _ = ABIEncode(abiStr, values...)
+		}
+	})
+}
+
+// BenchmarkComparisonDecode compares the cached ABIDecode against the uncached
+// utils.ABIDecode for the same tuple definition.
+func BenchmarkComparisonDecode(b *testing.B) {
+	abiStr := `[{"components": [{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}], "type":"tuple"}]`
+	values := []interface{}{struct {
+		Int1 *big.Int `json:"int1"`
+		Int2 *big.Int `json:"int2"`
+	}{big.NewInt(10), big.NewInt(12)}}
+	data, _ := utils.ABIEncode(abiStr, values...)
+
+	b.Run("WithoutCache", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			_, _ = utils.ABIDecode(abiStr, data)
+		}
+	})
+
+	// Warm up the cache
+	_, _ = ABIDecode(abiStr, data)
+
+	b.Run("WithCache", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			_, _ = ABIDecode(abiStr, data)
+		}
+	})
+}
diff --git a/core/services/ocr2/plugins/ccip/ccipcommit/factory.go b/core/services/ocr2/plugins/ccip/ccipcommit/factory.go
new file mode 100644
index 00000000000..648f62a23a2
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipcommit/factory.go
@@ -0,0 +1,150 @@
+package ccipcommit
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+)
+
+// CommitReportingPluginFactory builds CommitReportingPlugin instances for
+// successive onchain config changes.
+type CommitReportingPluginFactory struct {
+	// Configuration derived from the job spec which does not change
+	// between plugin instances (ie between SetConfigs onchain)
+	config CommitPluginStaticConfig
+
+	// Dynamic readers, swapped when the onchain price registry address changes
+	// (see UpdateDynamicReaders); guarded by readersMu.
+	readersMu          *sync.Mutex
+	destPriceRegReader ccipdata.PriceRegistryReader
+	destPriceRegAddr   common.Address
+}
+
+// NewCommitReportingPluginFactory return a new CommitReportingPluginFactory.
+// The dynamic reader fields start empty and are populated on demand by
+// UpdateDynamicReaders.
+func NewCommitReportingPluginFactory(config CommitPluginStaticConfig) *CommitReportingPluginFactory {
+	factory := new(CommitReportingPluginFactory)
+	factory.config = config
+	factory.readersMu = new(sync.Mutex)
+	// destPriceRegReader and destPriceRegAddr keep their zero values until
+	// the first UpdateDynamicReaders call.
+	return factory
+}
+
+// UpdateDynamicReaders keeps the factory's price registry reader in sync with
+// the onchain-configured price registry address. It is a no-op when the
+// address is unchanged; otherwise the previous reader (if any) is closed and a
+// new one is opened via the configured provider.
+func (rf *CommitReportingPluginFactory) UpdateDynamicReaders(ctx context.Context, newPriceRegAddr common.Address) error {
+	rf.readersMu.Lock()
+	defer rf.readersMu.Unlock()
+	// TODO: Investigate use of Close() to cleanup.
+	// TODO: a true price registry upgrade on an existing lane may want some kind of start block in its config? Right now we
+	// essentially assume that plugins don't care about historical price reg logs.
+	if rf.destPriceRegAddr == newPriceRegAddr {
+		// No-op
+		return nil
+	}
+	// Close old reader if present and open new reader if address changed
+	if rf.destPriceRegReader != nil {
+		if err := rf.destPriceRegReader.Close(); err != nil {
+			return err
+		}
+		// Drop the closed reader immediately so a failure below cannot leave
+		// a stale, already-closed reader installed.
+		rf.destPriceRegReader = nil
+	}
+
+	// Use the caller's ctx (previously context.Background()) so reader
+	// construction honors cancellation and deadlines.
+	destPriceRegistryReader, err := rf.config.priceRegistryProvider.NewPriceRegistryReader(ctx, cciptypes.Address(newPriceRegAddr.String()))
+	if err != nil {
+		return fmt.Errorf("init dynamic price registry: %w", err)
+	}
+	rf.destPriceRegReader = destPriceRegistryReader
+	rf.destPriceRegAddr = newPriceRegAddr
+	return nil
+}
+
+// reportingPluginAndInfo pairs a constructed plugin with its info so both can
+// be returned through a single retryable function (see NewReportingPluginFn).
+type reportingPluginAndInfo struct {
+	plugin     types.ReportingPlugin
+	pluginInfo types.ReportingPluginInfo
+}
+
+// NewReportingPlugin registers a new ReportingPlugin. Construction is retried
+// via RetryUntilSuccess with the configured backoff, since the underlying
+// calls (see NewReportingPluginFn) can fail transiently.
+func (rf *CommitReportingPluginFactory) NewReportingPlugin(config types.ReportingPluginConfig) (types.ReportingPlugin, types.ReportingPluginInfo, error) {
+	initialRetryDelay := rf.config.newReportingPluginRetryConfig.InitialDelay
+	maxDelay := rf.config.newReportingPluginRetryConfig.MaxDelay
+
+	pluginAndInfo, err := ccipcommon.RetryUntilSuccess(rf.NewReportingPluginFn(config), initialRetryDelay, maxDelay)
+	if err != nil {
+		return nil, types.ReportingPluginInfo{}, err
+	}
+	return pluginAndInfo.plugin, pluginAndInfo.pluginInfo, err
+}
+
+// NewReportingPluginFn implements the NewReportingPlugin logic. It is defined as a function so that it can easily be
+// retried via RetryUntilSuccess. NewReportingPlugin must return successfully in order for the Commit plugin to
+// function, hence why we can only keep retrying it until it succeeds.
+func (rf *CommitReportingPluginFactory) NewReportingPluginFn(config types.ReportingPluginConfig) func() (reportingPluginAndInfo, error) {
+	return func() (reportingPluginAndInfo, error) {
+		ctx := context.Background() // todo: consider adding some timeout
+
+		// Apply the new onchain/offchain config; this also yields the current
+		// price registry address used to refresh the dynamic reader below.
+		destPriceReg, err := rf.config.commitStore.ChangeConfig(ctx, config.OnchainConfig, config.OffchainConfig)
+		if err != nil {
+			return reportingPluginAndInfo{}, err
+		}
+
+		priceRegEvmAddr, err := ccipcalc.GenericAddrToEvm(destPriceReg)
+		if err != nil {
+			return reportingPluginAndInfo{}, err
+		}
+		if err = rf.UpdateDynamicReaders(ctx, priceRegEvmAddr); err != nil {
+			return reportingPluginAndInfo{}, err
+		}
+
+		pluginOffChainConfig, err := rf.config.commitStore.OffchainConfig(ctx)
+		if err != nil {
+			return reportingPluginAndInfo{}, err
+		}
+
+		gasPriceEstimator, err := rf.config.commitStore.GasPriceEstimator(ctx)
+		if err != nil {
+			return reportingPluginAndInfo{}, err
+		}
+
+		// Keep the price service in sync with the freshly resolved estimator
+		// and price registry reader.
+		err = rf.config.priceService.UpdateDynamicConfig(ctx, gasPriceEstimator, rf.destPriceRegReader)
+		if err != nil {
+			return reportingPluginAndInfo{}, err
+		}
+
+		lggr := rf.config.lggr.Named("CommitReportingPlugin")
+		plugin := &CommitReportingPlugin{
+			sourceChainSelector:     rf.config.sourceChainSelector,
+			sourceNative:            rf.config.sourceNative,
+			onRampReader:            rf.config.onRampReader,
+			destChainSelector:       rf.config.destChainSelector,
+			commitStoreReader:       rf.config.commitStore,
+			F:                       config.F,
+			lggr:                    lggr,
+			destPriceRegistryReader: rf.destPriceRegReader,
+			offRampReader:           rf.config.offRamp,
+			gasPriceEstimator:       gasPriceEstimator,
+			offchainConfig:          pluginOffChainConfig,
+			metricsCollector:        rf.config.metricsCollector,
+			chainHealthcheck:        rf.config.chainHealthcheck,
+			priceService:            rf.config.priceService,
+		}
+
+		pluginInfo := types.ReportingPluginInfo{
+			Name:          "CCIPCommit",
+			UniqueReports: false, // See comment in CommitStore constructor.
+			Limits: types.ReportingPluginLimits{
+				MaxQueryLength:       ccip.MaxQueryLength,
+				MaxObservationLength: ccip.MaxObservationLength,
+				MaxReportLength:      MaxCommitReportLength,
+			},
+		}
+
+		return reportingPluginAndInfo{plugin, pluginInfo}, nil
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/ccipcommit/factory_test.go b/core/services/ocr2/plugins/ccip/ccipcommit/factory_test.go
new file mode 100644
index 00000000000..825026bd17e
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipcommit/factory_test.go
@@ -0,0 +1,100 @@
+package ccipcommit
+
+import (
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ ccipdataprovidermocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks"
+ dbMocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdb/mocks"
+)
+
+// TestNewReportingPluginRetriesUntilSuccess asserts that NewReportingPlugin keeps retrying until it succeeds.
+//
+// NewReportingPlugin makes several calls (e.g. CommitStoreReader.ChangeConfig) that can fail. We use mocks to cause the
+// first call to each of these functions to fail, then all subsequent calls succeed. We assert that NewReportingPlugin
+// retries a sufficient number of times to get through the transient errors and eventually succeed.
+func TestNewReportingPluginRetriesUntilSuccess(t *testing.T) {
+	commitConfig := CommitPluginStaticConfig{}
+
+	// For this unit test, ensure that there is no delay between retries
+	commitConfig.newReportingPluginRetryConfig = ccipdata.RetryConfig{
+		InitialDelay: 0 * time.Nanosecond,
+		MaxDelay:     0 * time.Nanosecond,
+	}
+
+	// Set up the CommitStoreReader mock (this is the commit store, not the off-ramp).
+	mockCommitStore := new(mocks.CommitStoreReader)
+
+	// The first call is set to return an error, the following calls return a nil error
+	mockCommitStore.
+		On("ChangeConfig", mock.Anything, mock.Anything, mock.Anything).
+		Return(ccip.Address(""), errors.New("")).
+		Once()
+	mockCommitStore.
+		On("ChangeConfig", mock.Anything, mock.Anything, mock.Anything).
+		Return(ccip.Address("0x7c6e4F0BDe29f83BC394B75a7f313B7E5DbD2d77"), nil).
+		Times(5)
+
+	mockCommitStore.
+		On("OffchainConfig", mock.Anything).
+		Return(ccip.CommitOffchainConfig{}, errors.New("")).
+		Once()
+	mockCommitStore.
+		On("OffchainConfig", mock.Anything).
+		Return(ccip.CommitOffchainConfig{}, nil).
+		Times(3)
+
+	mockCommitStore.
+		On("GasPriceEstimator", mock.Anything).
+		Return(nil, errors.New("")).
+		Once()
+	mockCommitStore.
+		On("GasPriceEstimator", mock.Anything).
+		Return(nil, nil).
+		Times(2)
+
+	commitConfig.commitStore = mockCommitStore
+
+	mockPriceService := new(dbMocks.PriceService)
+
+	// First UpdateDynamicConfig call fails, all subsequent calls succeed.
+	mockPriceService.
+		On("UpdateDynamicConfig", mock.Anything, mock.Anything, mock.Anything).
+		Return(errors.New("")).
+		Once()
+	mockPriceService.
+		On("UpdateDynamicConfig", mock.Anything, mock.Anything, mock.Anything).
+		Return(nil)
+
+	commitConfig.priceService = mockPriceService
+
+	priceRegistryProvider := new(ccipdataprovidermocks.PriceRegistry)
+	priceRegistryProvider.
+		On("NewPriceRegistryReader", mock.Anything, mock.Anything).
+		Return(nil, errors.New("")).
+		Once()
+	priceRegistryProvider.
+		On("NewPriceRegistryReader", mock.Anything, mock.Anything).
+		Return(nil, nil).
+		Once()
+	commitConfig.priceRegistryProvider = priceRegistryProvider
+
+	// NewLogger's error is deliberately discarded here; a construction failure would
+	// surface immediately as a nil-logger panic inside the factory below.
+	commitConfig.lggr, _ = logger.NewLogger()
+
+	factory := NewCommitReportingPluginFactory(commitConfig)
+	reportingConfig := types.ReportingPluginConfig{}
+	reportingConfig.OnchainConfig = []byte{1, 2, 3}
+	reportingConfig.OffchainConfig = []byte{1, 2, 3}
+
+	// Assert that NewReportingPlugin succeeds despite many transient internal failures (mocked out above)
+	_, _, err := factory.NewReportingPlugin(reportingConfig)
+	// assert.NoError prints the underlying error on failure and avoids the typed-nil
+	// pitfall of comparing an error interface against untyped nil with assert.Equal.
+	assert.NoError(t, err)
+}
diff --git a/core/services/ocr2/plugins/ccip/ccipcommit/initializers.go b/core/services/ocr2/plugins/ccip/ccipcommit/initializers.go
new file mode 100644
index 00000000000..e964896ab93
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipcommit/initializers.go
@@ -0,0 +1,241 @@
+package ccipcommit
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "strings"
+ "time"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/ethereum/go-ethereum/common"
+ libocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus"
+ "go.uber.org/multierr"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
+
+ commonlogger "github.com/smartcontractkit/chainlink-common/pkg/logger"
+ commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
+
+ cciporm "github.com/smartcontractkit/chainlink/v2/core/services/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ db "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdb"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/observability"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/oraclelib"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/promwrapper"
+ "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
+)
+
+// defaultNewReportingPluginRetryConfig bounds the retry delays used when NewReportingPlugin
+// hits transient failures: delays start at 1s and are capped at 5m (the exact backoff shape
+// is defined by the consumer of ccipdata.RetryConfig — confirm against that implementation).
+var defaultNewReportingPluginRetryConfig = ccipdata.RetryConfig{InitialDelay: time.Second, MaxDelay: 5 * time.Minute}
+
+// NewCommitServices constructs the commit-plugin OCR2 oracle together with its supporting
+// services (chain health check, price service) for a single CCIP lane.
+//
+// srcProvider/dstProvider are the relayer-supplied CCIP commit providers for the source and
+// destination chains. `new` distinguishes a brand-new job (backfill from start blocks) from
+// a reboot (log poller resumes where it left off). Returned services are started by the job
+// framework; the oracle itself is wrapped in a service adapter.
+func NewCommitServices(ctx context.Context, ds sqlutil.DataSource, srcProvider commontypes.CCIPCommitProvider, dstProvider commontypes.CCIPCommitProvider, chainSet legacyevm.LegacyChainContainer, jb job.Job, lggr logger.Logger, pr pipeline.Runner, argsNoPlugin libocr2.OCR2OracleArgs, new bool, sourceChainID int64, destChainID int64, logError func(string)) ([]job.ServiceCtx, error) {
+	spec := jb.OCR2OracleSpec
+
+	var pluginConfig ccipconfig.CommitPluginJobSpecConfig
+	err := json.Unmarshal(spec.PluginConfig.Bytes(), &pluginConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	commitStoreAddress := common.HexToAddress(spec.ContractID)
+
+	// commit store contract doesn't exist on the source chain, but we have an implementation of it
+	// to get access to a gas estimator on the source chain
+	srcCommitStore, err := srcProvider.NewCommitStoreReader(ctx, ccipcalc.EvmAddrToGeneric(commitStoreAddress))
+	if err != nil {
+		return nil, err
+	}
+
+	dstCommitStore, err := dstProvider.NewCommitStoreReader(ctx, ccipcalc.EvmAddrToGeneric(commitStoreAddress))
+	if err != nil {
+		return nil, err
+	}
+
+	// The proxy routes each CommitStoreReader call to the source- or dest-side reader as appropriate.
+	var commitStoreReader ccipdata.CommitStoreReader
+	commitStoreReader = ccip.NewProviderProxyCommitStoreReader(srcCommitStore, dstCommitStore)
+	commitLggr := lggr.Named("CCIPCommit").With("sourceChain", sourceChainID, "destChain", destChainID)
+
+	// Token price source selection: a non-empty TokenPricesUSDPipeline takes precedence;
+	// otherwise fall back to the dynamic (aggregator-config-driven) price getter.
+	var priceGetter pricegetter.PriceGetter
+	withPipeline := strings.Trim(pluginConfig.TokenPricesUSDPipeline, "\n\t ") != ""
+	if withPipeline {
+		priceGetter, err = pricegetter.NewPipelineGetter(pluginConfig.TokenPricesUSDPipeline, pr, jb.ID, jb.ExternalJobID, jb.Name.ValueOrZero(), lggr)
+		if err != nil {
+			return nil, fmt.Errorf("creating pipeline price getter: %w", err)
+		}
+	} else {
+		// Use dynamic price getter.
+		if pluginConfig.PriceGetterConfig == nil {
+			return nil, fmt.Errorf("priceGetterConfig is nil")
+		}
+
+		// Build price getter clients for all chains specified in the aggregator configurations.
+		// Some lanes (e.g. Wemix/Kroma) requires other clients than source and destination, since they use feeds from other chains.
+		priceGetterClients := map[uint64]pricegetter.DynamicPriceGetterClient{}
+		for _, aggCfg := range pluginConfig.PriceGetterConfig.AggregatorPrices {
+			chainID := aggCfg.ChainID
+			// Retrieve the chain.
+			chain, _, err2 := ccipconfig.GetChainByChainID(chainSet, chainID)
+			if err2 != nil {
+				return nil, fmt.Errorf("retrieving chain for chainID %d: %w", chainID, err2)
+			}
+			caller := rpclib.NewDynamicLimitedBatchCaller(
+				lggr,
+				chain.Client(),
+				rpclib.DefaultRpcBatchSizeLimit,
+				rpclib.DefaultRpcBatchBackOffMultiplier,
+				rpclib.DefaultMaxParallelRpcCalls,
+			)
+			priceGetterClients[chainID] = pricegetter.NewDynamicPriceGetterClient(caller)
+		}
+
+		priceGetter, err = pricegetter.NewDynamicPriceGetter(*pluginConfig.PriceGetterConfig, priceGetterClients)
+		if err != nil {
+			return nil, fmt.Errorf("creating dynamic price getter: %w", err)
+		}
+	}
+
+	offRampReader, err := dstProvider.NewOffRampReader(ctx, pluginConfig.OffRamp)
+	if err != nil {
+		return nil, err
+	}
+
+	// The onramp address and chain selectors come from the commit store's static config.
+	staticConfig, err := commitStoreReader.GetCommitStoreStaticConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+	onRampAddress := staticConfig.OnRamp
+
+	onRampReader, err := srcProvider.NewOnRampReader(ctx, onRampAddress, staticConfig.SourceChainSelector, staticConfig.ChainSelector)
+	if err != nil {
+		return nil, err
+	}
+
+	onRampRouterAddr, err := onRampReader.RouterAddress(ctx)
+	if err != nil {
+		return nil, err
+	}
+	sourceNative, err := srcProvider.SourceNativeToken(ctx, onRampRouterAddr)
+	if err != nil {
+		return nil, err
+	}
+	// Prom wrappers
+	onRampReader = observability.NewObservedOnRampReader(onRampReader, sourceChainID, ccip.CommitPluginLabel)
+	commitStoreReader = observability.NewObservedCommitStoreReader(commitStoreReader, destChainID, ccip.CommitPluginLabel)
+	offRampReader = observability.NewObservedOffRampReader(offRampReader, destChainID, ccip.CommitPluginLabel)
+	metricsCollector := ccip.NewPluginMetricsCollector(ccip.CommitPluginLabel, sourceChainID, destChainID)
+
+	chainHealthCheck := cache.NewObservedChainHealthCheck(
+		cache.NewChainHealthcheck(
+			// Adding more details to Logger to make healthcheck logs more informative
+			// It's safe because healthcheck logs only in case of unhealthy state
+			lggr.With(
+				"onramp", onRampAddress,
+				"commitStore", commitStoreAddress,
+				"offramp", pluginConfig.OffRamp,
+			),
+			onRampReader,
+			commitStoreReader,
+		),
+		ccip.CommitPluginLabel,
+		sourceChainID, // assuming this is the chain id?
+		destChainID,
+		onRampAddress,
+	)
+
+	orm, err := cciporm.NewORM(ds)
+	if err != nil {
+		return nil, err
+	}
+
+	// The price service periodically writes gas/token prices to the DB for the plugin to read.
+	priceService := db.NewPriceService(
+		lggr,
+		orm,
+		jb.ID,
+		staticConfig.ChainSelector,
+		staticConfig.SourceChainSelector,
+		sourceNative,
+		priceGetter,
+		offRampReader,
+	)
+
+	wrappedPluginFactory := NewCommitReportingPluginFactory(CommitPluginStaticConfig{
+		lggr:                          lggr,
+		newReportingPluginRetryConfig: defaultNewReportingPluginRetryConfig,
+		onRampReader:                  onRampReader,
+		sourceChainSelector:           staticConfig.SourceChainSelector,
+		sourceNative:                  sourceNative,
+		offRamp:                       offRampReader,
+		commitStore:                   commitStoreReader,
+		destChainSelector:             staticConfig.ChainSelector,
+		priceRegistryProvider:         ccip.NewChainAgnosticPriceRegistry(dstProvider),
+		metricsCollector:              metricsCollector,
+		chainHealthcheck:              chainHealthCheck,
+		priceService:                  priceService,
+	})
+	argsNoPlugin.ReportingPluginFactory = promwrapper.NewPromFactory(wrappedPluginFactory, "CCIPCommit", jb.OCR2OracleSpec.Relay, big.NewInt(0).SetInt64(destChainID))
+	argsNoPlugin.Logger = commonlogger.NewOCRWrapper(commitLggr, true, logError)
+	oracle, err := libocr2.NewOracle(argsNoPlugin)
+	if err != nil {
+		return nil, err
+	}
+	// If this is a brand-new job, then we make use of the start blocks. If not then we're rebooting and log poller will pick up where we left off.
+	if new {
+		return []job.ServiceCtx{
+			oraclelib.NewChainAgnosticBackFilledOracle(
+				lggr,
+				srcProvider,
+				dstProvider,
+				job.NewServiceAdapter(oracle),
+			),
+			chainHealthCheck,
+			priceService,
+		}, nil
+	}
+	return []job.ServiceCtx{
+		job.NewServiceAdapter(oracle),
+		chainHealthCheck,
+		priceService,
+	}, nil
+}
+
+// CommitReportToEthTxMeta returns a converter from encoded commit-report bytes to *txmgr.TxMeta
+// for the given contract type and version, delegating to the version-aware factory package.
+func CommitReportToEthTxMeta(typ ccipconfig.ContractType, ver semver.Version) (func(report []byte) (*txmgr.TxMeta, error), error) {
+	return factory.CommitReportToEthTxMeta(typ, ver)
+}
+
+// UnregisterCommitPluginLpFilters unregisters all the registered filters for both source and dest chains.
+// NOTE: The transaction MUST be used here for CLO's monster tx to function as expected
+// https://github.com/smartcontractkit/ccip/blob/68e2197472fb017dd4e5630d21e7878d58bc2a44/core/services/feeds/service.go#L716
+// TODO once that transaction is broken up, we should be able to simply rely on oracle.Close() to cleanup the filters.
+// Until then we have to deterministically reload the readers from the spec (and thus their filters) and close them.
+func UnregisterCommitPluginLpFilters(srcProvider commontypes.CCIPCommitProvider, dstProvider commontypes.CCIPCommitProvider) error {
+	// Close the source provider first, then the destination provider, accumulating any
+	// errors so that one failure does not prevent the other provider from closing.
+	var combinedErr error
+	for _, provider := range []commontypes.CCIPCommitProvider{srcProvider, dstProvider} {
+		if closeErr := provider.Close(); closeErr != nil {
+			combinedErr = multierr.Append(combinedErr, closeErr)
+		}
+	}
+	return combinedErr
+}
diff --git a/core/services/ocr2/plugins/ccip/ccipcommit/ocr2.go b/core/services/ocr2/plugins/ccip/ccipcommit/ocr2.go
new file mode 100644
index 00000000000..2f0fc4e7956
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipcommit/ocr2.go
@@ -0,0 +1,753 @@
+package ccipcommit
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "math/big"
+ "sort"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/pkg/errors"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+ "github.com/smartcontractkit/chainlink-common/pkg/merklemulti"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider"
+ db "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdb"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices"
+)
+
+// Plugin-wide size and throughput limits for the CCIP commit plugin.
+const (
+	// only dynamic field in CommitReport is tokens PriceUpdates, and we don't expect to need to update thousands of tokens in a single tx
+	MaxCommitReportLength = 10_000
+	// Maximum inflight seq number range before we consider reports to be failing to get included entirely
+	// and restart from the chain's minSeqNum. Want to set it high to allow for large throughput,
+	// but low enough to minimize wasted revert cost.
+	MaxInflightSeqNumGap = 500
+	// OnRampMessagesScanLimit is used to limit number of onramp messages scanned in each Observation.
+	// Single CommitRoot can contain up to merklemulti.MaxNumberTreeLeaves, so we scan twice that to be safe and still don't hurt DB performance.
+	OnRampMessagesScanLimit = merklemulti.MaxNumberTreeLeaves * 2
+)
+
+// Compile-time assertions that the factory and plugin implement the OCR2 interfaces.
+var (
+	_ types.ReportingPluginFactory = &CommitReportingPluginFactory{}
+	_ types.ReportingPlugin        = &CommitReportingPlugin{}
+)
+
+// update pairs a price value with the onchain timestamp at which it was recorded;
+// used to keep only the freshest price per token/chain when deduplicating log updates.
+type update struct {
+	timestamp time.Time
+	value     *big.Int
+}
+
+// CommitPluginStaticConfig carries the per-lane dependencies that do not change between
+// OCR rounds; it is assembled once in NewCommitServices and handed to the plugin factory.
+type CommitPluginStaticConfig struct {
+	lggr                          logger.Logger
+	newReportingPluginRetryConfig ccipdata.RetryConfig
+	// Source
+	onRampReader        ccipdata.OnRampReader
+	sourceChainSelector uint64
+	sourceNative        cciptypes.Address
+	// Dest
+	offRamp               ccipdata.OffRampReader
+	commitStore           ccipdata.CommitStoreReader
+	destChainSelector     uint64
+	priceRegistryProvider ccipdataprovider.PriceRegistry
+	// Offchain
+	metricsCollector ccip.PluginMetricsCollector
+	chainHealthcheck cache.ChainHealthcheck
+	priceService     db.PriceService
+}
+
+// CommitReportingPlugin implements the OCR2 ReportingPlugin for CCIP commit: it observes
+// onramp sequence numbers plus gas/token prices, and builds commit reports for the dest chain.
+type CommitReportingPlugin struct {
+	lggr logger.Logger
+	// Source
+	onRampReader        ccipdata.OnRampReader
+	sourceChainSelector uint64
+	sourceNative        cciptypes.Address
+	gasPriceEstimator   prices.GasPriceEstimatorCommit
+	// Dest
+	destChainSelector       uint64
+	commitStoreReader       ccipdata.CommitStoreReader
+	destPriceRegistryReader ccipdata.PriceRegistryReader
+	offchainConfig          cciptypes.CommitOffchainConfig
+	offRampReader           ccipdata.OffRampReader
+	F                       int
+	// Offchain
+	metricsCollector ccip.PluginMetricsCollector
+	// State
+	chainHealthcheck cache.ChainHealthcheck
+	// DB
+	priceService db.PriceService
+}
+
+// Query is not used by the CCIP Commit plugin.
+func (r *CommitReportingPlugin) Query(context.Context, types.ReportTimestamp) (types.Query, error) {
+ return types.Query{}, nil
+}
+
+// Observation calculates the sequence number interval ready to be committed and
+// the token and gas price updates required. A valid report could contain a merkle
+// root and price updates. Price updates should never contain nil values, otherwise
+// the observation will be considered invalid and rejected.
+func (r *CommitReportingPlugin) Observation(ctx context.Context, epochAndRound types.ReportTimestamp, _ types.Query) (types.Observation, error) {
+	lggr := r.lggr.Named("CommitObservation")
+	// Refuse to participate in the round while the lane health check reports unhealthy.
+	if healthy, err := r.chainHealthcheck.IsHealthy(ctx); err != nil {
+		return nil, err
+	} else if !healthy {
+		return nil, ccip.ErrChainIsNotHealthy
+	}
+
+	// Will return 0,0 if no messages are found. This is a valid case as the report could
+	// still contain fee updates.
+	minSeqNr, maxSeqNr, messageIDs, err := r.calculateMinMaxSequenceNumbers(ctx, lggr)
+	if err != nil {
+		return nil, err
+	}
+
+	// Fetches multi-lane gasPricesUSD and tokenPricesUSD for the same dest chain
+	gasPricesUSD, sourceGasPriceUSD, tokenPricesUSD, err := r.observePriceUpdates(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	lggr.Infow("Observation",
+		"minSeqNr", minSeqNr,
+		"maxSeqNr", maxSeqNr,
+		"gasPricesUSD", gasPricesUSD,
+		"tokenPricesUSD", tokenPricesUSD,
+		"epochAndRound", epochAndRound,
+		"messageIDs", messageIDs,
+	)
+	r.metricsCollector.NumberOfMessagesBasedOnInterval(ccip.Observation, minSeqNr, maxSeqNr)
+
+	// Even if all values are empty we still want to communicate our observation
+	// with the other nodes, therefore, we always return the observed values.
+	return ccip.CommitObservation{
+		Interval: cciptypes.CommitStoreInterval{
+			Min: minSeqNr,
+			Max: maxSeqNr,
+		},
+		TokenPricesUSD:            tokenPricesUSD,
+		SourceGasPriceUSD:         sourceGasPriceUSD,
+		SourceGasPriceUSDPerChain: gasPricesUSD,
+	}.Marshal()
+}
+
+// observePriceUpdates fetches latest gas and token prices from DB as long as price reporting is not disabled.
+// The prices are aggregated for all lanes for the same destination chain.
+func (r *CommitReportingPlugin) observePriceUpdates(
+	ctx context.Context,
+) (map[uint64]*big.Int, *big.Int, map[cciptypes.Address]*big.Int, error) {
+	// Non-leader lanes have price reporting disabled and observe no prices at all.
+	if r.offchainConfig.PriceReportingDisabled {
+		r.lggr.Infow("Price reporting disabled, skipping gas and token price reads")
+		return map[uint64]*big.Int{}, nil, map[cciptypes.Address]*big.Int{}, nil
+	}
+
+	// Read the multi-lane gas and token prices aggregated for this destination chain.
+	gasPrices, tokenPrices, fetchErr := r.priceService.GetGasAndTokenPrices(ctx, r.destChainSelector)
+	if fetchErr != nil {
+		return nil, nil, nil, fmt.Errorf("failed to get prices from PriceService: %w", fetchErr)
+	}
+
+	// Normalize nil maps to empty ones so the JSON-encoded observation stays well-formed.
+	if gasPrices == nil {
+		gasPrices = map[uint64]*big.Int{}
+	}
+	if tokenPrices == nil {
+		tokenPrices = map[cciptypes.Address]*big.Int{}
+	}
+
+	// For backwards compatibility with the older release during phased rollout, also surface
+	// this lane's own gas price as the single-value field (may be nil if absent).
+	return gasPrices, gasPrices[r.sourceChainSelector], tokenPrices, nil
+}
+
+// calculateMinMaxSequenceNumbers scans the onramp for not-yet-committed send requests and
+// returns the [min, max] sequence-number interval together with the matching message IDs.
+// A (0, 0) interval with no IDs means there is nothing new to commit.
+func (r *CommitReportingPlugin) calculateMinMaxSequenceNumbers(ctx context.Context, lggr logger.Logger) (uint64, uint64, []cciptypes.Hash, error) {
+	nextSeqNum, err := r.commitStoreReader.GetExpectedNextSequenceNumber(ctx)
+	if err != nil {
+		return 0, 0, []cciptypes.Hash{}, err
+	}
+
+	// The scan is bounded by OnRampMessagesScanLimit so one observation cannot overload the DB.
+	msgRequests, err := r.onRampReader.GetSendRequestsBetweenSeqNums(ctx, nextSeqNum, nextSeqNum+OnRampMessagesScanLimit, true)
+	if err != nil {
+		return 0, 0, []cciptypes.Hash{}, err
+	}
+	if len(msgRequests) == 0 {
+		lggr.Infow("No new requests", "nextSeqNum", nextSeqNum)
+		return 0, 0, []cciptypes.Hash{}, nil
+	}
+
+	// Pre-size both slices and fill by index rather than appending.
+	messageIDs := make([]cciptypes.Hash, len(msgRequests))
+	seqNrs := make([]uint64, len(msgRequests))
+	for i, msgReq := range msgRequests {
+		seqNrs[i] = msgReq.SequenceNumber
+		messageIDs[i] = msgReq.MessageID
+	}
+
+	minSeqNr, maxSeqNr := seqNrs[0], seqNrs[len(seqNrs)-1]
+	if minSeqNr != nextSeqNum {
+		// Still report the observation as even partial reports have value e.g. all nodes are
+		// missing a single, different log each, they would still be able to produce a valid report.
+		lggr.Warnf("Missing sequence number range [%d-%d]", nextSeqNum, minSeqNr)
+	}
+	if !ccipcalc.ContiguousReqs(lggr, minSeqNr, maxSeqNr, seqNrs) {
+		return 0, 0, []cciptypes.Hash{}, errors.New("unexpected gap in seq nums")
+	}
+	return minSeqNr, maxSeqNr, messageIDs, nil
+}
+
+// Gets the latest token price updates based on logs within the heartbeat
+// The updates returned by this function are guaranteed to not contain nil values.
+func (r *CommitReportingPlugin) getLatestTokenPriceUpdates(ctx context.Context, now time.Time) (map[cciptypes.Address]update, error) {
+	// The 0 argument is presumably a confirmations/offset parameter of the reader — confirm
+	// against the PriceRegistryReader interface.
+	tokenPriceUpdates, err := r.destPriceRegistryReader.GetTokenPriceUpdatesCreatedAfter(
+		ctx,
+		now.Add(-r.offchainConfig.TokenPriceHeartBeat),
+		0,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	latestUpdates := make(map[cciptypes.Address]update)
+	for _, tokenUpdate := range tokenPriceUpdates {
+		priceUpdate := tokenUpdate.TokenPriceUpdate
+		// Ordered by ascending timestamps
+		timestamp := time.Unix(priceUpdate.TimestampUnixSec.Int64(), 0)
+		// Keep the newest update per token; equal timestamps favor the later-iterated log.
+		if priceUpdate.Value != nil && !timestamp.Before(latestUpdates[priceUpdate.Token].timestamp) {
+			latestUpdates[priceUpdate.Token] = update{
+				timestamp: timestamp,
+				value:     priceUpdate.Value,
+			}
+		}
+	}
+
+	return latestUpdates, nil
+}
+
+// getLatestGasPriceUpdate returns the latest gas price updates based on logs within the heartbeat.
+// If an update is found, it is not expected to contain a nil value.
+func (r *CommitReportingPlugin) getLatestGasPriceUpdate(ctx context.Context, now time.Time) (map[uint64]update, error) {
+	gasPriceUpdates, err := r.destPriceRegistryReader.GetAllGasPriceUpdatesCreatedAfter(
+		ctx,
+		now.Add(-r.offchainConfig.GasPriceHeartBeat),
+		0,
+	)
+
+	if err != nil {
+		return nil, err
+	}
+
+	latestUpdates := make(map[uint64]update)
+	for _, gasUpdate := range gasPriceUpdates {
+		priceUpdate := gasUpdate.GasPriceUpdate
+		// Ordered by ascending timestamps
+		timestamp := time.Unix(priceUpdate.TimestampUnixSec.Int64(), 0)
+		// Keep the newest update per destination chain selector; equal timestamps favor
+		// the later-iterated log.
+		if priceUpdate.Value != nil && !timestamp.Before(latestUpdates[priceUpdate.DestChainSelector].timestamp) {
+			latestUpdates[priceUpdate.DestChainSelector] = update{
+				timestamp: timestamp,
+				value:     priceUpdate.Value,
+			}
+		}
+	}
+
+	r.lggr.Infow("Latest gas price from log poller", "latestUpdates", latestUpdates)
+	return latestUpdates, nil
+}
+
+// Report reaches consensus over the attributed observations: it agrees on the message
+// sequence-number interval and on gas/token price updates, builds and encodes the commit
+// report, and returns it. Returning (false, nil, nil) skips the round when there is
+// nothing to commit (empty interval and no price updates).
+func (r *CommitReportingPlugin) Report(ctx context.Context, epochAndRound types.ReportTimestamp, _ types.Query, observations []types.AttributedObservation) (bool, types.Report, error) {
+	now := time.Now()
+	lggr := r.lggr.Named("CommitReport")
+	// Refuse to build a report while the lane health check reports unhealthy.
+	if healthy, err := r.chainHealthcheck.IsHealthy(ctx); err != nil {
+		return false, nil, err
+	} else if !healthy {
+		return false, nil, ccip.ErrChainIsNotHealthy
+	}
+
+	parsableObservations := ccip.GetParsableObservations[ccip.CommitObservation](lggr, observations)
+
+	intervals, gasPriceObs, tokenPriceObs, err := extractObservationData(lggr, r.F, r.sourceChainSelector, parsableObservations)
+	if err != nil {
+		return false, nil, err
+	}
+
+	agreedInterval, err := calculateIntervalConsensus(intervals, r.F, merklemulti.MaxNumberTreeLeaves)
+	if err != nil {
+		return false, nil, err
+	}
+
+	gasPrices, tokenPrices, err := r.selectPriceUpdates(ctx, now, gasPriceObs, tokenPriceObs)
+	if err != nil {
+		return false, nil, err
+	}
+	// If there are no fee updates and the interval is zero there is no report to produce.
+	if agreedInterval.Max == 0 && len(gasPrices) == 0 && len(tokenPrices) == 0 {
+		lggr.Infow("Empty report, skipping")
+		return false, nil, nil
+	}
+
+	report, err := r.buildReport(ctx, lggr, agreedInterval, gasPrices, tokenPrices)
+	if err != nil {
+		return false, nil, err
+	}
+	encodedReport, err := r.commitStoreReader.EncodeCommitReport(ctx, report)
+	if err != nil {
+		return false, nil, err
+	}
+	r.metricsCollector.SequenceNumber(ccip.Report, report.Interval.Max)
+	r.metricsCollector.NumberOfMessagesBasedOnInterval(ccip.Report, report.Interval.Min, report.Interval.Max)
+	lggr.Infow("Report",
+		"merkleRoot", hex.EncodeToString(report.MerkleRoot[:]),
+		"minSeqNr", report.Interval.Min,
+		"maxSeqNr", report.Interval.Max,
+		"gasPriceUpdates", report.GasPrices,
+		"tokenPriceUpdates", report.TokenPrices,
+		"epochAndRound", epochAndRound,
+	)
+	return true, encodedReport, nil
+}
+
+// calculateIntervalConsensus compresses a set of intervals into one interval
+// taking into account f which is the maximum number of faults across the whole DON.
+// OCR itself won't call Report unless there are 2*f+1 observations
+// https://github.com/smartcontractkit/libocr/blob/master/offchainreporting2/internal/protocol/report_generation_follower.go#L415
+// and f of those observations may be either unparseable or adversarially set values. That means
+// we'll either have f+1 parsed honest values here, 2f+1 parsed values with f adversarial values or somewhere
+// in between.
+// rangeLimit is the maximum range of the interval. If the interval is larger than this, it will be truncated. Zero means no limit.
+// NOTE: the intervals slice is sorted in place (twice); callers must not rely on its ordering afterwards.
+func calculateIntervalConsensus(intervals []cciptypes.CommitStoreInterval, f int, rangeLimit uint64) (cciptypes.CommitStoreInterval, error) {
+	// To understand min/max selection here, we need to consider an adversary that controls f values
+	// and is intentionally trying to stall the protocol or influence the value returned. For simplicity
+	// consider f=1 and n=4 nodes. In that case adversary may try to bias the min or max high/low.
+	// We could end up (2f+1=3) with sorted_mins=[1,1,1e9] or [-1e9,1,1] as examples. Selecting
+	// sorted_mins[f] ensures:
+	// - At least one honest node has seen this value, so adversary cannot bias the value lower which
+	// would cause reverts
+	// - If an honest oracle reports sorted_min[f] which happens to be stale i.e. that oracle
+	// has a delayed view of the chain, then the report will revert onchain but still succeed upon retry
+	// - We minimize the risk of naturally hitting the error condition minSeqNum > maxSeqNum due to oracles
+	// delayed views of the chain (would be an issue with taking sorted_mins[-f])
+	sort.Slice(intervals, func(i, j int) bool {
+		return intervals[i].Min < intervals[j].Min
+	})
+	minSeqNum := intervals[f].Min
+
+	// The only way a report could have a minSeqNum of 0 is when there are no messages to report
+	// and the report is potentially still valid for gas fee updates.
+	if minSeqNum == 0 {
+		return cciptypes.CommitStoreInterval{Min: 0, Max: 0}, nil
+	}
+	// Consider a similar example to the sorted_mins one above except where they are maxes.
+	// We choose the more "conservative" sorted_maxes[f] so:
+	// - We are ensured that at least one honest oracle has seen the max, so adversary cannot set it lower and
+	// cause the maxSeqNum < minSeqNum errors
+	// - If an honest oracle reports sorted_max[f] which happens to be stale i.e. that oracle
+	// has a delayed view of the source chain, then we simply lose a little bit of throughput.
+	// - If we were to pick sorted_max[-f] i.e. the maximum honest node view (a more "aggressive" setting in terms of throughput),
+	// then an adversary can continually send high values e.g. imagine we have observations from all 4 nodes
+	// [honest 1, honest 1, honest 2, malicious 2], in this case we pick 2, but it's not enough to be able
+	// to build a report since the first 2 honest nodes are unaware of message 2.
+	sort.Slice(intervals, func(i, j int) bool {
+		return intervals[i].Max < intervals[j].Max
+	})
+	maxSeqNum := intervals[f].Max
+	if maxSeqNum < minSeqNum {
+		// If the consensus report is invalid for onchain acceptance, we do not vote for it as
+		// an early termination step.
+		return cciptypes.CommitStoreInterval{}, errors.New("max seq num smaller than min")
+	}
+
+	// If the range is too large, truncate it.
+	if rangeLimit > 0 && maxSeqNum-minSeqNum+1 > rangeLimit {
+		maxSeqNum = minSeqNum + rangeLimit - 1
+	}
+
+	return cciptypes.CommitStoreInterval{
+		Min: minSeqNum,
+		Max: maxSeqNum,
+	}, nil
+}
+
+// extractObservationData extracts observation fields into their own slices
+// and filters out observation data that are invalid
+func extractObservationData(lggr logger.Logger, f int, sourceChainSelector uint64, observations []ccip.CommitObservation) (intervals []cciptypes.CommitStoreInterval, gasPrices map[uint64][]*big.Int, tokenPrices map[cciptypes.Address][]*big.Int, err error) {
+	// We require at least f+1 observations to reach consensus. Checking to ensure there are at least f+1 parsed observations.
+	if len(observations) <= f {
+		return nil, nil, nil, fmt.Errorf("not enough observations to form consensus: #obs=%d, f=%d", len(observations), f)
+	}
+
+	// Note: the interval from every parsed observation is kept, even when that observation's
+	// prices are later dropped by the threshold check below.
+	gasPriceObservations := make(map[uint64][]*big.Int)
+	tokenPriceObservations := make(map[cciptypes.Address][]*big.Int)
+	for _, obs := range observations {
+		intervals = append(intervals, obs.Interval)
+
+		// nil prices are invalid and are silently skipped here.
+		for selector, price := range obs.SourceGasPriceUSDPerChain {
+			if price != nil {
+				gasPriceObservations[selector] = append(gasPriceObservations[selector], price)
+			}
+		}
+		// During phased rollout, NOPs running old release only report SourceGasPriceUSD.
+		// An empty `SourceGasPriceUSDPerChain` with a non-nil `SourceGasPriceUSD` can only happen with old release.
+		if len(obs.SourceGasPriceUSDPerChain) == 0 && obs.SourceGasPriceUSD != nil {
+			gasPriceObservations[sourceChainSelector] = append(gasPriceObservations[sourceChainSelector], obs.SourceGasPriceUSD)
+		}
+
+		for token, price := range obs.TokenPricesUSD {
+			if price != nil {
+				tokenPriceObservations[token] = append(tokenPriceObservations[token], price)
+			}
+		}
+	}
+
+	// Price is dropped if there are not enough valid observations. With a threshold of 2*(f-1) + 1, we achieve a balance between safety and liveness.
+	// During phased-rollout where some honest nodes may not have started observing the token yet, it requires 5 malicious node with 1 being the leader to successfully alter price.
+	// During regular operation, it requires 3 malicious nodes with 1 being the leader to temporarily delay price update for the token.
+	priceReportingThreshold := 2*(f-1) + 1
+
+	gasPrices = make(map[uint64][]*big.Int)
+	for selector, perChainPriceObservations := range gasPriceObservations {
+		if len(perChainPriceObservations) < priceReportingThreshold {
+			lggr.Warnf("Skipping chain with selector %d due to not enough valid observations: #obs=%d, f=%d, threshold=%d", selector, len(perChainPriceObservations), f, priceReportingThreshold)
+			continue
+		}
+		gasPrices[selector] = perChainPriceObservations
+	}
+
+	tokenPrices = make(map[cciptypes.Address][]*big.Int)
+	for token, perTokenPriceObservations := range tokenPriceObservations {
+		if len(perTokenPriceObservations) < priceReportingThreshold {
+			lggr.Warnf("Skipping token %s due to not enough valid observations: #obs=%d, f=%d, threshold=%d", string(token), len(perTokenPriceObservations), f, priceReportingThreshold)
+			continue
+		}
+		tokenPrices[token] = perTokenPriceObservations
+	}
+
+	return intervals, gasPrices, tokenPrices, nil
+}
+
+// selectPriceUpdates filters out gas and token price updates that are already inflight
+func (r *CommitReportingPlugin) selectPriceUpdates(ctx context.Context, now time.Time, gasPriceObs map[uint64][]*big.Int, tokenPriceObs map[cciptypes.Address][]*big.Int) ([]cciptypes.GasPrice, []cciptypes.TokenPrice, error) {
+ // If price reporting is disabled, there is no need to select price updates.
+ if r.offchainConfig.PriceReportingDisabled {
+ return nil, nil, nil
+ }
+
+ latestGasPrice, err := r.getLatestGasPriceUpdate(ctx, now)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ latestTokenPrices, err := r.getLatestTokenPriceUpdates(ctx, now)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return r.calculatePriceUpdates(gasPriceObs, tokenPriceObs, latestGasPrice, latestTokenPrices)
+}
+
// calculatePriceUpdates builds the gas and token price updates for the report
// from consensus observations, dropping any price whose latest known update is
// both recent (within the configured heartbeat) and within the deviation
// threshold of the new median.
//
// Note priceUpdates must be deterministic.
// The provided gasPriceObs and tokenPriceObs should not contain nil values.
// The provided latestGasPrice and latestTokenPrices should not contain nil values.
func (r *CommitReportingPlugin) calculatePriceUpdates(gasPriceObs map[uint64][]*big.Int, tokenPriceObs map[cciptypes.Address][]*big.Int, latestGasPrice map[uint64]update, latestTokenPrices map[cciptypes.Address]update) ([]cciptypes.GasPrice, []cciptypes.TokenPrice, error) {
	var tokenPriceUpdates []cciptypes.TokenPrice
	for token, tokenPriceObservations := range tokenPriceObs {
		// Median (sorted middle) of the observed prices is the consensus value.
		medianPrice := ccipcalc.BigIntSortedMiddle(tokenPriceObservations)

		latestTokenPrice, exists := latestTokenPrices[token]
		if exists {
			tokenPriceUpdatedRecently := time.Since(latestTokenPrice.timestamp) < r.offchainConfig.TokenPriceHeartBeat
			tokenPriceNotChanged := !ccipcalc.Deviates(medianPrice, latestTokenPrice.value, int64(r.offchainConfig.TokenPriceDeviationPPB))
			if tokenPriceUpdatedRecently && tokenPriceNotChanged {
				r.lggr.Debugw("token price was updated recently, skipping the update",
					"token", token, "newPrice", medianPrice, "existingPrice", latestTokenPrice.value)
				continue // skip the update if we recently had a price update close to the new value
			}
		}

		tokenPriceUpdates = append(tokenPriceUpdates, cciptypes.TokenPrice{
			Token: token,
			Value: medianPrice,
		})
	}

	// Determinism required: map iteration order is random, so sort by token.
	sort.Slice(tokenPriceUpdates, func(i, j int) bool {
		return tokenPriceUpdates[i].Token < tokenPriceUpdates[j].Token
	})

	var gasPriceUpdate []cciptypes.GasPrice
	for chainSelector, gasPriceObservations := range gasPriceObs {
		newGasPrice, err := r.gasPriceEstimator.Median(gasPriceObservations) // Compute the median price
		if err != nil {
			return nil, nil, fmt.Errorf("failed to calculate median gas price for chain selector %d: %w", chainSelector, err)
		}

		// Default to updating so that we update if there are no prior updates.
		// NOTE: this shadows the latestGasPrice map parameter for the rest of the iteration.
		latestGasPrice, exists := latestGasPrice[chainSelector]
		if exists && latestGasPrice.value != nil {
			gasPriceUpdatedRecently := time.Since(latestGasPrice.timestamp) < r.offchainConfig.GasPriceHeartBeat
			gasPriceDeviated, err := r.gasPriceEstimator.Deviates(newGasPrice, latestGasPrice.value)
			if err != nil {
				return nil, nil, err
			}
			if gasPriceUpdatedRecently && !gasPriceDeviated {
				r.lggr.Debugw("gas price was updated recently and not deviated sufficiently, skipping the update",
					"chainSelector", chainSelector, "newPrice", newGasPrice, "existingPrice", latestGasPrice.value)
				continue
			}
		}

		gasPriceUpdate = append(gasPriceUpdate, cciptypes.GasPrice{
			DestChainSelector: chainSelector,
			Value:             newGasPrice,
		})
	}

	// Determinism required: sort gas price updates by chain selector.
	sort.Slice(gasPriceUpdate, func(i, j int) bool {
		return gasPriceUpdate[i].DestChainSelector < gasPriceUpdate[j].DestChainSelector
	})

	return gasPriceUpdate, tokenPriceUpdates, nil
}
+
+// buildReport assumes there is at least one message in reqs.
+func (r *CommitReportingPlugin) buildReport(ctx context.Context, lggr logger.Logger, interval cciptypes.CommitStoreInterval, gasPrices []cciptypes.GasPrice, tokenPrices []cciptypes.TokenPrice) (cciptypes.CommitStoreReport, error) {
+ // If no messages are needed only include fee updates
+ if interval.Min == 0 {
+ return cciptypes.CommitStoreReport{
+ TokenPrices: tokenPrices,
+ GasPrices: gasPrices,
+ MerkleRoot: [32]byte{},
+ Interval: interval,
+ }, nil
+ }
+
+ // Logs are guaranteed to be in order of seq num, since these are finalized logs only
+ // and the contract's seq num is auto-incrementing.
+ sendRequests, err := r.onRampReader.GetSendRequestsBetweenSeqNums(ctx, interval.Min, interval.Max, true)
+ if err != nil {
+ return cciptypes.CommitStoreReport{}, err
+ }
+ if len(sendRequests) == 0 {
+ lggr.Warn("No messages found in interval",
+ "minSeqNr", interval.Min,
+ "maxSeqNr", interval.Max)
+ return cciptypes.CommitStoreReport{}, fmt.Errorf("tried building a tree without leaves")
+ }
+
+ leaves := make([][32]byte, 0, len(sendRequests))
+ var seqNrs []uint64
+ for _, req := range sendRequests {
+ leaves = append(leaves, req.Hash)
+ seqNrs = append(seqNrs, req.SequenceNumber)
+ }
+ if !ccipcalc.ContiguousReqs(lggr, interval.Min, interval.Max, seqNrs) {
+ return cciptypes.CommitStoreReport{}, errors.Errorf("do not have full range [%v, %v] have %v", interval.Min, interval.Max, seqNrs)
+ }
+ tree, err := merklemulti.NewTree(hashutil.NewKeccak(), leaves)
+ if err != nil {
+ return cciptypes.CommitStoreReport{}, err
+ }
+
+ return cciptypes.CommitStoreReport{
+ GasPrices: gasPrices,
+ TokenPrices: tokenPrices,
+ MerkleRoot: tree.Root(),
+ Interval: interval,
+ }, nil
+}
+
+func (r *CommitReportingPlugin) ShouldAcceptFinalizedReport(ctx context.Context, reportTimestamp types.ReportTimestamp, report types.Report) (bool, error) {
+ parsedReport, err := r.commitStoreReader.DecodeCommitReport(ctx, report)
+ if err != nil {
+ return false, err
+ }
+ lggr := r.lggr.Named("CommitShouldAcceptFinalizedReport").With(
+ "merkleRoot", parsedReport.MerkleRoot,
+ "minSeqNum", parsedReport.Interval.Min,
+ "maxSeqNum", parsedReport.Interval.Max,
+ "gasPriceUpdates", parsedReport.GasPrices,
+ "tokenPriceUpdates", parsedReport.TokenPrices,
+ "reportTimestamp", reportTimestamp,
+ )
+ // Empty report, should not be put on chain
+ if parsedReport.MerkleRoot == [32]byte{} && len(parsedReport.GasPrices) == 0 && len(parsedReport.TokenPrices) == 0 {
+ lggr.Warn("Empty report, should not be put on chain")
+ return false, nil
+ }
+
+ if healthy, err1 := r.chainHealthcheck.IsHealthy(ctx); err1 != nil {
+ return false, err1
+ } else if !healthy {
+ return false, ccip.ErrChainIsNotHealthy
+ }
+
+ if r.isStaleReport(ctx, lggr, parsedReport, reportTimestamp) {
+ lggr.Infow("Rejecting stale report")
+ return false, nil
+ }
+
+ r.metricsCollector.SequenceNumber(ccip.ShouldAccept, parsedReport.Interval.Max)
+ lggr.Infow("Accepting finalized report", "merkleRoot", hexutil.Encode(parsedReport.MerkleRoot[:]))
+ return true, nil
+}
+
+// ShouldTransmitAcceptedReport checks if the report is stale, if it is it should not be transmitted.
+func (r *CommitReportingPlugin) ShouldTransmitAcceptedReport(ctx context.Context, reportTimestamp types.ReportTimestamp, report types.Report) (bool, error) {
+ lggr := r.lggr.Named("CommitShouldTransmitAcceptedReport")
+ parsedReport, err := r.commitStoreReader.DecodeCommitReport(ctx, report)
+ if err != nil {
+ return false, err
+ }
+ if healthy, err1 := r.chainHealthcheck.IsHealthy(ctx); err1 != nil {
+ return false, err1
+ } else if !healthy {
+ return false, ccip.ErrChainIsNotHealthy
+ }
+ // If report is not stale we transmit.
+ // When the commitTransmitter enqueues the tx for tx manager,
+ // we mark it as fulfilled, effectively removing it from the set of inflight messages.
+ shouldTransmit := !r.isStaleReport(ctx, lggr, parsedReport, reportTimestamp)
+
+ lggr.Infow("ShouldTransmitAcceptedReport",
+ "shouldTransmit", shouldTransmit,
+ "reportTimestamp", reportTimestamp)
+ return shouldTransmit, nil
+}
+
+// isStaleReport checks a report to see if the contents have become stale.
+// It does so in four ways:
+// 1. if there is a merkle root, check if the sequence numbers match up with onchain data
+// 2. if there is no merkle root, check if current price's epoch and round is after onchain epoch and round
+// 3. if there is a gas price update check to see if the value is different from the last
+// reported value
+// 4. if there are token prices check to see if the values are different from the last
+// reported values.
+//
+// If there is a merkle root present, staleness is only measured based on the merkle root
+// If there is no merkle root but there is a gas update, only this gas update is used for staleness checks.
+// If only price updates are included, the price updates are used to check for staleness
+// If nothing is included the report is always considered stale.
+func (r *CommitReportingPlugin) isStaleReport(ctx context.Context, lggr logger.Logger, report cciptypes.CommitStoreReport, reportTimestamp types.ReportTimestamp) bool {
+ // If there is a merkle root, ignore all other staleness checks and only check for sequence number staleness
+ if report.MerkleRoot != [32]byte{} {
+ return r.isStaleMerkleRoot(ctx, lggr, report.Interval)
+ }
+
+ hasGasPriceUpdate := len(report.GasPrices) > 0
+ hasTokenPriceUpdates := len(report.TokenPrices) > 0
+
+ // If there is no merkle root, no gas price update and no token price update
+ // we don't want to write anything on-chain, so we consider this report stale.
+ if !hasGasPriceUpdate && !hasTokenPriceUpdates {
+ return true
+ }
+
+ // We consider a price update as stale when, there isn't an update or there is an update that is stale.
+ gasPriceStale := !hasGasPriceUpdate || r.isStaleGasPrice(ctx, lggr, report.GasPrices)
+ tokenPricesStale := !hasTokenPriceUpdates || r.isStaleTokenPrices(ctx, lggr, report.TokenPrices)
+
+ if gasPriceStale && tokenPricesStale {
+ return true
+ }
+
+ // If report only has price update, check if its epoch and round lags behind the latest onchain
+ lastPriceEpochAndRound, err := r.commitStoreReader.GetLatestPriceEpochAndRound(ctx)
+ if err != nil {
+ // Assume it's a transient issue getting the last report and try again on the next round
+ return true
+ }
+
+ thisEpochAndRound := ccipcalc.MergeEpochAndRound(reportTimestamp.Epoch, reportTimestamp.Round)
+ return lastPriceEpochAndRound >= thisEpochAndRound
+}
+
+func (r *CommitReportingPlugin) isStaleMerkleRoot(ctx context.Context, lggr logger.Logger, reportInterval cciptypes.CommitStoreInterval) bool {
+ nextSeqNum, err := r.commitStoreReader.GetExpectedNextSequenceNumber(ctx)
+ if err != nil {
+ // Assume it's a transient issue getting the last report and try again on the next round
+ return true
+ }
+
+ // The report is not stale and correct only if nextSeqNum == reportInterval.Min.
+ // Mark it stale if the condition isn't met.
+ if nextSeqNum != reportInterval.Min {
+ lggr.Infow("The report is stale because of sequence number mismatch with the commit store interval min value",
+ "nextSeqNum", nextSeqNum, "reportIntervalMin", reportInterval.Min)
+ return true
+ }
+
+ lggr.Infow("Report root is not stale", "nextSeqNum", nextSeqNum, "reportIntervalMin", reportInterval.Min)
+
+ // If a report has root and valid sequence number, the report should be submitted, regardless of price staleness
+ return false
+}
+
+func (r *CommitReportingPlugin) isStaleGasPrice(ctx context.Context, lggr logger.Logger, gasPriceUpdates []cciptypes.GasPrice) bool {
+ latestGasPrice, err := r.getLatestGasPriceUpdate(ctx, time.Now())
+ if err != nil {
+ lggr.Errorw("Gas price is stale because getLatestGasPriceUpdate failed", "err", err)
+ return true
+ }
+
+ for _, gasPriceUpdate := range gasPriceUpdates {
+ latestUpdate, exists := latestGasPrice[gasPriceUpdate.DestChainSelector]
+ if !exists || latestUpdate.value == nil {
+ lggr.Infow("Found non-stale gas price", "chainSelector", gasPriceUpdate.DestChainSelector, "gasPriceUSd", gasPriceUpdate.Value)
+ return false
+ }
+
+ gasPriceDeviated, err := r.gasPriceEstimator.Deviates(gasPriceUpdate.Value, latestUpdate.value)
+ if err != nil {
+ lggr.Errorw("Gas price is stale because deviation check failed", "err", err)
+ return true
+ }
+
+ if gasPriceDeviated {
+ lggr.Infow("Found non-stale gas price", "chainSelector", gasPriceUpdate.DestChainSelector, "gasPriceUSd", gasPriceUpdate.Value, "latestUpdate", latestUpdate.value)
+ return false
+ }
+ lggr.Infow("Gas price is stale", "chainSelector", gasPriceUpdate.DestChainSelector, "gasPriceUSd", gasPriceUpdate.Value, "latestGasPrice", latestUpdate.value)
+ }
+
+ lggr.Infow("All gas prices are stale")
+ return true
+}
+
+func (r *CommitReportingPlugin) isStaleTokenPrices(ctx context.Context, lggr logger.Logger, priceUpdates []cciptypes.TokenPrice) bool {
+ // getting the last price updates without including inflight is like querying
+ // current prices onchain, but uses logpoller's data to save on the RPC requests
+ latestTokenPriceUpdates, err := r.getLatestTokenPriceUpdates(ctx, time.Now())
+ if err != nil {
+ return true
+ }
+
+ for _, tokenUpdate := range priceUpdates {
+ latestUpdate, ok := latestTokenPriceUpdates[tokenUpdate.Token]
+ priceEqual := ok && !ccipcalc.Deviates(tokenUpdate.Value, latestUpdate.value, int64(r.offchainConfig.TokenPriceDeviationPPB))
+
+ if !priceEqual {
+ lggr.Infow("Found non-stale token price", "token", tokenUpdate.Token, "usdPerToken", tokenUpdate.Value, "latestUpdate", latestUpdate.value)
+ return false
+ }
+ lggr.Infow("Token price is stale", "latestTokenPrice", latestUpdate.value, "usdPerToken", tokenUpdate.Value, "token", tokenUpdate.Token)
+ }
+
+ lggr.Infow("All token prices are stale")
+ return true
+}
+
// Close implements the ReportingPlugin interface; the commit plugin holds no
// resources of its own, so this is a no-op.
func (r *CommitReportingPlugin) Close() error {
	return nil
}
diff --git a/core/services/ocr2/plugins/ccip/ccipcommit/ocr2_test.go b/core/services/ocr2/plugins/ccip/ccipcommit/ocr2_test.go
new file mode 100644
index 00000000000..6cf7e4bec72
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipcommit/ocr2_test.go
@@ -0,0 +1,1861 @@
+package ccipcommit
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "math/rand"
+ "slices"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/leanovate/gopter"
+ "github.com/leanovate/gopter/gen"
+ "github.com/leanovate/gopter/prop"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+ "github.com/smartcontractkit/chainlink-common/pkg/merklemulti"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/mocks"
+ mocks2 "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+ ccipcachemocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory"
+ ccipdatamocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+
+ ccipdbmocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdb/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices"
+)
+
// TestCommitReportingPlugin_Observation is a table-driven test of the plugin's
// Observation phase: it mocks the commit store, on-ramp, and price service,
// then checks the marshalled observation (or an error for paused/cursed chains).
func TestCommitReportingPlugin_Observation(t *testing.T) {
	sourceNativeTokenAddr := ccipcalc.HexToAddress("1000")
	destChainSelector := uint64(1)
	sourceChainSelector := uint64(2)

	bridgedTokens := []cciptypes.Address{
		ccipcalc.HexToAddress("2000"),
		ccipcalc.HexToAddress("3000"),
	}

	// Token price of 1e18 token amount in 1e18 USD precision
	expectedTokenPrice := map[cciptypes.Address]*big.Int{
		bridgedTokens[0]: big.NewInt(1e10),
		bridgedTokens[1]: big.NewInt(2e18),
	}

	testCases := []struct {
		name                   string
		epochAndRound          types.ReportTimestamp
		commitStorePaused      bool
		sourceChainCursed      bool
		commitStoreSeqNum      uint64
		gasPrices              map[uint64]*big.Int
		tokenPrices            map[cciptypes.Address]*big.Int
		sendReqs               []cciptypes.EVM2EVMMessageWithTxMeta
		priceReportingDisabled bool

		expErr bool
		expObs ccip.CommitObservation
	}{
		{
			name:              "base report",
			commitStoreSeqNum: 54,
			gasPrices: map[uint64]*big.Int{
				sourceChainSelector: big.NewInt(2e18),
			},
			tokenPrices: expectedTokenPrice,
			sendReqs: []cciptypes.EVM2EVMMessageWithTxMeta{
				{EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 54}},
				{EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 55}},
			},
			expObs: ccip.CommitObservation{
				TokenPricesUSD:    expectedTokenPrice,
				SourceGasPriceUSD: big.NewInt(2e18),
				SourceGasPriceUSDPerChain: map[uint64]*big.Int{
					sourceChainSelector: big.NewInt(2e18),
				},
				Interval: cciptypes.CommitStoreInterval{
					Min: 54,
					Max: 55,
				},
			},
		},
		{
			// Multiple source chains: legacy single-chain SourceGasPriceUSD is nil.
			name:              "base report with multi-chain gas prices",
			commitStoreSeqNum: 54,
			gasPrices: map[uint64]*big.Int{
				sourceChainSelector + 1: big.NewInt(2e18),
				sourceChainSelector + 2: big.NewInt(3e18),
			},
			tokenPrices: expectedTokenPrice,
			sendReqs: []cciptypes.EVM2EVMMessageWithTxMeta{
				{EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 54}},
				{EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 55}},
			},
			expObs: ccip.CommitObservation{
				TokenPricesUSD:    expectedTokenPrice,
				SourceGasPriceUSD: nil,
				SourceGasPriceUSDPerChain: map[uint64]*big.Int{
					sourceChainSelector + 1: big.NewInt(2e18),
					sourceChainSelector + 2: big.NewInt(3e18),
				},
				Interval: cciptypes.CommitStoreInterval{
					Min: 54,
					Max: 55,
				},
			},
		},
		{
			// Price reporting disabled: the observation carries empty price maps.
			name:              "base report with price reporting disabled",
			commitStoreSeqNum: 54,
			gasPrices: map[uint64]*big.Int{
				sourceChainSelector: big.NewInt(2e18),
			},
			tokenPrices: expectedTokenPrice,
			sendReqs: []cciptypes.EVM2EVMMessageWithTxMeta{
				{EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 54}},
				{EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 55}},
			},
			priceReportingDisabled: true,
			expObs: ccip.CommitObservation{
				TokenPricesUSD:            map[cciptypes.Address]*big.Int{},
				SourceGasPriceUSD:         nil,
				SourceGasPriceUSDPerChain: map[uint64]*big.Int{},
				Interval: cciptypes.CommitStoreInterval{
					Min: 54,
					Max: 55,
				},
			},
		},
		{
			name:              "commit store is down",
			commitStorePaused: true,
			sourceChainCursed: false,
			expErr:            true,
		},
		{
			name:              "source chain is cursed",
			commitStorePaused: false,
			sourceChainCursed: true,
			expErr:            true,
		},
	}

	ctx := testutils.Context(t)
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			commitStoreReader := ccipdatamocks.NewCommitStoreReader(t)
			commitStoreReader.On("IsDown", ctx).Return(tc.commitStorePaused, nil)
			commitStoreReader.On("IsDestChainHealthy", ctx).Return(true, nil)
			// The next-seq-num lookup is only reached when the chain is usable.
			if !tc.commitStorePaused && !tc.sourceChainCursed {
				commitStoreReader.On("GetExpectedNextSequenceNumber", ctx).Return(tc.commitStoreSeqNum, nil)
			}

			onRampReader := ccipdatamocks.NewOnRampReader(t)
			onRampReader.On("IsSourceChainHealthy", ctx).Return(true, nil)
			onRampReader.On("IsSourceCursed", ctx).Return(tc.sourceChainCursed, nil)
			if len(tc.sendReqs) > 0 {
				onRampReader.On("GetSendRequestsBetweenSeqNums", ctx, tc.commitStoreSeqNum, tc.commitStoreSeqNum+OnRampMessagesScanLimit, true).
					Return(tc.sendReqs, nil)
			}

			mockPriceService := ccipdbmocks.NewPriceService(t)
			mockPriceService.On("GetGasAndTokenPrices", ctx, destChainSelector).Return(
				tc.gasPrices,
				tc.tokenPrices,
				nil,
			).Maybe()

			p := &CommitReportingPlugin{}
			p.lggr = logger.TestLogger(t)
			p.commitStoreReader = commitStoreReader
			p.onRampReader = onRampReader
			p.sourceNative = sourceNativeTokenAddr
			p.metricsCollector = ccip.NoopMetricsCollector
			p.chainHealthcheck = cache.NewChainHealthcheck(p.lggr, onRampReader, commitStoreReader)
			p.priceService = mockPriceService
			p.destChainSelector = destChainSelector
			p.sourceChainSelector = sourceChainSelector
			p.offchainConfig = cciptypes.CommitOffchainConfig{
				PriceReportingDisabled: tc.priceReportingDisabled,
			}

			obs, err := p.Observation(ctx, tc.epochAndRound, types.Query{})

			if tc.expErr {
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)

			if tc.expObs.TokenPricesUSD != nil {
				// field ordering in mapping is not guaranteed, if TokenPricesUSD exists, unmarshal to compare mapping
				var obsStuct ccip.CommitObservation
				err = json.Unmarshal(obs, &obsStuct)
				assert.NoError(t, err)

				assert.Equal(t, tc.expObs, obsStuct)
			} else {
				// if TokenPricesUSD is nil, compare the bytes directly, marshal then unmarshal turns nil map to empty
				expObsBytes, err := tc.expObs.Marshal()
				assert.NoError(t, err)
				assert.Equal(t, expObsBytes, []byte(obs))
			}
		})
	}
}
+
// TestCommitReportingPlugin_Report is a table-driven test of the plugin's
// Report phase: it feeds marshalled observations through Report and compares
// the resulting encoded commit report (merkle root, interval, gas/token
// prices) against expectations, including the mixed old/new gas-price formats
// seen during phased rollout.
func TestCommitReportingPlugin_Report(t *testing.T) {
	ctx := testutils.Context(t)
	sourceChainSelector := uint64(rand.Int())
	var gasPrice = big.NewInt(1)
	var gasPrice2 = big.NewInt(2)
	gasPriceHeartBeat := *config.MustNewDuration(time.Hour)

	// With F = 1, a single observation is below the f+1 consensus minimum.
	t.Run("not enough observations", func(t *testing.T) {
		p := &CommitReportingPlugin{}
		p.lggr = logger.TestLogger(t)
		p.F = 1

		chainHealthcheck := ccipcachemocks.NewChainHealthcheck(t)
		chainHealthcheck.On("IsHealthy", ctx).Return(true, nil).Maybe()
		p.chainHealthcheck = chainHealthcheck

		o := ccip.CommitObservation{Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, SourceGasPriceUSD: big.NewInt(0)}
		obs, err := o.Marshal()
		assert.NoError(t, err)

		aos := []types.AttributedObservation{{Observation: obs}}

		gotSomeReport, gotReport, err := p.Report(ctx, types.ReportTimestamp{}, types.Query{}, aos)
		assert.False(t, gotSomeReport)
		assert.Nil(t, gotReport)
		assert.Error(t, err)
	})

	testCases := []struct {
		name              string
		observations      []ccip.CommitObservation
		f                 int
		gasPriceUpdates   []cciptypes.GasPriceUpdateWithTxMeta
		tokenDecimals     map[cciptypes.Address]uint8
		tokenPriceUpdates []cciptypes.TokenPriceUpdateWithTxMeta
		sendRequests      []cciptypes.EVM2EVMMessageWithTxMeta
		expCommitReport   *cciptypes.CommitStoreReport
		expSeqNumRange    cciptypes.CommitStoreInterval
		expErr            bool
	}{
		{
			name: "base",
			observations: []ccip.CommitObservation{
				{Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, SourceGasPriceUSDPerChain: map[uint64]*big.Int{sourceChainSelector: gasPrice}},
				{Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, SourceGasPriceUSDPerChain: map[uint64]*big.Int{sourceChainSelector: gasPrice}},
			},
			f: 1,
			sendRequests: []cciptypes.EVM2EVMMessageWithTxMeta{
				{
					EVM2EVMMessage: cciptypes.EVM2EVMMessage{
						SequenceNumber: 1,
					},
				},
			},
			gasPriceUpdates: []cciptypes.GasPriceUpdateWithTxMeta{
				{
					GasPriceUpdate: cciptypes.GasPriceUpdate{
						GasPrice: cciptypes.GasPrice{
							DestChainSelector: sourceChainSelector,
							Value:             big.NewInt(1),
						},
						// Older than the heartbeat, so the new price is not skipped.
						TimestampUnixSec: big.NewInt(time.Now().Add(-2 * gasPriceHeartBeat.Duration()).Unix()),
					},
				},
			},
			expSeqNumRange: cciptypes.CommitStoreInterval{Min: 1, Max: 1},
			expCommitReport: &cciptypes.CommitStoreReport{
				MerkleRoot:  [32]byte{},
				Interval:    cciptypes.CommitStoreInterval{Min: 1, Max: 1},
				TokenPrices: nil,
				GasPrices:   []cciptypes.GasPrice{{DestChainSelector: sourceChainSelector, Value: gasPrice}},
			},
			expErr: false,
		},
		{
			// Mix of per-chain and legacy single-chain observations; only
			// chains meeting the reporting threshold make it into the report.
			name: "observations with mix gas price formats",
			observations: []ccip.CommitObservation{
				{
					Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 1},
					SourceGasPriceUSDPerChain: map[uint64]*big.Int{
						sourceChainSelector:     gasPrice,
						sourceChainSelector + 1: gasPrice2,
						sourceChainSelector + 2: gasPrice2,
					},
				},
				{
					Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 1},
					SourceGasPriceUSDPerChain: map[uint64]*big.Int{
						sourceChainSelector:     gasPrice,
						sourceChainSelector + 1: gasPrice2,
						sourceChainSelector + 2: gasPrice2,
					},
				},
				{
					Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 1},
					SourceGasPriceUSDPerChain: map[uint64]*big.Int{
						sourceChainSelector:     gasPrice,
						sourceChainSelector + 1: gasPrice2,
					},
				},
				{
					Interval:          cciptypes.CommitStoreInterval{Min: 1, Max: 1},
					SourceGasPriceUSD: gasPrice,
				},
			},
			f: 2,
			sendRequests: []cciptypes.EVM2EVMMessageWithTxMeta{
				{
					EVM2EVMMessage: cciptypes.EVM2EVMMessage{
						SequenceNumber: 1,
					},
				},
			},
			gasPriceUpdates: []cciptypes.GasPriceUpdateWithTxMeta{
				{
					GasPriceUpdate: cciptypes.GasPriceUpdate{
						GasPrice: cciptypes.GasPrice{
							DestChainSelector: sourceChainSelector,
							Value:             big.NewInt(1),
						},
						TimestampUnixSec: big.NewInt(time.Now().Add(-2 * gasPriceHeartBeat.Duration()).Unix()),
					},
				},
			},
			expSeqNumRange: cciptypes.CommitStoreInterval{Min: 1, Max: 1},
			expCommitReport: &cciptypes.CommitStoreReport{
				MerkleRoot:  [32]byte{},
				Interval:    cciptypes.CommitStoreInterval{Min: 1, Max: 1},
				TokenPrices: nil,
				GasPrices: []cciptypes.GasPrice{
					{DestChainSelector: sourceChainSelector, Value: gasPrice},
					{DestChainSelector: sourceChainSelector + 1, Value: gasPrice2},
				},
			},
			expErr: false,
		},
		{
			// Empty interval and a recent onchain price: no report expected.
			name: "empty",
			observations: []ccip.CommitObservation{
				{Interval: cciptypes.CommitStoreInterval{Min: 0, Max: 0}, SourceGasPriceUSD: big.NewInt(0)},
				{Interval: cciptypes.CommitStoreInterval{Min: 0, Max: 0}, SourceGasPriceUSD: big.NewInt(0)},
			},
			gasPriceUpdates: []cciptypes.GasPriceUpdateWithTxMeta{
				{
					GasPriceUpdate: cciptypes.GasPriceUpdate{
						GasPrice: cciptypes.GasPrice{
							DestChainSelector: sourceChainSelector,
							Value:             big.NewInt(0),
						},
						// Within the heartbeat, so the unchanged price is skipped.
						TimestampUnixSec: big.NewInt(time.Now().Add(-gasPriceHeartBeat.Duration() / 2).Unix()),
					},
				},
			},
			f:      1,
			expErr: false,
		},
		{
			// Interval present but the send request carries no leaves: tree build fails.
			name: "no leaves",
			observations: []ccip.CommitObservation{
				{Interval: cciptypes.CommitStoreInterval{Min: 2, Max: 2}, SourceGasPriceUSD: big.NewInt(0)},
				{Interval: cciptypes.CommitStoreInterval{Min: 2, Max: 2}, SourceGasPriceUSD: big.NewInt(0)},
			},
			f:              1,
			sendRequests:   []cciptypes.EVM2EVMMessageWithTxMeta{{}},
			expSeqNumRange: cciptypes.CommitStoreInterval{Min: 2, Max: 2},
			expErr:         true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			destPriceRegistryReader := ccipdatamocks.NewPriceRegistryReader(t)
			destPriceRegistryReader.On("GetAllGasPriceUpdatesCreatedAfter", ctx, mock.Anything, 0).Return(tc.gasPriceUpdates, nil)
			destPriceRegistryReader.On("GetTokenPriceUpdatesCreatedAfter", ctx, mock.Anything, 0).Return(tc.tokenPriceUpdates, nil)

			onRampReader := ccipdatamocks.NewOnRampReader(t)
			if len(tc.sendRequests) > 0 {
				onRampReader.On("GetSendRequestsBetweenSeqNums", ctx, tc.expSeqNumRange.Min, tc.expSeqNumRange.Max, true).Return(tc.sendRequests, nil)
			}

			evmEstimator := mocks.NewEvmFeeEstimator(t)
			evmEstimator.On("L1Oracle").Return(nil)
			gasPriceEstimator := prices.NewDAGasPriceEstimator(evmEstimator, nil, 2e9, 2e9) // 200% deviation

			// Decimals are served in token-sorted order to match the reader contract.
			var destTokens []cciptypes.Address
			for tk := range tc.tokenDecimals {
				destTokens = append(destTokens, tk)
			}
			sort.Slice(destTokens, func(i, j int) bool {
				return destTokens[i] < destTokens[j]
			})
			var destDecimals []uint8
			for _, token := range destTokens {
				destDecimals = append(destDecimals, tc.tokenDecimals[token])
			}

			destPriceRegistryReader.On("GetTokensDecimals", ctx, mock.MatchedBy(func(tokens []cciptypes.Address) bool {
				for _, token := range tokens {
					if !slices.Contains(destTokens, token) {
						return false
					}
				}
				return true
			})).Return(destDecimals, nil).Maybe()

			lp := mocks2.NewLogPoller(t)
			commitStoreReader, err := v1_2_0.NewCommitStore(logger.TestLogger(t), utils.RandomAddress(), nil, lp)
			assert.NoError(t, err)

			healthCheck := ccipcachemocks.NewChainHealthcheck(t)
			healthCheck.On("IsHealthy", ctx).Return(true, nil)

			p := &CommitReportingPlugin{}
			p.lggr = logger.TestLogger(t)
			p.destPriceRegistryReader = destPriceRegistryReader
			p.onRampReader = onRampReader
			p.sourceChainSelector = sourceChainSelector
			p.gasPriceEstimator = gasPriceEstimator
			p.offchainConfig.GasPriceHeartBeat = gasPriceHeartBeat.Duration()
			p.commitStoreReader = commitStoreReader
			p.F = tc.f
			p.metricsCollector = ccip.NoopMetricsCollector
			p.chainHealthcheck = healthCheck

			aos := make([]types.AttributedObservation, 0, len(tc.observations))
			for _, o := range tc.observations {
				obs, err2 := o.Marshal()
				assert.NoError(t, err2)
				aos = append(aos, types.AttributedObservation{Observation: obs})
			}

			gotSomeReport, gotReport, err := p.Report(ctx, types.ReportTimestamp{}, types.Query{}, aos)
			if tc.expErr {
				assert.Error(t, err)
				return
			}

			assert.NoError(t, err)

			if tc.expCommitReport != nil {
				assert.True(t, gotSomeReport)
				encodedExpectedReport, err := encodeCommitReport(*tc.expCommitReport)
				assert.NoError(t, err)
				assert.Equal(t, types.Report(encodedExpectedReport), gotReport)
			}
		})
	}
}
+
+func TestCommitReportingPlugin_ShouldAcceptFinalizedReport(t *testing.T) {
+ ctx := testutils.Context(t)
+
+ newPlugin := func() *CommitReportingPlugin {
+ p := &CommitReportingPlugin{}
+ p.lggr = logger.TestLogger(t)
+ p.metricsCollector = ccip.NoopMetricsCollector
+ return p
+ }
+
+ t.Run("report cannot be decoded leads to error", func(t *testing.T) {
+ p := newPlugin()
+
+ encodedReport := []byte("whatever")
+
+ commitStoreReader := ccipdatamocks.NewCommitStoreReader(t)
+ p.commitStoreReader = commitStoreReader
+ commitStoreReader.On("DecodeCommitReport", mock.Anything, encodedReport).
+ Return(cciptypes.CommitStoreReport{}, errors.New("unable to decode report"))
+
+ _, err := p.ShouldAcceptFinalizedReport(ctx, types.ReportTimestamp{}, encodedReport)
+ assert.Error(t, err)
+ })
+
+ t.Run("empty report should not be accepted", func(t *testing.T) {
+ p := newPlugin()
+
+ report := cciptypes.CommitStoreReport{}
+
+ commitStoreReader := ccipdatamocks.NewCommitStoreReader(t)
+ p.commitStoreReader = commitStoreReader
+ commitStoreReader.On("DecodeCommitReport", mock.Anything, mock.Anything).Return(report, nil)
+
+ chainHealthCheck := ccipcachemocks.NewChainHealthcheck(t)
+ chainHealthCheck.On("IsHealthy", ctx).Return(true, nil).Maybe()
+ p.chainHealthcheck = chainHealthCheck
+
+ encodedReport, err := encodeCommitReport(report)
+ assert.NoError(t, err)
+ shouldAccept, err := p.ShouldAcceptFinalizedReport(ctx, types.ReportTimestamp{}, encodedReport)
+ assert.NoError(t, err)
+ assert.False(t, shouldAccept)
+ })
+
+ t.Run("stale report should not be accepted", func(t *testing.T) {
+ onChainSeqNum := uint64(100)
+
+ commitStoreReader := ccipdatamocks.NewCommitStoreReader(t)
+ p := newPlugin()
+
+ p.commitStoreReader = commitStoreReader
+
+ report := cciptypes.CommitStoreReport{
+ GasPrices: []cciptypes.GasPrice{{Value: big.NewInt(int64(rand.Int()))}},
+ MerkleRoot: [32]byte{123}, // this report is considered non-empty since it has a merkle root
+ }
+
+ commitStoreReader.On("DecodeCommitReport", mock.Anything, mock.Anything).Return(report, nil)
+ commitStoreReader.On("GetExpectedNextSequenceNumber", mock.Anything).Return(onChainSeqNum, nil)
+
+ chainHealthCheck := ccipcachemocks.NewChainHealthcheck(t)
+ chainHealthCheck.On("IsHealthy", ctx).Return(true, nil)
+ p.chainHealthcheck = chainHealthCheck
+
+ // stale since report interval is behind on chain seq num
+ report.Interval = cciptypes.CommitStoreInterval{Min: onChainSeqNum - 2, Max: onChainSeqNum + 10}
+ encodedReport, err := encodeCommitReport(report)
+ assert.NoError(t, err)
+
+ shouldAccept, err := p.ShouldAcceptFinalizedReport(ctx, types.ReportTimestamp{}, encodedReport)
+ assert.NoError(t, err)
+ assert.False(t, shouldAccept)
+ })
+
+ t.Run("non-stale report should be accepted", func(t *testing.T) {
+ onChainSeqNum := uint64(100)
+
+ p := newPlugin()
+
+ priceRegistryReader := ccipdatamocks.NewPriceRegistryReader(t)
+ p.destPriceRegistryReader = priceRegistryReader
+
+ p.lggr = logger.TestLogger(t)
+ commitStoreReader := ccipdatamocks.NewCommitStoreReader(t)
+ p.commitStoreReader = commitStoreReader
+
+ report := cciptypes.CommitStoreReport{
+ Interval: cciptypes.CommitStoreInterval{
+ Min: onChainSeqNum,
+ Max: onChainSeqNum + 10,
+ },
+ TokenPrices: []cciptypes.TokenPrice{
+ {
+ Token: cciptypes.Address(utils.RandomAddress().String()),
+ Value: big.NewInt(int64(rand.Int())),
+ },
+ },
+ GasPrices: []cciptypes.GasPrice{
+ {
+ DestChainSelector: rand.Uint64(),
+ Value: big.NewInt(int64(rand.Int())),
+ },
+ },
+ MerkleRoot: [32]byte{123},
+ }
+ commitStoreReader.On("DecodeCommitReport", mock.Anything, mock.Anything).Return(report, nil)
+ commitStoreReader.On("GetExpectedNextSequenceNumber", mock.Anything).Return(onChainSeqNum, nil)
+
+ // non-stale since report interval is not behind on-chain seq num
+ report.Interval = cciptypes.CommitStoreInterval{Min: onChainSeqNum, Max: onChainSeqNum + 10}
+ encodedReport, err := encodeCommitReport(report)
+ assert.NoError(t, err)
+
+ chainHealthCheck := ccipcachemocks.NewChainHealthcheck(t)
+ chainHealthCheck.On("IsHealthy", ctx).Return(true, nil)
+ p.chainHealthcheck = chainHealthCheck
+
+ shouldAccept, err := p.ShouldAcceptFinalizedReport(ctx, types.ReportTimestamp{}, encodedReport)
+ assert.NoError(t, err)
+ assert.True(t, shouldAccept)
+ })
+}
+
+func TestCommitReportingPlugin_ShouldTransmitAcceptedReport(t *testing.T) {
+ report := cciptypes.CommitStoreReport{
+ TokenPrices: []cciptypes.TokenPrice{
+ {Token: cciptypes.Address(utils.RandomAddress().String()), Value: big.NewInt(9e18)},
+ },
+ GasPrices: []cciptypes.GasPrice{
+ {
+
+ DestChainSelector: rand.Uint64(),
+ Value: big.NewInt(2000e9),
+ },
+ },
+ MerkleRoot: [32]byte{123},
+ }
+
+ ctx := testutils.Context(t)
+ p := &CommitReportingPlugin{}
+ commitStoreReader := ccipdatamocks.NewCommitStoreReader(t)
+ onChainSeqNum := uint64(100)
+ commitStoreReader.On("GetExpectedNextSequenceNumber", mock.Anything).Return(onChainSeqNum, nil)
+ p.commitStoreReader = commitStoreReader
+ p.lggr = logger.TestLogger(t)
+
+ chainHealthCheck := ccipcachemocks.NewChainHealthcheck(t)
+ chainHealthCheck.On("IsHealthy", ctx).Return(true, nil).Maybe()
+ p.chainHealthcheck = chainHealthCheck
+
+ t.Run("should transmit when report is not stale", func(t *testing.T) {
+ // not-stale since report interval is not behind on chain seq num
+ report.Interval = cciptypes.CommitStoreInterval{Min: onChainSeqNum, Max: onChainSeqNum + 10}
+ encodedReport, err := encodeCommitReport(report)
+ assert.NoError(t, err)
+ commitStoreReader.On("DecodeCommitReport", mock.Anything, encodedReport).Return(report, nil).Once()
+ shouldTransmit, err := p.ShouldTransmitAcceptedReport(ctx, types.ReportTimestamp{}, encodedReport)
+ assert.NoError(t, err)
+ assert.True(t, shouldTransmit)
+ })
+
+ t.Run("should not transmit when report is stale", func(t *testing.T) {
+ // stale since report interval is behind on chain seq num
+ report.Interval = cciptypes.CommitStoreInterval{Min: onChainSeqNum - 2, Max: onChainSeqNum + 10}
+ encodedReport, err := encodeCommitReport(report)
+ assert.NoError(t, err)
+ commitStoreReader.On("DecodeCommitReport", mock.Anything, encodedReport).Return(report, nil).Once()
+ shouldTransmit, err := p.ShouldTransmitAcceptedReport(ctx, types.ReportTimestamp{}, encodedReport)
+ assert.NoError(t, err)
+ assert.False(t, shouldTransmit)
+ })
+
+ t.Run("error when report cannot be decoded", func(t *testing.T) {
+ reportBytes := []byte("whatever")
+ commitStoreReader.On("DecodeCommitReport", mock.Anything, reportBytes).
+ Return(cciptypes.CommitStoreReport{}, errors.New("decode error")).Once()
+ _, err := p.ShouldTransmitAcceptedReport(ctx, types.ReportTimestamp{}, reportBytes)
+ assert.Error(t, err)
+ })
+}
+
+func TestCommitReportingPlugin_observePriceUpdates(t *testing.T) {
+ destChainSelector := uint64(12345)
+ sourceChainSelector := uint64(67890)
+
+ token1 := ccipcalc.HexToAddress("0x123")
+ token2 := ccipcalc.HexToAddress("0x234")
+
+ gasPrices := map[uint64]*big.Int{
+ sourceChainSelector: big.NewInt(1e18),
+ }
+ tokenPrices := map[cciptypes.Address]*big.Int{
+ token1: big.NewInt(2e18),
+ token2: big.NewInt(3e18),
+ }
+
+ testCases := []struct {
+ name string
+ psGasPricesResult map[uint64]*big.Int
+ psTokenPricesResult map[cciptypes.Address]*big.Int
+ PriceReportingDisabled bool
+
+ expectedGasPrice map[uint64]*big.Int
+ expectedTokenPrices map[cciptypes.Address]*big.Int
+
+ psError bool
+ expectedErr bool
+ }{
+ {
+ name: "ORM called successfully",
+ psGasPricesResult: gasPrices,
+ psTokenPricesResult: tokenPrices,
+ expectedGasPrice: gasPrices,
+ expectedTokenPrices: tokenPrices,
+ },
+ {
+ name: "price reporting disabled",
+ psGasPricesResult: gasPrices,
+ psTokenPricesResult: tokenPrices,
+ PriceReportingDisabled: true,
+ expectedGasPrice: map[uint64]*big.Int{},
+ expectedTokenPrices: map[cciptypes.Address]*big.Int{},
+ psError: false,
+ expectedErr: false,
+ },
+ {
+ name: "price service error",
+ psGasPricesResult: map[uint64]*big.Int{},
+ psTokenPricesResult: map[cciptypes.Address]*big.Int{},
+ expectedGasPrice: nil,
+ expectedTokenPrices: nil,
+ psError: true,
+ expectedErr: true,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx := tests.Context(t)
+
+ mockPriceService := ccipdbmocks.NewPriceService(t)
+ var psError error
+ if tc.psError {
+ psError = fmt.Errorf("price service error")
+ }
+ mockPriceService.On("GetGasAndTokenPrices", ctx, destChainSelector).Return(
+ tc.psGasPricesResult,
+ tc.psTokenPricesResult,
+ psError,
+ ).Maybe()
+
+ p := &CommitReportingPlugin{
+ lggr: logger.TestLogger(t),
+ destChainSelector: destChainSelector,
+ sourceChainSelector: sourceChainSelector,
+ priceService: mockPriceService,
+ offchainConfig: cciptypes.CommitOffchainConfig{
+ PriceReportingDisabled: tc.PriceReportingDisabled,
+ },
+ }
+ gasPricesUSD, sourceGasPriceUSD, tokenPricesUSD, err := p.observePriceUpdates(ctx)
+ if tc.expectedErr {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, tc.expectedGasPrice, gasPricesUSD)
+ assert.Equal(t, tc.expectedTokenPrices, tokenPricesUSD)
+ if tc.expectedGasPrice != nil {
+ assert.Equal(t, tc.expectedGasPrice[sourceChainSelector], sourceGasPriceUSD)
+ }
+ }
+ })
+ }
+}
+
+type CommitObservationLegacy struct {
+ Interval cciptypes.CommitStoreInterval `json:"interval"`
+ TokenPricesUSD map[cciptypes.Address]*big.Int `json:"tokensPerFeeCoin"`
+ SourceGasPriceUSD *big.Int `json:"sourceGasPrice"`
+}
+
+func TestCommitReportingPlugin_extractObservationData(t *testing.T) {
+ token1 := ccipcalc.HexToAddress("0xa")
+ token2 := ccipcalc.HexToAddress("0xb")
+ token1Price := big.NewInt(1)
+ token2Price := big.NewInt(2)
+ unsupportedToken := ccipcalc.HexToAddress("0xc")
+ gasPrice1 := big.NewInt(100)
+ gasPrice2 := big.NewInt(100)
+ var sourceChainSelector1 uint64 = 10
+ var sourceChainSelector2 uint64 = 20
+
+ tokenDecimals := make(map[cciptypes.Address]uint8)
+ tokenDecimals[token1] = 18
+ tokenDecimals[token2] = 18
+
+ validInterval := cciptypes.CommitStoreInterval{Min: 1, Max: 2}
+ zeroInterval := cciptypes.CommitStoreInterval{Min: 0, Max: 0}
+
+ // mix legacy commit observations with new commit observations to ensure they can work together
+ legacyObsRaw := CommitObservationLegacy{
+ Interval: validInterval,
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{
+ token1: token1Price,
+ token2: token2Price,
+ },
+ SourceGasPriceUSD: gasPrice1,
+ }
+ legacyObsBytes, err := json.Marshal(&legacyObsRaw)
+ assert.NoError(t, err)
+
+ newObsRaw := ccip.CommitObservation{
+ Interval: validInterval,
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{
+ token1: token1Price,
+ token2: token2Price,
+ },
+ SourceGasPriceUSD: gasPrice1,
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{
+ sourceChainSelector1: gasPrice1,
+ sourceChainSelector2: gasPrice2,
+ },
+ }
+ newObsBytes, err := newObsRaw.Marshal()
+ assert.NoError(t, err)
+
+ lggr := logger.TestLogger(t)
+ observations := ccip.GetParsableObservations[ccip.CommitObservation](lggr, []types.AttributedObservation{
+ {Observation: legacyObsBytes},
+ {Observation: newObsBytes},
+ })
+ assert.Len(t, observations, 2)
+ legacyObs := observations[0]
+ newObs := observations[1]
+
+ obWithNilGasPrice := ccip.CommitObservation{
+ Interval: zeroInterval,
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{
+ token1: token1Price,
+ token2: token2Price,
+ },
+ SourceGasPriceUSD: nil,
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{},
+ }
+ obWithNilTokenPrice := ccip.CommitObservation{
+ Interval: zeroInterval,
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{
+ token1: token1Price,
+ token2: nil,
+ },
+ SourceGasPriceUSD: gasPrice1,
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{
+ sourceChainSelector1: gasPrice1,
+ sourceChainSelector2: gasPrice2,
+ },
+ }
+ obMissingTokenPrices := ccip.CommitObservation{
+ Interval: zeroInterval,
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{},
+ SourceGasPriceUSD: gasPrice1,
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{
+ sourceChainSelector1: gasPrice1,
+ sourceChainSelector2: gasPrice2,
+ },
+ }
+ obWithUnsupportedToken := ccip.CommitObservation{
+ Interval: zeroInterval,
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{
+ token1: token1Price,
+ token2: token2Price,
+ unsupportedToken: token2Price,
+ },
+ SourceGasPriceUSD: gasPrice1,
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{
+ sourceChainSelector1: gasPrice1,
+ sourceChainSelector2: gasPrice2,
+ },
+ }
+ obEmpty := ccip.CommitObservation{
+ Interval: zeroInterval,
+ TokenPricesUSD: nil,
+ SourceGasPriceUSD: nil,
+ SourceGasPriceUSDPerChain: nil,
+ }
+
+ testCases := []struct {
+ name string
+ commitObservations []ccip.CommitObservation
+ f int
+ expIntervals []cciptypes.CommitStoreInterval
+ expGasPriceObs map[uint64][]*big.Int
+ expTokenPriceObs map[cciptypes.Address][]*big.Int
+ expError bool
+ }{
+ {
+ name: "base",
+ commitObservations: []ccip.CommitObservation{newObs, newObs, newObs},
+ f: 2,
+ expIntervals: []cciptypes.CommitStoreInterval{validInterval, validInterval, validInterval},
+ expGasPriceObs: map[uint64][]*big.Int{
+ sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1},
+ sourceChainSelector2: {gasPrice2, gasPrice2, gasPrice2},
+ },
+ expTokenPriceObs: map[cciptypes.Address][]*big.Int{
+ token1: {token1Price, token1Price, token1Price},
+ token2: {token2Price, token2Price, token2Price},
+ },
+ expError: false,
+ },
+ {
+ name: "pass with f=2 and mixed observations",
+ commitObservations: []ccip.CommitObservation{legacyObs, newObs, legacyObs, newObs, newObs, obWithNilGasPrice},
+ f: 2,
+ expIntervals: []cciptypes.CommitStoreInterval{validInterval, validInterval, validInterval, validInterval, validInterval, zeroInterval},
+ expGasPriceObs: map[uint64][]*big.Int{
+ sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1, gasPrice1, gasPrice1},
+ sourceChainSelector2: {gasPrice2, gasPrice2, gasPrice2},
+ },
+ expTokenPriceObs: map[cciptypes.Address][]*big.Int{
+ token1: {token1Price, token1Price, token1Price, token1Price, token1Price, token1Price},
+ token2: {token2Price, token2Price, token2Price, token2Price, token2Price, token2Price},
+ },
+ expError: false,
+ },
+ {
+ name: "pass with f=2 and mixed observations with mostly legacy observations",
+ commitObservations: []ccip.CommitObservation{legacyObs, legacyObs, legacyObs, legacyObs, newObs},
+ f: 2,
+ expIntervals: []cciptypes.CommitStoreInterval{validInterval, validInterval, validInterval, validInterval, validInterval},
+ expGasPriceObs: map[uint64][]*big.Int{
+ sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1, gasPrice1, gasPrice1},
+ },
+ expTokenPriceObs: map[cciptypes.Address][]*big.Int{
+ token1: {token1Price, token1Price, token1Price, token1Price, token1Price},
+ token2: {token2Price, token2Price, token2Price, token2Price, token2Price},
+ },
+ expError: false,
+ },
+ {
+ name: "tolerate 1 faulty obs with f=2",
+ commitObservations: []ccip.CommitObservation{legacyObs, newObs, legacyObs, obWithNilGasPrice},
+ f: 2,
+ expIntervals: []cciptypes.CommitStoreInterval{validInterval, validInterval, validInterval, zeroInterval},
+ expGasPriceObs: map[uint64][]*big.Int{
+ sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1},
+ },
+ expTokenPriceObs: map[cciptypes.Address][]*big.Int{
+ token1: {token1Price, token1Price, token1Price, token1Price},
+ token2: {token2Price, token2Price, token2Price, token2Price},
+ },
+ expError: false,
+ },
+ {
+ name: "tolerate 1 nil token price with f=1",
+ commitObservations: []ccip.CommitObservation{legacyObs, newObs, obWithNilTokenPrice},
+ f: 1,
+ expIntervals: []cciptypes.CommitStoreInterval{validInterval, validInterval, zeroInterval},
+ expGasPriceObs: map[uint64][]*big.Int{
+ sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1},
+ sourceChainSelector2: {gasPrice2, gasPrice2},
+ },
+ expTokenPriceObs: map[cciptypes.Address][]*big.Int{
+ token1: {token1Price, token1Price, token1Price},
+ token2: {token2Price, token2Price},
+ },
+ expError: false,
+ },
+ {
+ name: "tolerate 1 missing token prices with f=1",
+ commitObservations: []ccip.CommitObservation{legacyObs, newObs, obMissingTokenPrices},
+ f: 1,
+ expIntervals: []cciptypes.CommitStoreInterval{validInterval, validInterval, zeroInterval},
+ expGasPriceObs: map[uint64][]*big.Int{
+ sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1},
+ sourceChainSelector2: {gasPrice2, gasPrice2},
+ },
+ expTokenPriceObs: map[cciptypes.Address][]*big.Int{
+ token1: {token1Price, token1Price},
+ token2: {token2Price, token2Price},
+ },
+ expError: false,
+ },
+ {
+ name: "tolerate 1 unsupported token with f=2",
+ commitObservations: []ccip.CommitObservation{legacyObs, newObs, obWithUnsupportedToken},
+ f: 2,
+ expIntervals: []cciptypes.CommitStoreInterval{validInterval, validInterval, zeroInterval},
+ expGasPriceObs: map[uint64][]*big.Int{
+ sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1},
+ },
+ expTokenPriceObs: map[cciptypes.Address][]*big.Int{
+ token1: {token1Price, token1Price, token1Price},
+ token2: {token2Price, token2Price, token2Price},
+ },
+ expError: false,
+ },
+ {
+ name: "tolerate mis-matched token observations with f=2",
+ commitObservations: []ccip.CommitObservation{legacyObs, newObs, obWithNilTokenPrice, obMissingTokenPrices},
+ f: 2,
+ expIntervals: []cciptypes.CommitStoreInterval{validInterval, validInterval, zeroInterval, zeroInterval},
+ expGasPriceObs: map[uint64][]*big.Int{
+ sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1, gasPrice1},
+ sourceChainSelector2: {gasPrice2, gasPrice2, gasPrice2},
+ },
+ expTokenPriceObs: map[cciptypes.Address][]*big.Int{
+ token1: {token1Price, token1Price, token1Price},
+ },
+ expError: false,
+ },
+ {
+ name: "tolerate all tokens filtered out with f=2",
+ commitObservations: []ccip.CommitObservation{newObs, obMissingTokenPrices, obMissingTokenPrices},
+ f: 2,
+ expIntervals: []cciptypes.CommitStoreInterval{validInterval, zeroInterval, zeroInterval},
+ expGasPriceObs: map[uint64][]*big.Int{
+ sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1},
+ sourceChainSelector2: {gasPrice2, gasPrice2, gasPrice2},
+ },
+ expTokenPriceObs: map[cciptypes.Address][]*big.Int{},
+ expError: false,
+ },
+ {
+ name: "not enough observations",
+ commitObservations: []ccip.CommitObservation{legacyObs, newObs},
+ f: 2,
+ expError: true,
+ },
+ {
+ name: "too many empty observations",
+ commitObservations: []ccip.CommitObservation{obWithNilGasPrice, obWithNilTokenPrice, obEmpty, obEmpty, obEmpty},
+ f: 2,
+ expIntervals: []cciptypes.CommitStoreInterval{zeroInterval, zeroInterval, zeroInterval, zeroInterval, zeroInterval},
+ expGasPriceObs: map[uint64][]*big.Int{},
+ expTokenPriceObs: map[cciptypes.Address][]*big.Int{},
+ expError: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ intervals, gasPriceOps, tokenPriceOps, err := extractObservationData(logger.TestLogger(t), tc.f, sourceChainSelector1, tc.commitObservations)
+
+ if tc.expError {
+ assert.Error(t, err)
+ return
+ }
+ assert.Equal(t, tc.expIntervals, intervals)
+ assert.Equal(t, tc.expGasPriceObs, gasPriceOps)
+ assert.Equal(t, tc.expTokenPriceObs, tokenPriceOps)
+ assert.NoError(t, err)
+ })
+ }
+}
+
+func TestCommitReportingPlugin_calculatePriceUpdates(t *testing.T) {
+ const defaultSourceChainSelector = 10 // we reuse this value across all test cases
+ feeToken1 := ccipcalc.HexToAddress("0xa")
+ feeToken2 := ccipcalc.HexToAddress("0xb")
+
+ val1e18 := func(val int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(val)) }
+
+ testCases := []struct {
+ name string
+ commitObservations []ccip.CommitObservation
+ f int
+ latestGasPrice map[uint64]update
+ latestTokenPrices map[cciptypes.Address]update
+ gasPriceHeartBeat config.Duration
+ daGasPriceDeviationPPB int64
+ execGasPriceDeviationPPB int64
+ tokenPriceHeartBeat config.Duration
+ tokenPriceDeviationPPB uint32
+ expTokenUpdates []cciptypes.TokenPrice
+ expGasUpdates []cciptypes.GasPrice
+ }{
+ {
+ name: "median",
+ commitObservations: []ccip.CommitObservation{
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: big.NewInt(1)}},
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: big.NewInt(2)}},
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: big.NewInt(3)}},
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: big.NewInt(4)}},
+ },
+ latestGasPrice: map[uint64]update{
+ defaultSourceChainSelector: {
+ timestamp: time.Now().Add(-30 * time.Minute), // recent
+ value: val1e18(9), // median deviates
+ },
+ },
+ f: 2,
+ expGasUpdates: []cciptypes.GasPrice{{DestChainSelector: defaultSourceChainSelector, Value: big.NewInt(3)}},
+ },
+ {
+ name: "gas price update skipped because the latest is similar and was updated recently",
+ commitObservations: []ccip.CommitObservation{
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(11)}},
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(12)}},
+ },
+ gasPriceHeartBeat: *config.MustNewDuration(time.Hour),
+ daGasPriceDeviationPPB: 20e7,
+ execGasPriceDeviationPPB: 20e7,
+ tokenPriceHeartBeat: *config.MustNewDuration(time.Hour),
+ tokenPriceDeviationPPB: 20e7,
+ latestGasPrice: map[uint64]update{
+ defaultSourceChainSelector: {
+ timestamp: time.Now().Add(-30 * time.Minute), // recent
+ value: val1e18(10), // median deviates
+ },
+ },
+ f: 1,
+ expGasUpdates: nil,
+ },
+ {
+ name: "gas price update included, the latest is similar but was not updated recently",
+ commitObservations: []ccip.CommitObservation{
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(10)}},
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(11)}},
+ },
+ gasPriceHeartBeat: *config.MustNewDuration(time.Hour),
+ daGasPriceDeviationPPB: 20e7,
+ execGasPriceDeviationPPB: 20e7,
+ tokenPriceHeartBeat: *config.MustNewDuration(time.Hour),
+ tokenPriceDeviationPPB: 20e7,
+ latestGasPrice: map[uint64]update{
+ defaultSourceChainSelector: {
+ timestamp: time.Now().Add(-90 * time.Minute), // stale
+ value: val1e18(9), // median deviates
+ },
+ },
+ f: 1,
+ expGasUpdates: []cciptypes.GasPrice{{DestChainSelector: defaultSourceChainSelector, Value: val1e18(11)}},
+ },
+ {
+ name: "gas price update deviates from latest",
+ commitObservations: []ccip.CommitObservation{
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(10)}},
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(20)}},
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(20)}},
+ },
+ gasPriceHeartBeat: *config.MustNewDuration(time.Hour),
+ daGasPriceDeviationPPB: 20e7,
+ execGasPriceDeviationPPB: 20e7,
+ tokenPriceHeartBeat: *config.MustNewDuration(time.Hour),
+ tokenPriceDeviationPPB: 20e7,
+ latestGasPrice: map[uint64]update{
+ defaultSourceChainSelector: {
+ timestamp: time.Now().Add(-30 * time.Minute), // recent
+ value: val1e18(11), // latest value close to the update
+ },
+ },
+ f: 2,
+ expGasUpdates: []cciptypes.GasPrice{{DestChainSelector: defaultSourceChainSelector, Value: val1e18(20)}},
+ },
+ {
+ name: "multichain gas prices",
+ commitObservations: []ccip.CommitObservation{
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(1)}},
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector + 1: val1e18(11)}},
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector + 2: val1e18(111)}},
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(2)}},
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector + 1: val1e18(22)}},
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector + 2: val1e18(222)}},
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(3)}},
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector + 1: val1e18(33)}},
+ {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector + 2: val1e18(333)}},
+ },
+ gasPriceHeartBeat: *config.MustNewDuration(time.Hour),
+ daGasPriceDeviationPPB: 20e7,
+ execGasPriceDeviationPPB: 20e7,
+ tokenPriceHeartBeat: *config.MustNewDuration(time.Hour),
+ tokenPriceDeviationPPB: 20e7,
+ latestGasPrice: map[uint64]update{
+ defaultSourceChainSelector: {
+ timestamp: time.Now().Add(-90 * time.Minute), // stale
+ value: val1e18(9), // median deviates
+ },
+ defaultSourceChainSelector + 1: {
+ timestamp: time.Now().Add(-30 * time.Minute), // recent
+ value: val1e18(20), // median does not deviate
+ },
+ },
+ f: 1,
+ expGasUpdates: []cciptypes.GasPrice{
+ {DestChainSelector: defaultSourceChainSelector, Value: val1e18(2)},
+ {DestChainSelector: defaultSourceChainSelector + 2, Value: val1e18(222)},
+ },
+ },
+ {
+ name: "median one token",
+ commitObservations: []ccip.CommitObservation{
+ {
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: big.NewInt(10)},
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(0)},
+ },
+ {
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: big.NewInt(12)},
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(0)},
+ },
+ },
+ f: 1,
+ expTokenUpdates: []cciptypes.TokenPrice{
+ {Token: feeToken1, Value: big.NewInt(12)},
+ },
+			// We expect a gas update because there is no latest gas price on record
+ expGasUpdates: []cciptypes.GasPrice{{DestChainSelector: defaultSourceChainSelector, Value: big.NewInt(0)}},
+ },
+ {
+ name: "median two tokens",
+ commitObservations: []ccip.CommitObservation{
+ {
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: big.NewInt(10), feeToken2: big.NewInt(13)},
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(0)},
+ },
+ {
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: big.NewInt(12), feeToken2: big.NewInt(7)},
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(0)},
+ },
+ },
+ f: 1,
+ expTokenUpdates: []cciptypes.TokenPrice{
+ {Token: feeToken1, Value: big.NewInt(12)},
+ {Token: feeToken2, Value: big.NewInt(13)},
+ },
+			// We expect a gas update because there is no latest gas price on record
+ expGasUpdates: []cciptypes.GasPrice{{DestChainSelector: defaultSourceChainSelector, Value: big.NewInt(0)}},
+ },
+ {
+ name: "token price update skipped because it is close to the latest",
+ commitObservations: []ccip.CommitObservation{
+ {
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(11)},
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(0)},
+ },
+ {
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(12)},
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(0)},
+ },
+ },
+ f: 1,
+ gasPriceHeartBeat: *config.MustNewDuration(time.Hour),
+ daGasPriceDeviationPPB: 20e7,
+ execGasPriceDeviationPPB: 20e7,
+ tokenPriceHeartBeat: *config.MustNewDuration(time.Hour),
+ tokenPriceDeviationPPB: 20e7,
+ latestTokenPrices: map[cciptypes.Address]update{
+ feeToken1: {
+ timestamp: time.Now().Add(-30 * time.Minute),
+ value: val1e18(10),
+ },
+ },
+			// We expect a gas update because there is no latest gas price on record
+ expGasUpdates: []cciptypes.GasPrice{{DestChainSelector: defaultSourceChainSelector, Value: big.NewInt(0)}},
+ },
+ {
+ name: "gas price and token price both included because they are not close to the latest",
+ commitObservations: []ccip.CommitObservation{
+ {
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(20)},
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{
+ defaultSourceChainSelector: val1e18(10),
+ defaultSourceChainSelector + 1: val1e18(20),
+ },
+ },
+ {
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(21)},
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{
+ defaultSourceChainSelector: val1e18(11),
+ defaultSourceChainSelector + 1: val1e18(21),
+ },
+ },
+ },
+ f: 1,
+ gasPriceHeartBeat: *config.MustNewDuration(time.Hour),
+ daGasPriceDeviationPPB: 10e7,
+ execGasPriceDeviationPPB: 10e7,
+ tokenPriceHeartBeat: *config.MustNewDuration(time.Hour),
+ tokenPriceDeviationPPB: 20e7,
+ latestGasPrice: map[uint64]update{
+ defaultSourceChainSelector: {
+ timestamp: time.Now().Add(-30 * time.Minute),
+ value: val1e18(9),
+ },
+ defaultSourceChainSelector + 1: {
+ timestamp: time.Now().Add(-30 * time.Minute),
+ value: val1e18(9),
+ },
+ },
+ latestTokenPrices: map[cciptypes.Address]update{
+ feeToken1: {
+ timestamp: time.Now().Add(-30 * time.Minute),
+ value: val1e18(9),
+ },
+ },
+ expTokenUpdates: []cciptypes.TokenPrice{
+ {Token: feeToken1, Value: val1e18(21)},
+ },
+ expGasUpdates: []cciptypes.GasPrice{
+ {DestChainSelector: defaultSourceChainSelector, Value: val1e18(11)},
+ {DestChainSelector: defaultSourceChainSelector + 1, Value: val1e18(21)},
+ },
+ },
+ {
+			name: "gas price and token price both included because they have not been updated recently",
+ commitObservations: []ccip.CommitObservation{
+ {
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(20)},
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{
+ defaultSourceChainSelector: val1e18(10),
+ defaultSourceChainSelector + 1: val1e18(20),
+ },
+ },
+ {
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(21)},
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{
+ defaultSourceChainSelector: val1e18(11),
+ defaultSourceChainSelector + 1: val1e18(21),
+ },
+ },
+ },
+ f: 1,
+ gasPriceHeartBeat: *config.MustNewDuration(time.Hour),
+ daGasPriceDeviationPPB: 10e7,
+ execGasPriceDeviationPPB: 10e7,
+ tokenPriceHeartBeat: *config.MustNewDuration(2 * time.Hour),
+ tokenPriceDeviationPPB: 20e7,
+ latestGasPrice: map[uint64]update{
+ defaultSourceChainSelector: {
+ timestamp: time.Now().Add(-90 * time.Minute),
+ value: val1e18(11),
+ },
+ defaultSourceChainSelector + 1: {
+ timestamp: time.Now().Add(-90 * time.Minute),
+ value: val1e18(21),
+ },
+ },
+ latestTokenPrices: map[cciptypes.Address]update{
+ feeToken1: {
+ timestamp: time.Now().Add(-4 * time.Hour),
+ value: val1e18(21),
+ },
+ },
+ expTokenUpdates: []cciptypes.TokenPrice{
+ {Token: feeToken1, Value: val1e18(21)},
+ },
+ expGasUpdates: []cciptypes.GasPrice{
+ {DestChainSelector: defaultSourceChainSelector, Value: val1e18(11)},
+ {DestChainSelector: defaultSourceChainSelector + 1, Value: val1e18(21)},
+ },
+ },
+ {
+ name: "gas price included because it deviates from latest and token price skipped because it does not deviate",
+ commitObservations: []ccip.CommitObservation{
+ {
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(20)},
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(10)},
+ },
+ {
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(21)},
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(11)},
+ },
+ },
+ f: 1,
+ gasPriceHeartBeat: *config.MustNewDuration(time.Hour),
+ daGasPriceDeviationPPB: 10e7,
+ execGasPriceDeviationPPB: 10e7,
+ tokenPriceHeartBeat: *config.MustNewDuration(2 * time.Hour),
+ tokenPriceDeviationPPB: 200e7,
+ latestGasPrice: map[uint64]update{
+ defaultSourceChainSelector: {
+ timestamp: time.Now().Add(-90 * time.Minute),
+ value: val1e18(9),
+ },
+ },
+ latestTokenPrices: map[cciptypes.Address]update{
+ feeToken1: {
+ timestamp: time.Now().Add(-30 * time.Minute),
+ value: val1e18(9),
+ },
+ },
+ expGasUpdates: []cciptypes.GasPrice{{DestChainSelector: defaultSourceChainSelector, Value: val1e18(11)}},
+ },
+ {
+ name: "gas price skipped because it does not deviate and token price included because it has not been updated recently",
+ commitObservations: []ccip.CommitObservation{
+ {
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(20)},
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(10)},
+ },
+ {
+ TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(21)},
+ SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(11)},
+ },
+ },
+ f: 1,
+ gasPriceHeartBeat: *config.MustNewDuration(time.Hour),
+ daGasPriceDeviationPPB: 10e7,
+ execGasPriceDeviationPPB: 10e7,
+ tokenPriceHeartBeat: *config.MustNewDuration(2 * time.Hour),
+ tokenPriceDeviationPPB: 20e7,
+ latestGasPrice: map[uint64]update{
+ defaultSourceChainSelector: {
+ timestamp: time.Now().Add(-30 * time.Minute),
+ value: val1e18(11),
+ },
+ },
+ latestTokenPrices: map[cciptypes.Address]update{
+ feeToken1: {
+ timestamp: time.Now().Add(-4 * time.Hour),
+ value: val1e18(21),
+ },
+ },
+ expTokenUpdates: []cciptypes.TokenPrice{
+ {Token: feeToken1, Value: val1e18(21)},
+ },
+ expGasUpdates: nil,
+ },
+ }
+
+ evmEstimator := mocks.NewEvmFeeEstimator(t)
+ evmEstimator.On("L1Oracle").Return(nil)
+ estimatorCSVer, _ := semver.NewVersion("1.2.0")
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ estimator, _ := prices.NewGasPriceEstimatorForCommitPlugin(
+ *estimatorCSVer,
+ evmEstimator,
+ nil,
+ tc.daGasPriceDeviationPPB,
+ tc.execGasPriceDeviationPPB,
+ )
+
+ r := &CommitReportingPlugin{
+ lggr: logger.TestLogger(t),
+ sourceChainSelector: defaultSourceChainSelector,
+ offchainConfig: cciptypes.CommitOffchainConfig{
+ GasPriceHeartBeat: tc.gasPriceHeartBeat.Duration(),
+ TokenPriceHeartBeat: tc.tokenPriceHeartBeat.Duration(),
+ TokenPriceDeviationPPB: tc.tokenPriceDeviationPPB,
+ },
+ gasPriceEstimator: estimator,
+ F: tc.f,
+ }
+
+ gasPriceObs := make(map[uint64][]*big.Int)
+ tokenPriceObs := make(map[cciptypes.Address][]*big.Int)
+ for _, obs := range tc.commitObservations {
+ for selector, price := range obs.SourceGasPriceUSDPerChain {
+ gasPriceObs[selector] = append(gasPriceObs[selector], price)
+ }
+ for token, price := range obs.TokenPricesUSD {
+ tokenPriceObs[token] = append(tokenPriceObs[token], price)
+ }
+ }
+
+ gotGas, gotTokens, err := r.calculatePriceUpdates(gasPriceObs, tokenPriceObs, tc.latestGasPrice, tc.latestTokenPrices)
+
+ assert.Equal(t, tc.expGasUpdates, gotGas)
+ assert.Equal(t, tc.expTokenUpdates, gotTokens)
+ assert.NoError(t, err)
+ })
+ }
+}
+
+// TestCommitReportingPlugin_isStaleReport verifies the staleness rules for a
+// commit report: an empty report is always stale, and a report carrying a
+// merkle root is stale unless its interval Min equals the commit store's
+// expected next sequence number.
+func TestCommitReportingPlugin_isStaleReport(t *testing.T) {
+	ctx := context.Background()
+	lggr := logger.TestLogger(t)
+	merkleRoot1 := utils.Keccak256Fixed([]byte("some merkle root 1"))
+
+	t.Run("empty report", func(t *testing.T) {
+		commitStoreReader := ccipdatamocks.NewCommitStoreReader(t)
+		r := &CommitReportingPlugin{commitStoreReader: commitStoreReader}
+		isStale := r.isStaleReport(ctx, lggr, cciptypes.CommitStoreReport{}, types.ReportTimestamp{})
+		assert.True(t, isStale)
+	})
+
+	t.Run("merkle root", func(t *testing.T) {
+		const expNextSeqNum = uint64(9)
+		commitStoreReader := ccipdatamocks.NewCommitStoreReader(t)
+		commitStoreReader.On("GetExpectedNextSequenceNumber", mock.Anything).Return(expNextSeqNum, nil)
+
+		r := &CommitReportingPlugin{
+			commitStoreReader: commitStoreReader,
+		}
+
+		// Only the interval starting exactly at expNextSeqNum is fresh.
+		testCases := map[string]struct {
+			interval cciptypes.CommitStoreInterval
+			result   bool
+		}{
+			"The nextSeqNumber is equal to the commit store interval Min value": {
+				interval: cciptypes.CommitStoreInterval{Min: expNextSeqNum, Max: expNextSeqNum + 10},
+				result:   false,
+			},
+			"The nextSeqNumber is less than the commit store interval Min value": {
+				interval: cciptypes.CommitStoreInterval{Min: expNextSeqNum + 1, Max: expNextSeqNum + 10},
+				result:   true,
+			},
+			"The nextSeqNumber is greater than the commit store interval Min value": {
+				interval: cciptypes.CommitStoreInterval{Min: expNextSeqNum - 1, Max: expNextSeqNum + 10},
+				result:   true,
+			},
+			"Empty interval": {
+				interval: cciptypes.CommitStoreInterval{},
+				result:   true,
+			},
+		}
+
+		for tcName, tc := range testCases {
+			t.Run(tcName, func(t *testing.T) {
+				assert.Equal(t, tc.result, r.isStaleReport(ctx, lggr, cciptypes.CommitStoreReport{
+					MerkleRoot: merkleRoot1,
+					Interval:   tc.interval,
+				}, types.ReportTimestamp{}))
+			})
+		}
+	})
+}
+
+// TestCommitReportingPlugin_calculateMinMaxSequenceNumbers verifies that the
+// plugin derives the [min, max] interval from the on-ramp send requests queried
+// starting at the commit store's expected next sequence number, and that gaps
+// or out-of-order sequence numbers produce an error.
+func TestCommitReportingPlugin_calculateMinMaxSequenceNumbers(t *testing.T) {
+	testCases := []struct {
+		name              string
+		commitStoreSeqNum uint64
+		msgSeqNums        []uint64
+
+		expQueryMin uint64 // starting seq num that is used in the query to get messages
+		expMin      uint64
+		expMax      uint64
+		expErr      bool
+	}{
+		{
+			name:              "happy flow",
+			commitStoreSeqNum: 9,
+			msgSeqNums:        []uint64{11, 12, 13, 14},
+			expQueryMin:       9,
+			expMin:            11,
+			expMax:            14,
+			expErr:            false,
+		},
+		{
+			name:              "happy flow 2",
+			commitStoreSeqNum: 9,
+			msgSeqNums:        []uint64{11, 12, 13, 14},
+			expQueryMin:       9, // from commit store
+			expMin:            11,
+			expMax:            14,
+			expErr:            false,
+		},
+		{
+			name:              "gap in msg seq nums",
+			commitStoreSeqNum: 10,
+			expQueryMin:       10,
+			msgSeqNums:        []uint64{11, 12, 14},
+			expErr:            true,
+		},
+		{
+			name:              "no new messages",
+			commitStoreSeqNum: 9,
+			msgSeqNums:        []uint64{},
+			expQueryMin:       9,
+			expMin:            0,
+			expMax:            0,
+			expErr:            false,
+		},
+		{
+			name:              "unordered seq nums",
+			commitStoreSeqNum: 9,
+			msgSeqNums:        []uint64{11, 13, 14, 10},
+			expQueryMin:       9,
+			expErr:            true,
+		},
+	}
+
+	ctx := testutils.Context(t)
+	lggr := logger.TestLogger(t)
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			p := &CommitReportingPlugin{}
+			commitStoreReader := ccipdatamocks.NewCommitStoreReader(t)
+			commitStoreReader.On("GetExpectedNextSequenceNumber", mock.Anything).Return(tc.commitStoreSeqNum, nil)
+			p.commitStoreReader = commitStoreReader
+
+			// Build the mocked on-ramp responses from the test case's seq nums.
+			onRampReader := ccipdatamocks.NewOnRampReader(t)
+			var sendReqs []cciptypes.EVM2EVMMessageWithTxMeta
+			for _, seqNum := range tc.msgSeqNums {
+				sendReqs = append(sendReqs, cciptypes.EVM2EVMMessageWithTxMeta{
+					EVM2EVMMessage: cciptypes.EVM2EVMMessage{
+						SequenceNumber: seqNum,
+					},
+				})
+			}
+			onRampReader.On("GetSendRequestsBetweenSeqNums", ctx, tc.expQueryMin, tc.expQueryMin+OnRampMessagesScanLimit, true).Return(sendReqs, nil)
+			p.onRampReader = onRampReader
+
+			minSeqNum, maxSeqNum, _, err := p.calculateMinMaxSequenceNumbers(ctx, lggr)
+			if tc.expErr {
+				assert.Error(t, err)
+				return
+			}
+
+			assert.Equal(t, tc.expMin, minSeqNum)
+			assert.Equal(t, tc.expMax, maxSeqNum)
+		})
+	}
+}
+
+// TestCommitReportingPlugin_getLatestGasPriceUpdate verifies that, per chain
+// selector, only the most recent gas price update from the price registry is
+// kept when multiple updates exist.
+func TestCommitReportingPlugin_getLatestGasPriceUpdate(t *testing.T) {
+	now := time.Now()
+	chainSelector1 := uint64(1234)
+	chainSelector2 := uint64(5678)
+
+	chain1Value := big.NewInt(1000)
+	chain2Value := big.NewInt(2000)
+
+	testCases := []struct {
+		name                 string
+		priceRegistryUpdates []cciptypes.GasPriceUpdate
+		expUpdates           map[uint64]update
+		expErr               bool
+	}{
+		{
+			name: "happy path",
+			priceRegistryUpdates: []cciptypes.GasPriceUpdate{
+				{
+					GasPrice:         cciptypes.GasPrice{DestChainSelector: chainSelector1, Value: chain1Value},
+					TimestampUnixSec: big.NewInt(now.Unix()),
+				},
+			},
+			expUpdates: map[uint64]update{chainSelector1: {timestamp: now, value: chain1Value}},
+			expErr:     false,
+		},
+		{
+			name: "happy path multiple updates",
+			// Older updates per selector must be superseded by newer ones.
+			priceRegistryUpdates: []cciptypes.GasPriceUpdate{
+				{
+					GasPrice:         cciptypes.GasPrice{DestChainSelector: chainSelector1, Value: big.NewInt(1)},
+					TimestampUnixSec: big.NewInt(now.Unix()),
+				},
+				{
+					GasPrice:         cciptypes.GasPrice{DestChainSelector: chainSelector2, Value: big.NewInt(1)},
+					TimestampUnixSec: big.NewInt(now.Add(1 * time.Minute).Unix()),
+				},
+				{
+					GasPrice:         cciptypes.GasPrice{DestChainSelector: chainSelector2, Value: chain2Value},
+					TimestampUnixSec: big.NewInt(now.Add(2 * time.Minute).Unix()),
+				},
+				{
+					GasPrice:         cciptypes.GasPrice{DestChainSelector: chainSelector1, Value: chain1Value},
+					TimestampUnixSec: big.NewInt(now.Add(3 * time.Minute).Unix()),
+				},
+			},
+			expUpdates: map[uint64]update{
+				chainSelector1: {timestamp: now.Add(3 * time.Minute), value: chain1Value},
+				chainSelector2: {timestamp: now.Add(2 * time.Minute), value: chain2Value},
+			},
+			expErr: false,
+		},
+	}
+
+	ctx := testutils.Context(t)
+	lggr := logger.TestLogger(t)
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			p := &CommitReportingPlugin{}
+			p.lggr = lggr
+			priceReg := ccipdatamocks.NewPriceRegistryReader(t)
+			p.destPriceRegistryReader = priceReg
+
+			var events []cciptypes.GasPriceUpdateWithTxMeta
+			for _, update := range tc.priceRegistryUpdates {
+				events = append(events, cciptypes.GasPriceUpdateWithTxMeta{
+					GasPriceUpdate: update,
+				})
+			}
+
+			priceReg.On("GetAllGasPriceUpdatesCreatedAfter", ctx, mock.Anything, 0).Return(events, nil)
+
+			gotUpdates, err := p.getLatestGasPriceUpdate(ctx, now)
+			if tc.expErr {
+				assert.Error(t, err)
+				return
+			}
+			assert.NoError(t, err)
+			assert.Equal(t, len(tc.expUpdates), len(gotUpdates))
+			// Compare at second precision: timestamps round-trip through unix seconds.
+			for selector, gotUpdate := range gotUpdates {
+				assert.Equal(t, tc.expUpdates[selector].timestamp.Truncate(time.Second), gotUpdate.timestamp.Truncate(time.Second))
+				assert.Equal(t, tc.expUpdates[selector].value.Uint64(), gotUpdate.value.Uint64())
+			}
+		})
+	}
+}
+
+// TestCommitReportingPlugin_getLatestTokenPriceUpdates verifies that token
+// price updates read from the price registry are returned per token with their
+// values and (unix-second precision) timestamps preserved.
+func TestCommitReportingPlugin_getLatestTokenPriceUpdates(t *testing.T) {
+	now := time.Now()
+	tk1 := cciptypes.Address(utils.RandomAddress().String())
+	tk2 := cciptypes.Address(utils.RandomAddress().String())
+
+	testCases := []struct {
+		name                 string
+		priceRegistryUpdates []cciptypes.TokenPriceUpdate
+		expUpdates           map[cciptypes.Address]update
+		expErr               bool
+	}{
+		{
+			name: "happy path",
+			priceRegistryUpdates: []cciptypes.TokenPriceUpdate{
+				{
+					TokenPrice: cciptypes.TokenPrice{
+						Token: tk1,
+						Value: big.NewInt(1000),
+					},
+					TimestampUnixSec: big.NewInt(now.Add(1 * time.Minute).Unix()),
+				},
+				{
+					TokenPrice: cciptypes.TokenPrice{
+						Token: tk2,
+						Value: big.NewInt(2000),
+					},
+					TimestampUnixSec: big.NewInt(now.Add(2 * time.Minute).Unix()),
+				},
+			},
+			expUpdates: map[cciptypes.Address]update{
+				tk1: {timestamp: now.Add(1 * time.Minute), value: big.NewInt(1000)},
+				tk2: {timestamp: now.Add(2 * time.Minute), value: big.NewInt(2000)},
+			},
+			expErr: false,
+		},
+	}
+
+	ctx := testutils.Context(t)
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			p := &CommitReportingPlugin{}
+
+			priceReg := ccipdatamocks.NewPriceRegistryReader(t)
+			p.destPriceRegistryReader = priceReg
+
+			var events []cciptypes.TokenPriceUpdateWithTxMeta
+			for _, up := range tc.priceRegistryUpdates {
+				events = append(events, cciptypes.TokenPriceUpdateWithTxMeta{
+					TokenPriceUpdate: up,
+				})
+			}
+
+			priceReg.On("GetTokenPriceUpdatesCreatedAfter", ctx, mock.Anything, 0).Return(events, nil)
+
+			updates, err := p.getLatestTokenPriceUpdates(ctx, now)
+			if tc.expErr {
+				assert.Error(t, err)
+				return
+			}
+			assert.NoError(t, err)
+			assert.Equal(t, len(tc.expUpdates), len(updates))
+			// Compare at second precision: timestamps round-trip through unix seconds.
+			for k, v := range updates {
+				assert.Equal(t, tc.expUpdates[k].timestamp.Truncate(time.Second), v.timestamp.Truncate(time.Second))
+				assert.Equal(t, tc.expUpdates[k].value.Uint64(), v.value.Uint64())
+			}
+		})
+	}
+}
+
+// Test_commitReportSize property-tests (via gopter) that an encoded commit
+// report with an arbitrary 32-byte merkle root and arbitrary interval bounds
+// never exceeds MaxCommitReportLength.
+func Test_commitReportSize(t *testing.T) {
+	testParams := gopter.DefaultTestParameters()
+	testParams.MinSuccessfulTests = 100
+	p := gopter.NewProperties(testParams)
+	p.Property("bounded commit report size", prop.ForAll(func(root []byte, min, max uint64) bool {
+		var root32 [32]byte
+		copy(root32[:], root)
+		rep, err := encodeCommitReport(cciptypes.CommitStoreReport{
+			MerkleRoot:  root32,
+			Interval:    cciptypes.CommitStoreInterval{Min: min, Max: max},
+			TokenPrices: []cciptypes.TokenPrice{},
+			GasPrices: []cciptypes.GasPrice{
+				{
+					DestChainSelector: 1337,
+					Value:             big.NewInt(2000e9), // $2000 per eth * 1gwei = 2000e9
+				},
+			},
+		})
+		require.NoError(t, err)
+		return len(rep) <= MaxCommitReportLength
+	}, gen.SliceOfN(32, gen.UInt8()), gen.UInt64(), gen.UInt64()))
+	p.TestingRun(t)
+}
+
+// Test_calculateIntervalConsensus verifies the f-fault-tolerant consensus over
+// observed commit intervals: min > max observations are rejected, and a
+// non-zero rangeLimit caps the result to max = min + rangeLimit - 1.
+func Test_calculateIntervalConsensus(t *testing.T) {
+	tests := []struct {
+		name       string
+		intervals  []cciptypes.CommitStoreInterval
+		rangeLimit uint64
+		f          int
+		wantMin    uint64
+		wantMax    uint64
+		wantErr    bool
+	}{
+		{"no obs", []cciptypes.CommitStoreInterval{{Min: 0, Max: 0}}, 0, 0, 0, 0, false},
+		{"basic", []cciptypes.CommitStoreInterval{
+			{Min: 9, Max: 14},
+			{Min: 10, Max: 12},
+			{Min: 10, Max: 14},
+		}, 0, 1, 10, 14, false},
+		{"min > max", []cciptypes.CommitStoreInterval{
+			{Min: 9, Max: 4},
+			{Min: 10, Max: 4},
+			{Min: 10, Max: 6},
+		}, 0, 1, 0, 0, true},
+		{
+			// 265 = 10 (consensus min) + 256 (rangeLimit) - 1.
+			"range limit", []cciptypes.CommitStoreInterval{
+				{Min: 10, Max: 100},
+				{Min: 1, Max: 1000},
+			}, 256, 1, 10, 265, false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := calculateIntervalConsensus(tt.intervals, tt.f, tt.rangeLimit)
+			if tt.wantErr {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+			}
+			assert.Equal(t, tt.wantMin, got.Min)
+			assert.Equal(t, tt.wantMax, got.Max)
+		})
+	}
+}
+
+// TestCommitReportToEthTxMeta verifies that an ABI-encoded commit report can be
+// converted back into tx metadata whose SeqNumbers cover the full [min, max]
+// interval of the report, including the single-element interval case.
+func TestCommitReportToEthTxMeta(t *testing.T) {
+	mctx := hashutil.NewKeccak()
+	tree, err := merklemulti.NewTree(mctx, [][32]byte{mctx.Hash([]byte{0xaa})})
+	require.NoError(t, err)
+
+	tests := []struct {
+		name          string
+		min, max      uint64
+		expectedRange []uint64
+	}{
+		{
+			"happy flow",
+			1, 10,
+			[]uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
+		},
+		{
+			"same sequence",
+			1, 1,
+			[]uint64{1},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			report := cciptypes.CommitStoreReport{
+				TokenPrices: []cciptypes.TokenPrice{},
+				GasPrices: []cciptypes.GasPrice{
+					{
+						DestChainSelector: uint64(1337),
+						Value:             big.NewInt(2000e9), // $2000 per eth * 1gwei = 2000e9
+					},
+				},
+				MerkleRoot: tree.Root(),
+				Interval:   cciptypes.CommitStoreInterval{Min: tc.min, Max: tc.max},
+			}
+			out, err := encodeCommitReport(report)
+			require.NoError(t, err)
+
+			fn, err := factory.CommitReportToEthTxMeta(ccipconfig.CommitStore, *semver.MustParse("1.0.0"))
+			require.NoError(t, err)
+			txMeta, err := fn(out)
+			require.NoError(t, err)
+			require.NotNil(t, txMeta)
+			require.EqualValues(t, tc.expectedRange, txMeta.SeqNumbers)
+		})
+	}
+}
+
+// TODO should be removed, tests need to be updated to use the Reader interface.
+// encodeCommitReport ABI-encodes the given commit store report using the
+// v1.2.0 encoder with the v1.0.0 ReportAccepted event inputs; it is only used
+// in tests.
+func encodeCommitReport(report cciptypes.CommitStoreReport) ([]byte, error) {
+	commitStoreABI := abihelpers.MustParseABI(commit_store.CommitStoreABI)
+	return v1_2_0.EncodeCommitReport(abihelpers.MustGetEventInputs(v1_0_0.ReportAccepted, commitStoreABI), report)
+}
diff --git a/core/services/ocr2/plugins/ccip/ccipexec/batching.go b/core/services/ocr2/plugins/ccip/ccipexec/batching.go
new file mode 100644
index 00000000000..b457dd986d4
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipexec/batching.go
@@ -0,0 +1,540 @@
+package ccipexec
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "time"
+
+ mapset "github.com/deckarep/golang-set/v2"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/pkg/errors"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+ "github.com/smartcontractkit/chainlink-common/pkg/merklemulti"
+ "github.com/smartcontractkit/chainlink-common/pkg/types"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/statuschecker"
+)
+
+// BatchContext bundles the inputs and mutable budgets that a BatchingStrategy
+// consumes while assembling an execution batch from a single commit report.
+// availableDataLen, availableGas, aggregateTokenLimit, expectedNonces and
+// tokenDataRemainingDuration are decremented/advanced in place as messages are
+// accepted (see updateBatchContext and performCommonChecks).
+type BatchContext struct {
+	report                     commitReportWithSendRequests
+	inflight                   []InflightInternalExecutionReport
+	inflightAggregateValue     *big.Int
+	lggr                       logger.Logger
+	availableDataLen           int
+	availableGas               uint64
+	expectedNonces             map[cciptypes.Address]uint64
+	sendersNonce               map[cciptypes.Address]uint64
+	sourceTokenPricesUSD       map[cciptypes.Address]*big.Int
+	destTokenPricesUSD         map[cciptypes.Address]*big.Int
+	gasPrice                   *big.Int
+	sourceToDestToken          map[cciptypes.Address]cciptypes.Address
+	aggregateTokenLimit        *big.Int
+	tokenDataRemainingDuration time.Duration
+	tokenDataWorker            tokendata.Worker
+	gasPriceEstimator          prices.GasPriceEstimatorExec
+	destWrappedNative          cciptypes.Address
+	offchainConfig             cciptypes.ExecOffchainConfig
+}
+
+// BatchingStrategy selects which messages from a commit report go into the
+// next execution batch, returning the chosen messages and a per-message status.
+type BatchingStrategy interface {
+	BuildBatch(ctx context.Context, batchCtx *BatchContext) ([]ccip.ObservedMessage, []messageExecStatus)
+}
+
+// BestEffortBatchingStrategy packs as many eligible messages as the batch
+// budgets allow.
+type BestEffortBatchingStrategy struct{}
+
+// ZKOverflowBatchingStrategy limits each batch to a single message and uses
+// the TXM status checker to skip messages with a fatal transaction status.
+type ZKOverflowBatchingStrategy struct {
+	statuschecker statuschecker.CCIPTransactionStatusChecker
+}
+
+func NewBatchingStrategy(batchingStrategyID uint32, statusChecker statuschecker.CCIPTransactionStatusChecker) (BatchingStrategy, error) {
+ var batchingStrategy BatchingStrategy
+ switch batchingStrategyID {
+ case 0:
+ batchingStrategy = &BestEffortBatchingStrategy{}
+ case 1:
+ batchingStrategy = &ZKOverflowBatchingStrategy{
+ statuschecker: statusChecker,
+ }
+ default:
+ return nil, errors.Errorf("unknown batching strategy ID %d", batchingStrategyID)
+ }
+ return batchingStrategy, nil
+}
+
+// BuildBatch implements BestEffortBatchingStrategy: it walks the report's send
+// requests in order, runs the shared eligibility checks on each message, and
+// adds every passing message to the batch (up to the budgets tracked in
+// batchCtx). An unexpected error from the common checks aborts the whole batch
+// and returns empty results.
+func (s *BestEffortBatchingStrategy) BuildBatch(
+	ctx context.Context,
+	batchCtx *BatchContext,
+) ([]ccip.ObservedMessage, []messageExecStatus) {
+	batchBuilder := newBatchBuildContainer(len(batchCtx.report.sendRequestsWithMeta))
+	for _, msg := range batchCtx.report.sendRequestsWithMeta {
+		msgLggr := batchCtx.lggr.With("messageID", hexutil.Encode(msg.MessageID[:]), "seqNr", msg.SequenceNumber)
+		status, messageMaxGas, tokenData, msgValue, err := performCommonChecks(ctx, batchCtx, msg, msgLggr)
+
+		if err != nil {
+			return []ccip.ObservedMessage{}, []messageExecStatus{}
+		}
+
+		if status.shouldBeSkipped() {
+			batchBuilder.skip(msg, status)
+			continue
+		}
+
+		// Message accepted: charge it against the batch budgets and record it.
+		updateBatchContext(batchCtx, msg, messageMaxGas, msgValue, msgLggr)
+		batchBuilder.addToBatch(msg, tokenData)
+	}
+	return batchBuilder.batch, batchBuilder.statuses
+}
+
+// BuildBatch implements ZKOverflowBatchingStrategy, a batching strategy for ZK
+// chains overflowing under certain conditions.
+// It is a simple batching strategy that only allows one message to be added to the batch.
+// TXM is used to perform the ZK check: if the message failed the check, it will be skipped.
+func (bs ZKOverflowBatchingStrategy) BuildBatch(
+	ctx context.Context,
+	batchCtx *BatchContext,
+) ([]ccip.ObservedMessage, []messageExecStatus) {
+	batchBuilder := newBatchBuildContainer(len(batchCtx.report.sendRequestsWithMeta))
+	inflightSeqNums := getInflightSeqNums(batchCtx.inflight)
+
+	for _, msg := range batchCtx.report.sendRequestsWithMeta {
+		msgId := hexutil.Encode(msg.MessageID[:])
+		msgLggr := batchCtx.lggr.With("messageID", msgId, "seqNr", msg.SequenceNumber)
+
+		// Check if msg is inflight
+		if exists := inflightSeqNums.Contains(msg.SequenceNumber); exists {
+			// Message is inflight, skip it
+			msgLggr.Infow("Skipping message - already inflight", "message", msgId)
+			batchBuilder.skip(msg, SkippedInflight)
+			continue
+		}
+		// Message is not inflight, continue with checks
+		// Check if the message is overflown using TXM
+		statuses, count, err := bs.statuschecker.CheckMessageStatus(ctx, msgId)
+		if err != nil {
+			// Status lookup failed: skip this message rather than abort the batch.
+			batchBuilder.skip(msg, TXMCheckError)
+			continue
+		}
+
+		msgLggr.Infow("TXM check result", "statuses", statuses, "count", count)
+
+		if len(statuses) == 0 {
+			// No status found for message = first time we see it
+			msgLggr.Infow("No status found for message - proceeding with checks", "message", msgId)
+		} else {
+			// Status(es) found for message = check if any of them is final to decide if we should add it to the batch
+			hasFatalStatus := false
+			for _, s := range statuses {
+				if s == types.Fatal {
+					msgLggr.Infow("Skipping message - found a fatal TXM status", "message", msgId)
+					batchBuilder.skip(msg, TXMFatalStatus)
+					hasFatalStatus = true
+					break
+				}
+			}
+			if hasFatalStatus {
+				continue
+			}
+			msgLggr.Infow("No fatal status found for message - proceeding with checks", "message", msgId)
+		}
+
+		status, messageMaxGas, tokenData, msgValue, err := performCommonChecks(ctx, batchCtx, msg, msgLggr)
+
+		if err != nil {
+			return []ccip.ObservedMessage{}, []messageExecStatus{}
+		}
+
+		if status.shouldBeSkipped() {
+			batchBuilder.skip(msg, status)
+			continue
+		}
+
+		updateBatchContext(batchCtx, msg, messageMaxGas, msgValue, msgLggr)
+		msgLggr.Infow("Adding message to batch", "message", msgId)
+		batchBuilder.addToBatch(msg, tokenData)
+
+		// Batch size is limited to 1 for ZK Overflow chains
+		break
+	}
+	return batchBuilder.batch, batchBuilder.statuses
+}
+
+// performCommonChecks runs the per-message eligibility checks shared by all
+// batching strategies: already-executed, data/gas budgets, nonce ordering,
+// aggregate token value limits, offchain token data availability, and whether
+// the (wait-boosted) fee paid on source covers the estimated execution cost.
+// It returns the resulting messageStatus, the message's max gas, its offchain
+// token data, its aggregate USD token value, and a non-nil error only for
+// unexpected failures (cost estimation), which aborts the whole batch.
+// Note: it mutates batchCtx.expectedNonces and batchCtx.tokenDataRemainingDuration.
+func performCommonChecks(
+	ctx context.Context,
+	batchCtx *BatchContext,
+	msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta,
+	msgLggr logger.Logger,
+) (messageStatus, uint64, [][]byte, *big.Int, error) {
+	if msg.Executed {
+		msgLggr.Infow("Skipping message - already executed")
+		return AlreadyExecuted, 0, nil, nil, nil
+	}
+
+	if len(msg.Data) > batchCtx.availableDataLen {
+		msgLggr.Infow("Skipping message - insufficient remaining batch data length", "msgDataLen", len(msg.Data), "availableBatchDataLen", batchCtx.availableDataLen)
+		return InsufficientRemainingBatchDataLength, 0, nil, nil, nil
+	}
+
+	messageMaxGas, err1 := calculateMessageMaxGas(
+		msg.GasLimit,
+		len(batchCtx.report.sendRequestsWithMeta),
+		len(msg.Data),
+		len(msg.TokenAmounts),
+	)
+	if err1 != nil {
+		msgLggr.Errorw("Skipping message - message max gas calculation error", "err", err1)
+		return MessageMaxGasCalcError, 0, nil, nil, nil
+	}
+
+	// Check sufficient gas in batch
+	if batchCtx.availableGas < messageMaxGas {
+		msgLggr.Infow("Skipping message - insufficient remaining batch gas limit", "availableGas", batchCtx.availableGas, "messageMaxGas", messageMaxGas)
+		return InsufficientRemainingBatchGas, 0, nil, nil, nil
+	}
+
+	// Seed the expected nonce for a sender we have not seen yet in this batch
+	// from the off-ramp's recorded sender nonce.
+	if _, ok := batchCtx.expectedNonces[msg.Sender]; !ok {
+		nonce, ok1 := batchCtx.sendersNonce[msg.Sender]
+		if !ok1 {
+			msgLggr.Errorw("Skipping message - missing nonce", "sender", msg.Sender)
+			return MissingNonce, 0, nil, nil, nil
+		}
+		batchCtx.expectedNonces[msg.Sender] = nonce + 1
+	}
+
+	// Check expected nonce is valid for sequenced messages.
+	// Sequenced messages have non-zero nonces.
+	if msg.Nonce > 0 && msg.Nonce != batchCtx.expectedNonces[msg.Sender] {
+		msgLggr.Warnw("Skipping message - invalid nonce", "have", msg.Nonce, "want", batchCtx.expectedNonces[msg.Sender])
+		return InvalidNonce, 0, nil, nil, nil
+	}
+
+	msgValue, err1 := aggregateTokenValue(batchCtx.lggr, batchCtx.destTokenPricesUSD, batchCtx.sourceToDestToken, msg.TokenAmounts)
+	if err1 != nil {
+		msgLggr.Errorw("Skipping message - aggregate token value compute error", "err", err1)
+		return AggregateTokenValueComputeError, 0, nil, nil, nil
+	}
+
+	// if token limit is smaller than message value skip message
+	if tokensLeft, hasCapacity := hasEnoughTokens(batchCtx.aggregateTokenLimit, msgValue, batchCtx.inflightAggregateValue); !hasCapacity {
+		msgLggr.Warnw("Skipping message - aggregate token limit exceeded", "aggregateTokenLimit", tokensLeft.String(), "msgValue", msgValue.String())
+		return AggregateTokenLimitExceeded, 0, nil, nil, nil
+	}
+
+	// The fetch is bounded by the remaining token-data budget; the elapsed time
+	// is charged against that budget for subsequent messages.
+	tokenData, elapsed, err1 := getTokenDataWithTimeout(ctx, msg, batchCtx.tokenDataRemainingDuration, batchCtx.tokenDataWorker)
+	batchCtx.tokenDataRemainingDuration -= elapsed
+	if err1 != nil {
+		if errors.Is(err1, tokendata.ErrNotReady) {
+			msgLggr.Warnw("Skipping message - token data not ready", "err", err1)
+			return TokenDataNotReady, 0, nil, nil, nil
+		}
+		msgLggr.Errorw("Skipping message - token data fetch error", "err", err1)
+		return TokenDataFetchError, 0, nil, nil, nil
+	}
+
+	dstWrappedNativePrice, exists := batchCtx.destTokenPricesUSD[batchCtx.destWrappedNative]
+	if !exists {
+		msgLggr.Errorw("Skipping message - token not in destination token prices", "token", batchCtx.destWrappedNative)
+		return TokenNotInDestTokenPrices, 0, nil, nil, nil
+	}
+
+	// calculating the source chain fee, dividing by 1e18 for denomination.
+	// For example:
+	// FeeToken=link; FeeTokenAmount=1e17 i.e. 0.1 link, price is 6e18 USD/link (1 USD = 1e18),
+	// availableFee is 1e17*6e18/1e18 = 6e17 = 0.6 USD
+	sourceFeeTokenPrice, exists := batchCtx.sourceTokenPricesUSD[msg.FeeToken]
+	if !exists {
+		msgLggr.Errorw("Skipping message - token not in source token prices", "token", msg.FeeToken)
+		return TokenNotInSrcTokenPrices, 0, nil, nil, nil
+	}
+
+	// Fee boosting
+	execCostUsd, err1 := batchCtx.gasPriceEstimator.EstimateMsgCostUSD(batchCtx.gasPrice, dstWrappedNativePrice, msg)
+	if err1 != nil {
+		msgLggr.Errorw("Failed to estimate message cost USD", "err", err1)
+		return "", 0, nil, nil, errors.New("failed to estimate message cost USD")
+	}
+
+	availableFee := big.NewInt(0).Mul(msg.FeeTokenAmount, sourceFeeTokenPrice)
+	availableFee = availableFee.Div(availableFee, big.NewInt(1e18))
+	availableFeeUsd := waitBoostedFee(time.Since(msg.BlockTimestamp), availableFee, batchCtx.offchainConfig.RelativeBoostPerWaitHour)
+	if availableFeeUsd.Cmp(execCostUsd) < 0 {
+		msgLggr.Infow(
+			"Skipping message - insufficient remaining fee",
+			"availableFeeUsd", availableFeeUsd,
+			"execCostUsd", execCostUsd,
+			"sourceBlockTimestamp", msg.BlockTimestamp,
+			"waitTime", time.Since(msg.BlockTimestamp),
+			"boost", batchCtx.offchainConfig.RelativeBoostPerWaitHour,
+		)
+		return InsufficientRemainingFee, 0, nil, nil, nil
+	}
+
+	return SuccesfullyValidated, messageMaxGas, tokenData, msgValue, nil
+}
+
+// getTokenDataWithTimeout gets the token data for the provided message.
+// Stops and returns an error if more than the given timeout passes. It also
+// returns the elapsed fetch duration so the caller can decrement its remaining
+// token-data time budget. Messages without token amounts return immediately.
+func getTokenDataWithTimeout(
+	ctx context.Context,
+	msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta,
+	timeout time.Duration,
+	tokenDataWorker tokendata.Worker,
+) ([][]byte, time.Duration, error) {
+	if len(msg.TokenAmounts) == 0 {
+		return nil, 0, nil
+	}
+
+	ctxTimeout, cf := context.WithTimeout(ctx, timeout)
+	defer cf()
+	tStart := time.Now()
+	tokenData, err := tokenDataWorker.GetMsgTokenData(ctxTimeout, msg)
+	tDur := time.Since(tStart)
+	return tokenData, tDur, err
+}
+
+// getProofData loads the send requests covered by the given commit interval,
+// validates that they span exactly that interval, and builds the merkle tree
+// over their hashes that is later used to produce execution proofs.
+func getProofData(
+	ctx context.Context,
+	sourceReader ccipdata.OnRampReader,
+	interval cciptypes.CommitStoreInterval,
+) (sendReqsInRoot []cciptypes.EVM2EVMMessageWithTxMeta, leaves [][32]byte, tree *merklemulti.Tree[[32]byte], err error) {
+	// We don't need to double-check if logs are finalized because we already checked that in the Commit phase.
+	sendReqs, err := sourceReader.GetSendRequestsBetweenSeqNums(ctx, interval.Min, interval.Max, false)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	if err1 := validateSendRequests(sendReqs, interval); err1 != nil {
+		return nil, nil, nil, err1
+	}
+
+	leaves = make([][32]byte, 0, len(sendReqs))
+	for _, req := range sendReqs {
+		leaves = append(leaves, req.Hash)
+	}
+	tree, err = merklemulti.NewTree(hashutil.NewKeccak(), leaves)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	return sendReqs, leaves, tree, nil
+}
+
+// validateSendRequests checks that sendReqs is non-empty and that the sequence
+// numbers it contains span exactly the provided commit interval.
+func validateSendRequests(sendReqs []cciptypes.EVM2EVMMessageWithTxMeta, interval cciptypes.CommitStoreInterval) error {
+	if len(sendReqs) == 0 {
+		return fmt.Errorf("could not find any requests in the provided interval %v", interval)
+	}
+
+	// Track the observed min/max over all sequence numbers.
+	observedMin := sendReqs[0].SequenceNumber
+	observedMax := sendReqs[0].SequenceNumber
+	for _, req := range sendReqs[1:] {
+		seqNr := req.SequenceNumber
+		if seqNr < observedMin {
+			observedMin = seqNr
+		}
+		if seqNr > observedMax {
+			observedMax = seqNr
+		}
+	}
+
+	gotInterval := cciptypes.CommitStoreInterval{Min: observedMin, Max: observedMax}
+	if gotInterval.Min != interval.Min || gotInterval.Max != interval.Max {
+		return fmt.Errorf("interval %v is not the expected %v", gotInterval, interval)
+	}
+	return nil
+}
+
+// getInflightSeqNums collects the sequence numbers of every message contained
+// in the given in-flight execution reports into a single set, so strategies
+// can cheaply skip messages that are already being executed.
+func getInflightSeqNums(inflight []InflightInternalExecutionReport) mapset.Set[uint64] {
+	seqNums := mapset.NewSet[uint64]()
+	for _, report := range inflight {
+		for _, msg := range report.messages {
+			seqNums.Add(msg.SequenceNumber)
+		}
+	}
+	return seqNums
+}
+
+// aggregateTokenValue sums the USD value (1e18-denominated) of the given token
+// amounts, pricing each token on the destination chain via the source->dest
+// token mapping. Tokens without a known price are logged and counted as 0.
+func aggregateTokenValue(lggr logger.Logger, destTokenPricesUSD map[cciptypes.Address]*big.Int, sourceToDest map[cciptypes.Address]cciptypes.Address, tokensAndAmount []cciptypes.TokenAmount) (*big.Int, error) {
+	total := big.NewInt(0)
+	for _, tokenAmount := range tokensAndAmount {
+		price, found := destTokenPricesUSD[sourceToDest[tokenAmount.Token]]
+		if !found {
+			// If we don't have a price for the token, we will assume it's worth 0.
+			lggr.Infof("No price for token %s, assuming 0", tokenAmount.Token)
+			continue
+		}
+		// value = price * amount / 1e18 (truncating division, as in big.Int Quo).
+		value := new(big.Int).Mul(price, tokenAmount.Amount)
+		total.Add(total, value.Quo(value, big.NewInt(1e18)))
+	}
+	return total, nil
+}
+
+// updateBatchContext charges a just-accepted message against the batch budgets
+// (gas, data length, aggregate token value) and advances the sender's expected
+// nonce for sequenced (non-zero nonce) messages. Callers must have already
+// verified the budgets are sufficient (see performCommonChecks).
+func updateBatchContext(
+	batchCtx *BatchContext,
+	msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta,
+	messageMaxGas uint64,
+	msgValue *big.Int,
+	msgLggr logger.Logger) {
+	batchCtx.availableGas -= messageMaxGas
+	batchCtx.availableDataLen -= len(msg.Data)
+	batchCtx.aggregateTokenLimit.Sub(batchCtx.aggregateTokenLimit, msgValue)
+	if msg.Nonce > 0 {
+		batchCtx.expectedNonces[msg.Sender] = msg.Nonce + 1
+	}
+
+	msgLggr.Infow(
+		"Message successfully added to execution batch",
+		"nonce", msg.Nonce,
+		"sender", msg.Sender,
+		"value", msgValue,
+		"availableAggrTokenLimit", batchCtx.aggregateTokenLimit,
+		"availableGas", batchCtx.availableGas,
+		"availableDataLen", batchCtx.availableDataLen,
+	)
+}
+
+// hasEnoughTokens reports whether the remaining aggregate token capacity
+// (tokenLimit minus the value already in flight) can cover msgValue, returning
+// the remaining capacity alongside the verdict.
+func hasEnoughTokens(tokenLimit *big.Int, msgValue *big.Int, inflightValue *big.Int) (*big.Int, bool) {
+	remaining := new(big.Int).Sub(tokenLimit, inflightValue)
+	return remaining, remaining.Cmp(msgValue) >= 0
+}
+
+// buildExecutionReportForMessages assembles an ExecReport for the observed
+// messages that fall inside commitInterval, pairing each with its offchain
+// token data and a multi-merkle proof from the commit root's tree. Observed
+// messages outside the interval are silently dropped (only one root per
+// report); an observed seq num that maps outside msgsInRoot is an error.
+func buildExecutionReportForMessages(
+	msgsInRoot []cciptypes.EVM2EVMMessageWithTxMeta,
+	tree *merklemulti.Tree[[32]byte],
+	commitInterval cciptypes.CommitStoreInterval,
+	observedMessages []ccip.ObservedMessage,
+) (cciptypes.ExecReport, error) {
+	innerIdxs := make([]int, 0, len(observedMessages))
+	var messages []cciptypes.EVM2EVMMessage
+	var offchainTokenData [][][]byte
+	for _, observedMessage := range observedMessages {
+		if observedMessage.SeqNr < commitInterval.Min || observedMessage.SeqNr > commitInterval.Max {
+			// We only return messages from a single root (the root of the first message).
+			continue
+		}
+		// Position of the message inside the root: seq nums are contiguous from Min.
+		innerIdx := int(observedMessage.SeqNr - commitInterval.Min)
+		if innerIdx >= len(msgsInRoot) || innerIdx < 0 {
+			return cciptypes.ExecReport{}, fmt.Errorf("invalid inneridx SeqNr=%d IntervalMin=%d msgsInRoot=%d",
+				observedMessage.SeqNr, commitInterval.Min, len(msgsInRoot))
+		}
+		messages = append(messages, msgsInRoot[innerIdx].EVM2EVMMessage)
+		offchainTokenData = append(offchainTokenData, observedMessage.TokenData)
+		innerIdxs = append(innerIdxs, innerIdx)
+	}
+
+	merkleProof, err := tree.Prove(innerIdxs)
+	if err != nil {
+		return cciptypes.ExecReport{}, err
+	}
+
+	// any capped proof will have length <= this one, so we reuse it to avoid proving inside loop, and update later if changed
+	return cciptypes.ExecReport{
+		Messages:          messages,
+		Proofs:            merkleProof.Hashes,
+		ProofFlagBits:     abihelpers.ProofFlagsToBits(merkleProof.SourceFlags),
+		OffchainTokenData: offchainTokenData,
+	}, nil
+}
+
+// validateSeqNumbers validates that the given message observations do not
+// exceed the committed sequence numbers in the commitStoreReader: every
+// observed seq num must be strictly below the store's expected next seq num.
+func validateSeqNumbers(serviceCtx context.Context, commitStore ccipdata.CommitStoreReader, observedMessages []ccip.ObservedMessage) error {
+	nextMin, err := commitStore.GetExpectedNextSequenceNumber(serviceCtx)
+	if err != nil {
+		return err
+	}
+	// observedMessages are always sorted by SeqNr and never empty, so it's safe to take last element
+	maxSeqNumInBatch := observedMessages[len(observedMessages)-1].SeqNr
+
+	if maxSeqNumInBatch >= nextMin {
+		return errors.Errorf("Cannot execute uncommitted seq num. nextMin %v, seqNums %v", nextMin, observedMessages)
+	}
+	return nil
+}
+
+// getCommitReportForSeqNum gets the commit report from the saved logs for a
+// given sequence number, returning the first matching accepted report or an
+// error if none has been committed yet.
+func getCommitReportForSeqNum(ctx context.Context, commitStoreReader ccipdata.CommitStoreReader, seqNum uint64) (cciptypes.CommitStoreReport, error) {
+	acceptedReports, err := commitStoreReader.GetCommitReportMatchingSeqNum(ctx, seqNum, 0)
+	if err != nil {
+		return cciptypes.CommitStoreReport{}, err
+	}
+
+	if len(acceptedReports) == 0 {
+		return cciptypes.CommitStoreReport{}, errors.Errorf("seq number not committed")
+	}
+
+	return acceptedReports[0].CommitStoreReport, nil
+}
+
+// messageStatus is the outcome of the batching eligibility checks for a single
+// message; one is recorded per message in messageExecStatus.
+type messageStatus string
+
+const (
+	// NOTE(review): "Succesfully" is a typo for "Successfully"; renaming the
+	// identifier would touch call sites outside this file, so it is kept as-is.
+	SuccesfullyValidated                 messageStatus = "successfully_validated"
+	AlreadyExecuted                      messageStatus = "already_executed"
+	SenderAlreadySkipped                 messageStatus = "sender_already_skipped"
+	MessageMaxGasCalcError               messageStatus = "message_max_gas_calc_error"
+	InsufficientRemainingBatchDataLength messageStatus = "insufficient_remaining_batch_data_length"
+	InsufficientRemainingBatchGas        messageStatus = "insufficient_remaining_batch_gas"
+	MissingNonce                         messageStatus = "missing_nonce"
+	InvalidNonce                         messageStatus = "invalid_nonce"
+	AggregateTokenValueComputeError      messageStatus = "aggregate_token_value_compute_error"
+	AggregateTokenLimitExceeded          messageStatus = "aggregate_token_limit_exceeded"
+	TokenDataNotReady                    messageStatus = "token_data_not_ready"
+	TokenDataFetchError                  messageStatus = "token_data_fetch_error"
+	TokenNotInDestTokenPrices            messageStatus = "token_not_in_dest_token_prices"
+	TokenNotInSrcTokenPrices             messageStatus = "token_not_in_src_token_prices"
+	InsufficientRemainingFee             messageStatus = "insufficient_remaining_fee"
+	AddedToBatch                         messageStatus = "added_to_batch"
+	TXMCheckError                        messageStatus = "txm_check_error"
+	TXMFatalStatus                       messageStatus = "txm_fatal_status"
+	SkippedInflight                      messageStatus = "skipped_inflight"
+)
+
+// shouldBeSkipped reports whether the message must be excluded from the batch:
+// every status except SuccesfullyValidated is a skip.
+func (m messageStatus) shouldBeSkipped() bool {
+	return m != SuccesfullyValidated
+}
+
+// messageExecStatus records the batching outcome for one message: its sequence
+// number, hex-encoded message ID, and the messageStatus that was assigned.
+type messageExecStatus struct {
+	SeqNr     uint64
+	MessageId string
+	Status    messageStatus
+}
+
+// newMessageExecState builds a messageExecStatus, hex-encoding the message ID.
+func newMessageExecState(seqNr uint64, messageId cciptypes.Hash, status messageStatus) messageExecStatus {
+	return messageExecStatus{
+		SeqNr:     seqNr,
+		MessageId: hexutil.Encode(messageId[:]),
+		Status:    status,
+	}
+}
+
+// batchBuildContainer accumulates the messages chosen for a batch alongside a
+// status entry for every message considered (both added and skipped).
+type batchBuildContainer struct {
+	batch    []ccip.ObservedMessage
+	statuses []messageExecStatus
+}
+
+// newBatchBuildContainer pre-allocates both slices for the given capacity.
+func newBatchBuildContainer(capacity int) *batchBuildContainer {
+	return &batchBuildContainer{
+		batch:    make([]ccip.ObservedMessage, 0, capacity),
+		statuses: make([]messageExecStatus, 0, capacity),
+	}
+}
+
+// skip records a status for a message that is excluded from the batch.
+func (m *batchBuildContainer) skip(msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, status messageStatus) {
+	m.addState(msg, status)
+}
+
+// addToBatch records an AddedToBatch status and appends the message (with its
+// offchain token data) to the batch.
+func (m *batchBuildContainer) addToBatch(msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, tokenData [][]byte) {
+	m.addState(msg, AddedToBatch)
+	m.batch = append(m.batch, ccip.NewObservedMessage(msg.SequenceNumber, tokenData))
+}
+
+// addState appends a messageExecStatus entry for the given message and state.
+func (m *batchBuildContainer) addState(msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, state messageStatus) {
+	m.statuses = append(m.statuses, newMessageExecState(msg.SequenceNumber, msg.MessageID, state))
+}
diff --git a/core/services/ocr2/plugins/ccip/ccipexec/batching_test.go b/core/services/ocr2/plugins/ccip/ccipexec/batching_test.go
new file mode 100644
index 00000000000..3647556a6d5
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipexec/batching_test.go
@@ -0,0 +1,910 @@
+package ccipexec
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "math"
+ "math/big"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/types"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata"
+ mockstatuschecker "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/statuschecker/mocks"
+)
+
// testCase describes one batching-strategy scenario: the incoming send
// requests, chain/price state fed into the BatchContext, and the expected
// batch plus per-message statuses.
type testCase struct {
	name string
	reqs []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta // candidate messages for the batch
	inflight []InflightInternalExecutionReport // reports already in flight
	tokenLimit, destGasPrice, inflightAggregateValue *big.Int
	srcPrices, dstPrices map[cciptypes.Address]*big.Int // token prices on source/dest chains
	offRampNoncesBySender map[cciptypes.Address]uint64 // current offramp nonce per sender
	srcToDestTokens map[cciptypes.Address]cciptypes.Address // source->dest token mapping
	expectedSeqNrs []ccip.ObservedMessage // expected batch; nil means empty batch
	expectedStates []messageExecStatus // expected per-message statuses; nil means none
	statuschecker func(m *mockstatuschecker.CCIPTransactionStatusChecker) // optional TXM status-checker mock setup (ZK strategy only)
	skipGasPriceEstimator bool // when true, no gas-price-estimator expectations are set
}
+
+func Test_NewBatchingStrategy(t *testing.T) {
+ t.Parallel()
+
+ mockStatusChecker := mockstatuschecker.NewCCIPTransactionStatusChecker(t)
+
+ testCases := []int{0, 1, 2}
+
+ for _, batchingStrategyId := range testCases {
+ factory, err := NewBatchingStrategy(uint32(batchingStrategyId), mockStatusChecker)
+ if batchingStrategyId == 2 {
+ assert.Error(t, err)
+ } else {
+ assert.NotNil(t, factory)
+ assert.NoError(t, err)
+ }
+ }
+}
+
+func Test_validateSendRequests(t *testing.T) {
+ testCases := []struct {
+ name string
+ seqNums []uint64
+ providedInterval cciptypes.CommitStoreInterval
+ expErr bool
+ }{
+ {
+ name: "zero interval no seq nums",
+ seqNums: nil,
+ providedInterval: cciptypes.CommitStoreInterval{Min: 0, Max: 0},
+ expErr: true,
+ },
+ {
+ name: "exp 1 seq num got none",
+ seqNums: nil,
+ providedInterval: cciptypes.CommitStoreInterval{Min: 1, Max: 1},
+ expErr: true,
+ },
+ {
+ name: "exp 10 seq num got none",
+ seqNums: nil,
+ providedInterval: cciptypes.CommitStoreInterval{Min: 1, Max: 10},
+ expErr: true,
+ },
+ {
+ name: "got 1 seq num as expected",
+ seqNums: []uint64{1},
+ providedInterval: cciptypes.CommitStoreInterval{Min: 1, Max: 1},
+ expErr: false,
+ },
+ {
+ name: "got 5 seq num as expected",
+ seqNums: []uint64{11, 12, 13, 14, 15},
+ providedInterval: cciptypes.CommitStoreInterval{Min: 11, Max: 15},
+ expErr: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ sendReqs := make([]cciptypes.EVM2EVMMessageWithTxMeta, 0, len(tc.seqNums))
+ for _, seqNum := range tc.seqNums {
+ sendReqs = append(sendReqs, cciptypes.EVM2EVMMessageWithTxMeta{
+ EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: seqNum},
+ })
+ }
+ err := validateSendRequests(sendReqs, tc.providedInterval)
+ if tc.expErr {
+ assert.Error(t, err)
+ return
+ }
+ assert.NoError(t, err)
+ })
+ }
+}
+
// delayedTokenDataWorker is a tokendata.Worker test double whose
// GetMsgTokenData blocks for a fixed delay before returning, used to
// exercise timeout handling.
type delayedTokenDataWorker struct {
	delay time.Duration // how long GetMsgTokenData sleeps before returning
	tokendata.Worker
}
+
// GetMsgTokenData sleeps for the configured delay and then returns nil token
// data plus ctx.Err() — i.e. an error only if ctx was cancelled or timed out
// while sleeping. The msg argument is ignored.
func (m delayedTokenDataWorker) GetMsgTokenData(ctx context.Context, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) ([][]byte, error) {
	time.Sleep(m.delay)
	return nil, ctx.Err()
}
+
+func TestExecutionReportingPlugin_getTokenDataWithCappedLatency(t *testing.T) {
+ testCases := []struct {
+ name string
+ allowedWaitingTime time.Duration
+ workerLatency time.Duration
+ expErr bool
+ }{
+ {
+ name: "happy flow",
+ allowedWaitingTime: 10 * time.Millisecond,
+ workerLatency: time.Nanosecond,
+ expErr: false,
+ },
+ {
+ name: "worker takes long to reply",
+ allowedWaitingTime: 10 * time.Millisecond,
+ workerLatency: 20 * time.Millisecond,
+ expErr: true,
+ },
+ }
+
+ ctx := testutils.Context(t)
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ tokenDataWorker := delayedTokenDataWorker{delay: tc.workerLatency}
+
+ msg := cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+ EVM2EVMMessage: cciptypes.EVM2EVMMessage{TokenAmounts: make([]cciptypes.TokenAmount, 1)},
+ }
+
+ _, _, err := getTokenDataWithTimeout(ctx, msg, tc.allowedWaitingTime, tokenDataWorker)
+ if tc.expErr {
+ assert.Error(t, err)
+ return
+ }
+ assert.NoError(t, err)
+ })
+ }
+}
+
// TestBatchingStrategies runs a shared table of batching scenarios against
// both BestEffortBatchingStrategy and ZKOverflowBatchingStrategy, plus
// strategy-specific cases for each. Fixture data is load-bearing: expected
// statuses/batches depend on exact nonces, prices, limits and gas values.
func TestBatchingStrategies(t *testing.T) {
	sender1 := ccipcalc.HexToAddress("0xa")
	destNative := ccipcalc.HexToAddress("0xb")
	srcNative := ccipcalc.HexToAddress("0xc")

	// Base message and variants: executed, executed+finalized, with tokens,
	// and a follow-up message with bumped seqNr/nonce.
	msg1 := createTestMessage(1, sender1, 1, srcNative, big.NewInt(1e9), false, nil)

	msg2 := msg1
	msg2.Executed = true

	msg3 := msg1
	msg3.Executed = true
	msg3.Finalized = true

	msg4 := msg1
	msg4.TokenAmounts = []cciptypes.TokenAmount{
		{Token: srcNative, Amount: big.NewInt(100)},
	}

	msg5 := msg4
	msg5.SequenceNumber = msg5.SequenceNumber + 1
	msg5.Nonce = msg5.Nonce + 1

	// Nonce-0 (unordered) messages used by the ZK-overflow cases.
	zkMsg1 := createTestMessage(1, sender1, 0, srcNative, big.NewInt(1e9), false, nil)
	zkMsg2 := createTestMessage(2, sender1, 0, srcNative, big.NewInt(1e9), false, nil)
	zkMsg3 := createTestMessage(3, sender1, 0, srcNative, big.NewInt(1e9), false, nil)
	zkMsg4 := createTestMessage(4, sender1, 0, srcNative, big.NewInt(1e9), false, nil)

	// Cases run against BOTH strategies.
	testCases := []testCase{
		{
			name:                   "single message no tokens",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg1},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedSeqNrs:         []ccip.ObservedMessage{{SeqNr: uint64(1)}},
			expectedStates:         []messageExecStatus{newMessageExecState(msg1.SequenceNumber, msg1.MessageID, AddedToBatch)},
		},
		{
			name:                   "gasPriceEstimator returns error",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg1},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
		},
		{
			name:                   "executed non finalized messages should be skipped",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg2},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedStates:         []messageExecStatus{newMessageExecState(msg2.SequenceNumber, msg2.MessageID, AlreadyExecuted)},
			skipGasPriceEstimator:  true,
		},
		{
			name:                   "finalized executed log",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg3},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedStates:         []messageExecStatus{newMessageExecState(msg3.SequenceNumber, msg3.MessageID, AlreadyExecuted)},
			skipGasPriceEstimator:  true,
		},
		{
			name:                   "dst token price does not exist",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg1},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedStates:         []messageExecStatus{newMessageExecState(msg1.SequenceNumber, msg1.MessageID, TokenNotInDestTokenPrices)},
			skipGasPriceEstimator:  true,
		},
		{
			name:                   "src token price does not exist",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg1},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedStates:         []messageExecStatus{newMessageExecState(msg1.SequenceNumber, msg1.MessageID, TokenNotInSrcTokenPrices)},
			skipGasPriceEstimator:  true,
		},
		{
			name:                   "message with tokens is not executed if limit is reached",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg4},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(99),
			destGasPrice:           big.NewInt(1),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1e18)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1e18)},
			srcToDestTokens: map[cciptypes.Address]cciptypes.Address{
				srcNative: destNative,
			},
			offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0},
			expectedStates:        []messageExecStatus{newMessageExecState(msg4.SequenceNumber, msg4.MessageID, AggregateTokenLimitExceeded)},
			skipGasPriceEstimator: true,
		},
		{
			name:                   "message with tokens is not executed if limit is reached when inflight is full",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg5},
			inflight:               []InflightInternalExecutionReport{{createdAt: time.Now(), messages: []cciptypes.EVM2EVMMessage{msg4.EVM2EVMMessage}}},
			inflightAggregateValue: big.NewInt(100),
			tokenLimit:             big.NewInt(50),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1e18)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1e18)},
			srcToDestTokens: map[cciptypes.Address]cciptypes.Address{
				srcNative: destNative,
			},
			offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 1},
			expectedStates:        []messageExecStatus{newMessageExecState(msg5.SequenceNumber, msg5.MessageID, AggregateTokenLimitExceeded)},
			skipGasPriceEstimator: true,
		},
		{
			name:                   "skip when nonce doesn't match chain value",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg1},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 123},
			expectedStates:         []messageExecStatus{newMessageExecState(msg1.SequenceNumber, msg1.MessageID, InvalidNonce)},
			skipGasPriceEstimator:  true,
		},
		{
			name:                   "skip when nonce not found",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg1},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{},
			expectedStates:         []messageExecStatus{newMessageExecState(msg1.SequenceNumber, msg1.MessageID, MissingNonce)},
			skipGasPriceEstimator:  true,
		},
		{
			name: "unordered messages",
			reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				{
					EVM2EVMMessage: cciptypes.EVM2EVMMessage{
						SequenceNumber: 10,
						FeeTokenAmount: big.NewInt(1e9),
						Sender:         sender1,
						Nonce:          0,
						GasLimit:       big.NewInt(1),
						Data:           bytes.Repeat([]byte{'a'}, 1000),
						FeeToken:       srcNative,
						MessageID:      [32]byte{},
					},
					BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC),
				},
			},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedSeqNrs:         []ccip.ObservedMessage{{SeqNr: uint64(10)}},
			expectedStates: []messageExecStatus{
				newMessageExecState(10, [32]byte{}, AddedToBatch),
			},
		},
		{
			name: "unordered messages not blocked by nonce",
			reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				{
					EVM2EVMMessage: cciptypes.EVM2EVMMessage{
						SequenceNumber: 9,
						FeeTokenAmount: big.NewInt(1e9),
						Sender:         sender1,
						Nonce:          5,
						GasLimit:       big.NewInt(1),
						Data:           bytes.Repeat([]byte{'a'}, 1000),
						FeeToken:       srcNative,
						MessageID:      [32]byte{},
					},
					BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC),
				},
				{
					EVM2EVMMessage: cciptypes.EVM2EVMMessage{
						SequenceNumber: 10,
						FeeTokenAmount: big.NewInt(1e9),
						Sender:         sender1,
						Nonce:          0,
						GasLimit:       big.NewInt(1),
						Data:           bytes.Repeat([]byte{'a'}, 1000),
						FeeToken:       srcNative,
						MessageID:      [32]byte{},
					},
					BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC),
				},
			},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 3},
			expectedSeqNrs:         []ccip.ObservedMessage{{SeqNr: uint64(10)}},
			expectedStates: []messageExecStatus{
				newMessageExecState(9, [32]byte{}, InvalidNonce),
				newMessageExecState(10, [32]byte{}, AddedToBatch),
			},
		},
	}

	// Cases run only against BestEffortBatchingStrategy.
	bestEffortTestCases := []testCase{
		{
			name: "skip when batch gas limit is reached",
			reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				{
					EVM2EVMMessage: cciptypes.EVM2EVMMessage{
						SequenceNumber: 10,
						FeeTokenAmount: big.NewInt(1e9),
						Sender:         sender1,
						Nonce:          1,
						GasLimit:       big.NewInt(1),
						Data:           bytes.Repeat([]byte{'a'}, 1000),
						FeeToken:       srcNative,
						MessageID:      [32]byte{},
					},
					BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC),
				},
				{
					EVM2EVMMessage: cciptypes.EVM2EVMMessage{
						SequenceNumber: 11,
						FeeTokenAmount: big.NewInt(1e9),
						Sender:         sender1,
						Nonce:          2,
						GasLimit:       big.NewInt(math.MaxInt64),
						Data:           bytes.Repeat([]byte{'a'}, 1000),
						FeeToken:       srcNative,
						MessageID:      [32]byte{},
					},
					BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC),
				},
				{
					EVM2EVMMessage: cciptypes.EVM2EVMMessage{
						SequenceNumber: 12,
						FeeTokenAmount: big.NewInt(1e9),
						Sender:         sender1,
						Nonce:          3,
						GasLimit:       big.NewInt(1),
						Data:           bytes.Repeat([]byte{'a'}, 1000),
						FeeToken:       srcNative,
						MessageID:      [32]byte{},
					},
					BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC),
				},
			},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedSeqNrs:         []ccip.ObservedMessage{{SeqNr: uint64(10)}},
			expectedStates: []messageExecStatus{
				newMessageExecState(10, [32]byte{}, AddedToBatch),
				newMessageExecState(11, [32]byte{}, InsufficientRemainingBatchGas),
				newMessageExecState(12, [32]byte{}, InvalidNonce),
			},
		},
		{
			name: "some messages skipped after hitting max batch data len",
			reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				{
					EVM2EVMMessage: cciptypes.EVM2EVMMessage{
						SequenceNumber: 10,
						FeeTokenAmount: big.NewInt(1e9),
						Sender:         sender1,
						Nonce:          1,
						GasLimit:       big.NewInt(1),
						Data:           bytes.Repeat([]byte{'a'}, 1000),
						FeeToken:       srcNative,
						MessageID:      [32]byte{},
					},
					BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC),
				},
				{
					EVM2EVMMessage: cciptypes.EVM2EVMMessage{
						SequenceNumber: 11,
						FeeTokenAmount: big.NewInt(1e9),
						Sender:         sender1,
						Nonce:          2,
						GasLimit:       big.NewInt(1),
						Data:           bytes.Repeat([]byte{'a'}, MaxDataLenPerBatch-500), // skipped from batch
						FeeToken:       srcNative,
						MessageID:      [32]byte{},
					},
					BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC),
				},
				{
					EVM2EVMMessage: cciptypes.EVM2EVMMessage{
						SequenceNumber: 12,
						FeeTokenAmount: big.NewInt(1e9),
						Sender:         sender1,
						Nonce:          3,
						GasLimit:       big.NewInt(1),
						Data:           bytes.Repeat([]byte{'a'}, 1000),
						FeeToken:       srcNative,
						MessageID:      [32]byte{},
					},
					BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC),
				},
			},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedSeqNrs:         []ccip.ObservedMessage{{SeqNr: uint64(10)}},
			expectedStates: []messageExecStatus{
				newMessageExecState(10, [32]byte{}, AddedToBatch),
				newMessageExecState(11, [32]byte{}, InsufficientRemainingBatchDataLength),
				newMessageExecState(12, [32]byte{}, InvalidNonce),
			},
		},
		{
			name: "unordered messages then ordered messages",
			reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				{
					EVM2EVMMessage: cciptypes.EVM2EVMMessage{
						SequenceNumber: 9,
						FeeTokenAmount: big.NewInt(1e9),
						Sender:         sender1,
						Nonce:          0,
						GasLimit:       big.NewInt(1),
						Data:           bytes.Repeat([]byte{'a'}, 1000),
						FeeToken:       srcNative,
						MessageID:      [32]byte{},
					},
					BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC),
				},
				{
					EVM2EVMMessage: cciptypes.EVM2EVMMessage{
						SequenceNumber: 10,
						FeeTokenAmount: big.NewInt(1e9),
						Sender:         sender1,
						Nonce:          5,
						GasLimit:       big.NewInt(1),
						Data:           bytes.Repeat([]byte{'a'}, 1000),
						FeeToken:       srcNative,
						MessageID:      [32]byte{},
					},
					BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC),
				},
			},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 4},
			expectedSeqNrs:         []ccip.ObservedMessage{{SeqNr: uint64(9)}, {SeqNr: uint64(10)}},
			expectedStates: []messageExecStatus{
				newMessageExecState(9, [32]byte{}, AddedToBatch),
				newMessageExecState(10, [32]byte{}, AddedToBatch),
			},
		},
	}

	// Cases run only against ZKOverflowBatchingStrategy (TXM status checks).
	specificZkOverflowTestCases := []testCase{
		{
			name:                   "batch size is 1",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1, zkMsg2, zkMsg3},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedSeqNrs:         []ccip.ObservedMessage{{SeqNr: zkMsg1.SequenceNumber}},
			expectedStates: []messageExecStatus{
				newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, AddedToBatch),
			},
			statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) {
				m.Mock = mock.Mock{} // reset mock
				m.On("CheckMessageStatus", mock.Anything, mock.Anything).Return([]types.TransactionStatus{}, -1, nil)
			},
		},
		{
			name:                   "snooze fatal message and return empty batch",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedStates: []messageExecStatus{
				newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, TXMFatalStatus),
			},
			statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) {
				m.Mock = mock.Mock{} // reset mock
				m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{types.Fatal}, 0, nil)
			},
			skipGasPriceEstimator: true,
		},
		{
			name:                   "snooze fatal message and add next message to batch",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1, zkMsg2},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedSeqNrs:         []ccip.ObservedMessage{{SeqNr: zkMsg2.SequenceNumber}},
			expectedStates: []messageExecStatus{
				newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, TXMFatalStatus),
				newMessageExecState(zkMsg2.SequenceNumber, zkMsg2.MessageID, AddedToBatch),
			},
			statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) {
				m.Mock = mock.Mock{} // reset mock
				m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{types.Fatal}, 0, nil)
				m.On("CheckMessageStatus", mock.Anything, zkMsg2.MessageID.String()).Return([]types.TransactionStatus{}, -1, nil)
			},
		},
		{
			name:                   "all messages are fatal and batch is empty",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1, zkMsg2},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedStates: []messageExecStatus{
				newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, TXMFatalStatus),
				newMessageExecState(zkMsg2.SequenceNumber, zkMsg2.MessageID, TXMFatalStatus),
			},
			statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) {
				m.Mock = mock.Mock{} // reset mock
				m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{types.Fatal}, 0, nil)
				m.On("CheckMessageStatus", mock.Anything, zkMsg2.MessageID.String()).Return([]types.TransactionStatus{types.Fatal}, 0, nil)
			},
			skipGasPriceEstimator: true,
		},
		{
			name:                   "message batched when unconfirmed or failed",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1, zkMsg2},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedSeqNrs:         []ccip.ObservedMessage{{SeqNr: zkMsg1.SequenceNumber}},
			expectedStates: []messageExecStatus{
				newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, AddedToBatch),
			},
			statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) {
				m.Mock = mock.Mock{} // reset mock
				m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{types.Unconfirmed, types.Failed}, 1, nil)
			},
		},
		{
			name:                   "message snoozed when multiple statuses with fatal",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1, zkMsg2},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedSeqNrs:         []ccip.ObservedMessage{{SeqNr: zkMsg2.SequenceNumber}},
			expectedStates: []messageExecStatus{
				newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, TXMFatalStatus),
				newMessageExecState(zkMsg2.SequenceNumber, zkMsg2.MessageID, AddedToBatch),
			},
			statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) {
				m.Mock = mock.Mock{} // reset mock
				m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{types.Unconfirmed, types.Failed, types.Fatal}, 2, nil)
				m.On("CheckMessageStatus", mock.Anything, zkMsg2.MessageID.String()).Return([]types.TransactionStatus{}, -1, nil)
			},
		},
		{
			name:                   "txm return error for message",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1, zkMsg2},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedSeqNrs:         []ccip.ObservedMessage{{SeqNr: zkMsg2.SequenceNumber}},
			expectedStates: []messageExecStatus{
				newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, TXMCheckError),
				newMessageExecState(zkMsg2.SequenceNumber, zkMsg2.MessageID, AddedToBatch),
			},
			statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) {
				m.Mock = mock.Mock{} // reset mock
				m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{}, -1, errors.New("dummy txm error"))
				m.On("CheckMessageStatus", mock.Anything, zkMsg2.MessageID.String()).Return([]types.TransactionStatus{}, -1, nil)
			},
		},
		{
			name:                   "snooze message when inflight",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1},
			inflight:               createInflight(zkMsg1),
			inflightAggregateValue: zkMsg1.FeeTokenAmount,
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedStates: []messageExecStatus{
				newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, SkippedInflight),
			},
			skipGasPriceEstimator: true,
		},
		{
			name:                   "snooze when not inflight but txm returns error",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedStates: []messageExecStatus{
				newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, TXMCheckError),
			},
			statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) {
				m.Mock = mock.Mock{} // reset mock
				m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{}, -1, errors.New("dummy txm error"))
			},
			skipGasPriceEstimator: true,
		},
		{
			name:                   "snooze when not inflight but txm returns fatal status",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1},
			inflight:               []InflightInternalExecutionReport{},
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedStates: []messageExecStatus{
				newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, TXMFatalStatus),
			},
			statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) {
				m.Mock = mock.Mock{} // reset mock
				m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{types.Unconfirmed, types.Failed, types.Fatal}, 2, nil)
			},
			skipGasPriceEstimator: true,
		},
		{
			name:                   "snooze messages when inflight but batch valid messages",
			reqs:                   []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1, zkMsg2, zkMsg3, zkMsg4},
			inflight:               createInflight(zkMsg1, zkMsg2),
			inflightAggregateValue: big.NewInt(0),
			tokenLimit:             big.NewInt(0),
			destGasPrice:           big.NewInt(10),
			srcPrices:              map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)},
			dstPrices:              map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)},
			offRampNoncesBySender:  map[cciptypes.Address]uint64{sender1: 0},
			expectedSeqNrs:         []ccip.ObservedMessage{{SeqNr: zkMsg3.SequenceNumber}},
			expectedStates: []messageExecStatus{
				newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, SkippedInflight),
				newMessageExecState(zkMsg2.SequenceNumber, zkMsg2.MessageID, SkippedInflight),
				newMessageExecState(zkMsg3.SequenceNumber, zkMsg3.MessageID, AddedToBatch),
			},
			statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) {
				m.Mock = mock.Mock{} // reset mock
				m.On("CheckMessageStatus", mock.Anything, zkMsg3.MessageID.String()).Return([]types.TransactionStatus{}, -1, nil)
			},
			skipGasPriceEstimator: false,
		},
	}

	// Run the shared cases plus each strategy's specific cases.
	t.Run("BestEffortBatchingStrategy", func(t *testing.T) {
		strategy := &BestEffortBatchingStrategy{}
		runBatchingStrategyTests(t, strategy, 1_000_000, append(testCases, bestEffortTestCases...))
	})

	t.Run("ZKOverflowBatchingStrategy", func(t *testing.T) {
		mockedStatusChecker := mockstatuschecker.NewCCIPTransactionStatusChecker(t)
		strategy := &ZKOverflowBatchingStrategy{
			statuschecker: mockedStatusChecker,
		}
		runBatchingStrategyTests(t, strategy, 1_000_000, append(testCases, specificZkOverflowTestCases...))
	})
}
+
+// Function to set up and run tests for a given batching strategy
+func runBatchingStrategyTests(t *testing.T, strategy BatchingStrategy, availableGas uint64, testCases []testCase) {
+ destNative := ccipcalc.HexToAddress("0xb")
+
+ for _, tc := range testCases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ lggr := logger.TestLogger(t)
+
+ gasPriceEstimator := prices.NewMockGasPriceEstimatorExec(t)
+ if !tc.skipGasPriceEstimator {
+ if tc.expectedSeqNrs != nil {
+ gasPriceEstimator.On("EstimateMsgCostUSD", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(big.NewInt(0), nil)
+ } else {
+ gasPriceEstimator.On("EstimateMsgCostUSD", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(big.NewInt(0), errors.New("error"))
+ }
+ }
+
+ // default case for ZKOverflowBatchingStrategy
+ if strategyType := reflect.TypeOf(strategy); tc.statuschecker == nil && strategyType == reflect.TypeOf(&ZKOverflowBatchingStrategy{}) {
+ strategy.(*ZKOverflowBatchingStrategy).statuschecker.(*mockstatuschecker.CCIPTransactionStatusChecker).On("CheckMessageStatus", mock.Anything, mock.Anything).Return([]types.TransactionStatus{}, -1, nil)
+ }
+
+ // Mock calls to TXM
+ if tc.statuschecker != nil {
+ tc.statuschecker(strategy.(*ZKOverflowBatchingStrategy).statuschecker.(*mockstatuschecker.CCIPTransactionStatusChecker))
+ }
+
+ batchContext := &BatchContext{
+ report: commitReportWithSendRequests{sendRequestsWithMeta: tc.reqs},
+ inflight: tc.inflight,
+ inflightAggregateValue: tc.inflightAggregateValue,
+ lggr: lggr,
+ availableDataLen: MaxDataLenPerBatch,
+ availableGas: availableGas,
+ expectedNonces: make(map[cciptypes.Address]uint64),
+ sendersNonce: tc.offRampNoncesBySender,
+ sourceTokenPricesUSD: tc.srcPrices,
+ destTokenPricesUSD: tc.dstPrices,
+ gasPrice: tc.destGasPrice,
+ sourceToDestToken: tc.srcToDestTokens,
+ aggregateTokenLimit: tc.tokenLimit,
+ tokenDataRemainingDuration: 5 * time.Second,
+ tokenDataWorker: tokendata.NewBackgroundWorker(map[cciptypes.Address]tokendata.Reader{}, 10, 5*time.Second, time.Hour),
+ gasPriceEstimator: gasPriceEstimator,
+ destWrappedNative: destNative,
+ offchainConfig: cciptypes.ExecOffchainConfig{
+ DestOptimisticConfirmations: 1,
+ BatchGasLimit: 300_000,
+ RelativeBoostPerWaitHour: 1,
+ },
+ }
+
+ seqNrs, execStates := strategy.BuildBatch(context.Background(), batchContext)
+
+ runAssertions(t, tc, seqNrs, execStates)
+ })
+ }
+}
+
+// Utility function to run common assertions
+func runAssertions(t *testing.T, tc testCase, seqNrs []ccip.ObservedMessage, execStates []messageExecStatus) {
+ if tc.expectedSeqNrs == nil {
+ assert.Len(t, seqNrs, 0)
+ } else {
+ assert.Equal(t, tc.expectedSeqNrs, seqNrs)
+ }
+
+ if tc.expectedStates == nil {
+ assert.Len(t, execStates, 0)
+ } else {
+ assert.Equal(t, tc.expectedStates, execStates)
+ }
+}
+
+func createTestMessage(seqNr uint64, sender cciptypes.Address, nonce uint64, feeToken cciptypes.Address, feeAmount *big.Int, executed bool, data []byte) cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta {
+ return cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+ EVM2EVMMessage: cciptypes.EVM2EVMMessage{
+ SequenceNumber: seqNr,
+ FeeTokenAmount: feeAmount,
+ Sender: sender,
+ Nonce: nonce,
+ GasLimit: big.NewInt(1),
+ Strict: false,
+ Receiver: "",
+ Data: data,
+ TokenAmounts: nil,
+ FeeToken: feeToken,
+ MessageID: generateMessageIDFromInt(seqNr),
+ },
+ BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC),
+ Executed: executed,
+ }
+}
+
// generateMessageIDFromInt deterministically derives a 32-byte message ID
// from an integer by encoding it little-endian into the leading 8 bytes;
// the remaining bytes stay zero.
func generateMessageIDFromInt(input uint64) [32]byte {
	var messageID [32]byte
	// Use PutUint64 so inputs above math.MaxUint32 are not silently truncated
	// (the previous PutUint32(uint32(input)) dropped the high 32 bits); for
	// smaller inputs the resulting bytes are identical.
	binary.LittleEndian.PutUint64(messageID[:], input)
	return messageID
}
+
+func createInflight(msgs ...cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) []InflightInternalExecutionReport {
+ reports := make([]InflightInternalExecutionReport, len(msgs))
+
+ for i, msg := range msgs {
+ reports[i] = InflightInternalExecutionReport{
+ messages: []cciptypes.EVM2EVMMessage{msg.EVM2EVMMessage},
+ createdAt: msg.BlockTimestamp,
+ }
+ }
+
+ return reports
+}
diff --git a/core/services/ocr2/plugins/ccip/ccipexec/factory.go b/core/services/ocr2/plugins/ccip/ccipexec/factory.go
new file mode 100644
index 00000000000..97caf2e719c
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipexec/factory.go
@@ -0,0 +1,164 @@
+package ccipexec
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+)
+
// ExecutionReportingPluginFactory builds ExecutionReportingPlugin instances
// for the CCIP exec OCR2 plugin.
type ExecutionReportingPluginFactory struct {
	// Config derived from job specs and does not change between instances.
	config ExecutionPluginStaticConfig

	// Destination price registry reader and the address it was opened for.
	// Both start empty and are (re)populated by UpdateDynamicReaders when the
	// registry address changes.
	destPriceRegReader ccipdata.PriceRegistryReader
	destPriceRegAddr cciptypes.Address
	// readersMu guards destPriceRegReader and destPriceRegAddr.
	readersMu *sync.Mutex
}
+
+func NewExecutionReportingPluginFactory(config ExecutionPluginStaticConfig) *ExecutionReportingPluginFactory {
+ return &ExecutionReportingPluginFactory{
+ config: config,
+ readersMu: &sync.Mutex{},
+
+ // the fields below are initially empty and populated on demand
+ destPriceRegReader: nil,
+ destPriceRegAddr: "",
+ }
+}
+
+func (rf *ExecutionReportingPluginFactory) UpdateDynamicReaders(ctx context.Context, newPriceRegAddr cciptypes.Address) error {
+ rf.readersMu.Lock()
+ defer rf.readersMu.Unlock()
+ // TODO: Investigate use of Close() to cleanup.
+ // TODO: a true price registry upgrade on an existing lane may want some kind of start block in its config? Right now we
+ // essentially assume that plugins don't care about historical price reg logs.
+ if rf.destPriceRegAddr == newPriceRegAddr {
+ // No-op
+ return nil
+ }
+ // Close old reader (if present) and open new reader if address changed.
+ if rf.destPriceRegReader != nil {
+ if err := rf.destPriceRegReader.Close(); err != nil {
+ return err
+ }
+ }
+
+ destPriceRegistryReader, err := rf.config.priceRegistryProvider.NewPriceRegistryReader(context.Background(), newPriceRegAddr)
+ if err != nil {
+ return err
+ }
+ rf.destPriceRegReader = destPriceRegistryReader
+ rf.destPriceRegAddr = newPriceRegAddr
+ return nil
+}
+
// reportingPluginAndInfo pairs a constructed plugin with its metadata so both
// can be returned through a single retryable closure.
type reportingPluginAndInfo struct {
	plugin types.ReportingPlugin
	pluginInfo types.ReportingPluginInfo
}
+
+// NewReportingPlugin registers a new ReportingPlugin
+func (rf *ExecutionReportingPluginFactory) NewReportingPlugin(config types.ReportingPluginConfig) (types.ReportingPlugin, types.ReportingPluginInfo, error) {
+ initialRetryDelay := rf.config.newReportingPluginRetryConfig.InitialDelay
+ maxDelay := rf.config.newReportingPluginRetryConfig.MaxDelay
+
+ pluginAndInfo, err := ccipcommon.RetryUntilSuccess(rf.NewReportingPluginFn(config), initialRetryDelay, maxDelay)
+ if err != nil {
+ return nil, types.ReportingPluginInfo{}, err
+ }
+ return pluginAndInfo.plugin, pluginAndInfo.pluginInfo, err
+}
+
// NewReportingPluginFn implements the NewReportingPlugin logic. It is defined as a function so that it can easily be
// retried via RetryUntilSuccess. NewReportingPlugin must return successfully in order for the Exec plugin to function,
// hence why we can only keep retrying it until it succeeds.
func (rf *ExecutionReportingPluginFactory) NewReportingPluginFn(config types.ReportingPluginConfig) func() (reportingPluginAndInfo, error) {
	return func() (reportingPluginAndInfo, error) {
		ctx := context.Background() // todo: consider setting a timeout

		// ChangeConfig must run first: it applies the passed onchain/offchain
		// config and yields the price registry and wrapped-native addresses
		// that the steps below depend on.
		destPriceRegistry, destWrappedNative, err := rf.config.offRampReader.ChangeConfig(ctx, config.OnchainConfig, config.OffchainConfig)
		if err != nil {
			return reportingPluginAndInfo{}, err
		}

		// Open dynamic readers
		err = rf.UpdateDynamicReaders(ctx, destPriceRegistry)
		if err != nil {
			return reportingPluginAndInfo{}, err
		}

		offchainConfig, err := rf.config.offRampReader.OffchainConfig(ctx)
		if err != nil {
			return reportingPluginAndInfo{}, fmt.Errorf("get offchain config from offramp: %w", err)
		}

		gasPriceEstimator, err := rf.config.offRampReader.GasPriceEstimator(ctx)
		if err != nil {
			return reportingPluginAndInfo{}, fmt.Errorf("get gas price estimator from offramp: %w", err)
		}

		onchainConfig, err := rf.config.offRampReader.OnchainConfig(ctx)
		if err != nil {
			return reportingPluginAndInfo{}, fmt.Errorf("get onchain config from offramp: %w", err)
		}

		batchingStrategy, err := NewBatchingStrategy(offchainConfig.BatchingStrategyID, rf.config.txmStatusChecker)
		if err != nil {
			return reportingPluginAndInfo{}, fmt.Errorf("get batching strategy: %w", err)
		}

		// Fall back to the onchain permissionless-execution threshold when the
		// offchain visibility interval is left unset (zero).
		msgVisibilityInterval := offchainConfig.MessageVisibilityInterval.Duration()
		if msgVisibilityInterval.Seconds() == 0 {
			rf.config.lggr.Info("MessageVisibilityInterval not set, falling back to PermissionLessExecutionThreshold")
			msgVisibilityInterval = onchainConfig.PermissionLessExecutionThresholdSeconds
		}
		rf.config.lggr.Infof("MessageVisibilityInterval set to: %s", msgVisibilityInterval)

		lggr := rf.config.lggr.Named("ExecutionReportingPlugin")
		plugin := &ExecutionReportingPlugin{
			F:                           config.F,
			lggr:                        lggr,
			offchainConfig:              offchainConfig,
			tokenDataWorker:             rf.config.tokenDataWorker,
			gasPriceEstimator:           gasPriceEstimator,
			sourcePriceRegistryProvider: rf.config.sourcePriceRegistryProvider,
			sourcePriceRegistryLock:     sync.RWMutex{},
			sourceWrappedNativeToken:    rf.config.sourceWrappedNativeToken,
			onRampReader:                rf.config.onRampReader,
			commitStoreReader:           rf.config.commitStoreReader,
			destPriceRegistry:           rf.destPriceRegReader,
			destWrappedNative:           destWrappedNative,
			onchainConfig:               onchainConfig,
			offRampReader:               rf.config.offRampReader,
			tokenPoolBatchedReader:      rf.config.tokenPoolBatchedReader,
			inflightReports:             newInflightExecReportsContainer(offchainConfig.InflightCacheExpiry.Duration()),
			commitRootsCache:            cache.NewCommitRootsCache(lggr, rf.config.commitStoreReader, msgVisibilityInterval, offchainConfig.RootSnoozeTime.Duration()),
			metricsCollector:            rf.config.metricsCollector,
			chainHealthcheck:            rf.config.chainHealthcheck,
			batchingStrategy:            batchingStrategy,
		}

		pluginInfo := types.ReportingPluginInfo{
			Name: "CCIPExecution",
			// Setting this to false saves on calldata since OffRamp doesn't require agreement between NOPs
			// (OffRamp is only able to execute committed messages).
			UniqueReports: false,
			Limits: types.ReportingPluginLimits{
				MaxObservationLength: ccip.MaxObservationLength,
				MaxReportLength:      MaxExecutionReportLength,
			},
		}

		return reportingPluginAndInfo{plugin, pluginInfo}, nil
	}
}
diff --git a/core/services/ocr2/plugins/ccip/ccipexec/factory_test.go b/core/services/ocr2/plugins/ccip/ccipexec/factory_test.go
new file mode 100644
index 00000000000..7bbb9be0c69
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipexec/factory_test.go
@@ -0,0 +1,67 @@
+package ccipexec
+
+import (
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ ccipdataprovidermocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks"
+)
+
// Assert that NewReportingPlugin keeps retrying until it succeeds.
//
// NewReportingPlugin makes several calls (e.g. OffRampReader.ChangeConfig()) that can fail. We use mocks to cause the
// first call to each of these functions to fail, then all subsequent calls succeed. We assert that NewReportingPlugin
// retries a sufficient number of times to get through the transient errors and eventually succeed.
func TestNewReportingPluginRetriesUntilSuccess(t *testing.T) {
	execConfig := ExecutionPluginStaticConfig{}

	// For this unit test, ensure that there is no delay between retries
	execConfig.newReportingPluginRetryConfig = ccipdata.RetryConfig{
		InitialDelay: 0 * time.Nanosecond,
		MaxDelay:     0 * time.Nanosecond,
	}

	// Set up the OffRampReader mock
	mockOffRampReader := new(mocks.OffRampReader)

	// The first call is set to return an error, the following calls return a nil error.
	// Each retry restarts from ChangeConfig, hence the larger .Times(n) budgets
	// for the earlier calls in the sequence.
	mockOffRampReader.On("ChangeConfig", mock.Anything, mock.Anything, mock.Anything).Return(ccip.Address(""), ccip.Address(""), errors.New("")).Once()
	mockOffRampReader.On("ChangeConfig", mock.Anything, mock.Anything, mock.Anything).Return(ccip.Address("addr1"), ccip.Address("addr2"), nil).Times(5)

	mockOffRampReader.On("OffchainConfig", mock.Anything).Return(ccip.ExecOffchainConfig{}, errors.New("")).Once()
	mockOffRampReader.On("OffchainConfig", mock.Anything).Return(ccip.ExecOffchainConfig{}, nil).Times(3)

	mockOffRampReader.On("GasPriceEstimator", mock.Anything).Return(nil, errors.New("")).Once()
	mockOffRampReader.On("GasPriceEstimator", mock.Anything).Return(nil, nil).Times(2)

	mockOffRampReader.On("OnchainConfig", mock.Anything).Return(ccip.ExecOnchainConfig{}, errors.New("")).Once()
	mockOffRampReader.On("OnchainConfig", mock.Anything).Return(ccip.ExecOnchainConfig{}, nil).Times(1)

	execConfig.offRampReader = mockOffRampReader

	// Set up the PriceRegistry mock
	priceRegistryProvider := new(ccipdataprovidermocks.PriceRegistry)
	priceRegistryProvider.On("NewPriceRegistryReader", mock.Anything, mock.Anything).Return(nil, errors.New("")).Once()
	priceRegistryProvider.On("NewPriceRegistryReader", mock.Anything, mock.Anything).Return(nil, nil).Once()
	execConfig.priceRegistryProvider = priceRegistryProvider

	execConfig.lggr, _ = logger.NewLogger()

	factory := NewExecutionReportingPluginFactory(execConfig)
	reportingConfig := types.ReportingPluginConfig{}
	reportingConfig.OnchainConfig = []byte{1, 2, 3}
	reportingConfig.OffchainConfig = []byte{1, 2, 3}

	// Assert that NewReportingPlugin succeeds despite many transient internal failures (mocked out above)
	_, _, err := factory.NewReportingPlugin(reportingConfig)
	assert.Equal(t, nil, err)
}
diff --git a/core/services/ocr2/plugins/ccip/ccipexec/gashelpers.go b/core/services/ocr2/plugins/ccip/ccipexec/gashelpers.go
new file mode 100644
index 00000000000..7e208296c53
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipexec/gashelpers.go
@@ -0,0 +1,83 @@
+package ccipexec
+
+import (
+ "math"
+ "math/big"
+ "time"
+)
+
// Gas accounting constants used by the offchain execution gas estimation
// below; the inline notes name the EVM/OffRamp cost component each value
// models.
const (
	EvmAddressLengthBytes = 20
	EvmWordBytes = 32
	CalldataGasPerByte = 16
	TokenAdminRegistryWarmupCost = 2_500
	TokenAdminRegistryPoolLookupGas = 100 + // WARM_ACCESS_COST TokenAdminRegistry
		700 + // CALL cost for TokenAdminRegistry
		2_100 // COLD_SLOAD_COST loading the pool address
	SupportsInterfaceCheck = 2600 + // because the receiver will be untouched initially
		30_000*3 // supportsInterface of ERC165Checker library performs 3 static-calls of 30k gas each
	PerTokenOverheadGas = TokenAdminRegistryPoolLookupGas +
		SupportsInterfaceCheck +
		200_000 + // releaseOrMint using callWithExactGas
		50_000 // transfer using callWithExactGas
	RateLimiterOverheadGas = 2_100 + // COLD_SLOAD_COST for accessing token bucket
		5_000 // SSTORE_RESET_GAS for updating & decreasing token bucket
	ConstantMessagePartBytes = 10 * 32 // A message consists of 10 abi encoded fields 32B each (after encoding)
	ExecutionStateProcessingOverheadGas = 2_100 + // COLD_SLOAD_COST for first reading the state
		20_000 + // SSTORE_SET_GAS for writing from 0 (untouched) to non-zero (in-progress)
		100 // SLOAD_GAS = WARM_STORAGE_READ_COST for rewriting from non-zero (in-progress) to non-zero (success/failure)
)
+
+// return the size of bytes for msg tokens
+func bytesForMsgTokens(numTokens int) int {
+ // token address (address) + token amount (uint256)
+ return (EvmAddressLengthBytes + EvmWordBytes) * numTokens
+}
+
+// Offchain: we compute the max overhead gas to determine msg executability.
+func overheadGas(dataLength, numTokens int) uint64 {
+ messageBytes := ConstantMessagePartBytes +
+ bytesForMsgTokens(numTokens) +
+ dataLength
+
+ messageCallDataGas := uint64(messageBytes * CalldataGasPerByte)
+
+ // Rate limiter only limits value in tokens. It's not called if there are no
+ // tokens in the message. The same goes for the admin registry, it's only loaded
+ // if there are tokens, and it's only loaded once.
+ rateLimiterOverhead := uint64(0)
+ adminRegistryOverhead := uint64(0)
+ if numTokens >= 1 {
+ rateLimiterOverhead = RateLimiterOverheadGas
+ adminRegistryOverhead = TokenAdminRegistryWarmupCost
+ }
+
+ return messageCallDataGas +
+ ExecutionStateProcessingOverheadGas +
+ SupportsInterfaceCheck +
+ adminRegistryOverhead +
+ rateLimiterOverhead +
+ PerTokenOverheadGas*uint64(numTokens)
+}
+
+func maxGasOverHeadGas(numMsgs, dataLength, numTokens int) uint64 {
+ merkleProofBytes := (math.Ceil(math.Log2(float64(numMsgs))))*32 + (1+2)*32 // only ever one outer root hash
+ merkleGasShare := uint64(merkleProofBytes * CalldataGasPerByte)
+
+ return overheadGas(dataLength, numTokens) + merkleGasShare
+}
+
+// waitBoostedFee boosts the given fee according to the time passed since the msg was sent.
+// RelativeBoostPerWaitHour is used to normalize the time diff,
+// it makes our loss taking "smooth" and gives us time to react without a hard deadline.
+// At the same time, messages that are slightly underpaid will start going through after waiting for a little bit.
+//
+// wait_boosted_fee(m) = (1 + (now - m.send_time).hours * RELATIVE_BOOST_PER_WAIT_HOUR) * fee(m)
+func waitBoostedFee(waitTime time.Duration, fee *big.Int, relativeBoostPerWaitHour float64) *big.Int {
+ k := 1.0 + waitTime.Hours()*relativeBoostPerWaitHour
+
+ boostedFee := big.NewFloat(0).Mul(big.NewFloat(k), new(big.Float).SetInt(fee))
+ res, _ := boostedFee.Int(nil)
+
+ return res
+}
diff --git a/core/services/ocr2/plugins/ccip/ccipexec/gashelpers_test.go b/core/services/ocr2/plugins/ccip/ccipexec/gashelpers_test.go
new file mode 100644
index 00000000000..15607cc310e
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipexec/gashelpers_test.go
@@ -0,0 +1,179 @@
+package ccipexec
+
+import (
+ "math/big"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestOverheadGas(t *testing.T) {
+ // Only Data and TokenAmounts are used from the messages
+ // And only the length is used so the contents doesn't matter.
+ tests := []struct {
+ dataLength int
+ numberOfTokens int
+ want uint64
+ }{
+ {
+ dataLength: 0,
+ numberOfTokens: 0,
+ want: 119920,
+ },
+ {
+ dataLength: len([]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}),
+ numberOfTokens: 1,
+ want: 475948,
+ },
+ }
+
+ for _, tc := range tests {
+ got := overheadGas(tc.dataLength, tc.numberOfTokens)
+ if !reflect.DeepEqual(tc.want, got) {
+ t.Fatalf("expected: %v, got: %v", tc.want, got)
+ }
+ }
+}
+
+func TestMaxGasOverHeadGas(t *testing.T) {
+ // Only Data and TokenAmounts are used from the messages
+ // And only the length is used so the contents doesn't matter.
+ tests := []struct {
+ numMsgs int
+ dataLength int
+ numberOfTokens int
+ want uint64
+ }{
+ {
+ numMsgs: 6,
+ dataLength: 0,
+ numberOfTokens: 0,
+ want: 122992,
+ },
+ {
+ numMsgs: 3,
+ dataLength: len([]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}),
+ numberOfTokens: 1,
+ want: 478508,
+ },
+ }
+
+ for _, tc := range tests {
+ got := maxGasOverHeadGas(tc.numMsgs, tc.dataLength, tc.numberOfTokens)
+ if !reflect.DeepEqual(tc.want, got) {
+ t.Fatalf("expected: %v, got: %v", tc.want, got)
+ }
+ }
+}
+
+func TestWaitBoostedFee(t *testing.T) {
+ tests := []struct {
+ name string
+ sendTimeDiff time.Duration
+ fee *big.Int
+ diff *big.Int
+ relativeBoostPerWaitHour float64
+ }{
+ {
+ "wait 10s",
+ time.Second * 10,
+ big.NewInt(6e18), // Fee: 6 LINK
+
+ big.NewInt(1166666666665984), // Boost: 0.01 LINK
+ 0.07,
+ },
+ {
+ "wait 5m",
+ time.Minute * 5,
+ big.NewInt(6e18), // Fee: 6 LINK
+ big.NewInt(35e15), // Boost: 0.35 LINK
+ 0.07,
+ },
+ {
+ "wait 7m",
+ time.Minute * 7,
+ big.NewInt(6e18), // Fee: 6 LINK
+ big.NewInt(49e15), // Boost: 0.49 LINK
+ 0.07,
+ },
+ {
+ "wait 12m",
+ time.Minute * 12,
+ big.NewInt(6e18), // Fee: 6 LINK
+ big.NewInt(84e15), // Boost: 0.84 LINK
+ 0.07,
+ },
+ {
+ "wait 25m",
+ time.Minute * 25,
+ big.NewInt(6e18), // Fee: 6 LINK
+ big.NewInt(174999999999998976), // Boost: 1.75 LINK
+ 0.07,
+ },
+ {
+ "wait 1h",
+ time.Hour * 1,
+ big.NewInt(6e18), // Fee: 6 LINK
+ big.NewInt(420e15), // Boost: 4.2 LINK
+ 0.07,
+ },
+ {
+ "wait 5h",
+ time.Hour * 5,
+ big.NewInt(6e18), // Fee: 6 LINK
+ big.NewInt(2100000000000001024), // Boost: 21LINK
+ 0.07,
+ },
+ {
+ "wait 24h",
+ time.Hour * 24,
+ big.NewInt(6e18), // Fee: 6 LINK
+ big.NewInt(0).Mul(big.NewInt(10), big.NewInt(1008e15)), // Boost: 100LINK
+ 0.07,
+ },
+ {
+ "high boost wait 10s",
+ time.Second * 10,
+ big.NewInt(5e18),
+ big.NewInt(9722222222222336), // 1e16
+ 0.7,
+ },
+ {
+ "high boost wait 5m",
+ time.Minute * 5,
+ big.NewInt(5e18),
+ big.NewInt(291666666666667008), // 1e18
+ 0.7,
+ },
+ {
+ "high boost wait 25m",
+ time.Minute * 25,
+ big.NewInt(5e18),
+ big.NewInt(1458333333333334016), // 1e19
+ 0.7,
+ },
+ {
+ "high boost wait 5h",
+ time.Hour * 5,
+ big.NewInt(5e18),
+ big.NewInt(0).Mul(big.NewInt(10), big.NewInt(175e16)), // 1e20
+ 0.7,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ boosted := waitBoostedFee(tc.sendTimeDiff, tc.fee, tc.relativeBoostPerWaitHour)
+ diff := big.NewInt(0).Sub(boosted, tc.fee)
+ assert.Equal(t, diff, tc.diff)
+ // we check that the actual diff is approximately equals to expected diff,
+ // as we might get slightly different results locally vs. CI therefore normal Equal() would be unstable
+ //diffUpperLimit := big.NewInt(0).Add(tc.diff, big.NewInt(1e9))
+ //diffLowerLimit := big.NewInt(0).Add(tc.diff, big.NewInt(-1e9))
+ //require.Equalf(t, -1, diff.Cmp(diffUpperLimit), "actual diff (%s) is larger than expected (%s)", diff.String(), diffUpperLimit.String())
+ //require.Equal(t, 1, diff.Cmp(diffLowerLimit), "actual diff (%s) is smaller than expected (%s)", diff.String(), diffLowerLimit.String())
+ })
+ }
+}
diff --git a/core/services/ocr2/plugins/ccip/ccipexec/helpers.go b/core/services/ocr2/plugins/ccip/ccipexec/helpers.go
new file mode 100644
index 00000000000..46df7d793ba
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipexec/helpers.go
@@ -0,0 +1,53 @@
+package ccipexec
+
+import (
+ mapset "github.com/deckarep/golang-set/v2"
+ "github.com/pkg/errors"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+)
+
// helper struct to hold the commitReport and the related send requests
type commitReportWithSendRequests struct {
	commitReport cciptypes.CommitStoreReport
	// sendRequestsWithMeta is expected to hold one entry per sequence number
	// in the report's interval; validate() enforces this.
	sendRequestsWithMeta []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta
}
+
+func (r *commitReportWithSendRequests) validate() error {
+ // make sure that number of messages is the expected
+ if exp := int(r.commitReport.Interval.Max - r.commitReport.Interval.Min + 1); len(r.sendRequestsWithMeta) != exp {
+ return errors.Errorf(
+ "unexpected missing sendRequestsWithMeta in committed root %x have %d want %d", r.commitReport.MerkleRoot, len(r.sendRequestsWithMeta), exp)
+ }
+
+ return nil
+}
+
+// uniqueSenders returns slice of unique senders based on the send requests. Order is preserved based on the order of the send requests (by sequence number).
+func (r *commitReportWithSendRequests) uniqueSenders() []cciptypes.Address {
+ orderedUniqueSenders := make([]cciptypes.Address, 0, len(r.sendRequestsWithMeta))
+ visitedSenders := mapset.NewSet[cciptypes.Address]()
+
+ for _, req := range r.sendRequestsWithMeta {
+ if !visitedSenders.Contains(req.Sender) {
+ orderedUniqueSenders = append(orderedUniqueSenders, req.Sender)
+ visitedSenders.Add(req.Sender)
+ }
+ }
+ return orderedUniqueSenders
+}
+
+func (r *commitReportWithSendRequests) allRequestsAreExecutedAndFinalized() bool {
+ for _, req := range r.sendRequestsWithMeta {
+ if !req.Executed || !req.Finalized {
+ return false
+ }
+ }
+ return true
+}
+
+// checks if the send request fits the commit report interval
+func (r *commitReportWithSendRequests) sendReqFits(sendReq cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) bool {
+ return sendReq.SequenceNumber >= r.commitReport.Interval.Min &&
+ sendReq.SequenceNumber <= r.commitReport.Interval.Max
+}
diff --git a/core/services/ocr2/plugins/ccip/ccipexec/helpers_test.go b/core/services/ocr2/plugins/ccip/ccipexec/helpers_test.go
new file mode 100644
index 00000000000..daa54fd2428
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipexec/helpers_test.go
@@ -0,0 +1,96 @@
+package ccipexec
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+)
+
// Test_CommitReportWithSendRequests_uniqueSenders covers sender
// de-duplication: the result must contain each sender exactly once, in
// first-appearance order.
func Test_CommitReportWithSendRequests_uniqueSenders(t *testing.T) {
	// messageFn builds a minimal send request carrying only the sender
	// address, the single field uniqueSenders inspects.
	messageFn := func(address cciptypes.Address) cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta {
		return cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{EVM2EVMMessage: cciptypes.EVM2EVMMessage{Sender: address}}
	}

	tests := []struct {
		name             string
		sendRequests     []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta
		expUniqueSenders int
		expSendersOrder  []cciptypes.Address
	}{
		{
			name: "all unique senders",
			sendRequests: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				messageFn(cciptypes.Address(utils.RandomAddress().String())),
				messageFn(cciptypes.Address(utils.RandomAddress().String())),
				messageFn(cciptypes.Address(utils.RandomAddress().String())),
			},
			expUniqueSenders: 3,
		},
		{
			name: "some senders are the same",
			sendRequests: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				messageFn("0x1"),
				messageFn("0x2"),
				messageFn("0x1"),
				messageFn("0x2"),
				messageFn("0x3"),
			},
			expUniqueSenders: 3,
			expSendersOrder: []cciptypes.Address{
				cciptypes.Address("0x1"),
				cciptypes.Address("0x2"),
				cciptypes.Address("0x3"),
			},
		},
		{
			name: "all senders are the same",
			sendRequests: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				messageFn("0x1"),
				messageFn("0x1"),
				messageFn("0x1"),
			},
			expUniqueSenders: 1,
			expSendersOrder: []cciptypes.Address{
				cciptypes.Address("0x1"),
			},
		},
		{
			name: "order is preserved",
			sendRequests: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				messageFn("0x3"),
				messageFn("0x1"),
				messageFn("0x3"),
				messageFn("0x2"),
				messageFn("0x2"),
				messageFn("0x1"),
			},
			expUniqueSenders: 3,
			expSendersOrder: []cciptypes.Address{
				cciptypes.Address("0x3"),
				cciptypes.Address("0x1"),
				cciptypes.Address("0x2"),
			},
		},
		{
			name:             "no senders",
			sendRequests:     []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{},
			expUniqueSenders: 0,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			rep := commitReportWithSendRequests{sendRequestsWithMeta: tt.sendRequests}
			uniqueSenders := rep.uniqueSenders()

			assert.Len(t, uniqueSenders, tt.expUniqueSenders)
			// Order is only asserted when the case pins an expected order
			// (the random-address case cannot).
			if tt.expSendersOrder != nil {
				assert.Equal(t, tt.expSendersOrder, uniqueSenders)
			}
		})
	}
}
diff --git a/core/services/ocr2/plugins/ccip/ccipexec/inflight.go b/core/services/ocr2/plugins/ccip/ccipexec/inflight.go
new file mode 100644
index 00000000000..c76bfdf7780
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipexec/inflight.go
@@ -0,0 +1,82 @@
+package ccipexec
+
+import (
+ "sync"
+ "time"
+
+ "github.com/pkg/errors"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
// InflightInternalExecutionReport serves the same purpose as InflightCommitReport
// see the comment on that struct for context.
type InflightInternalExecutionReport struct {
	// createdAt is when the report was added; expire() drops reports older
	// than the container's cacheExpiry.
	createdAt time.Time
	messages []cciptypes.EVM2EVMMessage
}
+
// inflightExecReportsContainer holds existing inflight reports.
// it provides a thread-safe access as it is called from multiple goroutines,
// e.g. reporting and transmission protocols.
type inflightExecReportsContainer struct {
	// locker guards reports.
	locker sync.RWMutex
	reports []InflightInternalExecutionReport

	// cacheExpiry is how long a report stays inflight before expire() reaps it.
	cacheExpiry time.Duration
}
+
+func newInflightExecReportsContainer(inflightCacheExpiry time.Duration) *inflightExecReportsContainer {
+ return &inflightExecReportsContainer{
+ locker: sync.RWMutex{},
+ reports: make([]InflightInternalExecutionReport, 0),
+ cacheExpiry: inflightCacheExpiry,
+ }
+}
+
+func (container *inflightExecReportsContainer) getAll() []InflightInternalExecutionReport {
+ container.locker.RLock()
+ defer container.locker.RUnlock()
+
+ reports := make([]InflightInternalExecutionReport, len(container.reports))
+ copy(reports[:], container.reports[:])
+
+ return reports
+}
+
+func (container *inflightExecReportsContainer) expire(lggr logger.Logger) {
+ container.locker.Lock()
+ defer container.locker.Unlock()
+ // Reap old inflight txs and check if any messages in the report are inflight.
+ var stillInFlight []InflightInternalExecutionReport
+ for _, report := range container.reports {
+ if time.Since(report.createdAt) > container.cacheExpiry {
+ // Happy path: inflight report was successfully transmitted onchain, we remove it from inflight and onchain state reflects inflight.
+ // Sad path: inflight report reverts onchain, we remove it from inflight, onchain state does not reflect the change so we retry.
+ lggr.Infow("Inflight report expired", "messages", report.messages)
+ } else {
+ stillInFlight = append(stillInFlight, report)
+ }
+ }
+ container.reports = stillInFlight
+}
+
+func (container *inflightExecReportsContainer) add(lggr logger.Logger, messages []cciptypes.EVM2EVMMessage) error {
+ container.locker.Lock()
+ defer container.locker.Unlock()
+
+ for _, report := range container.reports {
+ if (len(report.messages) > 0) && (report.messages[0].SequenceNumber == messages[0].SequenceNumber) {
+ return errors.Errorf("report is already in flight")
+ }
+ }
+
+ // Otherwise not already in flight, add it.
+ lggr.Info("Inflight report added")
+ container.reports = append(container.reports, InflightInternalExecutionReport{
+ createdAt: time.Now(),
+ messages: messages,
+ })
+ return nil
+}
diff --git a/core/services/ocr2/plugins/ccip/ccipexec/inflight_test.go b/core/services/ocr2/plugins/ccip/ccipexec/inflight_test.go
new file mode 100644
index 00000000000..2a91457ef4f
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipexec/inflight_test.go
@@ -0,0 +1,42 @@
+package ccipexec
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
// TestInflightReportsContainer_add verifies that a report whose first sequence
// number is already inflight is rejected and does not grow the container.
func TestInflightReportsContainer_add(t *testing.T) {
	lggr := logger.TestLogger(t)
	container := newInflightExecReportsContainer(time.Second)

	err := container.add(lggr, []cciptypes.EVM2EVMMessage{
		{SequenceNumber: 1}, {SequenceNumber: 2}, {SequenceNumber: 3},
	})
	require.NoError(t, err)
	// Same leading sequence number as the report above -> rejected as duplicate.
	err = container.add(lggr, []cciptypes.EVM2EVMMessage{
		{SequenceNumber: 1},
	})
	require.Error(t, err)
	require.Equal(t, "report is already in flight", err.Error())
	require.Equal(t, 1, len(container.getAll()))
}
+
// TestInflightReportsContainer_expire verifies that reports older than the
// cache expiry are reaped by expire().
func TestInflightReportsContainer_expire(t *testing.T) {
	lggr := logger.TestLogger(t)
	container := newInflightExecReportsContainer(time.Second)

	err := container.add(lggr, []cciptypes.EVM2EVMMessage{
		{SequenceNumber: 1}, {SequenceNumber: 2}, {SequenceNumber: 3},
	})
	require.NoError(t, err)
	// Backdate the report well past the 1s expiry before reaping.
	container.reports[0].createdAt = time.Now().Add(-time.Second * 5)
	require.Equal(t, 1, len(container.getAll()))

	container.expire(lggr)
	require.Equal(t, 0, len(container.getAll()))
}
diff --git a/core/services/ocr2/plugins/ccip/ccipexec/initializers.go b/core/services/ocr2/plugins/ccip/ccipexec/initializers.go
new file mode 100644
index 00000000000..7826f6058fe
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipexec/initializers.go
@@ -0,0 +1,228 @@
+package ccipexec
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "time"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/types"
+
+ "github.com/Masterminds/semver/v3"
+ "go.uber.org/multierr"
+
+ libocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus"
+
+ commonlogger "github.com/smartcontractkit/chainlink-common/pkg/logger"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/statuschecker"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/observability"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/oraclelib"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/promwrapper"
+)
+
+var (
+	// tokenDataWorkerTimeout defines 1) the timeout while waiting for a background call to the third-party token data provider,
+	// and 2) the default timeout used when a client requests token data without specifying one.
+ // 5 seconds is a reasonable value for a timeout.
+ // At this moment, minimum OCR Delta Round is set to 30s and deltaGrace to 5s. Based on this configuration
+ // 5s for token data worker timeout is a reasonable default.
+ tokenDataWorkerTimeout = 5 * time.Second
+ // tokenDataWorkerNumWorkers is the number of workers that will be processing token data in parallel.
+ tokenDataWorkerNumWorkers = 5
+)
+
+var defaultNewReportingPluginRetryConfig = ccipdata.RetryConfig{InitialDelay: time.Second, MaxDelay: 5 * time.Minute}
+
+func NewExecServices(ctx context.Context, lggr logger.Logger, jb job.Job, srcProvider types.CCIPExecProvider, dstProvider types.CCIPExecProvider, srcChainID int64, dstChainID int64, new bool, argsNoPlugin libocr2.OCR2OracleArgs, logError func(string)) ([]job.ServiceCtx, error) {
+ if jb.OCR2OracleSpec == nil {
+ return nil, fmt.Errorf("spec is nil")
+ }
+ spec := jb.OCR2OracleSpec
+ var pluginConfig ccipconfig.ExecPluginJobSpecConfig
+ err := json.Unmarshal(spec.PluginConfig.Bytes(), &pluginConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ offRampAddress := ccipcalc.HexToAddress(spec.ContractID)
+ offRampReader, err := dstProvider.NewOffRampReader(ctx, offRampAddress)
+ if err != nil {
+ return nil, fmt.Errorf("create offRampReader: %w", err)
+ }
+
+ offRampConfig, err := offRampReader.GetStaticConfig(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("get offRamp static config: %w", err)
+ }
+
+ srcChainSelector := offRampConfig.SourceChainSelector
+ dstChainSelector := offRampConfig.ChainSelector
+ onRampReader, err := srcProvider.NewOnRampReader(ctx, offRampConfig.OnRamp, srcChainSelector, dstChainSelector)
+ if err != nil {
+ return nil, fmt.Errorf("create onRampReader: %w", err)
+ }
+
+ dynamicOnRampConfig, err := onRampReader.GetDynamicConfig(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("get onramp dynamic config: %w", err)
+ }
+
+ sourceWrappedNative, err := srcProvider.SourceNativeToken(ctx, dynamicOnRampConfig.Router)
+ if err != nil {
+ return nil, fmt.Errorf("get source wrapped native token: %w", err)
+ }
+
+ srcCommitStore, err := srcProvider.NewCommitStoreReader(ctx, offRampConfig.CommitStore)
+ if err != nil {
+ return nil, fmt.Errorf("could not create src commitStoreReader reader: %w", err)
+ }
+
+ dstCommitStore, err := dstProvider.NewCommitStoreReader(ctx, offRampConfig.CommitStore)
+ if err != nil {
+ return nil, fmt.Errorf("could not create dst commitStoreReader reader: %w", err)
+ }
+
+ var commitStoreReader ccipdata.CommitStoreReader
+ commitStoreReader = ccip.NewProviderProxyCommitStoreReader(srcCommitStore, dstCommitStore)
+
+ tokenDataProviders := make(map[cciptypes.Address]tokendata.Reader)
+ // init usdc token data provider
+ if pluginConfig.USDCConfig.AttestationAPI != "" {
+ lggr.Infof("USDC token data provider enabled")
+ err2 := pluginConfig.USDCConfig.ValidateUSDCConfig()
+ if err2 != nil {
+ return nil, err2
+ }
+
+ usdcReader, err2 := srcProvider.NewTokenDataReader(ctx, ccip.EvmAddrToGeneric(pluginConfig.USDCConfig.SourceTokenAddress))
+ if err2 != nil {
+ return nil, fmt.Errorf("new usdc reader: %w", err2)
+ }
+ tokenDataProviders[cciptypes.Address(pluginConfig.USDCConfig.SourceTokenAddress.String())] = usdcReader
+ }
+
+ // Prom wrappers
+ onRampReader = observability.NewObservedOnRampReader(onRampReader, srcChainID, ccip.ExecPluginLabel)
+ commitStoreReader = observability.NewObservedCommitStoreReader(commitStoreReader, dstChainID, ccip.ExecPluginLabel)
+ offRampReader = observability.NewObservedOffRampReader(offRampReader, dstChainID, ccip.ExecPluginLabel)
+ metricsCollector := ccip.NewPluginMetricsCollector(ccip.ExecPluginLabel, srcChainID, dstChainID)
+
+ tokenPoolBatchedReader, err := dstProvider.NewTokenPoolBatchedReader(ctx, offRampAddress, srcChainSelector)
+ if err != nil {
+ return nil, fmt.Errorf("new token pool batched reader: %w", err)
+ }
+
+ chainHealthcheck := cache.NewObservedChainHealthCheck(
+ cache.NewChainHealthcheck(
+ // Adding more details to Logger to make healthcheck logs more informative
+ // It's safe because healthcheck logs only in case of unhealthy state
+ lggr.With(
+ "onramp", offRampConfig.OnRamp,
+ "commitStore", offRampConfig.CommitStore,
+ "offramp", offRampAddress,
+ ),
+ onRampReader,
+ commitStoreReader,
+ ),
+ ccip.ExecPluginLabel,
+ srcChainID,
+ dstChainID,
+ offRampConfig.OnRamp,
+ )
+
+ tokenBackgroundWorker := tokendata.NewBackgroundWorker(
+ tokenDataProviders,
+ tokenDataWorkerNumWorkers,
+ tokenDataWorkerTimeout,
+ 2*tokenDataWorkerTimeout,
+ )
+
+ wrappedPluginFactory := NewExecutionReportingPluginFactory(ExecutionPluginStaticConfig{
+ lggr: lggr,
+ onRampReader: onRampReader,
+ commitStoreReader: commitStoreReader,
+ offRampReader: offRampReader,
+ sourcePriceRegistryProvider: ccip.NewChainAgnosticPriceRegistry(srcProvider),
+ sourceWrappedNativeToken: sourceWrappedNative,
+ destChainSelector: dstChainSelector,
+ priceRegistryProvider: ccip.NewChainAgnosticPriceRegistry(dstProvider),
+ tokenPoolBatchedReader: tokenPoolBatchedReader,
+ tokenDataWorker: tokenBackgroundWorker,
+ metricsCollector: metricsCollector,
+ chainHealthcheck: chainHealthcheck,
+ newReportingPluginRetryConfig: defaultNewReportingPluginRetryConfig,
+ txmStatusChecker: statuschecker.NewTxmStatusChecker(dstProvider.GetTransactionStatus),
+ })
+
+ argsNoPlugin.ReportingPluginFactory = promwrapper.NewPromFactory(wrappedPluginFactory, "CCIPExecution", jb.OCR2OracleSpec.Relay, big.NewInt(0).SetInt64(dstChainID))
+ argsNoPlugin.Logger = commonlogger.NewOCRWrapper(lggr, true, logError)
+ oracle, err := libocr2.NewOracle(argsNoPlugin)
+ if err != nil {
+ return nil, err
+ }
+ // If this is a brand-new job, then we make use of the start blocks. If not then we're rebooting and log poller will pick up where we left off.
+ if new {
+ return []job.ServiceCtx{
+ oraclelib.NewChainAgnosticBackFilledOracle(
+ lggr,
+ srcProvider,
+ dstProvider,
+ job.NewServiceAdapter(oracle),
+ ),
+ chainHealthcheck,
+ tokenBackgroundWorker,
+ }, nil
+ }
+ return []job.ServiceCtx{
+ job.NewServiceAdapter(oracle),
+ chainHealthcheck,
+ tokenBackgroundWorker,
+ }, nil
+}
+
+// UnregisterExecPluginLpFilters unregisters all the registered filters for both source and dest chains.
+// See comment in UnregisterCommitPluginLpFilters
+// It MUST mirror the filters registered in NewExecServices.
+// This currently works because the filters registered by the created custom providers when the job is first added
+// are stored in the db. Those same filters are unregistered (i.e. deleted from the db) by the newly created providers
+// that are passed in from cleanupEVM, as while the providers have no knowledge of each other, they are created
+// on the same source and dest relayer.
+func UnregisterExecPluginLpFilters(srcProvider types.CCIPExecProvider, dstProvider types.CCIPExecProvider) error {
+ unregisterFuncs := []func() error{
+ func() error {
+ return srcProvider.Close()
+ },
+ func() error {
+ return dstProvider.Close()
+ },
+ }
+
+ var multiErr error
+ for _, fn := range unregisterFuncs {
+ if err := fn(); err != nil {
+ multiErr = multierr.Append(multiErr, err)
+ }
+ }
+ return multiErr
+}
+
+// ExecReportToEthTxMeta generates a txmgr.TxMeta from the given report.
+// Only MessageIDs will be populated in the TxMeta.
+func ExecReportToEthTxMeta(ctx context.Context, typ ccipconfig.ContractType, ver semver.Version) (func(report []byte) (*txmgr.TxMeta, error), error) {
+ return factory.ExecReportToEthTxMeta(ctx, typ, ver)
+}
diff --git a/core/services/ocr2/plugins/ccip/ccipexec/ocr2.go b/core/services/ocr2/plugins/ccip/ccipexec/ocr2.go
new file mode 100644
index 00000000000..4a09cf37b45
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipexec/ocr2.go
@@ -0,0 +1,845 @@
+package ccipexec
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "math/big"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/pkg/errors"
+ "golang.org/x/sync/errgroup"
+
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/statuschecker"
+)
+
+const (
+	// MaxExecutionReportLength caps the serialized execution report payload produced by the exec plugin.
+ MaxExecutionReportLength = 250_000
+
+ // MaxDataLenPerBatch limits the total length of msg data that can be in a batch.
+ MaxDataLenPerBatch = 60_000
+
+ // MaximumAllowedTokenDataWaitTimePerBatch defines the maximum time that is allowed
+ // for the plugin to wait for token data to be fetched from external providers per batch.
+ MaximumAllowedTokenDataWaitTimePerBatch = 2 * time.Second
+
+ // MessagesIterationStep limits number of messages fetched to memory at once when iterating through unexpired CommitRoots
+ MessagesIterationStep = 1024
+)
+
+var (
+ _ types.ReportingPluginFactory = &ExecutionReportingPluginFactory{}
+ _ types.ReportingPlugin = &ExecutionReportingPlugin{}
+)
+
+type ExecutionPluginStaticConfig struct {
+ lggr logger.Logger
+ onRampReader ccipdata.OnRampReader
+ offRampReader ccipdata.OffRampReader
+ commitStoreReader ccipdata.CommitStoreReader
+ sourcePriceRegistryProvider ccipdataprovider.PriceRegistry
+ sourceWrappedNativeToken cciptypes.Address
+ tokenDataWorker tokendata.Worker
+ destChainSelector uint64
+ priceRegistryProvider ccipdataprovider.PriceRegistry // destination price registry provider.
+ tokenPoolBatchedReader batchreader.TokenPoolBatchedReader
+ metricsCollector ccip.PluginMetricsCollector
+ chainHealthcheck cache.ChainHealthcheck
+ newReportingPluginRetryConfig ccipdata.RetryConfig
+ txmStatusChecker statuschecker.CCIPTransactionStatusChecker
+}
+
+type ExecutionReportingPlugin struct {
+ // Misc
+ F int
+ lggr logger.Logger
+ offchainConfig cciptypes.ExecOffchainConfig
+ tokenDataWorker tokendata.Worker
+ metricsCollector ccip.PluginMetricsCollector
+ batchingStrategy BatchingStrategy
+
+ // Source
+ gasPriceEstimator prices.GasPriceEstimatorExec
+ sourcePriceRegistry ccipdata.PriceRegistryReader
+ sourcePriceRegistryProvider ccipdataprovider.PriceRegistry
+ sourcePriceRegistryLock sync.RWMutex
+ sourceWrappedNativeToken cciptypes.Address
+ onRampReader ccipdata.OnRampReader
+
+ // Dest
+ commitStoreReader ccipdata.CommitStoreReader
+ destPriceRegistry ccipdata.PriceRegistryReader
+ destWrappedNative cciptypes.Address
+ onchainConfig cciptypes.ExecOnchainConfig
+ offRampReader ccipdata.OffRampReader
+ tokenPoolBatchedReader batchreader.TokenPoolBatchedReader
+
+ // State
+ inflightReports *inflightExecReportsContainer
+ commitRootsCache cache.CommitsRootsCache
+ chainHealthcheck cache.ChainHealthcheck
+}
+
+func (r *ExecutionReportingPlugin) Query(context.Context, types.ReportTimestamp) (types.Query, error) {
+ return types.Query{}, nil
+}
+
+func (r *ExecutionReportingPlugin) Observation(ctx context.Context, timestamp types.ReportTimestamp, query types.Query) (types.Observation, error) {
+ lggr := r.lggr.Named("ExecutionObservation")
+ if healthy, err := r.chainHealthcheck.IsHealthy(ctx); err != nil {
+ return nil, err
+ } else if !healthy {
+ return nil, ccip.ErrChainIsNotHealthy
+ }
+
+ // Ensure that the source price registry is synchronized with the onRamp.
+ if err := r.ensurePriceRegistrySynchronization(ctx); err != nil {
+ return nil, fmt.Errorf("ensuring price registry synchronization: %w", err)
+ }
+
+ // Expire any inflight reports.
+ r.inflightReports.expire(lggr)
+ inFlight := r.inflightReports.getAll()
+
+ executableObservations, err := r.getExecutableObservations(ctx, lggr, inFlight)
+ if err != nil {
+ return nil, err
+ }
+ // cap observations which fits MaxObservationLength (after serialized)
+ capped := sort.Search(len(executableObservations), func(i int) bool {
+ var encoded []byte
+ encoded, err = ccip.NewExecutionObservation(executableObservations[:i+1]).Marshal()
+ if err != nil {
+			// false makes Search keep looking to the right, always including any "erroring" ObservedMessage and allowing us to detect it at the bottom
+ return false
+ }
+ return len(encoded) > ccip.MaxObservationLength
+ })
+ if err != nil {
+ return nil, err
+ }
+ executableObservations = executableObservations[:capped]
+ r.metricsCollector.NumberOfMessagesProcessed(ccip.Observation, len(executableObservations))
+ lggr.Infow("Observation", "executableMessages", executableObservations)
+ // Note can be empty
+ return ccip.NewExecutionObservation(executableObservations).Marshal()
+}
+
+func (r *ExecutionReportingPlugin) getExecutableObservations(ctx context.Context, lggr logger.Logger, inflight []InflightInternalExecutionReport) ([]ccip.ObservedMessage, error) {
+ unexpiredReports, err := r.commitRootsCache.RootsEligibleForExecution(ctx)
+ if err != nil {
+ return nil, err
+ }
+ r.metricsCollector.UnexpiredCommitRoots(len(unexpiredReports))
+
+ if len(unexpiredReports) == 0 {
+ return []ccip.ObservedMessage{}, nil
+ }
+
+ getExecTokenData := cache.LazyFunction[execTokenData](func() (execTokenData, error) {
+ return r.prepareTokenExecData(ctx)
+ })
+
+ for j := 0; j < len(unexpiredReports); {
+ unexpiredReportsPart, step := selectReportsToFillBatch(unexpiredReports[j:], MessagesIterationStep)
+ j += step
+
+ unexpiredReportsWithSendReqs, err := r.getReportsWithSendRequests(ctx, unexpiredReportsPart)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, unexpiredReport := range unexpiredReportsWithSendReqs {
+ r.tokenDataWorker.AddJobsFromMsgs(ctx, unexpiredReport.sendRequestsWithMeta)
+ }
+
+ for _, rep := range unexpiredReportsWithSendReqs {
+ if ctx.Err() != nil {
+ lggr.Warn("Processing of roots killed by context")
+ break
+ }
+
+ merkleRoot := rep.commitReport.MerkleRoot
+
+ rootLggr := lggr.With("root", hexutil.Encode(merkleRoot[:]),
+ "minSeqNr", rep.commitReport.Interval.Min,
+ "maxSeqNr", rep.commitReport.Interval.Max,
+ )
+
+ if err := rep.validate(); err != nil {
+ rootLggr.Errorw("Skipping invalid report", "err", err)
+ continue
+ }
+
+ // If all messages are already executed and finalized, snooze the root for
+ // config.PermissionLessExecutionThresholdSeconds so it will never be considered again.
+ if allMsgsExecutedAndFinalized := rep.allRequestsAreExecutedAndFinalized(); allMsgsExecutedAndFinalized {
+ rootLggr.Infow("Snoozing root forever since there are no executable txs anymore", "root", hex.EncodeToString(merkleRoot[:]))
+ r.commitRootsCache.MarkAsExecuted(merkleRoot)
+ continue
+ }
+
+ blessed, err := r.commitStoreReader.IsBlessed(ctx, merkleRoot)
+ if err != nil {
+ return nil, err
+ }
+ if !blessed {
+ rootLggr.Infow("Report is accepted but not blessed")
+ continue
+ }
+
+ tokenExecData, err := getExecTokenData()
+ if err != nil {
+ return nil, err
+ }
+
+ batch, msgExecStates := r.buildBatch(
+ ctx,
+ inflight,
+ rootLggr,
+ rep,
+ tokenExecData.rateLimiterTokenBucket.Tokens,
+ tokenExecData.sourceTokenPrices,
+ tokenExecData.destTokenPrices,
+ tokenExecData.gasPrice,
+ tokenExecData.sourceToDestTokens)
+ if len(batch) != 0 {
+ lggr.Infow("Execution batch created", "batchSize", len(batch), "messageStates", msgExecStates)
+ return batch, nil
+ }
+ r.commitRootsCache.Snooze(merkleRoot)
+ }
+ }
+ return []ccip.ObservedMessage{}, nil
+}
+
+// Calculates a map that indicates whether a sequence number has already been executed.
+// It doesn't matter if the execution succeeded, since we don't retry previous
+// attempts even if they failed. Value in the map indicates whether the log is finalized or not.
+func (r *ExecutionReportingPlugin) getExecutedSeqNrsInRange(ctx context.Context, min, max uint64) (map[uint64]bool, error) {
+ stateChanges, err := r.offRampReader.GetExecutionStateChangesBetweenSeqNums(
+ ctx,
+ min,
+ max,
+ int(r.offchainConfig.DestOptimisticConfirmations),
+ )
+ if err != nil {
+ return nil, err
+ }
+ executedMp := make(map[uint64]bool, len(stateChanges))
+ for _, stateChange := range stateChanges {
+ executedMp[stateChange.SequenceNumber] = stateChange.TxMeta.IsFinalized()
+ }
+ return executedMp, nil
+}
+
+// Builds a batch of transactions that can be executed, takes into account
+// the available gas, rate limiting, execution state, nonce state, and
+// profitability of execution.
+func (r *ExecutionReportingPlugin) buildBatch(
+ ctx context.Context,
+ inflight []InflightInternalExecutionReport,
+ lggr logger.Logger,
+ report commitReportWithSendRequests,
+ aggregateTokenLimit *big.Int,
+ sourceTokenPricesUSD map[cciptypes.Address]*big.Int,
+ destTokenPricesUSD map[cciptypes.Address]*big.Int,
+ gasPrice *big.Int,
+ sourceToDestToken map[cciptypes.Address]cciptypes.Address,
+) ([]ccip.ObservedMessage, []messageExecStatus) {
+	// We assume that the next observation will start after the previous epoch's transmission, so nonces should already be updated onchain.
+	// Worst case, we will try to process the same message again; it will be skipped, but the protocol would progress anyway.
+ // We don't use inflightCache here to avoid cases in which inflight cache keeps progressing but due to transmission failures
+ // previous reports are not included onchain. That can lead to issues with IncorrectNonce skips,
+ // because we enforce sequential processing per sender (per sender's nonce ordering is enforced by Offramp contract)
+ sendersNonce, err := r.offRampReader.ListSenderNonces(ctx, report.uniqueSenders())
+ if err != nil {
+ lggr.Errorw("Fetching senders nonce", "err", err)
+ return []ccip.ObservedMessage{}, []messageExecStatus{}
+ }
+
+ inflightAggregateValue, err := getInflightAggregateRateLimit(lggr, inflight, destTokenPricesUSD, sourceToDestToken)
+ if err != nil {
+ lggr.Errorw("Unexpected error computing inflight values", "err", err)
+ return []ccip.ObservedMessage{}, nil
+ }
+
+ batchCtx := &BatchContext{
+ report,
+ inflight,
+ inflightAggregateValue,
+ lggr,
+ MaxDataLenPerBatch,
+ uint64(r.offchainConfig.BatchGasLimit),
+ make(map[cciptypes.Address]uint64),
+ sendersNonce,
+ sourceTokenPricesUSD,
+ destTokenPricesUSD,
+ gasPrice,
+ sourceToDestToken,
+ aggregateTokenLimit,
+ MaximumAllowedTokenDataWaitTimePerBatch,
+ r.tokenDataWorker,
+ r.gasPriceEstimator,
+ r.destWrappedNative,
+ r.offchainConfig,
+ }
+
+ return r.batchingStrategy.BuildBatch(ctx, batchCtx)
+}
+
+func calculateMessageMaxGas(gasLimit *big.Int, numRequests, dataLen, numTokens int) (uint64, error) {
+ if !gasLimit.IsUint64() {
+ return 0, fmt.Errorf("gas limit %s cannot be casted to uint64", gasLimit)
+ }
+
+ gasLimitU64 := gasLimit.Uint64()
+ gasOverHeadGas := maxGasOverHeadGas(numRequests, dataLen, numTokens)
+ messageMaxGas := gasLimitU64 + gasOverHeadGas
+
+ if messageMaxGas < gasLimitU64 || messageMaxGas < gasOverHeadGas {
+ return 0, fmt.Errorf("message max gas overflow, gasLimit=%d gasOverHeadGas=%d", gasLimitU64, gasOverHeadGas)
+ }
+
+ return messageMaxGas, nil
+}
+
+// getReportsWithSendRequests returns the target reports with populated send requests.
+func (r *ExecutionReportingPlugin) getReportsWithSendRequests(
+ ctx context.Context,
+ reports []cciptypes.CommitStoreReport,
+) ([]commitReportWithSendRequests, error) {
+ if len(reports) == 0 {
+ return nil, nil
+ }
+
+ // find interval from all the reports
+ intervalMin := reports[0].Interval.Min
+ intervalMax := reports[0].Interval.Max
+ for _, report := range reports[1:] {
+ if report.Interval.Max > intervalMax {
+ intervalMax = report.Interval.Max
+ }
+ if report.Interval.Min < intervalMin {
+ intervalMin = report.Interval.Min
+ }
+ }
+
+ // use errgroup to fetch send request logs and executed sequence numbers in parallel
+ eg := &errgroup.Group{}
+
+ var sendRequests []cciptypes.EVM2EVMMessageWithTxMeta
+ eg.Go(func() error {
+ // We don't need to double-check if logs are finalized because we already checked that in the Commit phase.
+ sendReqs, err := r.onRampReader.GetSendRequestsBetweenSeqNums(ctx, intervalMin, intervalMax, false)
+ if err != nil {
+ return err
+ }
+ sendRequests = sendReqs
+ return nil
+ })
+
+ var executedSeqNums map[uint64]bool
+ eg.Go(func() error {
+ // get executed sequence numbers
+ executedMp, err := r.getExecutedSeqNrsInRange(ctx, intervalMin, intervalMax)
+ if err != nil {
+ return err
+ }
+ executedSeqNums = executedMp
+ return nil
+ })
+
+ if err := eg.Wait(); err != nil {
+ return nil, err
+ }
+
+ reportsWithSendReqs := make([]commitReportWithSendRequests, len(reports))
+ for i, report := range reports {
+ reportsWithSendReqs[i] = commitReportWithSendRequests{
+ commitReport: report,
+ sendRequestsWithMeta: make([]cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, 0, report.Interval.Max-report.Interval.Min+1),
+ }
+ }
+
+ for _, sendReq := range sendRequests {
+ // if value exists in the map then it's executed
+ // if value exists, and it's true then it's considered finalized
+ finalized, executed := executedSeqNums[sendReq.SequenceNumber]
+
+ reqWithMeta := cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+ EVM2EVMMessage: sendReq.EVM2EVMMessage,
+ BlockTimestamp: time.UnixMilli(sendReq.BlockTimestampUnixMilli),
+ Executed: executed,
+ Finalized: finalized,
+ LogIndex: uint(sendReq.LogIndex),
+ TxHash: sendReq.TxHash,
+ }
+
+ // attach the msg to the appropriate reports
+ for i := range reportsWithSendReqs {
+ if reportsWithSendReqs[i].sendReqFits(reqWithMeta) {
+ reportsWithSendReqs[i].sendRequestsWithMeta = append(reportsWithSendReqs[i].sendRequestsWithMeta, reqWithMeta)
+ }
+ }
+ }
+
+ return reportsWithSendReqs, nil
+}
+
+// Assumes non-empty report. Messages to execute can span more than one report, but are assumed to be in order of increasing
+// sequence number.
+func (r *ExecutionReportingPlugin) buildReport(ctx context.Context, lggr logger.Logger, observedMessages []ccip.ObservedMessage) ([]byte, error) {
+ if err := validateSeqNumbers(ctx, r.commitStoreReader, observedMessages); err != nil {
+ return nil, err
+ }
+ commitReport, err := getCommitReportForSeqNum(ctx, r.commitStoreReader, observedMessages[0].SeqNr)
+ if err != nil {
+ return nil, err
+ }
+ lggr.Infow("Building execution report", "observations", observedMessages, "merkleRoot", hexutil.Encode(commitReport.MerkleRoot[:]), "report", commitReport)
+
+ sendReqsInRoot, _, tree, err := getProofData(ctx, r.onRampReader, commitReport.Interval)
+ if err != nil {
+ return nil, err
+ }
+
+ // cap messages which fits MaxExecutionReportLength (after serialized)
+ capped := sort.Search(len(observedMessages), func(i int) bool {
+ report, err2 := buildExecutionReportForMessages(sendReqsInRoot, tree, commitReport.Interval, observedMessages[:i+1])
+ if err2 != nil {
+ r.lggr.Errorw("build execution report", "err", err2)
+ return false
+ }
+
+ encoded, err2 := r.offRampReader.EncodeExecutionReport(ctx, report)
+ if err2 != nil {
+			// false makes Search keep looking to the right, always including any "erroring" ObservedMessage and allowing us to detect it at the bottom
+ return false
+ }
+ return len(encoded) > MaxExecutionReportLength
+ })
+
+ execReport, err := buildExecutionReportForMessages(sendReqsInRoot, tree, commitReport.Interval, observedMessages[:capped])
+ if err != nil {
+ return nil, err
+ }
+
+ encodedReport, err := r.offRampReader.EncodeExecutionReport(ctx, execReport)
+ if err != nil {
+ return nil, err
+ }
+
+ if capped < len(observedMessages) {
+ lggr.Warnf(
+ "Capping report to fit MaxExecutionReportLength: msgsCount %d -> %d, bytes %d, bytesLimit %d",
+ len(observedMessages), capped, len(encodedReport), MaxExecutionReportLength,
+ )
+ }
+	// Double-check that this report verifies before sending.
+ valid, err := r.commitStoreReader.VerifyExecutionReport(ctx, execReport)
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to verify")
+ }
+ if !valid {
+ return nil, errors.New("root does not verify")
+ }
+ if len(execReport.Messages) > 0 {
+ r.metricsCollector.NumberOfMessagesProcessed(ccip.Report, len(execReport.Messages))
+ r.metricsCollector.SequenceNumber(ccip.Report, execReport.Messages[len(execReport.Messages)-1].SequenceNumber)
+ }
+ return encodedReport, nil
+}
+
+func (r *ExecutionReportingPlugin) Report(ctx context.Context, timestamp types.ReportTimestamp, query types.Query, observations []types.AttributedObservation) (bool, types.Report, error) {
+ lggr := r.lggr.Named("ExecutionReport")
+ if healthy, err := r.chainHealthcheck.IsHealthy(ctx); err != nil {
+ return false, nil, err
+ } else if !healthy {
+ return false, nil, ccip.ErrChainIsNotHealthy
+ }
+ parsableObservations := ccip.GetParsableObservations[ccip.ExecutionObservation](lggr, observations)
+ // Need at least F+1 observations
+ if len(parsableObservations) <= r.F {
+ lggr.Warn("Non-empty observations <= F, need at least F+1 to continue")
+ return false, nil, nil
+ }
+
+ observedMessages, err := calculateObservedMessagesConsensus(parsableObservations, r.F)
+ if err != nil {
+ return false, nil, err
+ }
+ if len(observedMessages) == 0 {
+ return false, nil, nil
+ }
+
+ report, err := r.buildReport(ctx, lggr, observedMessages)
+ if err != nil {
+ return false, nil, err
+ }
+ lggr.Infow("Report", "executableObservations", observedMessages)
+ return true, report, nil
+}
+
+type tallyKey struct {
+ seqNr uint64
+ tokenDataHash [32]byte
+}
+
+type tallyVal struct {
+ tally int
+ tokenData [][]byte
+}
+
+func calculateObservedMessagesConsensus(observations []ccip.ExecutionObservation, f int) ([]ccip.ObservedMessage, error) {
+ tally := make(map[tallyKey]tallyVal)
+ for _, obs := range observations {
+ for seqNr, msgData := range obs.Messages {
+ tokenDataHash, err := hashutil.BytesOfBytesKeccak(msgData.TokenData)
+ if err != nil {
+ return nil, fmt.Errorf("bytes of bytes keccak: %w", err)
+ }
+
+ key := tallyKey{seqNr: seqNr, tokenDataHash: tokenDataHash}
+ if val, ok := tally[key]; ok {
+ tally[key] = tallyVal{tally: val.tally + 1, tokenData: msgData.TokenData}
+ } else {
+ tally[key] = tallyVal{tally: 1, tokenData: msgData.TokenData}
+ }
+ }
+ }
+
+ // We might have different token data for the same sequence number.
+ // For that purpose we want to keep the token data with the most occurrences.
+ seqNumTally := make(map[uint64]tallyVal)
+
+ // order tally keys to make looping over the entries deterministic
+ tallyKeys := make([]tallyKey, 0, len(tally))
+ for key := range tally {
+ tallyKeys = append(tallyKeys, key)
+ }
+ sort.Slice(tallyKeys, func(i, j int) bool {
+ return hex.EncodeToString(tallyKeys[i].tokenDataHash[:]) < hex.EncodeToString(tallyKeys[j].tokenDataHash[:])
+ })
+
+ for _, key := range tallyKeys {
+ tallyInfo := tally[key]
+ existingTally, exists := seqNumTally[key.seqNr]
+ if tallyInfo.tally > f && (!exists || tallyInfo.tally > existingTally.tally) {
+ seqNumTally[key.seqNr] = tallyInfo
+ }
+ }
+
+ finalSequenceNumbers := make([]ccip.ObservedMessage, 0, len(seqNumTally))
+ for seqNr, tallyInfo := range seqNumTally {
+ finalSequenceNumbers = append(finalSequenceNumbers, ccip.NewObservedMessage(seqNr, tallyInfo.tokenData))
+ }
+ // buildReport expects sorted sequence numbers (tally map is non-deterministic).
+ sort.Slice(finalSequenceNumbers, func(i, j int) bool {
+ return finalSequenceNumbers[i].SeqNr < finalSequenceNumbers[j].SeqNr
+ })
+ return finalSequenceNumbers, nil
+}
+
+func (r *ExecutionReportingPlugin) ShouldAcceptFinalizedReport(ctx context.Context, timestamp types.ReportTimestamp, report types.Report) (bool, error) {
+ lggr := r.lggr.Named("ShouldAcceptFinalizedReport")
+ execReport, err := r.offRampReader.DecodeExecutionReport(ctx, report)
+ if err != nil {
+ lggr.Errorw("Unable to decode report", "err", err)
+ return false, err
+ }
+ lggr = lggr.With("messageIDs", ccipcommon.GetMessageIDsAsHexString(execReport.Messages))
+
+ if healthy, err1 := r.chainHealthcheck.IsHealthy(ctx); err1 != nil {
+ return false, err1
+ } else if !healthy {
+ return false, ccip.ErrChainIsNotHealthy
+ }
+ // If the first message is executed already, this execution report is stale, and we do not accept it.
+ stale, err := r.isStaleReport(ctx, execReport.Messages)
+ if err != nil {
+ return false, err
+ }
+ if stale {
+ lggr.Info("Execution report is stale")
+ return false, nil
+ }
+ // Else just assume in flight
+ if err = r.inflightReports.add(lggr, execReport.Messages); err != nil {
+ return false, err
+ }
+ if len(execReport.Messages) > 0 {
+ r.metricsCollector.SequenceNumber(ccip.ShouldAccept, execReport.Messages[len(execReport.Messages)-1].SequenceNumber)
+ }
+ lggr.Info("Accepting finalized report")
+ return true, nil
+}
+
+// ShouldTransmitAcceptedReport implements the OCR2 ReportingPlugin interface. It decides
+// whether an already-accepted execution report should still be transmitted on-chain.
+//
+// Returns (false, nil) when the report cannot be decoded or is stale (its first message
+// was already executed); returns an error only for health-check or state-lookup failures.
+func (r *ExecutionReportingPlugin) ShouldTransmitAcceptedReport(ctx context.Context, timestamp types.ReportTimestamp, report types.Report) (bool, error) {
+	lggr := r.lggr.Named("ShouldTransmitAcceptedReport")
+	execReport, err := r.offRampReader.DecodeExecutionReport(ctx, report)
+	if err != nil {
+		// NOTE(review): decode failures are swallowed here (skip transmission, no error),
+		// unlike ShouldAcceptFinalizedReport which propagates them — confirm this asymmetry
+		// is intended.
+		lggr.Errorw("Unable to decode report", "err", err)
+		return false, nil
+	}
+	lggr = lggr.With("messageIDs", ccipcommon.GetMessageIDsAsHexString(execReport.Messages))
+
+	if healthy, err1 := r.chainHealthcheck.IsHealthy(ctx); err1 != nil {
+		return false, err1
+	} else if !healthy {
+		return false, ccip.ErrChainIsNotHealthy
+	}
+	// If report is not stale we transmit.
+	// When the executeTransmitter enqueues the tx for tx manager,
+	// we mark it as execution_sent, removing it from the set of inflight messages.
+	stale, err := r.isStaleReport(ctx, execReport.Messages)
+	if err != nil {
+		return false, err
+	}
+	if stale {
+		lggr.Info("Execution report is stale")
+		return false, nil
+	}
+
+	lggr.Info("Transmitting finalized report")
+	// err is necessarily nil at this point; return nil explicitly instead of the
+	// leftover err variable, which previously read as if an error could be returned here.
+	return true, nil
+}
+
+// isStaleReport reports whether the given execution report is stale, i.e. its first
+// message has already been executed on the destination (or the report carries no messages).
+func (r *ExecutionReportingPlugin) isStaleReport(ctx context.Context, messages []cciptypes.EVM2EVMMessage) (bool, error) {
+	if len(messages) == 0 {
+		return true, fmt.Errorf("messages are empty")
+	}
+
+	// Only the first message needs checking: the default execution state — including for
+	// arbitrary sequence numbers not yet committed — is ExecutionStateUntouched.
+	rawState, err := r.offRampReader.GetExecutionState(ctx, messages[0].SequenceNumber)
+	if err != nil {
+		return true, err
+	}
+
+	switch cciptypes.MessageExecutionState(rawState) {
+	case cciptypes.ExecutionStateFailure, cciptypes.ExecutionStateSuccess:
+		// Already executed (successfully or not) — the report is stale.
+		return true, nil
+	default:
+		return false, nil
+	}
+}
+
+// Close implements the OCR2 ReportingPlugin interface. The plugin holds no resources of
+// its own to release here, so this is a no-op.
+func (r *ExecutionReportingPlugin) Close() error {
+	return nil
+}
+
+// getInflightAggregateRateLimit sums the value of every token transfer carried by the
+// in-flight execution reports, priced with the destination-chain token prices. Any error
+// from valuing a single message aborts the whole computation.
+func getInflightAggregateRateLimit(
+	lggr logger.Logger,
+	inflight []InflightInternalExecutionReport,
+	destTokenPrices map[cciptypes.Address]*big.Int,
+	sourceToDest map[cciptypes.Address]cciptypes.Address,
+) (*big.Int, error) {
+	total := new(big.Int)
+
+	for _, report := range inflight {
+		for _, msg := range report.messages {
+			value, err := aggregateTokenValue(lggr, destTokenPrices, sourceToDest, msg.TokenAmounts)
+			if err != nil {
+				return nil, err
+			}
+			total.Add(total, value)
+		}
+	}
+	return total, nil
+}
+
+// getTokensPrices returns the price of each requested token as reported by the given
+// price registry. Price values are USD per 1e18 of the smallest token denomination, in
+// base units of 1e18 (e.g. $5 = 5e18 USD per 1e18 units). This function is used for the
+// price registry of both the source and the destination chain. It errors if the registry
+// returns the wrong number of prices, a zero price, or conflicting prices for one token.
+func getTokensPrices(ctx context.Context, priceRegistry ccipdata.PriceRegistryReader, tokens []cciptypes.Address) (map[cciptypes.Address]*big.Int, error) {
+	fetchedPrices, err := priceRegistry.GetTokenPrices(ctx, tokens)
+	if err != nil {
+		return nil, errors.Wrapf(err, "could not get token prices of %v", tokens)
+	}
+
+	// The registry must return exactly one price per requested token, in input order.
+	if len(fetchedPrices) != len(tokens) {
+		return nil, fmt.Errorf("token prices length exp=%d actual=%d", len(tokens), len(fetchedPrices))
+	}
+
+	result := make(map[cciptypes.Address]*big.Int)
+	for i, token := range tokens {
+		fetched := fetchedPrices[i].Value
+
+		// The price of a token can never legitimately be zero.
+		if fetched.BitLen() == 0 {
+			priceRegistryAddress, err := priceRegistry.Address(ctx)
+			if err != nil {
+				return nil, fmt.Errorf("get price registry address: %w", err)
+			}
+			return nil, fmt.Errorf("price of token %s is zero (price registry=%s)", token, priceRegistryAddress)
+		}
+
+		// The registry must not report two different prices for the same token.
+		if existing, ok := result[token]; ok && fetched.Cmp(existing) != 0 {
+			return nil, fmt.Errorf("price registry reported different prices (%s and %s) for the same token %s",
+				fetched, existing, token)
+		}
+
+		result[token] = fetched
+	}
+
+	return result, nil
+}
+
+// execTokenData bundles the token-related pre-execution chain state gathered by
+// prepareTokenExecData, so it can be fetched once per round instead of per message.
+type execTokenData struct {
+	// rateLimiterTokenBucket is the offRamp's aggregate rate limiter state as of the last block.
+	rateLimiterTokenBucket cciptypes.TokenBucketRateLimit
+	// sourceTokenPrices maps source-chain token address -> USD price (1e18 base units).
+	sourceTokenPrices map[cciptypes.Address]*big.Int
+	// destTokenPrices maps destination-chain token address -> USD price (1e18 base units).
+	destTokenPrices map[cciptypes.Address]*big.Int
+	// sourceToDestTokens maps each source token to its destination-chain counterpart.
+	sourceToDestTokens map[cciptypes.Address]cciptypes.Address
+	// gasPrice is the current gas price from the gas price estimator.
+	gasPrice *big.Int
+}
+
+// prepareTokenExecData gathers all the pre-execution data needed for token execution into a single call.
+// This is done to avoid fetching the data multiple times for each message. Additionally, most of the RPC calls
+// within this function are cached, so it should be relatively fast and not require any RPC batching.
+func (r *ExecutionReportingPlugin) prepareTokenExecData(ctx context.Context) (execTokenData, error) {
+	// This could result in slightly different values on each call as
+	// the function returns the allowed amount at the time of the last block.
+	// Since this will only increase over time, the highest observed value will
+	// always be the lower bound of what would be available on chain
+	// since we already account for inflight txs.
+	rateLimiterTokenBucket, err := r.offRampReader.CurrentRateLimiterState(ctx)
+	if err != nil {
+		return execTokenData{}, err
+	}
+
+	// Source-side prices: fee tokens plus the source wrapped native token.
+	sourceFeeTokens, err := r.sourcePriceRegistry.GetFeeTokens(ctx)
+	if err != nil {
+		return execTokenData{}, fmt.Errorf("get source fee tokens: %w", err)
+	}
+	sourceTokensPrices, err := getTokensPrices(
+		ctx,
+		r.sourcePriceRegistry,
+		ccipcommon.FlattenUniqueSlice(
+			sourceFeeTokens,
+			[]cciptypes.Address{r.sourceWrappedNativeToken},
+		),
+	)
+	if err != nil {
+		return execTokenData{}, err
+	}
+
+	// Destination-side prices: fee tokens, bridged tokens, and the dest wrapped native token.
+	destFeeTokens, destBridgedTokens, err := ccipcommon.GetDestinationTokens(ctx, r.offRampReader, r.destPriceRegistry)
+	if err != nil {
+		return execTokenData{}, fmt.Errorf("get destination tokens: %w", err)
+	}
+	destTokenPrices, err := getTokensPrices(
+		ctx,
+		r.destPriceRegistry,
+		ccipcommon.FlattenUniqueSlice(
+			destFeeTokens,
+			destBridgedTokens,
+			[]cciptypes.Address{r.destWrappedNative},
+		),
+	)
+	if err != nil {
+		return execTokenData{}, err
+	}
+
+	sourceToDestTokens, err := r.offRampReader.GetSourceToDestTokensMapping(ctx)
+	if err != nil {
+		return execTokenData{}, err
+	}
+
+	gasPrice, err := r.gasPriceEstimator.GetGasPrice(ctx)
+	if err != nil {
+		return execTokenData{}, err
+	}
+
+	return execTokenData{
+		rateLimiterTokenBucket: rateLimiterTokenBucket,
+		sourceTokenPrices:      sourceTokensPrices,
+		sourceToDestTokens:     sourceToDestTokens,
+		destTokenPrices:        destTokenPrices,
+		gasPrice:               gasPrice,
+	}, nil
+}
+
+// ensurePriceRegistrySynchronization ensures that the source price registry points to the same as the one configured on the onRamp.
+// This is required since the price registry address on the onRamp can change over time.
+func (r *ExecutionReportingPlugin) ensurePriceRegistrySynchronization(ctx context.Context) error {
+	// The onRamp reader is not protected by sourcePriceRegistryLock, so this lookup can
+	// happen before taking any lock.
+	priceRegistryAddress, err := r.onRampReader.SourcePriceRegistryAddress(ctx)
+	if err != nil {
+		return fmt.Errorf("getting price registry from onramp: %w", err)
+	}
+
+	// Compare against the currently-configured registry under the read lock. Done in a
+	// closure so the deferred RUnlock covers every exit path — the previous version
+	// returned on the Address() error without releasing the read lock, leaking it.
+	needPriceRegistryUpdate, err := func() (bool, error) {
+		r.sourcePriceRegistryLock.RLock()
+		defer r.sourcePriceRegistryLock.RUnlock()
+
+		if r.sourcePriceRegistry == nil {
+			return true, nil
+		}
+		currentPriceRegistryAddress, err1 := r.sourcePriceRegistry.Address(ctx)
+		if err1 != nil {
+			return false, fmt.Errorf("get current priceregistry address: %w", err1)
+		}
+		return priceRegistryAddress != currentPriceRegistryAddress, nil
+	}()
+	if err != nil {
+		return err
+	}
+	if !needPriceRegistryUpdate {
+		return nil
+	}
+
+	// Update the price registry if required.
+	r.sourcePriceRegistryLock.Lock()
+	defer r.sourcePriceRegistryLock.Unlock()
+
+	// Price registry address changed or not initialized yet, updating source price registry.
+	sourcePriceRegistry, err := r.sourcePriceRegistryProvider.NewPriceRegistryReader(ctx, priceRegistryAddress)
+	if err != nil {
+		return err
+	}
+	oldPriceRegistry := r.sourcePriceRegistry
+	r.sourcePriceRegistry = sourcePriceRegistry
+	// Close the old price registry; a failure here is logged but not fatal.
+	if oldPriceRegistry != nil {
+		if err1 := oldPriceRegistry.Close(); err1 != nil {
+			r.lggr.Warnw("failed to close old price registry", "err", err1)
+		}
+	}
+	return nil
+}
+
+// selectReportsToFillBatch returns the reports to fill the message limit. Single Commit Root contains exactly (Interval.Max - Interval.Min + 1) messages.
+// We keep adding reports until we reach the message limit. Please see the tests for more examples and edge cases.
+// unexpiredReports have to be sorted by Interval.Min. Otherwise, the batching logic will not be efficient,
+// because it picks messages and execution states based on the report[0].Interval.Min - report[len-1].Interval.Max range.
+// Having unexpiredReports not sorted properly will lead to fetching more messages and execution states to the memory than the messagesLimit provided.
+// However, logs from LogPoller are returned ordered by (block_number, log_index), so it should preserve the order of Interval.Min.
+// Single CommitRoot can have up to 256 messages, with current MessagesIterationStep of 1024, it means processing 4 CommitRoots at once.
+func selectReportsToFillBatch(unexpiredReports []cciptypes.CommitStoreReport, messagesLimit uint64) ([]cciptypes.CommitStoreReport, int) {
+ currentNumberOfMessages := uint64(0)
+ nbReports := 0
+ for _, report := range unexpiredReports {
+ reportMsgCount := report.Interval.Max - report.Interval.Min + 1
+ if currentNumberOfMessages+reportMsgCount > messagesLimit {
+ break
+ }
+ currentNumberOfMessages += reportMsgCount
+ nbReports++
+ }
+ return unexpiredReports[:nbReports], nbReports
+}
diff --git a/core/services/ocr2/plugins/ccip/ccipexec/ocr2_test.go b/core/services/ocr2/plugins/ccip/ccipexec/ocr2_test.go
new file mode 100644
index 00000000000..84cb73c6643
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/ccipexec/ocr2_test.go
@@ -0,0 +1,1421 @@
+package ccipexec
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "math"
+ "math/big"
+ "reflect"
+ "sort"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/cometbft/cometbft/libs/rand"
+ mapset "github.com/deckarep/golang-set/v2"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/pkg/errors"
+ "github.com/smartcontractkit/libocr/commontypes"
+ "github.com/smartcontractkit/libocr/offchainreporting2/types"
+ ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ lpMocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+ ccipcachemocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader"
+ ccipdataprovidermocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/mocks"
+ ccipdatamocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata"
+
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+)
+
+// TestExecutionReportingPlugin_Observation exercises the Observation phase against fully
+// mocked readers. The table covers the error cases (commit store down, source chain
+// cursed, source/dest chain unhealthy) and one happy flow with unexpired commit reports
+// and matching send requests.
+func TestExecutionReportingPlugin_Observation(t *testing.T) {
+	testCases := []struct {
+		name               string
+		commitStorePaused  bool
+		sourceChainCursed  bool
+		inflightReports    []InflightInternalExecutionReport
+		unexpiredReports   []cciptypes.CommitStoreReportWithTxMeta
+		sendRequests       []cciptypes.EVM2EVMMessageWithTxMeta
+		executedSeqNums    []uint64
+		tokenPoolsMapping  map[common.Address]common.Address
+		blessedRoots       map[[32]byte]bool
+		senderNonce        uint64
+		rateLimiterState   cciptypes.TokenBucketRateLimit
+		expErr             bool
+		sourceChainHealthy bool
+		destChainHealthy   bool
+	}{
+		{
+			name:               "commit store is down",
+			commitStorePaused:  true,
+			sourceChainCursed:  false,
+			sourceChainHealthy: true,
+			destChainHealthy:   true,
+			expErr:             true,
+		},
+		{
+			name:               "source chain is cursed",
+			commitStorePaused:  false,
+			sourceChainCursed:  true,
+			sourceChainHealthy: true,
+			destChainHealthy:   true,
+			expErr:             true,
+		},
+		{
+			name:               "source chain not healthy",
+			commitStorePaused:  false,
+			sourceChainCursed:  false,
+			sourceChainHealthy: false,
+			destChainHealthy:   true,
+			expErr:             true,
+		},
+		{
+			name:               "dest chain not healthy",
+			commitStorePaused:  false,
+			sourceChainCursed:  false,
+			sourceChainHealthy: true,
+			destChainHealthy:   false,
+			expErr:             true,
+		},
+		{
+			name:               "happy flow",
+			commitStorePaused:  false,
+			sourceChainCursed:  false,
+			sourceChainHealthy: true,
+			destChainHealthy:   true,
+			inflightReports:    []InflightInternalExecutionReport{},
+			unexpiredReports: []cciptypes.CommitStoreReportWithTxMeta{
+				{
+					CommitStoreReport: cciptypes.CommitStoreReport{
+						Interval:   cciptypes.CommitStoreInterval{Min: 10, Max: 12},
+						MerkleRoot: [32]byte{123},
+					},
+				},
+			},
+			blessedRoots: map[[32]byte]bool{
+				{123}: true,
+			},
+			rateLimiterState: cciptypes.TokenBucketRateLimit{
+				IsEnabled: false,
+			},
+			tokenPoolsMapping: map[common.Address]common.Address{},
+			senderNonce:       9,
+			sendRequests: []cciptypes.EVM2EVMMessageWithTxMeta{
+				{
+					EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 10, GasLimit: big.NewInt(0)},
+				},
+				{
+					EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 11, GasLimit: big.NewInt(0)},
+				},
+				{
+					EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 12, GasLimit: big.NewInt(0)},
+				},
+			},
+		},
+	}
+
+	ctx := testutils.Context(t)
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			// Build the plugin directly with the fields Observation touches; all reader
+			// dependencies are mocks. `.Maybe()` keeps expectations optional so the
+			// early-error cases don't fail on unfulfilled mocks.
+			p := &ExecutionReportingPlugin{}
+			p.inflightReports = newInflightExecReportsContainer(time.Minute)
+			p.inflightReports.reports = tc.inflightReports
+			p.lggr = logger.TestLogger(t)
+			p.tokenDataWorker = tokendata.NewBackgroundWorker(
+				make(map[cciptypes.Address]tokendata.Reader), 10, 5*time.Second, time.Hour)
+			p.metricsCollector = ccip.NoopMetricsCollector
+
+			commitStoreReader := ccipdatamocks.NewCommitStoreReader(t)
+			commitStoreReader.On("IsDown", mock.Anything).Return(tc.commitStorePaused, nil).Maybe()
+			commitStoreReader.On("IsDestChainHealthy", mock.Anything).Return(tc.destChainHealthy, nil).Maybe()
+			// Blessed roots return true
+			for root, blessed := range tc.blessedRoots {
+				commitStoreReader.On("IsBlessed", mock.Anything, root).Return(blessed, nil).Maybe()
+			}
+			commitStoreReader.On("GetAcceptedCommitReportsGteTimestamp", ctx, mock.Anything, 0).
+				Return(tc.unexpiredReports, nil).Maybe()
+			p.commitStoreReader = commitStoreReader
+
+			var executionEvents []cciptypes.ExecutionStateChangedWithTxMeta
+			for _, seqNum := range tc.executedSeqNums {
+				executionEvents = append(executionEvents, cciptypes.ExecutionStateChangedWithTxMeta{
+					ExecutionStateChanged: cciptypes.ExecutionStateChanged{SequenceNumber: seqNum},
+				})
+			}
+
+			offRamp, _ := testhelpers.NewFakeOffRamp(t)
+			offRamp.SetRateLimiterState(tc.rateLimiterState)
+
+			tokenPoolBatchedReader, err := batchreader.NewEVMTokenPoolBatchedReader(p.lggr, 0, ccipcalc.EvmAddrToGeneric(offRamp.Address()), nil)
+			assert.NoError(t, err)
+			p.tokenPoolBatchedReader = tokenPoolBatchedReader
+
+			mockOffRampReader := ccipdatamocks.NewOffRampReader(t)
+			mockOffRampReader.On("GetExecutionStateChangesBetweenSeqNums", ctx, mock.Anything, mock.Anything, 0).
+				Return(executionEvents, nil).Maybe()
+			mockOffRampReader.On("CurrentRateLimiterState", mock.Anything).Return(tc.rateLimiterState, nil).Maybe()
+			mockOffRampReader.On("Address", ctx).Return(cciptypes.Address(offRamp.Address().String()), nil).Maybe()
+			senderNonces := map[cciptypes.Address]uint64{
+				cciptypes.Address(utils.RandomAddress().String()): tc.senderNonce,
+			}
+			mockOffRampReader.On("ListSenderNonces", mock.Anything, mock.Anything).Return(senderNonces, nil).Maybe()
+			mockOffRampReader.On("GetTokenPoolsRateLimits", ctx, []ccipdata.TokenPoolReader{}).
+				Return([]cciptypes.TokenBucketRateLimit{}, nil).Maybe()
+
+			mockOffRampReader.On("GetSourceToDestTokensMapping", ctx).Return(nil, nil).Maybe()
+			mockOffRampReader.On("GetTokens", ctx).Return(cciptypes.OffRampTokens{
+				DestinationTokens: []cciptypes.Address{},
+				SourceTokens:      []cciptypes.Address{},
+			}, nil).Maybe()
+			p.offRampReader = mockOffRampReader
+
+			mockOnRampReader := ccipdatamocks.NewOnRampReader(t)
+			mockOnRampReader.On("IsSourceCursed", ctx).Return(tc.sourceChainCursed, nil).Maybe()
+			mockOnRampReader.On("IsSourceChainHealthy", ctx).Return(tc.sourceChainHealthy, nil).Maybe()
+			mockOnRampReader.On("GetSendRequestsBetweenSeqNums", ctx, mock.Anything, mock.Anything, false).
+				Return(tc.sendRequests, nil).Maybe()
+			sourcePriceRegistryAddress := cciptypes.Address(utils.RandomAddress().String())
+			mockOnRampReader.On("SourcePriceRegistryAddress", ctx).Return(sourcePriceRegistryAddress, nil).Maybe()
+			p.onRampReader = mockOnRampReader
+
+			mockGasPriceEstimator := prices.NewMockGasPriceEstimatorExec(t)
+			mockGasPriceEstimator.On("GetGasPrice", ctx).Return(big.NewInt(1), nil).Maybe()
+			p.gasPriceEstimator = mockGasPriceEstimator
+
+			// Both price registries report a non-zero price so getTokensPrices succeeds.
+			destPriceRegReader := ccipdatamocks.NewPriceRegistryReader(t)
+			destPriceRegReader.On("GetTokenPrices", ctx, mock.Anything).Return(
+				[]cciptypes.TokenPriceUpdate{{TokenPrice: cciptypes.TokenPrice{Token: ccipcalc.HexToAddress("0x1"), Value: big.NewInt(123)}, TimestampUnixSec: big.NewInt(time.Now().Unix())}}, nil).Maybe()
+			destPriceRegReader.On("Address", ctx).Return(cciptypes.Address(utils.RandomAddress().String()), nil).Maybe()
+			destPriceRegReader.On("GetFeeTokens", ctx).Return([]cciptypes.Address{}, nil).Maybe()
+			sourcePriceRegReader := ccipdatamocks.NewPriceRegistryReader(t)
+			sourcePriceRegReader.On("Address", ctx).Return(sourcePriceRegistryAddress, nil).Maybe()
+			sourcePriceRegReader.On("GetFeeTokens", ctx).Return([]cciptypes.Address{}, nil).Maybe()
+			sourcePriceRegReader.On("GetTokenPrices", ctx, mock.Anything).Return(
+				[]cciptypes.TokenPriceUpdate{{TokenPrice: cciptypes.TokenPrice{Token: ccipcalc.HexToAddress("0x1"), Value: big.NewInt(123)}, TimestampUnixSec: big.NewInt(time.Now().Unix())}}, nil).Maybe()
+			p.destPriceRegistry = destPriceRegReader
+
+			mockOnRampPriceRegistryProvider := ccipdataprovidermocks.NewPriceRegistry(t)
+			mockOnRampPriceRegistryProvider.On("NewPriceRegistryReader", ctx, sourcePriceRegistryAddress).Return(sourcePriceRegReader, nil).Maybe()
+			p.sourcePriceRegistryProvider = mockOnRampPriceRegistryProvider
+
+			p.commitRootsCache = cache.NewCommitRootsCache(logger.TestLogger(t), commitStoreReader, time.Minute, time.Minute)
+			p.chainHealthcheck = cache.NewChainHealthcheck(p.lggr, mockOnRampReader, commitStoreReader)
+
+			bs := &BestEffortBatchingStrategy{}
+			p.batchingStrategy = bs
+
+			_, err = p.Observation(ctx, types.ReportTimestamp{}, types.Query{})
+			if tc.expErr {
+				assert.Error(t, err)
+				return
+			}
+			assert.NoError(t, err)
+		})
+	}
+}
+
+// TestExecutionReportingPlugin_Report checks the Report phase's consensus handling: with
+// too few observations for the configured F, or with zero observations, no report is
+// produced and no error is returned.
+func TestExecutionReportingPlugin_Report(t *testing.T) {
+	testCases := []struct {
+		name            string
+		f               int
+		committedSeqNum uint64
+		observations    []ccip.ExecutionObservation
+
+		expectingSomeReport bool
+		expectedReport      cciptypes.ExecReport
+		expectingSomeErr    bool
+	}{
+		{
+			name:            "not enough observations to form consensus",
+			f:               5,
+			committedSeqNum: 5,
+			observations: []ccip.ExecutionObservation{
+				{Messages: map[uint64]ccip.MsgData{3: {}, 4: {}}},
+				{Messages: map[uint64]ccip.MsgData{3: {}, 4: {}}},
+			},
+			expectingSomeErr:    false,
+			expectingSomeReport: false,
+		},
+		{
+			name:                "zero observations",
+			f:                   0,
+			committedSeqNum:     5,
+			observations:        []ccip.ExecutionObservation{},
+			expectingSomeErr:    false,
+			expectingSomeReport: false,
+		},
+	}
+
+	ctx := testutils.Context(t)
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			p := ExecutionReportingPlugin{}
+			p.lggr = logger.TestLogger(t)
+			p.F = tc.f
+
+			p.commitStoreReader = ccipdatamocks.NewCommitStoreReader(t)
+			chainHealthcheck := ccipcachemocks.NewChainHealthcheck(t)
+			chainHealthcheck.On("IsHealthy", ctx).Return(true, nil)
+			p.chainHealthcheck = chainHealthcheck
+
+			// Marshal each observation into an AttributedObservation as OCR would.
+			observations := make([]types.AttributedObservation, len(tc.observations))
+			for i := range observations {
+				b, err := json.Marshal(tc.observations[i])
+				assert.NoError(t, err)
+				observations[i] = types.AttributedObservation{Observation: b, Observer: commontypes.OracleID(i + 1)}
+			}
+
+			_, _, err := p.Report(ctx, types.ReportTimestamp{}, types.Query{}, observations)
+			if tc.expectingSomeErr {
+				assert.Error(t, err)
+				return
+			}
+			assert.NoError(t, err)
+		})
+	}
+}
+
+// TestExecutionReportingPlugin_ShouldAcceptFinalizedReport verifies that a report whose
+// first message is still ExecutionStateUntouched is accepted, and that the same report is
+// rejected (stale) once the message's state becomes ExecutionStateSuccess.
+func TestExecutionReportingPlugin_ShouldAcceptFinalizedReport(t *testing.T) {
+	msg := cciptypes.EVM2EVMMessage{
+		SequenceNumber: 12,
+		FeeTokenAmount: big.NewInt(1e9),
+		Sender:         cciptypes.Address(utils.RandomAddress().String()),
+		Nonce:          1,
+		GasLimit:       big.NewInt(1),
+		Strict:         false,
+		Receiver:       cciptypes.Address(utils.RandomAddress().String()),
+		Data:           nil,
+		TokenAmounts:   nil,
+		FeeToken:       cciptypes.Address(utils.RandomAddress().String()),
+		MessageID:      [32]byte{},
+	}
+	report := cciptypes.ExecReport{
+		Messages:          []cciptypes.EVM2EVMMessage{msg},
+		OffchainTokenData: [][][]byte{{}},
+		Proofs:            [][32]byte{{}},
+		ProofFlagBits:     big.NewInt(1),
+	}
+
+	encodedReport := encodeExecutionReport(t, report)
+	mockOffRampReader := ccipdatamocks.NewOffRampReader(t)
+	mockOffRampReader.On("DecodeExecutionReport", mock.Anything, encodedReport).Return(report, nil)
+
+	chainHealthcheck := ccipcachemocks.NewChainHealthcheck(t)
+	chainHealthcheck.On("IsHealthy", mock.Anything).Return(true, nil)
+
+	plugin := ExecutionReportingPlugin{
+		offRampReader:    mockOffRampReader,
+		lggr:             logger.TestLogger(t),
+		inflightReports:  newInflightExecReportsContainer(1 * time.Hour),
+		chainHealthcheck: chainHealthcheck,
+		metricsCollector: ccip.NoopMetricsCollector,
+	}
+
+	// First call: message untouched -> report should be accepted.
+	mockedExecState := mockOffRampReader.On("GetExecutionState", mock.Anything, uint64(12)).Return(uint8(cciptypes.ExecutionStateUntouched), nil).Once()
+
+	should, err := plugin.ShouldAcceptFinalizedReport(testutils.Context(t), ocrtypes.ReportTimestamp{}, encodedReport)
+	require.NoError(t, err)
+	assert.Equal(t, true, should)
+
+	// Second call: re-arm the same expectation as executed -> report is stale.
+	mockedExecState.Return(uint8(cciptypes.ExecutionStateSuccess), nil).Once()
+
+	should, err = plugin.ShouldAcceptFinalizedReport(testutils.Context(t), ocrtypes.ReportTimestamp{}, encodedReport)
+	require.NoError(t, err)
+	assert.Equal(t, false, should)
+}
+
+// TestExecutionReportingPlugin_ShouldTransmitAcceptedReport verifies that a report whose
+// first message is still ExecutionStateUntouched is transmitted, and that transmission is
+// skipped once the message's state becomes ExecutionStateFailure (stale report).
+func TestExecutionReportingPlugin_ShouldTransmitAcceptedReport(t *testing.T) {
+	msg := cciptypes.EVM2EVMMessage{
+		SequenceNumber: 12,
+		FeeTokenAmount: big.NewInt(1e9),
+		Sender:         cciptypes.Address(utils.RandomAddress().String()),
+		Nonce:          1,
+		GasLimit:       big.NewInt(1),
+		Strict:         false,
+		Receiver:       cciptypes.Address(utils.RandomAddress().String()),
+		Data:           nil,
+		TokenAmounts:   nil,
+		FeeToken:       cciptypes.Address(utils.RandomAddress().String()),
+		MessageID:      [32]byte{},
+	}
+	report := cciptypes.ExecReport{
+		Messages:          []cciptypes.EVM2EVMMessage{msg},
+		OffchainTokenData: [][][]byte{{}},
+		Proofs:            [][32]byte{{}},
+		ProofFlagBits:     big.NewInt(1),
+	}
+	encodedReport := encodeExecutionReport(t, report)
+
+	mockCommitStoreReader := ccipdatamocks.NewCommitStoreReader(t)
+	mockOffRampReader := ccipdatamocks.NewOffRampReader(t)
+	mockOffRampReader.On("DecodeExecutionReport", mock.Anything, encodedReport).Return(report, nil)
+	// First call: message untouched -> transmit.
+	mockedExecState := mockOffRampReader.On("GetExecutionState", mock.Anything, uint64(12)).Return(uint8(cciptypes.ExecutionStateUntouched), nil).Once()
+
+	chainHealthcheck := ccipcachemocks.NewChainHealthcheck(t)
+	chainHealthcheck.On("IsHealthy", mock.Anything).Return(true, nil)
+
+	plugin := ExecutionReportingPlugin{
+		commitStoreReader: mockCommitStoreReader,
+		offRampReader:     mockOffRampReader,
+		lggr:              logger.TestLogger(t),
+		inflightReports:   newInflightExecReportsContainer(1 * time.Hour),
+		chainHealthcheck:  chainHealthcheck,
+	}
+
+	should, err := plugin.ShouldTransmitAcceptedReport(testutils.Context(t), ocrtypes.ReportTimestamp{}, encodedReport)
+	require.NoError(t, err)
+	assert.Equal(t, true, should)
+
+	// Second call: re-arm the expectation as failed execution -> stale, do not transmit.
+	mockedExecState.Return(uint8(cciptypes.ExecutionStateFailure), nil).Once()
+	should, err = plugin.ShouldTransmitAcceptedReport(testutils.Context(t), ocrtypes.ReportTimestamp{}, encodedReport)
+	require.NoError(t, err)
+	assert.Equal(t, false, should)
+}
+
+// TestExecutionReportingPlugin_buildReport generates an execution report that would
+// exceed MaxExecutionReportLength when encoded naively, and asserts that buildReport caps
+// the built report to fit within the limit.
+func TestExecutionReportingPlugin_buildReport(t *testing.T) {
+	ctx := testutils.Context(t)
+
+	const numMessages = 100
+	const tokensPerMessage = 20
+	const bytesPerMessage = 1000
+
+	executionReport := generateExecutionReport(t, numMessages, tokensPerMessage, bytesPerMessage)
+	encodedReport := encodeExecutionReport(t, executionReport)
+	// ensure "naive" full report would be bigger than limit
+	assert.Greater(t, len(encodedReport), MaxExecutionReportLength, "full execution report length")
+
+	observations := make([]ccip.ObservedMessage, len(executionReport.Messages))
+	for i, msg := range executionReport.Messages {
+		observations[i] = ccip.NewObservedMessage(msg.SequenceNumber, executionReport.OffchainTokenData[i])
+	}
+
+	// ensure that buildReport should cap the built report to fit in MaxExecutionReportLength
+	p := &ExecutionReportingPlugin{}
+	p.lggr = logger.TestLogger(t)
+
+	// The commit store verifies any report and claims a single interval spanning all
+	// observed sequence numbers.
+	commitStore := ccipdatamocks.NewCommitStoreReader(t)
+	commitStore.On("VerifyExecutionReport", mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
+	commitStore.On("GetExpectedNextSequenceNumber", mock.Anything).
+		Return(executionReport.Messages[len(executionReport.Messages)-1].SequenceNumber+1, nil)
+	commitStore.On("GetCommitReportMatchingSeqNum", ctx, observations[0].SeqNr, 0).
+		Return([]cciptypes.CommitStoreReportWithTxMeta{
+			{
+				CommitStoreReport: cciptypes.CommitStoreReport{
+					Interval: cciptypes.CommitStoreInterval{
+						Min: observations[0].SeqNr,
+						Max: observations[len(observations)-1].SeqNr,
+					},
+				},
+			},
+		}, nil)
+	p.metricsCollector = ccip.NoopMetricsCollector
+	p.commitStoreReader = commitStore
+
+	lp := lpMocks.NewLogPoller(t)
+	offRampReader, err := v1_0_0.NewOffRamp(logger.TestLogger(t), utils.RandomAddress(), nil, lp, nil, nil)
+	assert.NoError(t, err)
+	p.offRampReader = offRampReader
+
+	// One send request per observation, padded with bytesPerMessage of data each.
+	sendReqs := make([]cciptypes.EVM2EVMMessageWithTxMeta, len(observations))
+	sourceReader := ccipdatamocks.NewOnRampReader(t)
+	for i := range observations {
+		msg := cciptypes.EVM2EVMMessage{
+			SourceChainSelector: math.MaxUint64,
+			SequenceNumber:      uint64(i + 1),
+			FeeTokenAmount:      big.NewInt(math.MaxInt64),
+			Sender:              cciptypes.Address(utils.RandomAddress().String()),
+			Nonce:               math.MaxUint64,
+			GasLimit:            big.NewInt(math.MaxInt64),
+			Strict:              false,
+			Receiver:            cciptypes.Address(utils.RandomAddress().String()),
+			Data:                bytes.Repeat([]byte{0}, bytesPerMessage),
+			TokenAmounts:        nil,
+			FeeToken:            cciptypes.Address(utils.RandomAddress().String()),
+			MessageID:           [32]byte{12},
+		}
+		sendReqs[i] = cciptypes.EVM2EVMMessageWithTxMeta{EVM2EVMMessage: msg}
+	}
+	sourceReader.On("GetSendRequestsBetweenSeqNums",
+		ctx, observations[0].SeqNr, observations[len(observations)-1].SeqNr, false).Return(sendReqs, nil)
+	p.onRampReader = sourceReader
+
+	execReport, err := p.buildReport(ctx, p.lggr, observations)
+	assert.NoError(t, err)
+	assert.LessOrEqual(t, len(execReport), MaxExecutionReportLength, "built execution report length")
+}
+
+// TestExecutionReportingPlugin_getReportsWithSendRequests verifies that commit reports
+// are paired with the onRamp send requests falling in their intervals, and that each
+// request's Executed/Finalized flags reflect the destination execution-state events.
+func TestExecutionReportingPlugin_getReportsWithSendRequests(t *testing.T) {
+	testCases := []struct {
+		name                string
+		reports             []cciptypes.CommitStoreReport
+		expQueryMin         uint64 // expected min/max used in the query to get ccipevents
+		expQueryMax         uint64
+		onchainEvents       []cciptypes.EVM2EVMMessageWithTxMeta
+		destExecutedSeqNums []uint64
+
+		expReports []commitReportWithSendRequests
+		expErr     bool
+	}{
+		{
+			name:       "no reports",
+			reports:    nil,
+			expReports: nil,
+			expErr:     false,
+		},
+		{
+			name: "two reports happy flow",
+			reports: []cciptypes.CommitStoreReport{
+				{
+					Interval:   cciptypes.CommitStoreInterval{Min: 1, Max: 2},
+					MerkleRoot: [32]byte{100},
+				},
+				{
+					Interval:   cciptypes.CommitStoreInterval{Min: 3, Max: 3},
+					MerkleRoot: [32]byte{200},
+				},
+			},
+			expQueryMin: 1,
+			expQueryMax: 3,
+			onchainEvents: []cciptypes.EVM2EVMMessageWithTxMeta{
+				{EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 1}},
+				{EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 2}},
+				{EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 3}},
+			},
+			destExecutedSeqNums: []uint64{1},
+			expReports: []commitReportWithSendRequests{
+				{
+					commitReport: cciptypes.CommitStoreReport{
+						Interval:   cciptypes.CommitStoreInterval{Min: 1, Max: 2},
+						MerkleRoot: [32]byte{100},
+					},
+					sendRequestsWithMeta: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+						{
+							EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 1},
+							Executed:       true,
+							Finalized:      true,
+						},
+						{
+							EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 2},
+							Executed:       false,
+							Finalized:      false,
+						},
+					},
+				},
+				{
+					commitReport: cciptypes.CommitStoreReport{
+						Interval:   cciptypes.CommitStoreInterval{Min: 3, Max: 3},
+						MerkleRoot: [32]byte{200},
+					},
+					sendRequestsWithMeta: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+						{
+							EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 3},
+							Executed:       false,
+							Finalized:      false,
+						},
+					},
+				},
+			},
+			expErr: false,
+		},
+	}
+
+	ctx := testutils.Context(t)
+	lggr := logger.TestLogger(t)
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			p := &ExecutionReportingPlugin{}
+			p.lggr = lggr
+
+			offRampReader := ccipdatamocks.NewOffRampReader(t)
+			p.offRampReader = offRampReader
+
+			sourceReader := ccipdatamocks.NewOnRampReader(t)
+			sourceReader.On("GetSendRequestsBetweenSeqNums", ctx, tc.expQueryMin, tc.expQueryMax, false).
+				Return(tc.onchainEvents, nil).Maybe()
+			p.onRampReader = sourceReader
+
+			// Derive each expected request's finalized status so the mocked execution
+			// events carry the matching TxMeta.
+			finalized := make(map[uint64]cciptypes.FinalizedStatus)
+			for _, r := range tc.expReports {
+				for _, s := range r.sendRequestsWithMeta {
+					finalized[s.SequenceNumber] = cciptypes.FinalizedStatusNotFinalized
+					if s.Finalized {
+						finalized[s.SequenceNumber] = cciptypes.FinalizedStatusFinalized
+					}
+				}
+			}
+
+			var executedEvents []cciptypes.ExecutionStateChangedWithTxMeta
+			for _, executedSeqNum := range tc.destExecutedSeqNums {
+				executedEvents = append(executedEvents, cciptypes.ExecutionStateChangedWithTxMeta{
+					ExecutionStateChanged: cciptypes.ExecutionStateChanged{
+						SequenceNumber: executedSeqNum,
+					},
+					TxMeta: cciptypes.TxMeta{
+						Finalized: finalized[executedSeqNum],
+					},
+				})
+			}
+			offRampReader.On("GetExecutionStateChangesBetweenSeqNums", ctx, tc.expQueryMin, tc.expQueryMax, 0).Return(executedEvents, nil).Maybe()
+
+			populatedReports, err := p.getReportsWithSendRequests(ctx, tc.reports)
+			if tc.expErr {
+				assert.Error(t, err)
+				return
+			}
+			assert.NoError(t, err)
+			assert.Equal(t, len(tc.expReports), len(populatedReports))
+			for i, expReport := range tc.expReports {
+				assert.Equal(t, len(expReport.sendRequestsWithMeta), len(populatedReports[i].sendRequestsWithMeta))
+				for j, expReq := range expReport.sendRequestsWithMeta {
+					assert.Equal(t, expReq.Executed, populatedReports[i].sendRequestsWithMeta[j].Executed)
+					assert.Equal(t, expReq.Finalized, populatedReports[i].sendRequestsWithMeta[j].Finalized)
+					assert.Equal(t, expReq.SequenceNumber, populatedReports[i].sendRequestsWithMeta[j].SequenceNumber)
+				}
+			}
+		})
+	}
+}
+
+// Test_calculateObservedMessagesConsensus verifies consensus over per-message
+// token data across oracle observations: a message's token data is selected
+// only when at least f+1 observations agree on it byte-for-byte, and
+// tie-breaking between competing values is deterministic.
+func Test_calculateObservedMessagesConsensus(t *testing.T) {
+	type args struct {
+		observations []ccip.ExecutionObservation
+		f            int
+	}
+	tests := []struct {
+		name string
+		args args
+		want []ccip.ObservedMessage
+	}{
+		{
+			// No observations at all must yield an empty (non-nil) result.
+			name: "no observations",
+			args: args{
+				observations: nil,
+				f:            0,
+			},
+			want: []ccip.ObservedMessage{},
+		},
+		{
+			// Messages 1 and 2 have f+1=2 matching observations; message 3
+			// appears only once and must be dropped.
+			name: "common path",
+			args: args{
+				observations: []ccip.ExecutionObservation{
+					{
+						Messages: map[uint64]ccip.MsgData{
+							1: {TokenData: [][]byte{{0x1}, {0x1}, {0x1}}},
+							2: {TokenData: [][]byte{{0x2}, {0x2}, {0x2}}},
+						},
+					},
+					{
+						Messages: map[uint64]ccip.MsgData{
+							1: {TokenData: [][]byte{{0x1}, {0x1}, {0xff}}}, // different token data - should not be picked
+							2: {TokenData: [][]byte{{0x2}, {0x2}, {0x2}}},
+							3: {TokenData: [][]byte{{0x3}, {0x3}, {0x3}}},
+						},
+					},
+					{
+						Messages: map[uint64]ccip.MsgData{
+							1: {TokenData: [][]byte{{0x1}, {0x1}, {0x1}}},
+							2: {TokenData: [][]byte{{0x2}, {0x2}, {0x2}}},
+						},
+					},
+				},
+				f: 1,
+			},
+			want: []ccip.ObservedMessage{
+				{SeqNr: 1, MsgData: ccip.MsgData{TokenData: [][]byte{{0x1}, {0x1}, {0x1}}}},
+				{SeqNr: 2, MsgData: ccip.MsgData{TokenData: [][]byte{{0x2}, {0x2}, {0x2}}}},
+			},
+		},
+		{
+			// {0x1},{0x1,0x1} and {0x1},{0x1},{0x1} are similar but not equal;
+			// only the variant observed twice reaches the f+1 threshold.
+			name: "similar token data",
+			args: args{
+				observations: []ccip.ExecutionObservation{
+					{
+						Messages: map[uint64]ccip.MsgData{
+							1: {TokenData: [][]byte{{0x1}, {0x1}, {0x1}}},
+						},
+					},
+					{
+						Messages: map[uint64]ccip.MsgData{
+							1: {TokenData: [][]byte{{0x1}, {0x1, 0x1}}},
+						},
+					},
+					{
+						Messages: map[uint64]ccip.MsgData{
+							1: {TokenData: [][]byte{{0x1}, {0x1, 0x1}}},
+						},
+					},
+				},
+				f: 1,
+			},
+			want: []ccip.ObservedMessage{
+				{SeqNr: 1, MsgData: ccip.MsgData{TokenData: [][]byte{{0x1}, {0x1, 0x1}}}},
+			},
+		},
+		{
+			// Three candidate values each observed twice: the tie-break must
+			// always pick the same winner ({0x3}) regardless of input order.
+			name: "results should be deterministic",
+			args: args{
+				observations: []ccip.ExecutionObservation{
+					{Messages: map[uint64]ccip.MsgData{1: {TokenData: [][]byte{{0x2}}}}},
+					{Messages: map[uint64]ccip.MsgData{1: {TokenData: [][]byte{{0x2}}}}},
+					{Messages: map[uint64]ccip.MsgData{1: {TokenData: [][]byte{{0x1}}}}},
+					{Messages: map[uint64]ccip.MsgData{1: {TokenData: [][]byte{{0x3}}}}},
+					{Messages: map[uint64]ccip.MsgData{1: {TokenData: [][]byte{{0x3}}}}},
+					{Messages: map[uint64]ccip.MsgData{1: {TokenData: [][]byte{{0x1}}}}},
+				},
+				f: 1,
+			},
+			want: []ccip.ObservedMessage{
+				{SeqNr: 1, MsgData: ccip.MsgData{TokenData: [][]byte{{0x3}}}},
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			res, err := calculateObservedMessagesConsensus(
+				tt.args.observations,
+				tt.args.f,
+			)
+			assert.NoError(t, err)
+			// Output ordering is not part of the contract; sort by sequence
+			// number before comparing with the expected slice.
+			sort.Slice(res, func(i, j int) bool {
+				return res[i].SeqNr < res[j].SeqNr
+			})
+			assert.Equalf(t, tt.want, res, "calculateObservedMessagesConsensus(%v, %v)", tt.args.observations, tt.args.f)
+		})
+	}
+}
+
+// Test_getTokensPrices checks that getTokensPrices maps the price registry's
+// returned prices onto the concatenated (feeTokens ++ tokens) list, that a
+// token appearing in both lists must be priced consistently, and that a short
+// price response is an error.
+func Test_getTokensPrices(t *testing.T) {
+	tk1 := ccipcalc.HexToAddress("1")
+	tk2 := ccipcalc.HexToAddress("2")
+	tk3 := ccipcalc.HexToAddress("3")
+
+	testCases := []struct {
+		name      string
+		feeTokens []cciptypes.Address
+		tokens    []cciptypes.Address
+		retPrices []cciptypes.TokenPriceUpdate // prices the mocked registry returns, positional
+		expPrices map[cciptypes.Address]*big.Int
+		expErr    bool
+	}{
+		{
+			name:      "base",
+			feeTokens: []cciptypes.Address{tk1, tk2},
+			tokens:    []cciptypes.Address{tk3},
+			retPrices: []cciptypes.TokenPriceUpdate{
+				{TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(10)}},
+				{TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(20)}},
+				{TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(30)}},
+			},
+			expPrices: map[cciptypes.Address]*big.Int{
+				tk1: big.NewInt(10),
+				tk2: big.NewInt(20),
+				tk3: big.NewInt(30),
+			},
+			expErr: false,
+		},
+		{
+			// tk1 is both a fee token and a regular token; duplicate entries
+			// with the same price are allowed.
+			name:      "token is both fee token and normal token",
+			feeTokens: []cciptypes.Address{tk1, tk2},
+			tokens:    []cciptypes.Address{tk3, tk1},
+			retPrices: []cciptypes.TokenPriceUpdate{
+				{TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(10)}},
+				{TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(20)}},
+				{TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(30)}},
+				{TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(10)}},
+			},
+			expPrices: map[cciptypes.Address]*big.Int{
+				tk1: big.NewInt(10),
+				tk2: big.NewInt(20),
+				tk3: big.NewInt(30),
+			},
+			expErr: false,
+		},
+		{
+			// Same duplicate token, but the registry reports a conflicting
+			// price (10 vs 1000) — the call must fail.
+			name:      "token is both fee token and normal token and price registry gave different price",
+			feeTokens: []cciptypes.Address{tk1, tk2},
+			tokens:    []cciptypes.Address{tk3, tk1},
+			retPrices: []cciptypes.TokenPriceUpdate{
+				{TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(10)}},
+				{TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(20)}},
+				{TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(30)}},
+				{TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(1000)}},
+			},
+			expErr: true,
+		},
+		{
+			// Three tokens requested, only two prices returned.
+			name:      "contract returns less prices than requested",
+			feeTokens: []cciptypes.Address{tk1, tk2},
+			tokens:    []cciptypes.Address{tk3},
+			retPrices: []cciptypes.TokenPriceUpdate{
+				{TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(10)}},
+				{TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(20)}},
+			},
+			expErr: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			priceReg := ccipdatamocks.NewPriceRegistryReader(t)
+			priceReg.On("GetTokenPrices", mock.Anything, mock.Anything).Return(tc.retPrices, nil)
+			priceReg.On("Address", mock.Anything).Return(cciptypes.Address(utils.RandomAddress().String()), nil).Maybe()
+
+			tokenPrices, err := getTokensPrices(context.Background(), priceReg, append(tc.feeTokens, tc.tokens...))
+			if tc.expErr {
+				assert.Error(t, err)
+				return
+			}
+
+			assert.NoError(t, err)
+			for tk, price := range tc.expPrices {
+				assert.Equal(t, price, tokenPrices[tk])
+			}
+		})
+	}
+}
+
+// Test_calculateMessageMaxGas pins the gas-estimation formula for execution
+// messages against known constants, and checks that a gas limit that does not
+// fit into the target integer type is rejected.
+func Test_calculateMessageMaxGas(t *testing.T) {
+	type args struct {
+		gasLimit    *big.Int
+		numRequests int
+		dataLen     int
+		numTokens   int
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    uint64 // expected gas; ignored when wantErr is true (test returns early)
+		wantErr bool
+	}{
+		{
+			name:    "base",
+			args:    args{gasLimit: big.NewInt(1000), numRequests: 5, dataLen: 5, numTokens: 2},
+			want:    826_336,
+			wantErr: false,
+		},
+		{
+			name:    "large",
+			args:    args{gasLimit: big.NewInt(1000), numRequests: 1000, dataLen: 1000, numTokens: 1000},
+			want:    346_485_176,
+			wantErr: false,
+		},
+		{
+			// MaxInt64^2 cannot fit in uint64 — must return an error.
+			name:    "gas limit overflow",
+			args:    args{gasLimit: big.NewInt(0).Mul(big.NewInt(math.MaxInt64), big.NewInt(math.MaxInt64))},
+			want:    36_391_540,
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := calculateMessageMaxGas(tt.args.gasLimit, tt.args.numRequests, tt.args.dataLen, tt.args.numTokens)
+			if tt.wantErr {
+				assert.Error(t, err)
+				return
+			}
+			assert.NoError(t, err)
+			assert.Equalf(t, tt.want, got, "calculateMessageMaxGas(%v, %v, %v, %v)", tt.args.gasLimit, tt.args.numRequests, tt.args.dataLen, tt.args.numTokens)
+		})
+	}
+}
+
+// Test_inflightAggregates exercises getInflightAggregateRateLimit: the
+// USD-denominated aggregate value of in-flight token transfers, computed from
+// dest-chain token prices via the source→dest token mapping. Tokens with no
+// dest price contribute 0.
+//
+// NOTE(review): expInflightSeqNrs, expMaxInflightSenderNonces and
+// expInflightTokenAmounts are populated by the fixtures but never asserted —
+// the loop below only checks expErr and expInflightAggrVal. Either assert
+// them or drop the fields; confirm original intent.
+func Test_inflightAggregates(t *testing.T) {
+	const n = 10
+	addrs := make([]cciptypes.Address, n)
+	tokenAddrs := make([]cciptypes.Address, n)
+	for i := range addrs {
+		addrs[i] = cciptypes.Address(utils.RandomAddress().String())
+		tokenAddrs[i] = cciptypes.Address(utils.RandomAddress().String())
+	}
+	lggr := logger.TestLogger(t)
+
+	testCases := []struct {
+		name            string
+		inflight        []InflightInternalExecutionReport
+		destTokenPrices map[cciptypes.Address]*big.Int
+		sourceToDest    map[cciptypes.Address]cciptypes.Address
+
+		expInflightSeqNrs          mapset.Set[uint64]
+		expInflightAggrVal         *big.Int
+		expMaxInflightSenderNonces map[cciptypes.Address]uint64
+		expInflightTokenAmounts    map[cciptypes.Address]*big.Int
+		expErr                     bool
+	}{
+		{
+			// tokenAddrs[0] totals 9e18 priced at 1000, tokenAddrs[2] totals
+			// 5e18 priced at 500 (both via sourceToDest), hence 9*1000+5*500.
+			name: "base",
+			inflight: []InflightInternalExecutionReport{
+				{
+					messages: []cciptypes.EVM2EVMMessage{
+						{
+							Sender:         addrs[0],
+							SequenceNumber: 100,
+							Nonce:          2,
+							TokenAmounts: []cciptypes.TokenAmount{
+								{Token: tokenAddrs[0], Amount: big.NewInt(1e18)},
+								{Token: tokenAddrs[0], Amount: big.NewInt(2e18)},
+							},
+						},
+						{
+							Sender:         addrs[0],
+							SequenceNumber: 106,
+							Nonce:          4,
+							TokenAmounts: []cciptypes.TokenAmount{
+								{Token: tokenAddrs[0], Amount: big.NewInt(1e18)},
+								{Token: tokenAddrs[0], Amount: big.NewInt(5e18)},
+								{Token: tokenAddrs[2], Amount: big.NewInt(5e18)},
+							},
+						},
+					},
+				},
+			},
+			destTokenPrices: map[cciptypes.Address]*big.Int{
+				tokenAddrs[1]: big.NewInt(1000),
+				tokenAddrs[3]: big.NewInt(500),
+			},
+			sourceToDest: map[cciptypes.Address]cciptypes.Address{
+				tokenAddrs[0]: tokenAddrs[1],
+				tokenAddrs[2]: tokenAddrs[3],
+			},
+			expInflightSeqNrs:  mapset.NewSet[uint64](100, 106),
+			expInflightAggrVal: big.NewInt(9*1000 + 5*500),
+			expMaxInflightSenderNonces: map[cciptypes.Address]uint64{
+				addrs[0]: 4,
+			},
+			expInflightTokenAmounts: map[cciptypes.Address]*big.Int{
+				tokenAddrs[0]: big.NewInt(9e18),
+				tokenAddrs[2]: big.NewInt(5e18),
+			},
+			expErr: false,
+		},
+		{
+			// tokenAddrs[0] has no sourceToDest mapping / dest price, so the
+			// aggregate value stays zero rather than erroring.
+			name: "missing price should be 0",
+			inflight: []InflightInternalExecutionReport{
+				{
+					messages: []cciptypes.EVM2EVMMessage{
+						{
+							Sender:         addrs[0],
+							SequenceNumber: 100,
+							Nonce:          2,
+							TokenAmounts: []cciptypes.TokenAmount{
+								{Token: tokenAddrs[0], Amount: big.NewInt(1e18)},
+							},
+						},
+					},
+				},
+			},
+			destTokenPrices: map[cciptypes.Address]*big.Int{
+				tokenAddrs[3]: big.NewInt(500),
+			},
+			sourceToDest: map[cciptypes.Address]cciptypes.Address{
+				tokenAddrs[2]: tokenAddrs[3],
+			},
+			expInflightAggrVal: big.NewInt(0),
+			expErr:             false,
+		},
+		{
+			name:                       "nothing inflight",
+			inflight:                   []InflightInternalExecutionReport{},
+			expInflightSeqNrs:          mapset.NewSet[uint64](),
+			expInflightAggrVal:         big.NewInt(0),
+			expMaxInflightSenderNonces: map[cciptypes.Address]uint64{},
+			expInflightTokenAmounts:    map[cciptypes.Address]*big.Int{},
+			expErr:                     false,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			inflightAggrVal, err := getInflightAggregateRateLimit(
+				lggr,
+				tc.inflight,
+				tc.destTokenPrices,
+				tc.sourceToDest,
+			)
+
+			if tc.expErr {
+				assert.Error(t, err)
+				return
+			}
+			assert.NoError(t, err)
+			assert.True(t, reflect.DeepEqual(tc.expInflightAggrVal, inflightAggrVal))
+		})
+	}
+}
+
+// Test_commitReportWithSendRequests_validate checks that validate() accepts a
+// report exactly when the number of attached send requests matches the size
+// of the commit interval [Min, Max].
+func Test_commitReportWithSendRequests_validate(t *testing.T) {
+	cases := []struct {
+		name     string
+		interval cciptypes.CommitStoreInterval
+		numReqs  int
+		expValid bool
+	}{
+		{
+			name:     "valid report",
+			interval: cciptypes.CommitStoreInterval{Min: 10, Max: 20},
+			numReqs:  11,
+			expValid: true,
+		},
+		{
+			name:     "report with one request",
+			interval: cciptypes.CommitStoreInterval{Min: 1234, Max: 1234},
+			numReqs:  1,
+			expValid: true,
+		},
+		{
+			name:     "request is missing",
+			interval: cciptypes.CommitStoreInterval{Min: 1234, Max: 1234},
+			numReqs:  0,
+			expValid: false,
+		},
+		{
+			name:     "requests are missing",
+			interval: cciptypes.CommitStoreInterval{Min: 1, Max: 10},
+			numReqs:  5,
+			expValid: false,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			rep := commitReportWithSendRequests{
+				commitReport:         cciptypes.CommitStoreReport{Interval: c.interval},
+				sendRequestsWithMeta: make([]cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, c.numReqs),
+			}
+			validationErr := rep.validate()
+			// validate() signals an invalid report via a non-nil error.
+			assert.Equal(t, c.expValid, validationErr == nil)
+		})
+	}
+}
+
+// Test_commitReportWithSendRequests_allRequestsAreExecutedAndFinalized checks
+// that the predicate is true only when every attached request is both
+// executed and finalized (vacuously true for zero requests).
+func Test_commitReportWithSendRequests_allRequestsAreExecutedAndFinalized(t *testing.T) {
+	// Small constructor to keep the fixtures below compact.
+	req := func(executed, finalized bool) cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta {
+		return cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{Executed: executed, Finalized: finalized}
+	}
+
+	cases := []struct {
+		name string
+		reqs []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta
+		want bool
+	}{
+		{
+			name: "all requests executed and finalized",
+			reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+				req(true, true), req(true, true), req(true, true),
+			},
+			want: true,
+		},
+		{
+			name: "true when there are zero requests",
+			reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{},
+			want: true,
+		},
+		{
+			name: "some request not executed",
+			reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+				req(true, true), req(true, true), req(false, true),
+			},
+			want: false,
+		},
+		{
+			name: "some request not finalized",
+			reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+				req(true, true), req(true, true), req(true, false),
+			},
+			want: false,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			rep := commitReportWithSendRequests{sendRequestsWithMeta: c.reqs}
+			assert.Equal(t, c.want, rep.allRequestsAreExecutedAndFinalized())
+		})
+	}
+}
+
+// Test_commitReportWithSendRequests_sendReqFits checks the inclusive interval
+// membership test: a request fits iff Min <= SequenceNumber <= Max.
+//
+// Fix: the four sub-tests previously all reused the copy-pasted name
+// "all requests executed and finalized", which made failures impossible to
+// attribute; each case now has a descriptive name.
+func Test_commitReportWithSendRequests_sendReqFits(t *testing.T) {
+	testCases := []struct {
+		name   string
+		req    cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta
+		report cciptypes.CommitStoreReport
+		expRes bool
+	}{
+		{
+			name: "sequence number at interval start",
+			req: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+				EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 1},
+			},
+			report: cciptypes.CommitStoreReport{
+				Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 10},
+			},
+			expRes: true,
+		},
+		{
+			name: "sequence number at interval end",
+			req: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+				EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 10},
+			},
+			report: cciptypes.CommitStoreReport{
+				Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 10},
+			},
+			expRes: true,
+		},
+		{
+			name: "sequence number above interval",
+			req: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+				EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 11},
+			},
+			report: cciptypes.CommitStoreReport{
+				Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 10},
+			},
+			expRes: false,
+		},
+		{
+			name: "single-element interval",
+			req: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+				EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 10},
+			},
+			report: cciptypes.CommitStoreReport{
+				Interval: cciptypes.CommitStoreInterval{Min: 10, Max: 10},
+			},
+			expRes: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			r := &commitReportWithSendRequests{commitReport: tc.report}
+			assert.Equal(t, tc.expRes, r.sendReqFits(tc.req))
+		})
+	}
+}
+
+// generateExecutionReport generates an execution report that can be used in tests.
+// It builds numMsgs messages, each carrying tokensPerMsg randomized token
+// amounts and a bytesPerMsg-long data payload; all other fields are random,
+// so the output is NOT deterministic across calls.
+func generateExecutionReport(t *testing.T, numMsgs, tokensPerMsg, bytesPerMsg int) cciptypes.ExecReport {
+	messages := make([]cciptypes.EVM2EVMMessage, numMsgs)
+
+	randAddr := func() cciptypes.Address {
+		return cciptypes.Address(utils.RandomAddress().String())
+	}
+
+	offChainTokenData := make([][][]byte, numMsgs)
+	for i := range messages {
+		tokenAmounts := make([]cciptypes.TokenAmount, tokensPerMsg)
+		for j := range tokenAmounts {
+			tokenAmounts[j] = cciptypes.TokenAmount{
+				Token:  randAddr(),
+				Amount: big.NewInt(math.MaxInt64),
+			}
+		}
+
+		// Sequence numbers are the only deterministic field: 1..numMsgs.
+		messages[i] = cciptypes.EVM2EVMMessage{
+			SourceChainSelector: rand.Uint64(),
+			SequenceNumber:      uint64(i + 1),
+			FeeTokenAmount:      big.NewInt(rand.Int64()),
+			Sender:              randAddr(),
+			Nonce:               rand.Uint64(),
+			GasLimit:            big.NewInt(rand.Int64()),
+			Strict:              false,
+			Receiver:            randAddr(),
+			Data:                bytes.Repeat([]byte{1}, bytesPerMsg),
+			TokenAmounts:        tokenAmounts,
+			FeeToken:            randAddr(),
+			MessageID:           utils.RandomBytes32(),
+		}
+
+		// Three identical off-chain token-data entries per message.
+		data := []byte(`{"foo": "bar"}`)
+		offChainTokenData[i] = [][]byte{data, data, data}
+	}
+
+	return cciptypes.ExecReport{
+		Messages:          messages,
+		OffchainTokenData: offChainTokenData,
+		Proofs:            make([][32]byte, numMsgs),
+		ProofFlagBits:     big.NewInt(rand.Int64()),
+	}
+}
+
+// Test_selectReportsToFillBatch builds 10 commit-store reports of 200
+// messages each and verifies how selectReportsToFillBatch partitions them
+// under various per-batch message limits: batches never split a report, a
+// limit below one report's size selects nothing, and all reports are
+// eventually covered in order.
+func Test_selectReportsToFillBatch(t *testing.T) {
+	tests := []struct {
+		name            string
+		messagesLimit   uint64 // maximum number of messages that can be included in a batch.
+		expectedBatches int    // expected number of batches.
+		expectedReports int    // expected number of selected reports.
+	}{
+		{
+			name:            "pick all at once when messages limit is high",
+			messagesLimit:   5000,
+			expectedBatches: 1,
+			expectedReports: 10,
+		},
+		{
+			name:            "pick none when messages limit is below commit report size",
+			messagesLimit:   199,
+			expectedBatches: 0,
+			expectedReports: 0,
+		},
+		{
+			name:            "pick exactly the number in each report",
+			messagesLimit:   200,
+			expectedBatches: 10,
+			expectedReports: 10,
+		},
+		{
+			name:            "messages limit larger than individual reports",
+			messagesLimit:   300,
+			expectedBatches: 10,
+			expectedReports: 10,
+		},
+		{
+			name:            "messages limit larger than several reports",
+			messagesLimit:   650,
+			expectedBatches: 4,
+			expectedReports: 10,
+		},
+		{
+			name:            "default limit",
+			messagesLimit:   1024,
+			expectedBatches: 2,
+			expectedReports: 10,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			nbCommitStoreReports := 10
+			nbMsgPerRoot := 200
+
+			// Contiguous intervals: [0,199], [200,399], ... [1800,1999].
+			var reports []cciptypes.CommitStoreReport
+			for i := 0; i < nbCommitStoreReports; i++ {
+				reports = append(reports, cciptypes.CommitStoreReport{Interval: cciptypes.CommitStoreInterval{Min: uint64(i * nbMsgPerRoot), Max: uint64((i+1)*nbMsgPerRoot - 1)}})
+			}
+
+			// Repeatedly select batches; step==0 means no further report fits
+			// under the limit, which terminates the loop.
+			var unexpiredReportsBatches [][]cciptypes.CommitStoreReport
+			for i := 0; i < len(reports); {
+				unexpiredReports, step := selectReportsToFillBatch(reports[i:], tt.messagesLimit)
+				if step == 0 {
+					break
+				}
+				unexpiredReportsBatches = append(unexpiredReportsBatches, unexpiredReports)
+				i += step
+			}
+			assert.Len(t, unexpiredReportsBatches, tt.expectedBatches)
+
+			// Flattened batches must reproduce the original report sequence.
+			var flatten []cciptypes.CommitStoreReport
+			for _, r := range unexpiredReportsBatches {
+				flatten = append(flatten, r...)
+			}
+			assert.Equal(t, tt.expectedReports, len(flatten))
+			if tt.expectedBatches > 0 {
+				assert.Equal(t, reports, flatten)
+			} else {
+				assert.Empty(t, flatten)
+			}
+		})
+	}
+}
+
+// Test_prepareTokenExecData wires mocked onramp/offramp/price-registry
+// readers into an ExecutionReportingPlugin and checks that
+// prepareTokenExecData returns the expected source and destination token
+// prices (native wrapped tokens always included), and that a failure in any
+// of the underlying token queries fails the whole call.
+func Test_prepareTokenExecData(t *testing.T) {
+	ctx := testutils.Context(t)
+
+	weth := cciptypes.Address(utils.RandomAddress().String())
+	wavax := cciptypes.Address(utils.RandomAddress().String())
+	link := cciptypes.Address(utils.RandomAddress().String())
+	usdc := cciptypes.Address(utils.RandomAddress().String())
+
+	wethPriceUpdate := cciptypes.TokenPriceUpdate{TokenPrice: cciptypes.TokenPrice{Token: weth, Value: big.NewInt(2e18)}}
+	wavaxPriceUpdate := cciptypes.TokenPriceUpdate{TokenPrice: cciptypes.TokenPrice{Token: wavax, Value: big.NewInt(3e18)}}
+	linkPriceUpdate := cciptypes.TokenPriceUpdate{TokenPrice: cciptypes.TokenPrice{Token: link, Value: big.NewInt(4e18)}}
+	usdcPriceUpdate := cciptypes.TokenPriceUpdate{TokenPrice: cciptypes.TokenPrice{Token: usdc, Value: big.NewInt(5e18)}}
+
+	// Lookup table used by the final assertions to map token -> expected price.
+	tokenPrices := map[cciptypes.Address]cciptypes.TokenPriceUpdate{weth: wethPriceUpdate, wavax: wavaxPriceUpdate, link: linkPriceUpdate, usdc: usdcPriceUpdate}
+
+	tests := []struct {
+		name               string
+		sourceFeeTokens    []cciptypes.Address
+		sourceFeeTokensErr error
+		destTokens         []cciptypes.Address
+		destTokensErr      error
+		destFeeTokens      []cciptypes.Address
+		destFeeTokensErr   error
+		sourcePrices       []cciptypes.TokenPriceUpdate
+		destPrices         []cciptypes.TokenPriceUpdate
+	}{
+		{
+			// weth/wavax are the wrapped natives configured on the plugin
+			// below, so they are always priced even with no extra tokens.
+			name:         "only native token",
+			sourcePrices: []cciptypes.TokenPriceUpdate{wethPriceUpdate},
+			destPrices:   []cciptypes.TokenPriceUpdate{wavaxPriceUpdate},
+		},
+		{
+			name:          "additional dest fee token",
+			destFeeTokens: []cciptypes.Address{link},
+			sourcePrices:  []cciptypes.TokenPriceUpdate{wethPriceUpdate},
+			destPrices:    []cciptypes.TokenPriceUpdate{linkPriceUpdate, wavaxPriceUpdate},
+		},
+		{
+			name:         "dest tokens",
+			destTokens:   []cciptypes.Address{link, usdc},
+			sourcePrices: []cciptypes.TokenPriceUpdate{wethPriceUpdate},
+			destPrices:   []cciptypes.TokenPriceUpdate{linkPriceUpdate, usdcPriceUpdate, wavaxPriceUpdate},
+		},
+		{
+			name:            "source fee tokens",
+			sourceFeeTokens: []cciptypes.Address{usdc},
+			sourcePrices:    []cciptypes.TokenPriceUpdate{usdcPriceUpdate, wethPriceUpdate},
+			destPrices:      []cciptypes.TokenPriceUpdate{wavaxPriceUpdate},
+		},
+		{
+			name:            "source, dest and fee tokens",
+			sourceFeeTokens: []cciptypes.Address{usdc},
+			destTokens:      []cciptypes.Address{link},
+			destFeeTokens:   []cciptypes.Address{usdc},
+			sourcePrices:    []cciptypes.TokenPriceUpdate{usdcPriceUpdate, wethPriceUpdate},
+			destPrices:      []cciptypes.TokenPriceUpdate{usdcPriceUpdate, linkPriceUpdate, wavaxPriceUpdate},
+		},
+		{
+			// Duplicates across the three token sources must be de-duplicated
+			// in the price lists.
+			name:            "source, dest and fee tokens with duplicates",
+			sourceFeeTokens: []cciptypes.Address{link, weth},
+			destTokens:      []cciptypes.Address{link, wavax},
+			destFeeTokens:   []cciptypes.Address{link, wavax},
+			sourcePrices:    []cciptypes.TokenPriceUpdate{linkPriceUpdate, wethPriceUpdate},
+			destPrices:      []cciptypes.TokenPriceUpdate{linkPriceUpdate, wavaxPriceUpdate},
+		},
+		{
+			name:               "everything fails when source fails",
+			sourceFeeTokensErr: errors.New("source error"),
+		},
+		{
+			name:             "everything fails when dest fee fails",
+			destFeeTokensErr: errors.New("dest fee error"),
+		},
+		{
+			name:          "everything fails when dest fails",
+			destTokensErr: errors.New("dest error"),
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			onrampReader := ccipdatamocks.NewOnRampReader(t)
+			offrampReader := ccipdatamocks.NewOffRampReader(t)
+			sourcePriceRegistry := ccipdatamocks.NewPriceRegistryReader(t)
+			destPriceRegistry := ccipdatamocks.NewPriceRegistryReader(t)
+			gasPriceEstimator := prices.NewMockGasPriceEstimatorExec(t)
+			sourcePriceRegistryProvider := ccipdataprovidermocks.NewPriceRegistry(t)
+
+			// All expectations use .Maybe() since error cases return before
+			// touching every reader.
+			sourcePriceRegistryAddress := cciptypes.Address(utils.RandomAddress().String())
+			onrampReader.On("SourcePriceRegistryAddress", ctx).Return(sourcePriceRegistryAddress, nil).Maybe()
+			offrampReader.On("CurrentRateLimiterState", ctx).Return(cciptypes.TokenBucketRateLimit{}, nil).Maybe()
+			offrampReader.On("GetSourceToDestTokensMapping", ctx).Return(map[cciptypes.Address]cciptypes.Address{}, nil).Maybe()
+			gasPriceEstimator.On("GetGasPrice", ctx).Return(big.NewInt(1e9), nil).Maybe()
+
+			offrampReader.On("GetTokens", ctx).Return(cciptypes.OffRampTokens{DestinationTokens: tt.destTokens}, tt.destTokensErr).Maybe()
+			sourcePriceRegistry.On("Address", mock.Anything).Return(sourcePriceRegistryAddress, nil).Maybe()
+			sourcePriceRegistry.On("GetFeeTokens", ctx).Return(tt.sourceFeeTokens, tt.sourceFeeTokensErr).Maybe()
+			sourcePriceRegistry.On("GetTokenPrices", ctx, mock.Anything).Return(tt.sourcePrices, nil).Maybe()
+			destPriceRegistry.On("GetFeeTokens", ctx).Return(tt.destFeeTokens, tt.destFeeTokensErr).Maybe()
+			destPriceRegistry.On("GetTokenPrices", ctx, mock.Anything).Return(tt.destPrices, nil).Maybe()
+
+			sourcePriceRegistryProvider.On("NewPriceRegistryReader", ctx, sourcePriceRegistryAddress).Return(sourcePriceRegistry, nil).Maybe()
+
+			reportingPlugin := ExecutionReportingPlugin{
+				onRampReader:                onrampReader,
+				offRampReader:               offrampReader,
+				sourcePriceRegistry:         sourcePriceRegistry,
+				sourcePriceRegistryProvider: sourcePriceRegistryProvider,
+				destPriceRegistry:           destPriceRegistry,
+				gasPriceEstimator:           gasPriceEstimator,
+				sourceWrappedNativeToken:    weth,
+				destWrappedNative:           wavax,
+			}
+
+			tokenData, err := reportingPlugin.prepareTokenExecData(ctx)
+			if tt.destFeeTokensErr != nil || tt.sourceFeeTokensErr != nil || tt.destTokensErr != nil {
+				require.Error(t, err)
+				return
+			}
+
+			require.NoError(t, err)
+			assert.Len(t, tokenData.sourceTokenPrices, len(tt.sourcePrices))
+			assert.Len(t, tokenData.destTokenPrices, len(tt.destPrices))
+
+			for token, price := range tokenData.sourceTokenPrices {
+				assert.Equal(t, tokenPrices[token].Value, price)
+			}
+
+			for token, price := range tokenData.destTokenPrices {
+				assert.Equal(t, tokenPrices[token].Value, price)
+			}
+		})
+	}
+}
+
+// encodeExecutionReport ABI-encodes an execution report through a throwaway
+// v1.2.0 offramp reader; any construction or encoding error fails the test.
+func encodeExecutionReport(t *testing.T, report cciptypes.ExecReport) []byte {
+	ctx := testutils.Context(t)
+	offRamp, err := v1_2_0.NewOffRamp(logger.TestLogger(t), utils.RandomAddress(), nil, nil, nil, nil)
+	require.NoError(t, err)
+	encoded, err := offRamp.EncodeExecutionReport(ctx, report)
+	require.NoError(t, err)
+	return encoded
+}
+
+// Verify the price registry update mechanism in case of configuration change on the source onRamp:
+// the plugin must lazily create a reader for the current registry address and
+// swap (closing the old reader) when the onramp reports a new address.
+func TestExecutionReportingPlugin_ensurePriceRegistrySynchronization(t *testing.T) {
+	p := &ExecutionReportingPlugin{}
+	p.lggr = logger.TestLogger(t)
+	p.sourcePriceRegistryLock = sync.RWMutex{}
+
+	sourcePriceRegistryAddress1 := cciptypes.Address(utils.RandomAddress().String())
+	sourcePriceRegistryAddress2 := cciptypes.Address(utils.RandomAddress().String())
+
+	mockPriceRegistryReader1 := ccipdatamocks.NewPriceRegistryReader(t)
+	mockPriceRegistryReader2 := ccipdatamocks.NewPriceRegistryReader(t)
+	mockPriceRegistryReader1.On("Address", mock.Anything).Return(sourcePriceRegistryAddress1, nil)
+	mockPriceRegistryReader2.On("Address", mock.Anything).Return(sourcePriceRegistryAddress2, nil).Maybe()
+	mockPriceRegistryReader1.On("Close", mock.Anything).Return(nil)
+	mockPriceRegistryReader2.On("Close", mock.Anything).Return(nil).Maybe()
+
+	mockSourcePriceRegistryProvider := ccipdataprovidermocks.NewPriceRegistry(t)
+	mockSourcePriceRegistryProvider.On("NewPriceRegistryReader", mock.Anything, sourcePriceRegistryAddress1).Return(mockPriceRegistryReader1, nil)
+	mockSourcePriceRegistryProvider.On("NewPriceRegistryReader", mock.Anything, sourcePriceRegistryAddress2).Return(mockPriceRegistryReader2, nil)
+	p.sourcePriceRegistryProvider = mockSourcePriceRegistryProvider
+
+	mockOnRampReader := ccipdatamocks.NewOnRampReader(t)
+	p.onRampReader = mockOnRampReader
+
+	// First sync: no registry reader yet, one must be created for address 1.
+	mockOnRampReader.On("SourcePriceRegistryAddress", mock.Anything).Return(sourcePriceRegistryAddress1, nil).Once()
+	// require.Nil (not require.Equal with untyped nil) correctly handles both
+	// nil and typed-nil interface values.
+	require.Nil(t, p.sourcePriceRegistry)
+	err := p.ensurePriceRegistrySynchronization(context.Background())
+	require.NoError(t, err)
+	require.Equal(t, mockPriceRegistryReader1, p.sourcePriceRegistry)
+
+	// Second sync: the onramp now reports address 2, so the reader must be
+	// replaced with the one bound to address 2.
+	mockOnRampReader.On("SourcePriceRegistryAddress", mock.Anything).Return(sourcePriceRegistryAddress2, nil).Once()
+	err = p.ensurePriceRegistrySynchronization(context.Background())
+	require.NoError(t, err)
+	require.Equal(t, mockPriceRegistryReader2, p.sourcePriceRegistry)
+}
diff --git a/core/services/ocr2/plugins/ccip/clo_ccip_integration_test.go b/core/services/ocr2/plugins/ccip/clo_ccip_integration_test.go
new file mode 100644
index 00000000000..142ba006be6
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/clo_ccip_integration_test.go
@@ -0,0 +1,137 @@
+package ccip_test
+
+import (
+ "encoding/json"
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/mock_v3_aggregator_contract"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers"
+ integrationtesthelpers "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers/integration"
+)
+
+// Test_CLOSpecApprovalFlow_pipeline runs the CLO job-spec approval flow with
+// token prices supplied by a bridge-based pipeline (the mock bridge servers
+// are closed when the test ends).
+func Test_CLOSpecApprovalFlow_pipeline(t *testing.T) {
+	ccipTH := integrationtesthelpers.SetupCCIPIntegrationTH(t, testhelpers.SourceChainID, testhelpers.SourceChainSelector, testhelpers.DestChainID, testhelpers.DestChainSelector)
+
+	tokenPricesUSDPipeline, linkUSD, ethUSD := ccipTH.CreatePricesPipeline(t)
+	defer linkUSD.Close()
+	defer ethUSD.Close()
+
+	// Empty price-getter config selects the pipeline-based price source.
+	test_CLOSpecApprovalFlow(t, ccipTH, tokenPricesUSDPipeline, "")
+}
+
+// Test_CLOSpecApprovalFlow_dynamicPriceGetter runs the CLO job-spec approval
+// flow with token prices supplied by on-chain mock V3 aggregators instead of
+// a pipeline: one aggregator each for source native, source LINK, dest LINK
+// and dest wrapped native, referenced from a DynamicPriceGetterConfig.
+func Test_CLOSpecApprovalFlow_dynamicPriceGetter(t *testing.T) {
+	ccipTH := integrationtesthelpers.SetupCCIPIntegrationTH(t, testhelpers.SourceChainID, testhelpers.SourceChainSelector, testhelpers.DestChainID, testhelpers.DestChainSelector)
+
+	//Set up the aggregators here to avoid modifying ccipTH.
+	srcLinkAddr := ccipTH.Source.LinkToken.Address()
+	dstLinkAddr := ccipTH.Dest.LinkToken.Address()
+	srcNativeAddr, err := ccipTH.Source.Router.GetWrappedNative(nil)
+	require.NoError(t, err)
+	aggDstNativeAddr := ccipTH.Dest.WrappedNative.Address()
+
+	aggSrcNatAddr, _, aggSrcNat, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Source.User, ccipTH.Source.Chain, 18, big.NewInt(2e18))
+	require.NoError(t, err)
+	_, err = aggSrcNat.UpdateRoundData(ccipTH.Source.User, big.NewInt(50), big.NewInt(17000000), big.NewInt(1000), big.NewInt(1000))
+	require.NoError(t, err)
+	ccipTH.Source.Chain.Commit()
+
+	aggSrcLnkAddr, _, aggSrcLnk, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Source.User, ccipTH.Source.Chain, 18, big.NewInt(3e18))
+	require.NoError(t, err)
+	// NOTE(review): this commits the Dest chain although the aggregator above
+	// was deployed on the Source chain — confirm whether Source.Chain.Commit()
+	// was intended here.
+	ccipTH.Dest.Chain.Commit()
+	_, err = aggSrcLnk.UpdateRoundData(ccipTH.Source.User, big.NewInt(50), big.NewInt(8000000), big.NewInt(1000), big.NewInt(1000))
+	require.NoError(t, err)
+	ccipTH.Source.Chain.Commit()
+
+	aggDstLnkAddr, _, aggDstLnk, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Dest.User, ccipTH.Dest.Chain, 18, big.NewInt(3e18))
+	require.NoError(t, err)
+	ccipTH.Dest.Chain.Commit()
+	_, err = aggDstLnk.UpdateRoundData(ccipTH.Dest.User, big.NewInt(50), big.NewInt(8000000), big.NewInt(1000), big.NewInt(1000))
+	require.NoError(t, err)
+	ccipTH.Dest.Chain.Commit()
+
+	// Check content is ok on aggregator.
+	tmp, err := aggDstLnk.LatestRoundData(&bind.CallOpts{})
+	require.NoError(t, err)
+	require.Equal(t, big.NewInt(50), tmp.RoundId)
+	require.Equal(t, big.NewInt(8000000), tmp.Answer)
+
+	// deploy dest wrapped native aggregator
+	aggDstNativeAggrAddr, _, aggDstNativeAggr, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Dest.User, ccipTH.Dest.Chain, 18, big.NewInt(3e18))
+	require.NoError(t, err)
+	ccipTH.Dest.Chain.Commit()
+	_, err = aggDstNativeAggr.UpdateRoundData(ccipTH.Dest.User, big.NewInt(50), big.NewInt(500000), big.NewInt(1000), big.NewInt(1000))
+	require.NoError(t, err)
+	ccipTH.Dest.Chain.Commit()
+
+	// Map each token address to its aggregator so the price getter reads
+	// prices from the chain instead of a pipeline.
+	priceGetterConfig := config.DynamicPriceGetterConfig{
+		AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{
+			srcLinkAddr: {
+				ChainID:                   ccipTH.Source.ChainID,
+				AggregatorContractAddress: aggSrcLnkAddr,
+			},
+			srcNativeAddr: {
+				ChainID:                   ccipTH.Source.ChainID,
+				AggregatorContractAddress: aggSrcNatAddr,
+			},
+			dstLinkAddr: {
+				ChainID:                   ccipTH.Dest.ChainID,
+				AggregatorContractAddress: aggDstLnkAddr,
+			},
+			aggDstNativeAddr: {
+				ChainID:                   ccipTH.Dest.ChainID,
+				AggregatorContractAddress: aggDstNativeAggrAddr,
+			},
+		},
+		StaticPrices: map[common.Address]config.StaticPriceConfig{},
+	}
+	priceGetterConfigBytes, err := json.MarshalIndent(priceGetterConfig, "", " ")
+	require.NoError(t, err)
+	priceGetterConfigJson := string(priceGetterConfigBytes)
+
+	// Empty pipeline selects the dynamic price getter instead.
+	test_CLOSpecApprovalFlow(t, ccipTH, "", priceGetterConfigJson)
+}
+
+// test_CLOSpecApprovalFlow is the shared body of the CLO approval-flow tests:
+// it sets up nodes/jobs with either a prices pipeline or a dynamic price
+// getter config (exactly one should be non-empty), proposes and approves job
+// specs via the feeds manager, then sends one CCIP message end-to-end and
+// asserts it commits and executes successfully.
+func test_CLOSpecApprovalFlow(t *testing.T, ccipTH integrationtesthelpers.CCIPIntegrationTestHarness, tokenPricesUSDPipeline string, priceGetterConfiguration string) {
+	jobParams := ccipTH.SetUpNodesAndJobs(t, tokenPricesUSDPipeline, priceGetterConfiguration, "http://blah.com")
+	ccipTH.SetupFeedsManager(t)
+
+	// Propose and approve new specs
+	ccipTH.ApproveJobSpecs(t, jobParams)
+
+	// Sanity check that CCIP works after CLO flow
+	currentSeqNum := 1
+
+	extraArgs, err := testhelpers.GetEVMExtraArgsV1(big.NewInt(200_003), false)
+	require.NoError(t, err)
+
+	msg := router.ClientEVM2AnyMessage{
+		Receiver:     testhelpers.MustEncodeAddress(t, ccipTH.Dest.Receivers[0].Receiver.Address()),
+		Data:         utils.RandomAddress().Bytes(),
+		TokenAmounts: []router.ClientEVMTokenAmount{},
+		FeeToken:     ccipTH.Source.LinkToken.Address(),
+		ExtraArgs:    extraArgs,
+	}
+	fee, err := ccipTH.Source.Router.GetFee(nil, testhelpers.DestChainSelector, msg)
+	require.NoError(t, err)
+
+	// Approve exactly the quoted fee in LINK before sending.
+	_, err = ccipTH.Source.LinkToken.Approve(ccipTH.Source.User, ccipTH.Source.Router.Address(), new(big.Int).Set(fee))
+	require.NoError(t, err)
+	ccipTH.Source.Chain.Commit()
+
+	ccipTH.SendRequest(t, msg)
+	ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum)
+	ccipTH.EventuallyReportCommitted(t, currentSeqNum)
+
+	executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum)
+	assert.Len(t, executionLogs, 1)
+	ccipTH.AssertExecState(t, executionLogs[0], testhelpers.ExecutionStateSuccess)
+}
diff --git a/core/services/ocr2/plugins/ccip/config/chain_config.go b/core/services/ocr2/plugins/ccip/config/chain_config.go
new file mode 100644
index 00000000000..ff82def6066
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/config/chain_config.go
@@ -0,0 +1,48 @@
+package config
+
+import (
+ "strconv"
+
+ "github.com/pkg/errors"
+ chainselectors "github.com/smartcontractkit/chain-selectors"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm"
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+)
+
+// GetChainFromSpec resolves the destination chain from the "chainID" entry of
+// the spec's RelayConfig. It returns the chain, its numeric ID, and an error
+// if the entry is missing or is not a JSON number.
+func GetChainFromSpec(spec *job.OCR2OracleSpec, chainSet legacyevm.LegacyChainContainer) (legacyevm.Chain, int64, error) {
+	chainIDInterface, ok := spec.RelayConfig["chainID"]
+	if !ok {
+		return nil, 0, errors.New("chainID must be provided in relay config")
+	}
+	// JSON numbers decode to float64; use a checked assertion so a malformed
+	// relay config yields an error instead of a panic.
+	chainIDFloat, ok := chainIDInterface.(float64)
+	if !ok {
+		return nil, 0, errors.Errorf("invalid chainID type in relay config: %T", chainIDInterface)
+	}
+	return GetChainByChainID(chainSet, uint64(chainIDFloat))
+}
+
+// GetChainByChainSelector maps a CCIP chain selector to its chain ID and then
+// looks the chain up in the container.
+func GetChainByChainSelector(chainSet legacyevm.LegacyChainContainer, chainSelector uint64) (legacyevm.Chain, int64, error) {
+	id, err := chainselectors.ChainIdFromSelector(chainSelector)
+	if err != nil {
+		return nil, 0, err
+	}
+	return GetChainByChainID(chainSet, id)
+}
+
+// GetChainByChainID fetches the chain with the given numeric ID from the
+// container and returns it together with its ID as an int64.
+func GetChainByChainID(chainSet legacyevm.LegacyChainContainer, chainID uint64) (legacyevm.Chain, int64, error) {
+	key := strconv.FormatUint(chainID, 10)
+	chain, err := chainSet.Get(key)
+	if err != nil {
+		return nil, 0, errors.Wrap(err, "chain not found in chainset")
+	}
+	return chain, chain.ID().Int64(), nil
+}
+
+// ResolveChainNames translates the source and destination chain IDs into their
+// chain-selectors registry names, failing if either ID is unknown.
+func ResolveChainNames(sourceChainId int64, destChainId int64) (string, string, error) {
+	srcName, err := chainselectors.NameFromChainId(uint64(sourceChainId))
+	if err != nil {
+		return "", "", err
+	}
+	dstName, err := chainselectors.NameFromChainId(uint64(destChainId))
+	if err != nil {
+		return "", "", err
+	}
+	return srcName, dstName, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/config/chain_config_test.go b/core/services/ocr2/plugins/ccip/config/chain_config_test.go
new file mode 100644
index 00000000000..df2351a5ea4
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/config/chain_config_test.go
@@ -0,0 +1,135 @@
+package config
+
+import (
+ "math/big"
+ "strconv"
+ "testing"
+
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+)
+
+// TestGetChainFromSpec covers the happy path (chainID present as a JSON
+// float64 in RelayConfig) and the missing-chainID error path.
+func TestGetChainFromSpec(t *testing.T) {
+	testChainID := int64(1337)
+
+	tests := []struct {
+		name           string
+		spec           *job.OCR2OracleSpec
+		expectedErr    bool
+		expectedErrMsg string
+	}{
+		{
+			name: "success",
+			spec: &job.OCR2OracleSpec{
+				RelayConfig: job.JSONConfig{
+					// chainID arrives as float64, matching JSON number decoding.
+					"chainID": float64(testChainID),
+				},
+			},
+			expectedErr: false,
+		},
+		{
+			name:           "missing_chain_ID",
+			spec:           &job.OCR2OracleSpec{},
+			expectedErr:    true,
+			expectedErrMsg: "chainID must be provided in relay config",
+		},
+	}
+
+	// Shared mocks; .Maybe() lets the error case skip the expectations.
+	mockChain := mocks.NewChain(t)
+	mockChain.On("ID").Return(big.NewInt(testChainID)).Maybe()
+
+	mockChainSet := mocks.NewLegacyChainContainer(t)
+	mockChainSet.On("Get", strconv.FormatInt(testChainID, 10)).Return(mockChain, nil).Maybe()
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			chain, chainID, err := GetChainFromSpec(test.spec, mockChainSet)
+			if test.expectedErr {
+				require.Error(t, err)
+				require.Contains(t, err.Error(), test.expectedErrMsg)
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, mockChain, chain)
+				require.Equal(t, testChainID, chainID)
+			}
+		})
+	}
+}
+
+// TestGetChainByChainSelector_success verifies a known selector resolves to
+// the expected chain ID and that the chain is fetched from the container.
+func TestGetChainByChainSelector_success(t *testing.T) {
+	mockChain := mocks.NewChain(t)
+	mockChain.On("ID").Return(big.NewInt(11155111))
+
+	mockChainSet := mocks.NewLegacyChainContainer(t)
+	mockChainSet.On("Get", "11155111").Return(mockChain, nil)
+
+	// Ethereum Sepolia chain selector.
+	chain, chainID, err := GetChainByChainSelector(mockChainSet, uint64(16015286601757825753))
+	require.NoError(t, err)
+	require.Equal(t, mockChain, chain)
+	require.Equal(t, int64(11155111), chainID)
+}
+
+// TestGetChainByChainSelector_selectorNotFound verifies that an unknown
+// selector errors before the chain container is ever consulted.
+func TestGetChainByChainSelector_selectorNotFound(t *testing.T) {
+	mockChainSet := mocks.NewLegacyChainContainer(t)
+
+	_, _, err := GetChainByChainSelector(mockChainSet, uint64(444000444))
+	require.Error(t, err)
+}
+
+// TestGetChainById_notFound verifies that a container lookup failure is
+// wrapped with the "chain not found in chainset" context.
+func TestGetChainById_notFound(t *testing.T) {
+	mockChainSet := mocks.NewLegacyChainContainer(t)
+	mockChainSet.On("Get", "444").Return(nil, errors.New("test")).Maybe()
+
+	_, _, err := GetChainByChainID(mockChainSet, uint64(444))
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "chain not found in chainset")
+}
+
+// TestResolveChainNames covers name resolution for known chain IDs
+// (Ethereum mainnet, Optimism) and the unknown-ID error paths for both the
+// source and destination positions.
+func TestResolveChainNames(t *testing.T) {
+	tests := []struct {
+		name                    string
+		sourceChainId           int64
+		destChainId             int64
+		expectedSourceChainName string
+		expectedDestChainName   string
+		expectedErr             bool
+	}{
+		{
+			name:                    "success",
+			sourceChainId:           1,
+			destChainId:             10,
+			expectedSourceChainName: "ethereum-mainnet",
+			expectedDestChainName:   "ethereum-mainnet-optimism-1",
+		},
+		{
+			name:          "source chain not found",
+			sourceChainId: 901278309182,
+			destChainId:   10,
+			expectedErr:   true,
+		},
+		{
+			name:          "dest chain not found",
+			sourceChainId: 1,
+			destChainId:   901278309182,
+			expectedErr:   true,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			sourceChainName, destChainName, err := ResolveChainNames(test.sourceChainId, test.destChainId)
+			if test.expectedErr {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+				assert.Equal(t, test.expectedSourceChainName, sourceChainName)
+				assert.Equal(t, test.expectedDestChainName, destChainName)
+			}
+		})
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/config/config.go b/core/services/ocr2/plugins/ccip/config/config.go
new file mode 100644
index 00000000000..a24a6edfd13
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/config/config.go
@@ -0,0 +1,152 @@
+package config
+
+import (
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/pkg/errors"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/bytes"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+)
+
+// CommitPluginJobSpecConfig contains the plugin specific variables for the ccip.CCIPCommit plugin.
+type CommitPluginJobSpecConfig struct {
+	// SourceStartBlock and DestStartBlock seed log scanning; only used on first time job add.
+	SourceStartBlock, DestStartBlock uint64 // Only for first time job add.
+	// OffRamp is the destination-chain OffRamp contract address.
+	OffRamp cciptypes.Address `json:"offRamp"`
+	// TokenPricesUSDPipeline should contain a token price pipeline for the following tokens:
+	// The SOURCE chain wrapped native
+	// The DESTINATION supported tokens (including fee tokens) as defined in destination OffRamp and PriceRegistry.
+	TokenPricesUSDPipeline string `json:"tokenPricesUSDPipeline,omitempty"`
+	// PriceGetterConfig defines where to get the token prices from (i.e. static or aggregator source).
+	PriceGetterConfig *DynamicPriceGetterConfig `json:"priceGetterConfig,omitempty"`
+}
+
+// CommitPluginConfig carries runtime configuration for the commit plugin.
+// It is JSON-encoded via Encode.
+type CommitPluginConfig struct {
+	// IsSourceProvider indicates whether this instance runs against the source chain.
+	IsSourceProvider                 bool
+	// SourceStartBlock and DestStartBlock mirror the job-spec start blocks.
+	SourceStartBlock, DestStartBlock uint64
+}
+
+// Encode returns the JSON encoding of the config.
+func (c CommitPluginConfig) Encode() ([]byte, error) {
+	// Return directly: the previous local variable named `bytes` shadowed the
+	// imported chainlink-common `bytes` utility package.
+	return json.Marshal(c)
+}
+
+// DynamicPriceGetterConfig specifies which configuration to use for getting the price of tokens (map keys).
+// A token must appear in at most one of the two maps; Validate enforces this.
+type DynamicPriceGetterConfig struct {
+	AggregatorPrices map[common.Address]AggregatorPriceConfig `json:"aggregatorPrices"`
+	StaticPrices     map[common.Address]StaticPriceConfig     `json:"staticPrices"`
+}
+
+// AggregatorPriceConfig specifies a price retrieved from an aggregator contract.
+type AggregatorPriceConfig struct {
+	// ChainID is JSON-encoded as a string (",string" tag).
+	ChainID                   uint64         `json:"chainID,string"`
+	AggregatorContractAddress common.Address `json:"contractAddress"`
+}
+
+// StaticPriceConfig specifies a price defined statically.
+type StaticPriceConfig struct {
+	// ChainID is JSON-encoded as a string (",string" tag).
+	ChainID uint64   `json:"chainID,string"`
+	Price   *big.Int `json:"price"`
+}
+
+// UnmarshalJSON provides a custom un-marshaller to handle JSON embedded in Toml content.
+// When the payload arrives as a quoted string (JSON embedded in a TOML value),
+// the surrounding quotes and the escape sequences \n, \t and \ are stripped
+// before decoding. The local Alias type drops this method so the inner
+// json.Unmarshal call does not recurse.
+func (c *DynamicPriceGetterConfig) UnmarshalJSON(data []byte) error {
+	type Alias DynamicPriceGetterConfig
+	if bytes.HasQuotes(data) {
+		trimmed := string(bytes.TrimQuotes(data))
+		// Remove escaped whitespace and backslashes left over from the TOML string.
+		trimmed = strings.ReplaceAll(trimmed, "\\n", "")
+		trimmed = strings.ReplaceAll(trimmed, "\\t", "")
+		trimmed = strings.ReplaceAll(trimmed, "\\", "")
+		return json.Unmarshal([]byte(trimmed), (*Alias)(c))
+	}
+	return json.Unmarshal(data, (*Alias)(c))
+}
+
+// Validate checks that every aggregator and static entry carries a non-zero
+// token address and chain id, that aggregator entries have a contract address,
+// and that no token is priced by both rule sets.
+func (c *DynamicPriceGetterConfig) Validate() error {
+	for tokenAddr, aggCfg := range c.AggregatorPrices {
+		switch {
+		case tokenAddr == utils.ZeroAddress:
+			return fmt.Errorf("token address is zero")
+		case aggCfg.AggregatorContractAddress == utils.ZeroAddress:
+			return fmt.Errorf("aggregator contract address is zero")
+		case aggCfg.ChainID == 0:
+			return fmt.Errorf("chain id is zero")
+		}
+	}
+
+	for tokenAddr, staticCfg := range c.StaticPrices {
+		if tokenAddr == utils.ZeroAddress {
+			return fmt.Errorf("token address is zero")
+		}
+		if staticCfg.ChainID == 0 {
+			return fmt.Errorf("chain id is zero")
+		}
+	}
+
+	// Ensure no duplication in token price resolution rules. Ranging over a
+	// nil map is a no-op, so no explicit nil guards are needed.
+	for tk := range c.AggregatorPrices {
+		if _, exists := c.StaticPrices[tk]; exists {
+			return fmt.Errorf("token %s defined in both aggregator and static price rules", tk)
+		}
+	}
+	return nil
+}
+
+// ExecPluginJobSpecConfig contains the plugin specific variables for the ccip.CCIPExecution plugin.
+type ExecPluginJobSpecConfig struct {
+	SourceStartBlock, DestStartBlock uint64 // Only for first time job add.
+	USDCConfig                       USDCConfig
+}
+
+// USDCConfig configures the USDC attestation lookup used by the exec plugin.
+type USDCConfig struct {
+	// SourceTokenAddress is the USDC token address on the source chain.
+	SourceTokenAddress              common.Address
+	// SourceMessageTransmitterAddress is the CCTP message transmitter on the source chain.
+	SourceMessageTransmitterAddress common.Address
+	// AttestationAPI is the base URL of the attestation service.
+	AttestationAPI               string
+	AttestationAPITimeoutSeconds uint
+	// AttestationAPIIntervalMilliseconds can be set to -1 to disable or 0 to use a default interval.
+	AttestationAPIIntervalMilliseconds int
+}
+
+// ExecPluginConfig carries runtime configuration for the exec plugin.
+// It is JSON-encoded via Encode.
+type ExecPluginConfig struct {
+	SourceStartBlock, DestStartBlock uint64 // Only for first time job add.
+	// IsSourceProvider indicates whether this instance runs against the source chain.
+	IsSourceProvider bool
+	USDCConfig       USDCConfig
+	JobID            string
+}
+
+// Encode returns the JSON encoding of the config.
+func (e ExecPluginConfig) Encode() ([]byte, error) {
+	// Return directly: the previous local variable named `bytes` shadowed the
+	// imported chainlink-common `bytes` utility package.
+	return json.Marshal(e)
+}
+
+// ValidateUSDCConfig checks that the required USDC fields are set and that the
+// attestation polling interval is within bounds.
+func (uc *USDCConfig) ValidateUSDCConfig() error {
+	switch {
+	case uc.AttestationAPI == "":
+		return errors.New("AttestationAPI is required")
+	case uc.AttestationAPIIntervalMilliseconds < -1:
+		return errors.New("AttestationAPIIntervalMilliseconds must be -1 to disable, 0 for default or greater to define the exact interval")
+	case uc.SourceTokenAddress == utils.ZeroAddress:
+		return errors.New("SourceTokenAddress is required")
+	case uc.SourceMessageTransmitterAddress == utils.ZeroAddress:
+		return errors.New("SourceMessageTransmitterAddress is required")
+	}
+	return nil
+}
diff --git a/core/services/ocr2/plugins/ccip/config/config_test.go b/core/services/ocr2/plugins/ccip/config/config_test.go
new file mode 100644
index 00000000000..e6207aa2231
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/config/config_test.go
@@ -0,0 +1,234 @@
+package config
+
+import (
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+)
+
+// TestCommitConfig round-trips CommitPluginJobSpecConfig through JSON and
+// checks PriceGetterConfig.Validate against a valid config, a zero aggregator
+// contract address, and a zero chain ID.
+func TestCommitConfig(t *testing.T) {
+	tests := []struct {
+		name                    string
+		cfg                     CommitPluginJobSpecConfig
+		expectedValidationError error
+	}{
+		{
+			name: "valid config",
+			cfg: CommitPluginJobSpecConfig{
+				SourceStartBlock:       222,
+				DestStartBlock:         333,
+				OffRamp:                ccipcalc.HexToAddress("0x123"),
+				TokenPricesUSDPipeline: `merge [type=merge left="{}" right="{\"0xC79b96044906550A5652BCf20a6EA02f139B9Ae5\":\"1000000000000000000\"}"];`,
+				PriceGetterConfig: &DynamicPriceGetterConfig{
+					AggregatorPrices: map[common.Address]AggregatorPriceConfig{
+						common.HexToAddress("0x0820c05e1fba1244763a494a52272170c321cad3"): {
+							ChainID:                   1000,
+							AggregatorContractAddress: common.HexToAddress("0xb8dabd288955d302d05ca6b011bb46dfa3ea7acf"),
+						},
+						common.HexToAddress("0x4a98bb4d65347016a7ab6f85bea24b129c9a1272"): {
+							ChainID:                   1337,
+							AggregatorContractAddress: common.HexToAddress("0xb80244cc8b0bb18db071c150b36e9bcb8310b236"),
+						},
+					},
+					StaticPrices: map[common.Address]StaticPriceConfig{
+						common.HexToAddress("0xec8c353470ccaa4f43067fcde40558e084a12927"): {
+							ChainID: 1057,
+							Price:   big.NewInt(1000000000000000000),
+						},
+					},
+				},
+			},
+			expectedValidationError: nil,
+		},
+		{
+			// HexToAddress("") yields the zero address, which Validate rejects.
+			name: "missing dynamic aggregator contract address",
+			cfg: CommitPluginJobSpecConfig{
+				SourceStartBlock:       222,
+				DestStartBlock:         333,
+				OffRamp:                ccipcalc.HexToAddress("0x123"),
+				TokenPricesUSDPipeline: `merge [type=merge left="{}" right="{\"0xC79b96044906550A5652BCf20a6EA02f139B9Ae5\":\"1000000000000000000\"}"];`,
+				PriceGetterConfig: &DynamicPriceGetterConfig{
+					AggregatorPrices: map[common.Address]AggregatorPriceConfig{
+						common.HexToAddress("0x0820c05e1fba1244763a494a52272170c321cad3"): {
+							ChainID:                   1000,
+							AggregatorContractAddress: common.HexToAddress("0xb8dabd288955d302d05ca6b011bb46dfa3ea7acf"),
+						},
+						common.HexToAddress("0x4a98bb4d65347016a7ab6f85bea24b129c9a1272"): {
+							ChainID:                   1337,
+							AggregatorContractAddress: common.HexToAddress(""),
+						},
+					},
+					StaticPrices: map[common.Address]StaticPriceConfig{
+						common.HexToAddress("0xec8c353470ccaa4f43067fcde40558e084a12927"): {
+							ChainID: 1057,
+							Price:   big.NewInt(1000000000000000000),
+						},
+					},
+				},
+			},
+			expectedValidationError: fmt.Errorf("aggregator contract address is zero"),
+		},
+		{
+			name: "missing chain ID",
+			cfg: CommitPluginJobSpecConfig{
+				SourceStartBlock:       222,
+				DestStartBlock:         333,
+				OffRamp:                ccipcalc.HexToAddress("0x123"),
+				TokenPricesUSDPipeline: `merge [type=merge left="{}" right="{\"0xC79b96044906550A5652BCf20a6EA02f139B9Ae5\":\"1000000000000000000\"}"];`,
+				PriceGetterConfig: &DynamicPriceGetterConfig{
+					AggregatorPrices: map[common.Address]AggregatorPriceConfig{
+						common.HexToAddress("0x0820c05e1fba1244763a494a52272170c321cad3"): {
+							ChainID:                   1000,
+							AggregatorContractAddress: common.HexToAddress("0xb8dabd288955d302d05ca6b011bb46dfa3ea7acf"),
+						},
+						common.HexToAddress("0x4a98bb4d65347016a7ab6f85bea24b129c9a1272"): {
+							ChainID:                   1337,
+							AggregatorContractAddress: common.HexToAddress("0xb80244cc8b0bb18db071c150b36e9bcb8310b236"),
+						},
+					},
+					StaticPrices: map[common.Address]StaticPriceConfig{
+						common.HexToAddress("0xec8c353470ccaa4f43067fcde40558e084a12927"): {
+							ChainID: 0,
+							Price:   big.NewInt(1000000000000000000),
+						},
+					},
+				},
+			},
+			expectedValidationError: fmt.Errorf("chain id is zero"),
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			// Verify proper marshall/unmarshalling of the config.
+			bts, err := json.Marshal(test.cfg)
+			require.NoError(t, err)
+			parsedConfig := CommitPluginJobSpecConfig{}
+			require.NoError(t, json.Unmarshal(bts, &parsedConfig))
+			require.Equal(t, test.cfg, parsedConfig)
+
+			// Ensure correctness of price getter configuration.
+			pgc := test.cfg.PriceGetterConfig
+			err = pgc.Validate()
+			if test.expectedValidationError != nil {
+				require.ErrorContains(t, err, test.expectedValidationError.Error())
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, uint64(1000), pgc.AggregatorPrices[common.HexToAddress("0x0820c05e1fba1244763a494a52272170c321cad3")].ChainID)
+				require.Equal(t, uint64(1337), pgc.AggregatorPrices[common.HexToAddress("0x4a98bb4d65347016a7ab6f85bea24b129c9a1272")].ChainID)
+				require.Equal(t, uint64(1057), pgc.StaticPrices[common.HexToAddress("0xec8c353470ccaa4f43067fcde40558e084a12927")].ChainID)
+			}
+		})
+	}
+}
+
+// TestExecutionConfig round-trips ExecPluginJobSpecConfig through JSON and
+// checks the result is unchanged.
+func TestExecutionConfig(t *testing.T) {
+	exampleConfig := ExecPluginJobSpecConfig{
+		SourceStartBlock: 222,
+		DestStartBlock:   333,
+	}
+
+	bts, err := json.Marshal(exampleConfig)
+	require.NoError(t, err)
+
+	parsedConfig := ExecPluginJobSpecConfig{}
+	require.NoError(t, json.Unmarshal(bts, &parsedConfig))
+
+	require.Equal(t, exampleConfig, parsedConfig)
+}
+
+// TestUSDCValidate walks ValidateUSDCConfig through each required-field error
+// in validation order, ending with a fully valid config.
+func TestUSDCValidate(t *testing.T) {
+	testcases := []struct {
+		config USDCConfig
+		err    string
+	}{
+		{
+			config: USDCConfig{},
+			err:    "AttestationAPI is required",
+		},
+		{
+			config: USDCConfig{
+				AttestationAPI: "api",
+			},
+			err: "SourceTokenAddress is required",
+		},
+		{
+			// Explicit zero address is treated the same as unset.
+			config: USDCConfig{
+				AttestationAPI:     "api",
+				SourceTokenAddress: utils.ZeroAddress,
+			},
+			err: "SourceTokenAddress is required",
+		},
+		{
+			config: USDCConfig{
+				AttestationAPI:     "api",
+				SourceTokenAddress: utils.RandomAddress(),
+			},
+			err: "SourceMessageTransmitterAddress is required",
+		},
+		{
+			config: USDCConfig{
+				AttestationAPI:                  "api",
+				SourceTokenAddress:              utils.RandomAddress(),
+				SourceMessageTransmitterAddress: utils.ZeroAddress,
+			},
+			err: "SourceMessageTransmitterAddress is required",
+		},
+		{
+			// All required fields set: validation passes.
+			config: USDCConfig{
+				AttestationAPI:                  "api",
+				SourceTokenAddress:              utils.RandomAddress(),
+				SourceMessageTransmitterAddress: utils.RandomAddress(),
+			},
+			err: "",
+		},
+	}
+
+	for _, tc := range testcases {
+		tc := tc
+		t.Run(fmt.Sprintf("error = %s", tc.err), func(t *testing.T) {
+			t.Parallel()
+			err := tc.config.ValidateUSDCConfig()
+			if tc.err != "" {
+				require.ErrorContains(t, err, tc.err)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
+
+// TestUnmarshallDynamicPriceConfig decodes a plain (unquoted) JSON
+// DynamicPriceGetterConfig — note chainID values are strings per the
+// ",string" struct tags — and checks it validates cleanly.
+func TestUnmarshallDynamicPriceConfig(t *testing.T) {
+	jsonCfg := `
+{
+	"aggregatorPrices": {
+		"0x0820c05e1fba1244763a494a52272170c321cad3": {
+			"chainID": "1000",
+			"contractAddress": "0xb8dabd288955d302d05ca6b011bb46dfa3ea7acf"
+		},
+		"0x4a98bb4d65347016a7ab6f85bea24b129c9a1272": {
+			"chainID": "1337",
+			"contractAddress": "0xb80244cc8b0bb18db071c150b36e9bcb8310b236"
+		}
+	},
+	"staticPrices": {
+		"0xec8c353470ccaa4f43067fcde40558e084a12927": {
+			"chainID": "1057",
+			"price": 1000000000000000000
+		}
+	}
+}
+`
+	var cfg DynamicPriceGetterConfig
+	err := json.Unmarshal([]byte(jsonCfg), &cfg)
+	require.NoError(t, err)
+	err = cfg.Validate()
+	require.NoError(t, err)
+}
diff --git a/core/services/ocr2/plugins/ccip/config/offchain_config.go b/core/services/ocr2/plugins/ccip/config/offchain_config.go
new file mode 100644
index 00000000000..f8fba3f1bcb
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/config/offchain_config.go
@@ -0,0 +1,26 @@
+package config
+
+import (
+ "encoding/json"
+)
+
+// OffchainConfig is implemented by plugin offchain-config types that can
+// validate themselves after decoding.
+type OffchainConfig interface {
+	Validate() error
+}
+
+// DecodeOffchainConfig JSON-decodes encodedConfig into a T and runs its
+// Validate method. The (possibly partially populated) value is returned
+// alongside any decode or validation error.
+func DecodeOffchainConfig[T OffchainConfig](encodedConfig []byte) (T, error) {
+	var decoded T
+	if err := json.Unmarshal(encodedConfig, &decoded); err != nil {
+		return decoded, err
+	}
+	if err := decoded.Validate(); err != nil {
+		return decoded, err
+	}
+	return decoded, nil
+}
+
+// EncodeOffchainConfig returns the JSON encoding of the given offchain config.
+// Note: it does not call Validate; callers are expected to validate first.
+func EncodeOffchainConfig[T OffchainConfig](occ T) ([]byte, error) {
+	return json.Marshal(occ)
+}
diff --git a/core/services/ocr2/plugins/ccip/config/type_and_version.go b/core/services/ocr2/plugins/ccip/config/type_and_version.go
new file mode 100644
index 00000000000..fdfd892b087
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/config/type_and_version.go
@@ -0,0 +1,73 @@
+package config
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/Masterminds/semver/v3"
+ mapset "github.com/deckarep/golang-set/v2"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+
+ type_and_version "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/type_and_version_interface_wrapper"
+)
+
+// ContractType is the contract's self-reported type name, as returned by its
+// typeAndVersion() method.
+type ContractType string
+
+var (
+	EVM2EVMOnRamp  ContractType = "EVM2EVMOnRamp"
+	EVM2EVMOffRamp ContractType = "EVM2EVMOffRamp"
+	CommitStore    ContractType = "CommitStore"
+	PriceRegistry  ContractType = "PriceRegistry"
+	// ContractTypes is the set of contract types recognized by TypeAndVersion.
+	ContractTypes = mapset.NewSet[ContractType](
+		EVM2EVMOffRamp,
+		EVM2EVMOnRamp,
+		CommitStore,
+		PriceRegistry,
+	)
+)
+
+// VerifyTypeAndVersion reads typeAndVersion() from the contract at addr and
+// returns its semantic version, erroring if the reported type does not match
+// expectedType.
+func VerifyTypeAndVersion(addr common.Address, client bind.ContractBackend, expectedType ContractType) (semver.Version, error) {
+	contractType, version, err := TypeAndVersion(addr, client)
+	if err != nil {
+		// ": " separator so the wrapped error does not run into the prefix.
+		return semver.Version{}, fmt.Errorf("failed getting type and version: %w", err)
+	}
+	if contractType != expectedType {
+		return semver.Version{}, fmt.Errorf("wrong contract type %s", contractType)
+	}
+	return version, nil
+}
+
+// TypeAndVersion calls typeAndVersion() on the contract at addr and returns
+// the parsed contract type and semantic version. It errors if the call fails,
+// the string cannot be split into type and version, the version is not valid
+// semver, or the type is not in ContractTypes.
+func TypeAndVersion(addr common.Address, client bind.ContractBackend) (ContractType, semver.Version, error) {
+	tv, err := type_and_version.NewTypeAndVersionInterface(addr, client)
+	if err != nil {
+		return "", semver.Version{}, err
+	}
+	tvStr, err := tv.TypeAndVersion(nil)
+	if err != nil {
+		return "", semver.Version{}, fmt.Errorf("error calling typeAndVersion on addr: %s %w", addr.String(), err)
+	}
+
+	contractType, versionStr, err := ParseTypeAndVersion(tvStr)
+	if err != nil {
+		return "", semver.Version{}, err
+	}
+	v, err := semver.NewVersion(versionStr)
+	if err != nil {
+		return "", semver.Version{}, fmt.Errorf("failed parsing version %s: %w", versionStr, err)
+	}
+
+	// Reject types this plugin does not know how to handle.
+	if !ContractTypes.Contains(ContractType(contractType)) {
+		return "", semver.Version{}, fmt.Errorf("unrecognized contract type %v", contractType)
+	}
+	return ContractType(contractType), *v, nil
+}
+
+// ParseTypeAndVersion splits a "<type> <version>" string into its first two
+// space-separated fields; any further fields are ignored.
+func ParseTypeAndVersion(tvStr string) (string, string, error) {
+	parts := strings.Split(tvStr, " ")
+	if len(parts) < 2 {
+		return "", "", fmt.Errorf("invalid type and version %s", tvStr)
+	}
+	return parts[0], parts[1], nil
+}
diff --git a/core/services/ocr2/plugins/ccip/exportinternal.go b/core/services/ocr2/plugins/ccip/exportinternal.go
new file mode 100644
index 00000000000..2a5767ac85d
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/exportinternal.go
@@ -0,0 +1,135 @@
+package ccip
+
+import (
+ "context"
+ "math/big"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib"
+)
+
+// GenericAddrToEvm re-exports ccipcalc.GenericAddrToEvm for use outside the
+// internal package tree.
+func GenericAddrToEvm(addr ccip.Address) (common.Address, error) {
+	return ccipcalc.GenericAddrToEvm(addr)
+}
+
+// EvmAddrToGeneric re-exports ccipcalc.EvmAddrToGeneric.
+func EvmAddrToGeneric(addr common.Address) ccip.Address {
+	return ccipcalc.EvmAddrToGeneric(addr)
+}
+
+// NewEvmPriceRegistry re-exports the ccipdataprovider constructor.
+func NewEvmPriceRegistry(lp logpoller.LogPoller, ec client.Client, lggr logger.Logger, pluginLabel string) *ccipdataprovider.EvmPriceRegistry {
+	return ccipdataprovider.NewEvmPriceRegistry(lp, ec, lggr, pluginLabel)
+}
+
+// VersionFinder re-exports the factory's version finder interface.
+type VersionFinder = factory.VersionFinder
+
+// NewCommitStoreReader re-exports the factory constructor.
+func NewCommitStoreReader(lggr logger.Logger, versionFinder VersionFinder, address ccip.Address, ec client.Client, lp logpoller.LogPoller) (ccipdata.CommitStoreReader, error) {
+	return factory.NewCommitStoreReader(lggr, versionFinder, address, ec, lp)
+}
+
+// CloseCommitStoreReader re-exports the factory close helper.
+func CloseCommitStoreReader(lggr logger.Logger, versionFinder VersionFinder, address ccip.Address, ec client.Client, lp logpoller.LogPoller) error {
+	return factory.CloseCommitStoreReader(lggr, versionFinder, address, ec, lp)
+}
+
+// NewOffRampReader re-exports the factory constructor.
+func NewOffRampReader(lggr logger.Logger, versionFinder VersionFinder, addr ccip.Address, destClient client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int, registerFilters bool) (ccipdata.OffRampReader, error) {
+	return factory.NewOffRampReader(lggr, versionFinder, addr, destClient, lp, estimator, destMaxGasPrice, registerFilters)
+}
+
+// CloseOffRampReader re-exports the factory close helper.
+func CloseOffRampReader(lggr logger.Logger, versionFinder VersionFinder, addr ccip.Address, destClient client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int) error {
+	return factory.CloseOffRampReader(lggr, versionFinder, addr, destClient, lp, estimator, destMaxGasPrice)
+}
+
+// NewEvmVersionFinder re-exports the factory constructor.
+func NewEvmVersionFinder() factory.EvmVersionFinder {
+	return factory.NewEvmVersionFinder()
+}
+
+// NewOnRampReader re-exports the factory constructor.
+func NewOnRampReader(lggr logger.Logger, versionFinder VersionFinder, sourceSelector, destSelector uint64, onRampAddress ccip.Address, sourceLP logpoller.LogPoller, source client.Client) (ccipdata.OnRampReader, error) {
+	return factory.NewOnRampReader(lggr, versionFinder, sourceSelector, destSelector, onRampAddress, sourceLP, source)
+}
+
+// CloseOnRampReader re-exports the factory close helper.
+func CloseOnRampReader(lggr logger.Logger, versionFinder VersionFinder, sourceSelector, destSelector uint64, onRampAddress ccip.Address, sourceLP logpoller.LogPoller, source client.Client) error {
+	return factory.CloseOnRampReader(lggr, versionFinder, sourceSelector, destSelector, onRampAddress, sourceLP, source)
+}
+
+// Type aliases re-exported from the internal packages.
+type OffRampReader = ccipdata.OffRampReader
+
+type DynamicPriceGetterClient = pricegetter.DynamicPriceGetterClient
+
+type DynamicPriceGetter = pricegetter.DynamicPriceGetter
+
+// NewDynamicPriceGetterClient re-exports the pricegetter constructor.
+func NewDynamicPriceGetterClient(batchCaller rpclib.EvmBatchCaller) DynamicPriceGetterClient {
+	return pricegetter.NewDynamicPriceGetterClient(batchCaller)
+}
+
+// NewDynamicPriceGetter re-exports the pricegetter constructor.
+func NewDynamicPriceGetter(cfg config.DynamicPriceGetterConfig, evmClients map[uint64]DynamicPriceGetterClient) (*DynamicPriceGetter, error) {
+	return pricegetter.NewDynamicPriceGetter(cfg, evmClients)
+}
+
+// NewDynamicLimitedBatchCaller re-exports the rpclib constructor.
+func NewDynamicLimitedBatchCaller(
+	lggr logger.Logger, batchSender rpclib.BatchSender, batchSizeLimit, backOffMultiplier, parallelRpcCallsLimit uint,
+) *rpclib.DynamicLimitedBatchCaller {
+	return rpclib.NewDynamicLimitedBatchCaller(lggr, batchSender, batchSizeLimit, backOffMultiplier, parallelRpcCallsLimit)
+}
+
+// NewUSDCReader re-exports the ccipdata constructor.
+func NewUSDCReader(lggr logger.Logger, jobID string, transmitter common.Address, lp logpoller.LogPoller, registerFilters bool) (*ccipdata.USDCReaderImpl, error) {
+	return ccipdata.NewUSDCReader(lggr, jobID, transmitter, lp, registerFilters)
+}
+
+// CloseUSDCReader re-exports the ccipdata close helper.
+func CloseUSDCReader(lggr logger.Logger, jobID string, transmitter common.Address, lp logpoller.LogPoller) error {
+	return ccipdata.CloseUSDCReader(lggr, jobID, transmitter, lp)
+}
+
+type USDCReaderImpl = ccipdata.USDCReaderImpl
+
+// Default batching parameters re-exported from rpclib.
+var DefaultRpcBatchSizeLimit = rpclib.DefaultRpcBatchSizeLimit
+var DefaultRpcBatchBackOffMultiplier = rpclib.DefaultRpcBatchBackOffMultiplier
+var DefaultMaxParallelRpcCalls = rpclib.DefaultMaxParallelRpcCalls
+
+// NewEVMTokenPoolBatchedReader re-exports the batchreader constructor.
+func NewEVMTokenPoolBatchedReader(lggr logger.Logger, remoteChainSelector uint64, offRampAddress ccip.Address, evmBatchCaller rpclib.EvmBatchCaller) (*batchreader.EVMTokenPoolBatchedReader, error) {
+	return batchreader.NewEVMTokenPoolBatchedReader(lggr, remoteChainSelector, offRampAddress, evmBatchCaller)
+}
+
+// ChainAgnosticPriceRegistry adapts a ChainAgnosticPriceRegistryFactory into a
+// price-registry-reader factory, delegating NewPriceRegistryReader to it.
+type ChainAgnosticPriceRegistry struct {
+	p ChainAgnosticPriceRegistryFactory
+}
+
+// [ChainAgnosticPriceRegistryFactory] is satisfied by [commontypes.CCIPCommitProvider] and [commontypes.CCIPExecProvider]
+type ChainAgnosticPriceRegistryFactory interface {
+	NewPriceRegistryReader(ctx context.Context, addr ccip.Address) (ccip.PriceRegistryReader, error)
+}
+
+// NewPriceRegistryReader delegates to the wrapped factory.
+func (c *ChainAgnosticPriceRegistry) NewPriceRegistryReader(ctx context.Context, addr ccip.Address) (ccip.PriceRegistryReader, error) {
+	return c.p.NewPriceRegistryReader(ctx, addr)
+}
+
+// NewChainAgnosticPriceRegistry wraps the given factory.
+func NewChainAgnosticPriceRegistry(provider ChainAgnosticPriceRegistryFactory) *ChainAgnosticPriceRegistry {
+	return &ChainAgnosticPriceRegistry{provider}
+}
+
+// Re-exported commit config types from the internal packages.
+type JSONCommitOffchainConfigV1_2_0 = v1_2_0.JSONCommitOffchainConfig
+type CommitOnchainConfig = ccipdata.CommitOnchainConfig
+
+// NewCommitOffchainConfig re-exports the ccipdata constructor.
+func NewCommitOffchainConfig(
+	gasPriceDeviationPPB uint32,
+	gasPriceHeartBeat time.Duration,
+	tokenPriceDeviationPPB uint32,
+	tokenPriceHeartBeat time.Duration,
+	inflightCacheExpiry time.Duration,
+	priceReportingDisabled bool,
+) ccip.CommitOffchainConfig {
+	return ccipdata.NewCommitOffchainConfig(gasPriceDeviationPPB, gasPriceHeartBeat, tokenPriceDeviationPPB, tokenPriceHeartBeat, inflightCacheExpiry, priceReportingDisabled)
+}
diff --git a/core/services/ocr2/plugins/ccip/integration_legacy_test.go b/core/services/ocr2/plugins/ccip/integration_legacy_test.go
new file mode 100644
index 00000000000..9bc94b5fe45
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/integration_legacy_test.go
@@ -0,0 +1,599 @@
+package ccip_test
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ gethtypes "github.com/ethereum/go-ethereum/core/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ evm_2_evm_onramp "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/mock_v3_aggregator_contract"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+ testhelpers_new "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers"
+ testhelpers "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0"
+)
+
+func TestIntegration_legacy_CCIP(t *testing.T) {
+ // Run the batches of tests for both pipeline and dynamic price getter setups.
+ // We will remove the pipeline batch once the feature is deleted from the code.
+ tests := []struct {
+ name string
+ withPipeline bool
+ }{
+ {
+ name: "with pipeline",
+ withPipeline: true,
+ },
+ {
+ name: "with dynamic price getter",
+ withPipeline: false,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ ccipTH := testhelpers.SetupCCIPIntegrationTH(t, testhelpers.SourceChainID, testhelpers.SourceChainSelector, testhelpers.DestChainID, testhelpers.DestChainSelector)
+
+ tokenPricesUSDPipeline := ""
+ priceGetterConfigJson := ""
+
+ if test.withPipeline {
+ // Set up a test pipeline.
+ testPricePipeline, linkUSD, ethUSD := ccipTH.CreatePricesPipeline(t)
+ defer linkUSD.Close()
+ defer ethUSD.Close()
+ tokenPricesUSDPipeline = testPricePipeline
+ } else {
+ // Set up a test price getter.
+ // Set up the aggregators here to avoid modifying ccipTH.
+ aggSrcNatAddr, _, aggSrcNat, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Source.User, ccipTH.Source.Chain, 18, big.NewInt(2e18))
+ require.NoError(t, err)
+ _, err = aggSrcNat.UpdateRoundData(ccipTH.Source.User, big.NewInt(50), big.NewInt(17000000), big.NewInt(1000), big.NewInt(1000))
+ require.NoError(t, err)
+ ccipTH.Source.Chain.Commit()
+
+ aggSrcLnkAddr, _, aggSrcLnk, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Source.User, ccipTH.Source.Chain, 18, big.NewInt(3e18))
+ require.NoError(t, err)
+ ccipTH.Dest.Chain.Commit()
+ _, err = aggSrcLnk.UpdateRoundData(ccipTH.Source.User, big.NewInt(50), big.NewInt(8000000), big.NewInt(1000), big.NewInt(1000))
+ require.NoError(t, err)
+ ccipTH.Source.Chain.Commit()
+
+ aggDstLnkAddr, _, aggDstLnk, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Dest.User, ccipTH.Dest.Chain, 18, big.NewInt(3e18))
+ require.NoError(t, err)
+ ccipTH.Dest.Chain.Commit()
+ _, err = aggDstLnk.UpdateRoundData(ccipTH.Dest.User, big.NewInt(50), big.NewInt(8000000), big.NewInt(1000), big.NewInt(1000))
+ require.NoError(t, err)
+ ccipTH.Dest.Chain.Commit()
+
+ priceGetterConfig := config.DynamicPriceGetterConfig{
+ AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{
+ ccipTH.Source.LinkToken.Address(): {
+ ChainID: ccipTH.Source.ChainID,
+ AggregatorContractAddress: aggSrcLnkAddr,
+ },
+ ccipTH.Source.WrappedNative.Address(): {
+ ChainID: ccipTH.Source.ChainID,
+ AggregatorContractAddress: aggSrcNatAddr,
+ },
+ ccipTH.Dest.LinkToken.Address(): {
+ ChainID: ccipTH.Dest.ChainID,
+ AggregatorContractAddress: aggDstLnkAddr,
+ },
+ ccipTH.Dest.WrappedNative.Address(): {
+ ChainID: ccipTH.Dest.ChainID,
+ AggregatorContractAddress: aggDstLnkAddr,
+ },
+ },
+ StaticPrices: map[common.Address]config.StaticPriceConfig{},
+ }
+ priceGetterConfigBytes, err := json.MarshalIndent(priceGetterConfig, "", " ")
+ require.NoError(t, err)
+ priceGetterConfigJson = string(priceGetterConfigBytes)
+ }
+
+ jobParams := ccipTH.SetUpNodesAndJobs(t, tokenPricesUSDPipeline, priceGetterConfigJson, "")
+
+ currentSeqNum := 1
+
+ t.Run("single", func(t *testing.T) {
+ tokenAmount := big.NewInt(500000003) // prime number
+ gasLimit := big.NewInt(200_003) // prime number
+
+ extraArgs, err2 := testhelpers.GetEVMExtraArgsV1(gasLimit, false)
+ require.NoError(t, err2)
+
+ sourceBalances, err2 := testhelpers.GetBalances(t, []testhelpers.BalanceReq{
+ {Name: testhelpers.SourcePool, Addr: ccipTH.Source.LinkTokenPool.Address(), Getter: ccipTH.GetSourceLinkBalance},
+ {Name: testhelpers.OnRamp, Addr: ccipTH.Source.OnRamp.Address(), Getter: ccipTH.GetSourceLinkBalance},
+ {Name: testhelpers.SourceRouter, Addr: ccipTH.Source.Router.Address(), Getter: ccipTH.GetSourceLinkBalance},
+ {Name: testhelpers.SourcePriceRegistry, Addr: ccipTH.Source.PriceRegistry.Address(), Getter: ccipTH.GetSourceLinkBalance},
+ })
+ require.NoError(t, err2)
+ destBalances, err2 := testhelpers.GetBalances(t, []testhelpers.BalanceReq{
+ {Name: testhelpers.Receiver, Addr: ccipTH.Dest.Receivers[0].Receiver.Address(), Getter: ccipTH.GetDestLinkBalance},
+ {Name: testhelpers.DestPool, Addr: ccipTH.Dest.LinkTokenPool.Address(), Getter: ccipTH.GetDestLinkBalance},
+ {Name: testhelpers.OffRamp, Addr: ccipTH.Dest.OffRamp.Address(), Getter: ccipTH.GetDestLinkBalance},
+ })
+ require.NoError(t, err2)
+
+ ccipTH.Source.User.Value = tokenAmount
+ _, err2 = ccipTH.Source.WrappedNative.Deposit(ccipTH.Source.User)
+ require.NoError(t, err2)
+ ccipTH.Source.Chain.Commit()
+ ccipTH.Source.User.Value = nil
+
+ msg := router.ClientEVM2AnyMessage{
+ Receiver: testhelpers.MustEncodeAddress(t, ccipTH.Dest.Receivers[0].Receiver.Address()),
+ Data: []byte("hello"),
+ TokenAmounts: []router.ClientEVMTokenAmount{
+ {
+ Token: ccipTH.Source.LinkToken.Address(),
+ Amount: tokenAmount,
+ },
+ {
+ Token: ccipTH.Source.WrappedNative.Address(),
+ Amount: tokenAmount,
+ },
+ },
+ FeeToken: ccipTH.Source.LinkToken.Address(),
+ ExtraArgs: extraArgs,
+ }
+ fee, err2 := ccipTH.Source.Router.GetFee(nil, testhelpers.DestChainSelector, msg)
+ require.NoError(t, err2)
+ // Currently no overhead and 10gwei dest gas price. So fee is simply (gasLimit * gasPrice)* link/native
+ // require.Equal(t, new(big.Int).Mul(gasLimit, gasPrice).String(), fee.String())
+ // Approve the fee amount + the token amount
+ _, err2 = ccipTH.Source.LinkToken.Approve(ccipTH.Source.User, ccipTH.Source.Router.Address(), new(big.Int).Add(fee, tokenAmount))
+ require.NoError(t, err2)
+ ccipTH.Source.Chain.Commit()
+ _, err2 = ccipTH.Source.WrappedNative.Approve(ccipTH.Source.User, ccipTH.Source.Router.Address(), tokenAmount)
+ require.NoError(t, err2)
+ ccipTH.Source.Chain.Commit()
+
+ ccipTH.SendRequest(t, msg)
+ // Should eventually see this executed.
+ ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum)
+ ccipTH.EventuallyReportCommitted(t, currentSeqNum)
+
+ executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum)
+ assert.Len(t, executionLogs, 1)
+ ccipTH.AssertExecState(t, executionLogs[0], testhelpers.ExecutionStateSuccess)
+
+ // Asserts
+ // 1) The total pool input == total pool output
+ // 2) Pool flow equals tokens sent
+ // 3) Sent tokens arrive at the receiver
+ ccipTH.AssertBalances(t, []testhelpers.BalanceAssertion{
+ {
+ Name: testhelpers.SourcePool,
+ Address: ccipTH.Source.LinkTokenPool.Address(),
+ Expected: testhelpers.MustAddBigInt(sourceBalances[testhelpers.SourcePool], tokenAmount.String()).String(),
+ Getter: ccipTH.GetSourceLinkBalance,
+ },
+ {
+ Name: testhelpers.SourcePriceRegistry,
+ Address: ccipTH.Source.PriceRegistry.Address(),
+ Expected: sourceBalances[testhelpers.SourcePriceRegistry].String(),
+ Getter: ccipTH.GetSourceLinkBalance,
+ },
+ {
+ // Fees end up in the onramp.
+ Name: testhelpers.OnRamp,
+ Address: ccipTH.Source.OnRamp.Address(),
+ Expected: testhelpers.MustAddBigInt(sourceBalances[testhelpers.SourcePriceRegistry], fee.String()).String(),
+ Getter: ccipTH.GetSourceLinkBalance,
+ },
+ {
+ Name: testhelpers.SourceRouter,
+ Address: ccipTH.Source.Router.Address(),
+ Expected: sourceBalances[testhelpers.SourceRouter].String(),
+ Getter: ccipTH.GetSourceLinkBalance,
+ },
+ {
+ Name: testhelpers.Receiver,
+ Address: ccipTH.Dest.Receivers[0].Receiver.Address(),
+ Expected: testhelpers.MustAddBigInt(destBalances[testhelpers.Receiver], tokenAmount.String()).String(),
+ Getter: ccipTH.GetDestLinkBalance,
+ },
+ {
+ Name: testhelpers.DestPool,
+ Address: ccipTH.Dest.LinkTokenPool.Address(),
+ Expected: testhelpers.MustSubBigInt(destBalances[testhelpers.DestPool], tokenAmount.String()).String(),
+ Getter: ccipTH.GetDestLinkBalance,
+ },
+ {
+ Name: testhelpers.OffRamp,
+ Address: ccipTH.Dest.OffRamp.Address(),
+ Expected: destBalances[testhelpers.OffRamp].String(),
+ Getter: ccipTH.GetDestLinkBalance,
+ },
+ })
+ currentSeqNum++
+ })
+
+ t.Run("multiple batches", func(t *testing.T) {
+ tokenAmount := big.NewInt(500000003)
+ gasLimit := big.NewInt(250_000)
+
+ var txs []*gethtypes.Transaction
+ // Enough to require batched executions as gasLimit per tx is 250k -> 500k -> 750k ....
+ // The actual gas usage of executing 15 messages is higher than the gas limit for
+ // a single tx. This means that when batching is turned off, and we simply include
+ // all txs without checking gas, this also fails.
+ n := 15
+ for i := 0; i < n; i++ {
+ txGasLimit := new(big.Int).Mul(gasLimit, big.NewInt(int64(i+1)))
+ extraArgs, err2 := testhelpers.GetEVMExtraArgsV1(txGasLimit, false)
+ require.NoError(t, err2)
+ msg := router.ClientEVM2AnyMessage{
+ Receiver: testhelpers.MustEncodeAddress(t, ccipTH.Dest.Receivers[0].Receiver.Address()),
+ Data: []byte("hello"),
+ TokenAmounts: []router.ClientEVMTokenAmount{
+ {
+ Token: ccipTH.Source.LinkToken.Address(),
+ Amount: tokenAmount,
+ },
+ },
+ FeeToken: ccipTH.Source.LinkToken.Address(),
+ ExtraArgs: extraArgs,
+ }
+ fee, err2 := ccipTH.Source.Router.GetFee(nil, testhelpers.DestChainSelector, msg)
+ require.NoError(t, err2)
+ // Currently no overhead and 1gwei dest gas price. So fee is simply gasLimit * gasPrice.
+ // require.Equal(t, new(big.Int).Mul(txGasLimit, gasPrice).String(), fee.String())
+ // Approve the fee amount + the token amount
+ _, err2 = ccipTH.Source.LinkToken.Approve(ccipTH.Source.User, ccipTH.Source.Router.Address(), new(big.Int).Add(fee, tokenAmount))
+ require.NoError(t, err2)
+ tx, err2 := ccipTH.Source.Router.CcipSend(ccipTH.Source.User, ccipTH.Dest.ChainSelector, msg)
+ require.NoError(t, err2)
+ txs = append(txs, tx)
+ }
+
+ // Send a batch of requests in a single block
+ testhelpers_new.ConfirmTxs(t, txs, ccipTH.Source.Chain)
+ for i := 0; i < n; i++ {
+ ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum+i)
+ }
+ // Should see a report with the full range
+ ccipTH.EventuallyReportCommitted(t, currentSeqNum+n-1)
+ // Should all be executed
+ executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum+n-1)
+ for _, execLog := range executionLogs {
+ ccipTH.AssertExecState(t, execLog, testhelpers.ExecutionStateSuccess)
+ }
+
+ currentSeqNum += n
+ })
+
+ // Deploy new on ramp,Commit store,off ramp
+ // Delete v1 jobs
+ // Send a number of requests
+ // Upgrade the router with new contracts
+ // create new jobs
+ // Verify all pending requests are sent after the contracts are upgraded
+ t.Run("upgrade contracts and verify requests can be sent with upgraded contract", func(t *testing.T) {
+ gasLimit := big.NewInt(200_003) // prime number
+ tokenAmount := big.NewInt(100)
+ commitStoreV1 := ccipTH.Dest.CommitStore
+ offRampV1 := ccipTH.Dest.OffRamp
+ onRampV1 := ccipTH.Source.OnRamp
+ // deploy v2 contracts
+ ccipTH.DeployNewOnRamp(t)
+ ccipTH.DeployNewCommitStore(t)
+ ccipTH.DeployNewOffRamp(t)
+
+ // send a request as the v2 contracts are not enabled in router it should route through the v1 contracts
+ t.Logf("sending request for seqnum %d", currentSeqNum)
+ ccipTH.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address())
+ ccipTH.Source.Chain.Commit()
+ ccipTH.Dest.Chain.Commit()
+ t.Logf("verifying seqnum %d on previous onRamp %s", currentSeqNum, onRampV1.Address().Hex())
+ ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum, onRampV1.Address())
+ ccipTH.EventuallyReportCommitted(t, currentSeqNum, commitStoreV1.Address())
+ executionLog := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum, offRampV1.Address())
+ ccipTH.AssertExecState(t, executionLog[0], testhelpers.ExecutionStateSuccess, offRampV1.Address())
+
+ nonceAtOnRampV1, err := onRampV1.GetSenderNonce(nil, ccipTH.Source.User.From)
+ require.NoError(t, err, "getting nonce from onRamp")
+ require.Equal(t, currentSeqNum, int(nonceAtOnRampV1))
+ nonceAtOffRampV1, err := offRampV1.GetSenderNonce(nil, ccipTH.Source.User.From)
+ require.NoError(t, err, "getting nonce from offRamp")
+ require.Equal(t, currentSeqNum, int(nonceAtOffRampV1))
+
+ // enable the newly deployed contracts
+ newConfigBlock := ccipTH.Dest.Chain.Blockchain().CurrentBlock().Number.Int64()
+ ccipTH.EnableOnRamp(t)
+ ccipTH.EnableCommitStore(t)
+ ccipTH.EnableOffRamp(t)
+ srcStartBlock := ccipTH.Source.Chain.Blockchain().CurrentBlock().Number.Uint64()
+
+ // send a number of requests, the requests should not be delivered yet as the previous contracts are not configured
+ // with the router anymore
+ startSeq := 1
+ noOfRequests := 5
+ endSeqNum := startSeq + noOfRequests
+ for i := startSeq; i <= endSeqNum; i++ {
+ t.Logf("sending request for seqnum %d", i)
+ ccipTH.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address())
+ ccipTH.Source.Chain.Commit()
+ ccipTH.Dest.Chain.Commit()
+ ccipTH.EventuallySendRequested(t, uint64(i))
+ }
+
+ // delete v1 jobs
+ for _, node := range ccipTH.Nodes {
+ id := node.FindJobIDForContract(t, commitStoreV1.Address())
+ require.Greater(t, id, int32(0))
+ t.Logf("deleting job %d", id)
+ err = node.App.DeleteJob(context.Background(), id)
+ require.NoError(t, err)
+ id = node.FindJobIDForContract(t, offRampV1.Address())
+ require.Greater(t, id, int32(0))
+ t.Logf("deleting job %d", id)
+ err = node.App.DeleteJob(context.Background(), id)
+ require.NoError(t, err)
+ }
+
+ // Commit on both chains to reach Finality
+ ccipTH.Source.Chain.Commit()
+ ccipTH.Dest.Chain.Commit()
+
+ // create new jobs
+ jobParams = ccipTH.NewCCIPJobSpecParams(tokenPricesUSDPipeline, priceGetterConfigJson, newConfigBlock, "")
+ jobParams.Version = "v2"
+ jobParams.SourceStartBlock = srcStartBlock
+ ccipTH.AddAllJobs(t, jobParams)
+ committedSeqNum := uint64(0)
+ // Now the requests should be delivered
+ for i := startSeq; i <= endSeqNum; i++ {
+ t.Logf("verifying seqnum %d", i)
+ ccipTH.AllNodesHaveReqSeqNum(t, i)
+ if committedSeqNum < uint64(i+1) {
+ committedSeqNum = ccipTH.EventuallyReportCommitted(t, i)
+ }
+ ccipTH.EventuallyExecutionStateChangedToSuccess(t, []uint64{uint64(i)}, uint64(newConfigBlock))
+ }
+
+ // nonces should be correctly synced from v1 contracts for the sender
+ nonceAtOnRampV2, err := ccipTH.Source.OnRamp.GetSenderNonce(nil, ccipTH.Source.User.From)
+ require.NoError(t, err, "getting nonce from onRamp")
+ nonceAtOffRampV2, err := ccipTH.Dest.OffRamp.GetSenderNonce(nil, ccipTH.Source.User.From)
+ require.NoError(t, err, "getting nonce from offRamp")
+ require.Equal(t, nonceAtOnRampV1+uint64(noOfRequests)+1, nonceAtOnRampV2, "nonce should be synced from v1 onRamps")
+ require.Equal(t, nonceAtOffRampV1+uint64(noOfRequests)+1, nonceAtOffRampV2, "nonce should be synced from v1 offRamps")
+ currentSeqNum = endSeqNum + 1
+ })
+
+ t.Run("pay nops", func(t *testing.T) {
+ linkToTransferToOnRamp := big.NewInt(1e18)
+
+ // transfer some link to onramp to pay the nops
+ _, err := ccipTH.Source.LinkToken.Transfer(ccipTH.Source.User, ccipTH.Source.OnRamp.Address(), linkToTransferToOnRamp)
+ require.NoError(t, err)
+ ccipTH.Source.Chain.Commit()
+
+ srcBalReq := []testhelpers.BalanceReq{
+ {
+ Name: testhelpers.Sender,
+ Addr: ccipTH.Source.User.From,
+ Getter: ccipTH.GetSourceWrappedTokenBalance,
+ },
+ {
+ Name: testhelpers.OnRampNative,
+ Addr: ccipTH.Source.OnRamp.Address(),
+ Getter: ccipTH.GetSourceWrappedTokenBalance,
+ },
+ {
+ Name: testhelpers.OnRamp,
+ Addr: ccipTH.Source.OnRamp.Address(),
+ Getter: ccipTH.GetSourceLinkBalance,
+ },
+ {
+ Name: testhelpers.SourceRouter,
+ Addr: ccipTH.Source.Router.Address(),
+ Getter: ccipTH.GetSourceWrappedTokenBalance,
+ },
+ }
+
+ var nopsAndWeights []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight
+ var totalWeight uint16
+ nodes := ccipTH.Nodes
+ for i := range nodes {
+ // For now set the transmitter addresses to be the same as the payee addresses
+ nodes[i].PaymentReceiver = nodes[i].Transmitter
+ nopsAndWeights = append(nopsAndWeights, evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{
+ Nop: nodes[i].PaymentReceiver,
+ Weight: 5,
+ })
+ totalWeight += 5
+ srcBalReq = append(srcBalReq, testhelpers.BalanceReq{
+ Name: fmt.Sprintf("node %d", i),
+ Addr: nodes[i].PaymentReceiver,
+ Getter: ccipTH.GetSourceLinkBalance,
+ })
+ }
+ srcBalances, err := testhelpers.GetBalances(t, srcBalReq)
+ require.NoError(t, err)
+
+ // set nops on the onramp
+ ccipTH.SetNopsOnRamp(t, nopsAndWeights)
+
+ // send a message
+ extraArgs, err := testhelpers.GetEVMExtraArgsV1(big.NewInt(200_000), true)
+ require.NoError(t, err)
+
+ // FeeToken is empty, indicating it should use native token
+ msg := router.ClientEVM2AnyMessage{
+ Receiver: testhelpers.MustEncodeAddress(t, ccipTH.Dest.Receivers[1].Receiver.Address()),
+ Data: []byte("hello"),
+ TokenAmounts: []router.ClientEVMTokenAmount{},
+ ExtraArgs: extraArgs,
+ FeeToken: common.Address{},
+ }
+ fee, err := ccipTH.Source.Router.GetFee(nil, testhelpers.DestChainSelector, msg)
+ require.NoError(t, err)
+
+ // verify message is sent
+ ccipTH.Source.User.Value = fee
+ ccipTH.SendRequest(t, msg)
+ ccipTH.Source.User.Value = nil
+ ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum)
+ ccipTH.EventuallyReportCommitted(t, currentSeqNum)
+
+ executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum)
+ assert.Len(t, executionLogs, 1)
+ ccipTH.AssertExecState(t, executionLogs[0], testhelpers.ExecutionStateSuccess)
+ currentSeqNum++
+
+ // get the nop fee
+ nopFee, err := ccipTH.Source.OnRamp.GetNopFeesJuels(nil)
+ require.NoError(t, err)
+ t.Log("nopFee", nopFee)
+
+ // withdraw fees and verify there is still fund left for nop payment
+ _, err = ccipTH.Source.OnRamp.WithdrawNonLinkFees(
+ ccipTH.Source.User,
+ ccipTH.Source.WrappedNative.Address(),
+ ccipTH.Source.User.From,
+ )
+ require.NoError(t, err)
+ ccipTH.Source.Chain.Commit()
+
+ // pay nops
+ _, err = ccipTH.Source.OnRamp.PayNops(ccipTH.Source.User)
+ require.NoError(t, err)
+ ccipTH.Source.Chain.Commit()
+
+ srcBalanceAssertions := []testhelpers.BalanceAssertion{
+ {
+ // Onramp should not have any balance left in wrapped native
+ Name: testhelpers.OnRampNative,
+ Address: ccipTH.Source.OnRamp.Address(),
+ Expected: big.NewInt(0).String(),
+ Getter: ccipTH.GetSourceWrappedTokenBalance,
+ },
+ {
+ // Onramp should have the remaining link after paying nops
+ Name: testhelpers.OnRamp,
+ Address: ccipTH.Source.OnRamp.Address(),
+ Expected: new(big.Int).Sub(srcBalances[testhelpers.OnRamp], nopFee).String(),
+ Getter: ccipTH.GetSourceLinkBalance,
+ },
+ {
+ Name: testhelpers.SourceRouter,
+ Address: ccipTH.Source.Router.Address(),
+ Expected: srcBalances[testhelpers.SourceRouter].String(),
+ Getter: ccipTH.GetSourceWrappedTokenBalance,
+ },
+ // onRamp's balance (of previously sent fee during message sending) should have been transferred to
+ // the owner as a result of WithdrawNonLinkFees
+ {
+ Name: testhelpers.Sender,
+ Address: ccipTH.Source.User.From,
+ Expected: fee.String(),
+ Getter: ccipTH.GetSourceWrappedTokenBalance,
+ },
+ }
+
+ // the nodes should be paid according to the weights assigned
+ for i, node := range nodes {
+ paymentWeight := float64(nopsAndWeights[i].Weight) / float64(totalWeight)
+ paidInFloat := paymentWeight * float64(nopFee.Int64())
+ paid, _ := new(big.Float).SetFloat64(paidInFloat).Int64()
+ bal := new(big.Int).Add(
+ new(big.Int).SetInt64(paid),
+ srcBalances[fmt.Sprintf("node %d", i)]).String()
+ srcBalanceAssertions = append(srcBalanceAssertions, testhelpers.BalanceAssertion{
+ Name: fmt.Sprintf("node %d", i),
+ Address: node.PaymentReceiver,
+ Expected: bal,
+ Getter: ccipTH.GetSourceLinkBalance,
+ })
+ }
+ ccipTH.AssertBalances(t, srcBalanceAssertions)
+ })
+
+ // Keep on sending a bunch of messages
+ // In the meantime update onchainConfig with new price registry address
+ // Verify if the jobs can pick up updated config
+ // Verify if all the messages are sent
+ t.Run("config change or price registry update while requests are inflight", func(t *testing.T) {
+ gasLimit := big.NewInt(200_003) // prime number
+ tokenAmount := big.NewInt(100)
+ msgWg := &sync.WaitGroup{}
+ msgWg.Add(1)
+ ticker := time.NewTicker(100 * time.Millisecond)
+ defer ticker.Stop()
+ startSeq := currentSeqNum
+ endSeq := currentSeqNum + 20
+
+ // send message with the old configs
+ ccipTH.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address())
+ ccipTH.Source.Chain.Commit()
+
+ go func(ccipContracts testhelpers.CCIPContracts, currentSeqNum int) {
+ seqNumber := currentSeqNum + 1
+ defer msgWg.Done()
+ for {
+ <-ticker.C // wait for ticker
+ t.Logf("sending request for seqnum %d", seqNumber)
+ ccipContracts.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address())
+ ccipContracts.Source.Chain.Commit()
+ seqNumber++
+ if seqNumber == endSeq {
+ return
+ }
+ }
+ }(ccipTH.CCIPContracts, currentSeqNum)
+
+ ccipTH.DeployNewPriceRegistry(t)
+ commitOnchainConfig := ccipTH.CreateDefaultCommitOnchainConfig(t)
+ commitOffchainConfig := ccipTH.CreateDefaultCommitOffchainConfig(t)
+ execOnchainConfig := ccipTH.CreateDefaultExecOnchainConfig(t)
+ execOffchainConfig := ccipTH.CreateDefaultExecOffchainConfig(t)
+
+ ccipTH.SetupOnchainConfig(t, commitOnchainConfig, commitOffchainConfig, execOnchainConfig, execOffchainConfig)
+
+ // wait for all requests to be complete
+ msgWg.Wait()
+ for i := startSeq; i < endSeq; i++ {
+ ccipTH.AllNodesHaveReqSeqNum(t, i)
+ ccipTH.EventuallyReportCommitted(t, i)
+
+ executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, i, i)
+ assert.Len(t, executionLogs, 1)
+ ccipTH.AssertExecState(t, executionLogs[0], testhelpers.ExecutionStateSuccess)
+ }
+
+ for i, node := range ccipTH.Nodes {
+ t.Logf("verifying node %d", i)
+ node.EventuallyNodeUsesNewCommitConfig(t, ccipTH, ccipdata.CommitOnchainConfig{
+ PriceRegistry: ccipTH.Dest.PriceRegistry.Address(),
+ })
+ node.EventuallyNodeUsesNewExecConfig(t, ccipTH, v1_2_0.ExecOnchainConfig{
+ PermissionLessExecutionThresholdSeconds: testhelpers.PermissionLessExecutionThresholdSeconds,
+ Router: ccipTH.Dest.Router.Address(),
+ PriceRegistry: ccipTH.Dest.PriceRegistry.Address(),
+ MaxDataBytes: 1e5,
+ MaxNumberOfTokensPerMsg: 5,
+ MaxPoolReleaseOrMintGas: 200_000,
+ })
+ node.EventuallyNodeUsesUpdatedPriceRegistry(t, ccipTH)
+ }
+ currentSeqNum = endSeq
+ })
+ })
+ }
+}
diff --git a/core/services/ocr2/plugins/ccip/integration_test.go b/core/services/ocr2/plugins/ccip/integration_test.go
new file mode 100644
index 00000000000..202d2ef2304
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/integration_test.go
@@ -0,0 +1,644 @@
+package ccip_test
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ gethtypes "github.com/ethereum/go-ethereum/core/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/mock_v3_aggregator_contract"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers"
+ integrationtesthelpers "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers/integration"
+)
+
+func TestIntegration_CCIP(t *testing.T) {
+ // Run the batches of tests for both pipeline and dynamic price getter setups.
+ // We will remove the pipeline batch once the feature is deleted from the code.
+ tests := []struct {
+ name string
+ withPipeline bool
+ allowOutOfOrderExecution bool
+ }{
+ {
+ name: "with pipeline allowOutOfOrderExecution true",
+ withPipeline: true,
+ allowOutOfOrderExecution: true,
+ },
+ {
+ name: "with dynamic price getter allowOutOfOrderExecution false",
+ withPipeline: false,
+ allowOutOfOrderExecution: false,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ ccipTH := integrationtesthelpers.SetupCCIPIntegrationTH(t, testhelpers.SourceChainID, testhelpers.SourceChainSelector, testhelpers.DestChainID, testhelpers.DestChainSelector)
+
+ tokenPricesUSDPipeline := ""
+ priceGetterConfigJson := ""
+
+ if test.withPipeline {
+ // Set up a test pipeline.
+ testPricePipeline, linkUSD, ethUSD := ccipTH.CreatePricesPipeline(t)
+ defer linkUSD.Close()
+ defer ethUSD.Close()
+ tokenPricesUSDPipeline = testPricePipeline
+ } else {
+ // Set up a test price getter.
+ // Set up the aggregators here to avoid modifying ccipTH.
+ aggSrcNatAddr, _, aggSrcNat, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Source.User, ccipTH.Source.Chain, 18, big.NewInt(2e18))
+ require.NoError(t, err)
+ _, err = aggSrcNat.UpdateRoundData(ccipTH.Source.User, big.NewInt(50), big.NewInt(17000000), big.NewInt(1000), big.NewInt(1000))
+ require.NoError(t, err)
+ ccipTH.Source.Chain.Commit()
+
+ aggSrcLnkAddr, _, aggSrcLnk, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Source.User, ccipTH.Source.Chain, 18, big.NewInt(3e18))
+ require.NoError(t, err)
+ ccipTH.Dest.Chain.Commit()
+ _, err = aggSrcLnk.UpdateRoundData(ccipTH.Source.User, big.NewInt(50), big.NewInt(8000000), big.NewInt(1000), big.NewInt(1000))
+ require.NoError(t, err)
+ ccipTH.Source.Chain.Commit()
+
+ aggDstLnkAddr, _, aggDstLnk, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Dest.User, ccipTH.Dest.Chain, 18, big.NewInt(3e18))
+ require.NoError(t, err)
+ ccipTH.Dest.Chain.Commit()
+ _, err = aggDstLnk.UpdateRoundData(ccipTH.Dest.User, big.NewInt(50), big.NewInt(8000000), big.NewInt(1000), big.NewInt(1000))
+ require.NoError(t, err)
+ ccipTH.Dest.Chain.Commit()
+
+ priceGetterConfig := config.DynamicPriceGetterConfig{
+ AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{
+ ccipTH.Source.LinkToken.Address(): {
+ ChainID: ccipTH.Source.ChainID,
+ AggregatorContractAddress: aggSrcLnkAddr,
+ },
+ ccipTH.Source.WrappedNative.Address(): {
+ ChainID: ccipTH.Source.ChainID,
+ AggregatorContractAddress: aggSrcNatAddr,
+ },
+ ccipTH.Dest.LinkToken.Address(): {
+ ChainID: ccipTH.Dest.ChainID,
+ AggregatorContractAddress: aggDstLnkAddr,
+ },
+ ccipTH.Dest.WrappedNative.Address(): {
+ ChainID: ccipTH.Dest.ChainID,
+ AggregatorContractAddress: aggDstLnkAddr,
+ },
+ },
+ StaticPrices: map[common.Address]config.StaticPriceConfig{},
+ }
+ priceGetterConfigBytes, err := json.MarshalIndent(priceGetterConfig, "", " ")
+ require.NoError(t, err)
+ priceGetterConfigJson = string(priceGetterConfigBytes)
+ }
+
+ jobParams := ccipTH.SetUpNodesAndJobs(t, tokenPricesUSDPipeline, priceGetterConfigJson, "")
+
+ // track sequence number and nonce separately since nonce doesn't bump for messages with allowOutOfOrderExecution == true,
+ // but sequence number always bumps.
+ // for this test, when test.outOfOrder == false, sequence number and nonce are equal.
+ // when test.outOfOrder == true, nonce is not bumped at all, so sequence number and nonce are NOT equal.
+ currentSeqNum := 1
+ currentNonce := uint64(1)
+
+ t.Run("single", func(t *testing.T) {
+ tokenAmount := big.NewInt(500000003) // prime number
+ gasLimit := big.NewInt(200_003) // prime number
+
+ extraArgs, err2 := testhelpers.GetEVMExtraArgsV2(gasLimit, test.allowOutOfOrderExecution)
+ require.NoError(t, err2)
+
+ sourceBalances, err2 := testhelpers.GetBalances(t, []testhelpers.BalanceReq{
+ {Name: testhelpers.SourcePool, Addr: ccipTH.Source.LinkTokenPool.Address(), Getter: ccipTH.GetSourceLinkBalance},
+ {Name: testhelpers.OnRamp, Addr: ccipTH.Source.OnRamp.Address(), Getter: ccipTH.GetSourceLinkBalance},
+ {Name: testhelpers.SourceRouter, Addr: ccipTH.Source.Router.Address(), Getter: ccipTH.GetSourceLinkBalance},
+ {Name: testhelpers.SourcePriceRegistry, Addr: ccipTH.Source.PriceRegistry.Address(), Getter: ccipTH.GetSourceLinkBalance},
+ })
+ require.NoError(t, err2)
+ destBalances, err2 := testhelpers.GetBalances(t, []testhelpers.BalanceReq{
+ {Name: testhelpers.Receiver, Addr: ccipTH.Dest.Receivers[0].Receiver.Address(), Getter: ccipTH.GetDestLinkBalance},
+ {Name: testhelpers.DestPool, Addr: ccipTH.Dest.LinkTokenPool.Address(), Getter: ccipTH.GetDestLinkBalance},
+ {Name: testhelpers.OffRamp, Addr: ccipTH.Dest.OffRamp.Address(), Getter: ccipTH.GetDestLinkBalance},
+ })
+ require.NoError(t, err2)
+
+ ccipTH.Source.User.Value = tokenAmount
+ _, err2 = ccipTH.Source.WrappedNative.Deposit(ccipTH.Source.User)
+ require.NoError(t, err2)
+ ccipTH.Source.Chain.Commit()
+ ccipTH.Source.User.Value = nil
+
+ msg := router.ClientEVM2AnyMessage{
+ Receiver: testhelpers.MustEncodeAddress(t, ccipTH.Dest.Receivers[0].Receiver.Address()),
+ Data: []byte("hello"),
+ TokenAmounts: []router.ClientEVMTokenAmount{
+ {
+ Token: ccipTH.Source.LinkToken.Address(),
+ Amount: tokenAmount,
+ },
+ {
+ Token: ccipTH.Source.WrappedNative.Address(),
+ Amount: tokenAmount,
+ },
+ },
+ FeeToken: ccipTH.Source.LinkToken.Address(),
+ ExtraArgs: extraArgs,
+ }
+ fee, err2 := ccipTH.Source.Router.GetFee(nil, testhelpers.DestChainSelector, msg)
+ require.NoError(t, err2)
+ // Currently no overhead and 10gwei dest gas price. So fee is simply (gasLimit * gasPrice)* link/native
+ // require.Equal(t, new(big.Int).Mul(gasLimit, gasPrice).String(), fee.String())
+ // Approve the fee amount + the token amount
+ _, err2 = ccipTH.Source.LinkToken.Approve(ccipTH.Source.User, ccipTH.Source.Router.Address(), new(big.Int).Add(fee, tokenAmount))
+ require.NoError(t, err2)
+ ccipTH.Source.Chain.Commit()
+ _, err2 = ccipTH.Source.WrappedNative.Approve(ccipTH.Source.User, ccipTH.Source.Router.Address(), tokenAmount)
+ require.NoError(t, err2)
+ ccipTH.Source.Chain.Commit()
+
+ beforeNonce, err := ccipTH.Source.OnRamp.GetSenderNonce(nil, ccipTH.Source.User.From)
+ require.NoError(t, err)
+ ccipTH.SendRequest(t, msg)
+ // TODO: can this be moved into SendRequest?
+ if test.allowOutOfOrderExecution {
+ // the nonce for that sender must not be bumped for allowOutOfOrderExecution == true messages.
+ nonce, err2 := ccipTH.Source.OnRamp.GetSenderNonce(nil, ccipTH.Source.User.From)
+ require.NoError(t, err2)
+ require.Equal(t, beforeNonce, nonce, "nonce must not be bumped for allowOutOfOrderExecution == true requests")
+ } else {
+ // the nonce for that sender must be bumped for allowOutOfOrderExecution == false messages.
+ nonce, err2 := ccipTH.Source.OnRamp.GetSenderNonce(nil, ccipTH.Source.User.From)
+ require.NoError(t, err2)
+ require.Equal(t, beforeNonce+1, nonce, "nonce must be bumped for allowOutOfOrderExecution == false requests")
+ }
+
+ // Should eventually see this executed.
+ ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum)
+ ccipTH.EventuallyReportCommitted(t, currentSeqNum)
+
+ executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum)
+ assert.Len(t, executionLogs, 1)
+ ccipTH.AssertExecState(t, executionLogs[0], testhelpers.ExecutionStateSuccess)
+
+ // Asserts
+ // 1) The total pool input == total pool output
+ // 2) Pool flow equals tokens sent
+ // 3) Sent tokens arrive at the receiver
+ ccipTH.AssertBalances(t, []testhelpers.BalanceAssertion{
+ {
+ Name: testhelpers.SourcePool,
+ Address: ccipTH.Source.LinkTokenPool.Address(),
+ Expected: testhelpers.MustAddBigInt(sourceBalances[testhelpers.SourcePool], tokenAmount.String()).String(),
+ Getter: ccipTH.GetSourceLinkBalance,
+ },
+ {
+ Name: testhelpers.SourcePriceRegistry,
+ Address: ccipTH.Source.PriceRegistry.Address(),
+ Expected: sourceBalances[testhelpers.SourcePriceRegistry].String(),
+ Getter: ccipTH.GetSourceLinkBalance,
+ },
+ {
+ // Fees end up in the onramp.
+ Name: testhelpers.OnRamp,
+ Address: ccipTH.Source.OnRamp.Address(),
+ Expected: testhelpers.MustAddBigInt(sourceBalances[testhelpers.SourcePriceRegistry], fee.String()).String(),
+ Getter: ccipTH.GetSourceLinkBalance,
+ },
+ {
+ Name: testhelpers.SourceRouter,
+ Address: ccipTH.Source.Router.Address(),
+ Expected: sourceBalances[testhelpers.SourceRouter].String(),
+ Getter: ccipTH.GetSourceLinkBalance,
+ },
+ {
+ Name: testhelpers.Receiver,
+ Address: ccipTH.Dest.Receivers[0].Receiver.Address(),
+ Expected: testhelpers.MustAddBigInt(destBalances[testhelpers.Receiver], tokenAmount.String()).String(),
+ Getter: ccipTH.GetDestLinkBalance,
+ },
+ {
+ Name: testhelpers.DestPool,
+ Address: ccipTH.Dest.LinkTokenPool.Address(),
+ Expected: testhelpers.MustSubBigInt(destBalances[testhelpers.DestPool], tokenAmount.String()).String(),
+ Getter: ccipTH.GetDestLinkBalance,
+ },
+ {
+ Name: testhelpers.OffRamp,
+ Address: ccipTH.Dest.OffRamp.Address(),
+ Expected: destBalances[testhelpers.OffRamp].String(),
+ Getter: ccipTH.GetDestLinkBalance,
+ },
+ })
+ currentSeqNum++
+ if !test.allowOutOfOrderExecution {
+ currentNonce = uint64(currentSeqNum)
+ }
+ })
+
+ t.Run("multiple batches", func(t *testing.T) {
+ tokenAmount := big.NewInt(500000003)
+ gasLimit := big.NewInt(250_000)
+
+ var txs []*gethtypes.Transaction
+ // Enough to require batched executions as gasLimit per tx is 250k -> 500k -> 750k ....
+ // The actual gas usage of executing 15 messages is higher than the gas limit for
+ // a single tx. This means that when batching is turned off, and we simply include
+ // all txs without checking gas, this also fails.
+ n := 15
+ for i := 0; i < n; i++ {
+ txGasLimit := new(big.Int).Mul(gasLimit, big.NewInt(int64(i+1)))
+
+ // interleave ordered and non-ordered messages.
+ allowOutOfOrderExecution := false
+ if i%2 == 0 {
+ allowOutOfOrderExecution = true
+ }
+ extraArgs, err2 := testhelpers.GetEVMExtraArgsV2(txGasLimit, allowOutOfOrderExecution)
+ require.NoError(t, err2)
+ msg := router.ClientEVM2AnyMessage{
+ Receiver: testhelpers.MustEncodeAddress(t, ccipTH.Dest.Receivers[0].Receiver.Address()),
+ Data: []byte("hello"),
+ TokenAmounts: []router.ClientEVMTokenAmount{
+ {
+ Token: ccipTH.Source.LinkToken.Address(),
+ Amount: tokenAmount,
+ },
+ },
+ FeeToken: ccipTH.Source.LinkToken.Address(),
+ ExtraArgs: extraArgs,
+ }
+ fee, err2 := ccipTH.Source.Router.GetFee(nil, testhelpers.DestChainSelector, msg)
+ require.NoError(t, err2)
+ // Currently no overhead and 1gwei dest gas price. So fee is simply gasLimit * gasPrice.
+ // require.Equal(t, new(big.Int).Mul(txGasLimit, gasPrice).String(), fee.String())
+ // Approve the fee amount + the token amount
+ _, err2 = ccipTH.Source.LinkToken.Approve(ccipTH.Source.User, ccipTH.Source.Router.Address(), new(big.Int).Add(fee, tokenAmount))
+ require.NoError(t, err2)
+ tx, err2 := ccipTH.Source.Router.CcipSend(ccipTH.Source.User, ccipTH.Dest.ChainSelector, msg)
+ require.NoError(t, err2)
+ txs = append(txs, tx)
+ if !allowOutOfOrderExecution {
+ currentNonce++
+ }
+ }
+
+ // Send a batch of requests in a single block
+ testhelpers.ConfirmTxs(t, txs, ccipTH.Source.Chain)
+ for i := 0; i < n; i++ {
+ ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum+i)
+ }
+ // Should see a report with the full range
+ ccipTH.EventuallyReportCommitted(t, currentSeqNum+n-1)
+ // Should all be executed
+ executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum+n-1)
+ for _, execLog := range executionLogs {
+ ccipTH.AssertExecState(t, execLog, testhelpers.ExecutionStateSuccess)
+ }
+
+ currentSeqNum += n
+ })
+
+ // Deploy new on ramp,Commit store,off ramp
+ // Delete v1 jobs
+ // Send a number of requests
+ // Upgrade the router with new contracts
+ // create new jobs
+ // Verify all pending requests are sent after the contracts are upgraded
+ t.Run("upgrade contracts and verify requests can be sent with upgraded contract", func(t *testing.T) {
+ gasLimit := big.NewInt(200_003) // prime number
+ tokenAmount := big.NewInt(100)
+ commitStoreV1 := ccipTH.Dest.CommitStore
+ offRampV1 := ccipTH.Dest.OffRamp
+ onRampV1 := ccipTH.Source.OnRamp
+ // deploy v2 contracts
+ ccipTH.DeployNewOnRamp(t)
+ ccipTH.DeployNewCommitStore(t)
+ ccipTH.DeployNewOffRamp(t)
+
+ // send a request as the v2 contracts are not enabled in router it should route through the v1 contracts
+ t.Logf("sending request for seqnum %d", currentSeqNum)
+ ccipTH.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address())
+ ccipTH.Source.Chain.Commit()
+ ccipTH.Dest.Chain.Commit()
+ t.Logf("verifying seqnum %d on previous onRamp %s", currentSeqNum, onRampV1.Address().Hex())
+ ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum, onRampV1.Address())
+ ccipTH.EventuallyReportCommitted(t, currentSeqNum, commitStoreV1.Address())
+ executionLog := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum, offRampV1.Address())
+ ccipTH.AssertExecState(t, executionLog[0], testhelpers.ExecutionStateSuccess, offRampV1.Address())
+
+ nonceAtOnRampV1, err := onRampV1.GetSenderNonce(nil, ccipTH.Source.User.From)
+ require.NoError(t, err, "getting nonce from onRamp")
+ require.Equal(t, currentNonce, nonceAtOnRampV1, "nonce should be synced from v1 onRamp")
+ nonceAtOffRampV1, err := offRampV1.GetSenderNonce(nil, ccipTH.Source.User.From)
+ require.NoError(t, err, "getting nonce from offRamp")
+ require.Equal(t, currentNonce, nonceAtOffRampV1, "nonce should be synced from v1 offRamp")
+
+ // enable the newly deployed contracts
+ newConfigBlock := ccipTH.Dest.Chain.Blockchain().CurrentBlock().Number.Int64()
+ ccipTH.EnableOnRamp(t)
+ ccipTH.EnableCommitStore(t)
+ ccipTH.EnableOffRamp(t)
+ srcStartBlock := ccipTH.Source.Chain.Blockchain().CurrentBlock().Number.Uint64()
+
+ // send a number of requests, the requests should not be delivered yet as the previous contracts are not configured
+ // with the router anymore
+ startSeq := 1
+ noOfRequests := 5
+ endSeqNum := startSeq + noOfRequests
+ for i := startSeq; i <= endSeqNum; i++ {
+ t.Logf("sending request for seqnum %d", i)
+ ccipTH.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address())
+ ccipTH.Source.Chain.Commit()
+ ccipTH.Dest.Chain.Commit()
+ ccipTH.EventuallySendRequested(t, uint64(i))
+ }
+
+ // delete v1 jobs
+ for _, node := range ccipTH.Nodes {
+ id := node.FindJobIDForContract(t, commitStoreV1.Address())
+ require.Greater(t, id, int32(0))
+ t.Logf("deleting job %d", id)
+ err = node.App.DeleteJob(context.Background(), id)
+ require.NoError(t, err)
+ id = node.FindJobIDForContract(t, offRampV1.Address())
+ require.Greater(t, id, int32(0))
+ t.Logf("deleting job %d", id)
+ err = node.App.DeleteJob(context.Background(), id)
+ require.NoError(t, err)
+ }
+
+ // Commit on both chains to reach Finality
+ ccipTH.Source.Chain.Commit()
+ ccipTH.Dest.Chain.Commit()
+
+ // create new jobs
+ jobParams = ccipTH.NewCCIPJobSpecParams(tokenPricesUSDPipeline, priceGetterConfigJson, newConfigBlock, "")
+ jobParams.Version = "v2"
+ jobParams.SourceStartBlock = srcStartBlock
+ ccipTH.AddAllJobs(t, jobParams)
+ committedSeqNum := uint64(0)
+ // Now the requests should be delivered
+ for i := startSeq; i <= endSeqNum; i++ {
+ t.Logf("verifying seqnum %d", i)
+ ccipTH.AllNodesHaveReqSeqNum(t, i)
+ if committedSeqNum < uint64(i+1) {
+ committedSeqNum = ccipTH.EventuallyReportCommitted(t, i)
+ }
+ ccipTH.EventuallyExecutionStateChangedToSuccess(t, []uint64{uint64(i)}, uint64(newConfigBlock))
+ }
+
+ // nonces should be correctly synced from v1 contracts for the sender
+ nonceAtOnRampV2, err := ccipTH.Source.OnRamp.GetSenderNonce(nil, ccipTH.Source.User.From)
+ require.NoError(t, err, "getting nonce from onRamp")
+ nonceAtOffRampV2, err := ccipTH.Dest.OffRamp.GetSenderNonce(nil, ccipTH.Source.User.From)
+ require.NoError(t, err, "getting nonce from offRamp")
+ require.Equal(t, nonceAtOnRampV1+uint64(noOfRequests)+1, nonceAtOnRampV2, "nonce should be synced from v1 onRamps")
+ require.Equal(t, nonceAtOffRampV1+uint64(noOfRequests)+1, nonceAtOffRampV2, "nonce should be synced from v1 offRamps")
+ currentSeqNum = endSeqNum + 1
+ if !test.allowOutOfOrderExecution {
+ currentNonce = uint64(currentSeqNum)
+ }
+ })
+
+ t.Run("pay nops", func(t *testing.T) {
+ linkToTransferToOnRamp := big.NewInt(1e18)
+
+ // transfer some link to onramp to pay the nops
+ _, err := ccipTH.Source.LinkToken.Transfer(ccipTH.Source.User, ccipTH.Source.OnRamp.Address(), linkToTransferToOnRamp)
+ require.NoError(t, err)
+ ccipTH.Source.Chain.Commit()
+
+ srcBalReq := []testhelpers.BalanceReq{
+ {
+ Name: testhelpers.Sender,
+ Addr: ccipTH.Source.User.From,
+ Getter: ccipTH.GetSourceWrappedTokenBalance,
+ },
+ {
+ Name: testhelpers.OnRampNative,
+ Addr: ccipTH.Source.OnRamp.Address(),
+ Getter: ccipTH.GetSourceWrappedTokenBalance,
+ },
+ {
+ Name: testhelpers.OnRamp,
+ Addr: ccipTH.Source.OnRamp.Address(),
+ Getter: ccipTH.GetSourceLinkBalance,
+ },
+ {
+ Name: testhelpers.SourceRouter,
+ Addr: ccipTH.Source.Router.Address(),
+ Getter: ccipTH.GetSourceWrappedTokenBalance,
+ },
+ }
+
+ var nopsAndWeights []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight
+ var totalWeight uint16
+ nodes := ccipTH.Nodes
+ for i := range nodes {
+ // For now set the transmitter addresses to be the same as the payee addresses
+ nodes[i].PaymentReceiver = nodes[i].Transmitter
+ nopsAndWeights = append(nopsAndWeights, evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{
+ Nop: nodes[i].PaymentReceiver,
+ Weight: 5,
+ })
+ totalWeight += 5
+ srcBalReq = append(srcBalReq, testhelpers.BalanceReq{
+ Name: fmt.Sprintf("node %d", i),
+ Addr: nodes[i].PaymentReceiver,
+ Getter: ccipTH.GetSourceLinkBalance,
+ })
+ }
+ srcBalances, err := testhelpers.GetBalances(t, srcBalReq)
+ require.NoError(t, err)
+
+ // set nops on the onramp
+ ccipTH.SetNopsOnRamp(t, nopsAndWeights)
+
+ // send a message
+ extraArgs, err := testhelpers.GetEVMExtraArgsV2(big.NewInt(200_000), test.allowOutOfOrderExecution)
+ require.NoError(t, err)
+
+ // FeeToken is empty, indicating it should use native token
+ msg := router.ClientEVM2AnyMessage{
+ Receiver: testhelpers.MustEncodeAddress(t, ccipTH.Dest.Receivers[1].Receiver.Address()),
+ Data: []byte("hello"),
+ TokenAmounts: []router.ClientEVMTokenAmount{},
+ ExtraArgs: extraArgs,
+ FeeToken: common.Address{},
+ }
+ fee, err := ccipTH.Source.Router.GetFee(nil, testhelpers.DestChainSelector, msg)
+ require.NoError(t, err)
+
+ // verify message is sent
+ ccipTH.Source.User.Value = fee
+ ccipTH.SendRequest(t, msg)
+ ccipTH.Source.User.Value = nil
+ ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum)
+ ccipTH.EventuallyReportCommitted(t, currentSeqNum)
+
+ executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum)
+ assert.Len(t, executionLogs, 1)
+ ccipTH.AssertExecState(t, executionLogs[0], testhelpers.ExecutionStateSuccess)
+ currentSeqNum++
+ if test.allowOutOfOrderExecution {
+ currentNonce = uint64(currentSeqNum)
+ }
+
+ // get the nop fee
+ nopFee, err := ccipTH.Source.OnRamp.GetNopFeesJuels(nil)
+ require.NoError(t, err)
+ t.Log("nopFee", nopFee)
+
+ // withdraw fees and verify there is still fund left for nop payment
+ _, err = ccipTH.Source.OnRamp.WithdrawNonLinkFees(
+ ccipTH.Source.User,
+ ccipTH.Source.WrappedNative.Address(),
+ ccipTH.Source.User.From,
+ )
+ require.NoError(t, err)
+ ccipTH.Source.Chain.Commit()
+
+ // pay nops
+ _, err = ccipTH.Source.OnRamp.PayNops(ccipTH.Source.User)
+ require.NoError(t, err)
+ ccipTH.Source.Chain.Commit()
+
+ srcBalanceAssertions := []testhelpers.BalanceAssertion{
+ {
+ // Onramp should not have any balance left in wrapped native
+ Name: testhelpers.OnRampNative,
+ Address: ccipTH.Source.OnRamp.Address(),
+ Expected: big.NewInt(0).String(),
+ Getter: ccipTH.GetSourceWrappedTokenBalance,
+ },
+ {
+ // Onramp should have the remaining link after paying nops
+ Name: testhelpers.OnRamp,
+ Address: ccipTH.Source.OnRamp.Address(),
+ Expected: new(big.Int).Sub(srcBalances[testhelpers.OnRamp], nopFee).String(),
+ Getter: ccipTH.GetSourceLinkBalance,
+ },
+ {
+ Name: testhelpers.SourceRouter,
+ Address: ccipTH.Source.Router.Address(),
+ Expected: srcBalances[testhelpers.SourceRouter].String(),
+ Getter: ccipTH.GetSourceWrappedTokenBalance,
+ },
+ // onRamp's balance (of previously sent fee during message sending) should have been transferred to
+ // the owner as a result of WithdrawNonLinkFees
+ {
+ Name: testhelpers.Sender,
+ Address: ccipTH.Source.User.From,
+ Expected: fee.String(),
+ Getter: ccipTH.GetSourceWrappedTokenBalance,
+ },
+ }
+
+ // the nodes should be paid according to the weights assigned
+ for i, node := range nodes {
+ paymentWeight := float64(nopsAndWeights[i].Weight) / float64(totalWeight)
+ paidInFloat := paymentWeight * float64(nopFee.Int64())
+ paid, _ := new(big.Float).SetFloat64(paidInFloat).Int64()
+ bal := new(big.Int).Add(
+ new(big.Int).SetInt64(paid),
+ srcBalances[fmt.Sprintf("node %d", i)]).String()
+ srcBalanceAssertions = append(srcBalanceAssertions, testhelpers.BalanceAssertion{
+ Name: fmt.Sprintf("node %d", i),
+ Address: node.PaymentReceiver,
+ Expected: bal,
+ Getter: ccipTH.GetSourceLinkBalance,
+ })
+ }
+ ccipTH.AssertBalances(t, srcBalanceAssertions)
+ })
+
+ // Keep on sending a bunch of messages
+ // In the meantime update onchainConfig with new price registry address
+ // Verify if the jobs can pick up updated config
+ // Verify if all the messages are sent
+ t.Run("config change or price registry update while requests are inflight", func(t *testing.T) {
+ gasLimit := big.NewInt(200_003) // prime number
+ tokenAmount := big.NewInt(100)
+ msgWg := &sync.WaitGroup{}
+ msgWg.Add(1)
+ ticker := time.NewTicker(100 * time.Millisecond)
+ defer ticker.Stop()
+ startSeq := currentSeqNum
+ endSeq := currentSeqNum + 20
+
+ // send message with the old configs
+ ccipTH.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address())
+ ccipTH.Source.Chain.Commit()
+
+ go func(ccipContracts testhelpers.CCIPContracts, currentSeqNum int) {
+ seqNumber := currentSeqNum + 1
+ defer msgWg.Done()
+ for {
+ <-ticker.C // wait for ticker
+ t.Logf("sending request for seqnum %d", seqNumber)
+ ccipContracts.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address())
+ ccipContracts.Source.Chain.Commit()
+ seqNumber++
+ if seqNumber == endSeq {
+ return
+ }
+ }
+ }(ccipTH.CCIPContracts, currentSeqNum)
+
+ ccipTH.DeployNewPriceRegistry(t)
+ commitOnchainConfig := ccipTH.CreateDefaultCommitOnchainConfig(t)
+ commitOffchainConfig := ccipTH.CreateDefaultCommitOffchainConfig(t)
+ execOnchainConfig := ccipTH.CreateDefaultExecOnchainConfig(t)
+ execOffchainConfig := ccipTH.CreateDefaultExecOffchainConfig(t)
+
+ ccipTH.SetupOnchainConfig(t, commitOnchainConfig, commitOffchainConfig, execOnchainConfig, execOffchainConfig)
+
+ // wait for all requests to be complete
+ msgWg.Wait()
+ for i := startSeq; i < endSeq; i++ {
+ ccipTH.AllNodesHaveReqSeqNum(t, i)
+ ccipTH.EventuallyReportCommitted(t, i)
+
+ executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, i, i)
+ assert.Len(t, executionLogs, 1)
+ ccipTH.AssertExecState(t, executionLogs[0], testhelpers.ExecutionStateSuccess)
+ }
+
+ for i, node := range ccipTH.Nodes {
+ t.Logf("verifying node %d", i)
+ node.EventuallyNodeUsesNewCommitConfig(t, ccipTH, ccipdata.CommitOnchainConfig{
+ PriceRegistry: ccipTH.Dest.PriceRegistry.Address(),
+ })
+ node.EventuallyNodeUsesNewExecConfig(t, ccipTH, v1_5_0.ExecOnchainConfig{
+ PermissionLessExecutionThresholdSeconds: testhelpers.PermissionLessExecutionThresholdSeconds,
+ Router: ccipTH.Dest.Router.Address(),
+ PriceRegistry: ccipTH.Dest.PriceRegistry.Address(),
+ MaxDataBytes: 1e5,
+ MaxNumberOfTokensPerMsg: 5,
+ MaxPoolReleaseOrMintGas: 200_000,
+ MaxTokenTransferGas: 100_000,
+ })
+ node.EventuallyNodeUsesUpdatedPriceRegistry(t, ccipTH)
+ }
+ currentSeqNum = endSeq
+ if test.allowOutOfOrderExecution {
+ currentNonce = uint64(currentSeqNum)
+ }
+ })
+ })
+ }
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/cache/autosync.go b/core/services/ocr2/plugins/ccip/internal/cache/autosync.go
new file mode 100644
index 00000000000..690b4dd05b9
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/cache/autosync.go
@@ -0,0 +1,141 @@
+package cache
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/pkg/errors"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+)
+
// AutoSync is a generic read-through cache: Get returns the cached value, or
// invokes syncFunc to rebuild it when the implementation decides the cached
// value is stale.
type AutoSync[T any] interface {
	Get(ctx context.Context, syncFunc func(ctx context.Context) (T, error)) (T, error)
}

// LogpollerEventsBased IMPORTANT: Cache refresh relies on the events that are finalized.
// This introduces some delay between the event onchain occurrence and cache refreshing.
// This is intentional, because we want to prevent handling reorgs within the cache.
type LogpollerEventsBased[T any] struct {
	logPoller      logpoller.LogPoller
	observedEvents []common.Hash  // event signatures whose occurrence invalidates the cached value
	address        common.Address // contract emitting the observed events

	lock            *sync.RWMutex // guards value and lastChangeBlock
	value           T             // last synced value
	lastChangeBlock int64         // finalized-block high-water mark; 0 means nothing has been cached yet
}
+
+func NewLogpollerEventsBased[T any](
+ lp logpoller.LogPoller,
+ observedEvents []common.Hash,
+ contractAddress common.Address,
+) *LogpollerEventsBased[T] {
+ var emptyValue T
+ return &LogpollerEventsBased[T]{
+ logPoller: lp,
+ observedEvents: observedEvents,
+ address: contractAddress,
+
+ lock: &sync.RWMutex{},
+ value: emptyValue,
+ lastChangeBlock: 0,
+ }
+}
+
+func (c *LogpollerEventsBased[T]) Get(ctx context.Context, syncFunc func(ctx context.Context) (T, error)) (T, error) {
+ var empty T
+
+ hasExpired, newEventBlockNum, err := c.hasExpired(ctx)
+ if err != nil {
+ return empty, fmt.Errorf("check cache expiration: %w", err)
+ }
+
+ if hasExpired {
+ var latestValue T
+ latestValue, err = syncFunc(ctx)
+ if err != nil {
+ return empty, fmt.Errorf("sync func: %w", err)
+ }
+
+ c.set(latestValue, newEventBlockNum)
+ return latestValue, nil
+ }
+
+ cachedValue := c.get()
+ if err != nil {
+ return empty, fmt.Errorf("get cached value: %w", err)
+ }
+
+ c.lock.Lock()
+ if newEventBlockNum > c.lastChangeBlock {
+ // update the most recent block number
+ // that way the scanning window is shorter in the next run
+ c.lastChangeBlock = newEventBlockNum
+ }
+ c.lock.Unlock()
+
+ return cachedValue, nil
+}
+
+func (c *LogpollerEventsBased[T]) hasExpired(ctx context.Context) (expired bool, blockOfLatestEvent int64, err error) {
+ c.lock.RLock()
+ blockOfCurrentValue := c.lastChangeBlock
+ c.lock.RUnlock()
+
+ // NOTE: latest block should be fetched before LatestBlockByEventSigsAddrsWithConfs
+ // Otherwise there might be new events between LatestBlockByEventSigsAddrsWithConfs and
+ // latestBlock which will be missed.
+ latestBlock, err := c.logPoller.LatestBlock(ctx)
+ latestFinalizedBlock := int64(0)
+ if err != nil && !errors.Is(err, sql.ErrNoRows) {
+ return false, 0, fmt.Errorf("get latest log poller block: %w", err)
+ } else if err == nil {
+ // Since we know that we have all the events till latestBlock.FinalizedBlockNumber
+ // we want to return the block number instead of the block of the latest event
+ // for reducing the scan window on the next call.
+ latestFinalizedBlock = latestBlock.FinalizedBlockNumber
+ }
+
+ if blockOfCurrentValue == 0 {
+ return true, latestFinalizedBlock, nil
+ }
+
+ blockOfLatestEvent, err = c.logPoller.LatestBlockByEventSigsAddrsWithConfs(
+ ctx,
+ blockOfCurrentValue,
+ c.observedEvents,
+ []common.Address{c.address},
+ evmtypes.Finalized,
+ )
+ if err != nil {
+ return false, 0, fmt.Errorf("get latest events form lp: %w", err)
+ }
+
+ if blockOfLatestEvent > latestFinalizedBlock {
+ latestFinalizedBlock = blockOfLatestEvent
+ }
+ return blockOfLatestEvent > blockOfCurrentValue, latestFinalizedBlock, nil
+}
+
+func (c *LogpollerEventsBased[T]) set(value T, blockNum int64) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if c.lastChangeBlock > blockNum {
+ return
+ }
+
+ c.value = value
+ c.lastChangeBlock = blockNum
+}
+
+func (c *LogpollerEventsBased[T]) get() T {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.value
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/cache/autosync_test.go b/core/services/ocr2/plugins/ccip/internal/cache/autosync_test.go
new file mode 100644
index 00000000000..0babfeb421d
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/cache/autosync_test.go
@@ -0,0 +1,128 @@
+package cache_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+)
+
// TestLogpollerEventsBased drives the cache through a scripted sequence of
// rounds. Each round fixes what the mocked log poller reports and asserts the
// value the cache serves. shouldSync is not asserted directly; rounds where it
// is false leave syncData nil, so an unexpected sync would surface as a
// mismatch against expData.
func TestLogpollerEventsBased(t *testing.T) {
	ctx := testutils.Context(t)
	lp := lpmocks.NewLogPoller(t)
	observedEvents := []common.Hash{
		utils.Bytes32FromString("event a"),
		utils.Bytes32FromString("event b"),
	}
	contractAddress := utils.RandomAddress()
	c := cache.NewLogpollerEventsBased[[]int](lp, observedEvents, contractAddress)

	testRounds := []struct {
		logPollerLatestBlock int64 // latest block that logpoller parsed
		latestEventBlock     int64 // latest block that an event was seen
		stateLatestBlock     int64 // block of the current cached value (before run)
		shouldSync           bool  // whether we expect sync to happen in this round
		syncData             []int // data returned after sync
		expData              []int // expected data that cache will return
	}{
		{
			// this is the first 'Get' call to our cache, an event was seen at block 800
			// and now log poller has reached block 1000.
			logPollerLatestBlock: 1000,
			latestEventBlock:     800,
			stateLatestBlock:     0,
			shouldSync:           true,
			syncData:             []int{1, 2, 3},
			expData:              []int{1, 2, 3},
		},
		{
			// log poller moved a few blocks and there weren't any new events
			logPollerLatestBlock: 1010,
			latestEventBlock:     800,
			stateLatestBlock:     1000,
			shouldSync:           false,
			expData:              []int{1, 2, 3},
		},
		{
			// log poller moved a few blocks and there was a new event
			logPollerLatestBlock: 1020,
			latestEventBlock:     1020,
			stateLatestBlock:     1010,
			shouldSync:           true,
			syncData:             []int{111},
			expData:              []int{111},
		},
		{
			// log poller moved a few more blocks and there was another new event
			logPollerLatestBlock: 1050,
			latestEventBlock:     1040,
			stateLatestBlock:     1020,
			shouldSync:           true,
			syncData:             []int{222},
			expData:              []int{222},
		},
		{
			// log poller moved a few more blocks and there wasn't any new event
			logPollerLatestBlock: 1100,
			latestEventBlock:     1040,
			stateLatestBlock:     1050,
			shouldSync:           false,
			expData:              []int{222},
		},
		{
			// log poller moved a few more blocks and there wasn't any new event
			logPollerLatestBlock: 1300,
			latestEventBlock:     1040,
			stateLatestBlock:     1100,
			shouldSync:           false,
			expData:              []int{222},
		},
		{
			// log poller moved a few more blocks and there was a new event
			// more recent than latest block (for whatever internal reason)
			logPollerLatestBlock: 1300,
			latestEventBlock:     1305,
			stateLatestBlock:     1300,
			shouldSync:           true,
			syncData:             []int{666},
			expData:              []int{666},
		},
		{
			// log poller moved a few more blocks and there wasn't any new event
			logPollerLatestBlock: 1300,
			latestEventBlock:     1305,
			stateLatestBlock:     1305, // <-- that's what we are testing in this round
			shouldSync:           false,
			expData:              []int{666},
		},
	}

	for _, round := range testRounds {
		// LatestBlock is consulted first by the cache to bound the scan window.
		lp.On("LatestBlock", mock.Anything).
			Return(logpoller.LogPollerBlock{FinalizedBlockNumber: round.logPollerLatestBlock}, nil).Once()

		// The event scan only runs once something has been cached (non-zero
		// high-water mark); the first round skips it and syncs unconditionally.
		if round.stateLatestBlock > 0 {
			lp.On(
				"LatestBlockByEventSigsAddrsWithConfs",
				mock.Anything,
				round.stateLatestBlock,
				observedEvents,
				[]common.Address{contractAddress},
				evmtypes.Finalized,
			).Return(round.latestEventBlock, nil).Once()
		}

		data, err := c.Get(ctx, func(ctx context.Context) ([]int, error) { return round.syncData, nil })
		assert.NoError(t, err)
		assert.Equal(t, round.expData, data)
	}
}
diff --git a/core/services/ocr2/plugins/ccip/internal/cache/chain_health.go b/core/services/ocr2/plugins/ccip/internal/cache/chain_health.go
new file mode 100644
index 00000000000..00f90615eb2
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/cache/chain_health.go
@@ -0,0 +1,273 @@
+package cache
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/patrickmn/go-cache"
+ "github.com/pkg/errors"
+ "golang.org/x/sync/errgroup"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/services"
+
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+)
+
+// ChainHealthcheck checks the health of the both source and destination chain.
+// Based on the values returned, CCIP can make a decision to stop or continue processing messages.
+// There are four things verified here:
+// 1. Source chain is healthy (this is verified by checking if source LogPoller saw finality violation)
+// 2. Dest chain is healthy (this is verified by checking if destination LogPoller saw finality violation)
+// 3. CommitStore is down (this is verified by checking if CommitStore is down and destination RMN is not cursed)
+// 4. Source chain is cursed (this is verified by checking if source RMN is not cursed)
+//
+// Whenever any of the above checks fail, the chain is considered unhealthy and the CCIP should stop
+// processing messages. Additionally, when the chain is unhealthy, this information is considered "sticky"
+// and is cached for a certain period of time based on defaultGlobalStatusExpirationDuration.
+// This may lead to some false-positives, but in this case we want to be extra cautious and avoid executing any reorged messages.
+//
+// Additionally, to reduce the number of calls to the RPC, we refresh RMN state in the background based on defaultRMNStateRefreshInterval
type ChainHealthcheck interface {
	job.ServiceCtx
	// IsHealthy reports whether the lane can safely continue processing messages.
	IsHealthy(ctx context.Context) (bool, error)
}

const (
	// RMN curse state is refreshed every 10 seconds
	defaultRMNStateRefreshInterval = 10 * time.Second
	// Whenever we mark the chain as unhealthy, we cache this information for 30 minutes
	defaultGlobalStatusExpirationDuration = 30 * time.Minute

	// Cache keys under which the sticky global status and the RMN curse
	// response are stored.
	globalStatusKey = "globalStatus"
	rmnStatusKey    = "rmnCurseCheck"
)

// chainHealthcheck is the concrete ChainHealthcheck implementation, backed by
// an in-memory cache plus a background worker that refreshes the RMN state.
type chainHealthcheck struct {
	cache                    *cache.Cache // stores the sticky global status (bool) and the rmnResponse
	globalStatusKey          string
	rmnStatusKey             string
	globalStatusExpiration   time.Duration // how long an unhealthy sticky status persists
	rmnStatusRefreshInterval time.Duration // cadence of the background RMN-state refresh

	lggr        logger.Logger
	onRamp      ccipdata.OnRampReader
	commitStore ccipdata.CommitStoreReader

	services.StateMachine
	wg               *sync.WaitGroup    // tracks the background refresher goroutine
	backgroundCtx    context.Context    //nolint:containedctx
	backgroundCancel context.CancelFunc // cancels the background refresher on Close
}
+
+func NewChainHealthcheck(lggr logger.Logger, onRamp ccipdata.OnRampReader, commitStore ccipdata.CommitStoreReader) *chainHealthcheck {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ ch := &chainHealthcheck{
+ // Different keys use different expiration times, so we don't need to worry about the default value
+ cache: cache.New(cache.NoExpiration, 0),
+ rmnStatusKey: rmnStatusKey,
+ globalStatusKey: globalStatusKey,
+ globalStatusExpiration: defaultGlobalStatusExpirationDuration,
+ rmnStatusRefreshInterval: defaultRMNStateRefreshInterval,
+
+ lggr: lggr,
+ onRamp: onRamp,
+ commitStore: commitStore,
+
+ wg: new(sync.WaitGroup),
+ backgroundCtx: ctx,
+ backgroundCancel: cancel,
+ }
+ return ch
+}
+
+// newChainHealthcheckWithCustomEviction is used for testing purposes only. It doesn't start background worker
+func newChainHealthcheckWithCustomEviction(lggr logger.Logger, onRamp ccipdata.OnRampReader, commitStore ccipdata.CommitStoreReader, globalStatusDuration time.Duration, rmnStatusRefreshInterval time.Duration) *chainHealthcheck {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ return &chainHealthcheck{
+ cache: cache.New(rmnStatusRefreshInterval, 0),
+ rmnStatusKey: rmnStatusKey,
+ globalStatusKey: globalStatusKey,
+ globalStatusExpiration: globalStatusDuration,
+ rmnStatusRefreshInterval: rmnStatusRefreshInterval,
+
+ lggr: lggr,
+ onRamp: onRamp,
+ commitStore: commitStore,
+
+ wg: new(sync.WaitGroup),
+ backgroundCtx: ctx,
+ backgroundCancel: cancel,
+ }
+}
+
// rmnResponse is the cached outcome of the last RMN curse-state fetch:
// both the health verdict and the error (if any) are replayed to callers.
type rmnResponse struct {
	healthy bool
	err     error
}
+
+func (c *chainHealthcheck) IsHealthy(ctx context.Context) (bool, error) {
+ // Verify if flag is raised to indicate that the chain is not healthy
+ // If set to false then immediately return false without checking the chain
+ if cachedValue, found := c.cache.Get(c.globalStatusKey); found {
+ healthy, ok := cachedValue.(bool)
+ // If cached value is properly casted to bool and not healthy it means the sticky flag is raised
+ // and should be returned immediately
+ if !ok {
+ c.lggr.Criticalw("Failed to cast cached value to sticky healthcheck", "value", cachedValue)
+ } else if ok && !healthy {
+ return false, nil
+ }
+ }
+
+ // These checks are cheap and don't require any communication with the database or RPC
+ if healthy, err := c.checkIfReadersAreHealthy(ctx); err != nil {
+ return false, err
+ } else if !healthy {
+ c.markStickyStatusUnhealthy()
+ return healthy, nil
+ }
+
+ // First call might initialize cache if it's not initialized yet. Otherwise, it will use the cached value
+ if healthy, err := c.checkIfRMNsAreHealthy(ctx); err != nil {
+ return false, err
+ } else if !healthy {
+ c.markStickyStatusUnhealthy()
+ return healthy, nil
+ }
+ return true, nil
+}
+
// Start launches the background RMN-state refresher exactly once
// (job.ServiceCtx lifecycle). The passed context is unused; the worker runs
// on backgroundCtx, which is cancelled by Close.
func (c *chainHealthcheck) Start(context.Context) error {
	return c.StateMachine.StartOnce("ChainHealthcheck", func() error {
		c.lggr.Info("Starting ChainHealthcheck")
		c.wg.Add(1)
		c.run()
		return nil
	})
}

// Close cancels the background refresher and blocks until it has exited.
func (c *chainHealthcheck) Close() error {
	return c.StateMachine.StopOnce("ChainHealthcheck", func() error {
		c.lggr.Info("Closing ChainHealthcheck")
		c.backgroundCancel()
		c.wg.Wait()
		return nil
	})
}
+
+func (c *chainHealthcheck) run() {
+ ticker := time.NewTicker(c.rmnStatusRefreshInterval)
+ go func() {
+ defer c.wg.Done()
+ // Refresh the RMN state immediately after starting the background refresher
+ _, _ = c.refresh(c.backgroundCtx)
+
+ for {
+ select {
+ case <-c.backgroundCtx.Done():
+ return
+ case <-ticker.C:
+ _, err := c.refresh(c.backgroundCtx)
+ if err != nil {
+ c.lggr.Errorw("Failed to refresh RMN state in the background", "err", err)
+ }
+ }
+ }
+ }()
+}
+
+func (c *chainHealthcheck) refresh(ctx context.Context) (bool, error) {
+ healthy, err := c.fetchRMNCurseState(ctx)
+ c.cache.Set(
+ c.rmnStatusKey,
+ rmnResponse{healthy, err},
+ // Cache the value for 3 refresh intervals, this is just a defensive approach
+ // that will enforce the RMN state to be refreshed in case of bg worker hiccup (it should never happen)
+ 3*c.rmnStatusRefreshInterval,
+ )
+ return healthy, err
+}
+
+// checkIfReadersAreHealthy checks if the source and destination chains are healthy by calling underlying LogPoller
+// These calls are cheap because they don't require any communication with the database or RPC, so we don't have
+// to cache the result of these calls.
+func (c *chainHealthcheck) checkIfReadersAreHealthy(ctx context.Context) (bool, error) {
+ sourceChainHealthy, err := c.onRamp.IsSourceChainHealthy(ctx)
+ if err != nil {
+ return false, errors.Wrap(err, "onRamp IsSourceChainHealthy errored")
+ }
+
+ destChainHealthy, err := c.commitStore.IsDestChainHealthy(ctx)
+ if err != nil {
+ return false, errors.Wrap(err, "commitStore IsDestChainHealthy errored")
+ }
+
+ if !sourceChainHealthy || !destChainHealthy {
+ c.lggr.Criticalw(
+ "Lane processing is stopped because source or destination chain is reported unhealthy",
+ "sourceChainHealthy", sourceChainHealthy,
+ "destChainHealthy", destChainHealthy,
+ )
+ }
+ return sourceChainHealthy && destChainHealthy, nil
+}
+
+func (c *chainHealthcheck) checkIfRMNsAreHealthy(ctx context.Context) (bool, error) {
+ if cachedValue, found := c.cache.Get(c.rmnStatusKey); found {
+ rmn := cachedValue.(rmnResponse)
+ return rmn.healthy, rmn.err
+ }
+
+ // If the value is not found in the cache, fetch the RMN curse state in a sync manner for the first time
+ c.lggr.Info("Refreshing RMN state from the plugin routine, this should happen only once per lane during boot")
+ return c.refresh(ctx)
+}
+
// markStickyStatusUnhealthy raises the sticky "globally unhealthy" flag so
// subsequent IsHealthy calls short-circuit to false for globalStatusExpiration.
func (c *chainHealthcheck) markStickyStatusUnhealthy() {
	c.cache.Set(c.globalStatusKey, false, c.globalStatusExpiration)
}
+
+func (c *chainHealthcheck) fetchRMNCurseState(ctx context.Context) (bool, error) {
+ var (
+ eg = new(errgroup.Group)
+ isCommitStoreDown bool
+ isSourceCursed bool
+ )
+
+ eg.Go(func() error {
+ var err error
+ isCommitStoreDown, err = c.commitStore.IsDown(ctx)
+ if err != nil {
+ return errors.Wrap(err, "commitStore isDown check errored")
+ }
+ return nil
+ })
+
+ eg.Go(func() error {
+ var err error
+ isSourceCursed, err = c.onRamp.IsSourceCursed(ctx)
+ if err != nil {
+ return errors.Wrap(err, "onRamp isSourceCursed errored")
+ }
+ return nil
+ })
+
+ if err := eg.Wait(); err != nil {
+ return false, err
+ }
+
+ if isCommitStoreDown || isSourceCursed {
+ c.lggr.Criticalw(
+ "Lane processing is stopped because source chain is cursed or CommitStore is down",
+ "isCommitStoreDown", isCommitStoreDown,
+ "isSourceCursed", isSourceCursed,
+ )
+ return false, nil
+ }
+ return true, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/cache/chain_health_test.go b/core/services/ocr2/plugins/ccip/internal/cache/chain_health_test.go
new file mode 100644
index 00000000000..ccdc7c4b22f
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/cache/chain_health_test.go
@@ -0,0 +1,303 @@
+package cache
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks"
+)
+
+func Test_RMNStateCaching(t *testing.T) {
+ ctx := tests.Context(t)
+ lggr := logger.TestLogger(t)
+ mockCommitStore := mocks.NewCommitStoreReader(t)
+ mockOnRamp := mocks.NewOnRampReader(t)
+
+ chainState := newChainHealthcheckWithCustomEviction(lggr, mockOnRamp, mockCommitStore, 10*time.Hour, 10*time.Hour)
+
+ // Chain is not cursed and healthy
+ mockCommitStore.On("IsDown", ctx).Return(false, nil).Once()
+ mockCommitStore.On("IsDestChainHealthy", ctx).Return(true, nil).Maybe()
+ mockOnRamp.On("IsSourceCursed", ctx).Return(false, nil).Once()
+ mockOnRamp.On("IsSourceChainHealthy", ctx).Return(true, nil).Maybe()
+ healthy, err := chainState.IsHealthy(ctx)
+ assert.NoError(t, err)
+ assert.True(t, healthy)
+
+ // Chain is cursed, but cache is stale
+ mockCommitStore.On("IsDown", ctx).Return(true, nil).Once()
+ mockOnRamp.On("IsSourceCursed", ctx).Return(true, nil).Once()
+ healthy, err = chainState.IsHealthy(ctx)
+ assert.NoError(t, err)
+ assert.True(t, healthy)
+
+ // Enforce cache refresh
+ _, err = chainState.refresh(ctx)
+ assert.NoError(t, err)
+
+ healthy, err = chainState.IsHealthy(ctx)
+ assert.Nil(t, err)
+ assert.False(t, healthy)
+
+ // Chain is not cursed, but previous curse should be "sticky" even when force refreshing
+ mockCommitStore.On("IsDown", ctx).Return(false, nil).Maybe()
+ mockOnRamp.On("IsSourceCursed", ctx).Return(false, nil).Maybe()
+ // Enforce cache refresh
+ _, err = chainState.refresh(ctx)
+ assert.NoError(t, err)
+
+ healthy, err = chainState.IsHealthy(ctx)
+ assert.Nil(t, err)
+ assert.False(t, healthy)
+}
+
+func Test_ChainStateIsCached(t *testing.T) {
+ ctx := tests.Context(t)
+ lggr := logger.TestLogger(t)
+ mockCommitStore := mocks.NewCommitStoreReader(t)
+ mockOnRamp := mocks.NewOnRampReader(t)
+
+ chainState := newChainHealthcheckWithCustomEviction(lggr, mockOnRamp, mockCommitStore, 10*time.Hour, 10*time.Hour)
+
+ // Chain is not cursed and healthy
+ mockCommitStore.On("IsDown", ctx).Return(false, nil).Maybe()
+ mockCommitStore.On("IsDestChainHealthy", ctx).Return(true, nil).Once()
+ mockOnRamp.On("IsSourceCursed", ctx).Return(false, nil).Maybe()
+ mockOnRamp.On("IsSourceChainHealthy", ctx).Return(true, nil).Once()
+
+ _, err := chainState.refresh(ctx)
+ assert.NoError(t, err)
+
+ healthy, err := chainState.IsHealthy(ctx)
+ assert.NoError(t, err)
+ assert.True(t, healthy)
+
+ // Chain is not healthy
+ mockCommitStore.On("IsDestChainHealthy", ctx).Return(false, nil).Once()
+ mockOnRamp.On("IsSourceChainHealthy", ctx).Return(false, nil).Once()
+ _, err = chainState.refresh(ctx)
+ assert.NoError(t, err)
+
+ healthy, err = chainState.IsHealthy(ctx)
+ assert.NoError(t, err)
+ assert.False(t, healthy)
+
+ // Previous value is returned
+ mockCommitStore.On("IsDestChainHealthy", ctx).Return(true, nil).Maybe()
+ mockOnRamp.On("IsSourceChainHealthy", ctx).Return(true, nil).Maybe()
+
+ _, err = chainState.refresh(ctx)
+ assert.NoError(t, err)
+
+ healthy, err = chainState.IsHealthy(ctx)
+ assert.NoError(t, err)
+ assert.False(t, healthy)
+}
+
+func Test_ChainStateIsHealthy(t *testing.T) {
+ testCases := []struct {
+ name string
+ commitStoreDown bool
+ commitStoreErr error
+ onRampCursed bool
+ onRampErr error
+ sourceChainUnhealthy bool
+ sourceChainErr error
+ destChainUnhealthy bool
+ destChainErr error
+
+ expectedState bool
+ expectedErr bool
+ }{
+ {
+ name: "all components healthy",
+ expectedState: true,
+ },
+ {
+ name: "CommitStore is down",
+ commitStoreDown: true,
+ expectedState: false,
+ },
+ {
+ name: "CommitStore error",
+ commitStoreErr: errors.New("commit store error"),
+ expectedErr: true,
+ },
+ {
+ name: "OnRamp is cursed",
+ onRampCursed: true,
+ expectedState: false,
+ },
+ {
+ name: "OnRamp error",
+ onRampErr: errors.New("onramp error"),
+ expectedErr: true,
+ },
+ {
+ name: "Source chain is unhealthy",
+ sourceChainUnhealthy: true,
+ expectedState: false,
+ },
+ {
+ name: "Source chain error",
+ sourceChainErr: errors.New("source chain error"),
+ expectedErr: true,
+ },
+ {
+ name: "Destination chain is unhealthy",
+ destChainUnhealthy: true,
+ expectedState: false,
+ },
+ {
+ name: "Destination chain error",
+ destChainErr: errors.New("destination chain error"),
+ expectedErr: true,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx := tests.Context(t)
+ mockCommitStore := mocks.NewCommitStoreReader(t)
+ mockOnRamp := mocks.NewOnRampReader(t)
+
+ mockCommitStore.On("IsDown", ctx).Return(tc.commitStoreDown, tc.commitStoreErr).Maybe()
+ mockCommitStore.On("IsDestChainHealthy", ctx).Return(!tc.destChainUnhealthy, tc.destChainErr).Maybe()
+ mockOnRamp.On("IsSourceCursed", ctx).Return(tc.onRampCursed, tc.onRampErr).Maybe()
+ mockOnRamp.On("IsSourceChainHealthy", ctx).Return(!tc.sourceChainUnhealthy, tc.sourceChainErr).Maybe()
+
+ chainState := newChainHealthcheckWithCustomEviction(logger.TestLogger(t), mockOnRamp, mockCommitStore, 10*time.Hour, 10*time.Hour)
+
+ healthy, err := chainState.IsHealthy(ctx)
+
+ if tc.expectedErr {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, tc.expectedState, healthy)
+ }
+ })
+ }
+}
+
+func Test_RefreshingInBackground(t *testing.T) {
+ mockCommitStore := newCommitStoreWrapper(t, true, nil)
+ mockCommitStore.CommitStoreReader.On("IsDestChainHealthy", mock.Anything).Return(true, nil).Maybe()
+
+ mockOnRamp := newOnRampWrapper(t, true, nil)
+ mockOnRamp.OnRampReader.On("IsSourceChainHealthy", mock.Anything).Return(true, nil).Maybe()
+
+ chainState := newChainHealthcheckWithCustomEviction(
+ logger.TestLogger(t),
+ mockOnRamp,
+ mockCommitStore,
+ 10*time.Microsecond,
+ 10*time.Microsecond,
+ )
+ require.NoError(t, chainState.Start(tests.Context(t)))
+
+ // All healthy
+ assertHealthy(t, chainState, true)
+
+ // Commit store not healthy
+ mockCommitStore.set(false, nil)
+ assertHealthy(t, chainState, false)
+
+ // Commit store error
+ mockCommitStore.set(false, fmt.Errorf("commit store error"))
+ assertError(t, chainState)
+
+ // Commit store is back
+ mockCommitStore.set(true, nil)
+ assertHealthy(t, chainState, true)
+
+ // OnRamp not healthy
+ mockOnRamp.set(false, nil)
+ assertHealthy(t, chainState, false)
+
+ // OnRamp error
+ mockOnRamp.set(false, fmt.Errorf("onramp error"))
+ assertError(t, chainState)
+
+ // All back in healthy state
+ mockOnRamp.set(true, nil)
+ assertHealthy(t, chainState, true)
+
+ require.NoError(t, chainState.Close())
+}
+
+func assertHealthy(t *testing.T, ch *chainHealthcheck, expected bool) {
+ assert.Eventually(t, func() bool {
+ healthy, err := ch.IsHealthy(testutils.Context(t))
+ return err == nil && healthy == expected
+ }, testutils.WaitTimeout(t), testutils.TestInterval)
+}
+
+func assertError(t *testing.T, ch *chainHealthcheck) {
+ assert.Eventually(t, func() bool {
+ _, err := ch.IsHealthy(testutils.Context(t))
+ return err != nil
+ }, testutils.WaitTimeout(t), testutils.TestInterval)
+}
+
+type fakeStatusWrapper struct {
+ *mocks.CommitStoreReader
+ *mocks.OnRampReader
+
+ healthy bool
+ err error
+ mu *sync.Mutex
+}
+
+func newCommitStoreWrapper(t *testing.T, healthy bool, err error) *fakeStatusWrapper {
+ return &fakeStatusWrapper{
+ CommitStoreReader: mocks.NewCommitStoreReader(t),
+ healthy: healthy,
+ err: err,
+ mu: new(sync.Mutex),
+ }
+}
+
+func newOnRampWrapper(t *testing.T, healthy bool, err error) *fakeStatusWrapper {
+ return &fakeStatusWrapper{
+ OnRampReader: mocks.NewOnRampReader(t),
+ healthy: healthy,
+ err: err,
+ mu: new(sync.Mutex),
+ }
+}
+
+func (f *fakeStatusWrapper) IsDown(context.Context) (bool, error) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ return !f.healthy, f.err
+}
+
+func (f *fakeStatusWrapper) IsSourceCursed(context.Context) (bool, error) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ return !f.healthy, f.err
+}
+
+func (f *fakeStatusWrapper) Close() error {
+ return nil
+}
+
+func (f *fakeStatusWrapper) set(healthy bool, err error) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ f.healthy = healthy
+ f.err = err
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/cache/commit_roots.go b/core/services/ocr2/plugins/ccip/internal/cache/commit_roots.go
new file mode 100644
index 00000000000..5f8bd5edc56
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/cache/commit_roots.go
@@ -0,0 +1,243 @@
+package cache
+
+import (
+ "context"
+ "slices"
+ "sync"
+ "time"
+
+ "github.com/patrickmn/go-cache"
+ orderedmap "github.com/wk8/go-ordered-map/v2"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+const (
+ // EvictionGracePeriod defines how long after the messageVisibilityInterval a root is still kept in the cache
+ EvictionGracePeriod = 1 * time.Hour
+ // CleanupInterval defines how often roots cache is scanned to evict stale roots
+ CleanupInterval = 30 * time.Minute
+)
+
+type CommitsRootsCache interface {
+ RootsEligibleForExecution(ctx context.Context) ([]ccip.CommitStoreReport, error)
+ MarkAsExecuted(merkleRoot [32]byte)
+ Snooze(merkleRoot [32]byte)
+}
+
+func NewCommitRootsCache(
+ lggr logger.Logger,
+ reader ccip.CommitStoreReader,
+ messageVisibilityInterval time.Duration,
+ rootSnoozeTime time.Duration,
+) CommitsRootsCache {
+ return newCommitRootsCache(
+ lggr,
+ reader,
+ messageVisibilityInterval,
+ rootSnoozeTime,
+ CleanupInterval,
+ EvictionGracePeriod,
+ )
+}
+
+func newCommitRootsCache(
+ lggr logger.Logger,
+ reader ccip.CommitStoreReader,
+ messageVisibilityInterval time.Duration,
+ rootSnoozeTime time.Duration,
+ cleanupInterval time.Duration,
+ evictionGracePeriod time.Duration,
+) *commitRootsCache {
+ snoozedRoots := cache.New(rootSnoozeTime, cleanupInterval)
+ executedRoots := cache.New(messageVisibilityInterval+evictionGracePeriod, cleanupInterval)
+
+ return &commitRootsCache{
+ lggr: lggr,
+ reader: reader,
+ rootSnoozeTime: rootSnoozeTime,
+ finalizedRoots: orderedmap.New[string, ccip.CommitStoreReportWithTxMeta](),
+ executedRoots: executedRoots,
+ snoozedRoots: snoozedRoots,
+ messageVisibilityInterval: messageVisibilityInterval,
+ latestFinalizedCommitRootTs: time.Now().Add(-messageVisibilityInterval),
+ cacheMu: sync.RWMutex{},
+ }
+}
+
+type commitRootsCache struct {
+ lggr logger.Logger
+ reader ccip.CommitStoreReader
+ messageVisibilityInterval time.Duration
+ rootSnoozeTime time.Duration
+
+ // Mutable state. finalizedRoots is thread-safe by default, but updating latestFinalizedCommitRootTs and finalizedRoots requires locking.
+ cacheMu sync.RWMutex
+ // finalizedRoots is a map of merkleRoot -> CommitStoreReportWithTxMeta. It stores all the CommitReports that are
+ // marked as finalized by LogPoller, but not executed yet. Keeping only finalized reports doesn't require any state sync between LP and the cache.
+ // In order to keep this map size under control, we evict stale items every time we fetch new logs from the database.
+ // Also, ccip.CommitStoreReportWithTxMeta is a very tiny entity with almost fixed size, so it's not a big deal to keep it in memory.
+ // In case of high memory footprint caused by storing roots, we can make these even more lightweight by removing token/gas price updates.
+ // Whenever the root is executed (all messages executed and ExecutionStateChange events are finalized), we remove the root from the map.
+ finalizedRoots *orderedmap.OrderedMap[string, ccip.CommitStoreReportWithTxMeta]
+ // snoozedRoots used only for temporary snoozing roots. It's a cache with TTL (usually around 5 minutes, but this configuration is set up on chain using rootSnoozeTime)
+ snoozedRoots *cache.Cache
+ // executedRoots is a cache with TTL (usually around 8 hours, but this configuration is set up on chain using messageVisibilityInterval).
+ // We keep executed roots there to make sure we don't accidentally try to reprocess already executed CommitReport
+ executedRoots *cache.Cache
+ // latestFinalizedCommitRootTs is the timestamp of the latest finalized commit root (youngest in terms of timestamp).
+	// It's used to get only the logs that were considered unfinalized in a previous run.
+ // This way we limit database scans to the minimum and keep polling "unfinalized" part of the ReportAccepted events queue.
+ latestFinalizedCommitRootTs time.Time
+}
+
+func (r *commitRootsCache) RootsEligibleForExecution(ctx context.Context) ([]ccip.CommitStoreReport, error) {
+ // 1. Fetch all the logs from the database after the latest finalized commit root timestamp.
+ // If this is a first run, it will fetch all the logs based on the messageVisibilityInterval.
+ // Worst case scenario, it will fetch around 480 reports (OCR Commit 60 seconds (fast chains default) * messageVisibilityInterval set to 8 hours (mainnet default))
+ // Even with the larger messageVisibilityInterval window (e.g. 24 hours) it should be acceptable (around 1500 logs).
+ // Keep in mind that this potentially heavy operation happens only once during the plugin boot and it's no different from the previous implementation.
+ logs, err := r.fetchLogsFromCommitStore(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // 2. Iterate over the logs and check if the root is finalized or not. Return finalized and unfinalized reports
+ // It promotes finalized roots to the finalizedRoots map and evicts stale roots.
+ finalizedReports, unfinalizedReports := r.updateFinalizedRoots(logs)
+
+	// 3. Join finalized commit reports with unfinalized reports and filter out snoozed roots.
+ // Return only the reports that are not snoozed.
+ return r.pickReadyToExecute(finalizedReports, unfinalizedReports), nil
+}
+
+// MarkAsExecuted marks the root as executed. It means that all the messages from the root were executed and the ExecutionStateChange event was finalized.
+// Executed roots are removed from the cache.
+func (r *commitRootsCache) MarkAsExecuted(merkleRoot [32]byte) {
+ prettyMerkleRoot := merkleRootToString(merkleRoot)
+ r.lggr.Infow("Marking root as executed and removing entirely from cache", "merkleRoot", prettyMerkleRoot)
+
+ r.cacheMu.Lock()
+ defer r.cacheMu.Unlock()
+ r.finalizedRoots.Delete(prettyMerkleRoot)
+ r.executedRoots.SetDefault(prettyMerkleRoot, struct{}{})
+}
+
+// Snooze temporarily snoozes the root. It means that the root is not eligible for execution for a certain period of time.
+// Snoozed roots are skipped when calling RootsEligibleForExecution
+func (r *commitRootsCache) Snooze(merkleRoot [32]byte) {
+ prettyMerkleRoot := merkleRootToString(merkleRoot)
+ r.lggr.Infow("Snoozing root temporarily", "merkleRoot", prettyMerkleRoot, "rootSnoozeTime", r.rootSnoozeTime)
+ r.snoozedRoots.SetDefault(prettyMerkleRoot, struct{}{})
+}
+
+func (r *commitRootsCache) isSnoozed(merkleRoot [32]byte) bool {
+ _, snoozed := r.snoozedRoots.Get(merkleRootToString(merkleRoot))
+ return snoozed
+}
+
+func (r *commitRootsCache) isExecuted(merkleRoot [32]byte) bool {
+ _, executed := r.executedRoots.Get(merkleRootToString(merkleRoot))
+ return executed
+}
+
+func (r *commitRootsCache) fetchLogsFromCommitStore(ctx context.Context) ([]ccip.CommitStoreReportWithTxMeta, error) {
+ r.cacheMu.Lock()
+ messageVisibilityWindow := time.Now().Add(-r.messageVisibilityInterval)
+ if r.latestFinalizedCommitRootTs.Before(messageVisibilityWindow) {
+ r.latestFinalizedCommitRootTs = messageVisibilityWindow
+ }
+ commitRootsFilterTimestamp := r.latestFinalizedCommitRootTs
+ r.cacheMu.Unlock()
+
+ // IO operation, release lock before!
+ r.lggr.Infow("Fetching Commit Reports with timestamp greater than or equal to", "blockTimestamp", commitRootsFilterTimestamp)
+ return r.reader.GetAcceptedCommitReportsGteTimestamp(ctx, commitRootsFilterTimestamp, 0)
+}
+
+func (r *commitRootsCache) updateFinalizedRoots(logs []ccip.CommitStoreReportWithTxMeta) ([]ccip.CommitStoreReportWithTxMeta, []ccip.CommitStoreReportWithTxMeta) {
+ r.cacheMu.Lock()
+ defer r.cacheMu.Unlock()
+
+ // Assuming logs are properly ordered by block_timestamp, log_index
+ var unfinalizedReports []ccip.CommitStoreReportWithTxMeta
+ for _, log := range logs {
+ prettyMerkleRoot := merkleRootToString(log.MerkleRoot)
+ // Defensive check, if something is marked as executed, never allow it to come back to the cache
+ if r.isExecuted(log.MerkleRoot) {
+ r.lggr.Debugw("Ignoring root marked as executed", "merkleRoot", prettyMerkleRoot, "blockTimestamp", log.BlockTimestampUnixMilli)
+ continue
+ }
+
+ if log.IsFinalized() {
+ r.lggr.Debugw("Adding finalized root to cache", "merkleRoot", prettyMerkleRoot, "blockTimestamp", log.BlockTimestampUnixMilli)
+ r.finalizedRoots.Store(prettyMerkleRoot, log)
+ } else {
+ r.lggr.Debugw("Bypassing unfinalized root", "merkleRoot", prettyMerkleRoot, "blockTimestamp", log.BlockTimestampUnixMilli)
+ unfinalizedReports = append(unfinalizedReports, log)
+ }
+ }
+
+ if newest := r.finalizedRoots.Newest(); newest != nil {
+ r.latestFinalizedCommitRootTs = time.UnixMilli(newest.Value.BlockTimestampUnixMilli)
+ }
+
+ var finalizedRoots []ccip.CommitStoreReportWithTxMeta
+ var rootsToDelete []string
+
+ messageVisibilityWindow := time.Now().Add(-r.messageVisibilityInterval)
+ for pair := r.finalizedRoots.Oldest(); pair != nil; pair = pair.Next() {
+ // Mark items as stale if they are older than the messageVisibilityInterval
+ // SortedMap doesn't allow to iterate and delete, so we mark roots for deletion and remove them in a separate loop
+ if time.UnixMilli(pair.Value.BlockTimestampUnixMilli).Before(messageVisibilityWindow) {
+ rootsToDelete = append(rootsToDelete, pair.Key)
+ continue
+ }
+ finalizedRoots = append(finalizedRoots, pair.Value)
+ }
+
+ // Remove stale items
+ for _, root := range rootsToDelete {
+ r.finalizedRoots.Delete(root)
+ }
+
+ return finalizedRoots, unfinalizedReports
+}
+
+func (r *commitRootsCache) pickReadyToExecute(r1 []ccip.CommitStoreReportWithTxMeta, r2 []ccip.CommitStoreReportWithTxMeta) []ccip.CommitStoreReport {
+ allReports := append(r1, r2...)
+ eligibleReports := make([]ccip.CommitStoreReport, 0, len(allReports))
+ for _, report := range allReports {
+ if r.isSnoozed(report.MerkleRoot) {
+ r.lggr.Debugw("Skipping snoozed root",
+ "minSeqNr", report.Interval.Min,
+ "maxSeqNr", report.Interval.Max,
+ "merkleRoot", merkleRootToString(report.MerkleRoot))
+ continue
+ }
+ eligibleReports = append(eligibleReports, report.CommitStoreReport)
+ }
+ // safety check, probably not needed
+ slices.SortFunc(eligibleReports, func(i, j ccip.CommitStoreReport) int {
+ return int(i.Interval.Min - j.Interval.Min)
+ })
+ return eligibleReports
+}
+
+// internal use only for testing
+func (r *commitRootsCache) finalizedCachedLogs() []ccip.CommitStoreReport {
+ r.cacheMu.RLock()
+ defer r.cacheMu.RUnlock()
+
+ var finalizedRoots []ccip.CommitStoreReport
+ for pair := r.finalizedRoots.Oldest(); pair != nil; pair = pair.Next() {
+ finalizedRoots = append(finalizedRoots, pair.Value.CommitStoreReport)
+ }
+ return finalizedRoots
+}
+
+func merkleRootToString(merkleRoot ccip.Hash) string {
+ return merkleRoot.String()
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/cache/commit_roots_test.go b/core/services/ocr2/plugins/ccip/internal/cache/commit_roots_test.go
new file mode 100644
index 00000000000..dc0a8443497
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/cache/commit_roots_test.go
@@ -0,0 +1,297 @@
+package cache_test
+
+import (
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+)
+
+func Test_RootsEligibleForExecution(t *testing.T) {
+ ctx := testutils.Context(t)
+ chainID := testutils.NewRandomEVMChainID()
+ orm := logpoller.NewORM(chainID, pgtest.NewSqlxDB(t), logger.TestLogger(t))
+ lpOpts := logpoller.Opts{
+ PollPeriod: time.Hour,
+ FinalityDepth: 2,
+ BackfillBatchSize: 20,
+ RpcBatchSize: 10,
+ KeepFinalizedBlocksDepth: 1000,
+ }
+ lp := logpoller.NewLogPoller(orm, nil, logger.TestLogger(t), nil, lpOpts)
+
+ commitStoreAddr := utils.RandomAddress()
+
+ block2 := time.Now().Add(-8 * time.Hour)
+ block3 := time.Now().Add(-5 * time.Hour)
+ block4 := time.Now().Add(-1 * time.Hour)
+ newBlock4 := time.Now().Add(-2 * time.Hour)
+ block5 := time.Now()
+
+ root1 := utils.RandomBytes32()
+ root2 := utils.RandomBytes32()
+ root3 := utils.RandomBytes32()
+ root4 := utils.RandomBytes32()
+ root5 := utils.RandomBytes32()
+
+ inputLogs := []logpoller.Log{
+ createReportAcceptedLog(t, chainID, commitStoreAddr, 2, 1, root1, block2),
+ createReportAcceptedLog(t, chainID, commitStoreAddr, 2, 2, root2, block2),
+ }
+ require.NoError(t, orm.InsertLogsWithBlock(ctx, inputLogs, logpoller.NewLogPollerBlock(utils.RandomBytes32(), 2, time.Now(), 1)))
+
+ commitStore, err := v1_2_0.NewCommitStore(logger.TestLogger(t), commitStoreAddr, nil, lp)
+ require.NoError(t, err)
+
+ rootsCache := cache.NewCommitRootsCache(logger.TestLogger(t), commitStore, 10*time.Hour, time.Second)
+
+ roots, err := rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ assertRoots(t, roots, root1, root2)
+
+ rootsCache.Snooze(root1)
+ rootsCache.Snooze(root2)
+
+ // Roots are snoozed
+ roots, err = rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ assertRoots(t, roots)
+
+ // Roots are unsnoozed
+ require.Eventually(t, func() bool {
+ roots, err = rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ return len(roots) == 2
+ }, 5*time.Second, 1*time.Second)
+
+ // Marking root as executed doesn't ignore other roots from the same block
+ rootsCache.MarkAsExecuted(root1)
+ roots, err = rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ assertRoots(t, roots, root2)
+
+ // Finality progress, mark all roots as finalized
+ require.NoError(t, orm.InsertBlock(ctx, utils.RandomBytes32(), 3, time.Now(), 3))
+ roots, err = rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ assertRoots(t, roots, root2)
+
+ inputLogs = []logpoller.Log{
+ createReportAcceptedLog(t, chainID, commitStoreAddr, 3, 1, root3, block3),
+ createReportAcceptedLog(t, chainID, commitStoreAddr, 4, 1, root4, block4),
+ createReportAcceptedLog(t, chainID, commitStoreAddr, 5, 1, root5, block5),
+ }
+ require.NoError(t, orm.InsertLogsWithBlock(ctx, inputLogs, logpoller.NewLogPollerBlock(utils.RandomBytes32(), 5, time.Now(), 3)))
+ roots, err = rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ assertRoots(t, roots, root2, root3, root4, root5)
+
+ // Mark root in the middle as executed but keep the oldest one still waiting
+ rootsCache.MarkAsExecuted(root3)
+ roots, err = rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ assertRoots(t, roots, root2, root4, root5)
+
+ // Simulate reorg by removing all unfinalized blocks
+ require.NoError(t, orm.DeleteLogsAndBlocksAfter(ctx, 4))
+ roots, err = rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ assertRoots(t, roots, root2)
+
+ // Root4 comes back but with the different block_timestamp (before the reorged block)
+ inputLogs = []logpoller.Log{
+ createReportAcceptedLog(t, chainID, commitStoreAddr, 4, 1, root4, newBlock4),
+ }
+ require.NoError(t, orm.InsertLogsWithBlock(ctx, inputLogs, logpoller.NewLogPollerBlock(utils.RandomBytes32(), 5, time.Now(), 3)))
+ roots, err = rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ assertRoots(t, roots, root2, root4)
+
+ // Mark everything as executed
+ rootsCache.MarkAsExecuted(root2)
+ rootsCache.MarkAsExecuted(root4)
+ roots, err = rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ assertRoots(t, roots)
+}
+
+func Test_RootsEligibleForExecutionWithReorgs(t *testing.T) {
+ ctx := testutils.Context(t)
+ chainID := testutils.NewRandomEVMChainID()
+ orm := logpoller.NewORM(chainID, pgtest.NewSqlxDB(t), logger.TestLogger(t))
+ lpOpts := logpoller.Opts{
+ PollPeriod: time.Hour,
+ FinalityDepth: 2,
+ BackfillBatchSize: 20,
+ RpcBatchSize: 10,
+ KeepFinalizedBlocksDepth: 1000,
+ }
+ lp := logpoller.NewLogPoller(orm, nil, logger.TestLogger(t), nil, lpOpts)
+
+ commitStoreAddr := utils.RandomAddress()
+
+ block1 := time.Now().Add(-8 * time.Hour)
+ block2 := time.Now().Add(-5 * time.Hour)
+ block3 := time.Now().Add(-2 * time.Hour)
+ block4 := time.Now().Add(-1 * time.Hour)
+
+ root1 := utils.RandomBytes32()
+ root2 := utils.RandomBytes32()
+ root3 := utils.RandomBytes32()
+
+ // Genesis block
+ require.NoError(t, orm.InsertBlock(ctx, utils.RandomBytes32(), 1, block1, 1))
+ inputLogs := []logpoller.Log{
+ createReportAcceptedLog(t, chainID, commitStoreAddr, 2, 1, root1, block2),
+ createReportAcceptedLog(t, chainID, commitStoreAddr, 2, 2, root2, block2),
+ createReportAcceptedLog(t, chainID, commitStoreAddr, 3, 1, root3, block3),
+ }
+ require.NoError(t, orm.InsertLogsWithBlock(ctx, inputLogs, logpoller.NewLogPollerBlock(utils.RandomBytes32(), 3, time.Now(), 1)))
+
+ commitStore, err := v1_2_0.NewCommitStore(logger.TestLogger(t), commitStoreAddr, nil, lp)
+ require.NoError(t, err)
+
+ rootsCache := cache.NewCommitRootsCache(logger.TestLogger(t), commitStore, 10*time.Hour, time.Second)
+
+ // Get all including finalized and unfinalized
+ roots, err := rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ assertRoots(t, roots, root1, root2, root3)
+
+ // Reorg everything away
+ require.NoError(t, orm.DeleteLogsAndBlocksAfter(ctx, 2))
+ roots, err = rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ assertRoots(t, roots)
+
+ // Reinsert the logs, mark first one as finalized
+ inputLogs = []logpoller.Log{
+ createReportAcceptedLog(t, chainID, commitStoreAddr, 3, 1, root1, block3),
+ createReportAcceptedLog(t, chainID, commitStoreAddr, 4, 1, root2, block4),
+ createReportAcceptedLog(t, chainID, commitStoreAddr, 4, 2, root3, block4),
+ }
+ require.NoError(t, orm.InsertLogsWithBlock(ctx, inputLogs, logpoller.NewLogPollerBlock(utils.RandomBytes32(), 5, time.Now(), 3)))
+ roots, err = rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ assertRoots(t, roots, root1, root2, root3)
+
+ // Reorg away everything except the finalized one
+ require.NoError(t, orm.DeleteLogsAndBlocksAfter(ctx, 4))
+ roots, err = rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ assertRoots(t, roots, root1)
+}
+
+// Not very likely, but let's be more defensive here and verify that the cache works properly and can deal with duplicates
+func Test_BlocksWithTheSameTimestamps(t *testing.T) {
+ ctx := testutils.Context(t)
+ chainID := testutils.NewRandomEVMChainID()
+ orm := logpoller.NewORM(chainID, pgtest.NewSqlxDB(t), logger.TestLogger(t))
+ lpOpts := logpoller.Opts{
+ PollPeriod: time.Hour,
+ FinalityDepth: 2,
+ BackfillBatchSize: 20,
+ RpcBatchSize: 10,
+ KeepFinalizedBlocksDepth: 1000,
+ }
+ lp := logpoller.NewLogPoller(orm, nil, logger.TestLogger(t), nil, lpOpts)
+
+ commitStoreAddr := utils.RandomAddress()
+
+ block := time.Now().Add(-1 * time.Hour).Truncate(time.Second)
+ root1 := utils.RandomBytes32()
+ root2 := utils.RandomBytes32()
+
+ inputLogs := []logpoller.Log{
+ createReportAcceptedLog(t, chainID, commitStoreAddr, 2, 1, root1, block),
+ }
+ require.NoError(t, orm.InsertLogsWithBlock(ctx, inputLogs, logpoller.NewLogPollerBlock(utils.RandomBytes32(), 2, time.Now(), 2)))
+
+ commitStore, err := v1_2_0.NewCommitStore(logger.TestLogger(t), commitStoreAddr, nil, lp)
+ require.NoError(t, err)
+
+ rootsCache := cache.NewCommitRootsCache(logger.TestLogger(t), commitStore, 10*time.Hour, time.Second)
+ roots, err := rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ assertRoots(t, roots, root1)
+
+ inputLogs = []logpoller.Log{
+ createReportAcceptedLog(t, chainID, commitStoreAddr, 3, 1, root2, block),
+ }
+ require.NoError(t, orm.InsertLogsWithBlock(ctx, inputLogs, logpoller.NewLogPollerBlock(utils.RandomBytes32(), 3, time.Now(), 3)))
+
+ roots, err = rootsCache.RootsEligibleForExecution(ctx)
+ require.NoError(t, err)
+ assertRoots(t, roots, root1, root2)
+}
+
+func assertRoots(t *testing.T, roots []cciptypes.CommitStoreReport, root ...[32]byte) {
+ require.Len(t, roots, len(root))
+ for i, r := range root {
+ require.Equal(t, r, roots[i].MerkleRoot)
+ }
+}
+
+func createReportAcceptedLog(t testing.TB, chainID *big.Int, address common.Address, blockNumber int64, logIndex int64, merkleRoot common.Hash, blockTimestamp time.Time) logpoller.Log {
+ tAbi, err := commit_store_1_2_0.CommitStoreMetaData.GetAbi()
+ require.NoError(t, err)
+ eseEvent, ok := tAbi.Events["ReportAccepted"]
+ require.True(t, ok)
+
+ gasPriceUpdates := make([]commit_store_1_2_0.InternalGasPriceUpdate, 100)
+ tokenPriceUpdates := make([]commit_store_1_2_0.InternalTokenPriceUpdate, 100)
+
+ for i := 0; i < 100; i++ {
+ gasPriceUpdates[i] = commit_store_1_2_0.InternalGasPriceUpdate{
+ DestChainSelector: uint64(i),
+ UsdPerUnitGas: big.NewInt(int64(i)),
+ }
+ tokenPriceUpdates[i] = commit_store_1_2_0.InternalTokenPriceUpdate{
+ SourceToken: utils.RandomAddress(),
+ UsdPerToken: big.NewInt(int64(i)),
+ }
+ }
+
+ message := commit_store_1_2_0.CommitStoreCommitReport{
+ PriceUpdates: commit_store_1_2_0.InternalPriceUpdates{
+ TokenPriceUpdates: tokenPriceUpdates,
+ GasPriceUpdates: gasPriceUpdates,
+ },
+ Interval: commit_store_1_2_0.CommitStoreInterval{Min: 1, Max: 10},
+ MerkleRoot: merkleRoot,
+ }
+
+ logData, err := eseEvent.Inputs.Pack(message)
+ require.NoError(t, err)
+
+ topic0 := commit_store_1_2_0.CommitStoreReportAccepted{}.Topic()
+
+ return logpoller.Log{
+ Topics: [][]byte{
+ topic0[:],
+ },
+ Data: logData,
+ LogIndex: logIndex,
+ BlockHash: utils.RandomBytes32(),
+ BlockNumber: blockNumber,
+ BlockTimestamp: blockTimestamp.Truncate(time.Millisecond),
+ EventSig: topic0,
+ Address: address,
+ TxHash: utils.RandomBytes32(),
+ EvmChainId: ubig.New(chainID),
+ }
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/cache/commit_roots_unit_test.go b/core/services/ocr2/plugins/ccip/internal/cache/commit_roots_unit_test.go
new file mode 100644
index 00000000000..34a470ef907
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/cache/commit_roots_unit_test.go
@@ -0,0 +1,212 @@
+package cache
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks"
+)
+
+// Test_CacheIsInitializedWithFirstCall verifies that the cache queries the
+// CommitStoreReader on the very first call and returns no roots when the
+// reader reports none.
+func Test_CacheIsInitializedWithFirstCall(t *testing.T) {
+	commitStoreReader := mocks.NewCommitStoreReader(t)
+	cache := newCommitRootsCache(logger.TestLogger(t), commitStoreReader, time.Hour, time.Hour, time.Hour, time.Hour)
+	commitStoreReader.On("GetAcceptedCommitReportsGteTimestamp", mock.Anything, mock.Anything, mock.Anything).Return([]ccip.CommitStoreReportWithTxMeta{}, nil)
+
+	roots, err := cache.RootsEligibleForExecution(tests.Context(t))
+	require.NoError(t, err)
+	assertRoots(t, roots)
+}
+
+// Test_CacheExpiration verifies that once the cache expiry elapses, roots no
+// longer reported by the CommitStore are dropped and only the still-reported
+// unfinalized root remains eligible.
+func Test_CacheExpiration(t *testing.T) {
+	ts1 := time.Now().Add(-5 * time.Millisecond).Truncate(time.Millisecond)
+	ts2 := time.Now().Add(-3 * time.Millisecond).Truncate(time.Millisecond)
+	ts3 := time.Now().Add(-1 * time.Millisecond).Truncate(time.Millisecond)
+
+	root1 := utils.RandomBytes32()
+	root2 := utils.RandomBytes32()
+	root3 := utils.RandomBytes32()
+
+	commitStoreReader := mocks.NewCommitStoreReader(t)
+	cache := newCommitRootsCache(logger.TestLogger(t), commitStoreReader, time.Second, time.Hour, time.Hour, time.Hour)
+	mockCommitStoreReader(commitStoreReader, time.Time{}, []ccip.CommitStoreReportWithTxMeta{
+		createCommitStoreEntry(root1, ts1, true),
+		createCommitStoreEntry(root2, ts2, true),
+		createCommitStoreEntry(root3, ts3, false),
+	})
+	roots, err := cache.RootsEligibleForExecution(tests.Context(t))
+	require.NoError(t, err)
+	assertRoots(t, roots, root1, root2, root3)
+
+	require.Eventually(t, func() bool {
+		mockCommitStoreReader(commitStoreReader, time.Time{}, []ccip.CommitStoreReportWithTxMeta{
+			createCommitStoreEntry(root3, ts3, false),
+		})
+		// Don't use require.NoError here: Eventually runs this condition in
+		// a separate goroutine, and t.FailNow must only be called from the
+		// test goroutine. Treat an error as a not-yet-successful attempt.
+		roots, err = cache.RootsEligibleForExecution(tests.Context(t))
+		if err != nil {
+			return false
+		}
+		return len(roots) == 1 && roots[0].MerkleRoot == root3
+	}, 5*time.Second, 1*time.Second)
+}
+
+// Test_CacheFullEviction loads a large number of roots, marks subsets as
+// executed or snoozed, and verifies every internal collection eventually
+// drains, proving the cache cannot grow without bound.
+func Test_CacheFullEviction(t *testing.T) {
+	commitStoreReader := mocks.NewCommitStoreReader(t)
+	cache := newCommitRootsCache(logger.TestLogger(t), commitStoreReader, 2*time.Second, 1*time.Second, time.Second, time.Second)
+
+	maxElements := 10000
+	commitRoots := make([]ccip.CommitStoreReportWithTxMeta, maxElements)
+	for i := 0; i < maxElements; i++ {
+		finalized := i >= maxElements/2
+		commitRoots[i] = createCommitStoreEntry(utils.RandomBytes32(), time.Now(), finalized)
+	}
+	mockCommitStoreReader(commitStoreReader, time.Time{}, commitRoots)
+
+	roots, err := cache.RootsEligibleForExecution(tests.Context(t))
+	require.NoError(t, err)
+	require.Len(t, roots, maxElements)
+
+	// Marks some of them as executed and some of them as snoozed
+	for i := 0; i < maxElements; i++ {
+		if i%3 == 0 {
+			cache.MarkAsExecuted(commitRoots[i].MerkleRoot)
+		}
+		if i%3 == 1 {
+			cache.Snooze(commitRoots[i].MerkleRoot)
+		}
+	}
+	// Eventually everything should be entirely removed from cache. We need that check to verify if cache doesn't grow indefinitely
+	require.Eventually(t, func() bool {
+		mockCommitStoreReader(commitStoreReader, time.Time{}, []ccip.CommitStoreReportWithTxMeta{})
+		roots1, err1 := cache.RootsEligibleForExecution(tests.Context(t))
+		// Avoid t.FailNow from Eventually's goroutine; report an error as a
+		// not-yet-successful attempt instead.
+		if err1 != nil {
+			return false
+		}
+
+		return len(roots1) == 0 &&
+			cache.finalizedRoots.Len() == 0 &&
+			len(cache.snoozedRoots.Items()) == 0 &&
+			len(cache.executedRoots.Items()) == 0
+	}, 10*time.Second, time.Second)
+}
+
+// Test_CacheProgression_Internal drives the cache through its full lifecycle
+// (empty -> unfinalized -> finalized -> snoozed -> executed), asserting both
+// the eligible root set and the internally cached finalized logs at each step.
+func Test_CacheProgression_Internal(t *testing.T) {
+	ts1 := time.Now().Add(-5 * time.Hour).Truncate(time.Millisecond)
+	ts2 := time.Now().Add(-3 * time.Hour).Truncate(time.Millisecond)
+	ts3 := time.Now().Add(-1 * time.Hour).Truncate(time.Millisecond)
+
+	root1 := utils.RandomBytes32()
+	root2 := utils.RandomBytes32()
+	root3 := utils.RandomBytes32()
+
+	commitStoreReader := mocks.NewCommitStoreReader(t)
+
+	cache := newCommitRootsCache(logger.TestLogger(t), commitStoreReader, 10*time.Hour, time.Hour, time.Hour, time.Hour)
+
+	// Empty cache, no results from the reader
+	mockCommitStoreReader(commitStoreReader, time.Time{}, []ccip.CommitStoreReportWithTxMeta{})
+	roots, err := cache.RootsEligibleForExecution(tests.Context(t))
+	require.NoError(t, err)
+	assertRoots(t, roots)
+	assertRoots(t, cache.finalizedCachedLogs())
+
+	// Single unfinalized root returned
+	mockCommitStoreReader(commitStoreReader, time.Time{}, []ccip.CommitStoreReportWithTxMeta{createCommitStoreEntry(root1, ts1, false)})
+	roots, err = cache.RootsEligibleForExecution(tests.Context(t))
+	require.NoError(t, err)
+	assertRoots(t, roots, root1)
+	assertRoots(t, cache.finalizedCachedLogs())
+
+	// Finalized and unfinalized roots returned
+	mockCommitStoreReader(commitStoreReader, time.Time{}, []ccip.CommitStoreReportWithTxMeta{
+		createCommitStoreEntry(root1, ts1, true),
+		createCommitStoreEntry(root2, ts2, false),
+	})
+	roots, err = cache.RootsEligibleForExecution(tests.Context(t))
+	require.NoError(t, err)
+	assertRoots(t, roots, root1, root2)
+	assertRoots(t, cache.finalizedCachedLogs(), root1)
+
+	// Returning the same data should not impact cache state (no duplicates)
+	mockCommitStoreReader(commitStoreReader, ts1, []ccip.CommitStoreReportWithTxMeta{
+		createCommitStoreEntry(root1, ts1, true),
+		createCommitStoreEntry(root2, ts2, false),
+	})
+	roots, err = cache.RootsEligibleForExecution(tests.Context(t))
+	require.NoError(t, err)
+	assertRoots(t, roots, root1, root2)
+	assertRoots(t, cache.finalizedCachedLogs(), root1)
+
+	// Snoozing oldest root
+	cache.Snooze(root1)
+	mockCommitStoreReader(commitStoreReader, ts1, []ccip.CommitStoreReportWithTxMeta{
+		createCommitStoreEntry(root2, ts2, false),
+		createCommitStoreEntry(root3, ts3, false),
+	})
+	roots, err = cache.RootsEligibleForExecution(tests.Context(t))
+	require.NoError(t, err)
+	assertRoots(t, roots, root2, root3)
+	assertRoots(t, cache.finalizedCachedLogs(), root1)
+
+	// Snoozing everything
+	cache.Snooze(root2)
+	cache.Snooze(root3)
+	mockCommitStoreReader(commitStoreReader, ts1, []ccip.CommitStoreReportWithTxMeta{
+		createCommitStoreEntry(root2, ts2, true),
+		createCommitStoreEntry(root3, ts3, true),
+	})
+	roots, err = cache.RootsEligibleForExecution(tests.Context(t))
+	require.NoError(t, err)
+	assertRoots(t, roots)
+	assertRoots(t, cache.finalizedCachedLogs(), root1, root2, root3)
+
+	// Marking everything as executed removes it entirely, even if root is returned from the CommitStore
+	cache.MarkAsExecuted(root1)
+	cache.MarkAsExecuted(root2)
+	cache.MarkAsExecuted(root3)
+	mockCommitStoreReader(commitStoreReader, ts3, []ccip.CommitStoreReportWithTxMeta{
+		createCommitStoreEntry(root2, ts2, true),
+		createCommitStoreEntry(root3, ts3, true),
+	})
+	roots, err = cache.RootsEligibleForExecution(tests.Context(t))
+	require.NoError(t, err)
+	assertRoots(t, roots)
+	assertRoots(t, cache.finalizedCachedLogs())
+}
+
+// assertRoots checks that reports contains exactly expectedRoots, in order.
+func assertRoots(t *testing.T, reports []ccip.CommitStoreReport, expectedRoots ...[32]byte) {
+	require.Len(t, reports, len(expectedRoots))
+	for i, expected := range expectedRoots {
+		assert.Equal(t, expected, reports[i].MerkleRoot)
+	}
+}
+
+// mockCommitStoreReader registers one expected GetAcceptedCommitReportsGteTimestamp
+// call returning roots. A zero blockTimestamp matches any timestamp argument;
+// otherwise the call must pass exactly that timestamp.
+func mockCommitStoreReader(reader *mocks.CommitStoreReader, blockTimestamp time.Time, roots []ccip.CommitStoreReportWithTxMeta) {
+	tsMatcher := interface{}(mock.Anything)
+	if !blockTimestamp.IsZero() {
+		tsMatcher = blockTimestamp
+	}
+	reader.On("GetAcceptedCommitReportsGteTimestamp", mock.Anything, tsMatcher, mock.Anything).
+		Return(roots, nil).Once()
+}
+
+// createCommitStoreEntry builds a report-with-tx-meta for root, stamped with
+// ts and the requested finalization status.
+func createCommitStoreEntry(root [32]byte, ts time.Time, finalized bool) ccip.CommitStoreReportWithTxMeta {
+	status := ccip.FinalizedStatusNotFinalized
+	if finalized {
+		status = ccip.FinalizedStatusFinalized
+	}
+	return ccip.CommitStoreReportWithTxMeta{
+		CommitStoreReport: ccip.CommitStoreReport{MerkleRoot: root},
+		TxMeta: ccip.TxMeta{
+			BlockTimestampUnixMilli: ts.UnixMilli(),
+			Finalized:               status,
+		},
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/cache/lazy.go b/core/services/ocr2/plugins/ccip/internal/cache/lazy.go
new file mode 100644
index 00000000000..7b15abe271b
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/cache/lazy.go
@@ -0,0 +1,20 @@
+package cache
+
+import "sync"
+
+// LazyFunction is a nullary function producing a value of type T or an error.
+type LazyFunction[T any] func() (T, error)
+
+// LazyFetch wraps fun so that it executes at most once: the first call's
+// result — value and error alike — is memoized and served on every
+// subsequent invocation. Safe for concurrent use via sync.Once.
+func LazyFetch[T any](fun LazyFunction[T]) LazyFunction[T] {
+	var (
+		once   sync.Once
+		cached T
+		err    error
+	)
+
+	return func() (T, error) {
+		once.Do(func() { cached, err = fun() })
+		return cached, err
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/cache/lazy_test.go b/core/services/ocr2/plugins/ccip/internal/cache/lazy_test.go
new file mode 100644
index 00000000000..2777a6c2e0b
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/cache/lazy_test.go
@@ -0,0 +1,71 @@
+package cache
+
+import (
+ "fmt"
+ "sync"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+// TestLazyFetchPass verifies that LazyFetch memoizes the first result: the
+// counter advances once for the direct call and once for the first lazy call,
+// then never again.
+func TestLazyFetchPass(t *testing.T) {
+	counterFunction := createPassingCounter()
+
+	counter, _ := counterFunction()
+	require.Equal(t, 1, counter)
+
+	lazyCounter := LazyFetch(counterFunction)
+	counter, _ = lazyCounter()
+	require.Equal(t, 2, counter)
+
+	// Second lazy call serves the cached value without invoking the counter.
+	counter, _ = lazyCounter()
+	require.Equal(t, 2, counter)
+}
+
+// TestLazyFetchFail verifies that LazyFetch caches errors as well: the wrapped
+// function is not retried after its first (failing) invocation.
+func TestLazyFetchFail(t *testing.T) {
+	counterFunction := createFailingCounter()
+
+	_, err := counterFunction()
+	require.Equal(t, "counter 1 failed", err.Error())
+
+	lazyCounter := LazyFetch(counterFunction)
+	_, err = lazyCounter()
+	require.Equal(t, "counter 2 failed", err.Error())
+
+	// Still the error from call 2 — the counter was not invoked a third time.
+	_, err = lazyCounter()
+	require.Equal(t, "counter 2 failed", err.Error())
+}
+
+// TestLazyFetchMultipleRoutines ensures the lazily-fetched value is computed
+// exactly once even under heavy concurrent access.
+func TestLazyFetchMultipleRoutines(t *testing.T) {
+	routines := 100
+	counterFunction := LazyFetch(createPassingCounter())
+
+	// Collect results and assert on the test goroutine: require.* calls
+	// t.FailNow, which must not be invoked from spawned goroutines.
+	results := make(chan int, routines)
+
+	var wg sync.WaitGroup
+	wg.Add(routines)
+	for i := 0; i < routines; i++ {
+		go func() {
+			defer wg.Done()
+			counter, _ := counterFunction()
+			results <- counter
+		}()
+	}
+
+	wg.Wait()
+	close(results)
+	for counter := range results {
+		require.Equal(t, 1, counter)
+	}
+}
+
+// createFailingCounter returns a closure that always fails, embedding its
+// invocation count in the error message.
+func createFailingCounter() func() (int, error) {
+	calls := 0
+	return func() (int, error) {
+		calls++
+		return 0, fmt.Errorf("counter %d failed", calls)
+	}
+}
+
+// createPassingCounter returns a closure reporting how many times it has run.
+func createPassingCounter() func() (int, error) {
+	calls := 0
+	return func() (int, error) {
+		calls++
+		return calls, nil
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/cache/mocks/chain_health_mock.go b/core/services/ocr2/plugins/ccip/internal/cache/mocks/chain_health_mock.go
new file mode 100644
index 00000000000..595b15774af
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/cache/mocks/chain_health_mock.go
@@ -0,0 +1,183 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// ChainHealthcheck is an autogenerated mock type for the ChainHealthcheck type
+type ChainHealthcheck struct {
+ mock.Mock
+}
+
+type ChainHealthcheck_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *ChainHealthcheck) EXPECT() *ChainHealthcheck_Expecter {
+ return &ChainHealthcheck_Expecter{mock: &_m.Mock}
+}
+
+// Close provides a mock function with given fields:
+func (_m *ChainHealthcheck) Close() error {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for Close")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func() error); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// ChainHealthcheck_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
+type ChainHealthcheck_Close_Call struct {
+ *mock.Call
+}
+
+// Close is a helper method to define mock.On call
+func (_e *ChainHealthcheck_Expecter) Close() *ChainHealthcheck_Close_Call {
+ return &ChainHealthcheck_Close_Call{Call: _e.mock.On("Close")}
+}
+
+func (_c *ChainHealthcheck_Close_Call) Run(run func()) *ChainHealthcheck_Close_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *ChainHealthcheck_Close_Call) Return(_a0 error) *ChainHealthcheck_Close_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *ChainHealthcheck_Close_Call) RunAndReturn(run func() error) *ChainHealthcheck_Close_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// IsHealthy provides a mock function with given fields: ctx
+func (_m *ChainHealthcheck) IsHealthy(ctx context.Context) (bool, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for IsHealthy")
+ }
+
+ var r0 bool
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) bool); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(bool)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// ChainHealthcheck_IsHealthy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsHealthy'
+type ChainHealthcheck_IsHealthy_Call struct {
+ *mock.Call
+}
+
+// IsHealthy is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *ChainHealthcheck_Expecter) IsHealthy(ctx interface{}) *ChainHealthcheck_IsHealthy_Call {
+ return &ChainHealthcheck_IsHealthy_Call{Call: _e.mock.On("IsHealthy", ctx)}
+}
+
+func (_c *ChainHealthcheck_IsHealthy_Call) Run(run func(ctx context.Context)) *ChainHealthcheck_IsHealthy_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *ChainHealthcheck_IsHealthy_Call) Return(_a0 bool, _a1 error) *ChainHealthcheck_IsHealthy_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *ChainHealthcheck_IsHealthy_Call) RunAndReturn(run func(context.Context) (bool, error)) *ChainHealthcheck_IsHealthy_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Start provides a mock function with given fields: _a0
+func (_m *ChainHealthcheck) Start(_a0 context.Context) error {
+ ret := _m.Called(_a0)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Start")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context) error); ok {
+ r0 = rf(_a0)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// ChainHealthcheck_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'
+type ChainHealthcheck_Start_Call struct {
+ *mock.Call
+}
+
+// Start is a helper method to define mock.On call
+// - _a0 context.Context
+func (_e *ChainHealthcheck_Expecter) Start(_a0 interface{}) *ChainHealthcheck_Start_Call {
+ return &ChainHealthcheck_Start_Call{Call: _e.mock.On("Start", _a0)}
+}
+
+func (_c *ChainHealthcheck_Start_Call) Run(run func(_a0 context.Context)) *ChainHealthcheck_Start_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *ChainHealthcheck_Start_Call) Return(_a0 error) *ChainHealthcheck_Start_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *ChainHealthcheck_Start_Call) RunAndReturn(run func(context.Context) error) *ChainHealthcheck_Start_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewChainHealthcheck creates a new instance of ChainHealthcheck. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewChainHealthcheck(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *ChainHealthcheck {
+ mock := &ChainHealthcheck{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health.go b/core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health.go
new file mode 100644
index 00000000000..941162448af
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health.go
@@ -0,0 +1,70 @@
+package cache
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+)
+
+var (
+ laneHealthStatus = promauto.NewGaugeVec(prometheus.GaugeOpts{
+ Name: "ccip_lane_healthcheck_status",
+ Help: "Keep track of the chain healthcheck calls for each lane and plugin",
+ }, []string{"plugin", "source", "dest", "onramp"})
+)
+
+// ObservedChainHealthcheck decorates a ChainHealthcheck, mirroring every
+// successful IsHealthy result into the ccip_lane_healthcheck_status gauge.
+type ObservedChainHealthcheck struct {
+	ChainHealthcheck
+
+	sourceChain string
+	destChain   string
+	plugin      string
+	// onrampAddress is used to distinguish between 1.0/2.0 lanes or blue/green lanes during deployment
+	// This changes very rarely, so it's not a performance concern for Prometheus
+	onrampAddress string
+	// NOTE(review): the package-level gauge is also stored per instance,
+	// presumably for testability — confirm before relying on injection.
+	laneHealthStatus *prometheus.GaugeVec
+}
+
+// NewObservedChainHealthCheck decorates chainHealthcheck with Prometheus
+// reporting of lane health, labelled by plugin, source/dest chain and onramp.
+func NewObservedChainHealthCheck(
+	chainHealthcheck ChainHealthcheck,
+	plugin string,
+	sourceChain int64,
+	destChain int64,
+	onrampAddress cciptypes.Address,
+) *ObservedChainHealthcheck {
+	o := &ObservedChainHealthcheck{
+		ChainHealthcheck: chainHealthcheck,
+		plugin:           plugin,
+		onrampAddress:    string(onrampAddress),
+		laneHealthStatus: laneHealthStatus,
+	}
+	o.sourceChain = strconv.FormatInt(sourceChain, 10)
+	o.destChain = strconv.FormatInt(destChain, 10)
+	return o
+}
+
+// IsHealthy delegates to the wrapped ChainHealthcheck, records the outcome in
+// the lane health gauge, and returns the result unchanged.
+func (o *ObservedChainHealthcheck) IsHealthy(ctx context.Context) (bool, error) {
+	healthy, err := o.ChainHealthcheck.IsHealthy(ctx)
+	o.trackState(healthy, err)
+	return healthy, err
+}
+
+func (o *ObservedChainHealthcheck) trackState(healthy bool, err error) {
+ if err != nil {
+ // Don't report errors as unhealthy, as they are not necessarily indicative of the chain's health
+ // Could be RPC issues, etc.
+ return
+ }
+
+ status := 0
+ if healthy {
+ status = 1
+ }
+
+ o.laneHealthStatus.
+ WithLabelValues(o.plugin, o.sourceChain, o.destChain, o.onrampAddress).
+ Set(float64(status))
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health_test.go b/core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health_test.go
new file mode 100644
index 00000000000..19583a37c70
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health_test.go
@@ -0,0 +1,62 @@
+package cache
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/prometheus/client_golang/prometheus/testutil"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache/mocks"
+)
+
+var address = cciptypes.Address(common.HexToAddress("0x1234567890123456789012345678901234567890").String())
+
+func Test_ObservedChainStateSkipErrors(t *testing.T) {
+ mockedHealthcheck := mocks.NewChainHealthcheck(t)
+ mockedHealthcheck.On("IsHealthy", mock.Anything).Return(false, fmt.Errorf("error"))
+
+ observedChainState := NewObservedChainHealthCheck(
+ mockedHealthcheck,
+ "plugin",
+ 10,
+ 20,
+ address,
+ )
+
+ _, err := observedChainState.IsHealthy(tests.Context(t))
+ assert.Error(t, err)
+ assert.Equal(t, float64(0), testutil.ToFloat64(laneHealthStatus.WithLabelValues("plugin", "10", "20", "0x1234567890123456789012345678901234567890")))
+}
+
+func Test_ObservedChainStateReportsStatus(t *testing.T) {
+ mockedHealthcheck := mocks.NewChainHealthcheck(t)
+ mockedHealthcheck.On("IsHealthy", mock.Anything).Return(true, nil).Once()
+
+ observedChainState := NewObservedChainHealthCheck(
+ mockedHealthcheck,
+ "plugin",
+ 10,
+ 20,
+ address,
+ )
+
+ health, err := observedChainState.IsHealthy(tests.Context(t))
+ require.NoError(t, err)
+ assert.True(t, health)
+ assert.Equal(t, float64(1), testutil.ToFloat64(laneHealthStatus.WithLabelValues("plugin", "10", "20", "0x1234567890123456789012345678901234567890")))
+
+ // Mark as unhealthy
+ mockedHealthcheck.On("IsHealthy", mock.Anything).Return(false, nil).Once()
+
+ health, err = observedChainState.IsHealthy(tests.Context(t))
+ require.NoError(t, err)
+ assert.False(t, health)
+ assert.Equal(t, float64(0), testutil.ToFloat64(laneHealthStatus.WithLabelValues("plugin", "10", "20", "0x1234567890123456789012345678901234567890")))
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/cache/once.go b/core/services/ocr2/plugins/ccip/internal/cache/once.go
new file mode 100644
index 00000000000..713501a03e1
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/cache/once.go
@@ -0,0 +1,38 @@
+package cache
+
+import (
+ "context"
+ "sync"
+)
+
+// OnceCtxFunction is a context-aware function returning a value of type T or an error.
+type OnceCtxFunction[T any] func(ctx context.Context) (T, error)
+
+// CallOnceOnNoError memoizes f's first successful result. While f keeps
+// failing it is re-invoked on every call, so transient errors (e.g. a flaky
+// RPC) are never cached forever. Safe for concurrent use; note that calls to
+// the wrapped function are serialized under a mutex.
+func CallOnceOnNoError[T any](f OnceCtxFunction[T]) OnceCtxFunction[T] {
+	var (
+		mu     sync.Mutex
+		cached T
+		err    error
+		done   bool
+	)
+
+	return func(ctx context.Context) (T, error) {
+		mu.Lock()
+		defer mu.Unlock()
+
+		// Serve the memoized value once any call has succeeded.
+		if done && err == nil {
+			return cached, nil
+		}
+
+		// Invoke f and memoize only on success.
+		cached, err = f(ctx)
+		done = err == nil
+
+		return cached, err
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/cache/once_test.go b/core/services/ocr2/plugins/ccip/internal/cache/once_test.go
new file mode 100644
index 00000000000..6ba2fbddd53
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/cache/once_test.go
@@ -0,0 +1,83 @@
+package cache
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+)
+
+// TestCallOnceOnNoErrorCachingSuccess tests caching behavior when the function succeeds.
+func TestCallOnceOnNoErrorCachingSuccess(t *testing.T) {
+ callCount := 0
+ testFunc := func(ctx context.Context) (string, error) {
+ callCount++
+ return "test result", nil
+ }
+
+ cachedFunc := CallOnceOnNoError(testFunc)
+
+ // Call the function twice.
+ _, err := cachedFunc(tests.Context(t))
+ assert.NoError(t, err, "Expected no error on the first call")
+
+ _, err = cachedFunc(tests.Context(t))
+ assert.NoError(t, err, "Expected no error on the second call")
+
+ assert.Equal(t, 1, callCount, "Function should be called exactly once")
+}
+
+// TestCallOnceOnNoErrorCachingError tests that the function is retried after an error.
+func TestCallOnceOnNoErrorCachingError(t *testing.T) {
+ callCount := 0
+ testFunc := func(ctx context.Context) (string, error) {
+ callCount++
+ if callCount == 1 {
+ return "", errors.New("test error")
+ }
+ return "test result", nil
+ }
+
+ cachedFunc := CallOnceOnNoError(testFunc)
+
+ // First call should fail.
+ _, err := cachedFunc(tests.Context(t))
+ require.Error(t, err, "Expected an error on the first call")
+
+ // Second call should succeed.
+ r, err := cachedFunc(tests.Context(t))
+ assert.NoError(t, err, "Expected no error on the second call")
+ assert.Equal(t, "test result", r)
+ assert.Equal(t, 2, callCount, "Function should be called exactly twice")
+}
+
+// TestCallOnceOnNoErrorCachingConcurrency tests that the function works correctly under concurrent access.
+func TestCallOnceOnNoErrorCachingConcurrency(t *testing.T) {
+	var wg sync.WaitGroup
+	// callCount is only mutated inside the wrapped function, which
+	// CallOnceOnNoError runs under its internal mutex, so no extra
+	// synchronization is needed here.
+	callCount := 0
+	testFunc := func(ctx context.Context) (string, error) {
+		callCount++
+		return "test result", nil
+	}
+
+	cachedFunc := CallOnceOnNoError(testFunc)
+
+	// Simulate concurrent calls.
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			_, err := cachedFunc(tests.Context(t))
+			assert.NoError(t, err, "Expected no error in concurrent execution")
+		}()
+	}
+
+	wg.Wait()
+
+	assert.Equal(t, 1, callCount, "Function should be called exactly once despite concurrent calls")
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipcalc/addr.go b/core/services/ocr2/plugins/ccip/internal/ccipcalc/addr.go
new file mode 100644
index 00000000000..40cdab6df9d
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipcalc/addr.go
@@ -0,0 +1,44 @@
+package ccipcalc
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+)
+
+// EvmAddrsToGeneric converts EVM addresses to their generic CCIP representation.
+func EvmAddrsToGeneric(evmAddrs ...common.Address) []cciptypes.Address {
+	res := make([]cciptypes.Address, len(evmAddrs))
+	for i, addr := range evmAddrs {
+		res[i] = cciptypes.Address(addr.String())
+	}
+	return res
+}
+
+// EvmAddrToGeneric converts a single EVM address to its generic CCIP representation.
+func EvmAddrToGeneric(evmAddr common.Address) cciptypes.Address {
+	return cciptypes.Address(evmAddr.String())
+}
+
+// GenericAddrsToEvm converts generic CCIP addresses into EVM addresses,
+// failing on the first value that is not a valid hex address.
+func GenericAddrsToEvm(genericAddrs ...cciptypes.Address) ([]common.Address, error) {
+	evmAddrs := make([]common.Address, len(genericAddrs))
+	for i, addr := range genericAddrs {
+		hexAddr := string(addr)
+		if !common.IsHexAddress(hexAddr) {
+			return nil, fmt.Errorf("%s not an evm address", addr)
+		}
+		evmAddrs[i] = common.HexToAddress(hexAddr)
+	}
+	return evmAddrs, nil
+}
+
+// GenericAddrToEvm converts a single generic CCIP address to an EVM address,
+// returning an error if it is not a valid hex address.
+func GenericAddrToEvm(genAddr cciptypes.Address) (common.Address, error) {
+	evmAddrs, err := GenericAddrsToEvm(genAddr)
+	if err != nil {
+		return common.Address{}, err
+	}
+	return evmAddrs[0], nil
+}
+
+// HexToAddress normalizes a hex string into a generic CCIP address via
+// go-ethereum's checksummed string form.
+func HexToAddress(h string) cciptypes.Address {
+	return cciptypes.Address(common.HexToAddress(h).String())
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipcalc/calc.go b/core/services/ocr2/plugins/ccip/internal/ccipcalc/calc.go
new file mode 100644
index 00000000000..8ba57e77ed2
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipcalc/calc.go
@@ -0,0 +1,69 @@
+package ccipcalc
+
+import (
+ "math/big"
+ "sort"
+
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+// ContiguousReqs checks if seqNrs contains all numbers from min to max.
+func ContiguousReqs(lggr logger.Logger, min, max uint64, seqNrs []uint64) bool {
+	// Compare in uint64: converting max-min+1 to int can wrap on 32-bit
+	// platforms (e.g. an interval of width 2^32 would convert to 0 and
+	// spuriously match an empty slice).
+	if max-min+1 != uint64(len(seqNrs)) {
+		return false
+	}
+
+	for i, j := min, 0; i <= max && j < len(seqNrs); i, j = i+1, j+1 {
+		if seqNrs[j] != i {
+			lggr.Errorw("unexpected gap in seq nums", "seqNr", i, "minSeqNr", min, "maxSeqNr", max)
+			return false
+		}
+	}
+	return true
+}
+
+// CalculateUsdPerUnitGas returns: (sourceGasPrice * usdPerFeeCoin) / 1e18
+func CalculateUsdPerUnitGas(sourceGasPrice *big.Int, usdPerFeeCoin *big.Int) *big.Int {
+	// (wei / gas) * (usd / eth) * (1 eth / 1e18 wei) = usd/gas
+	product := new(big.Int).Mul(sourceGasPrice, usdPerFeeCoin)
+	return product.Div(product, big.NewInt(1e18))
+}
+
+// BigIntSortedMiddle returns the middle number after sorting the provided numbers. nil is returned if the provided slice is empty.
+// If length of the provided slice is even, the right-hand-side value of the middle 2 numbers is returned.
+// The objective of this function is to always pick within the range of values reported by honest nodes when we have 2f+1 values.
+func BigIntSortedMiddle(vals []*big.Int) *big.Int {
+	if len(vals) == 0 {
+		return nil
+	}
+
+	// Sort a copy so the caller's slice order is left untouched.
+	sorted := append(make([]*big.Int, 0, len(vals)), vals...)
+	sort.Slice(sorted, func(i, j int) bool {
+		return sorted[i].Cmp(sorted[j]) < 0
+	})
+	return sorted[len(sorted)/2]
+}
+
+// Deviates checks if x1 and x2 deviates based on the provided ppb (parts per billion)
+// ppb is calculated based on the smaller value of the two
+// e.g, if x1 > x2, deviation_parts_per_billion = ((x1 - x2) / x2) * 1e9
+func Deviates(x1, x2 *big.Int, ppb int64) bool {
+	// With a zero on either side the relative measure is undefined; treat any
+	// difference as a deviation to avoid dividing by zero.
+	if x1.BitLen() == 0 || x2.BitLen() == 0 {
+		return x1.Cmp(x2) != 0
+	}
+
+	// diff = (x1 - x2) * 1e9, then divide by the smaller operand: this keeps
+	// the ratio consistent regardless of argument order and supports >100% deviation.
+	diff := new(big.Int).Sub(x1, x2)
+	diff.Mul(diff, big.NewInt(1e9))
+	smaller := x2
+	if x2.Cmp(x1) > 0 {
+		smaller = x1
+	}
+	diff.Div(diff, smaller)
+	return diff.CmpAbs(big.NewInt(ppb)) > 0 // abs(diff) > ppb
+}
+
+// MergeEpochAndRound packs an OCR epoch and round into a single uint64:
+// the round occupies the low 8 bits, the epoch the bits above them.
+func MergeEpochAndRound(epoch uint32, round uint8) uint64 {
+	return uint64(epoch)<<8 | uint64(round)
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipcalc/calc_test.go b/core/services/ocr2/plugins/ccip/internal/ccipcalc/calc_test.go
new file mode 100644
index 00000000000..83384eca481
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipcalc/calc_test.go
@@ -0,0 +1,220 @@
+package ccipcalc
+
+import (
+ "math"
+ "math/big"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+func TestMergeEpochAndRound(t *testing.T) {
+ type args struct {
+ epoch uint32
+ round uint8
+ }
+ tests := []struct {
+ name string
+ args args
+ want uint64
+ }{
+ {
+ name: "zero round and epoch",
+ args: args{epoch: 0, round: 0},
+ want: 0,
+ },
+ {
+ name: "avg case",
+ args: args{
+ epoch: 243,
+ round: 15,
+ },
+ want: 62223,
+ },
+ {
+ name: "largest epoch and round",
+ args: args{
+ epoch: math.MaxUint32,
+ round: math.MaxUint8,
+ },
+ want: 1099511627775,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.Equalf(t, tt.want,
+ MergeEpochAndRound(tt.args.epoch, tt.args.round),
+ "mergeEpochAndRound(%v, %v)", tt.args.epoch, tt.args.round)
+ })
+ }
+}
+
+func TestContiguousReqs(t *testing.T) {
+ testCases := []struct {
+ min uint64
+ max uint64
+ seqNrs []uint64
+ exp bool
+ }{
+ {min: 5, max: 10, seqNrs: []uint64{5, 6, 7, 8, 9, 10}, exp: true},
+ {min: 5, max: 10, seqNrs: []uint64{5, 7, 8, 9, 10}, exp: false},
+ {min: 5, max: 10, seqNrs: []uint64{5, 6, 7, 8, 9, 10, 11}, exp: false},
+ {min: 5, max: 10, seqNrs: []uint64{}, exp: false},
+ {min: 1, max: 1, seqNrs: []uint64{1}, exp: true},
+ {min: 6, max: 10, seqNrs: []uint64{5, 7, 8, 9, 10}, exp: false},
+ }
+
+ for _, tc := range testCases {
+ res := ContiguousReqs(logger.NullLogger, tc.min, tc.max, tc.seqNrs)
+ assert.Equal(t, tc.exp, res)
+ }
+}
+
+func TestCalculateUsdPerUnitGas(t *testing.T) {
+ testCases := []struct {
+ name string
+ sourceGasPrice *big.Int
+ usdPerFeeCoin *big.Int
+ exp *big.Int
+ }{
+ {
+ name: "base case",
+ sourceGasPrice: big.NewInt(2e18),
+ usdPerFeeCoin: big.NewInt(3e18),
+ exp: big.NewInt(6e18),
+ },
+ {
+ name: "small numbers",
+ sourceGasPrice: big.NewInt(1000),
+ usdPerFeeCoin: big.NewInt(2000),
+ exp: big.NewInt(0),
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ res := CalculateUsdPerUnitGas(tc.sourceGasPrice, tc.usdPerFeeCoin)
+ assert.Zero(t, tc.exp.Cmp(res))
+ })
+ }
+}
+
+func TestBigIntSortedMiddle(t *testing.T) {
+ tests := []struct {
+ name string
+ vals []*big.Int
+ want *big.Int
+ }{
+ {
+ name: "base case",
+ vals: []*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(4), big.NewInt(5)},
+ want: big.NewInt(4),
+ },
+ {
+ name: "not sorted",
+ vals: []*big.Int{big.NewInt(100), big.NewInt(50), big.NewInt(30), big.NewInt(110)},
+ want: big.NewInt(100),
+ },
+ {
+ name: "empty slice",
+ vals: []*big.Int{},
+ want: nil,
+ },
+ {
+ name: "one item",
+ vals: []*big.Int{big.NewInt(123)},
+ want: big.NewInt(123),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.Equalf(t, tt.want, BigIntSortedMiddle(tt.vals), "BigIntSortedMiddle(%v)", tt.vals)
+ })
+ }
+}
+
+func TestDeviates(t *testing.T) {
+ type args struct {
+ x1 *big.Int
+ x2 *big.Int
+ ppb int64
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "base case",
+ args: args{x1: big.NewInt(1e9), x2: big.NewInt(2e9), ppb: 1},
+ want: true,
+ },
+ {
+ name: "x1 is zero and x1 neq x2",
+ args: args{x1: big.NewInt(0), x2: big.NewInt(1), ppb: 999},
+ want: true,
+ },
+ {
+ name: "x2 is zero and x1 neq x2",
+ args: args{x1: big.NewInt(1), x2: big.NewInt(0), ppb: 999},
+ want: true,
+ },
+ {
+ name: "x1 and x2 are both zero",
+ args: args{x1: big.NewInt(0), x2: big.NewInt(0), ppb: 999},
+ want: false,
+ },
+ {
+ name: "deviates when ppb is 0",
+ args: args{x1: big.NewInt(0), x2: big.NewInt(1), ppb: 0},
+ want: true,
+ },
+ {
+ name: "does not deviate when x1 eq x2",
+ args: args{x1: big.NewInt(5), x2: big.NewInt(5), ppb: 1},
+ want: false,
+ },
+ {
+ name: "does not deviate with high ppb when x2 is greater",
+ args: args{x1: big.NewInt(5), x2: big.NewInt(10), ppb: 2e9},
+ want: false,
+ },
+ {
+ name: "does not deviate with high ppb when x1 is greater",
+ args: args{x1: big.NewInt(10), x2: big.NewInt(5), ppb: 2e9},
+ want: false,
+ },
+ {
+ name: "deviates with low ppb when x2 is greater",
+ args: args{x1: big.NewInt(5), x2: big.NewInt(10), ppb: 9e8},
+ want: true,
+ },
+ {
+ name: "deviates with low ppb when x1 is greater",
+ args: args{x1: big.NewInt(10), x2: big.NewInt(5), ppb: 9e8},
+ want: true,
+ },
+ {
+ name: "near deviation limit but deviates",
+ args: args{x1: big.NewInt(10), x2: big.NewInt(5), ppb: 1e9 - 1},
+ want: true,
+ },
+ {
+ name: "at deviation limit but does not deviate",
+ args: args{x1: big.NewInt(10), x2: big.NewInt(5), ppb: 1e9},
+ want: false,
+ },
+ {
+ name: "near deviation limit but does not deviate",
+ args: args{x1: big.NewInt(10), x2: big.NewInt(5), ppb: 1e9 + 1},
+ want: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.Equalf(t, tt.want, Deviates(tt.args.x1, tt.args.x2, tt.args.ppb), "Deviates(%v, %v, %v)", tt.args.x1, tt.args.x2, tt.args.ppb)
+ })
+ }
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts.go b/core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts.go
new file mode 100644
index 00000000000..4f5ba6cfaea
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts.go
@@ -0,0 +1,140 @@
+package ccipcommon
+
+import (
+ "context"
+ "encoding/binary"
+ "encoding/hex"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/avast/retry-go/v4"
+
+ "golang.org/x/sync/errgroup"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+)
+
+// GetMessageIDsAsHexString converts each message's MessageID into a
+// "0x"-prefixed hex string, preserving the input order. A nil or empty
+// input yields an empty (non-nil) slice.
+func GetMessageIDsAsHexString(messages []cciptypes.EVM2EVMMessage) []string {
+	hexIDs := make([]string, len(messages))
+	for i, msg := range messages {
+		hexIDs[i] = "0x" + hex.EncodeToString(msg.MessageID[:])
+	}
+	return hexIDs
+}
+
+// BackfillArgs bundles the source/destination log pollers together with the
+// block numbers from which each side's backfill should start.
+type BackfillArgs struct {
+	SourceLP, DestLP logpoller.LogPoller
+	SourceStartBlock, DestStartBlock uint64
+}
+
+// GetFilteredSortedLaneTokens returns the union of tokens supported on this lane: the fee tokens
+// from the provided price registry plus the bridgeable tokens from the offRamp. Bridgeable tokens
+// are only included when they are configured on the price getter; the unconfigured ones are
+// returned separately as excludedTokens. Fee tokens are not filtered, as they must always be
+// priced. The lane tokens are deduplicated and sorted (see flattenedAndSortedTokens).
+func GetFilteredSortedLaneTokens(ctx context.Context, offRamp ccipdata.OffRampReader, priceRegistry cciptypes.PriceRegistryReader, priceGetter cciptypes.PriceGetter) (laneTokens []cciptypes.Address, excludedTokens []cciptypes.Address, err error) {
+	destFeeTokens, destBridgeableTokens, err := GetDestinationTokens(ctx, offRamp, priceRegistry)
+	if err != nil {
+		return nil, nil, fmt.Errorf("get tokens with batch limit: %w", err)
+	}
+
+	// Split the bridgeable tokens into those the price getter can price and those it cannot.
+	destTokensWithPrice, destTokensWithoutPrice, err := priceGetter.FilterConfiguredTokens(ctx, destBridgeableTokens)
+	if err != nil {
+		return nil, nil, fmt.Errorf("filter for priced tokens: %w", err)
+	}
+
+	return flattenedAndSortedTokens(destFeeTokens, destTokensWithPrice), destTokensWithoutPrice, nil
+}
+
+// flattenedAndSortedTokens merges the given address slices into the lane token
+// set. Fee tokens can overlap with bridgeable tokens, so duplicates are removed,
+// and the result is sorted ascending so ordering is deterministic for testing
+// and debugging.
+func flattenedAndSortedTokens(slices ...[]cciptypes.Address) (tokens []cciptypes.Address) {
+	tokens = FlattenUniqueSlice(slices...)
+	sort.Slice(tokens, func(i, j int) bool { return tokens[i] < tokens[j] })
+	return tokens
+}
+
+// GetDestinationTokens returns the destination chain fee tokens from the provided price registry
+// and the bridgeable tokens from the offramp. The two reads are independent, so they are issued
+// concurrently via an errgroup; if either fails, the first error is returned and both results are
+// discarded.
+func GetDestinationTokens(ctx context.Context, offRamp ccipdata.OffRampReader, priceRegistry cciptypes.PriceRegistryReader) (fee, bridged []cciptypes.Address, err error) {
+	eg := new(errgroup.Group)
+
+	var destFeeTokens []cciptypes.Address
+	var destBridgeableTokens []cciptypes.Address
+
+	// Fee tokens come from the price registry.
+	eg.Go(func() error {
+		tokens, err := priceRegistry.GetFeeTokens(ctx)
+		if err != nil {
+			return fmt.Errorf("get dest fee tokens: %w", err)
+		}
+		destFeeTokens = tokens
+		return nil
+	})
+
+	// Bridgeable tokens come from the offramp's destination token list.
+	eg.Go(func() error {
+		tokens, err := offRamp.GetTokens(ctx)
+		if err != nil {
+			return fmt.Errorf("get dest bridgeable tokens: %w", err)
+		}
+		destBridgeableTokens = tokens.DestinationTokens
+		return nil
+	})
+
+	if err := eg.Wait(); err != nil {
+		return nil, nil, err
+	}
+
+	return destFeeTokens, destBridgeableTokens, nil
+}
+
+// FlattenUniqueSlice concatenates the given slices into one, keeping only the
+// first occurrence of each element and preserving encounter order.
+func FlattenUniqueSlice[T comparable](slices ...[]T) []T {
+	out := make([]T, 0)
+	seen := make(map[T]struct{})
+
+	for _, slice := range slices {
+		for _, item := range slice {
+			if _, dup := seen[item]; dup {
+				continue
+			}
+			seen[item] = struct{}{}
+			out = append(out, item)
+		}
+	}
+	return out
+}
+
+// IsTxRevertError reports whether err looks like an on-chain execution revert.
+// Geth eth_call reverts with "execution reverted"; Nethermind, Parity and
+// OpenEthereum revert with "VM execution error".
+// See: https://github.com/ethereum/go-ethereum/issues/21886
+func IsTxRevertError(err error) bool {
+	if err == nil {
+		return false
+	}
+	msg := err.Error()
+	return strings.Contains(msg, "execution reverted") || strings.Contains(msg, "VM execution error")
+}
+
+// SelectorToBytes writes chainSelector big-endian into the first 8 bytes of a
+// 16-byte array; the trailing 8 bytes are left zero (PutUint64 only fills
+// b[0:8]).
+func SelectorToBytes(chainSelector uint64) [16]byte {
+	var b [16]byte
+	binary.BigEndian.PutUint64(b[:], chainSelector)
+	return b
+}
+
+// RetryUntilSuccess repeatedly calls fn until it returns a nil error. After each failed call an
+// exponential backoff is applied, starting at initialDelay and capped at maxDelay. The call never
+// gives up on its own (retry.UntilSucceeded), so fn must eventually succeed for this to return.
+// Returns the successful fn result, or the retry library's aggregate error.
+func RetryUntilSuccess[T any](fn func() (T, error), initialDelay time.Duration, maxDelay time.Duration) (T, error) {
+	return retry.DoWithData(
+		fn,
+		retry.Delay(initialDelay),
+		retry.MaxDelay(maxDelay),
+		retry.DelayType(retry.BackOffDelay),
+		retry.UntilSucceeded(),
+	)
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts_test.go b/core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts_test.go
new file mode 100644
index 00000000000..73a3b834956
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts_test.go
@@ -0,0 +1,196 @@
+package ccipcommon
+
+import (
+ "fmt"
+ "math/rand"
+ "sort"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ ccipdatamocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter"
+)
+
+// TestGetMessageIDsAsHexString checks that message IDs are returned in input
+// order as hex strings matching cciptypes.Hash.String(), and that a nil input
+// yields an empty result.
+func TestGetMessageIDsAsHexString(t *testing.T) {
+	t.Run("base", func(t *testing.T) {
+		hashes := make([]cciptypes.Hash, 10)
+		for i := range hashes {
+			hashes[i] = cciptypes.Hash(common.HexToHash(strconv.Itoa(rand.Intn(100000))))
+		}
+
+		msgs := make([]cciptypes.EVM2EVMMessage, len(hashes))
+		for i := range msgs {
+			msgs[i] = cciptypes.EVM2EVMMessage{MessageID: hashes[i]}
+		}
+
+		messageIDs := GetMessageIDsAsHexString(msgs)
+		for i := range messageIDs {
+			assert.Equal(t, hashes[i].String(), messageIDs[i])
+		}
+	})
+
+	t.Run("empty", func(t *testing.T) {
+		messageIDs := GetMessageIDsAsHexString(nil)
+		assert.Empty(t, messageIDs)
+	})
+}
+
+// TestFlattenUniqueSlice covers nil/empty inputs, dedup within a single slice,
+// dedup across slices, and first-occurrence ordering.
+func TestFlattenUniqueSlice(t *testing.T) {
+	testCases := []struct {
+		name           string
+		inputSlices    [][]int
+		expectedOutput []int
+	}{
+		{name: "empty", inputSlices: nil, expectedOutput: []int{}},
+		{name: "empty 2", inputSlices: [][]int{}, expectedOutput: []int{}},
+		{name: "single", inputSlices: [][]int{{1, 2, 3, 3, 3, 4}}, expectedOutput: []int{1, 2, 3, 4}},
+		{name: "simple", inputSlices: [][]int{{1, 2, 3}, {2, 3, 4}}, expectedOutput: []int{1, 2, 3, 4}},
+		{
+			name:           "more complex case",
+			inputSlices:    [][]int{{1, 3}, {2, 4, 3}, {5, 2, -1, 7, 10}},
+			expectedOutput: []int{1, 3, 2, 4, 5, -1, 7, 10},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			res := FlattenUniqueSlice(tc.inputSlices...)
+			assert.Equal(t, tc.expectedOutput, res)
+		})
+	}
+}
+
+// TestGetFilteredChainTokens exercises GetFilteredSortedLaneTokens against mocked
+// price registry / offramp / price getter readers: empty inputs, disjoint fee and
+// dest tokens, full overlap, duplicates, and tokens filtered out by the price
+// getter. The price getter mock returns the expected sets directly, so the test
+// pins the dedup/sort plumbing rather than FilterConfiguredTokens itself.
+func TestGetFilteredChainTokens(t *testing.T) {
+	const numTokens = 6
+	var tokens []cciptypes.Address
+	for i := 0; i < numTokens; i++ {
+		tokens = append(tokens, ccipcalc.EvmAddrToGeneric(utils.RandomAddress()))
+	}
+
+	testCases := []struct {
+		name                   string
+		feeTokens              []cciptypes.Address
+		destTokens             []cciptypes.Address
+		expectedChainTokens    []cciptypes.Address
+		expectedFilteredTokens []cciptypes.Address
+	}{
+		{
+			name:                   "empty",
+			feeTokens:              []cciptypes.Address{},
+			destTokens:             []cciptypes.Address{},
+			expectedChainTokens:    []cciptypes.Address{},
+			expectedFilteredTokens: []cciptypes.Address{},
+		},
+		{
+			name:                   "unique tokens",
+			feeTokens:              []cciptypes.Address{tokens[0]},
+			destTokens:             []cciptypes.Address{tokens[1], tokens[2], tokens[3]},
+			expectedChainTokens:    []cciptypes.Address{tokens[0], tokens[1], tokens[2], tokens[3]},
+			expectedFilteredTokens: []cciptypes.Address{tokens[4], tokens[5]},
+		},
+		{
+			name:                   "all tokens",
+			feeTokens:              []cciptypes.Address{tokens[0]},
+			destTokens:             []cciptypes.Address{tokens[1], tokens[2], tokens[3], tokens[4], tokens[5]},
+			expectedChainTokens:    []cciptypes.Address{tokens[0], tokens[1], tokens[2], tokens[3], tokens[4], tokens[5]},
+			expectedFilteredTokens: []cciptypes.Address{},
+		},
+		{
+			name:                   "overlapping tokens",
+			feeTokens:              []cciptypes.Address{tokens[0]},
+			destTokens:             []cciptypes.Address{tokens[1], tokens[2], tokens[5], tokens[3], tokens[0], tokens[2], tokens[3], tokens[4], tokens[5], tokens[5]},
+			expectedChainTokens:    []cciptypes.Address{tokens[0], tokens[1], tokens[2], tokens[3], tokens[4], tokens[5]},
+			expectedFilteredTokens: []cciptypes.Address{},
+		},
+		{
+			name:                   "unconfigured tokens",
+			feeTokens:              []cciptypes.Address{tokens[0]},
+			destTokens:             []cciptypes.Address{tokens[0], tokens[1], tokens[2], tokens[3], tokens[0], tokens[2], tokens[3], tokens[4], tokens[5], tokens[5]},
+			expectedChainTokens:    []cciptypes.Address{tokens[0], tokens[1], tokens[2], tokens[3], tokens[4]},
+			expectedFilteredTokens: []cciptypes.Address{tokens[5]},
+		},
+	}
+
+	ctx := testutils.Context(t)
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			priceRegistry := ccipdatamocks.NewPriceRegistryReader(t)
+			priceRegistry.On("GetFeeTokens", ctx).Return(tc.feeTokens, nil).Once()
+
+			priceGet := pricegetter.NewMockPriceGetter(t)
+			priceGet.On("FilterConfiguredTokens", mock.Anything, mock.Anything).Return(tc.expectedChainTokens, tc.expectedFilteredTokens, nil)
+
+			offRamp := ccipdatamocks.NewOffRampReader(t)
+			offRamp.On("GetTokens", ctx).Return(cciptypes.OffRampTokens{DestinationTokens: tc.destTokens}, nil).Once()
+
+			chainTokens, filteredTokens, err := GetFilteredSortedLaneTokens(ctx, offRamp, priceRegistry, priceGet)
+			assert.NoError(t, err)
+
+			// The function under test returns sorted lane tokens, so sort the
+			// expected set (in place) before comparing.
+			sort.Slice(tc.expectedChainTokens, func(i, j int) bool {
+				return tc.expectedChainTokens[i] < tc.expectedChainTokens[j]
+			})
+			assert.Equal(t, tc.expectedChainTokens, chainTokens)
+			assert.Equal(t, tc.expectedFilteredTokens, filteredTokens)
+		})
+	}
+}
+
+// TestIsTxRevertError checks revert detection for nil errors, unrelated errors,
+// and the geth / Nethermind revert message substrings.
+func TestIsTxRevertError(t *testing.T) {
+	testCases := []struct {
+		name           string
+		inputError     error
+		expectedOutput bool
+	}{
+		{name: "empty", inputError: nil, expectedOutput: false},
+		{name: "non-revert error", inputError: fmt.Errorf("nothing"), expectedOutput: false},
+		{name: "geth error", inputError: fmt.Errorf("execution reverted"), expectedOutput: true},
+		{name: "nethermind error", inputError: fmt.Errorf("VM execution error"), expectedOutput: true},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			assert.Equal(t, tc.expectedOutput, IsTxRevertError(tc.inputError))
+		})
+	}
+}
+
+// TestRetryUntilSuccess verifies that RetryUntilSuccess keeps calling fn until
+// it succeeds and returns fn's final value, using zero delays to keep the test
+// fast.
+func TestRetryUntilSuccess(t *testing.T) {
+	// Set delays to 0 for tests
+	initialDelay := 0 * time.Nanosecond
+	maxDelay := 0 * time.Nanosecond
+
+	numAttempts := 5
+	numCalls := 0
+	// A function that returns success only after numAttempts calls (it fails
+	// while the decremented counter is still positive). RetryUntilSuccess will
+	// repeatedly call this function until it succeeds.
+	fn := func() (int, error) {
+		numCalls++
+		numAttempts--
+		if numAttempts > 0 {
+			return numCalls, fmt.Errorf("")
+		}
+		return numCalls, nil
+	}
+
+	// Assert that RetryUntilSuccess returns the expected value when fn returns success on the 5th attempt
+	numCalls, err := RetryUntilSuccess(fn, initialDelay, maxDelay)
+	assert.Nil(t, err)
+	assert.Equal(t, 5, numCalls)
+
+	// Assert that RetryUntilSuccess returns the expected value when fn returns success on the 8th attempt
+	numAttempts = 8
+	numCalls = 0
+	numCalls, err = RetryUntilSuccess(fn, initialDelay, maxDelay)
+	assert.Nil(t, err)
+	assert.Equal(t, 8, numCalls)
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/mocks/token_pool_batched_reader_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/mocks/token_pool_batched_reader_mock.go
new file mode 100644
index 00000000000..551cd7c6a68
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/mocks/token_pool_batched_reader_mock.go
@@ -0,0 +1,142 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+
+ ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// TokenPoolBatchedReader is an autogenerated mock type for the TokenPoolBatchedReader type
+type TokenPoolBatchedReader struct {
+ mock.Mock
+}
+
+type TokenPoolBatchedReader_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *TokenPoolBatchedReader) EXPECT() *TokenPoolBatchedReader_Expecter {
+ return &TokenPoolBatchedReader_Expecter{mock: &_m.Mock}
+}
+
+// Close provides a mock function with given fields:
+func (_m *TokenPoolBatchedReader) Close() error {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for Close")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func() error); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// TokenPoolBatchedReader_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
+type TokenPoolBatchedReader_Close_Call struct {
+ *mock.Call
+}
+
+// Close is a helper method to define mock.On call
+func (_e *TokenPoolBatchedReader_Expecter) Close() *TokenPoolBatchedReader_Close_Call {
+ return &TokenPoolBatchedReader_Close_Call{Call: _e.mock.On("Close")}
+}
+
+func (_c *TokenPoolBatchedReader_Close_Call) Run(run func()) *TokenPoolBatchedReader_Close_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *TokenPoolBatchedReader_Close_Call) Return(_a0 error) *TokenPoolBatchedReader_Close_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *TokenPoolBatchedReader_Close_Call) RunAndReturn(run func() error) *TokenPoolBatchedReader_Close_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetInboundTokenPoolRateLimits provides a mock function with given fields: ctx, tokenPoolReaders
+func (_m *TokenPoolBatchedReader) GetInboundTokenPoolRateLimits(ctx context.Context, tokenPoolReaders []ccip.Address) ([]ccip.TokenBucketRateLimit, error) {
+ ret := _m.Called(ctx, tokenPoolReaders)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetInboundTokenPoolRateLimits")
+ }
+
+ var r0 []ccip.TokenBucketRateLimit
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) ([]ccip.TokenBucketRateLimit, error)); ok {
+ return rf(ctx, tokenPoolReaders)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) []ccip.TokenBucketRateLimit); ok {
+ r0 = rf(ctx, tokenPoolReaders)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]ccip.TokenBucketRateLimit)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, []ccip.Address) error); ok {
+ r1 = rf(ctx, tokenPoolReaders)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInboundTokenPoolRateLimits'
+type TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call struct {
+ *mock.Call
+}
+
+// GetInboundTokenPoolRateLimits is a helper method to define mock.On call
+// - ctx context.Context
+// - tokenPoolReaders []ccip.Address
+func (_e *TokenPoolBatchedReader_Expecter) GetInboundTokenPoolRateLimits(ctx interface{}, tokenPoolReaders interface{}) *TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call {
+ return &TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call{Call: _e.mock.On("GetInboundTokenPoolRateLimits", ctx, tokenPoolReaders)}
+}
+
+func (_c *TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call) Run(run func(ctx context.Context, tokenPoolReaders []ccip.Address)) *TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].([]ccip.Address))
+ })
+ return _c
+}
+
+func (_c *TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call) Return(_a0 []ccip.TokenBucketRateLimit, _a1 error) *TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call) RunAndReturn(run func(context.Context, []ccip.Address) ([]ccip.TokenBucketRateLimit, error)) *TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewTokenPoolBatchedReader creates a new instance of TokenPoolBatchedReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewTokenPoolBatchedReader(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *TokenPoolBatchedReader {
+ mock := &TokenPoolBatchedReader{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader.go
new file mode 100644
index 00000000000..57e8df1bde3
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader.go
@@ -0,0 +1,192 @@
+package batchreader
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib"
+
+ "github.com/ethereum/go-ethereum/common"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ type_and_version "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/type_and_version_interface_wrapper"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0"
+)
+
+var (
+	// typeAndVersionABI is the parsed ABI of the TypeAndVersionInterface, used to
+	// build batched typeAndVersion() calls against token pool contracts.
+	typeAndVersionABI = abihelpers.MustParseABI(type_and_version.TypeAndVersionInterfaceABI)
+)
+
+// EVMTokenPoolBatchedReader reads inbound rate limits from many token pools in
+// a single batched RPC call, caching a version-specific TokenPoolReader per
+// pool address.
+type EVMTokenPoolBatchedReader struct {
+	lggr                logger.Logger
+	remoteChainSelector uint64
+	offRampAddress      common.Address
+	evmBatchCaller      rpclib.EvmBatchCaller
+
+	// tokenPoolReaders caches a reader per pool address; guarded by tokenPoolReaderMu.
+	tokenPoolReaders  map[cciptypes.Address]ccipdata.TokenPoolReader
+	tokenPoolReaderMu sync.RWMutex
+}
+
+// TokenPoolBatchedReader is the local alias of the chainlink-common batched
+// token pool reader interface implemented by this package.
+type TokenPoolBatchedReader interface {
+	cciptypes.TokenPoolBatchedReader
+}
+
+// Compile-time check that EVMTokenPoolBatchedReader satisfies the interface.
+var _ TokenPoolBatchedReader = (*EVMTokenPoolBatchedReader)(nil)
+
+// NewEVMTokenPoolBatchedReader builds a batched reader for the given remote
+// chain selector and offramp. It fails only if offRampAddress cannot be
+// converted to an EVM address. The reader cache starts empty and is populated
+// lazily by loadTokenPoolReaders.
+func NewEVMTokenPoolBatchedReader(lggr logger.Logger, remoteChainSelector uint64, offRampAddress cciptypes.Address, evmBatchCaller rpclib.EvmBatchCaller) (*EVMTokenPoolBatchedReader, error) {
+	offRampAddrEvm, err := ccipcalc.GenericAddrToEvm(offRampAddress)
+	if err != nil {
+		return nil, err
+	}
+
+	return &EVMTokenPoolBatchedReader{
+		lggr:                lggr,
+		remoteChainSelector: remoteChainSelector,
+		offRampAddress:      offRampAddrEvm,
+		evmBatchCaller:      evmBatchCaller,
+		tokenPoolReaders:    make(map[cciptypes.Address]ccipdata.TokenPoolReader),
+	}, nil
+}
+
+// GetInboundTokenPoolRateLimits returns the inbound rate limit state for each
+// token pool address, in input order, using a single batched RPC call. Pool
+// readers are created (and cached) on first use; the reader's contract version
+// determines which rate-limit call is issued.
+func (br *EVMTokenPoolBatchedReader) GetInboundTokenPoolRateLimits(ctx context.Context, tokenPools []cciptypes.Address) ([]cciptypes.TokenBucketRateLimit, error) {
+	if len(tokenPools) == 0 {
+		return []cciptypes.TokenBucketRateLimit{}, nil
+	}
+
+	// Ensure a cached reader exists for every requested pool.
+	err := br.loadTokenPoolReaders(ctx, tokenPools)
+	if err != nil {
+		return nil, err
+	}
+
+	tokenPoolReaders := make([]ccipdata.TokenPoolReader, 0, len(tokenPools))
+	for _, poolAddress := range tokenPools {
+		br.tokenPoolReaderMu.RLock()
+		tokenPoolReader, exists := br.tokenPoolReaders[poolAddress]
+		br.tokenPoolReaderMu.RUnlock()
+		if !exists {
+			return nil, fmt.Errorf("token pool %s not found", poolAddress)
+		}
+		tokenPoolReaders = append(tokenPoolReaders, tokenPoolReader)
+	}
+
+	// Build one version-appropriate rate-limit call per pool, preserving order.
+	evmCalls := make([]rpclib.EvmCall, 0, len(tokenPoolReaders))
+	for _, poolReader := range tokenPoolReaders {
+		switch v := poolReader.(type) {
+		case *v1_2_0.TokenPool:
+			evmCalls = append(evmCalls, v1_2_0.GetInboundTokenPoolRateLimitCall(v.Address(), v.OffRampAddress))
+		case *v1_4_0.TokenPool:
+			evmCalls = append(evmCalls, v1_4_0.GetInboundTokenPoolRateLimitCall(v.Address(), v.RemoteChainSelector))
+		default:
+			return nil, fmt.Errorf("unsupported token pool version %T", v)
+		}
+	}
+
+	results, err := br.evmBatchCaller.BatchCall(ctx, 0, evmCalls)
+	if err != nil {
+		return nil, fmt.Errorf("batch call limit: %w", err)
+	}
+
+	// Decode each batch result's first output into a TokenBucketRateLimit.
+	resultsParsed, err := rpclib.ParseOutputs[cciptypes.TokenBucketRateLimit](results, func(d rpclib.DataAndErr) (cciptypes.TokenBucketRateLimit, error) {
+		return rpclib.ParseOutput[cciptypes.TokenBucketRateLimit](d, 0)
+	})
+	if err != nil {
+		return nil, fmt.Errorf("parse outputs: %w", err)
+	}
+	return resultsParsed, nil
+}
+
+// loadTokenPoolReaders loads the token pools into the factory's cache: it finds
+// the addresses not yet cached, fetches their typeAndVersion in one batch call,
+// and registers a version-specific TokenPoolReader for each.
+// NOTE(review): between the RUnlock and the later Lock another goroutine may
+// load the same readers; the writes below then overwrite with equivalent
+// readers, so this looks benign — confirm before relying on it.
+func (br *EVMTokenPoolBatchedReader) loadTokenPoolReaders(ctx context.Context, tokenPoolAddresses []cciptypes.Address) error {
+	var missingTokens []common.Address
+
+	// Collect the pool addresses that have no cached reader yet.
+	br.tokenPoolReaderMu.RLock()
+	for _, poolAddress := range tokenPoolAddresses {
+		if _, exists := br.tokenPoolReaders[poolAddress]; !exists {
+			evmPoolAddr, err := ccipcalc.GenericAddrToEvm(poolAddress)
+			if err != nil {
+				return err
+			}
+			missingTokens = append(missingTokens, evmPoolAddr)
+		}
+	}
+	br.tokenPoolReaderMu.RUnlock()
+
+	// Only continue if there are missing tokens
+	if len(missingTokens) == 0 {
+		return nil
+	}
+
+	typeAndVersions, err := getBatchedTypeAndVersion(ctx, br.evmBatchCaller, missingTokens)
+	if err != nil {
+		return err
+	}
+
+	br.tokenPoolReaderMu.Lock()
+	defer br.tokenPoolReaderMu.Unlock()
+	for i, tokenPoolAddress := range missingTokens {
+		typeAndVersion := typeAndVersions[i]
+		poolType, version, err := ccipconfig.ParseTypeAndVersion(typeAndVersion)
+		if err != nil {
+			return err
+		}
+		// Pools at 1.2.0 and below share the v1_2_0 reader; 1.4.0 gets its own.
+		switch version {
+		case ccipdata.V1_0_0, ccipdata.V1_1_0, ccipdata.V1_2_0:
+			br.tokenPoolReaders[ccipcalc.EvmAddrToGeneric(tokenPoolAddress)] = v1_2_0.NewTokenPool(poolType, tokenPoolAddress, br.offRampAddress)
+		case ccipdata.V1_4_0:
+			br.tokenPoolReaders[ccipcalc.EvmAddrToGeneric(tokenPoolAddress)] = v1_4_0.NewTokenPool(poolType, tokenPoolAddress, br.remoteChainSelector)
+		default:
+			return fmt.Errorf("unsupported token pool version %v", version)
+		}
+	}
+	return nil
+}
+
+// getBatchedTypeAndVersion fetches typeAndVersion() for every pool address in a
+// single batch call, returning the strings in input order. Pools that predate
+// the typeAndVersion method (1.0.0) are mapped to a synthetic "LegacyPool"
+// string rather than an error.
+func getBatchedTypeAndVersion(ctx context.Context, evmBatchCaller rpclib.EvmBatchCaller, poolAddresses []common.Address) ([]string, error) {
+	var evmCalls []rpclib.EvmCall
+
+	for _, poolAddress := range poolAddresses {
+		// Add the typeAndVersion call to the batch
+		evmCalls = append(evmCalls, rpclib.NewEvmCall(
+			typeAndVersionABI,
+			"typeAndVersion",
+			poolAddress,
+		))
+	}
+
+	results, err := evmBatchCaller.BatchCall(ctx, 0, evmCalls)
+	if err != nil {
+		return nil, fmt.Errorf("batch call limit: %w", err)
+	}
+
+	result, err := rpclib.ParseOutputs[string](results, func(d rpclib.DataAndErr) (string, error) {
+		tAndV, err1 := rpclib.ParseOutput[string](d, 0)
+		if err1 != nil {
+			// typeAndVersion method do not exist for 1.0 pools. We are going to get an ErrEmptyOutput in that case.
+			// Some chains, like the simulated chains, will simply revert with "execution reverted"
+			if errors.Is(err1, rpclib.ErrEmptyOutput) || ccipcommon.IsTxRevertError(err1) {
+				return "LegacyPool " + ccipdata.V1_0_0, nil
+			}
+			return "", err1
+		}
+
+		return tAndV, nil
+	})
+	if err != nil {
+		return nil, fmt.Errorf("parse outputs: %w", err)
+	}
+	return result, nil
+}
+
+// Close is a no-op; the batched reader holds no resources that need releasing.
+func (br *EVMTokenPoolBatchedReader) Close() error {
+	return nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader_test.go
new file mode 100644
index 00000000000..c67c3c15276
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader_test.go
@@ -0,0 +1,86 @@
+package batchreader
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "testing"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+)
+
+// TestTokenPoolFactory drives GetInboundTokenPoolRateLimits across pool
+// versions 1.0.0–1.4.0: the first mocked batch call answers typeAndVersion
+// (with an ErrEmptyOutput for 1.0.0, exercising the legacy-pool fallback), the
+// second answers the rate-limit reads. Also checks the empty-input fast path.
+// NOTE(review): uses context.Background() while sibling tests use
+// testutils.Context(t) — consider aligning.
+func TestTokenPoolFactory(t *testing.T) {
+	lggr := logger.TestLogger(t)
+	offRamp := utils.RandomAddress()
+	ctx := context.Background()
+	remoteChainSelector := uint64(2000)
+	batchCallerMock := rpclibmocks.NewEvmBatchCaller(t)
+
+	tokenPoolBatchReader, err := NewEVMTokenPoolBatchedReader(lggr, remoteChainSelector, ccipcalc.EvmAddrToGeneric(offRamp), batchCallerMock)
+	assert.NoError(t, err)
+
+	poolTypes := []string{"BurnMint", "LockRelease"}
+
+	rateLimits := cciptypes.TokenBucketRateLimit{
+		Tokens:      big.NewInt(333333),
+		LastUpdated: 33,
+		IsEnabled:   true,
+		Capacity:    big.NewInt(666666),
+		Rate:        big.NewInt(444444),
+	}
+
+	for _, versionStr := range []string{ccipdata.V1_0_0, ccipdata.V1_1_0, ccipdata.V1_2_0, ccipdata.V1_4_0} {
+		// Empty input must short-circuit without touching the batch caller.
+		gotRateLimits, err := tokenPoolBatchReader.GetInboundTokenPoolRateLimits(ctx, []cciptypes.Address{})
+		require.NoError(t, err)
+		assert.Empty(t, gotRateLimits)
+
+		var batchCallResult []rpclib.DataAndErr
+		for _, poolType := range poolTypes {
+			if versionStr == ccipdata.V1_0_0 {
+				// simulating the behaviour for 1.0.0 pools where typeAndVersion method does not exist
+				batchCallResult = append(batchCallResult, rpclib.DataAndErr{
+					Err: fmt.Errorf("unpack result: %w", rpclib.ErrEmptyOutput),
+				})
+			} else {
+				batchCallResult = append(batchCallResult, rpclib.DataAndErr{
+					Outputs: []any{poolType + " " + versionStr},
+					Err:     nil,
+				})
+			}
+		}
+
+		// First BatchCall: typeAndVersion lookups; second: rate-limit reads.
+		batchCallerMock.On("BatchCall", ctx, uint64(0), mock.Anything).Return(batchCallResult, nil).Once()
+		batchCallerMock.On("BatchCall", ctx, uint64(0), mock.Anything).Return([]rpclib.DataAndErr{{
+			Outputs: []any{rateLimits},
+			Err:     nil,
+		}, {
+			Outputs: []any{rateLimits},
+			Err:     nil,
+		}}, nil).Once()
+
+		var poolAddresses []cciptypes.Address
+
+		// Fresh random addresses each iteration, so readers are re-created per version.
+		for i := 0; i < len(poolTypes); i++ {
+			poolAddresses = append(poolAddresses, ccipcalc.EvmAddrToGeneric(utils.RandomAddress()))
+		}
+
+		gotRateLimits, err = tokenPoolBatchReader.GetInboundTokenPoolRateLimits(ctx, poolAddresses)
+		require.NoError(t, err)
+		assert.Len(t, gotRateLimits, len(poolTypes))
+
+		for _, gotRateLimit := range gotRateLimits {
+			assert.Equal(t, rateLimits, gotRateLimit)
+		}
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/mocks/price_registry_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/mocks/price_registry_mock.go
new file mode 100644
index 00000000000..59588a25d17
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/mocks/price_registry_mock.go
@@ -0,0 +1,97 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package mocks
+
+import (
+ ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ context "context"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// PriceRegistry is an autogenerated mock type for the PriceRegistry type
+type PriceRegistry struct {
+ mock.Mock
+}
+
+type PriceRegistry_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *PriceRegistry) EXPECT() *PriceRegistry_Expecter {
+ return &PriceRegistry_Expecter{mock: &_m.Mock}
+}
+
+// NewPriceRegistryReader provides a mock function with given fields: ctx, addr
+func (_m *PriceRegistry) NewPriceRegistryReader(ctx context.Context, addr ccip.Address) (ccip.PriceRegistryReader, error) {
+ ret := _m.Called(ctx, addr)
+
+ if len(ret) == 0 {
+ panic("no return value specified for NewPriceRegistryReader")
+ }
+
+ var r0 ccip.PriceRegistryReader
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, ccip.Address) (ccip.PriceRegistryReader, error)); ok {
+ return rf(ctx, addr)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, ccip.Address) ccip.PriceRegistryReader); ok {
+ r0 = rf(ctx, addr)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(ccip.PriceRegistryReader)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, ccip.Address) error); ok {
+ r1 = rf(ctx, addr)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// PriceRegistry_NewPriceRegistryReader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewPriceRegistryReader'
+type PriceRegistry_NewPriceRegistryReader_Call struct {
+ *mock.Call
+}
+
+// NewPriceRegistryReader is a helper method to define mock.On call
+// - ctx context.Context
+// - addr ccip.Address
+func (_e *PriceRegistry_Expecter) NewPriceRegistryReader(ctx interface{}, addr interface{}) *PriceRegistry_NewPriceRegistryReader_Call {
+ return &PriceRegistry_NewPriceRegistryReader_Call{Call: _e.mock.On("NewPriceRegistryReader", ctx, addr)}
+}
+
+func (_c *PriceRegistry_NewPriceRegistryReader_Call) Run(run func(ctx context.Context, addr ccip.Address)) *PriceRegistry_NewPriceRegistryReader_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(ccip.Address))
+ })
+ return _c
+}
+
+func (_c *PriceRegistry_NewPriceRegistryReader_Call) Return(_a0 ccip.PriceRegistryReader, _a1 error) *PriceRegistry_NewPriceRegistryReader_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PriceRegistry_NewPriceRegistryReader_Call) RunAndReturn(run func(context.Context, ccip.Address) (ccip.PriceRegistryReader, error)) *PriceRegistry_NewPriceRegistryReader_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewPriceRegistry creates a new instance of PriceRegistry. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewPriceRegistry(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *PriceRegistry {
+ mock := &PriceRegistry{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/provider.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/provider.go
new file mode 100644
index 00000000000..d1666d548ae
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/provider.go
@@ -0,0 +1,40 @@
+package ccipdataprovider
+
+import (
+ "context"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/observability"
+)
+
+// PriceRegistry constructs PriceRegistryReader instances for a price registry
+// deployed at a given address.
+type PriceRegistry interface {
+	NewPriceRegistryReader(ctx context.Context, addr cciptypes.Address) (cciptypes.PriceRegistryReader, error)
+}
+
+// EvmPriceRegistry builds readers for price registries deployed on EVM chains.
+type EvmPriceRegistry struct {
+	lp          logpoller.LogPoller // log poller handed to each constructed reader
+	ec          client.Client       // EVM client; also supplies the configured chain ID
+	lggr        logger.Logger
+	pluginLabel string // label attached to the observability wrapper
+}
+
+// NewEvmPriceRegistry builds an EvmPriceRegistry backed by the given log
+// poller, EVM client, logger, and plugin label.
+func NewEvmPriceRegistry(lp logpoller.LogPoller, ec client.Client, lggr logger.Logger, pluginLabel string) *EvmPriceRegistry {
+	registry := new(EvmPriceRegistry)
+	registry.lp = lp
+	registry.ec = ec
+	registry.lggr = lggr
+	registry.pluginLabel = pluginLabel
+	return registry
+}
+
+// NewPriceRegistryReader creates a version-appropriate reader for the price
+// registry at addr and wraps it with observability instrumentation labeled by
+// the configured chain ID and plugin label.
+func (p *EvmPriceRegistry) NewPriceRegistryReader(ctx context.Context, addr cciptypes.Address) (cciptypes.PriceRegistryReader, error) {
+	reader, err := factory.NewPriceRegistryReader(ctx, p.lggr, factory.NewEvmVersionFinder(), addr, p.lp, p.ec)
+	if err != nil {
+		return nil, err
+	}
+	chainID := p.ec.ConfiguredChainID().Int64()
+	return observability.NewPriceRegistryReader(reader, chainID, p.pluginLabel), nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader.go
new file mode 100644
index 00000000000..2b144b765ed
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader.go
@@ -0,0 +1,81 @@
+package ccipdata
+
+import (
+ "context"
+ "math/big"
+ "time"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/pkg/errors"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+)
+
+// Common to all versions
+// CommitOnchainConfig aliases the commit store's dynamic onchain config so
+// ABI encode/decode helpers can be attached to it.
+type CommitOnchainConfig commit_store.CommitStoreDynamicConfig
+
+// AbiString returns the tuple ABI used to encode/decode CommitOnchainConfig
+// (a single priceRegistry address field).
+func (d CommitOnchainConfig) AbiString() string {
+	return `
+	[
+		{
+			"components": [
+				{"name": "priceRegistry", "type": "address"}
+			],
+			"type": "tuple"
+		}
+	]`
+}
+
+func (d CommitOnchainConfig) Validate() error {
+ if d.PriceRegistry == (common.Address{}) {
+ return errors.New("must set Price Registry address")
+ }
+ return nil
+}
+
+func NewCommitOffchainConfig(
+ gasPriceDeviationPPB uint32,
+ gasPriceHeartBeat time.Duration,
+ tokenPriceDeviationPPB uint32,
+ tokenPriceHeartBeat time.Duration,
+ inflightCacheExpiry time.Duration,
+ priceReportingDisabled bool,
+) cciptypes.CommitOffchainConfig {
+ return cciptypes.CommitOffchainConfig{
+ GasPriceDeviationPPB: gasPriceDeviationPPB,
+ GasPriceHeartBeat: gasPriceHeartBeat,
+ TokenPriceDeviationPPB: tokenPriceDeviationPPB,
+ TokenPriceHeartBeat: tokenPriceHeartBeat,
+ InflightCacheExpiry: inflightCacheExpiry,
+ PriceReportingDisabled: priceReportingDisabled,
+ }
+}
+
+// CommitStoreReader extends the chain-agnostic cciptypes.CommitStoreReader
+// with EVM-specific setters for the fee estimator and the source chain's
+// maximum gas price.
+type CommitStoreReader interface {
+	cciptypes.CommitStoreReader
+	SetGasEstimator(ctx context.Context, gpe gas.EvmFeeEstimator) error
+	SetSourceMaxGasPrice(ctx context.Context, sourceMaxGasPrice *big.Int) error
+}
+
+// FetchCommitStoreStaticConfig provides access to a commitStore's static config, which is required to access the source chain ID.
+func FetchCommitStoreStaticConfig(address common.Address, ec client.Client) (commit_store.CommitStoreStaticConfig, error) {
+ commitStore, err := loadCommitStore(address, ec)
+ if err != nil {
+ return commit_store.CommitStoreStaticConfig{}, err
+ }
+ return commitStore.GetStaticConfig(&bind.CallOpts{})
+}
+
+// loadCommitStore verifies that the contract at commitStoreAddress is a
+// CommitStore before binding and returning its generated interface.
+func loadCommitStore(commitStoreAddress common.Address, client client.Client) (commit_store.CommitStoreInterface, error) {
+	if _, err := ccipconfig.VerifyTypeAndVersion(commitStoreAddress, client, ccipconfig.CommitStore); err != nil {
+		return nil, errors.Wrap(err, "Invalid commitStore contract")
+	}
+	return commit_store.NewCommitStore(commitStoreAddress, client)
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader_test.go
new file mode 100644
index 00000000000..4e134b1f175
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader_test.go
@@ -0,0 +1,423 @@
+package ccipdata_test
+
+import (
+ "context"
+ "math/big"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
+ evmclientmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ gasmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/mocks"
+ rollupMocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/rollups/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_helper_1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_helper_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_arm_contract"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+)
+
+// TestCommitOffchainConfig_Encoding round-trips v1.2.0 JSON offchain commit
+// configs through Encode/DecodeOffchainConfig and asserts that decoding
+// rejects configs whose required fields are zero-valued or missing.
+func TestCommitOffchainConfig_Encoding(t *testing.T) {
+	tests := map[string]struct {
+		want      v1_2_0.JSONCommitOffchainConfig
+		expectErr bool
+	}{
+		"encodes and decodes config with all fields set": {
+			want: v1_2_0.JSONCommitOffchainConfig{
+				SourceFinalityDepth:      3,
+				DestFinalityDepth:        3,
+				GasPriceHeartBeat:        *config.MustNewDuration(1 * time.Hour),
+				DAGasPriceDeviationPPB:   5e7,
+				ExecGasPriceDeviationPPB: 5e7,
+				TokenPriceHeartBeat:      *config.MustNewDuration(1 * time.Hour),
+				TokenPriceDeviationPPB:   5e7,
+				InflightCacheExpiry:      *config.MustNewDuration(23456 * time.Second),
+			},
+		},
+		"fails decoding when all fields present but with 0 values": {
+			want: v1_2_0.JSONCommitOffchainConfig{
+				SourceFinalityDepth:      0,
+				DestFinalityDepth:        0,
+				GasPriceHeartBeat:        *config.MustNewDuration(0),
+				DAGasPriceDeviationPPB:   0,
+				ExecGasPriceDeviationPPB: 0,
+				TokenPriceHeartBeat:      *config.MustNewDuration(0),
+				TokenPriceDeviationPPB:   0,
+				InflightCacheExpiry:      *config.MustNewDuration(0),
+			},
+			expectErr: true,
+		},
+		"fails decoding when all fields are missing": {
+			want:      v1_2_0.JSONCommitOffchainConfig{},
+			expectErr: true,
+		},
+		"fails decoding when some fields are missing": {
+			want: v1_2_0.JSONCommitOffchainConfig{
+				SourceFinalityDepth:      3,
+				GasPriceHeartBeat:        *config.MustNewDuration(1 * time.Hour),
+				DAGasPriceDeviationPPB:   5e7,
+				ExecGasPriceDeviationPPB: 5e7,
+				TokenPriceHeartBeat:      *config.MustNewDuration(1 * time.Hour),
+				TokenPriceDeviationPPB:   5e7,
+			},
+			expectErr: true,
+		},
+	}
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			encode, err := ccipconfig.EncodeOffchainConfig(tc.want)
+			require.NoError(t, err)
+			got, err := ccipconfig.DecodeOffchainConfig[v1_2_0.JSONCommitOffchainConfig](encode)
+
+			if tc.expectErr {
+				require.ErrorContains(t, err, "must set")
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, tc.want, got)
+			}
+		})
+	}
+}
+
+// TestCommitOnchainConfig round-trips CommitOnchainConfig through the ABI
+// struct encoder/decoder and asserts that decoding rejects a config whose
+// price registry address is unset.
+func TestCommitOnchainConfig(t *testing.T) {
+	tests := []struct {
+		name      string
+		want      ccipdata.CommitOnchainConfig
+		expectErr bool
+	}{
+		{
+			name: "encodes and decodes config with all fields set",
+			want: ccipdata.CommitOnchainConfig{
+				PriceRegistry: utils.RandomAddress(),
+			},
+			expectErr: false,
+		},
+		{
+			name:      "encodes and fails decoding config with missing fields",
+			want:      ccipdata.CommitOnchainConfig{},
+			expectErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			encoded, err := abihelpers.EncodeAbiStruct(tt.want)
+			require.NoError(t, err)
+
+			decoded, err := abihelpers.DecodeAbiStruct[ccipdata.CommitOnchainConfig](encoded)
+			if tt.expectErr {
+				require.ErrorContains(t, err, "must set")
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, tt.want, decoded)
+			}
+		})
+	}
+}
+
+// TestCommitStoreReaders deploys v1.0.0 and v1.2.0 commit store helpers on a
+// simulated backend and exercises both readers end to end: static config,
+// report encode/decode, log-poller-backed report queries, ARM cursing, and
+// gas price estimation after a config change.
+func TestCommitStoreReaders(t *testing.T) {
+	user, ec := newSim(t)
+	ctx := testutils.Context(t)
+	lggr := logger.TestLogger(t)
+	lpOpts := logpoller.Opts{
+		PollPeriod:               100 * time.Millisecond,
+		FinalityDepth:            2,
+		BackfillBatchSize:        3,
+		RpcBatchSize:             2,
+		KeepFinalizedBlocksDepth: 1000,
+	}
+	headTracker := headtracker.NewSimulatedHeadTracker(ec, lpOpts.UseFinalityTag, lpOpts.FinalityDepth)
+	if lpOpts.PollPeriod == 0 {
+		lpOpts.PollPeriod = 1 * time.Hour
+	}
+	lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, pgtest.NewSqlxDB(t), lggr), ec, lggr, headTracker, lpOpts)
+
+	// Deploy 2 commit store versions
+	onramp1 := utils.RandomAddress()
+	onramp2 := utils.RandomAddress()
+	// Report
+	rep := cciptypes.CommitStoreReport{
+		TokenPrices: []cciptypes.TokenPrice{{Token: ccipcalc.EvmAddrToGeneric(utils.RandomAddress()), Value: big.NewInt(1)}},
+		GasPrices:   []cciptypes.GasPrice{{DestChainSelector: 1, Value: big.NewInt(1)}},
+		Interval:    cciptypes.CommitStoreInterval{Min: 1, Max: 10},
+		MerkleRoot:  common.HexToHash("0x1"),
+	}
+	er := big.NewInt(1)
+	armAddr, _, arm, err := mock_arm_contract.DeployMockARMContract(user, ec)
+	require.NoError(t, err)
+	addr, _, ch, err := commit_store_helper_1_0_0.DeployCommitStoreHelper(user, ec, commit_store_helper_1_0_0.CommitStoreStaticConfig{
+		ChainSelector:       testutils.SimulatedChainID.Uint64(),
+		SourceChainSelector: testutils.SimulatedChainID.Uint64(),
+		OnRamp:              onramp1,
+		ArmProxy:            armAddr,
+	})
+	require.NoError(t, err)
+	addr2, _, ch2, err := commit_store_helper_1_2_0.DeployCommitStoreHelper(user, ec, commit_store_helper_1_2_0.CommitStoreStaticConfig{
+		ChainSelector:       testutils.SimulatedChainID.Uint64(),
+		SourceChainSelector: testutils.SimulatedChainID.Uint64(),
+		OnRamp:              onramp2,
+		ArmProxy:            armAddr,
+	})
+	require.NoError(t, err)
+	commitAndGetBlockTs(ec) // Deploy these
+	pr, _, _, err := price_registry_1_0_0.DeployPriceRegistry(user, ec, []common.Address{addr}, nil, 1e6)
+	require.NoError(t, err)
+	pr2, _, _, err := price_registry_1_2_0.DeployPriceRegistry(user, ec, []common.Address{addr2}, nil, 1e6)
+	require.NoError(t, err)
+	commitAndGetBlockTs(ec) // Deploy these
+	// Mock fee estimator backed by a mocked L1 oracle for DA gas prices.
+	ge := new(gasmocks.EvmFeeEstimator)
+	lm := new(rollupMocks.L1Oracle)
+	ge.On("L1Oracle").Return(lm)
+
+	maxGasPrice := big.NewInt(1e8)
+	c10r, err := factory.NewCommitStoreReader(lggr, factory.NewEvmVersionFinder(), ccipcalc.EvmAddrToGeneric(addr), ec, lp) // ge, maxGasPrice
+	require.NoError(t, err)
+	err = c10r.SetGasEstimator(ctx, ge)
+	require.NoError(t, err)
+	err = c10r.SetSourceMaxGasPrice(ctx, maxGasPrice)
+	require.NoError(t, err)
+	assert.Equal(t, reflect.TypeOf(c10r).String(), reflect.TypeOf(&v1_0_0.CommitStore{}).String())
+	c12r, err := factory.NewCommitStoreReader(lggr, factory.NewEvmVersionFinder(), ccipcalc.EvmAddrToGeneric(addr2), ec, lp)
+	require.NoError(t, err)
+	err = c12r.SetGasEstimator(ctx, ge)
+	require.NoError(t, err)
+	err = c12r.SetSourceMaxGasPrice(ctx, maxGasPrice)
+	require.NoError(t, err)
+	assert.Equal(t, reflect.TypeOf(c12r).String(), reflect.TypeOf(&v1_2_0.CommitStore{}).String())
+
+	// Apply config
+	signers := []common.Address{utils.RandomAddress(), utils.RandomAddress(), utils.RandomAddress(), utils.RandomAddress()}
+	transmitters := []common.Address{utils.RandomAddress(), utils.RandomAddress(), utils.RandomAddress(), utils.RandomAddress()}
+	onchainConfig, err := abihelpers.EncodeAbiStruct[ccipdata.CommitOnchainConfig](ccipdata.CommitOnchainConfig{
+		PriceRegistry: pr,
+	})
+	require.NoError(t, err)
+
+	sourceFinalityDepth := uint32(1)
+	destFinalityDepth := uint32(2)
+	commonOffchain := cciptypes.CommitOffchainConfig{
+		GasPriceDeviationPPB:   1e6,
+		GasPriceHeartBeat:      1 * time.Hour,
+		TokenPriceDeviationPPB: 1e6,
+		TokenPriceHeartBeat:    1 * time.Hour,
+		InflightCacheExpiry:    3 * time.Hour,
+		PriceReportingDisabled: false,
+	}
+	offchainConfig, err := ccipconfig.EncodeOffchainConfig[v1_0_0.CommitOffchainConfig](v1_0_0.CommitOffchainConfig{
+		SourceFinalityDepth:   sourceFinalityDepth,
+		DestFinalityDepth:     destFinalityDepth,
+		FeeUpdateHeartBeat:    *config.MustNewDuration(commonOffchain.GasPriceHeartBeat),
+		FeeUpdateDeviationPPB: commonOffchain.GasPriceDeviationPPB,
+		InflightCacheExpiry:   *config.MustNewDuration(commonOffchain.InflightCacheExpiry),
+	})
+	require.NoError(t, err)
+	_, err = ch.SetOCR2Config(user, signers, transmitters, 1, onchainConfig, 1, []byte{})
+	require.NoError(t, err)
+	onchainConfig2, err := abihelpers.EncodeAbiStruct[ccipdata.CommitOnchainConfig](ccipdata.CommitOnchainConfig{
+		PriceRegistry: pr2,
+	})
+	require.NoError(t, err)
+	offchainConfig2, err := ccipconfig.EncodeOffchainConfig[v1_2_0.JSONCommitOffchainConfig](v1_2_0.JSONCommitOffchainConfig{
+		SourceFinalityDepth:      sourceFinalityDepth,
+		DestFinalityDepth:        destFinalityDepth,
+		GasPriceHeartBeat:        *config.MustNewDuration(commonOffchain.GasPriceHeartBeat),
+		DAGasPriceDeviationPPB:   1e7,
+		ExecGasPriceDeviationPPB: commonOffchain.GasPriceDeviationPPB,
+		TokenPriceDeviationPPB:   commonOffchain.TokenPriceDeviationPPB,
+		TokenPriceHeartBeat:      *config.MustNewDuration(commonOffchain.TokenPriceHeartBeat),
+		InflightCacheExpiry:      *config.MustNewDuration(commonOffchain.InflightCacheExpiry),
+	})
+	require.NoError(t, err)
+	_, err = ch2.SetOCR2Config(user, signers, transmitters, 1, onchainConfig2, 1, []byte{})
+	require.NoError(t, err)
+	commitAndGetBlockTs(ec)
+
+	// Apply report
+	b, err := c10r.EncodeCommitReport(ctx, rep)
+	require.NoError(t, err)
+	_, err = ch.Report(user, b, er)
+	require.NoError(t, err)
+	b, err = c12r.EncodeCommitReport(ctx, rep)
+	require.NoError(t, err)
+	_, err = ch2.Report(user, b, er)
+	require.NoError(t, err)
+	commitAndGetBlockTs(ec)
+
+	// Capture all logs.
+	lp.PollAndSaveLogs(context.Background(), 1)
+
+	configs := map[string][][]byte{
+		ccipdata.V1_0_0: {onchainConfig, offchainConfig},
+		ccipdata.V1_2_0: {onchainConfig2, offchainConfig2},
+	}
+	crs := map[string]ccipdata.CommitStoreReader{
+		ccipdata.V1_0_0: c10r,
+		ccipdata.V1_2_0: c12r,
+	}
+	prs := map[string]common.Address{
+		ccipdata.V1_0_0: pr,
+		ccipdata.V1_2_0: pr2,
+	}
+	gasPrice := big.NewInt(10)
+	daPrice := big.NewInt(20)
+	ge.On("GetFee", mock.Anything, mock.Anything, mock.Anything, assets.NewWei(maxGasPrice)).Return(gas.EvmFee{Legacy: assets.NewWei(gasPrice)}, uint64(0), nil)
+	lm.On("GasPrice", mock.Anything).Return(assets.NewWei(daPrice), nil)
+
+	for v, cr := range crs {
+		cr := cr
+		t.Run("CommitStoreReader "+v, func(t *testing.T) {
+			// Static config.
+			cfg, err := cr.GetCommitStoreStaticConfig(context.Background())
+			require.NoError(t, err)
+			require.NotNil(t, cfg)
+
+			// Assert encoding
+			b, err := cr.EncodeCommitReport(ctx, rep)
+			require.NoError(t, err)
+			d, err := cr.DecodeCommitReport(ctx, b)
+			require.NoError(t, err)
+			assert.Equal(t, d, rep)
+
+			// Assert reading
+			latest, err := cr.GetLatestPriceEpochAndRound(context.Background())
+			require.NoError(t, err)
+			assert.Equal(t, er.Uint64(), latest)
+
+			// Assert cursing
+			down, err := cr.IsDown(context.Background())
+			require.NoError(t, err)
+			assert.False(t, down)
+			_, err = arm.VoteToCurse(user, [32]byte{})
+			require.NoError(t, err)
+			ec.Commit()
+			down, err = cr.IsDown(context.Background())
+			require.NoError(t, err)
+			assert.True(t, down)
+			_, err = arm.OwnerUnvoteToCurse0(user, nil)
+			require.NoError(t, err)
+			ec.Commit()
+
+			seqNr, err := cr.GetExpectedNextSequenceNumber(context.Background())
+			require.NoError(t, err)
+			assert.Equal(t, rep.Interval.Max+1, seqNr)
+
+			reps, err := cr.GetCommitReportMatchingSeqNum(context.Background(), rep.Interval.Max+1, 0)
+			require.NoError(t, err)
+			assert.Len(t, reps, 0)
+
+			reps, err = cr.GetCommitReportMatchingSeqNum(context.Background(), rep.Interval.Max, 0)
+			require.NoError(t, err)
+			require.Len(t, reps, 1)
+			assert.Equal(t, reps[0].Interval, rep.Interval)
+			assert.Equal(t, reps[0].MerkleRoot, rep.MerkleRoot)
+			assert.Equal(t, reps[0].GasPrices, rep.GasPrices)
+			assert.Equal(t, reps[0].TokenPrices, rep.TokenPrices)
+
+			reps, err = cr.GetCommitReportMatchingSeqNum(context.Background(), rep.Interval.Min, 0)
+			require.NoError(t, err)
+			require.Len(t, reps, 1)
+			assert.Equal(t, reps[0].Interval, rep.Interval)
+			assert.Equal(t, reps[0].MerkleRoot, rep.MerkleRoot)
+			assert.Equal(t, reps[0].GasPrices, rep.GasPrices)
+			assert.Equal(t, reps[0].TokenPrices, rep.TokenPrices)
+
+			reps, err = cr.GetCommitReportMatchingSeqNum(context.Background(), rep.Interval.Min-1, 0)
+			require.NoError(t, err)
+			require.Len(t, reps, 0)
+
+			// Sanity
+			reps, err = cr.GetAcceptedCommitReportsGteTimestamp(context.Background(), time.Unix(0, 0), 0)
+			require.NoError(t, err)
+			require.Len(t, reps, 1)
+			assert.Equal(t, reps[0].Interval, rep.Interval)
+			assert.Equal(t, reps[0].MerkleRoot, rep.MerkleRoot)
+			assert.Equal(t, reps[0].GasPrices, rep.GasPrices)
+			assert.Equal(t, reps[0].TokenPrices, rep.TokenPrices)
+
+			// Until we detect the config, we'll have empty offchain config
+			c1, err := cr.OffchainConfig(ctx)
+			require.NoError(t, err)
+			assert.Equal(t, c1, cciptypes.CommitOffchainConfig{})
+			newPr, err := cr.ChangeConfig(ctx, configs[v][0], configs[v][1])
+			require.NoError(t, err)
+			assert.Equal(t, ccipcalc.EvmAddrToGeneric(prs[v]), newPr)
+
+			c2, err := cr.OffchainConfig(ctx)
+			require.NoError(t, err)
+			assert.Equal(t, commonOffchain, c2)
+			// We should be able to query for gas prices now.
+			gpe, err := cr.GasPriceEstimator(ctx)
+			require.NoError(t, err)
+			gp, err := gpe.GetGasPrice(context.Background())
+			require.NoError(t, err)
+			assert.True(t, gp.Cmp(big.NewInt(0)) > 0)
+		})
+	}
+}
+
+// TestNewCommitStoreReader checks version dispatch in
+// factory.NewCommitStoreReader: malformed type-and-version strings, wrong
+// contract types, and unsupported versions yield the expected errors, while a
+// supported version registers a log-poller filter and succeeds.
+func TestNewCommitStoreReader(t *testing.T) {
+	var tt = []struct {
+		typeAndVersion string
+		expectedErr    string
+	}{
+		{
+			typeAndVersion: "blah",
+			expectedErr:    "unable to read type and version: invalid type and version blah",
+		},
+		{
+			typeAndVersion: "EVM2EVMOffRamp 1.0.0",
+			expectedErr:    "expected CommitStore got EVM2EVMOffRamp",
+		},
+		{
+			typeAndVersion: "CommitStore 1.2.0",
+			expectedErr:    "",
+		},
+		{
+			typeAndVersion: "CommitStore 2.0.0",
+			expectedErr:    "unsupported commit store version 2.0.0",
+		},
+	}
+	for _, tc := range tt {
+		t.Run(tc.typeAndVersion, func(t *testing.T) {
+			// The mocked client returns the ABI-encoded type-and-version string
+			// for any contract call, driving the factory's version detection.
+			b, err := utils.ABIEncode(`[{"type":"string"}]`, tc.typeAndVersion)
+			require.NoError(t, err)
+			c := evmclientmocks.NewClient(t)
+			c.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(b, nil)
+			addr := ccipcalc.EvmAddrToGeneric(utils.RandomAddress())
+			lp := lpmocks.NewLogPoller(t)
+			if tc.expectedErr == "" {
+				lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil)
+			}
+			_, err = factory.NewCommitStoreReader(logger.TestLogger(t), factory.NewEvmVersionFinder(), addr, c, lp)
+			if tc.expectedErr != "" {
+				require.EqualError(t, err, tc.expectedErr)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store.go
new file mode 100644
index 00000000000..d431d2863a0
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store.go
@@ -0,0 +1,121 @@
+package factory
+
+import (
+ "github.com/Masterminds/semver/v3"
+ "github.com/pkg/errors"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0"
+)
+
+// NewCommitStoreReader constructs a version-appropriate CommitStoreReader for
+// the commit store at address, registering its log-poller filters.
+func NewCommitStoreReader(lggr logger.Logger, versionFinder VersionFinder, address cciptypes.Address, ec client.Client, lp logpoller.LogPoller) (ccipdata.CommitStoreReader, error) {
+	return initOrCloseCommitStoreReader(lggr, versionFinder, address, ec, lp, false)
+}
+
+// CloseCommitStoreReader builds the version-appropriate reader only to tear
+// it down, unregistering the log-poller filters it would have used.
+func CloseCommitStoreReader(lggr logger.Logger, versionFinder VersionFinder, address cciptypes.Address, ec client.Client, lp logpoller.LogPoller) error {
+	_, err := initOrCloseCommitStoreReader(lggr, versionFinder, address, ec, lp, true)
+	return err
+}
+
+// initOrCloseCommitStoreReader resolves the contract type and version at
+// address, then either constructs the matching version-specific reader and
+// registers its log-poller filters, or — when closeReader is true — builds it
+// solely to Close it, which unregisters those filters.
+func initOrCloseCommitStoreReader(lggr logger.Logger, versionFinder VersionFinder, address cciptypes.Address, ec client.Client, lp logpoller.LogPoller, closeReader bool) (ccipdata.CommitStoreReader, error) {
+	contractType, version, err := versionFinder.TypeAndVersion(address, ec)
+	if err != nil {
+		return nil, errors.Wrapf(err, "unable to read type and version")
+	}
+	if contractType != ccipconfig.CommitStore {
+		return nil, errors.Errorf("expected %v got %v", ccipconfig.CommitStore, contractType)
+	}
+
+	evmAddr, err := ccipcalc.GenericAddrToEvm(address)
+	if err != nil {
+		return nil, err
+	}
+
+	lggr.Infow("Initializing CommitStore Reader", "version", version.String())
+
+	switch version.String() {
+	case ccipdata.V1_0_0, ccipdata.V1_1_0: // Versions are identical
+		cs, err := v1_0_0.NewCommitStore(lggr, evmAddr, ec, lp)
+		if err != nil {
+			return nil, err
+		}
+		if closeReader {
+			return nil, cs.Close()
+		}
+		return cs, cs.RegisterFilters()
+	case ccipdata.V1_2_0:
+		cs, err := v1_2_0.NewCommitStore(lggr, evmAddr, ec, lp)
+		if err != nil {
+			return nil, err
+		}
+		if closeReader {
+			return nil, cs.Close()
+		}
+		return cs, cs.RegisterFilters()
+	case ccipdata.V1_5_0:
+		cs, err := v1_5_0.NewCommitStore(lggr, evmAddr, ec, lp)
+		if err != nil {
+			return nil, err
+		}
+		if closeReader {
+			return nil, cs.Close()
+		}
+		return cs, cs.RegisterFilters()
+	default:
+		return nil, errors.Errorf("unsupported commit store version %v", version.String())
+	}
+}
+
+// CommitReportToEthTxMeta returns a decoder that extracts the committed
+// sequence-number range from a raw CommitStore report and packages it as
+// txmgr.TxMeta. Errors if typ is not a CommitStore or the version is unknown.
+func CommitReportToEthTxMeta(typ ccipconfig.ContractType, ver semver.Version) (func(report []byte) (*txmgr.TxMeta, error), error) {
+	if typ != ccipconfig.CommitStore {
+		return nil, errors.Errorf("expected %v got %v", ccipconfig.CommitStore, typ)
+	}
+	switch ver.String() {
+	case ccipdata.V1_0_0, ccipdata.V1_1_0:
+		commitStoreABI := abihelpers.MustParseABI(commit_store_1_0_0.CommitStoreABI)
+		return func(report []byte) (*txmgr.TxMeta, error) {
+			commitReport, err := v1_0_0.DecodeCommitReport(abihelpers.MustGetEventInputs(v1_0_0.ReportAccepted, commitStoreABI), report)
+			if err != nil {
+				return nil, err
+			}
+			return commitReportToEthTxMeta(commitReport)
+		}, nil
+	case ccipdata.V1_2_0, ccipdata.V1_5_0:
+		commitStoreABI := abihelpers.MustParseABI(commit_store.CommitStoreABI)
+		return func(report []byte) (*txmgr.TxMeta, error) {
+			// NOTE(review): this branch reuses v1_0_0.ReportAccepted as the event
+			// name while decoding with the v1_2_0 decoder — presumably the event
+			// name is identical across versions; confirm against the v1_2_0 package.
+			commitReport, err := v1_2_0.DecodeCommitReport(abihelpers.MustGetEventInputs(v1_0_0.ReportAccepted, commitStoreABI), report)
+			if err != nil {
+				return nil, err
+			}
+			return commitReportToEthTxMeta(commitReport)
+		}, nil
+	default:
+		return nil, errors.Errorf("got unexpected version %v", ver.String())
+	}
+}
+
+// commitReportToEthTxMeta generates a txmgr.TxMeta from the given commit
+// report: every sequence number in the report's [Min, Max] interval is
+// attached to the transaction metadata.
+func commitReportToEthTxMeta(commitReport cciptypes.CommitStoreReport) (*txmgr.TxMeta, error) {
+	seqNums := make([]uint64, 0, (commitReport.Interval.Max-commitReport.Interval.Min)+1)
+	for seq := commitReport.Interval.Min; seq <= commitReport.Interval.Max; seq++ {
+		seqNums = append(seqNums, seq)
+	}
+	return &txmgr.TxMeta{
+		SeqNumbers: seqNums,
+	}, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store_test.go
new file mode 100644
index 00000000000..e1b8ff929c3
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store_test.go
@@ -0,0 +1,37 @@
+package factory
+
+import (
+ "testing"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ mocks2 "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+)
+
+// TestCommitStore verifies that NewCommitStoreReader registers the expected
+// log-poller filter for each supported version and that CloseCommitStoreReader
+// unregisters it again.
+func TestCommitStore(t *testing.T) {
+	for _, versionStr := range []string{ccipdata.V1_0_0, ccipdata.V1_2_0} {
+		lggr := logger.TestLogger(t)
+		addr := cciptypes.Address(utils.RandomAddress().String())
+		lp := mocks2.NewLogPoller(t)
+
+		lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil)
+		versionFinder := newMockVersionFinder(ccipconfig.CommitStore, *semver.MustParse(versionStr), nil)
+		_, err := NewCommitStoreReader(lggr, versionFinder, addr, nil, lp)
+		assert.NoError(t, err)
+
+		expFilterName := logpoller.FilterName(v1_0_0.EXEC_REPORT_ACCEPTS, addr)
+		lp.On("UnregisterFilter", mock.Anything, expFilterName).Return(nil)
+		err = CloseCommitStoreReader(lggr, versionFinder, addr, nil, lp)
+		assert.NoError(t, err)
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp.go
new file mode 100644
index 00000000000..c6fa63ee820
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp.go
@@ -0,0 +1,125 @@
+package factory
+
+import (
+ "context"
+ "math/big"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/pkg/errors"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0"
+)
+
+// NewOffRampReader constructs a version-appropriate OffRampReader for the
+// offramp at addr on the destination chain.
+func NewOffRampReader(lggr logger.Logger, versionFinder VersionFinder, addr cciptypes.Address, destClient client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int, registerFilters bool) (ccipdata.OffRampReader, error) {
+	return initOrCloseOffRampReader(lggr, versionFinder, addr, destClient, lp, estimator, destMaxGasPrice, false, registerFilters)
+}
+
+// CloseOffRampReader builds the version-appropriate offramp reader only to
+// tear it down, unregistering the log-poller filters it would have used.
+func CloseOffRampReader(lggr logger.Logger, versionFinder VersionFinder, addr cciptypes.Address, destClient client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int) error {
+	_, err := initOrCloseOffRampReader(lggr, versionFinder, addr, destClient, lp, estimator, destMaxGasPrice, true, false)
+	return err
+}
+
+// initOrCloseOffRampReader resolves the offramp's type and version at addr,
+// then either constructs the matching version-specific reader and registers
+// its log-poller filters, or — when closeReader is true — builds it solely to
+// Close it, which unregisters those filters.
+//
+// NOTE(review): the registerFilters parameter is accepted but not consulted
+// below — filters are always registered when closeReader is false. Confirm
+// whether conditional registration was intended.
+func initOrCloseOffRampReader(lggr logger.Logger, versionFinder VersionFinder, addr cciptypes.Address, destClient client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int, closeReader bool, registerFilters bool) (ccipdata.OffRampReader, error) {
+	contractType, version, err := versionFinder.TypeAndVersion(addr, destClient)
+	if err != nil {
+		return nil, errors.Wrapf(err, "unable to read type and version")
+	}
+	if contractType != ccipconfig.EVM2EVMOffRamp {
+		return nil, errors.Errorf("expected %v got %v", ccipconfig.EVM2EVMOffRamp, contractType)
+	}
+
+	evmAddr, err := ccipcalc.GenericAddrToEvm(addr)
+	if err != nil {
+		return nil, err
+	}
+
+	lggr.Infow("Initializing OffRamp Reader", "version", version.String(), "destMaxGasPrice", destMaxGasPrice.String())
+
+	switch version.String() {
+	case ccipdata.V1_0_0, ccipdata.V1_1_0:
+		offRamp, err := v1_0_0.NewOffRamp(lggr, evmAddr, destClient, lp, estimator, destMaxGasPrice)
+		if err != nil {
+			return nil, err
+		}
+		if closeReader {
+			return nil, offRamp.Close()
+		}
+		return offRamp, offRamp.RegisterFilters()
+	case ccipdata.V1_2_0:
+		offRamp, err := v1_2_0.NewOffRamp(lggr, evmAddr, destClient, lp, estimator, destMaxGasPrice)
+		if err != nil {
+			return nil, err
+		}
+		if closeReader {
+			return nil, offRamp.Close()
+		}
+		return offRamp, offRamp.RegisterFilters()
+	case ccipdata.V1_5_0:
+		offRamp, err := v1_5_0.NewOffRamp(lggr, evmAddr, destClient, lp, estimator, destMaxGasPrice)
+		if err != nil {
+			return nil, err
+		}
+		if closeReader {
+			return nil, offRamp.Close()
+		}
+		return offRamp, offRamp.RegisterFilters()
+	default:
+		return nil, errors.Errorf("unsupported offramp version %v", version.String())
+	}
+	// TODO can validate it pointing to the correct version
+}
+
+// ExecReportToEthTxMeta returns a decoder that extracts the message IDs from
+// a raw offramp execution report and packages them as txmgr.TxMeta. Errors if
+// typ is not an EVM2EVMOffRamp or the version is unknown.
+func ExecReportToEthTxMeta(ctx context.Context, typ ccipconfig.ContractType, ver semver.Version) (func(report []byte) (*txmgr.TxMeta, error), error) {
+	if typ != ccipconfig.EVM2EVMOffRamp {
+		return nil, errors.Errorf("expected %v got %v", ccipconfig.EVM2EVMOffRamp, typ)
+	}
+	switch ver.String() {
+	case ccipdata.V1_0_0, ccipdata.V1_1_0:
+		offRampABI := abihelpers.MustParseABI(evm_2_evm_offramp_1_0_0.EVM2EVMOffRampABI)
+		return func(report []byte) (*txmgr.TxMeta, error) {
+			// Only the first input of manuallyExecute (the report) is decoded.
+			execReport, err := v1_0_0.DecodeExecReport(ctx, abihelpers.MustGetMethodInputs(ccipdata.ManuallyExecute, offRampABI)[:1], report)
+			if err != nil {
+				return nil, err
+			}
+			return execReportToEthTxMeta(execReport)
+		}, nil
+	case ccipdata.V1_2_0, ccipdata.V1_5_0:
+		offRampABI := abihelpers.MustParseABI(evm_2_evm_offramp.EVM2EVMOffRampABI)
+		return func(report []byte) (*txmgr.TxMeta, error) {
+			execReport, err := v1_2_0.DecodeExecReport(ctx, abihelpers.MustGetMethodInputs(ccipdata.ManuallyExecute, offRampABI)[:1], report)
+			if err != nil {
+				return nil, err
+			}
+			return execReportToEthTxMeta(execReport)
+		}, nil
+	default:
+		return nil, errors.Errorf("got unexpected version %v", ver.String())
+	}
+}
+
+// execReportToEthTxMeta builds a txmgr.TxMeta carrying the hex-encoded
+// message ID of every message in the execution report.
+func execReportToEthTxMeta(execReport cciptypes.ExecReport) (*txmgr.TxMeta, error) {
+	ids := make([]string, 0, len(execReport.Messages))
+	for _, message := range execReport.Messages {
+		ids = append(ids, hexutil.Encode(message.MessageID[:]))
+	}
+
+	return &txmgr.TxMeta{
+		MessageIDs: ids,
+	}, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp_test.go
new file mode 100644
index 00000000000..4b9e57ecfbd
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp_test.go
@@ -0,0 +1,44 @@
+package factory
+
+import (
+ "testing"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ mocks2 "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+)
+
+// TestOffRamp verifies that, for each supported offramp version, the factory
+// registers the expected log-poller filters on create and unregisters them
+// on close.
+func TestOffRamp(t *testing.T) {
+	for _, versionStr := range []string{ccipdata.V1_0_0, ccipdata.V1_2_0} {
+		lggr := logger.TestLogger(t)
+		addr := cciptypes.Address(utils.RandomAddress().String())
+		lp := mocks2.NewLogPoller(t)
+
+		// Filter names are shared across reader versions, so the v1_0_0
+		// constants are used for both iterations.
+		expFilterNames := []string{
+			logpoller.FilterName(v1_0_0.EXEC_EXECUTION_STATE_CHANGES, addr),
+			logpoller.FilterName(v1_0_0.EXEC_TOKEN_POOL_ADDED, addr),
+			logpoller.FilterName(v1_0_0.EXEC_TOKEN_POOL_REMOVED, addr),
+		}
+		versionFinder := newMockVersionFinder(ccipconfig.EVM2EVMOffRamp, *semver.MustParse(versionStr), nil)
+
+		lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil).Times(len(expFilterNames))
+		_, err := NewOffRampReader(lggr, versionFinder, addr, nil, lp, nil, nil, true)
+		assert.NoError(t, err)
+
+		for _, f := range expFilterNames {
+			lp.On("UnregisterFilter", mock.Anything, f).Return(nil)
+		}
+		err = CloseOffRampReader(lggr, versionFinder, addr, nil, lp, nil, nil)
+		assert.NoError(t, err)
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp.go
new file mode 100644
index 00000000000..e82584ac7cc
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp.go
@@ -0,0 +1,88 @@
+package factory
+
+import (
+ "github.com/pkg/errors"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_1_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0"
+)
+
+// NewOnRampReader determines the appropriate version of the onramp at
+// onRampAddress (via an on-chain typeAndVersion call) and returns a reader
+// for it, registering the reader's log-poller filters.
+func NewOnRampReader(lggr logger.Logger, versionFinder VersionFinder, sourceSelector, destSelector uint64, onRampAddress cciptypes.Address, sourceLP logpoller.LogPoller, source client.Client) (ccipdata.OnRampReader, error) {
+	return initOrCloseOnRampReader(lggr, versionFinder, sourceSelector, destSelector, onRampAddress, sourceLP, source, false)
+}
+
+// CloseOnRampReader builds the version-appropriate onramp reader only to
+// close it, unregistering its log-poller filters.
+func CloseOnRampReader(lggr logger.Logger, versionFinder VersionFinder, sourceSelector, destSelector uint64, onRampAddress cciptypes.Address, sourceLP logpoller.LogPoller, source client.Client) error {
+	_, err := initOrCloseOnRampReader(lggr, versionFinder, sourceSelector, destSelector, onRampAddress, sourceLP, source, true)
+	return err
+}
+
+// initOrCloseOnRampReader resolves the contract's on-chain type/version and
+// constructs the matching versioned onramp reader. With closeReader=false the
+// reader's log-poller filters are registered and the reader is returned; with
+// closeReader=true the reader is built only so Close can unregister its
+// filters, and a nil reader is returned.
+func initOrCloseOnRampReader(lggr logger.Logger, versionFinder VersionFinder, sourceSelector, destSelector uint64, onRampAddress cciptypes.Address, sourceLP logpoller.LogPoller, source client.Client, closeReader bool) (ccipdata.OnRampReader, error) {
+	contractType, version, err := versionFinder.TypeAndVersion(onRampAddress, source)
+	if err != nil {
+		return nil, errors.Wrapf(err, "unable to read type and version")
+	}
+	if contractType != ccipconfig.EVM2EVMOnRamp {
+		return nil, errors.Errorf("expected %v got %v", ccipconfig.EVM2EVMOnRamp, contractType)
+	}
+
+	onRampAddrEvm, err := ccipcalc.GenericAddrToEvm(onRampAddress)
+	if err != nil {
+		return nil, err
+	}
+
+	lggr.Infof("Initializing onRamp for version %v", version.String())
+
+	switch version.String() {
+	case ccipdata.V1_0_0:
+		onRamp, err := v1_0_0.NewOnRamp(lggr, sourceSelector, destSelector, onRampAddrEvm, sourceLP, source)
+		if err != nil {
+			return nil, err
+		}
+		if closeReader {
+			return nil, onRamp.Close()
+		}
+		return onRamp, onRamp.RegisterFilters()
+	case ccipdata.V1_1_0:
+		onRamp, err := v1_1_0.NewOnRamp(lggr, sourceSelector, destSelector, onRampAddrEvm, sourceLP, source)
+		if err != nil {
+			return nil, err
+		}
+		if closeReader {
+			return nil, onRamp.Close()
+		}
+		return onRamp, onRamp.RegisterFilters()
+	case ccipdata.V1_2_0:
+		onRamp, err := v1_2_0.NewOnRamp(lggr, sourceSelector, destSelector, onRampAddrEvm, sourceLP, source)
+		if err != nil {
+			return nil, err
+		}
+		if closeReader {
+			return nil, onRamp.Close()
+		}
+		return onRamp, onRamp.RegisterFilters()
+	case ccipdata.V1_5_0:
+		onRamp, err := v1_5_0.NewOnRamp(lggr, sourceSelector, destSelector, onRampAddrEvm, sourceLP, source)
+		if err != nil {
+			return nil, err
+		}
+		if closeReader {
+			return nil, onRamp.Close()
+		}
+		return onRamp, onRamp.RegisterFilters()
+	// Adding a new version?
+	// Please update the public factory function in leafer.go if the new version updates the leaf hash function.
+	default:
+		return nil, errors.Errorf("unsupported onramp version %v", version.String())
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp_test.go
new file mode 100644
index 00000000000..8cf47ddc7be
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp_test.go
@@ -0,0 +1,45 @@
+package factory
+
+import (
+ "testing"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ mocks2 "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+)
+
+// TestOnRamp verifies that, for each supported onramp version, the factory
+// registers the expected log-poller filters on create and unregisters them
+// on close.
+func TestOnRamp(t *testing.T) {
+	for _, versionStr := range []string{ccipdata.V1_0_0, ccipdata.V1_1_0, ccipdata.V1_2_0, ccipdata.V1_5_0} {
+		lggr := logger.TestLogger(t)
+		addr := cciptypes.Address(utils.RandomAddress().String())
+		lp := mocks2.NewLogPoller(t)
+
+		// Arbitrary chain selectors; the mock version finder never checks them.
+		sourceSelector := uint64(1000)
+		destSelector := uint64(2000)
+
+		expFilterNames := []string{
+			logpoller.FilterName(ccipdata.COMMIT_CCIP_SENDS, addr),
+			logpoller.FilterName(ccipdata.CONFIG_CHANGED, addr),
+		}
+		versionFinder := newMockVersionFinder(ccipconfig.EVM2EVMOnRamp, *semver.MustParse(versionStr), nil)
+
+		lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil).Times(len(expFilterNames))
+		_, err := NewOnRampReader(lggr, versionFinder, sourceSelector, destSelector, addr, lp, nil)
+		assert.NoError(t, err)
+
+		for _, f := range expFilterNames {
+			lp.On("UnregisterFilter", mock.Anything, f).Return(nil)
+		}
+		err = CloseOnRampReader(lggr, versionFinder, sourceSelector, destSelector, addr, lp, nil)
+		assert.NoError(t, err)
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry.go
new file mode 100644
index 00000000000..f1fa7c4e81a
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry.go
@@ -0,0 +1,82 @@
+package factory
+
+import (
+ "context"
+
+ "github.com/pkg/errors"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+)
+
+// NewPriceRegistryReader determines the appropriate version of the price
+// registry (via an on-chain typeAndVersion call) and returns a reader for it,
+// registering the reader's log-poller filters.
+func NewPriceRegistryReader(ctx context.Context, lggr logger.Logger, versionFinder VersionFinder, priceRegistryAddress cciptypes.Address, lp logpoller.LogPoller, cl client.Client) (ccipdata.PriceRegistryReader, error) {
+	return initOrClosePriceRegistryReader(ctx, lggr, versionFinder, priceRegistryAddress, lp, cl, false)
+}
+
+// ClosePriceRegistryReader builds the version-appropriate price registry
+// reader only to close it, unregistering its log-poller filters.
+func ClosePriceRegistryReader(ctx context.Context, lggr logger.Logger, versionFinder VersionFinder, priceRegistryAddress cciptypes.Address, lp logpoller.LogPoller, cl client.Client) error {
+	_, err := initOrClosePriceRegistryReader(ctx, lggr, versionFinder, priceRegistryAddress, lp, cl, true)
+	return err
+}
+
+// initOrClosePriceRegistryReader resolves the registry's on-chain
+// type/version and constructs the matching versioned reader. With
+// closeReader=false the reader's log-poller filters are registered and the
+// reader is returned; with closeReader=true the reader is built only so Close
+// can unregister its filters, and a nil reader is returned.
+func initOrClosePriceRegistryReader(ctx context.Context, lggr logger.Logger, versionFinder VersionFinder, priceRegistryAddress cciptypes.Address, lp logpoller.LogPoller, cl client.Client, closeReader bool) (ccipdata.PriceRegistryReader, error) {
+	registerFilters := !closeReader
+
+	priceRegistryEvmAddr, err := ccipcalc.GenericAddrToEvm(priceRegistryAddress)
+	if err != nil {
+		return nil, err
+	}
+
+	contractType, version, err := versionFinder.TypeAndVersion(priceRegistryAddress, cl)
+	// Unfortunately the v1 price registry doesn't have a method to get the version so assume if it reverts its v1.
+	isV1_0_0 := ccipcommon.IsTxRevertError(err) || (contractType == ccipconfig.PriceRegistry && version.String() == ccipdata.V1_0_0)
+	if isV1_0_0 {
+		lggr.Infof("Assuming %v is 1.0.0 price registry, got %v", priceRegistryEvmAddr, err)
+		pr, err2 := v1_0_0.NewPriceRegistry(lggr, priceRegistryEvmAddr, lp, cl, registerFilters)
+		if err2 != nil {
+			return nil, err2
+		}
+		if closeReader {
+			return nil, pr.Close()
+		}
+		return pr, nil
+	}
+	if err != nil {
+		return nil, errors.Wrapf(err, "unable to read type and version")
+	}
+
+	if contractType != ccipconfig.PriceRegistry {
+		return nil, errors.Errorf("expected %v got %v", ccipconfig.PriceRegistry, contractType)
+	}
+	switch version.String() {
+	// v1.6.0 used the same v1_2_0 reader in the original per-version cases
+	// (presumably read-compatible — confirm when 1.6.0 diverges); both versions
+	// share one case, mirroring the multi-version cases in the offramp factory.
+	case ccipdata.V1_2_0, ccipdata.V1_6_0:
+		pr, err := v1_2_0.NewPriceRegistry(lggr, priceRegistryEvmAddr, lp, cl, registerFilters)
+		if err != nil {
+			return nil, err
+		}
+		if closeReader {
+			return nil, pr.Close()
+		}
+		return pr, nil
+	default:
+		return nil, errors.Errorf("unsupported price registry version %v", version.String())
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry_test.go
new file mode 100644
index 00000000000..b4a9d307147
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry_test.go
@@ -0,0 +1,46 @@
+package factory
+
+import (
+ "testing"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ mocks2 "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+)
+
+// TestPriceRegistry verifies that, for each supported price registry version,
+// the factory registers the expected log-poller filters on create and
+// unregisters them on close.
+func TestPriceRegistry(t *testing.T) {
+	ctx := testutils.Context(t)
+
+	for _, versionStr := range []string{ccipdata.V1_0_0, ccipdata.V1_2_0} {
+		lggr := logger.TestLogger(t)
+		addr := cciptypes.Address(utils.RandomAddress().String())
+		lp := mocks2.NewLogPoller(t)
+
+		expFilterNames := []string{
+			logpoller.FilterName(ccipdata.COMMIT_PRICE_UPDATES, addr),
+			logpoller.FilterName(ccipdata.FEE_TOKEN_ADDED, addr),
+			logpoller.FilterName(ccipdata.FEE_TOKEN_REMOVED, addr),
+		}
+		versionFinder := newMockVersionFinder(ccipconfig.PriceRegistry, *semver.MustParse(versionStr), nil)
+
+		lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil).Times(len(expFilterNames))
+		_, err := NewPriceRegistryReader(ctx, lggr, versionFinder, addr, lp, nil)
+		assert.NoError(t, err)
+
+		for _, f := range expFilterNames {
+			lp.On("UnregisterFilter", mock.Anything, f).Return(nil)
+		}
+		err = ClosePriceRegistryReader(ctx, lggr, versionFinder, addr, lp, nil)
+		assert.NoError(t, err)
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/versionfinder.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/versionfinder.go
new file mode 100644
index 00000000000..ac16fc4df2f
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/versionfinder.go
@@ -0,0 +1,44 @@
+package factory
+
+import (
+ "github.com/Masterminds/semver/v3"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+)
+
+// VersionFinder accepts a contract address and a client and performs an
+// on-chain call to determine the contract's type and semantic version.
+type VersionFinder interface {
+	TypeAndVersion(addr cciptypes.Address, client bind.ContractBackend) (config.ContractType, semver.Version, error)
+}
+
+// EvmVersionFinder resolves type and version by calling the contract's
+// typeAndVersion method via config.TypeAndVersion.
+type EvmVersionFinder struct{}
+
+// NewEvmVersionFinder returns a ready-to-use EvmVersionFinder.
+func NewEvmVersionFinder() EvmVersionFinder {
+	return EvmVersionFinder{}
+}
+
+// TypeAndVersion converts the generic address to an EVM address and performs
+// the on-chain typeAndVersion lookup.
+func (e EvmVersionFinder) TypeAndVersion(addr cciptypes.Address, client bind.ContractBackend) (config.ContractType, semver.Version, error) {
+	evmAddr, err := ccipcalc.GenericAddrToEvm(addr)
+	if err != nil {
+		return "", semver.Version{}, err
+	}
+	return config.TypeAndVersion(evmAddr, client)
+}
+
+// mockVersionFinder is a test double that returns fixed values instead of
+// making an on-chain call.
+type mockVersionFinder struct {
+	typ     config.ContractType // contract type to report
+	version semver.Version      // version to report
+	err     error               // error to report, if any
+}
+
+// newMockVersionFinder builds a mockVersionFinder returning the given values.
+func newMockVersionFinder(typ config.ContractType, version semver.Version, err error) *mockVersionFinder {
+	return &mockVersionFinder{typ: typ, version: version, err: err}
+}
+
+// TypeAndVersion ignores its arguments and returns the canned values.
+func (m mockVersionFinder) TypeAndVersion(addr cciptypes.Address, client bind.ContractBackend) (config.ContractType, semver.Version, error) {
+	return m.typ, m.version, m.err
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/commit_store_reader_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/commit_store_reader_mock.go
new file mode 100644
index 00000000000..f383a87a8a9
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/commit_store_reader_mock.go
@@ -0,0 +1,985 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package mocks
+
+import (
+ big "math/big"
+
+ ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ context "context"
+
+ gas "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+
+ mock "github.com/stretchr/testify/mock"
+
+ time "time"
+)
+
+// CommitStoreReader is an autogenerated mock type for the CommitStoreReader type
+type CommitStoreReader struct {
+ mock.Mock
+}
+
+type CommitStoreReader_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *CommitStoreReader) EXPECT() *CommitStoreReader_Expecter {
+ return &CommitStoreReader_Expecter{mock: &_m.Mock}
+}
+
+// ChangeConfig provides a mock function with given fields: ctx, onchainConfig, offchainConfig
+func (_m *CommitStoreReader) ChangeConfig(ctx context.Context, onchainConfig []byte, offchainConfig []byte) (ccip.Address, error) {
+ ret := _m.Called(ctx, onchainConfig, offchainConfig)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ChangeConfig")
+ }
+
+ var r0 ccip.Address
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, []byte, []byte) (ccip.Address, error)); ok {
+ return rf(ctx, onchainConfig, offchainConfig)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, []byte, []byte) ccip.Address); ok {
+ r0 = rf(ctx, onchainConfig, offchainConfig)
+ } else {
+ r0 = ret.Get(0).(ccip.Address)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, []byte, []byte) error); ok {
+ r1 = rf(ctx, onchainConfig, offchainConfig)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CommitStoreReader_ChangeConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChangeConfig'
+type CommitStoreReader_ChangeConfig_Call struct {
+ *mock.Call
+}
+
+// ChangeConfig is a helper method to define mock.On call
+// - ctx context.Context
+// - onchainConfig []byte
+// - offchainConfig []byte
+func (_e *CommitStoreReader_Expecter) ChangeConfig(ctx interface{}, onchainConfig interface{}, offchainConfig interface{}) *CommitStoreReader_ChangeConfig_Call {
+ return &CommitStoreReader_ChangeConfig_Call{Call: _e.mock.On("ChangeConfig", ctx, onchainConfig, offchainConfig)}
+}
+
+func (_c *CommitStoreReader_ChangeConfig_Call) Run(run func(ctx context.Context, onchainConfig []byte, offchainConfig []byte)) *CommitStoreReader_ChangeConfig_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].([]byte), args[2].([]byte))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_ChangeConfig_Call) Return(_a0 ccip.Address, _a1 error) *CommitStoreReader_ChangeConfig_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *CommitStoreReader_ChangeConfig_Call) RunAndReturn(run func(context.Context, []byte, []byte) (ccip.Address, error)) *CommitStoreReader_ChangeConfig_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Close provides a mock function with given fields:
+func (_m *CommitStoreReader) Close() error {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for Close")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func() error); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// CommitStoreReader_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
+type CommitStoreReader_Close_Call struct {
+ *mock.Call
+}
+
+// Close is a helper method to define mock.On call
+func (_e *CommitStoreReader_Expecter) Close() *CommitStoreReader_Close_Call {
+ return &CommitStoreReader_Close_Call{Call: _e.mock.On("Close")}
+}
+
+func (_c *CommitStoreReader_Close_Call) Run(run func()) *CommitStoreReader_Close_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_Close_Call) Return(_a0 error) *CommitStoreReader_Close_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *CommitStoreReader_Close_Call) RunAndReturn(run func() error) *CommitStoreReader_Close_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DecodeCommitReport provides a mock function with given fields: ctx, report
+func (_m *CommitStoreReader) DecodeCommitReport(ctx context.Context, report []byte) (ccip.CommitStoreReport, error) {
+ ret := _m.Called(ctx, report)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DecodeCommitReport")
+ }
+
+ var r0 ccip.CommitStoreReport
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, []byte) (ccip.CommitStoreReport, error)); ok {
+ return rf(ctx, report)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, []byte) ccip.CommitStoreReport); ok {
+ r0 = rf(ctx, report)
+ } else {
+ r0 = ret.Get(0).(ccip.CommitStoreReport)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, []byte) error); ok {
+ r1 = rf(ctx, report)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CommitStoreReader_DecodeCommitReport_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DecodeCommitReport'
+type CommitStoreReader_DecodeCommitReport_Call struct {
+ *mock.Call
+}
+
+// DecodeCommitReport is a helper method to define mock.On call
+// - ctx context.Context
+// - report []byte
+func (_e *CommitStoreReader_Expecter) DecodeCommitReport(ctx interface{}, report interface{}) *CommitStoreReader_DecodeCommitReport_Call {
+ return &CommitStoreReader_DecodeCommitReport_Call{Call: _e.mock.On("DecodeCommitReport", ctx, report)}
+}
+
+func (_c *CommitStoreReader_DecodeCommitReport_Call) Run(run func(ctx context.Context, report []byte)) *CommitStoreReader_DecodeCommitReport_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].([]byte))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_DecodeCommitReport_Call) Return(_a0 ccip.CommitStoreReport, _a1 error) *CommitStoreReader_DecodeCommitReport_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *CommitStoreReader_DecodeCommitReport_Call) RunAndReturn(run func(context.Context, []byte) (ccip.CommitStoreReport, error)) *CommitStoreReader_DecodeCommitReport_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// EncodeCommitReport provides a mock function with given fields: ctx, report
+func (_m *CommitStoreReader) EncodeCommitReport(ctx context.Context, report ccip.CommitStoreReport) ([]byte, error) {
+ ret := _m.Called(ctx, report)
+
+ if len(ret) == 0 {
+ panic("no return value specified for EncodeCommitReport")
+ }
+
+ var r0 []byte
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, ccip.CommitStoreReport) ([]byte, error)); ok {
+ return rf(ctx, report)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, ccip.CommitStoreReport) []byte); ok {
+ r0 = rf(ctx, report)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]byte)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, ccip.CommitStoreReport) error); ok {
+ r1 = rf(ctx, report)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CommitStoreReader_EncodeCommitReport_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EncodeCommitReport'
+type CommitStoreReader_EncodeCommitReport_Call struct {
+ *mock.Call
+}
+
+// EncodeCommitReport is a helper method to define mock.On call
+// - ctx context.Context
+// - report ccip.CommitStoreReport
+func (_e *CommitStoreReader_Expecter) EncodeCommitReport(ctx interface{}, report interface{}) *CommitStoreReader_EncodeCommitReport_Call {
+ return &CommitStoreReader_EncodeCommitReport_Call{Call: _e.mock.On("EncodeCommitReport", ctx, report)}
+}
+
+func (_c *CommitStoreReader_EncodeCommitReport_Call) Run(run func(ctx context.Context, report ccip.CommitStoreReport)) *CommitStoreReader_EncodeCommitReport_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(ccip.CommitStoreReport))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_EncodeCommitReport_Call) Return(_a0 []byte, _a1 error) *CommitStoreReader_EncodeCommitReport_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *CommitStoreReader_EncodeCommitReport_Call) RunAndReturn(run func(context.Context, ccip.CommitStoreReport) ([]byte, error)) *CommitStoreReader_EncodeCommitReport_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GasPriceEstimator provides a mock function with given fields: ctx
+func (_m *CommitStoreReader) GasPriceEstimator(ctx context.Context) (ccip.GasPriceEstimatorCommit, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GasPriceEstimator")
+ }
+
+ var r0 ccip.GasPriceEstimatorCommit
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.GasPriceEstimatorCommit, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.GasPriceEstimatorCommit); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(ccip.GasPriceEstimatorCommit)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CommitStoreReader_GasPriceEstimator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasPriceEstimator'
+type CommitStoreReader_GasPriceEstimator_Call struct {
+ *mock.Call
+}
+
+// GasPriceEstimator is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *CommitStoreReader_Expecter) GasPriceEstimator(ctx interface{}) *CommitStoreReader_GasPriceEstimator_Call {
+ return &CommitStoreReader_GasPriceEstimator_Call{Call: _e.mock.On("GasPriceEstimator", ctx)}
+}
+
+func (_c *CommitStoreReader_GasPriceEstimator_Call) Run(run func(ctx context.Context)) *CommitStoreReader_GasPriceEstimator_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_GasPriceEstimator_Call) Return(_a0 ccip.GasPriceEstimatorCommit, _a1 error) *CommitStoreReader_GasPriceEstimator_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *CommitStoreReader_GasPriceEstimator_Call) RunAndReturn(run func(context.Context) (ccip.GasPriceEstimatorCommit, error)) *CommitStoreReader_GasPriceEstimator_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetAcceptedCommitReportsGteTimestamp provides a mock function with given fields: ctx, ts, confirmations
+func (_m *CommitStoreReader) GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confirmations int) ([]ccip.CommitStoreReportWithTxMeta, error) {
+ ret := _m.Called(ctx, ts, confirmations)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetAcceptedCommitReportsGteTimestamp")
+ }
+
+ var r0 []ccip.CommitStoreReportWithTxMeta
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, time.Time, int) ([]ccip.CommitStoreReportWithTxMeta, error)); ok {
+ return rf(ctx, ts, confirmations)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, time.Time, int) []ccip.CommitStoreReportWithTxMeta); ok {
+ r0 = rf(ctx, ts, confirmations)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]ccip.CommitStoreReportWithTxMeta)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, time.Time, int) error); ok {
+ r1 = rf(ctx, ts, confirmations)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAcceptedCommitReportsGteTimestamp'
+type CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call struct {
+ *mock.Call
+}
+
+// GetAcceptedCommitReportsGteTimestamp is a helper method to define mock.On call
+// - ctx context.Context
+// - ts time.Time
+// - confirmations int
+func (_e *CommitStoreReader_Expecter) GetAcceptedCommitReportsGteTimestamp(ctx interface{}, ts interface{}, confirmations interface{}) *CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call {
+ return &CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call{Call: _e.mock.On("GetAcceptedCommitReportsGteTimestamp", ctx, ts, confirmations)}
+}
+
+func (_c *CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call) Run(run func(ctx context.Context, ts time.Time, confirmations int)) *CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(time.Time), args[2].(int))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call) Return(_a0 []ccip.CommitStoreReportWithTxMeta, _a1 error) *CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call) RunAndReturn(run func(context.Context, time.Time, int) ([]ccip.CommitStoreReportWithTxMeta, error)) *CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetCommitReportMatchingSeqNum provides a mock function with given fields: ctx, seqNum, confirmations
+func (_m *CommitStoreReader) GetCommitReportMatchingSeqNum(ctx context.Context, seqNum uint64, confirmations int) ([]ccip.CommitStoreReportWithTxMeta, error) {
+ ret := _m.Called(ctx, seqNum, confirmations)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetCommitReportMatchingSeqNum")
+ }
+
+ var r0 []ccip.CommitStoreReportWithTxMeta
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, int) ([]ccip.CommitStoreReportWithTxMeta, error)); ok {
+ return rf(ctx, seqNum, confirmations)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, int) []ccip.CommitStoreReportWithTxMeta); ok {
+ r0 = rf(ctx, seqNum, confirmations)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]ccip.CommitStoreReportWithTxMeta)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64, int) error); ok {
+ r1 = rf(ctx, seqNum, confirmations)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CommitStoreReader_GetCommitReportMatchingSeqNum_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCommitReportMatchingSeqNum'
+type CommitStoreReader_GetCommitReportMatchingSeqNum_Call struct {
+ *mock.Call
+}
+
+// GetCommitReportMatchingSeqNum is a helper method to define mock.On call
+// - ctx context.Context
+// - seqNum uint64
+// - confirmations int
+func (_e *CommitStoreReader_Expecter) GetCommitReportMatchingSeqNum(ctx interface{}, seqNum interface{}, confirmations interface{}) *CommitStoreReader_GetCommitReportMatchingSeqNum_Call {
+ return &CommitStoreReader_GetCommitReportMatchingSeqNum_Call{Call: _e.mock.On("GetCommitReportMatchingSeqNum", ctx, seqNum, confirmations)}
+}
+
+func (_c *CommitStoreReader_GetCommitReportMatchingSeqNum_Call) Run(run func(ctx context.Context, seqNum uint64, confirmations int)) *CommitStoreReader_GetCommitReportMatchingSeqNum_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint64), args[2].(int))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_GetCommitReportMatchingSeqNum_Call) Return(_a0 []ccip.CommitStoreReportWithTxMeta, _a1 error) *CommitStoreReader_GetCommitReportMatchingSeqNum_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *CommitStoreReader_GetCommitReportMatchingSeqNum_Call) RunAndReturn(run func(context.Context, uint64, int) ([]ccip.CommitStoreReportWithTxMeta, error)) *CommitStoreReader_GetCommitReportMatchingSeqNum_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetCommitStoreStaticConfig provides a mock function with given fields: ctx
+func (_m *CommitStoreReader) GetCommitStoreStaticConfig(ctx context.Context) (ccip.CommitStoreStaticConfig, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetCommitStoreStaticConfig")
+ }
+
+ var r0 ccip.CommitStoreStaticConfig
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.CommitStoreStaticConfig, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.CommitStoreStaticConfig); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(ccip.CommitStoreStaticConfig)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CommitStoreReader_GetCommitStoreStaticConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCommitStoreStaticConfig'
+type CommitStoreReader_GetCommitStoreStaticConfig_Call struct {
+ *mock.Call
+}
+
+// GetCommitStoreStaticConfig is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *CommitStoreReader_Expecter) GetCommitStoreStaticConfig(ctx interface{}) *CommitStoreReader_GetCommitStoreStaticConfig_Call {
+ return &CommitStoreReader_GetCommitStoreStaticConfig_Call{Call: _e.mock.On("GetCommitStoreStaticConfig", ctx)}
+}
+
+func (_c *CommitStoreReader_GetCommitStoreStaticConfig_Call) Run(run func(ctx context.Context)) *CommitStoreReader_GetCommitStoreStaticConfig_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_GetCommitStoreStaticConfig_Call) Return(_a0 ccip.CommitStoreStaticConfig, _a1 error) *CommitStoreReader_GetCommitStoreStaticConfig_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *CommitStoreReader_GetCommitStoreStaticConfig_Call) RunAndReturn(run func(context.Context) (ccip.CommitStoreStaticConfig, error)) *CommitStoreReader_GetCommitStoreStaticConfig_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetExpectedNextSequenceNumber provides a mock function with given fields: ctx
+func (_m *CommitStoreReader) GetExpectedNextSequenceNumber(ctx context.Context) (uint64, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetExpectedNextSequenceNumber")
+ }
+
+ var r0 uint64
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) uint64); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(uint64)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CommitStoreReader_GetExpectedNextSequenceNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetExpectedNextSequenceNumber'
+type CommitStoreReader_GetExpectedNextSequenceNumber_Call struct {
+ *mock.Call
+}
+
+// GetExpectedNextSequenceNumber is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *CommitStoreReader_Expecter) GetExpectedNextSequenceNumber(ctx interface{}) *CommitStoreReader_GetExpectedNextSequenceNumber_Call {
+ return &CommitStoreReader_GetExpectedNextSequenceNumber_Call{Call: _e.mock.On("GetExpectedNextSequenceNumber", ctx)}
+}
+
+func (_c *CommitStoreReader_GetExpectedNextSequenceNumber_Call) Run(run func(ctx context.Context)) *CommitStoreReader_GetExpectedNextSequenceNumber_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_GetExpectedNextSequenceNumber_Call) Return(_a0 uint64, _a1 error) *CommitStoreReader_GetExpectedNextSequenceNumber_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *CommitStoreReader_GetExpectedNextSequenceNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *CommitStoreReader_GetExpectedNextSequenceNumber_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetLatestPriceEpochAndRound provides a mock function with given fields: ctx
+func (_m *CommitStoreReader) GetLatestPriceEpochAndRound(ctx context.Context) (uint64, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetLatestPriceEpochAndRound")
+ }
+
+ var r0 uint64
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) uint64); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(uint64)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CommitStoreReader_GetLatestPriceEpochAndRound_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestPriceEpochAndRound'
+type CommitStoreReader_GetLatestPriceEpochAndRound_Call struct {
+ *mock.Call
+}
+
+// GetLatestPriceEpochAndRound is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *CommitStoreReader_Expecter) GetLatestPriceEpochAndRound(ctx interface{}) *CommitStoreReader_GetLatestPriceEpochAndRound_Call {
+ return &CommitStoreReader_GetLatestPriceEpochAndRound_Call{Call: _e.mock.On("GetLatestPriceEpochAndRound", ctx)}
+}
+
+func (_c *CommitStoreReader_GetLatestPriceEpochAndRound_Call) Run(run func(ctx context.Context)) *CommitStoreReader_GetLatestPriceEpochAndRound_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_GetLatestPriceEpochAndRound_Call) Return(_a0 uint64, _a1 error) *CommitStoreReader_GetLatestPriceEpochAndRound_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *CommitStoreReader_GetLatestPriceEpochAndRound_Call) RunAndReturn(run func(context.Context) (uint64, error)) *CommitStoreReader_GetLatestPriceEpochAndRound_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// IsBlessed provides a mock function with given fields: ctx, root
+func (_m *CommitStoreReader) IsBlessed(ctx context.Context, root [32]byte) (bool, error) {
+ ret := _m.Called(ctx, root)
+
+ if len(ret) == 0 {
+ panic("no return value specified for IsBlessed")
+ }
+
+ var r0 bool
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, [32]byte) (bool, error)); ok {
+ return rf(ctx, root)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, [32]byte) bool); ok {
+ r0 = rf(ctx, root)
+ } else {
+ r0 = ret.Get(0).(bool)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, [32]byte) error); ok {
+ r1 = rf(ctx, root)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CommitStoreReader_IsBlessed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsBlessed'
+type CommitStoreReader_IsBlessed_Call struct {
+ *mock.Call
+}
+
+// IsBlessed is a helper method to define mock.On call
+// - ctx context.Context
+// - root [32]byte
+func (_e *CommitStoreReader_Expecter) IsBlessed(ctx interface{}, root interface{}) *CommitStoreReader_IsBlessed_Call {
+ return &CommitStoreReader_IsBlessed_Call{Call: _e.mock.On("IsBlessed", ctx, root)}
+}
+
+func (_c *CommitStoreReader_IsBlessed_Call) Run(run func(ctx context.Context, root [32]byte)) *CommitStoreReader_IsBlessed_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].([32]byte))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_IsBlessed_Call) Return(_a0 bool, _a1 error) *CommitStoreReader_IsBlessed_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *CommitStoreReader_IsBlessed_Call) RunAndReturn(run func(context.Context, [32]byte) (bool, error)) *CommitStoreReader_IsBlessed_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// IsDestChainHealthy provides a mock function with given fields: ctx
+func (_m *CommitStoreReader) IsDestChainHealthy(ctx context.Context) (bool, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for IsDestChainHealthy")
+ }
+
+ var r0 bool
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) bool); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(bool)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CommitStoreReader_IsDestChainHealthy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsDestChainHealthy'
+type CommitStoreReader_IsDestChainHealthy_Call struct {
+ *mock.Call
+}
+
+// IsDestChainHealthy is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *CommitStoreReader_Expecter) IsDestChainHealthy(ctx interface{}) *CommitStoreReader_IsDestChainHealthy_Call {
+ return &CommitStoreReader_IsDestChainHealthy_Call{Call: _e.mock.On("IsDestChainHealthy", ctx)}
+}
+
+func (_c *CommitStoreReader_IsDestChainHealthy_Call) Run(run func(ctx context.Context)) *CommitStoreReader_IsDestChainHealthy_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_IsDestChainHealthy_Call) Return(_a0 bool, _a1 error) *CommitStoreReader_IsDestChainHealthy_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *CommitStoreReader_IsDestChainHealthy_Call) RunAndReturn(run func(context.Context) (bool, error)) *CommitStoreReader_IsDestChainHealthy_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// IsDown provides a mock function with given fields: ctx
+func (_m *CommitStoreReader) IsDown(ctx context.Context) (bool, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for IsDown")
+ }
+
+ var r0 bool
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) bool); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(bool)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CommitStoreReader_IsDown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsDown'
+type CommitStoreReader_IsDown_Call struct {
+ *mock.Call
+}
+
+// IsDown is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *CommitStoreReader_Expecter) IsDown(ctx interface{}) *CommitStoreReader_IsDown_Call {
+ return &CommitStoreReader_IsDown_Call{Call: _e.mock.On("IsDown", ctx)}
+}
+
+func (_c *CommitStoreReader_IsDown_Call) Run(run func(ctx context.Context)) *CommitStoreReader_IsDown_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_IsDown_Call) Return(_a0 bool, _a1 error) *CommitStoreReader_IsDown_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *CommitStoreReader_IsDown_Call) RunAndReturn(run func(context.Context) (bool, error)) *CommitStoreReader_IsDown_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// OffchainConfig provides a mock function with given fields: ctx
+func (_m *CommitStoreReader) OffchainConfig(ctx context.Context) (ccip.CommitOffchainConfig, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for OffchainConfig")
+ }
+
+ var r0 ccip.CommitOffchainConfig
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.CommitOffchainConfig, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.CommitOffchainConfig); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(ccip.CommitOffchainConfig)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CommitStoreReader_OffchainConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OffchainConfig'
+type CommitStoreReader_OffchainConfig_Call struct {
+ *mock.Call
+}
+
+// OffchainConfig is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *CommitStoreReader_Expecter) OffchainConfig(ctx interface{}) *CommitStoreReader_OffchainConfig_Call {
+ return &CommitStoreReader_OffchainConfig_Call{Call: _e.mock.On("OffchainConfig", ctx)}
+}
+
+func (_c *CommitStoreReader_OffchainConfig_Call) Run(run func(ctx context.Context)) *CommitStoreReader_OffchainConfig_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_OffchainConfig_Call) Return(_a0 ccip.CommitOffchainConfig, _a1 error) *CommitStoreReader_OffchainConfig_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *CommitStoreReader_OffchainConfig_Call) RunAndReturn(run func(context.Context) (ccip.CommitOffchainConfig, error)) *CommitStoreReader_OffchainConfig_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// SetGasEstimator provides a mock function with given fields: ctx, gpe
+func (_m *CommitStoreReader) SetGasEstimator(ctx context.Context, gpe gas.EvmFeeEstimator) error {
+ ret := _m.Called(ctx, gpe)
+
+ if len(ret) == 0 {
+ panic("no return value specified for SetGasEstimator")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, gas.EvmFeeEstimator) error); ok {
+ r0 = rf(ctx, gpe)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// CommitStoreReader_SetGasEstimator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetGasEstimator'
+type CommitStoreReader_SetGasEstimator_Call struct {
+ *mock.Call
+}
+
+// SetGasEstimator is a helper method to define mock.On call
+// - ctx context.Context
+// - gpe gas.EvmFeeEstimator
+func (_e *CommitStoreReader_Expecter) SetGasEstimator(ctx interface{}, gpe interface{}) *CommitStoreReader_SetGasEstimator_Call {
+ return &CommitStoreReader_SetGasEstimator_Call{Call: _e.mock.On("SetGasEstimator", ctx, gpe)}
+}
+
+func (_c *CommitStoreReader_SetGasEstimator_Call) Run(run func(ctx context.Context, gpe gas.EvmFeeEstimator)) *CommitStoreReader_SetGasEstimator_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(gas.EvmFeeEstimator))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_SetGasEstimator_Call) Return(_a0 error) *CommitStoreReader_SetGasEstimator_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *CommitStoreReader_SetGasEstimator_Call) RunAndReturn(run func(context.Context, gas.EvmFeeEstimator) error) *CommitStoreReader_SetGasEstimator_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// SetSourceMaxGasPrice provides a mock function with given fields: ctx, sourceMaxGasPrice
+func (_m *CommitStoreReader) SetSourceMaxGasPrice(ctx context.Context, sourceMaxGasPrice *big.Int) error {
+ ret := _m.Called(ctx, sourceMaxGasPrice)
+
+ if len(ret) == 0 {
+ panic("no return value specified for SetSourceMaxGasPrice")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, *big.Int) error); ok {
+ r0 = rf(ctx, sourceMaxGasPrice)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// CommitStoreReader_SetSourceMaxGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetSourceMaxGasPrice'
+type CommitStoreReader_SetSourceMaxGasPrice_Call struct {
+ *mock.Call
+}
+
+// SetSourceMaxGasPrice is a helper method to define mock.On call
+// - ctx context.Context
+// - sourceMaxGasPrice *big.Int
+func (_e *CommitStoreReader_Expecter) SetSourceMaxGasPrice(ctx interface{}, sourceMaxGasPrice interface{}) *CommitStoreReader_SetSourceMaxGasPrice_Call {
+ return &CommitStoreReader_SetSourceMaxGasPrice_Call{Call: _e.mock.On("SetSourceMaxGasPrice", ctx, sourceMaxGasPrice)}
+}
+
+func (_c *CommitStoreReader_SetSourceMaxGasPrice_Call) Run(run func(ctx context.Context, sourceMaxGasPrice *big.Int)) *CommitStoreReader_SetSourceMaxGasPrice_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*big.Int))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_SetSourceMaxGasPrice_Call) Return(_a0 error) *CommitStoreReader_SetSourceMaxGasPrice_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *CommitStoreReader_SetSourceMaxGasPrice_Call) RunAndReturn(run func(context.Context, *big.Int) error) *CommitStoreReader_SetSourceMaxGasPrice_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// VerifyExecutionReport provides a mock function with given fields: ctx, report
+func (_m *CommitStoreReader) VerifyExecutionReport(ctx context.Context, report ccip.ExecReport) (bool, error) {
+ ret := _m.Called(ctx, report)
+
+ if len(ret) == 0 {
+ panic("no return value specified for VerifyExecutionReport")
+ }
+
+ var r0 bool
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, ccip.ExecReport) (bool, error)); ok {
+ return rf(ctx, report)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, ccip.ExecReport) bool); ok {
+ r0 = rf(ctx, report)
+ } else {
+ r0 = ret.Get(0).(bool)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, ccip.ExecReport) error); ok {
+ r1 = rf(ctx, report)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CommitStoreReader_VerifyExecutionReport_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'VerifyExecutionReport'
+type CommitStoreReader_VerifyExecutionReport_Call struct {
+ *mock.Call
+}
+
+// VerifyExecutionReport is a helper method to define mock.On call
+// - ctx context.Context
+// - report ccip.ExecReport
+func (_e *CommitStoreReader_Expecter) VerifyExecutionReport(ctx interface{}, report interface{}) *CommitStoreReader_VerifyExecutionReport_Call {
+ return &CommitStoreReader_VerifyExecutionReport_Call{Call: _e.mock.On("VerifyExecutionReport", ctx, report)}
+}
+
+func (_c *CommitStoreReader_VerifyExecutionReport_Call) Run(run func(ctx context.Context, report ccip.ExecReport)) *CommitStoreReader_VerifyExecutionReport_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(ccip.ExecReport))
+ })
+ return _c
+}
+
+func (_c *CommitStoreReader_VerifyExecutionReport_Call) Return(_a0 bool, _a1 error) *CommitStoreReader_VerifyExecutionReport_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *CommitStoreReader_VerifyExecutionReport_Call) RunAndReturn(run func(context.Context, ccip.ExecReport) (bool, error)) *CommitStoreReader_VerifyExecutionReport_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewCommitStoreReader creates a new instance of CommitStoreReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewCommitStoreReader(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *CommitStoreReader {
+ mock := &CommitStoreReader{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/offramp_reader_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/offramp_reader_mock.go
new file mode 100644
index 00000000000..f383ccdc0ba
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/offramp_reader_mock.go
@@ -0,0 +1,949 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package mocks
+
+import (
+ ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ context "context"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// OffRampReader is an autogenerated mock type for the OffRampReader type
+type OffRampReader struct {
+ mock.Mock
+}
+
+type OffRampReader_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *OffRampReader) EXPECT() *OffRampReader_Expecter {
+ return &OffRampReader_Expecter{mock: &_m.Mock}
+}
+
+// Address provides a mock function with given fields: ctx
+func (_m *OffRampReader) Address(ctx context.Context) (ccip.Address, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Address")
+ }
+
+ var r0 ccip.Address
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.Address, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.Address); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(ccip.Address)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OffRampReader_Address_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Address'
+type OffRampReader_Address_Call struct {
+ *mock.Call
+}
+
+// Address is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *OffRampReader_Expecter) Address(ctx interface{}) *OffRampReader_Address_Call {
+ return &OffRampReader_Address_Call{Call: _e.mock.On("Address", ctx)}
+}
+
+func (_c *OffRampReader_Address_Call) Run(run func(ctx context.Context)) *OffRampReader_Address_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *OffRampReader_Address_Call) Return(_a0 ccip.Address, _a1 error) *OffRampReader_Address_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OffRampReader_Address_Call) RunAndReturn(run func(context.Context) (ccip.Address, error)) *OffRampReader_Address_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ChangeConfig provides a mock function with given fields: ctx, onchainConfig, offchainConfig
+func (_m *OffRampReader) ChangeConfig(ctx context.Context, onchainConfig []byte, offchainConfig []byte) (ccip.Address, ccip.Address, error) {
+ ret := _m.Called(ctx, onchainConfig, offchainConfig)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ChangeConfig")
+ }
+
+ var r0 ccip.Address
+ var r1 ccip.Address
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, []byte, []byte) (ccip.Address, ccip.Address, error)); ok {
+ return rf(ctx, onchainConfig, offchainConfig)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, []byte, []byte) ccip.Address); ok {
+ r0 = rf(ctx, onchainConfig, offchainConfig)
+ } else {
+ r0 = ret.Get(0).(ccip.Address)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, []byte, []byte) ccip.Address); ok {
+ r1 = rf(ctx, onchainConfig, offchainConfig)
+ } else {
+ r1 = ret.Get(1).(ccip.Address)
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, []byte, []byte) error); ok {
+ r2 = rf(ctx, onchainConfig, offchainConfig)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// OffRampReader_ChangeConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChangeConfig'
+type OffRampReader_ChangeConfig_Call struct {
+ *mock.Call
+}
+
+// ChangeConfig is a helper method to define mock.On call
+// - ctx context.Context
+// - onchainConfig []byte
+// - offchainConfig []byte
+func (_e *OffRampReader_Expecter) ChangeConfig(ctx interface{}, onchainConfig interface{}, offchainConfig interface{}) *OffRampReader_ChangeConfig_Call {
+ return &OffRampReader_ChangeConfig_Call{Call: _e.mock.On("ChangeConfig", ctx, onchainConfig, offchainConfig)}
+}
+
+func (_c *OffRampReader_ChangeConfig_Call) Run(run func(ctx context.Context, onchainConfig []byte, offchainConfig []byte)) *OffRampReader_ChangeConfig_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].([]byte), args[2].([]byte))
+ })
+ return _c
+}
+
+func (_c *OffRampReader_ChangeConfig_Call) Return(_a0 ccip.Address, _a1 ccip.Address, _a2 error) *OffRampReader_ChangeConfig_Call {
+ _c.Call.Return(_a0, _a1, _a2)
+ return _c
+}
+
+func (_c *OffRampReader_ChangeConfig_Call) RunAndReturn(run func(context.Context, []byte, []byte) (ccip.Address, ccip.Address, error)) *OffRampReader_ChangeConfig_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Close provides a mock function with given fields:
+func (_m *OffRampReader) Close() error {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for Close")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func() error); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// OffRampReader_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
+type OffRampReader_Close_Call struct {
+ *mock.Call
+}
+
+// Close is a helper method to define mock.On call
+func (_e *OffRampReader_Expecter) Close() *OffRampReader_Close_Call {
+ return &OffRampReader_Close_Call{Call: _e.mock.On("Close")}
+}
+
+func (_c *OffRampReader_Close_Call) Run(run func()) *OffRampReader_Close_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *OffRampReader_Close_Call) Return(_a0 error) *OffRampReader_Close_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *OffRampReader_Close_Call) RunAndReturn(run func() error) *OffRampReader_Close_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CurrentRateLimiterState provides a mock function with given fields: ctx
+func (_m *OffRampReader) CurrentRateLimiterState(ctx context.Context) (ccip.TokenBucketRateLimit, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CurrentRateLimiterState")
+ }
+
+ var r0 ccip.TokenBucketRateLimit
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.TokenBucketRateLimit, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.TokenBucketRateLimit); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(ccip.TokenBucketRateLimit)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OffRampReader_CurrentRateLimiterState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CurrentRateLimiterState'
+type OffRampReader_CurrentRateLimiterState_Call struct {
+ *mock.Call
+}
+
+// CurrentRateLimiterState is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *OffRampReader_Expecter) CurrentRateLimiterState(ctx interface{}) *OffRampReader_CurrentRateLimiterState_Call {
+ return &OffRampReader_CurrentRateLimiterState_Call{Call: _e.mock.On("CurrentRateLimiterState", ctx)}
+}
+
+func (_c *OffRampReader_CurrentRateLimiterState_Call) Run(run func(ctx context.Context)) *OffRampReader_CurrentRateLimiterState_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *OffRampReader_CurrentRateLimiterState_Call) Return(_a0 ccip.TokenBucketRateLimit, _a1 error) *OffRampReader_CurrentRateLimiterState_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OffRampReader_CurrentRateLimiterState_Call) RunAndReturn(run func(context.Context) (ccip.TokenBucketRateLimit, error)) *OffRampReader_CurrentRateLimiterState_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DecodeExecutionReport provides a mock function with given fields: ctx, report
+func (_m *OffRampReader) DecodeExecutionReport(ctx context.Context, report []byte) (ccip.ExecReport, error) {
+ ret := _m.Called(ctx, report)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DecodeExecutionReport")
+ }
+
+ var r0 ccip.ExecReport
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, []byte) (ccip.ExecReport, error)); ok {
+ return rf(ctx, report)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, []byte) ccip.ExecReport); ok {
+ r0 = rf(ctx, report)
+ } else {
+ r0 = ret.Get(0).(ccip.ExecReport)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, []byte) error); ok {
+ r1 = rf(ctx, report)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OffRampReader_DecodeExecutionReport_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DecodeExecutionReport'
+type OffRampReader_DecodeExecutionReport_Call struct {
+ *mock.Call
+}
+
+// DecodeExecutionReport is a helper method to define mock.On call
+// - ctx context.Context
+// - report []byte
+func (_e *OffRampReader_Expecter) DecodeExecutionReport(ctx interface{}, report interface{}) *OffRampReader_DecodeExecutionReport_Call {
+ return &OffRampReader_DecodeExecutionReport_Call{Call: _e.mock.On("DecodeExecutionReport", ctx, report)}
+}
+
+func (_c *OffRampReader_DecodeExecutionReport_Call) Run(run func(ctx context.Context, report []byte)) *OffRampReader_DecodeExecutionReport_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].([]byte))
+ })
+ return _c
+}
+
+func (_c *OffRampReader_DecodeExecutionReport_Call) Return(_a0 ccip.ExecReport, _a1 error) *OffRampReader_DecodeExecutionReport_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OffRampReader_DecodeExecutionReport_Call) RunAndReturn(run func(context.Context, []byte) (ccip.ExecReport, error)) *OffRampReader_DecodeExecutionReport_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// EncodeExecutionReport provides a mock function with given fields: ctx, report
+func (_m *OffRampReader) EncodeExecutionReport(ctx context.Context, report ccip.ExecReport) ([]byte, error) {
+ ret := _m.Called(ctx, report)
+
+ if len(ret) == 0 {
+ panic("no return value specified for EncodeExecutionReport")
+ }
+
+ var r0 []byte
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, ccip.ExecReport) ([]byte, error)); ok {
+ return rf(ctx, report)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, ccip.ExecReport) []byte); ok {
+ r0 = rf(ctx, report)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]byte)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, ccip.ExecReport) error); ok {
+ r1 = rf(ctx, report)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OffRampReader_EncodeExecutionReport_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EncodeExecutionReport'
+type OffRampReader_EncodeExecutionReport_Call struct {
+ *mock.Call
+}
+
+// EncodeExecutionReport is a helper method to define mock.On call
+// - ctx context.Context
+// - report ccip.ExecReport
+func (_e *OffRampReader_Expecter) EncodeExecutionReport(ctx interface{}, report interface{}) *OffRampReader_EncodeExecutionReport_Call {
+ return &OffRampReader_EncodeExecutionReport_Call{Call: _e.mock.On("EncodeExecutionReport", ctx, report)}
+}
+
+func (_c *OffRampReader_EncodeExecutionReport_Call) Run(run func(ctx context.Context, report ccip.ExecReport)) *OffRampReader_EncodeExecutionReport_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(ccip.ExecReport))
+ })
+ return _c
+}
+
+func (_c *OffRampReader_EncodeExecutionReport_Call) Return(_a0 []byte, _a1 error) *OffRampReader_EncodeExecutionReport_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OffRampReader_EncodeExecutionReport_Call) RunAndReturn(run func(context.Context, ccip.ExecReport) ([]byte, error)) *OffRampReader_EncodeExecutionReport_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GasPriceEstimator provides a mock function with given fields: ctx
+func (_m *OffRampReader) GasPriceEstimator(ctx context.Context) (ccip.GasPriceEstimatorExec, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GasPriceEstimator")
+ }
+
+ var r0 ccip.GasPriceEstimatorExec
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.GasPriceEstimatorExec, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.GasPriceEstimatorExec); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(ccip.GasPriceEstimatorExec)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OffRampReader_GasPriceEstimator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasPriceEstimator'
+type OffRampReader_GasPriceEstimator_Call struct {
+ *mock.Call
+}
+
+// GasPriceEstimator is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *OffRampReader_Expecter) GasPriceEstimator(ctx interface{}) *OffRampReader_GasPriceEstimator_Call {
+ return &OffRampReader_GasPriceEstimator_Call{Call: _e.mock.On("GasPriceEstimator", ctx)}
+}
+
+func (_c *OffRampReader_GasPriceEstimator_Call) Run(run func(ctx context.Context)) *OffRampReader_GasPriceEstimator_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *OffRampReader_GasPriceEstimator_Call) Return(_a0 ccip.GasPriceEstimatorExec, _a1 error) *OffRampReader_GasPriceEstimator_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OffRampReader_GasPriceEstimator_Call) RunAndReturn(run func(context.Context) (ccip.GasPriceEstimatorExec, error)) *OffRampReader_GasPriceEstimator_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetExecutionState provides a mock function with given fields: ctx, sequenceNumber
+func (_m *OffRampReader) GetExecutionState(ctx context.Context, sequenceNumber uint64) (uint8, error) {
+ ret := _m.Called(ctx, sequenceNumber)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetExecutionState")
+ }
+
+ var r0 uint8
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64) (uint8, error)); ok {
+ return rf(ctx, sequenceNumber)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64) uint8); ok {
+ r0 = rf(ctx, sequenceNumber)
+ } else {
+ r0 = ret.Get(0).(uint8)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok {
+ r1 = rf(ctx, sequenceNumber)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OffRampReader_GetExecutionState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetExecutionState'
+type OffRampReader_GetExecutionState_Call struct {
+ *mock.Call
+}
+
+// GetExecutionState is a helper method to define mock.On call
+// - ctx context.Context
+// - sequenceNumber uint64
+func (_e *OffRampReader_Expecter) GetExecutionState(ctx interface{}, sequenceNumber interface{}) *OffRampReader_GetExecutionState_Call {
+ return &OffRampReader_GetExecutionState_Call{Call: _e.mock.On("GetExecutionState", ctx, sequenceNumber)}
+}
+
+func (_c *OffRampReader_GetExecutionState_Call) Run(run func(ctx context.Context, sequenceNumber uint64)) *OffRampReader_GetExecutionState_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint64))
+ })
+ return _c
+}
+
+func (_c *OffRampReader_GetExecutionState_Call) Return(_a0 uint8, _a1 error) *OffRampReader_GetExecutionState_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OffRampReader_GetExecutionState_Call) RunAndReturn(run func(context.Context, uint64) (uint8, error)) *OffRampReader_GetExecutionState_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetExecutionStateChangesBetweenSeqNums provides a mock function with given fields: ctx, seqNumMin, seqNumMax, confirmations
+func (_m *OffRampReader) GetExecutionStateChangesBetweenSeqNums(ctx context.Context, seqNumMin uint64, seqNumMax uint64, confirmations int) ([]ccip.ExecutionStateChangedWithTxMeta, error) {
+ ret := _m.Called(ctx, seqNumMin, seqNumMax, confirmations)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetExecutionStateChangesBetweenSeqNums")
+ }
+
+ var r0 []ccip.ExecutionStateChangedWithTxMeta
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, int) ([]ccip.ExecutionStateChangedWithTxMeta, error)); ok {
+ return rf(ctx, seqNumMin, seqNumMax, confirmations)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, int) []ccip.ExecutionStateChangedWithTxMeta); ok {
+ r0 = rf(ctx, seqNumMin, seqNumMax, confirmations)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]ccip.ExecutionStateChangedWithTxMeta)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, int) error); ok {
+ r1 = rf(ctx, seqNumMin, seqNumMax, confirmations)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetExecutionStateChangesBetweenSeqNums'
+type OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call struct {
+ *mock.Call
+}
+
+// GetExecutionStateChangesBetweenSeqNums is a helper method to define mock.On call
+// - ctx context.Context
+// - seqNumMin uint64
+// - seqNumMax uint64
+// - confirmations int
+func (_e *OffRampReader_Expecter) GetExecutionStateChangesBetweenSeqNums(ctx interface{}, seqNumMin interface{}, seqNumMax interface{}, confirmations interface{}) *OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call {
+ return &OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call{Call: _e.mock.On("GetExecutionStateChangesBetweenSeqNums", ctx, seqNumMin, seqNumMax, confirmations)}
+}
+
+func (_c *OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call) Run(run func(ctx context.Context, seqNumMin uint64, seqNumMax uint64, confirmations int)) *OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint64), args[2].(uint64), args[3].(int))
+ })
+ return _c
+}
+
+func (_c *OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call) Return(_a0 []ccip.ExecutionStateChangedWithTxMeta, _a1 error) *OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call) RunAndReturn(run func(context.Context, uint64, uint64, int) ([]ccip.ExecutionStateChangedWithTxMeta, error)) *OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetRouter provides a mock function with given fields: ctx
+func (_m *OffRampReader) GetRouter(ctx context.Context) (ccip.Address, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetRouter")
+ }
+
+ var r0 ccip.Address
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.Address, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.Address); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(ccip.Address)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OffRampReader_GetRouter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRouter'
+type OffRampReader_GetRouter_Call struct {
+ *mock.Call
+}
+
+// GetRouter is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *OffRampReader_Expecter) GetRouter(ctx interface{}) *OffRampReader_GetRouter_Call {
+ return &OffRampReader_GetRouter_Call{Call: _e.mock.On("GetRouter", ctx)}
+}
+
+func (_c *OffRampReader_GetRouter_Call) Run(run func(ctx context.Context)) *OffRampReader_GetRouter_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *OffRampReader_GetRouter_Call) Return(_a0 ccip.Address, _a1 error) *OffRampReader_GetRouter_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OffRampReader_GetRouter_Call) RunAndReturn(run func(context.Context) (ccip.Address, error)) *OffRampReader_GetRouter_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetSourceToDestTokensMapping provides a mock function with given fields: ctx
+func (_m *OffRampReader) GetSourceToDestTokensMapping(ctx context.Context) (map[ccip.Address]ccip.Address, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetSourceToDestTokensMapping")
+ }
+
+ var r0 map[ccip.Address]ccip.Address
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (map[ccip.Address]ccip.Address, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) map[ccip.Address]ccip.Address); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(map[ccip.Address]ccip.Address)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OffRampReader_GetSourceToDestTokensMapping_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSourceToDestTokensMapping'
+type OffRampReader_GetSourceToDestTokensMapping_Call struct {
+ *mock.Call
+}
+
+// GetSourceToDestTokensMapping is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *OffRampReader_Expecter) GetSourceToDestTokensMapping(ctx interface{}) *OffRampReader_GetSourceToDestTokensMapping_Call {
+ return &OffRampReader_GetSourceToDestTokensMapping_Call{Call: _e.mock.On("GetSourceToDestTokensMapping", ctx)}
+}
+
+func (_c *OffRampReader_GetSourceToDestTokensMapping_Call) Run(run func(ctx context.Context)) *OffRampReader_GetSourceToDestTokensMapping_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *OffRampReader_GetSourceToDestTokensMapping_Call) Return(_a0 map[ccip.Address]ccip.Address, _a1 error) *OffRampReader_GetSourceToDestTokensMapping_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OffRampReader_GetSourceToDestTokensMapping_Call) RunAndReturn(run func(context.Context) (map[ccip.Address]ccip.Address, error)) *OffRampReader_GetSourceToDestTokensMapping_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetStaticConfig provides a mock function with given fields: ctx
+func (_m *OffRampReader) GetStaticConfig(ctx context.Context) (ccip.OffRampStaticConfig, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetStaticConfig")
+ }
+
+ var r0 ccip.OffRampStaticConfig
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.OffRampStaticConfig, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.OffRampStaticConfig); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(ccip.OffRampStaticConfig)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OffRampReader_GetStaticConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStaticConfig'
+type OffRampReader_GetStaticConfig_Call struct {
+ *mock.Call
+}
+
+// GetStaticConfig is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *OffRampReader_Expecter) GetStaticConfig(ctx interface{}) *OffRampReader_GetStaticConfig_Call {
+ return &OffRampReader_GetStaticConfig_Call{Call: _e.mock.On("GetStaticConfig", ctx)}
+}
+
+func (_c *OffRampReader_GetStaticConfig_Call) Run(run func(ctx context.Context)) *OffRampReader_GetStaticConfig_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *OffRampReader_GetStaticConfig_Call) Return(_a0 ccip.OffRampStaticConfig, _a1 error) *OffRampReader_GetStaticConfig_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OffRampReader_GetStaticConfig_Call) RunAndReturn(run func(context.Context) (ccip.OffRampStaticConfig, error)) *OffRampReader_GetStaticConfig_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetTokens provides a mock function with given fields: ctx
+func (_m *OffRampReader) GetTokens(ctx context.Context) (ccip.OffRampTokens, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetTokens")
+ }
+
+ var r0 ccip.OffRampTokens
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.OffRampTokens, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.OffRampTokens); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(ccip.OffRampTokens)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OffRampReader_GetTokens_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetTokens'
+type OffRampReader_GetTokens_Call struct {
+ *mock.Call
+}
+
+// GetTokens is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *OffRampReader_Expecter) GetTokens(ctx interface{}) *OffRampReader_GetTokens_Call {
+ return &OffRampReader_GetTokens_Call{Call: _e.mock.On("GetTokens", ctx)}
+}
+
+func (_c *OffRampReader_GetTokens_Call) Run(run func(ctx context.Context)) *OffRampReader_GetTokens_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *OffRampReader_GetTokens_Call) Return(_a0 ccip.OffRampTokens, _a1 error) *OffRampReader_GetTokens_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OffRampReader_GetTokens_Call) RunAndReturn(run func(context.Context) (ccip.OffRampTokens, error)) *OffRampReader_GetTokens_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListSenderNonces provides a mock function with given fields: ctx, senders
+func (_m *OffRampReader) ListSenderNonces(ctx context.Context, senders []ccip.Address) (map[ccip.Address]uint64, error) {
+ ret := _m.Called(ctx, senders)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListSenderNonces")
+ }
+
+ var r0 map[ccip.Address]uint64
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) (map[ccip.Address]uint64, error)); ok {
+ return rf(ctx, senders)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) map[ccip.Address]uint64); ok {
+ r0 = rf(ctx, senders)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(map[ccip.Address]uint64)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, []ccip.Address) error); ok {
+ r1 = rf(ctx, senders)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OffRampReader_ListSenderNonces_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListSenderNonces'
+type OffRampReader_ListSenderNonces_Call struct {
+ *mock.Call
+}
+
+// ListSenderNonces is a helper method to define mock.On call
+// - ctx context.Context
+// - senders []ccip.Address
+func (_e *OffRampReader_Expecter) ListSenderNonces(ctx interface{}, senders interface{}) *OffRampReader_ListSenderNonces_Call {
+ return &OffRampReader_ListSenderNonces_Call{Call: _e.mock.On("ListSenderNonces", ctx, senders)}
+}
+
+func (_c *OffRampReader_ListSenderNonces_Call) Run(run func(ctx context.Context, senders []ccip.Address)) *OffRampReader_ListSenderNonces_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].([]ccip.Address))
+ })
+ return _c
+}
+
+func (_c *OffRampReader_ListSenderNonces_Call) Return(_a0 map[ccip.Address]uint64, _a1 error) *OffRampReader_ListSenderNonces_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OffRampReader_ListSenderNonces_Call) RunAndReturn(run func(context.Context, []ccip.Address) (map[ccip.Address]uint64, error)) *OffRampReader_ListSenderNonces_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// OffchainConfig provides a mock function with given fields: ctx
+func (_m *OffRampReader) OffchainConfig(ctx context.Context) (ccip.ExecOffchainConfig, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for OffchainConfig")
+ }
+
+ var r0 ccip.ExecOffchainConfig
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.ExecOffchainConfig, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.ExecOffchainConfig); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(ccip.ExecOffchainConfig)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OffRampReader_OffchainConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OffchainConfig'
+type OffRampReader_OffchainConfig_Call struct {
+ *mock.Call
+}
+
+// OffchainConfig is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *OffRampReader_Expecter) OffchainConfig(ctx interface{}) *OffRampReader_OffchainConfig_Call {
+ return &OffRampReader_OffchainConfig_Call{Call: _e.mock.On("OffchainConfig", ctx)}
+}
+
+func (_c *OffRampReader_OffchainConfig_Call) Run(run func(ctx context.Context)) *OffRampReader_OffchainConfig_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *OffRampReader_OffchainConfig_Call) Return(_a0 ccip.ExecOffchainConfig, _a1 error) *OffRampReader_OffchainConfig_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OffRampReader_OffchainConfig_Call) RunAndReturn(run func(context.Context) (ccip.ExecOffchainConfig, error)) *OffRampReader_OffchainConfig_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// OnchainConfig provides a mock function with given fields: ctx
+func (_m *OffRampReader) OnchainConfig(ctx context.Context) (ccip.ExecOnchainConfig, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for OnchainConfig")
+ }
+
+ var r0 ccip.ExecOnchainConfig
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.ExecOnchainConfig, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.ExecOnchainConfig); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(ccip.ExecOnchainConfig)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OffRampReader_OnchainConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnchainConfig'
+type OffRampReader_OnchainConfig_Call struct {
+ *mock.Call
+}
+
+// OnchainConfig is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *OffRampReader_Expecter) OnchainConfig(ctx interface{}) *OffRampReader_OnchainConfig_Call {
+ return &OffRampReader_OnchainConfig_Call{Call: _e.mock.On("OnchainConfig", ctx)}
+}
+
+func (_c *OffRampReader_OnchainConfig_Call) Run(run func(ctx context.Context)) *OffRampReader_OnchainConfig_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *OffRampReader_OnchainConfig_Call) Return(_a0 ccip.ExecOnchainConfig, _a1 error) *OffRampReader_OnchainConfig_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OffRampReader_OnchainConfig_Call) RunAndReturn(run func(context.Context) (ccip.ExecOnchainConfig, error)) *OffRampReader_OnchainConfig_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewOffRampReader creates a new instance of OffRampReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewOffRampReader(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *OffRampReader {
+ mock := &OffRampReader{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/onramp_reader_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/onramp_reader_mock.go
new file mode 100644
index 00000000000..ccf5bd78463
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/onramp_reader_mock.go
@@ -0,0 +1,480 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package mocks
+
+import (
+ ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ context "context"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// OnRampReader is an autogenerated mock type for the OnRampReader type
+type OnRampReader struct {
+ mock.Mock
+}
+
+type OnRampReader_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *OnRampReader) EXPECT() *OnRampReader_Expecter {
+ return &OnRampReader_Expecter{mock: &_m.Mock}
+}
+
+// Address provides a mock function with given fields: ctx
+func (_m *OnRampReader) Address(ctx context.Context) (ccip.Address, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Address")
+ }
+
+ var r0 ccip.Address
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.Address, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.Address); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(ccip.Address)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OnRampReader_Address_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Address'
+type OnRampReader_Address_Call struct {
+ *mock.Call
+}
+
+// Address is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *OnRampReader_Expecter) Address(ctx interface{}) *OnRampReader_Address_Call {
+ return &OnRampReader_Address_Call{Call: _e.mock.On("Address", ctx)}
+}
+
+func (_c *OnRampReader_Address_Call) Run(run func(ctx context.Context)) *OnRampReader_Address_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *OnRampReader_Address_Call) Return(_a0 ccip.Address, _a1 error) *OnRampReader_Address_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OnRampReader_Address_Call) RunAndReturn(run func(context.Context) (ccip.Address, error)) *OnRampReader_Address_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Close provides a mock function with given fields:
+func (_m *OnRampReader) Close() error {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for Close")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func() error); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// OnRampReader_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
+type OnRampReader_Close_Call struct {
+ *mock.Call
+}
+
+// Close is a helper method to define mock.On call
+func (_e *OnRampReader_Expecter) Close() *OnRampReader_Close_Call {
+ return &OnRampReader_Close_Call{Call: _e.mock.On("Close")}
+}
+
+func (_c *OnRampReader_Close_Call) Run(run func()) *OnRampReader_Close_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *OnRampReader_Close_Call) Return(_a0 error) *OnRampReader_Close_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *OnRampReader_Close_Call) RunAndReturn(run func() error) *OnRampReader_Close_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetDynamicConfig provides a mock function with given fields: ctx
+func (_m *OnRampReader) GetDynamicConfig(ctx context.Context) (ccip.OnRampDynamicConfig, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetDynamicConfig")
+ }
+
+ var r0 ccip.OnRampDynamicConfig
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.OnRampDynamicConfig, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.OnRampDynamicConfig); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(ccip.OnRampDynamicConfig)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OnRampReader_GetDynamicConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDynamicConfig'
+type OnRampReader_GetDynamicConfig_Call struct {
+ *mock.Call
+}
+
+// GetDynamicConfig is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *OnRampReader_Expecter) GetDynamicConfig(ctx interface{}) *OnRampReader_GetDynamicConfig_Call {
+ return &OnRampReader_GetDynamicConfig_Call{Call: _e.mock.On("GetDynamicConfig", ctx)}
+}
+
+func (_c *OnRampReader_GetDynamicConfig_Call) Run(run func(ctx context.Context)) *OnRampReader_GetDynamicConfig_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *OnRampReader_GetDynamicConfig_Call) Return(_a0 ccip.OnRampDynamicConfig, _a1 error) *OnRampReader_GetDynamicConfig_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OnRampReader_GetDynamicConfig_Call) RunAndReturn(run func(context.Context) (ccip.OnRampDynamicConfig, error)) *OnRampReader_GetDynamicConfig_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetSendRequestsBetweenSeqNums provides a mock function with given fields: ctx, seqNumMin, seqNumMax, finalized
+func (_m *OnRampReader) GetSendRequestsBetweenSeqNums(ctx context.Context, seqNumMin uint64, seqNumMax uint64, finalized bool) ([]ccip.EVM2EVMMessageWithTxMeta, error) {
+ ret := _m.Called(ctx, seqNumMin, seqNumMax, finalized)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetSendRequestsBetweenSeqNums")
+ }
+
+ var r0 []ccip.EVM2EVMMessageWithTxMeta
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, bool) ([]ccip.EVM2EVMMessageWithTxMeta, error)); ok {
+ return rf(ctx, seqNumMin, seqNumMax, finalized)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, bool) []ccip.EVM2EVMMessageWithTxMeta); ok {
+ r0 = rf(ctx, seqNumMin, seqNumMax, finalized)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]ccip.EVM2EVMMessageWithTxMeta)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, bool) error); ok {
+ r1 = rf(ctx, seqNumMin, seqNumMax, finalized)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OnRampReader_GetSendRequestsBetweenSeqNums_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSendRequestsBetweenSeqNums'
+type OnRampReader_GetSendRequestsBetweenSeqNums_Call struct {
+ *mock.Call
+}
+
+// GetSendRequestsBetweenSeqNums is a helper method to define mock.On call
+// - ctx context.Context
+// - seqNumMin uint64
+// - seqNumMax uint64
+// - finalized bool
+func (_e *OnRampReader_Expecter) GetSendRequestsBetweenSeqNums(ctx interface{}, seqNumMin interface{}, seqNumMax interface{}, finalized interface{}) *OnRampReader_GetSendRequestsBetweenSeqNums_Call {
+ return &OnRampReader_GetSendRequestsBetweenSeqNums_Call{Call: _e.mock.On("GetSendRequestsBetweenSeqNums", ctx, seqNumMin, seqNumMax, finalized)}
+}
+
+func (_c *OnRampReader_GetSendRequestsBetweenSeqNums_Call) Run(run func(ctx context.Context, seqNumMin uint64, seqNumMax uint64, finalized bool)) *OnRampReader_GetSendRequestsBetweenSeqNums_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint64), args[2].(uint64), args[3].(bool))
+ })
+ return _c
+}
+
+func (_c *OnRampReader_GetSendRequestsBetweenSeqNums_Call) Return(_a0 []ccip.EVM2EVMMessageWithTxMeta, _a1 error) *OnRampReader_GetSendRequestsBetweenSeqNums_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OnRampReader_GetSendRequestsBetweenSeqNums_Call) RunAndReturn(run func(context.Context, uint64, uint64, bool) ([]ccip.EVM2EVMMessageWithTxMeta, error)) *OnRampReader_GetSendRequestsBetweenSeqNums_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// IsSourceChainHealthy provides a mock function with given fields: ctx
+func (_m *OnRampReader) IsSourceChainHealthy(ctx context.Context) (bool, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for IsSourceChainHealthy")
+ }
+
+ var r0 bool
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) bool); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(bool)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OnRampReader_IsSourceChainHealthy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsSourceChainHealthy'
+type OnRampReader_IsSourceChainHealthy_Call struct {
+ *mock.Call
+}
+
+// IsSourceChainHealthy is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *OnRampReader_Expecter) IsSourceChainHealthy(ctx interface{}) *OnRampReader_IsSourceChainHealthy_Call {
+ return &OnRampReader_IsSourceChainHealthy_Call{Call: _e.mock.On("IsSourceChainHealthy", ctx)}
+}
+
+func (_c *OnRampReader_IsSourceChainHealthy_Call) Run(run func(ctx context.Context)) *OnRampReader_IsSourceChainHealthy_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *OnRampReader_IsSourceChainHealthy_Call) Return(_a0 bool, _a1 error) *OnRampReader_IsSourceChainHealthy_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OnRampReader_IsSourceChainHealthy_Call) RunAndReturn(run func(context.Context) (bool, error)) *OnRampReader_IsSourceChainHealthy_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// IsSourceCursed provides a mock function with given fields: ctx
+func (_m *OnRampReader) IsSourceCursed(ctx context.Context) (bool, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for IsSourceCursed")
+ }
+
+ var r0 bool
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) bool); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(bool)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OnRampReader_IsSourceCursed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsSourceCursed'
+type OnRampReader_IsSourceCursed_Call struct {
+ *mock.Call
+}
+
+// IsSourceCursed is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *OnRampReader_Expecter) IsSourceCursed(ctx interface{}) *OnRampReader_IsSourceCursed_Call {
+ return &OnRampReader_IsSourceCursed_Call{Call: _e.mock.On("IsSourceCursed", ctx)}
+}
+
+func (_c *OnRampReader_IsSourceCursed_Call) Run(run func(ctx context.Context)) *OnRampReader_IsSourceCursed_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *OnRampReader_IsSourceCursed_Call) Return(_a0 bool, _a1 error) *OnRampReader_IsSourceCursed_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OnRampReader_IsSourceCursed_Call) RunAndReturn(run func(context.Context) (bool, error)) *OnRampReader_IsSourceCursed_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// RouterAddress provides a mock function with given fields: _a0
+func (_m *OnRampReader) RouterAddress(_a0 context.Context) (ccip.Address, error) {
+ ret := _m.Called(_a0)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RouterAddress")
+ }
+
+ var r0 ccip.Address
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.Address, error)); ok {
+ return rf(_a0)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.Address); ok {
+ r0 = rf(_a0)
+ } else {
+ r0 = ret.Get(0).(ccip.Address)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(_a0)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OnRampReader_RouterAddress_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RouterAddress'
+type OnRampReader_RouterAddress_Call struct {
+ *mock.Call
+}
+
+// RouterAddress is a helper method to define mock.On call
+// - _a0 context.Context
+func (_e *OnRampReader_Expecter) RouterAddress(_a0 interface{}) *OnRampReader_RouterAddress_Call {
+ return &OnRampReader_RouterAddress_Call{Call: _e.mock.On("RouterAddress", _a0)}
+}
+
+func (_c *OnRampReader_RouterAddress_Call) Run(run func(_a0 context.Context)) *OnRampReader_RouterAddress_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *OnRampReader_RouterAddress_Call) Return(_a0 ccip.Address, _a1 error) *OnRampReader_RouterAddress_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OnRampReader_RouterAddress_Call) RunAndReturn(run func(context.Context) (ccip.Address, error)) *OnRampReader_RouterAddress_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// SourcePriceRegistryAddress provides a mock function with given fields: ctx
+func (_m *OnRampReader) SourcePriceRegistryAddress(ctx context.Context) (ccip.Address, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for SourcePriceRegistryAddress")
+ }
+
+ var r0 ccip.Address
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.Address, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.Address); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(ccip.Address)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// OnRampReader_SourcePriceRegistryAddress_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SourcePriceRegistryAddress'
+type OnRampReader_SourcePriceRegistryAddress_Call struct {
+ *mock.Call
+}
+
+// SourcePriceRegistryAddress is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *OnRampReader_Expecter) SourcePriceRegistryAddress(ctx interface{}) *OnRampReader_SourcePriceRegistryAddress_Call {
+ return &OnRampReader_SourcePriceRegistryAddress_Call{Call: _e.mock.On("SourcePriceRegistryAddress", ctx)}
+}
+
+func (_c *OnRampReader_SourcePriceRegistryAddress_Call) Run(run func(ctx context.Context)) *OnRampReader_SourcePriceRegistryAddress_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *OnRampReader_SourcePriceRegistryAddress_Call) Return(_a0 ccip.Address, _a1 error) *OnRampReader_SourcePriceRegistryAddress_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *OnRampReader_SourcePriceRegistryAddress_Call) RunAndReturn(run func(context.Context) (ccip.Address, error)) *OnRampReader_SourcePriceRegistryAddress_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewOnRampReader creates a new instance of OnRampReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewOnRampReader(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *OnRampReader {
+ mock := &OnRampReader{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/price_registry_reader_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/price_registry_reader_mock.go
new file mode 100644
index 00000000000..94e354acb25
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/price_registry_reader_mock.go
@@ -0,0 +1,498 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package mocks
+
+import (
+ ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ context "context"
+
+ mock "github.com/stretchr/testify/mock"
+
+ time "time"
+)
+
+// PriceRegistryReader is an autogenerated mock type for the PriceRegistryReader type
+type PriceRegistryReader struct {
+ mock.Mock
+}
+
+type PriceRegistryReader_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *PriceRegistryReader) EXPECT() *PriceRegistryReader_Expecter {
+ return &PriceRegistryReader_Expecter{mock: &_m.Mock}
+}
+
+// Address provides a mock function with given fields: ctx
+func (_m *PriceRegistryReader) Address(ctx context.Context) (ccip.Address, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Address")
+ }
+
+ var r0 ccip.Address
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (ccip.Address, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) ccip.Address); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(ccip.Address)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// PriceRegistryReader_Address_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Address'
+type PriceRegistryReader_Address_Call struct {
+ *mock.Call
+}
+
+// Address is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *PriceRegistryReader_Expecter) Address(ctx interface{}) *PriceRegistryReader_Address_Call {
+ return &PriceRegistryReader_Address_Call{Call: _e.mock.On("Address", ctx)}
+}
+
+func (_c *PriceRegistryReader_Address_Call) Run(run func(ctx context.Context)) *PriceRegistryReader_Address_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *PriceRegistryReader_Address_Call) Return(_a0 ccip.Address, _a1 error) *PriceRegistryReader_Address_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PriceRegistryReader_Address_Call) RunAndReturn(run func(context.Context) (ccip.Address, error)) *PriceRegistryReader_Address_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Close provides a mock function with given fields:
+func (_m *PriceRegistryReader) Close() error {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for Close")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func() error); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// PriceRegistryReader_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
+type PriceRegistryReader_Close_Call struct {
+ *mock.Call
+}
+
+// Close is a helper method to define mock.On call
+func (_e *PriceRegistryReader_Expecter) Close() *PriceRegistryReader_Close_Call {
+ return &PriceRegistryReader_Close_Call{Call: _e.mock.On("Close")}
+}
+
+func (_c *PriceRegistryReader_Close_Call) Run(run func()) *PriceRegistryReader_Close_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PriceRegistryReader_Close_Call) Return(_a0 error) *PriceRegistryReader_Close_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PriceRegistryReader_Close_Call) RunAndReturn(run func() error) *PriceRegistryReader_Close_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetAllGasPriceUpdatesCreatedAfter provides a mock function with given fields: ctx, ts, confirmations
+func (_m *PriceRegistryReader) GetAllGasPriceUpdatesCreatedAfter(ctx context.Context, ts time.Time, confirmations int) ([]ccip.GasPriceUpdateWithTxMeta, error) {
+ ret := _m.Called(ctx, ts, confirmations)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetAllGasPriceUpdatesCreatedAfter")
+ }
+
+ var r0 []ccip.GasPriceUpdateWithTxMeta
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, time.Time, int) ([]ccip.GasPriceUpdateWithTxMeta, error)); ok {
+ return rf(ctx, ts, confirmations)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, time.Time, int) []ccip.GasPriceUpdateWithTxMeta); ok {
+ r0 = rf(ctx, ts, confirmations)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]ccip.GasPriceUpdateWithTxMeta)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, time.Time, int) error); ok {
+ r1 = rf(ctx, ts, confirmations)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllGasPriceUpdatesCreatedAfter'
+type PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call struct {
+ *mock.Call
+}
+
+// GetAllGasPriceUpdatesCreatedAfter is a helper method to define mock.On call
+// - ctx context.Context
+// - ts time.Time
+// - confirmations int
+func (_e *PriceRegistryReader_Expecter) GetAllGasPriceUpdatesCreatedAfter(ctx interface{}, ts interface{}, confirmations interface{}) *PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call {
+ return &PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call{Call: _e.mock.On("GetAllGasPriceUpdatesCreatedAfter", ctx, ts, confirmations)}
+}
+
+func (_c *PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call) Run(run func(ctx context.Context, ts time.Time, confirmations int)) *PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(time.Time), args[2].(int))
+ })
+ return _c
+}
+
+func (_c *PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call) Return(_a0 []ccip.GasPriceUpdateWithTxMeta, _a1 error) *PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call) RunAndReturn(run func(context.Context, time.Time, int) ([]ccip.GasPriceUpdateWithTxMeta, error)) *PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetFeeTokens provides a mock function with given fields: ctx
+func (_m *PriceRegistryReader) GetFeeTokens(ctx context.Context) ([]ccip.Address, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetFeeTokens")
+ }
+
+ var r0 []ccip.Address
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) ([]ccip.Address, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) []ccip.Address); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]ccip.Address)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// PriceRegistryReader_GetFeeTokens_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFeeTokens'
+type PriceRegistryReader_GetFeeTokens_Call struct {
+ *mock.Call
+}
+
+// GetFeeTokens is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *PriceRegistryReader_Expecter) GetFeeTokens(ctx interface{}) *PriceRegistryReader_GetFeeTokens_Call {
+ return &PriceRegistryReader_GetFeeTokens_Call{Call: _e.mock.On("GetFeeTokens", ctx)}
+}
+
+func (_c *PriceRegistryReader_GetFeeTokens_Call) Run(run func(ctx context.Context)) *PriceRegistryReader_GetFeeTokens_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *PriceRegistryReader_GetFeeTokens_Call) Return(_a0 []ccip.Address, _a1 error) *PriceRegistryReader_GetFeeTokens_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PriceRegistryReader_GetFeeTokens_Call) RunAndReturn(run func(context.Context) ([]ccip.Address, error)) *PriceRegistryReader_GetFeeTokens_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetGasPriceUpdatesCreatedAfter provides a mock function with given fields: ctx, chainSelector, ts, confirmations
+func (_m *PriceRegistryReader) GetGasPriceUpdatesCreatedAfter(ctx context.Context, chainSelector uint64, ts time.Time, confirmations int) ([]ccip.GasPriceUpdateWithTxMeta, error) {
+ ret := _m.Called(ctx, chainSelector, ts, confirmations)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetGasPriceUpdatesCreatedAfter")
+ }
+
+ var r0 []ccip.GasPriceUpdateWithTxMeta
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, time.Time, int) ([]ccip.GasPriceUpdateWithTxMeta, error)); ok {
+ return rf(ctx, chainSelector, ts, confirmations)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, time.Time, int) []ccip.GasPriceUpdateWithTxMeta); ok {
+ r0 = rf(ctx, chainSelector, ts, confirmations)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]ccip.GasPriceUpdateWithTxMeta)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64, time.Time, int) error); ok {
+ r1 = rf(ctx, chainSelector, ts, confirmations)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGasPriceUpdatesCreatedAfter'
+type PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call struct {
+ *mock.Call
+}
+
+// GetGasPriceUpdatesCreatedAfter is a helper method to define mock.On call
+// - ctx context.Context
+// - chainSelector uint64
+// - ts time.Time
+// - confirmations int
+func (_e *PriceRegistryReader_Expecter) GetGasPriceUpdatesCreatedAfter(ctx interface{}, chainSelector interface{}, ts interface{}, confirmations interface{}) *PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call {
+ return &PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call{Call: _e.mock.On("GetGasPriceUpdatesCreatedAfter", ctx, chainSelector, ts, confirmations)}
+}
+
+func (_c *PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call) Run(run func(ctx context.Context, chainSelector uint64, ts time.Time, confirmations int)) *PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint64), args[2].(time.Time), args[3].(int))
+ })
+ return _c
+}
+
+func (_c *PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call) Return(_a0 []ccip.GasPriceUpdateWithTxMeta, _a1 error) *PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call) RunAndReturn(run func(context.Context, uint64, time.Time, int) ([]ccip.GasPriceUpdateWithTxMeta, error)) *PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetTokenPriceUpdatesCreatedAfter provides a mock function with given fields: ctx, ts, confirmations
+func (_m *PriceRegistryReader) GetTokenPriceUpdatesCreatedAfter(ctx context.Context, ts time.Time, confirmations int) ([]ccip.TokenPriceUpdateWithTxMeta, error) {
+ ret := _m.Called(ctx, ts, confirmations)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetTokenPriceUpdatesCreatedAfter")
+ }
+
+ var r0 []ccip.TokenPriceUpdateWithTxMeta
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, time.Time, int) ([]ccip.TokenPriceUpdateWithTxMeta, error)); ok {
+ return rf(ctx, ts, confirmations)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, time.Time, int) []ccip.TokenPriceUpdateWithTxMeta); ok {
+ r0 = rf(ctx, ts, confirmations)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]ccip.TokenPriceUpdateWithTxMeta)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, time.Time, int) error); ok {
+ r1 = rf(ctx, ts, confirmations)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetTokenPriceUpdatesCreatedAfter'
+type PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call struct {
+ *mock.Call
+}
+
+// GetTokenPriceUpdatesCreatedAfter is a helper method to define mock.On call
+// - ctx context.Context
+// - ts time.Time
+// - confirmations int
+func (_e *PriceRegistryReader_Expecter) GetTokenPriceUpdatesCreatedAfter(ctx interface{}, ts interface{}, confirmations interface{}) *PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call {
+ return &PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call{Call: _e.mock.On("GetTokenPriceUpdatesCreatedAfter", ctx, ts, confirmations)}
+}
+
+func (_c *PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call) Run(run func(ctx context.Context, ts time.Time, confirmations int)) *PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(time.Time), args[2].(int))
+ })
+ return _c
+}
+
+func (_c *PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call) Return(_a0 []ccip.TokenPriceUpdateWithTxMeta, _a1 error) *PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call) RunAndReturn(run func(context.Context, time.Time, int) ([]ccip.TokenPriceUpdateWithTxMeta, error)) *PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetTokenPrices provides a mock function with given fields: ctx, wantedTokens
+func (_m *PriceRegistryReader) GetTokenPrices(ctx context.Context, wantedTokens []ccip.Address) ([]ccip.TokenPriceUpdate, error) {
+ ret := _m.Called(ctx, wantedTokens)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetTokenPrices")
+ }
+
+ var r0 []ccip.TokenPriceUpdate
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) ([]ccip.TokenPriceUpdate, error)); ok {
+ return rf(ctx, wantedTokens)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) []ccip.TokenPriceUpdate); ok {
+ r0 = rf(ctx, wantedTokens)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]ccip.TokenPriceUpdate)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, []ccip.Address) error); ok {
+ r1 = rf(ctx, wantedTokens)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// PriceRegistryReader_GetTokenPrices_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetTokenPrices'
+type PriceRegistryReader_GetTokenPrices_Call struct {
+ *mock.Call
+}
+
+// GetTokenPrices is a helper method to define mock.On call
+// - ctx context.Context
+// - wantedTokens []ccip.Address
+func (_e *PriceRegistryReader_Expecter) GetTokenPrices(ctx interface{}, wantedTokens interface{}) *PriceRegistryReader_GetTokenPrices_Call {
+ return &PriceRegistryReader_GetTokenPrices_Call{Call: _e.mock.On("GetTokenPrices", ctx, wantedTokens)}
+}
+
+func (_c *PriceRegistryReader_GetTokenPrices_Call) Run(run func(ctx context.Context, wantedTokens []ccip.Address)) *PriceRegistryReader_GetTokenPrices_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].([]ccip.Address))
+ })
+ return _c
+}
+
+func (_c *PriceRegistryReader_GetTokenPrices_Call) Return(_a0 []ccip.TokenPriceUpdate, _a1 error) *PriceRegistryReader_GetTokenPrices_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PriceRegistryReader_GetTokenPrices_Call) RunAndReturn(run func(context.Context, []ccip.Address) ([]ccip.TokenPriceUpdate, error)) *PriceRegistryReader_GetTokenPrices_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetTokensDecimals provides a mock function with given fields: ctx, tokenAddresses
+func (_m *PriceRegistryReader) GetTokensDecimals(ctx context.Context, tokenAddresses []ccip.Address) ([]uint8, error) {
+ ret := _m.Called(ctx, tokenAddresses)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetTokensDecimals")
+ }
+
+ var r0 []uint8
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) ([]uint8, error)); ok {
+ return rf(ctx, tokenAddresses)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) []uint8); ok {
+ r0 = rf(ctx, tokenAddresses)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]uint8)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, []ccip.Address) error); ok {
+ r1 = rf(ctx, tokenAddresses)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// PriceRegistryReader_GetTokensDecimals_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetTokensDecimals'
+type PriceRegistryReader_GetTokensDecimals_Call struct {
+ *mock.Call
+}
+
+// GetTokensDecimals is a helper method to define mock.On call
+// - ctx context.Context
+// - tokenAddresses []ccip.Address
+func (_e *PriceRegistryReader_Expecter) GetTokensDecimals(ctx interface{}, tokenAddresses interface{}) *PriceRegistryReader_GetTokensDecimals_Call {
+ return &PriceRegistryReader_GetTokensDecimals_Call{Call: _e.mock.On("GetTokensDecimals", ctx, tokenAddresses)}
+}
+
+func (_c *PriceRegistryReader_GetTokensDecimals_Call) Run(run func(ctx context.Context, tokenAddresses []ccip.Address)) *PriceRegistryReader_GetTokensDecimals_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].([]ccip.Address))
+ })
+ return _c
+}
+
+func (_c *PriceRegistryReader_GetTokensDecimals_Call) Return(_a0 []uint8, _a1 error) *PriceRegistryReader_GetTokensDecimals_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PriceRegistryReader_GetTokensDecimals_Call) RunAndReturn(run func(context.Context, []ccip.Address) ([]uint8, error)) *PriceRegistryReader_GetTokensDecimals_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewPriceRegistryReader creates a new instance of PriceRegistryReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewPriceRegistryReader(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *PriceRegistryReader {
+ mock := &PriceRegistryReader{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/token_pool_reader_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/token_pool_reader_mock.go
new file mode 100644
index 00000000000..0bb23b9cc23
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/token_pool_reader_mock.go
@@ -0,0 +1,127 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package mocks
+
+import (
+ common "github.com/ethereum/go-ethereum/common"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// TokenPoolReader is an autogenerated mock type for the TokenPoolReader type
+type TokenPoolReader struct {
+ mock.Mock
+}
+
+type TokenPoolReader_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *TokenPoolReader) EXPECT() *TokenPoolReader_Expecter {
+ return &TokenPoolReader_Expecter{mock: &_m.Mock}
+}
+
+// Address provides a mock function with given fields:
+func (_m *TokenPoolReader) Address() common.Address {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for Address")
+ }
+
+ var r0 common.Address
+ if rf, ok := ret.Get(0).(func() common.Address); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(common.Address)
+ }
+ }
+
+ return r0
+}
+
+// TokenPoolReader_Address_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Address'
+type TokenPoolReader_Address_Call struct {
+ *mock.Call
+}
+
+// Address is a helper method to define mock.On call
+func (_e *TokenPoolReader_Expecter) Address() *TokenPoolReader_Address_Call {
+ return &TokenPoolReader_Address_Call{Call: _e.mock.On("Address")}
+}
+
+func (_c *TokenPoolReader_Address_Call) Run(run func()) *TokenPoolReader_Address_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *TokenPoolReader_Address_Call) Return(_a0 common.Address) *TokenPoolReader_Address_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *TokenPoolReader_Address_Call) RunAndReturn(run func() common.Address) *TokenPoolReader_Address_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Type provides a mock function with given fields:
+func (_m *TokenPoolReader) Type() string {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for Type")
+ }
+
+ var r0 string
+ if rf, ok := ret.Get(0).(func() string); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ return r0
+}
+
+// TokenPoolReader_Type_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Type'
+type TokenPoolReader_Type_Call struct {
+ *mock.Call
+}
+
+// Type is a helper method to define mock.On call
+func (_e *TokenPoolReader_Expecter) Type() *TokenPoolReader_Type_Call {
+ return &TokenPoolReader_Type_Call{Call: _e.mock.On("Type")}
+}
+
+func (_c *TokenPoolReader_Type_Call) Run(run func()) *TokenPoolReader_Type_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *TokenPoolReader_Type_Call) Return(_a0 string) *TokenPoolReader_Type_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *TokenPoolReader_Type_Call) RunAndReturn(run func() string) *TokenPoolReader_Type_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewTokenPoolReader creates a new instance of TokenPoolReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewTokenPoolReader(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *TokenPoolReader {
+ mock := &TokenPoolReader{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/usdc_reader_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/usdc_reader_mock.go
new file mode 100644
index 00000000000..ac72d599923
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/usdc_reader_mock.go
@@ -0,0 +1,97 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// USDCReader is an autogenerated mock type for the USDCReader type
+type USDCReader struct {
+ mock.Mock
+}
+
+type USDCReader_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *USDCReader) EXPECT() *USDCReader_Expecter {
+ return &USDCReader_Expecter{mock: &_m.Mock}
+}
+
+// GetUSDCMessagePriorToLogIndexInTx provides a mock function with given fields: ctx, logIndex, usdcTokenIndexOffset, txHash
+func (_m *USDCReader) GetUSDCMessagePriorToLogIndexInTx(ctx context.Context, logIndex int64, usdcTokenIndexOffset int, txHash string) ([]byte, error) {
+ ret := _m.Called(ctx, logIndex, usdcTokenIndexOffset, txHash)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetUSDCMessagePriorToLogIndexInTx")
+ }
+
+ var r0 []byte
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64, int, string) ([]byte, error)); ok {
+ return rf(ctx, logIndex, usdcTokenIndexOffset, txHash)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64, int, string) []byte); ok {
+ r0 = rf(ctx, logIndex, usdcTokenIndexOffset, txHash)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]byte)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, int64, int, string) error); ok {
+ r1 = rf(ctx, logIndex, usdcTokenIndexOffset, txHash)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUSDCMessagePriorToLogIndexInTx'
+type USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call struct {
+ *mock.Call
+}
+
+// GetUSDCMessagePriorToLogIndexInTx is a helper method to define mock.On call
+// - ctx context.Context
+// - logIndex int64
+// - usdcTokenIndexOffset int
+// - txHash string
+func (_e *USDCReader_Expecter) GetUSDCMessagePriorToLogIndexInTx(ctx interface{}, logIndex interface{}, usdcTokenIndexOffset interface{}, txHash interface{}) *USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call {
+ return &USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call{Call: _e.mock.On("GetUSDCMessagePriorToLogIndexInTx", ctx, logIndex, usdcTokenIndexOffset, txHash)}
+}
+
+func (_c *USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call) Run(run func(ctx context.Context, logIndex int64, usdcTokenIndexOffset int, txHash string)) *USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64), args[2].(int), args[3].(string))
+ })
+ return _c
+}
+
+func (_c *USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call) Return(_a0 []byte, _a1 error) *USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call) RunAndReturn(run func(context.Context, int64, int, string) ([]byte, error)) *USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewUSDCReader creates a new instance of USDCReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewUSDCReader(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *USDCReader {
+ mock := &USDCReader{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader.go
new file mode 100644
index 00000000000..c3bad6235b3
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader.go
@@ -0,0 +1,13 @@
+package ccipdata
+
+import (
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+)
+
+const (
+ ManuallyExecute = "manuallyExecute"
+)
+
+type OffRampReader interface {
+ cciptypes.OffRampReader
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader_test.go
new file mode 100644
index 00000000000..6f14fb8559c
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader_test.go
@@ -0,0 +1,416 @@
+package ccipdata_test
+
+import (
+ "math/big"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ evmclientmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_helper"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_arm_contract"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+)
+
+type offRampReaderTH struct {
+ user *bind.TransactOpts
+ reader ccipdata.OffRampReader
+}
+
+func TestExecOnchainConfig100(t *testing.T) {
+ tests := []struct {
+ name string
+ want v1_0_0.ExecOnchainConfig
+ expectErr bool
+ }{
+ {
+ name: "encodes and decodes config with all fields set",
+ want: v1_0_0.ExecOnchainConfig{
+ PermissionLessExecutionThresholdSeconds: rand.Uint32(),
+ Router: utils.RandomAddress(),
+ PriceRegistry: utils.RandomAddress(),
+ MaxTokensLength: uint16(rand.Uint32()),
+ MaxDataSize: rand.Uint32(),
+ },
+ },
+ {
+ name: "encodes and fails decoding config with missing fields",
+ want: v1_0_0.ExecOnchainConfig{
+ PermissionLessExecutionThresholdSeconds: rand.Uint32(),
+ MaxDataSize: rand.Uint32(),
+ },
+ expectErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ encoded, err := abihelpers.EncodeAbiStruct(tt.want)
+ require.NoError(t, err)
+
+ decoded, err := abihelpers.DecodeAbiStruct[v1_0_0.ExecOnchainConfig](encoded)
+ if tt.expectErr {
+ require.ErrorContains(t, err, "must set")
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tt.want, decoded)
+ }
+ })
+ }
+}
+
+func TestExecOnchainConfig120(t *testing.T) {
+ tests := []struct {
+ name string
+ want v1_2_0.ExecOnchainConfig
+ expectErr bool
+ }{
+ {
+ name: "encodes and decodes config with all fields set",
+ want: v1_2_0.ExecOnchainConfig{
+ PermissionLessExecutionThresholdSeconds: rand.Uint32(),
+ Router: utils.RandomAddress(),
+ PriceRegistry: utils.RandomAddress(),
+ MaxNumberOfTokensPerMsg: uint16(rand.Uint32()),
+ MaxDataBytes: rand.Uint32(),
+ MaxPoolReleaseOrMintGas: rand.Uint32(),
+ },
+ },
+ {
+ name: "encodes and fails decoding config with missing fields",
+ want: v1_2_0.ExecOnchainConfig{
+ PermissionLessExecutionThresholdSeconds: rand.Uint32(),
+ MaxDataBytes: rand.Uint32(),
+ },
+ expectErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ encoded, err := abihelpers.EncodeAbiStruct(tt.want)
+ require.NoError(t, err)
+
+ decoded, err := abihelpers.DecodeAbiStruct[v1_2_0.ExecOnchainConfig](encoded)
+ if tt.expectErr {
+ require.ErrorContains(t, err, "must set")
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tt.want, decoded)
+ }
+ })
+ }
+}
+
+func TestOffRampReaderInit(t *testing.T) {
+ tests := []struct {
+ name string
+ version string
+ }{
+ {
+ name: "OffRampReader_V1_0_0",
+ version: ccipdata.V1_0_0,
+ },
+ {
+ name: "OffRampReader_V1_1_0",
+ version: ccipdata.V1_1_0,
+ },
+ {
+ name: "OffRampReader_V1_2_0",
+ version: ccipdata.V1_2_0,
+ },
+ {
+ name: "OffRampReader_V1_5_0",
+ version: ccipdata.V1_5_0,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ th := setupOffRampReaderTH(t, test.version)
+ testOffRampReader(t, th)
+ })
+ }
+}
+
+// setupOffRampReaderTH deploys the version-specific OffRamp contract on a
+// fresh simulated backend, wires a log poller over a throwaway test DB, and
+// returns a harness holding the deployer and a version-appropriate reader.
+func setupOffRampReaderTH(t *testing.T, version string) offRampReaderTH {
+	ctx := testutils.Context(t)
+	user, bc := ccipdata.NewSimulation(t)
+	log := logger.TestLogger(t)
+	orm := logpoller.NewORM(testutils.SimulatedChainID, pgtest.NewSqlxDB(t), log)
+	lpOpts := logpoller.Opts{
+		PollPeriod:               100 * time.Millisecond,
+		FinalityDepth:            2,
+		BackfillBatchSize:        3,
+		RpcBatchSize:             2,
+		KeepFinalizedBlocksDepth: 1000,
+	}
+	headTracker := headtracker.NewSimulatedHeadTracker(bc, lpOpts.UseFinalityTag, lpOpts.FinalityDepth)
+	// NOTE(review): dead branch — PollPeriod is hard-coded to 100ms above,
+	// so this fallback can never apply.
+	if lpOpts.PollPeriod == 0 {
+		lpOpts.PollPeriod = 1 * time.Hour
+	}
+	lp := logpoller.NewLogPoller(
+		orm,
+		bc,
+		log,
+		headTracker,
+		lpOpts)
+	// Seed the log poller ORM with an initial block so reader queries have
+	// a starting point.
+	assert.NoError(t, orm.InsertBlock(ctx, common.Hash{}, 1, time.Now(), 1))
+	// Setup offRamp.
+	var offRampAddress common.Address
+	switch version {
+	case ccipdata.V1_0_0:
+		offRampAddress = setupOffRampV1_0_0(t, user, bc)
+	case ccipdata.V1_1_0:
+		// Version 1.1.0 uses the same contracts as 1.0.0.
+		offRampAddress = setupOffRampV1_0_0(t, user, bc)
+	case ccipdata.V1_2_0:
+		offRampAddress = setupOffRampV1_2_0(t, user, bc)
+	case ccipdata.V1_5_0:
+		offRampAddress = setupOffRampV1_5_0(t, user, bc)
+	default:
+		require.Fail(t, "Unknown version: ", version)
+	}
+
+	// Create the version-specific reader and sanity-check that it reports
+	// the address it was constructed with.
+	reader, err := factory.NewOffRampReader(log, factory.NewEvmVersionFinder(), ccipcalc.EvmAddrToGeneric(offRampAddress), bc, lp, nil, nil, true)
+	require.NoError(t, err)
+	addr, err := reader.Address(ctx)
+	require.NoError(t, err)
+	require.Equal(t, ccipcalc.EvmAddrToGeneric(offRampAddress), addr)
+
+	return offRampReaderTH{
+		user:   user,
+		reader: reader,
+	}
+}
+
+// setupOffRampV1_0_0 deploys a v1.0.0 EVM2EVMOffRamp (plus its mock ARM and
+// CommitStore dependencies) on the simulated backend, verifies its reported
+// type-and-version, and returns its address.
+func setupOffRampV1_0_0(t *testing.T, user *bind.TransactOpts, bc *client.SimulatedBackendClient) common.Address {
+	onRampAddr := utils.RandomAddress()
+	armAddr := deployMockArm(t, user, bc)
+	csAddr := deployCommitStore(t, user, bc, onRampAddr, armAddr)
+
+	// Deploy the OffRamp.
+	staticConfig := evm_2_evm_offramp_1_0_0.EVM2EVMOffRampStaticConfig{
+		CommitStore:         csAddr,
+		ChainSelector:       testutils.SimulatedChainID.Uint64(),
+		SourceChainSelector: testutils.SimulatedChainID.Uint64(),
+		OnRamp:              onRampAddr,
+		PrevOffRamp:         common.Address{},
+		ArmProxy:            armAddr,
+	}
+	sourceTokens := []common.Address{}
+	pools := []common.Address{}
+	rateLimiterConfig := evm_2_evm_offramp_1_0_0.RateLimiterConfig{
+		IsEnabled: false,
+		Capacity:  big.NewInt(0),
+		Rate:      big.NewInt(0),
+	}
+
+	offRampAddr, tx, offRamp, err := evm_2_evm_offramp_1_0_0.DeployEVM2EVMOffRamp(user, bc, staticConfig, sourceTokens, pools, rateLimiterConfig)
+	// Fail fast on deployment errors before mining the block, matching the
+	// err-then-Commit ordering used by deployCommitStore.
+	require.NoError(t, err)
+	bc.Commit()
+	ccipdata.AssertNonRevert(t, tx, bc, user)
+
+	// Verify the deployed OffRamp reports the expected type and version.
+	tav, err := offRamp.TypeAndVersion(&bind.CallOpts{
+		Context: testutils.Context(t),
+	})
+	require.NoError(t, err)
+	require.Equal(t, "EVM2EVMOffRamp 1.0.0", tav)
+	return offRampAddr
+}
+
+// setupOffRampV1_2_0 deploys a v1.2.0 EVM2EVMOffRamp (plus its mock ARM and
+// CommitStore dependencies) on the simulated backend, verifies its reported
+// type-and-version, and returns its address.
+func setupOffRampV1_2_0(t *testing.T, user *bind.TransactOpts, bc *client.SimulatedBackendClient) common.Address {
+	onRampAddr := utils.RandomAddress()
+	armAddr := deployMockArm(t, user, bc)
+	csAddr := deployCommitStore(t, user, bc, onRampAddr, armAddr)
+
+	// Deploy the OffRamp.
+	staticConfig := evm_2_evm_offramp_1_2_0.EVM2EVMOffRampStaticConfig{
+		CommitStore:         csAddr,
+		ChainSelector:       testutils.SimulatedChainID.Uint64(),
+		SourceChainSelector: testutils.SimulatedChainID.Uint64(),
+		OnRamp:              onRampAddr,
+		PrevOffRamp:         common.Address{},
+		ArmProxy:            armAddr,
+	}
+	sourceTokens := []common.Address{}
+	pools := []common.Address{}
+	rateLimiterConfig := evm_2_evm_offramp_1_2_0.RateLimiterConfig{
+		IsEnabled: false,
+		Capacity:  big.NewInt(0),
+		Rate:      big.NewInt(0),
+	}
+
+	offRampAddr, tx, offRamp, err := evm_2_evm_offramp_1_2_0.DeployEVM2EVMOffRamp(user, bc, staticConfig, sourceTokens, pools, rateLimiterConfig)
+	// Fail fast on deployment errors before mining the block, matching the
+	// err-then-Commit ordering used by deployCommitStore.
+	require.NoError(t, err)
+	bc.Commit()
+	ccipdata.AssertNonRevert(t, tx, bc, user)
+
+	// Verify the deployed OffRamp reports the expected type and version.
+	tav, err := offRamp.TypeAndVersion(&bind.CallOpts{
+		Context: testutils.Context(t),
+	})
+	require.NoError(t, err)
+	require.Equal(t, "EVM2EVMOffRamp 1.2.0", tav)
+	return offRampAddr
+}
+
+// setupOffRampV1_5_0 deploys a v1.5.0(-dev) EVM2EVMOffRamp (plus its mock
+// RMN and CommitStore dependencies) on the simulated backend, verifies its
+// reported type-and-version, and returns its address.
+func setupOffRampV1_5_0(t *testing.T, user *bind.TransactOpts, bc *client.SimulatedBackendClient) common.Address {
+	onRampAddr := utils.RandomAddress()
+	tokenAdminRegAddr := utils.RandomAddress()
+	rmnAddr := deployMockArm(t, user, bc)
+	csAddr := deployCommitStore(t, user, bc, onRampAddr, rmnAddr)
+
+	// Deploy the OffRamp.
+	staticConfig := evm_2_evm_offramp.EVM2EVMOffRampStaticConfig{
+		CommitStore:         csAddr,
+		ChainSelector:       testutils.SimulatedChainID.Uint64(),
+		SourceChainSelector: testutils.SimulatedChainID.Uint64(),
+		OnRamp:              onRampAddr,
+		PrevOffRamp:         common.Address{},
+		RmnProxy:            rmnAddr,
+		TokenAdminRegistry:  tokenAdminRegAddr,
+	}
+	rateLimiterConfig := evm_2_evm_offramp.RateLimiterConfig{
+		IsEnabled: false,
+		Capacity:  big.NewInt(0),
+		Rate:      big.NewInt(0),
+	}
+
+	offRampAddr, tx, offRamp, err := evm_2_evm_offramp.DeployEVM2EVMOffRamp(user, bc, staticConfig, rateLimiterConfig)
+	// Fail fast on deployment errors before mining the block, matching the
+	// err-then-Commit ordering used by deployCommitStore.
+	require.NoError(t, err)
+	bc.Commit()
+	ccipdata.AssertNonRevert(t, tx, bc, user)
+
+	// Verify the deployed OffRamp reports the expected type and version.
+	tav, err := offRamp.TypeAndVersion(&bind.CallOpts{
+		Context: testutils.Context(t),
+	})
+	require.NoError(t, err)
+	require.Equal(t, "EVM2EVMOffRamp 1.5.0-dev", tav)
+	return offRampAddr
+}
+
+// deployMockArm deploys a MockARMContract (the ARM/RMN stand-in used by
+// these tests) on the simulated backend and returns its non-zero address.
+func deployMockArm(t *testing.T, user *bind.TransactOpts, bc *client.SimulatedBackendClient) common.Address {
+	addr, tx, _, err := mock_arm_contract.DeployMockARMContract(user, bc)
+	require.NoError(t, err)
+	bc.Commit()
+	ccipdata.AssertNonRevert(t, tx, bc, user)
+	require.NotEqual(t, common.Address{}, addr)
+	return addr
+}
+
+// deployCommitStore deploys a CommitStore (via its helper wrapper) pointing
+// at the given onramp and ARM proxy, verifies its reported type-and-version,
+// and returns its address. The same CommitStore version backs every OffRamp
+// version under test.
+func deployCommitStore(
+	t *testing.T,
+	user *bind.TransactOpts,
+	bc *client.SimulatedBackendClient,
+	onRampAddress common.Address,
+	armAddress common.Address,
+) common.Address {
+	staticCfg := commit_store_helper.CommitStoreStaticConfig{
+		ChainSelector:       testutils.SimulatedChainID.Uint64(),
+		SourceChainSelector: testutils.SimulatedChainID.Uint64(),
+		OnRamp:              onRampAddress,
+		RmnProxy:            armAddress,
+	}
+	addr, tx, store, err := commit_store_helper.DeployCommitStoreHelper(user, bc, staticCfg)
+	require.NoError(t, err)
+	bc.Commit()
+	ccipdata.AssertNonRevert(t, tx, bc, user)
+
+	// Confirm the freshly-deployed contract identifies itself correctly.
+	tav, err := store.TypeAndVersion(&bind.CallOpts{Context: testutils.Context(t)})
+	require.NoError(t, err)
+	require.Equal(t, "CommitStore 1.5.0-dev", tav)
+	return addr
+}
+
+// testOffRampReader asserts the common read paths of a freshly-deployed
+// OffRamp: no configured destination tokens, no execution-state-change
+// events in the queried window, and an empty source-to-dest token mapping.
+func testOffRampReader(t *testing.T, th offRampReaderTH) {
+	// NOTE(review): assumes the deployer's TransactOpts carries a usable
+	// context — confirm it is non-nil for these simulated-backend tests.
+	ctx := th.user.Context
+	tokens, err := th.reader.GetTokens(ctx)
+	require.NoError(t, err)
+	require.Equal(t, []cciptypes.Address{}, tokens.DestinationTokens)
+
+	events, err := th.reader.GetExecutionStateChangesBetweenSeqNums(ctx, 0, 10, 0)
+	require.NoError(t, err)
+	require.Equal(t, []cciptypes.ExecutionStateChangedWithTxMeta{}, events)
+
+	sourceToDestTokens, err := th.reader.GetSourceToDestTokensMapping(ctx)
+	require.NoError(t, err)
+	require.Empty(t, sourceToDestTokens)
+}
+
+// TestNewOffRampReader exercises the factory's type-and-version dispatch by
+// mocking the on-chain typeAndVersion() response and checking the returned
+// error (or lack of one) for each case.
+func TestNewOffRampReader(t *testing.T) {
+	var tt = []struct {
+		typeAndVersion string
+		expectedErr    string
+	}{
+		{
+			typeAndVersion: "blah",
+			expectedErr:    "unable to read type and version: invalid type and version blah",
+		},
+		{
+			typeAndVersion: "CommitStore 1.0.0",
+			expectedErr:    "expected EVM2EVMOffRamp got CommitStore",
+		},
+		{
+			typeAndVersion: "EVM2EVMOffRamp 1.2.0",
+			expectedErr:    "",
+		},
+		{
+			typeAndVersion: "EVM2EVMOffRamp 2.0.0",
+			expectedErr:    "unsupported offramp version 2.0.0",
+		},
+	}
+	for _, tc := range tt {
+		t.Run(tc.typeAndVersion, func(t *testing.T) {
+			// ABI-encode the mocked typeAndVersion() return value.
+			b, err := utils.ABIEncode(`[{"type":"string"}]`, tc.typeAndVersion)
+			require.NoError(t, err)
+			c := evmclientmocks.NewClient(t)
+			c.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(b, nil)
+			addr := ccipcalc.EvmAddrToGeneric(utils.RandomAddress())
+			lp := lpmocks.NewLogPoller(t)
+			lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil).Maybe()
+			_, err = factory.NewOffRampReader(logger.TestLogger(t), factory.NewEvmVersionFinder(), addr, c, lp, nil, nil, true)
+			// Use require (not assert) for consistency with TestNewOnRampReader.
+			if tc.expectedErr != "" {
+				require.EqualError(t, err, tc.expectedErr)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader.go
new file mode 100644
index 00000000000..e2571de57f6
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader.go
@@ -0,0 +1,21 @@
+package ccipdata
+
+import (
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+)
+
+// LeafHasherInterface hashes a raw onramp send log into a typed hash H —
+// presumably the CCIP merkle-tree leaf; confirm against implementations.
+type LeafHasherInterface[H hashutil.Hash] interface {
+	HashLeaf(log types.Log) (H, error)
+}
+
+// Human-readable labels for onramp-related log subscriptions.
+// NOTE(review): exact consumers are not visible in this file — verify usage.
+const (
+	COMMIT_CCIP_SENDS = "Commit ccip sends"
+	CONFIG_CHANGED    = "Dynamic config changed"
+)
+
+// OnRampReader is the EVM-local alias of the chain-agnostic OnRampReader.
+type OnRampReader interface {
+	cciptypes.OnRampReader
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader_test.go
new file mode 100644
index 00000000000..9cfe3f628c0
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader_test.go
@@ -0,0 +1,479 @@
+package ccipdata_test
+
+import (
+ "fmt"
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ evmclientmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_1_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory"
+)
+
+// onRampReaderTH bundles the simulated-backend deployer with the OnRamp
+// reader under test.
+type onRampReaderTH struct {
+	user   *bind.TransactOpts    // deployer / transactor on the simulated backend
+	reader ccipdata.OnRampReader // version-specific reader created by the factory
+}
+
+// TestNewOnRampReader_noContractAtAddress verifies the factory surfaces a
+// clear error when the target address has no deployed contract code.
+func TestNewOnRampReader_noContractAtAddress(t *testing.T) {
+	_, bc := ccipdata.NewSimulation(t)
+	addr := ccipcalc.EvmAddrToGeneric(utils.RandomAddress())
+	_, err := factory.NewOnRampReader(logger.TestLogger(t), factory.NewEvmVersionFinder(), testutils.SimulatedChainID.Uint64(), testutils.SimulatedChainID.Uint64(), addr, lpmocks.NewLogPoller(t), bc)
+	assert.EqualError(t, err, fmt.Sprintf("unable to read type and version: error calling typeAndVersion on addr: %s no contract code at given address", addr))
+}
+
+// TestOnRampReaderInit deploys each supported OnRamp version on a simulated
+// backend and runs the version-specific reader assertions against it.
+func TestOnRampReaderInit(t *testing.T) {
+	cases := []struct {
+		name    string
+		version string
+	}{
+		{name: "OnRampReader_V1_0_0", version: ccipdata.V1_0_0},
+		{name: "OnRampReader_V1_1_0", version: ccipdata.V1_1_0},
+		{name: "OnRampReader_V1_2_0", version: ccipdata.V1_2_0},
+		{name: "OnRampReader_V1_5_0", version: ccipdata.V1_5_0},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			testVersionSpecificOnRampReader(t, setupOnRampReaderTH(t, tc.version), tc.version)
+		})
+	}
+}
+
+// setupOnRampReaderTH deploys the version-specific OnRamp contract on a
+// fresh simulated backend, wires a log poller over a throwaway test DB, and
+// returns a harness holding the deployer and a version-appropriate reader.
+func setupOnRampReaderTH(t *testing.T, version string) onRampReaderTH {
+	user, bc := ccipdata.NewSimulation(t)
+	log := logger.TestLogger(t)
+	orm := logpoller.NewORM(testutils.SimulatedChainID, pgtest.NewSqlxDB(t), log)
+	lpOpts := logpoller.Opts{
+		PollPeriod:               100 * time.Millisecond,
+		FinalityDepth:            2,
+		BackfillBatchSize:        3,
+		RpcBatchSize:             2,
+		KeepFinalizedBlocksDepth: 1000,
+	}
+	headTracker := headtracker.NewSimulatedHeadTracker(bc, lpOpts.UseFinalityTag, lpOpts.FinalityDepth)
+	// NOTE(review): dead branch — PollPeriod is hard-coded to 100ms above,
+	// so this fallback can never apply.
+	if lpOpts.PollPeriod == 0 {
+		lpOpts.PollPeriod = 1 * time.Hour
+	}
+	lp := logpoller.NewLogPoller(
+		orm,
+		bc,
+		log,
+		headTracker,
+		lpOpts)
+
+	// Setup onRamp.
+	var onRampAddress common.Address
+	switch version {
+	case ccipdata.V1_0_0:
+		onRampAddress = setupOnRampV1_0_0(t, user, bc)
+	case ccipdata.V1_1_0:
+		onRampAddress = setupOnRampV1_1_0(t, user, bc)
+	case ccipdata.V1_2_0:
+		onRampAddress = setupOnRampV1_2_0(t, user, bc)
+	case ccipdata.V1_5_0:
+		onRampAddress = setupOnRampV1_5_0(t, user, bc)
+	default:
+		require.Fail(t, "Unknown version: ", version)
+	}
+
+	// Create the version-specific reader.
+	reader, err := factory.NewOnRampReader(log, factory.NewEvmVersionFinder(), testutils.SimulatedChainID.Uint64(), testutils.SimulatedChainID.Uint64(), ccipcalc.EvmAddrToGeneric(onRampAddress), lp, bc)
+	require.NoError(t, err)
+
+	return onRampReaderTH{
+		user:   user,
+		reader: reader,
+	}
+}
+
+// setupOnRampV1_0_0 deploys a v1.0.0 EVM2EVMOnRamp on the simulated backend
+// with minimal placeholder config (router 0x...100 is later asserted by
+// testOnRampReader) and returns its address.
+func setupOnRampV1_0_0(t *testing.T, user *bind.TransactOpts, bc *client.SimulatedBackendClient) common.Address {
+	linkTokenAddress := common.HexToAddress("0x000011")
+	staticConfig := evm_2_evm_onramp_1_0_0.EVM2EVMOnRampStaticConfig{
+		LinkToken:         linkTokenAddress,
+		ChainSelector:     testutils.SimulatedChainID.Uint64(),
+		DestChainSelector: testutils.SimulatedChainID.Uint64(),
+		DefaultTxGasLimit: 30000,
+		MaxNopFeesJuels:   big.NewInt(1000000),
+		PrevOnRamp:        common.Address{},
+		ArmProxy:          utils.RandomAddress(),
+	}
+	dynamicConfig := evm_2_evm_onramp_1_0_0.EVM2EVMOnRampDynamicConfig{
+		Router:          common.HexToAddress("0x000100"),
+		MaxTokensLength: 4,
+		PriceRegistry:   utils.RandomAddress(),
+		MaxDataSize:     100000,
+		MaxGasLimit:     100000,
+	}
+	rateLimiterConfig := evm_2_evm_onramp_1_0_0.RateLimiterConfig{
+		IsEnabled: false,
+		Capacity:  big.NewInt(5),
+		Rate:      big.NewInt(5),
+	}
+	allowList := []common.Address{user.From}
+	feeTokenConfigs := []evm_2_evm_onramp_1_0_0.EVM2EVMOnRampFeeTokenConfigArgs{
+		{
+			Token:                 linkTokenAddress,
+			GasMultiplier:         1,
+			NetworkFeeAmountUSD:   big.NewInt(0),
+			DestGasOverhead:       50,
+			DestGasPerPayloadByte: 60,
+			Enabled:               false,
+		},
+	}
+	tokenTransferConfigArgs := []evm_2_evm_onramp_1_0_0.EVM2EVMOnRampTokenTransferFeeConfigArgs{
+		{
+			Token:  utils.RandomAddress(),
+			MinFee: 10,
+			MaxFee: 1000,
+			Ratio:  1,
+		},
+	}
+	nopsAndWeights := []evm_2_evm_onramp_1_0_0.EVM2EVMOnRampNopAndWeight{
+		{
+			Nop:    utils.RandomAddress(),
+			Weight: 1,
+		},
+	}
+	tokenAndPool := []evm_2_evm_onramp_1_0_0.InternalPoolUpdate{}
+	onRampAddress, transaction, _, err := evm_2_evm_onramp_1_0_0.DeployEVM2EVMOnRamp(
+		user,
+		bc,
+		staticConfig,
+		dynamicConfig,
+		tokenAndPool,
+		allowList,
+		rateLimiterConfig,
+		feeTokenConfigs,
+		tokenTransferConfigArgs,
+		nopsAndWeights,
+	)
+	// Fail fast on deployment errors before mining the block, matching the
+	// err-then-Commit ordering used by deployCommitStore.
+	require.NoError(t, err)
+	bc.Commit()
+	ccipdata.AssertNonRevert(t, transaction, bc, user)
+	return onRampAddress
+}
+
+// setupOnRampV1_1_0 deploys a v1.1.0 EVM2EVMOnRamp on the simulated backend
+// with minimal placeholder config (router 0x...110 is later asserted by
+// testOnRampReader) and returns its address.
+func setupOnRampV1_1_0(t *testing.T, user *bind.TransactOpts, bc *client.SimulatedBackendClient) common.Address {
+	linkTokenAddress := common.HexToAddress("0x000011")
+	staticConfig := evm_2_evm_onramp_1_1_0.EVM2EVMOnRampStaticConfig{
+		LinkToken:         linkTokenAddress,
+		ChainSelector:     testutils.SimulatedChainID.Uint64(),
+		DestChainSelector: testutils.SimulatedChainID.Uint64(),
+		DefaultTxGasLimit: 30000,
+		MaxNopFeesJuels:   big.NewInt(1000000),
+		PrevOnRamp:        common.Address{},
+		ArmProxy:          utils.RandomAddress(),
+	}
+	dynamicConfig := evm_2_evm_onramp_1_1_0.EVM2EVMOnRampDynamicConfig{
+		Router:          common.HexToAddress("0x000110"),
+		MaxTokensLength: 4,
+		PriceRegistry:   common.HexToAddress("0x000066"),
+		MaxDataSize:     100000,
+		MaxGasLimit:     100000,
+	}
+	rateLimiterConfig := evm_2_evm_onramp_1_1_0.RateLimiterConfig{
+		IsEnabled: false,
+		Capacity:  big.NewInt(5),
+		Rate:      big.NewInt(5),
+	}
+	allowList := []common.Address{user.From}
+	feeTokenConfigs := []evm_2_evm_onramp_1_1_0.EVM2EVMOnRampFeeTokenConfigArgs{
+		{
+			Token:                  linkTokenAddress,
+			NetworkFeeUSD:          0,
+			MinTokenTransferFeeUSD: 0,
+			MaxTokenTransferFeeUSD: 0,
+			GasMultiplier:          0,
+			PremiumMultiplier:      0,
+			Enabled:                false,
+		},
+	}
+	tokenTransferConfigArgs := []evm_2_evm_onramp_1_1_0.EVM2EVMOnRampTokenTransferFeeConfigArgs{
+		{
+			Token:           linkTokenAddress,
+			Ratio:           0,
+			DestGasOverhead: 0,
+		},
+	}
+	nopsAndWeights := []evm_2_evm_onramp_1_1_0.EVM2EVMOnRampNopAndWeight{
+		{
+			Nop:    common.HexToAddress("0x222222222"),
+			Weight: 1,
+		},
+	}
+	tokenAndPool := []evm_2_evm_onramp_1_1_0.InternalPoolUpdate{}
+	onRampAddress, transaction, _, err := evm_2_evm_onramp_1_1_0.DeployEVM2EVMOnRamp(
+		user,
+		bc,
+		staticConfig,
+		dynamicConfig,
+		tokenAndPool,
+		allowList,
+		rateLimiterConfig,
+		feeTokenConfigs,
+		tokenTransferConfigArgs,
+		nopsAndWeights,
+	)
+	// Fail fast on deployment errors before mining the block, matching the
+	// err-then-Commit ordering used by deployCommitStore.
+	require.NoError(t, err)
+	bc.Commit()
+	ccipdata.AssertNonRevert(t, transaction, bc, user)
+	return onRampAddress
+}
+
+// setupOnRampV1_2_0 deploys a v1.2.0 EVM2EVMOnRamp on the simulated backend
+// with minimal placeholder config (router 0x...120 is later asserted by
+// testOnRampReader) and returns its address.
+func setupOnRampV1_2_0(t *testing.T, user *bind.TransactOpts, bc *client.SimulatedBackendClient) common.Address {
+	linkTokenAddress := common.HexToAddress("0x000011")
+	staticConfig := evm_2_evm_onramp_1_2_0.EVM2EVMOnRampStaticConfig{
+		LinkToken:         linkTokenAddress,
+		ChainSelector:     testutils.SimulatedChainID.Uint64(),
+		DestChainSelector: testutils.SimulatedChainID.Uint64(),
+		DefaultTxGasLimit: 30000,
+		MaxNopFeesJuels:   big.NewInt(1000000),
+		PrevOnRamp:        common.Address{},
+		ArmProxy:          utils.RandomAddress(),
+	}
+	dynamicConfig := evm_2_evm_onramp_1_2_0.EVM2EVMOnRampDynamicConfig{
+		Router:                            common.HexToAddress("0x0000000000000000000000000000000000000120"),
+		MaxNumberOfTokensPerMsg:           0,
+		DestGasOverhead:                   0,
+		DestGasPerPayloadByte:             0,
+		DestDataAvailabilityOverheadGas:   0,
+		DestGasPerDataAvailabilityByte:    0,
+		DestDataAvailabilityMultiplierBps: 0,
+		PriceRegistry:                     utils.RandomAddress(),
+		MaxDataBytes:                      0,
+		MaxPerMsgGasLimit:                 0,
+	}
+	rateLimiterConfig := evm_2_evm_onramp_1_2_0.RateLimiterConfig{
+		IsEnabled: false,
+		Capacity:  big.NewInt(5),
+		Rate:      big.NewInt(5),
+	}
+	feeTokenConfigs := []evm_2_evm_onramp_1_2_0.EVM2EVMOnRampFeeTokenConfigArgs{
+		{
+			Token:                      linkTokenAddress,
+			NetworkFeeUSDCents:         0,
+			GasMultiplierWeiPerEth:     0,
+			PremiumMultiplierWeiPerEth: 0,
+			Enabled:                    false,
+		},
+	}
+	tokenTransferConfigArgs := []evm_2_evm_onramp_1_2_0.EVM2EVMOnRampTokenTransferFeeConfigArgs{
+		{
+			Token:             linkTokenAddress,
+			MinFeeUSDCents:    0,
+			MaxFeeUSDCents:    0,
+			DeciBps:           0,
+			DestGasOverhead:   0,
+			DestBytesOverhead: 0,
+		},
+	}
+	nopsAndWeights := []evm_2_evm_onramp_1_2_0.EVM2EVMOnRampNopAndWeight{
+		{
+			Nop:    utils.RandomAddress(),
+			Weight: 1,
+		},
+	}
+	tokenAndPool := []evm_2_evm_onramp_1_2_0.InternalPoolUpdate{}
+	onRampAddress, transaction, _, err := evm_2_evm_onramp_1_2_0.DeployEVM2EVMOnRamp(
+		user,
+		bc,
+		staticConfig,
+		dynamicConfig,
+		tokenAndPool,
+		rateLimiterConfig,
+		feeTokenConfigs,
+		tokenTransferConfigArgs,
+		nopsAndWeights,
+	)
+	// Fail fast on deployment errors before mining the block, matching the
+	// err-then-Commit ordering used by deployCommitStore.
+	require.NoError(t, err)
+	bc.Commit()
+	ccipdata.AssertNonRevert(t, transaction, bc, user)
+	return onRampAddress
+}
+
+// setupOnRampV1_5_0 deploys a v1.5.0(-dev) EVM2EVMOnRamp on the simulated
+// backend with minimal placeholder config (router 0x...150 is later asserted
+// by testOnRampReader) and returns its address.
+func setupOnRampV1_5_0(t *testing.T, user *bind.TransactOpts, bc *client.SimulatedBackendClient) common.Address {
+	linkTokenAddress := common.HexToAddress("0x000011")
+	staticConfig := evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{
+		LinkToken:          linkTokenAddress,
+		ChainSelector:      testutils.SimulatedChainID.Uint64(),
+		DestChainSelector:  testutils.SimulatedChainID.Uint64(),
+		DefaultTxGasLimit:  30000,
+		MaxNopFeesJuels:    big.NewInt(1000000),
+		PrevOnRamp:         common.Address{},
+		RmnProxy:           utils.RandomAddress(),
+		TokenAdminRegistry: utils.RandomAddress(),
+	}
+	dynamicConfig := evm_2_evm_onramp.EVM2EVMOnRampDynamicConfig{
+		Router:                            common.HexToAddress("0x0000000000000000000000000000000000000150"),
+		MaxNumberOfTokensPerMsg:           0,
+		DestGasOverhead:                   0,
+		DestGasPerPayloadByte:             0,
+		DestDataAvailabilityOverheadGas:   0,
+		DestGasPerDataAvailabilityByte:    0,
+		DestDataAvailabilityMultiplierBps: 0,
+		PriceRegistry:                     utils.RandomAddress(),
+		MaxDataBytes:                      0,
+		MaxPerMsgGasLimit:                 0,
+		DefaultTokenFeeUSDCents:           50,
+		DefaultTokenDestGasOverhead:       34_000,
+		DefaultTokenDestBytesOverhead:     500,
+	}
+	rateLimiterConfig := evm_2_evm_onramp.RateLimiterConfig{
+		IsEnabled: false,
+		Capacity:  big.NewInt(5),
+		Rate:      big.NewInt(5),
+	}
+	feeTokenConfigs := []evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs{
+		{
+			Token:                      linkTokenAddress,
+			NetworkFeeUSDCents:         0,
+			GasMultiplierWeiPerEth:     0,
+			PremiumMultiplierWeiPerEth: 0,
+			Enabled:                    false,
+		},
+	}
+	tokenTransferConfigArgs := []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{
+		{
+			Token:                     linkTokenAddress,
+			MinFeeUSDCents:            0,
+			MaxFeeUSDCents:            0,
+			DeciBps:                   0,
+			DestGasOverhead:           0,
+			DestBytesOverhead:         64,
+			AggregateRateLimitEnabled: true,
+		},
+	}
+	nopsAndWeights := []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{
+		{
+			Nop:    utils.RandomAddress(),
+			Weight: 1,
+		},
+	}
+	onRampAddress, transaction, _, err := evm_2_evm_onramp.DeployEVM2EVMOnRamp(
+		user,
+		bc,
+		staticConfig,
+		dynamicConfig,
+		rateLimiterConfig,
+		feeTokenConfigs,
+		tokenTransferConfigArgs,
+		nopsAndWeights,
+	)
+	// Fail fast on deployment errors before mining the block, matching the
+	// err-then-Commit ordering used by deployCommitStore.
+	require.NoError(t, err)
+	bc.Commit()
+	ccipdata.AssertNonRevert(t, transaction, bc, user)
+	return onRampAddress
+}
+
+// testVersionSpecificOnRampReader runs the common reader assertions with the
+// router address that the matching setup helper configured for each version.
+func testVersionSpecificOnRampReader(t *testing.T, th onRampReaderTH, version string) {
+	routers := map[string]common.Address{
+		ccipdata.V1_0_0: common.HexToAddress("0x0000000000000000000000000000000000000100"),
+		ccipdata.V1_1_0: common.HexToAddress("0x0000000000000000000000000000000000000110"),
+		ccipdata.V1_2_0: common.HexToAddress("0x0000000000000000000000000000000000000120"),
+		ccipdata.V1_5_0: common.HexToAddress("0x0000000000000000000000000000000000000150"),
+	}
+	router, ok := routers[version]
+	if !ok {
+		require.Fail(t, "Unknown version: ", version)
+	}
+	testOnRampReader(t, th, router)
+}
+
+// testOnRampReader asserts the common read paths of a freshly-deployed
+// OnRamp: the configured router address, an empty send-request window, a
+// non-nil contract address, and a dynamic config pointing at the same router.
+func testOnRampReader(t *testing.T, th onRampReaderTH, expectedRouterAddress common.Address) {
+	// NOTE(review): assumes the deployer's TransactOpts carries a usable
+	// context — confirm it is non-nil for these simulated-backend tests.
+	ctx := th.user.Context
+	res, err := th.reader.RouterAddress(ctx)
+	require.NoError(t, err)
+	require.Equal(t, ccipcalc.EvmAddrToGeneric(expectedRouterAddress), res)
+
+	msg, err := th.reader.GetSendRequestsBetweenSeqNums(ctx, 0, 10, true)
+	require.NoError(t, err)
+	require.NotNil(t, msg)
+	require.Equal(t, []cciptypes.EVM2EVMMessageWithTxMeta{}, msg)
+
+	address, err := th.reader.Address(ctx)
+	require.NoError(t, err)
+	require.NotNil(t, address)
+
+	cfg, err := th.reader.GetDynamicConfig(ctx)
+	require.NoError(t, err)
+	require.NotNil(t, cfg)
+	require.Equal(t, ccipcalc.EvmAddrToGeneric(expectedRouterAddress), cfg.Router)
+}
+
+// TestNewOnRampReader exercises the factory's type-and-version dispatch by
+// mocking the on-chain typeAndVersion() response and checking the returned
+// error (or lack of one) for each case.
+func TestNewOnRampReader(t *testing.T) {
+	var tt = []struct {
+		typeAndVersion string
+		expectedErr    string
+	}{
+		{
+			typeAndVersion: "blah",
+			expectedErr:    "unable to read type and version: invalid type and version blah",
+		},
+		{
+			typeAndVersion: "EVM2EVMOffRamp 1.0.0",
+			expectedErr:    "expected EVM2EVMOnRamp got EVM2EVMOffRamp",
+		},
+		{
+			typeAndVersion: "EVM2EVMOnRamp 1.2.0",
+			expectedErr:    "",
+		},
+		{
+			typeAndVersion: "EVM2EVMOnRamp 2.0.0",
+			expectedErr:    "unsupported onramp version 2.0.0",
+		},
+	}
+	for _, tc := range tt {
+		t.Run(tc.typeAndVersion, func(t *testing.T) {
+			// ABI-encode the mocked typeAndVersion() return value.
+			b, err := utils.ABIEncode(`[{"type":"string"}]`, tc.typeAndVersion)
+			require.NoError(t, err)
+			c := evmclientmocks.NewClient(t)
+			c.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(b, nil)
+			addr := ccipcalc.EvmAddrToGeneric(utils.RandomAddress())
+			lp := lpmocks.NewLogPoller(t)
+			lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil).Maybe()
+			_, err = factory.NewOnRampReader(logger.TestLogger(t), factory.NewEvmVersionFinder(), 1, 2, addr, lp, c)
+			if tc.expectedErr != "" {
+				require.EqualError(t, err, tc.expectedErr)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader.go
new file mode 100644
index 00000000000..02aef5e9efc
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader.go
@@ -0,0 +1,14 @@
+package ccipdata
+
+import cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+// Human-readable labels for price-registry-related log subscriptions, plus
+// the label used to tag the exec plugin.
+// NOTE(review): exact consumers are not visible in this file — verify usage.
+const (
+	COMMIT_PRICE_UPDATES = "Commit price updates"
+	FEE_TOKEN_ADDED      = "Fee token added"
+	FEE_TOKEN_REMOVED    = "Fee token removed"
+	ExecPluginLabel      = "exec"
+)
+
+// PriceRegistryReader is the EVM-local alias of the chain-agnostic
+// PriceRegistryReader.
+type PriceRegistryReader interface {
+	cciptypes.PriceRegistryReader
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader_test.go
new file mode 100644
index 00000000000..e17b885cff2
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader_test.go
@@ -0,0 +1,296 @@
+package ccipdata_test
+
+import (
+ "context"
+ "math/big"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ evmclientmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+)
+
+// priceRegReaderTH is the shared harness for price registry reader tests:
+// it carries one reader per contract version plus the expected on-chain
+// state written during setup, so the same assertions can run against every
+// version.
+type priceRegReaderTH struct {
+	lp      logpoller.LogPollerTest
+	ec      client.Client
+	lggr    logger.Logger
+	user    *bind.TransactOpts
+	readers map[string]ccipdata.PriceRegistryReader // keyed by ccipdata version string
+
+	// Expected state
+	blockTs              []uint64 // timestamps of the update blocks, ascending
+	expectedFeeTokens    []common.Address
+	expectedGasUpdates   map[uint64][]cciptypes.GasPrice   // keyed by block timestamp
+	expectedTokenUpdates map[uint64][]cciptypes.TokenPrice // keyed by block timestamp
+	destSelectors        []uint64
+}
+
+// commitAndGetBlockTs mines the pending block on the simulated backend and
+// returns the timestamp of the block that was just committed.
+func commitAndGetBlockTs(ec *client.SimulatedBackendClient) uint64 {
+	h := ec.Commit()
+	b, err := ec.BlockByHash(context.Background(), h)
+	if err != nil {
+		// Fail loudly instead of dereferencing a nil block below.
+		panic(err)
+	}
+	return b.Time()
+}
+
+// newSim creates a funded transactor and a simulated backend client on the
+// test chain ID for the price registry tests.
+func newSim(t *testing.T) (*bind.TransactOpts, *client.SimulatedBackendClient) {
+	user := testutils.MustNewSimTransactor(t)
+	sim := backends.NewSimulatedBackend(map[common.Address]core.GenesisAccount{
+		user.From: {
+			// 10 ETH starting balance for the deployer.
+			Balance: big.NewInt(0).Mul(big.NewInt(10), big.NewInt(1e18)),
+		},
+	}, 10e6)
+	ec := client.NewSimulatedBackendClient(t, sim, testutils.SimulatedChainID)
+	return user, ec
+}
+
+// setupPriceRegistryReaderTH instantiates all versions of the price registry
+// reader with a snapshot of data so reader tests can do multi-version
+// assertions. It deploys a v1.0.0 and a v1.2.0 registry, applies the same
+// two batches of gas/token price updates to both, and records the expected
+// state keyed by block timestamp.
+func setupPriceRegistryReaderTH(t *testing.T) priceRegReaderTH {
+	user, ec := newSim(t)
+	lggr := logger.TestLogger(t)
+	lpOpts := logpoller.Opts{
+		PollPeriod:               100 * time.Millisecond,
+		FinalityDepth:            2,
+		BackfillBatchSize:        3,
+		RpcBatchSize:             2,
+		KeepFinalizedBlocksDepth: 1000,
+	}
+	headTracker := headtracker.NewSimulatedHeadTracker(ec, lpOpts.UseFinalityTag, lpOpts.FinalityDepth)
+	// NOTE(review): dead branch — PollPeriod is hard-coded to 100ms above,
+	// so this default can never apply.
+	if lpOpts.PollPeriod == 0 {
+		lpOpts.PollPeriod = 1 * time.Hour
+	}
+	// TODO: We should be able to use an in memory log poller ORM here to speed up the tests.
+	lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, pgtest.NewSqlxDB(t), lggr), ec, lggr, headTracker, lpOpts)
+
+	feeTokens := []common.Address{utils.RandomAddress(), utils.RandomAddress()}
+	dest1 := uint64(10)
+	dest2 := uint64(11)
+	gasPriceUpdatesBlock1 := []cciptypes.GasPrice{
+		{
+			DestChainSelector: dest1,
+			Value:             big.NewInt(11),
+		},
+	}
+	gasPriceUpdatesBlock2 := []cciptypes.GasPrice{
+		{
+			DestChainSelector: dest1,          // Update gas price for the same dest chain
+			Value:             big.NewInt(12), // Intentionally different from block1
+		},
+		{
+			DestChainSelector: dest2, // Set gas price for different chain
+			Value:             big.NewInt(12),
+		},
+	}
+	token1 := ccipcalc.EvmAddrToGeneric(utils.RandomAddress())
+	token2 := ccipcalc.EvmAddrToGeneric(utils.RandomAddress())
+	tokenPriceUpdatesBlock1 := []cciptypes.TokenPrice{
+		{
+			Token: token1,
+			Value: big.NewInt(12),
+		},
+	}
+	tokenPriceUpdatesBlock2 := []cciptypes.TokenPrice{
+		{
+			Token: token1,
+			Value: big.NewInt(13), // Intentionally change token1 value
+		},
+		{
+			Token: token2,
+			Value: big.NewInt(12), // Intentionally set a same value different token
+		},
+	}
+	ctx := testutils.Context(t)
+	addr, _, _, err := price_registry_1_0_0.DeployPriceRegistry(user, ec, nil, feeTokens, 1000)
+	require.NoError(t, err)
+	addr2, _, _, err := price_registry_1_2_0.DeployPriceRegistry(user, ec, nil, feeTokens, 1000)
+	require.NoError(t, err)
+	commitAndGetBlockTs(ec) // Deploy these
+	// Build one reader per registry version and sanity-check the factory
+	// picked the matching concrete implementation.
+	pr10r, err := factory.NewPriceRegistryReader(ctx, lggr, factory.NewEvmVersionFinder(), ccipcalc.EvmAddrToGeneric(addr), lp, ec)
+	require.NoError(t, err)
+	assert.Equal(t, reflect.TypeOf(pr10r).String(), reflect.TypeOf(&v1_0_0.PriceRegistry{}).String())
+	pr12r, err := factory.NewPriceRegistryReader(ctx, lggr, factory.NewEvmVersionFinder(), ccipcalc.EvmAddrToGeneric(addr2), lp, ec)
+	require.NoError(t, err)
+	assert.Equal(t, reflect.TypeOf(pr12r).String(), reflect.TypeOf(&v1_2_0.PriceRegistry{}).String())
+	// Apply block1.
+	v1_0_0.ApplyPriceRegistryUpdate(t, user, addr, ec, gasPriceUpdatesBlock1, tokenPriceUpdatesBlock1)
+	v1_2_0.ApplyPriceRegistryUpdate(t, user, addr2, ec, gasPriceUpdatesBlock1, tokenPriceUpdatesBlock1)
+	b1 := commitAndGetBlockTs(ec)
+	// Apply block2
+	v1_0_0.ApplyPriceRegistryUpdate(t, user, addr, ec, gasPriceUpdatesBlock2, tokenPriceUpdatesBlock2)
+	v1_2_0.ApplyPriceRegistryUpdate(t, user, addr2, ec, gasPriceUpdatesBlock2, tokenPriceUpdatesBlock2)
+	b2 := commitAndGetBlockTs(ec)
+
+	// Capture all lp data.
+	lp.PollAndSaveLogs(context.Background(), 1)
+
+	return priceRegReaderTH{
+		lp:   lp,
+		ec:   ec,
+		lggr: lggr,
+		user: user,
+		readers: map[string]ccipdata.PriceRegistryReader{
+			ccipdata.V1_0_0: pr10r, ccipdata.V1_2_0: pr12r,
+		},
+		expectedFeeTokens: feeTokens,
+		expectedGasUpdates: map[uint64][]cciptypes.GasPrice{
+			b1: gasPriceUpdatesBlock1,
+			b2: gasPriceUpdatesBlock2,
+		},
+		expectedTokenUpdates: map[uint64][]cciptypes.TokenPrice{
+			b1: tokenPriceUpdatesBlock1,
+			b2: tokenPriceUpdatesBlock2,
+		},
+		blockTs:       []uint64{b1, b2},
+		destSelectors: []uint64{dest1, dest2},
+	}
+}
+
+func testPriceRegistryReader(t *testing.T, th priceRegReaderTH, pr ccipdata.PriceRegistryReader) {
+ // Assert have expected fee tokens.
+ gotFeeTokens, err := pr.GetFeeTokens(context.Background())
+ require.NoError(t, err)
+ evmAddrs, err := ccipcalc.GenericAddrsToEvm(gotFeeTokens...)
+ require.NoError(t, err)
+ assert.Equal(t, th.expectedFeeTokens, evmAddrs)
+
+ // Note unsupported chain selector simply returns an empty set not an error
+ gasUpdates, err := pr.GetGasPriceUpdatesCreatedAfter(context.Background(), 1e6, time.Unix(0, 0), 0)
+ require.NoError(t, err)
+ assert.Len(t, gasUpdates, 0)
+
+ for i, ts := range th.blockTs {
+ // Should see all updates >= ts.
+ var expectedGas []cciptypes.GasPrice
+ var expectedDest0Gas []cciptypes.GasPrice
+ var expectedToken []cciptypes.TokenPrice
+ for j := i; j < len(th.blockTs); j++ {
+ expectedGas = append(expectedGas, th.expectedGasUpdates[th.blockTs[j]]...)
+ for _, g := range th.expectedGasUpdates[th.blockTs[j]] {
+ if g.DestChainSelector == th.destSelectors[0] {
+ expectedDest0Gas = append(expectedDest0Gas, g)
+ }
+ }
+ expectedToken = append(expectedToken, th.expectedTokenUpdates[th.blockTs[j]]...)
+ }
+ gasUpdates, err = pr.GetAllGasPriceUpdatesCreatedAfter(context.Background(), time.Unix(int64(ts-1), 0), 0)
+ require.NoError(t, err)
+ assert.Len(t, gasUpdates, len(expectedGas))
+
+ gasUpdates, err = pr.GetGasPriceUpdatesCreatedAfter(context.Background(), th.destSelectors[0], time.Unix(int64(ts-1), 0), 0)
+ require.NoError(t, err)
+ assert.Len(t, gasUpdates, len(expectedDest0Gas))
+
+ tokenUpdates, err2 := pr.GetTokenPriceUpdatesCreatedAfter(context.Background(), time.Unix(int64(ts-1), 0), 0)
+ require.NoError(t, err2)
+ assert.Len(t, tokenUpdates, len(expectedToken))
+ }
+
+ // Empty token set should return empty set no error.
+ gotEmpty, err := pr.GetTokenPrices(context.Background(), []cciptypes.Address{})
+ require.NoError(t, err)
+ assert.Len(t, gotEmpty, 0)
+
+ // We expect latest token prices to apply
+ allTokenUpdates, err := pr.GetTokenPriceUpdatesCreatedAfter(context.Background(), time.Unix(0, 0), 0)
+ require.NoError(t, err)
+ // Build latest map
+ latest := make(map[cciptypes.Address]*big.Int)
+ // Comes back in ascending order (oldest first)
+ var allTokens []cciptypes.Address
+ for i := len(allTokenUpdates) - 1; i >= 0; i-- {
+ assert.NoError(t, err)
+ _, have := latest[allTokenUpdates[i].Token]
+ if have {
+ continue
+ }
+ latest[allTokenUpdates[i].Token] = allTokenUpdates[i].Value
+ allTokens = append(allTokens, allTokenUpdates[i].Token)
+ }
+ tokenPrices, err := pr.GetTokenPrices(context.Background(), allTokens)
+ require.NoError(t, err)
+ require.Len(t, tokenPrices, len(allTokens))
+ for _, p := range tokenPrices {
+ assert.Equal(t, p.Value, latest[p.Token])
+ }
+}
+
+func TestPriceRegistryReader(t *testing.T) {
+ th := setupPriceRegistryReaderTH(t)
+ // Assert all readers produce the same expected results.
+ for version, pr := range th.readers {
+ pr := pr
+ t.Run("PriceRegistryReader"+version, func(t *testing.T) {
+ testPriceRegistryReader(t, th, pr)
+ })
+ }
+}
+
+func TestNewPriceRegistryReader(t *testing.T) {
+ var tt = []struct {
+ typeAndVersion string
+ expectedErr string
+ }{
+ {
+ typeAndVersion: "blah",
+ expectedErr: "unable to read type and version: invalid type and version blah",
+ },
+ {
+ typeAndVersion: "EVM2EVMOffRamp 1.0.0",
+ expectedErr: "expected PriceRegistry got EVM2EVMOffRamp",
+ },
+ {
+ typeAndVersion: "PriceRegistry 1.2.0",
+ expectedErr: "",
+ },
+ {
+ typeAndVersion: "PriceRegistry 1.6.0-dev",
+ expectedErr: "",
+ },
+ {
+ typeAndVersion: "PriceRegistry 2.0.0",
+ expectedErr: "unsupported price registry version 2.0.0",
+ },
+ }
+ ctx := testutils.Context(t)
+ for _, tc := range tt {
+ t.Run(tc.typeAndVersion, func(t *testing.T) {
+ b, err := utils.ABIEncode(`[{"type":"string"}]`, tc.typeAndVersion)
+ require.NoError(t, err)
+ c := evmclientmocks.NewClient(t)
+ c.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(b, nil)
+ addr := ccipcalc.EvmAddrToGeneric(utils.RandomAddress())
+ lp := lpmocks.NewLogPoller(t)
+ lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil).Maybe()
+ _, err = factory.NewPriceRegistryReader(ctx, logger.TestLogger(t), factory.NewEvmVersionFinder(), addr, lp, c)
+ if tc.expectedErr != "" {
+ require.EqualError(t, err, tc.expectedErr)
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/reader.go
new file mode 100644
index 00000000000..a9a07f0879b
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/reader.go
@@ -0,0 +1,78 @@
+package ccipdata
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/ethereum/go-ethereum/core/types"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+const (
+ V1_0_0 = "1.0.0"
+ V1_1_0 = "1.1.0"
+ V1_2_0 = "1.2.0"
+ V1_4_0 = "1.4.0"
+ V1_5_0 = "1.5.0-dev"
+ V1_6_0 = "1.6.0-dev"
+)
+
+const (
+ // CommitExecLogsRetention defines the duration for which logs critical for Commit/Exec plugins processing are retained.
+ // Although Exec relies on permissionlessExecThreshold which is lower than 24hours for picking eligible CommitRoots,
+	// Commit can still reach older logs because it filters them by sequence numbers. For instance, in case of RMN curse on chain,
+ // we might have logs waiting in OnRamp to be committed first. When outage takes days we still would
+ // be able to bring back processing without replaying any logs from chain. You can read that param as
+ // "how long CCIP can be down and still be able to process all the messages after getting back to life".
+ // Breaching this threshold would require replaying chain using LogPoller from the beginning of the outage.
+ CommitExecLogsRetention = 30 * 24 * time.Hour // 30 days
+ // CacheEvictionLogsRetention defines the duration for which logs used for caching on-chain data are kept.
+ // Restarting node clears the cache entirely and rebuilds it from scratch by fetching data from chain,
+ // so we don't need to keep these logs for very long. All events relying on cache.NewLogpollerEventsBased should use this retention.
+ CacheEvictionLogsRetention = 7 * 24 * time.Hour // 7 days
+ // PriceUpdatesLogsRetention defines the duration for which logs with price updates are kept.
+ // These logs are emitted whenever the token price or gas price is updated and Commit scans very small time windows (e.g. 2 hours)
+ PriceUpdatesLogsRetention = 1 * 24 * time.Hour // 1 day
+)
+
+type Event[T any] struct {
+ Data T
+ cciptypes.TxMeta
+}
+
+func LogsConfirmations(finalized bool) evmtypes.Confirmations {
+ if finalized {
+ return evmtypes.Finalized
+ }
+ return evmtypes.Unconfirmed
+}
+
+func ParseLogs[T any](logs []logpoller.Log, lggr logger.Logger, parseFunc func(log types.Log) (*T, error)) ([]Event[T], error) {
+ reqs := make([]Event[T], 0, len(logs))
+
+ for _, log := range logs {
+ data, err := parseFunc(log.ToGethLog())
+ if err != nil {
+ lggr.Errorw("Unable to parse log", "err", err)
+ continue
+ }
+ reqs = append(reqs, Event[T]{
+ Data: *data,
+ TxMeta: cciptypes.TxMeta{
+ BlockTimestampUnixMilli: log.BlockTimestamp.UnixMilli(),
+ BlockNumber: uint64(log.BlockNumber),
+ TxHash: log.TxHash.String(),
+ LogIndex: uint64(log.LogIndex),
+ },
+ })
+ }
+
+ if len(logs) != len(reqs) {
+ return nil, fmt.Errorf("%d logs were not parsed", len(logs)-len(reqs))
+ }
+ return reqs, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/reader_test.go
new file mode 100644
index 00000000000..06766be81ee
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/reader_test.go
@@ -0,0 +1,72 @@
+package ccipdata
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+func Test_parseLogs(t *testing.T) {
+ // generate 100 logs
+ logs := make([]logpoller.Log, 100)
+ for i := range logs {
+ logs[i].LogIndex = int64(i + 1)
+ logs[i].BlockNumber = int64(i) * 1000
+ logs[i].BlockTimestamp = time.Now()
+ }
+
+ parseFn := func(log types.Log) (*uint, error) {
+ return &log.Index, nil
+ }
+
+ parsedEvents, err := ParseLogs[uint](logs, logger.TestLogger(t), parseFn)
+ require.NoError(t, err)
+ assert.Len(t, parsedEvents, 100)
+
+ // Make sure everything is parsed according to the parse func
+ for i, ev := range parsedEvents {
+ assert.Equal(t, i+1, int(ev.Data))
+ assert.Equal(t, i*1000, int(ev.BlockNumber))
+ assert.Greater(t, ev.BlockTimestampUnixMilli, time.Now().Add(-time.Minute).UnixMilli())
+ }
+}
+
+func Test_parseLogs_withErrors(t *testing.T) {
+ // generate 50 valid logs and 50 errors
+ actualErrorCount := 50
+ logs := make([]logpoller.Log, actualErrorCount*2)
+ for i := range logs {
+ logs[i].LogIndex = int64(i + 1)
+ }
+
+ // return an error for half of the logs.
+ parseFn := func(log types.Log) (*uint, error) {
+ if log.Index%2 == 0 {
+ return nil, fmt.Errorf("cannot parse %d", log.Index)
+ }
+ return &log.Index, nil
+ }
+
+ log, observed := logger.TestLoggerObserved(t, zapcore.DebugLevel)
+ parsedEvents, err := ParseLogs[uint](logs, log, parseFn)
+ assert.ErrorContains(t, err, fmt.Sprintf("%d logs were not parsed", len(logs)/2))
+ assert.Nil(t, parsedEvents, "No events are returned if there was an error.")
+
+ // logs are written for errors.
+ require.Equal(t, actualErrorCount, observed.Len(), "Expect 51 warnings: one for each error and a summary.")
+ for i, entry := range observed.All() {
+ assert.Equal(t, zapcore.ErrorLevel, entry.Level)
+ assert.Contains(t, entry.Message, "Unable to parse log")
+ contextMap := entry.ContextMap()
+ require.Contains(t, contextMap, "err")
+ assert.Contains(t, contextMap["err"], fmt.Sprintf("cannot parse %d", (i+1)*2), "each error should be logged as a warning")
+ }
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/retry_config.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/retry_config.go
new file mode 100644
index 00000000000..41161ee9388
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/retry_config.go
@@ -0,0 +1,9 @@
+package ccipdata
+
+import "time"
+
+// RetryConfig configures an initial delay between retries and a max delay between retries
+type RetryConfig struct {
+ InitialDelay time.Duration
+ MaxDelay time.Duration
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/test_utils.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/test_utils.go
new file mode 100644
index 00000000000..6dc51b888ed
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/test_utils.go
@@ -0,0 +1,36 @@
+package ccipdata
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+)
+
+// NewSimulation returns a funded simulated transactor and a simulated backend client.
+func NewSimulation(t testing.TB) (*bind.TransactOpts, *client.SimulatedBackendClient) {
+ user := testutils.MustNewSimTransactor(t)
+ simulatedBackend := backends.NewSimulatedBackend(map[common.Address]core.GenesisAccount{
+ user.From: {
+ Balance: big.NewInt(0).Mul(big.NewInt(3), big.NewInt(1e18)),
+ },
+ }, 10e6)
+ simulatedBackendClient := client.NewSimulatedBackendClient(t, simulatedBackend, testutils.SimulatedChainID)
+ return user, simulatedBackendClient
+}
+
+// AssertNonRevert verifies that a transaction was not reverted.
+func AssertNonRevert(t testing.TB, tx *types.Transaction, bc *client.SimulatedBackendClient, user *bind.TransactOpts) {
+ require.NotNil(t, tx, "Transaction should not be nil")
+ receipt, err := bc.TransactionReceipt(user.Context, tx.Hash())
+ require.NoError(t, err)
+ require.NotEqual(t, uint64(0), receipt.Status, "Transaction should not have reverted")
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/token_pool_reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/token_pool_reader.go
new file mode 100644
index 00000000000..999061f4913
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/token_pool_reader.go
@@ -0,0 +1,10 @@
+package ccipdata
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+)
+
+type TokenPoolReader interface {
+ Address() common.Address
+ Type() string
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader.go
new file mode 100644
index 00000000000..51ce0db7c04
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader.go
@@ -0,0 +1,169 @@
+package ccipdata
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/patrickmn/go-cache"
+ "github.com/pkg/errors"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+)
+
+var (
+	// shortLivedInMemLogsCacheExpiration is used for the short-lived in-mem logs cache.
+ // Value should usually be set to just a few seconds, a larger duration will not increase performance and might
+ // cause performance issues on re-orged logs.
+ shortLivedInMemLogsCacheExpiration = 20 * time.Second
+)
+
+const (
+ MESSAGE_SENT_FILTER_NAME = "USDC message sent"
+)
+
+var _ USDCReader = &USDCReaderImpl{}
+
+type USDCReader interface {
+ // GetUSDCMessagePriorToLogIndexInTx returns the specified USDC message data.
+ // e.g. if msg contains 3 tokens: [usdc1, wETH, usdc2] ignoring non-usdc tokens
+ // if usdcTokenIndexOffset is 0 we select usdc2
+ // if usdcTokenIndexOffset is 1 we select usdc1
+ // The message logs are found using the provided transaction hash.
+ GetUSDCMessagePriorToLogIndexInTx(ctx context.Context, logIndex int64, usdcTokenIndexOffset int, txHash string) ([]byte, error)
+}
+
+type USDCReaderImpl struct {
+ usdcMessageSent common.Hash
+ lp logpoller.LogPoller
+ filter logpoller.Filter
+ lggr logger.Logger
+ transmitterAddress common.Address
+
+ // shortLivedInMemLogs is a short-lived cache (items expire every few seconds)
+ // used to prevent frequent log fetching from the log poller
+ shortLivedInMemLogs *cache.Cache
+}
+
+func (u *USDCReaderImpl) Close() error {
+ // FIXME Dim pgOpts removed from LogPoller
+ return u.lp.UnregisterFilter(context.Background(), u.filter.Name)
+}
+
+func (u *USDCReaderImpl) RegisterFilters() error {
+ // FIXME Dim pgOpts removed from LogPoller
+ return u.lp.RegisterFilter(context.Background(), u.filter)
+}
+
+// usdcPayload has to match the onchain event emitted by the USDC message transmitter
+type usdcPayload []byte
+
+func (d usdcPayload) AbiString() string {
+ return `[{"type": "bytes"}]`
+}
+
+func (d usdcPayload) Validate() error {
+ if len(d) == 0 {
+ return errors.New("must be non-empty")
+ }
+ return nil
+}
+
+func parseUSDCMessageSent(logData []byte) ([]byte, error) {
+ decodeAbiStruct, err := abihelpers.DecodeAbiStruct[usdcPayload](logData)
+ if err != nil {
+ return nil, err
+ }
+ return decodeAbiStruct, nil
+}
+
+func (u *USDCReaderImpl) GetUSDCMessagePriorToLogIndexInTx(ctx context.Context, logIndex int64, usdcTokenIndexOffset int, txHash string) ([]byte, error) {
+ var lpLogs []logpoller.Log
+
+ // fetch all the usdc logs for the provided tx hash
+ k := fmt.Sprintf("usdc-%s", txHash) // custom prefix to avoid key collision if someone re-uses the cache
+ if rawLogs, foundInMem := u.shortLivedInMemLogs.Get(k); foundInMem {
+ inMemLogs, ok := rawLogs.([]logpoller.Log)
+ if !ok {
+ return nil, errors.Errorf("unexpected in-mem logs type %T", rawLogs)
+ }
+ u.lggr.Debugw("found logs in memory", "k", k, "len", len(inMemLogs))
+ lpLogs = inMemLogs
+ }
+
+ if len(lpLogs) == 0 {
+ u.lggr.Debugw("fetching logs from lp", "k", k)
+ logs, err := u.lp.IndexedLogsByTxHash(
+ ctx,
+ u.usdcMessageSent,
+ u.transmitterAddress,
+ common.HexToHash(txHash),
+ )
+ if err != nil {
+ return nil, err
+ }
+ lpLogs = logs
+ u.shortLivedInMemLogs.Set(k, logs, cache.DefaultExpiration)
+ u.lggr.Debugw("fetched logs from lp", "logs", len(lpLogs))
+ }
+
+ // collect the logs with log index less than the provided log index
+ allUsdcTokensData := make([][]byte, 0)
+ for _, current := range lpLogs {
+ if current.LogIndex < logIndex {
+ u.lggr.Infow("Found USDC message", "logIndex", current.LogIndex, "txHash", current.TxHash.Hex(), "data", hexutil.Encode(current.Data))
+ allUsdcTokensData = append(allUsdcTokensData, current.Data)
+ }
+ }
+
+ usdcTokenIndex := (len(allUsdcTokensData) - 1) - usdcTokenIndexOffset
+
+ if usdcTokenIndex < 0 || usdcTokenIndex >= len(allUsdcTokensData) {
+ u.lggr.Errorw("usdc message not found",
+ "logIndex", logIndex,
+ "allUsdcTokenData", len(allUsdcTokensData),
+ "txHash", txHash,
+ "usdcTokenIndex", usdcTokenIndex,
+ )
+ return nil, errors.Errorf("usdc token index %d is not valid", usdcTokenIndex)
+ }
+ return parseUSDCMessageSent(allUsdcTokensData[usdcTokenIndex])
+}
+
+func NewUSDCReader(lggr logger.Logger, jobID string, transmitter common.Address, lp logpoller.LogPoller, registerFilters bool) (*USDCReaderImpl, error) {
+ eventSig := utils.Keccak256Fixed([]byte("MessageSent(bytes)"))
+
+ r := &USDCReaderImpl{
+ lggr: lggr,
+ lp: lp,
+ usdcMessageSent: eventSig,
+ filter: logpoller.Filter{
+ Name: logpoller.FilterName(MESSAGE_SENT_FILTER_NAME, jobID, transmitter.Hex()),
+ EventSigs: []common.Hash{eventSig},
+ Addresses: []common.Address{transmitter},
+ Retention: CommitExecLogsRetention,
+ },
+ transmitterAddress: transmitter,
+ shortLivedInMemLogs: cache.New(shortLivedInMemLogsCacheExpiration, 2*shortLivedInMemLogsCacheExpiration),
+ }
+
+ if registerFilters {
+ if err := r.RegisterFilters(); err != nil {
+ return nil, fmt.Errorf("register filters: %w", err)
+ }
+ }
+ return r, nil
+}
+
+func CloseUSDCReader(lggr logger.Logger, jobID string, transmitter common.Address, lp logpoller.LogPoller) error {
+ r, err := NewUSDCReader(lggr, jobID, transmitter, lp, false)
+ if err != nil {
+ return err
+ }
+ return r.Close()
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader_internal_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader_internal_test.go
new file mode 100644
index 00000000000..a5f0a1ffd06
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader_internal_test.go
@@ -0,0 +1,178 @@
+package ccipdata
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+func TestLogPollerClient_GetUSDCMessagePriorToLogIndexInTx(t *testing.T) {
+ addr := utils.RandomAddress()
+ txHash := common.BytesToHash(addr[:])
+ ccipLogIndex := int64(100)
+
+ expectedData := "0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000f80000000000000001000000020000000000048d71000000000000000000000000eb08f243e5d3fcff26a9e38ae5520a669f4019d000000000000000000000000023a04d5935ed8bc8e3eb78db3541f0abfb001c6e0000000000000000000000006cb3ed9b441eb674b58495c8b3324b59faff5243000000000000000000000000000000005425890298aed601595a70ab815c96711a31bc65000000000000000000000000ab4f961939bfe6a93567cc57c59eed7084ce2131000000000000000000000000000000000000000000000000000000000000271000000000000000000000000035e08285cfed1ef159236728f843286c55fc08610000000000000000"
+ expectedPostParse := "0x0000000000000001000000020000000000048d71000000000000000000000000eb08f243e5d3fcff26a9e38ae5520a669f4019d000000000000000000000000023a04d5935ed8bc8e3eb78db3541f0abfb001c6e0000000000000000000000006cb3ed9b441eb674b58495c8b3324b59faff5243000000000000000000000000000000005425890298aed601595a70ab815c96711a31bc65000000000000000000000000ab4f961939bfe6a93567cc57c59eed7084ce2131000000000000000000000000000000000000000000000000000000000000271000000000000000000000000035e08285cfed1ef159236728f843286c55fc0861"
+ lggr := logger.TestLogger(t)
+
+ t.Run("multiple found - selected last", func(t *testing.T) {
+ lp := lpmocks.NewLogPoller(t)
+ u, _ := NewUSDCReader(lggr, "job_123", utils.RandomAddress(), lp, false)
+
+ lp.On("IndexedLogsByTxHash",
+ mock.Anything,
+ u.usdcMessageSent,
+ u.transmitterAddress,
+ txHash,
+ ).Return([]logpoller.Log{
+ {LogIndex: ccipLogIndex - 2, Data: []byte("-2")},
+ {LogIndex: ccipLogIndex - 1, Data: hexutil.MustDecode(expectedData)},
+ {LogIndex: ccipLogIndex, Data: []byte("0")},
+ {LogIndex: ccipLogIndex + 1, Data: []byte("1")},
+ }, nil)
+ usdcMessageData, err := u.GetUSDCMessagePriorToLogIndexInTx(context.Background(), ccipLogIndex, 0, txHash.String())
+ assert.NoError(t, err)
+ assert.Equal(t, expectedPostParse, hexutil.Encode(usdcMessageData))
+ lp.AssertExpectations(t)
+ })
+
+ t.Run("multiple found - selected first", func(t *testing.T) {
+ lp := lpmocks.NewLogPoller(t)
+ u, _ := NewUSDCReader(lggr, "job_123", utils.RandomAddress(), lp, false)
+
+ lp.On("IndexedLogsByTxHash",
+ mock.Anything,
+ u.usdcMessageSent,
+ u.transmitterAddress,
+ txHash,
+ ).Return([]logpoller.Log{
+ {LogIndex: ccipLogIndex - 2, Data: hexutil.MustDecode(expectedData)},
+ {LogIndex: ccipLogIndex - 1, Data: []byte("-2")},
+ {LogIndex: ccipLogIndex, Data: []byte("0")},
+ {LogIndex: ccipLogIndex + 1, Data: []byte("1")},
+ }, nil)
+ usdcMessageData, err := u.GetUSDCMessagePriorToLogIndexInTx(context.Background(), ccipLogIndex, 1, txHash.String())
+ assert.NoError(t, err)
+ assert.Equal(t, expectedPostParse, hexutil.Encode(usdcMessageData))
+ lp.AssertExpectations(t)
+ })
+
+ t.Run("logs fetched from memory in subsequent calls", func(t *testing.T) {
+ lp := lpmocks.NewLogPoller(t)
+ u, _ := NewUSDCReader(lggr, "job_123", utils.RandomAddress(), lp, false)
+
+ lp.On("IndexedLogsByTxHash",
+ mock.Anything,
+ u.usdcMessageSent,
+ u.transmitterAddress,
+ txHash,
+ ).Return([]logpoller.Log{
+ {LogIndex: ccipLogIndex - 2, Data: hexutil.MustDecode(expectedData)},
+ {LogIndex: ccipLogIndex - 1, Data: []byte("-2")},
+ {LogIndex: ccipLogIndex, Data: []byte("0")},
+ {LogIndex: ccipLogIndex + 1, Data: []byte("1")},
+ }, nil).Once()
+
+ // first call logs must be fetched from lp
+ usdcMessageData, err := u.GetUSDCMessagePriorToLogIndexInTx(context.Background(), ccipLogIndex, 1, txHash.String())
+ assert.NoError(t, err)
+ assert.Equal(t, expectedPostParse, hexutil.Encode(usdcMessageData))
+
+ // subsequent call, logs must be fetched from memory
+ usdcMessageData, err = u.GetUSDCMessagePriorToLogIndexInTx(context.Background(), ccipLogIndex, 1, txHash.String())
+ assert.NoError(t, err)
+ assert.Equal(t, expectedPostParse, hexutil.Encode(usdcMessageData))
+
+ lp.AssertExpectations(t)
+ })
+
+ t.Run("none found", func(t *testing.T) {
+ lp := lpmocks.NewLogPoller(t)
+ u, _ := NewUSDCReader(lggr, "job_123", utils.RandomAddress(), lp, false)
+ lp.On("IndexedLogsByTxHash",
+ mock.Anything,
+ u.usdcMessageSent,
+ u.transmitterAddress,
+ txHash,
+ ).Return([]logpoller.Log{}, nil)
+
+ usdcMessageData, err := u.GetUSDCMessagePriorToLogIndexInTx(context.Background(), ccipLogIndex, 0, txHash.String())
+ assert.Errorf(t, err, fmt.Sprintf("no USDC message found prior to log index %d in tx %s", ccipLogIndex, txHash.Hex()))
+ assert.Nil(t, usdcMessageData)
+
+ lp.AssertExpectations(t)
+ })
+}
+
+func TestParse(t *testing.T) {
+ expectedBody, err := hexutil.Decode("0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000f80000000000000001000000020000000000048d71000000000000000000000000eb08f243e5d3fcff26a9e38ae5520a669f4019d000000000000000000000000023a04d5935ed8bc8e3eb78db3541f0abfb001c6e0000000000000000000000006cb3ed9b441eb674b58495c8b3324b59faff5243000000000000000000000000000000005425890298aed601595a70ab815c96711a31bc65000000000000000000000000ab4f961939bfe6a93567cc57c59eed7084ce2131000000000000000000000000000000000000000000000000000000000000271000000000000000000000000035e08285cfed1ef159236728f843286c55fc08610000000000000000")
+ require.NoError(t, err)
+
+ parsedBody, err := parseUSDCMessageSent(expectedBody)
+ require.NoError(t, err)
+
+ expectedPostParse := "0x0000000000000001000000020000000000048d71000000000000000000000000eb08f243e5d3fcff26a9e38ae5520a669f4019d000000000000000000000000023a04d5935ed8bc8e3eb78db3541f0abfb001c6e0000000000000000000000006cb3ed9b441eb674b58495c8b3324b59faff5243000000000000000000000000000000005425890298aed601595a70ab815c96711a31bc65000000000000000000000000ab4f961939bfe6a93567cc57c59eed7084ce2131000000000000000000000000000000000000000000000000000000000000271000000000000000000000000035e08285cfed1ef159236728f843286c55fc0861"
+
+ require.Equal(t, expectedPostParse, hexutil.Encode(parsedBody))
+}
+
+func TestFilters(t *testing.T) {
+ t.Run("filters of different jobs should be distinct", func(t *testing.T) {
+ lggr := logger.TestLogger(t)
+ chainID := testutils.NewRandomEVMChainID()
+ db := pgtest.NewSqlxDB(t)
+ o := logpoller.NewORM(chainID, db, lggr)
+ ec := backends.NewSimulatedBackend(map[common.Address]core.GenesisAccount{}, 10e6)
+ esc := client.NewSimulatedBackendClient(t, ec, chainID)
+ lpOpts := logpoller.Opts{
+ PollPeriod: 1 * time.Hour,
+ FinalityDepth: 1,
+ BackfillBatchSize: 1,
+ RpcBatchSize: 1,
+ KeepFinalizedBlocksDepth: 100,
+ }
+ headTracker := headtracker.NewSimulatedHeadTracker(esc, lpOpts.UseFinalityTag, lpOpts.FinalityDepth)
+ if lpOpts.PollPeriod == 0 {
+ lpOpts.PollPeriod = 1 * time.Hour
+ }
+ lp := logpoller.NewLogPoller(o, esc, lggr, headTracker, lpOpts)
+
+ jobID1 := "job-1"
+ jobID2 := "job-2"
+ transmitter := utils.RandomAddress()
+
+ f1 := logpoller.FilterName("USDC message sent", jobID1, transmitter.Hex())
+ f2 := logpoller.FilterName("USDC message sent", jobID2, transmitter.Hex())
+
+ _, err := NewUSDCReader(lggr, jobID1, transmitter, lp, true)
+ assert.NoError(t, err)
+ assert.True(t, lp.HasFilter(f1))
+
+ _, err = NewUSDCReader(lggr, jobID2, transmitter, lp, true)
+ assert.NoError(t, err)
+ assert.True(t, lp.HasFilter(f2))
+
+ err = CloseUSDCReader(lggr, jobID2, transmitter, lp)
+ assert.NoError(t, err)
+ assert.True(t, lp.HasFilter(f1))
+ assert.False(t, lp.HasFilter(f2))
+ })
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store.go
new file mode 100644
index 00000000000..3e58143a284
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store.go
@@ -0,0 +1,456 @@
+package v1_0_0
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/pkg/errors"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink-common/pkg/types/query"
+ "github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/logpollerutil"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices"
+)
+
+const (
+ EXEC_REPORT_ACCEPTS = "Exec report accepts"
+ ReportAccepted = "ReportAccepted"
+)
+
+var _ ccipdata.CommitStoreReader = &CommitStore{}
+
+type CommitStore struct {
+ // Static config
+ commitStore *commit_store_1_0_0.CommitStore
+ lggr logger.Logger
+ lp logpoller.LogPoller
+ address common.Address
+ estimator *gas.EvmFeeEstimator
+ sourceMaxGasPrice *big.Int
+ filters []logpoller.Filter
+ reportAcceptedSig common.Hash
+ reportAcceptedMaxSeqIndex int
+ commitReportArgs abi.Arguments
+
+ // Dynamic config
+ configMu sync.RWMutex
+ gasPriceEstimator prices.ExecGasPriceEstimator
+ offchainConfig cciptypes.CommitOffchainConfig
+}
+
+func (c *CommitStore) GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error) {
+	legacyConfig, err := c.commitStore.GetStaticConfig(&bind.CallOpts{Context: ctx})
+	if err != nil {
+		return cciptypes.CommitStoreStaticConfig{}, errors.Wrap(err, "could not get commitStore static config")
+	}
+	return cciptypes.CommitStoreStaticConfig{
+		ChainSelector:       legacyConfig.ChainSelector,
+		SourceChainSelector: legacyConfig.SourceChainSelector,
+		OnRamp:              ccipcalc.EvmAddrToGeneric(legacyConfig.OnRamp),
+		ArmProxy:            ccipcalc.EvmAddrToGeneric(legacyConfig.ArmProxy),
+	}, nil
+}
+
+func (c *CommitStore) EncodeCommitReport(_ context.Context, report cciptypes.CommitStoreReport) ([]byte, error) {
+ return encodeCommitReport(c.commitReportArgs, report)
+}
+
+func encodeCommitReport(commitReportArgs abi.Arguments, report cciptypes.CommitStoreReport) ([]byte, error) {
+ var tokenPriceUpdates []commit_store_1_0_0.InternalTokenPriceUpdate
+ for _, tokenPriceUpdate := range report.TokenPrices {
+ sourceTokenEvmAddr, err := ccipcalc.GenericAddrToEvm(tokenPriceUpdate.Token)
+ if err != nil {
+ return nil, err
+ }
+ tokenPriceUpdates = append(tokenPriceUpdates, commit_store_1_0_0.InternalTokenPriceUpdate{
+ SourceToken: sourceTokenEvmAddr,
+ UsdPerToken: tokenPriceUpdate.Value,
+ })
+ }
+ var usdPerUnitGas = big.NewInt(0)
+ var destChainSelector = uint64(0)
+ if len(report.GasPrices) > 1 {
+ return []byte{}, errors.Errorf("CommitStore V1_0_0 can only accept 1 gas price, received: %d", len(report.GasPrices))
+ }
+ if len(report.GasPrices) > 0 {
+ usdPerUnitGas = report.GasPrices[0].Value
+ destChainSelector = report.GasPrices[0].DestChainSelector
+ }
+ rep := commit_store_1_0_0.CommitStoreCommitReport{
+ PriceUpdates: commit_store_1_0_0.InternalPriceUpdates{
+ TokenPriceUpdates: tokenPriceUpdates,
+ UsdPerUnitGas: usdPerUnitGas,
+ DestChainSelector: destChainSelector,
+ },
+ Interval: commit_store_1_0_0.CommitStoreInterval{Min: report.Interval.Min, Max: report.Interval.Max},
+ MerkleRoot: report.MerkleRoot,
+ }
+ return commitReportArgs.PackValues([]interface{}{rep})
+}
+
+func DecodeCommitReport(commitReportArgs abi.Arguments, report []byte) (cciptypes.CommitStoreReport, error) {
+	unpacked, err := commitReportArgs.Unpack(report)
+	if err != nil {
+		return cciptypes.CommitStoreReport{}, err
+	}
+	if len(unpacked) != 1 {
+		return cciptypes.CommitStoreReport{}, errors.New("expected single struct value")
+	}
+
+	commitReport, ok := unpacked[0].(struct {
+		PriceUpdates struct {
+			TokenPriceUpdates []struct {
+				SourceToken common.Address `json:"sourceToken"`
+				UsdPerToken *big.Int       `json:"usdPerToken"`
+			} `json:"tokenPriceUpdates"`
+			DestChainSelector uint64   `json:"destChainSelector"`
+			UsdPerUnitGas     *big.Int `json:"usdPerUnitGas"`
+		} `json:"priceUpdates"`
+		Interval struct {
+			Min uint64 `json:"min"`
+			Max uint64 `json:"max"`
+		} `json:"interval"`
+		MerkleRoot [32]byte `json:"merkleRoot"`
+	})
+	if !ok {
+		return cciptypes.CommitStoreReport{}, errors.Errorf("invalid commit report got %T", unpacked[0])
+	}
+
+	var tokenPriceUpdates []cciptypes.TokenPrice
+	for _, u := range commitReport.PriceUpdates.TokenPriceUpdates {
+		tokenPriceUpdates = append(tokenPriceUpdates, cciptypes.TokenPrice{
+			Token: cciptypes.Address(u.SourceToken.String()),
+			Value: u.UsdPerToken,
+		})
+	}
+
+	var gasPrices []cciptypes.GasPrice
+	if commitReport.PriceUpdates.DestChainSelector != 0 {
+		// A DestChainSelector of 0 means the report carries no gas price update.
+		gasPrices = append(gasPrices, cciptypes.GasPrice{
+			DestChainSelector: commitReport.PriceUpdates.DestChainSelector,
+			Value:             commitReport.PriceUpdates.UsdPerUnitGas,
+		})
+	}
+
+	return cciptypes.CommitStoreReport{
+		TokenPrices: tokenPriceUpdates,
+		GasPrices:   gasPrices,
+		Interval: cciptypes.CommitStoreInterval{
+			Min: commitReport.Interval.Min,
+			Max: commitReport.Interval.Max,
+		},
+		MerkleRoot: commitReport.MerkleRoot,
+	}, nil
+}
+
+func (c *CommitStore) DecodeCommitReport(_ context.Context, report []byte) (cciptypes.CommitStoreReport, error) {
+ return DecodeCommitReport(c.commitReportArgs, report)
+}
+
+func (c *CommitStore) IsBlessed(ctx context.Context, root [32]byte) (bool, error) {
+ return c.commitStore.IsBlessed(&bind.CallOpts{Context: ctx}, root)
+}
+
+func (c *CommitStore) OffchainConfig(context.Context) (cciptypes.CommitOffchainConfig, error) {
+ c.configMu.RLock()
+ defer c.configMu.RUnlock()
+ return c.offchainConfig, nil
+}
+
+func (c *CommitStore) GasPriceEstimator(context.Context) (cciptypes.GasPriceEstimatorCommit, error) {
+ c.configMu.RLock()
+ defer c.configMu.RUnlock()
+ return c.gasPriceEstimator, nil
+}
+
+func (c *CommitStore) SetGasEstimator(ctx context.Context, gpe gas.EvmFeeEstimator) error {
+	c.configMu.Lock()
+	defer c.configMu.Unlock()
+	c.estimator = &gpe
+	return nil
+}
+
+func (c *CommitStore) SetSourceMaxGasPrice(ctx context.Context, sourceMaxGasPrice *big.Int) error {
+	c.configMu.Lock()
+	defer c.configMu.Unlock()
+	c.sourceMaxGasPrice = sourceMaxGasPrice
+	return nil
+}
+
+// CommitOffchainConfig is a legacy version of CommitOffchainConfig, used for CommitStore version 1.0.0 and 1.1.0
+type CommitOffchainConfig struct {
+ SourceFinalityDepth uint32
+ DestFinalityDepth uint32
+ FeeUpdateHeartBeat config.Duration
+ FeeUpdateDeviationPPB uint32
+ InflightCacheExpiry config.Duration
+ PriceReportingDisabled bool
+}
+
+func (c CommitOffchainConfig) Validate() error {
+ if c.SourceFinalityDepth == 0 {
+ return errors.New("must set SourceFinalityDepth")
+ }
+ if c.DestFinalityDepth == 0 {
+ return errors.New("must set DestFinalityDepth")
+ }
+ if c.FeeUpdateHeartBeat.Duration() == 0 {
+ return errors.New("must set FeeUpdateHeartBeat")
+ }
+ if c.FeeUpdateDeviationPPB == 0 {
+ return errors.New("must set FeeUpdateDeviationPPB")
+ }
+ if c.InflightCacheExpiry.Duration() == 0 {
+ return errors.New("must set InflightCacheExpiry")
+ }
+
+ return nil
+}
+
+func (c *CommitStore) ChangeConfig(_ context.Context, onchainConfig []byte, offchainConfig []byte) (cciptypes.Address, error) {
+ onchainConfigParsed, err := abihelpers.DecodeAbiStruct[ccipdata.CommitOnchainConfig](onchainConfig)
+ if err != nil {
+ return "", err
+ }
+
+ offchainConfigV1, err := ccipconfig.DecodeOffchainConfig[CommitOffchainConfig](offchainConfig)
+ if err != nil {
+ return "", err
+ }
+ c.configMu.Lock()
+ defer c.configMu.Unlock()
+
+ if c.estimator == nil {
+ return "", fmt.Errorf("this CommitStore estimator is nil. SetGasEstimator should be called before ChangeConfig")
+ }
+
+ if c.sourceMaxGasPrice == nil {
+ return "", fmt.Errorf("this CommitStore sourceMaxGasPrice is nil. SetSourceMaxGasPrice should be called before ChangeConfig")
+ }
+
+ c.gasPriceEstimator = prices.NewExecGasPriceEstimator(
+ *c.estimator,
+ c.sourceMaxGasPrice,
+ int64(offchainConfigV1.FeeUpdateDeviationPPB))
+ c.offchainConfig = ccipdata.NewCommitOffchainConfig(
+ offchainConfigV1.FeeUpdateDeviationPPB,
+ offchainConfigV1.FeeUpdateHeartBeat.Duration(),
+ offchainConfigV1.FeeUpdateDeviationPPB,
+ offchainConfigV1.FeeUpdateHeartBeat.Duration(),
+ offchainConfigV1.InflightCacheExpiry.Duration(),
+ offchainConfigV1.PriceReportingDisabled)
+ c.lggr.Infow("ChangeConfig",
+ "offchainConfig", offchainConfigV1,
+ "onchainConfig", onchainConfigParsed,
+ )
+ return cciptypes.Address(onchainConfigParsed.PriceRegistry.String()), nil
+}
+
+func (c *CommitStore) Close() error {
+ return logpollerutil.UnregisterLpFilters(c.lp, c.filters)
+}
+
+func (c *CommitStore) parseReport(log types.Log) (*cciptypes.CommitStoreReport, error) {
+ repAccepted, err := c.commitStore.ParseReportAccepted(log)
+ if err != nil {
+ return nil, err
+ }
+ // Translate to common struct.
+ var tokenPrices []cciptypes.TokenPrice
+ for _, tpu := range repAccepted.Report.PriceUpdates.TokenPriceUpdates {
+ tokenPrices = append(tokenPrices, cciptypes.TokenPrice{
+ Token: cciptypes.Address(tpu.SourceToken.String()),
+ Value: tpu.UsdPerToken,
+ })
+ }
+ return &cciptypes.CommitStoreReport{
+ TokenPrices: tokenPrices,
+ GasPrices: []cciptypes.GasPrice{{DestChainSelector: repAccepted.Report.PriceUpdates.DestChainSelector, Value: repAccepted.Report.PriceUpdates.UsdPerUnitGas}},
+ MerkleRoot: repAccepted.Report.MerkleRoot,
+ Interval: cciptypes.CommitStoreInterval{Min: repAccepted.Report.Interval.Min, Max: repAccepted.Report.Interval.Max},
+ }, nil
+}
+
+func (c *CommitStore) GetCommitReportMatchingSeqNum(ctx context.Context, seqNr uint64, confs int) ([]cciptypes.CommitStoreReportWithTxMeta, error) {
+ logs, err := c.lp.LogsDataWordBetween(
+ ctx,
+ c.reportAcceptedSig,
+ c.address,
+ c.reportAcceptedMaxSeqIndex-1,
+ c.reportAcceptedMaxSeqIndex,
+ logpoller.EvmWord(seqNr),
+ evmtypes.Confirmations(confs),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ parsedLogs, err := ccipdata.ParseLogs[cciptypes.CommitStoreReport](
+ logs,
+ c.lggr,
+ c.parseReport,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ res := make([]cciptypes.CommitStoreReportWithTxMeta, 0, len(parsedLogs))
+ for _, log := range parsedLogs {
+ res = append(res, cciptypes.CommitStoreReportWithTxMeta{
+ TxMeta: log.TxMeta,
+ CommitStoreReport: log.Data,
+ })
+ }
+
+ if len(res) > 1 {
+ c.lggr.Errorw("More than one report found for seqNr", "seqNr", seqNr, "commitReports", parsedLogs)
+ return res[:1], nil
+ }
+ return res, nil
+}
+
+func (c *CommitStore) GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confs int) ([]cciptypes.CommitStoreReportWithTxMeta, error) {
+ latestBlock, err := c.lp.LatestBlock(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ reportsQuery, err := query.Where(
+ c.address.String(),
+ logpoller.NewAddressFilter(c.address),
+ logpoller.NewEventSigFilter(c.reportAcceptedSig),
+ query.Timestamp(uint64(ts.Unix()), primitives.Gte),
+ logpoller.NewConfirmationsFilter(evmtypes.Confirmations(confs)),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ logs, err := c.lp.FilteredLogs(
+ ctx,
+ reportsQuery,
+ query.NewLimitAndSort(query.Limit{}, query.NewSortBySequence(query.Asc)),
+ "GetAcceptedCommitReportsGteTimestamp",
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ parsedLogs, err := ccipdata.ParseLogs[cciptypes.CommitStoreReport](logs, c.lggr, c.parseReport)
+ if err != nil {
+ return nil, fmt.Errorf("parse logs: %w", err)
+ }
+
+ parsedReports := make([]cciptypes.CommitStoreReportWithTxMeta, 0, len(parsedLogs))
+ for _, log := range parsedLogs {
+ parsedReports = append(parsedReports, cciptypes.CommitStoreReportWithTxMeta{
+ TxMeta: log.TxMeta.WithFinalityStatus(uint64(latestBlock.FinalizedBlockNumber)),
+ CommitStoreReport: log.Data,
+ })
+ }
+
+ return parsedReports, nil
+}
+
+func (c *CommitStore) GetExpectedNextSequenceNumber(ctx context.Context) (uint64, error) {
+ return c.commitStore.GetExpectedNextSequenceNumber(&bind.CallOpts{Context: ctx})
+}
+
+func (c *CommitStore) GetLatestPriceEpochAndRound(ctx context.Context) (uint64, error) {
+ return c.commitStore.GetLatestPriceEpochAndRound(&bind.CallOpts{Context: ctx})
+}
+
+func (c *CommitStore) IsDestChainHealthy(context.Context) (bool, error) {
+ if err := c.lp.Healthy(); err != nil {
+ return false, nil
+ }
+ return true, nil
+}
+
+func (c *CommitStore) IsDown(ctx context.Context) (bool, error) {
+ unPausedAndHealthy, err := c.commitStore.IsUnpausedAndARMHealthy(&bind.CallOpts{Context: ctx})
+ if err != nil {
+ return true, err
+ }
+ return !unPausedAndHealthy, nil
+}
+
+func (c *CommitStore) VerifyExecutionReport(ctx context.Context, report cciptypes.ExecReport) (bool, error) {
+ var hashes [][32]byte
+ for _, msg := range report.Messages {
+ hashes = append(hashes, msg.Hash)
+ }
+ res, err := c.commitStore.Verify(&bind.CallOpts{Context: ctx}, hashes, report.Proofs, report.ProofFlagBits)
+ if err != nil {
+ c.lggr.Errorw("Unable to call verify", "messages", report.Messages, "err", err)
+ return false, nil
+ }
+ // No timestamp, means failed to verify root.
+ if res.Cmp(big.NewInt(0)) == 0 {
+ c.lggr.Errorw("Root does not verify", "messages", report.Messages)
+ return false, nil
+ }
+ return true, nil
+}
+
+func (c *CommitStore) RegisterFilters() error {
+ return logpollerutil.RegisterLpFilters(c.lp, c.filters)
+}
+
+func NewCommitStore(lggr logger.Logger, addr common.Address, ec client.Client, lp logpoller.LogPoller) (*CommitStore, error) {
+ commitStore, err := commit_store_1_0_0.NewCommitStore(addr, ec)
+ if err != nil {
+ return nil, err
+ }
+ commitStoreABI := abihelpers.MustParseABI(commit_store_1_0_0.CommitStoreABI)
+ eventSig := abihelpers.MustGetEventID(ReportAccepted, commitStoreABI)
+ commitReportArgs := abihelpers.MustGetEventInputs(ReportAccepted, commitStoreABI)
+ filters := []logpoller.Filter{
+ {
+ Name: logpoller.FilterName(EXEC_REPORT_ACCEPTS, addr.String()),
+ EventSigs: []common.Hash{eventSig},
+ Addresses: []common.Address{addr},
+ Retention: ccipdata.CommitExecLogsRetention,
+ },
+ }
+ return &CommitStore{
+ commitStore: commitStore,
+ address: addr,
+ lggr: lggr,
+ lp: lp,
+
+ // Note that sourceMaxGasPrice and estimator now have explicit setters (CCIP-2493)
+
+ filters: filters,
+ commitReportArgs: commitReportArgs,
+ reportAcceptedSig: eventSig,
+ // offset || priceUpdatesOffset || minSeqNum || maxSeqNum || merkleRoot
+ reportAcceptedMaxSeqIndex: 3,
+ configMu: sync.RWMutex{},
+
+ // The fields below are initially empty and set on ChangeConfig method
+ offchainConfig: cciptypes.CommitOffchainConfig{},
+ gasPriceEstimator: prices.ExecGasPriceEstimator{},
+ }, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store_test.go
new file mode 100644
index 00000000000..31bcaf8a187
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store_test.go
@@ -0,0 +1,49 @@
+package v1_0_0
+
+import (
+ "math/big"
+ "math/rand"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+func TestCommitReportEncoding(t *testing.T) {
+ ctx := testutils.Context(t)
+ report := cciptypes.CommitStoreReport{
+ TokenPrices: []cciptypes.TokenPrice{
+ {
+ Token: cciptypes.Address(utils.RandomAddress().String()),
+ Value: big.NewInt(9e18),
+ },
+ },
+ GasPrices: []cciptypes.GasPrice{
+ {
+ DestChainSelector: rand.Uint64(),
+ Value: big.NewInt(2000e9),
+ },
+ },
+ MerkleRoot: [32]byte{123},
+ Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 10},
+ }
+
+ c, err := NewCommitStore(logger.TestLogger(t), utils.RandomAddress(), nil, mocks.NewLogPoller(t))
+ assert.NoError(t, err)
+
+ encodedReport, err := c.EncodeCommitReport(ctx, report)
+ require.NoError(t, err)
+ assert.Greater(t, len(encodedReport), 0)
+
+ decodedReport, err := c.DecodeCommitReport(ctx, encodedReport)
+ require.NoError(t, err)
+ require.Equal(t, report.TokenPrices, decodedReport.TokenPrices)
+ require.Equal(t, report, decodedReport)
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher.go
new file mode 100644
index 00000000000..0d1b7f736f6
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher.go
@@ -0,0 +1,85 @@
+package v1_0_0
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+)
+
+const (
+ MetaDataHashPrefix = "EVM2EVMMessageEvent"
+)
+
+var LeafDomainSeparator = [1]byte{0x00}
+
+type LeafHasher struct {
+ metaDataHash [32]byte
+ ctx hashutil.Hasher[[32]byte]
+ onRamp *evm_2_evm_onramp_1_0_0.EVM2EVMOnRamp
+}
+
+func GetMetaDataHash[H hashutil.Hash](ctx hashutil.Hasher[H], prefix [32]byte, sourceChainSelector uint64, onRampId common.Address, destChainSelector uint64) H {
+ paddedOnRamp := common.BytesToHash(onRampId[:])
+ return ctx.Hash(utils.ConcatBytes(prefix[:], math.U256Bytes(big.NewInt(0).SetUint64(sourceChainSelector)), math.U256Bytes(big.NewInt(0).SetUint64(destChainSelector)), paddedOnRamp[:]))
+}
+
+func NewLeafHasher(sourceChainSelector uint64, destChainSelector uint64, onRampId common.Address, ctx hashutil.Hasher[[32]byte], onRamp *evm_2_evm_onramp_1_0_0.EVM2EVMOnRamp) *LeafHasher {
+ return &LeafHasher{
+ metaDataHash: GetMetaDataHash(ctx, ctx.Hash([]byte(MetaDataHashPrefix)), sourceChainSelector, onRampId, destChainSelector),
+ ctx: ctx,
+ onRamp: onRamp,
+ }
+}
+
+func (t *LeafHasher) HashLeaf(log types.Log) ([32]byte, error) {
+ message, err := t.onRamp.ParseCCIPSendRequested(log)
+ if err != nil {
+ return [32]byte{}, err
+ }
+ encodedTokens, err := abihelpers.ABIEncode(
+ `[
+{"components": [{"name":"token","type":"address"},{"name":"amount","type":"uint256"}], "type":"tuple[]"}]`, message.Message.TokenAmounts)
+ if err != nil {
+ return [32]byte{}, err
+ }
+
+ packedValues, err := abihelpers.ABIEncode(
+ `[
+{"name": "leafDomainSeparator","type":"bytes1"},
+{"name": "metadataHash", "type":"bytes32"},
+{"name": "sequenceNumber", "type":"uint64"},
+{"name": "nonce", "type":"uint64"},
+{"name": "sender", "type":"address"},
+{"name": "receiver", "type":"address"},
+{"name": "dataHash", "type":"bytes32"},
+{"name": "tokenAmountsHash", "type":"bytes32"},
+{"name": "gasLimit", "type":"uint256"},
+{"name": "strict", "type":"bool"},
+{"name": "feeToken","type": "address"},
+{"name": "feeTokenAmount","type": "uint256"}
+]`,
+ LeafDomainSeparator,
+ t.metaDataHash,
+ message.Message.SequenceNumber,
+ message.Message.Nonce,
+ message.Message.Sender,
+ message.Message.Receiver,
+ t.ctx.Hash(message.Message.Data),
+ t.ctx.Hash(encodedTokens),
+ message.Message.GasLimit,
+ message.Message.Strict,
+ message.Message.FeeToken,
+ message.Message.FeeTokenAmount,
+ )
+ if err != nil {
+ return [32]byte{}, err
+ }
+ return t.ctx.Hash(packedValues), nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher_test.go
new file mode 100644
index 00000000000..b1219a27dfa
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher_test.go
@@ -0,0 +1,84 @@
+package v1_0_0
+
+import (
+ "encoding/hex"
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+)
+
+func TestHasherV1_0_0(t *testing.T) {
+ sourceChainSelector, destChainSelector := uint64(1), uint64(4)
+ onRampAddress := common.HexToAddress("0x5550000000000000000000000000000000000001")
+ onRampABI := abihelpers.MustParseABI(evm_2_evm_onramp_1_0_0.EVM2EVMOnRampABI)
+
+ ramp, err := evm_2_evm_onramp_1_0_0.NewEVM2EVMOnRamp(onRampAddress, nil)
+ require.NoError(t, err)
+ hashingCtx := hashutil.NewKeccak()
+ hasher := NewLeafHasher(sourceChainSelector, destChainSelector, onRampAddress, hashingCtx, ramp)
+
+ message := evm_2_evm_onramp_1_0_0.InternalEVM2EVMMessage{
+ SourceChainSelector: sourceChainSelector,
+ Sender: common.HexToAddress("0x1110000000000000000000000000000000000001"),
+ Receiver: common.HexToAddress("0x2220000000000000000000000000000000000001"),
+ SequenceNumber: 1337,
+ GasLimit: big.NewInt(100),
+ Strict: false,
+ Nonce: 1337,
+ FeeToken: common.Address{},
+ FeeTokenAmount: big.NewInt(1),
+ Data: []byte{},
+ TokenAmounts: []evm_2_evm_onramp_1_0_0.ClientEVMTokenAmount{{Token: common.HexToAddress("0x4440000000000000000000000000000000000001"), Amount: big.NewInt(12345678900)}},
+ MessageId: [32]byte{},
+ }
+
+ data, err := onRampABI.Events[CCIPSendRequestedEventName].Inputs.Pack(message)
+ require.NoError(t, err)
+ hash, err := hasher.HashLeaf(types.Log{Topics: []common.Hash{abihelpers.MustGetEventID("CCIPSendRequested", onRampABI)}, Data: data})
+ require.NoError(t, err)
+
+ // NOTE: Must match spec
+ require.Equal(t, "26f282c6ac8231933b1799648d01ff6cec792a33fb37408b4d135968f9168ace", hex.EncodeToString(hash[:]))
+
+ message = evm_2_evm_onramp_1_0_0.InternalEVM2EVMMessage{
+ SourceChainSelector: sourceChainSelector,
+ Sender: common.HexToAddress("0x1110000000000000000000000000000000000001"),
+ Receiver: common.HexToAddress("0x2220000000000000000000000000000000000001"),
+ SequenceNumber: 1337,
+ GasLimit: big.NewInt(100),
+ Strict: false,
+ Nonce: 1337,
+ FeeToken: common.Address{},
+ FeeTokenAmount: big.NewInt(1e12),
+ Data: []byte("foo bar baz"),
+ TokenAmounts: []evm_2_evm_onramp_1_0_0.ClientEVMTokenAmount{
+ {Token: common.HexToAddress("0x4440000000000000000000000000000000000001"), Amount: big.NewInt(12345678900)},
+ {Token: common.HexToAddress("0x6660000000000000000000000000000000000001"), Amount: big.NewInt(4204242)},
+ },
+ MessageId: [32]byte{},
+ }
+
+ data, err = onRampABI.Events[CCIPSendRequestedEventName].Inputs.Pack(message)
+ require.NoError(t, err)
+ hash, err = hasher.HashLeaf(types.Log{Topics: []common.Hash{abihelpers.MustGetEventID("CCIPSendRequested", onRampABI)}, Data: data})
+ require.NoError(t, err)
+
+ // NOTE: Must match spec
+ require.Equal(t, "05cee92e7cb86a37b6536554828a5b21ff20ac3d4ef821ec47056f1d963313de", hex.EncodeToString(hash[:]))
+}
+
+func TestMetaDataHash(t *testing.T) {
+ sourceChainSelector, destChainSelector := uint64(1), uint64(4)
+ onRampAddress := common.HexToAddress("0x5550000000000000000000000000000000000001")
+ ctx := hashutil.NewKeccak()
+ hash := GetMetaDataHash(ctx, ctx.Hash([]byte(MetaDataHashPrefix)), sourceChainSelector, onRampAddress, destChainSelector)
+ require.Equal(t, "1409948abde219f43870c3d6d1c16beabd8878eb5039a3fa765eb56e4b8ded9e", hex.EncodeToString(hash[:]))
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp.go
new file mode 100644
index 00000000000..137cbaf451d
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp.go
@@ -0,0 +1,689 @@
+package v1_0_0
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "sync"
+ "time"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib"
+
+ mapset "github.com/deckarep/golang-set/v2"
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/pkg/errors"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/logpollerutil"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices"
+)
+
+const (
+ EXEC_EXECUTION_STATE_CHANGES = "Exec execution state changes"
+ EXEC_TOKEN_POOL_ADDED = "Token pool added"
+ EXEC_TOKEN_POOL_REMOVED = "Token pool removed"
+)
+
+var (
+ abiOffRamp = abihelpers.MustParseABI(evm_2_evm_offramp_1_0_0.EVM2EVMOffRampABI)
+ _ ccipdata.OffRampReader = &OffRamp{}
+ ExecutionStateChangedEvent = abihelpers.MustGetEventID("ExecutionStateChanged", abiOffRamp)
+ PoolAddedEvent = abihelpers.MustGetEventID("PoolAdded", abiOffRamp)
+ PoolRemovedEvent = abihelpers.MustGetEventID("PoolRemoved", abiOffRamp)
+ ExecutionStateChangedSeqNrIndex = 1
+)
+
+var offRamp_poolAddedPoolRemovedEvents = []common.Hash{PoolAddedEvent, PoolRemovedEvent}
+
+type ExecOnchainConfig evm_2_evm_offramp_1_0_0.EVM2EVMOffRampDynamicConfig
+
+func (d ExecOnchainConfig) AbiString() string {
+ return `
+ [
+ {
+ "components": [
+ {"name": "permissionLessExecutionThresholdSeconds", "type": "uint32"},
+ {"name": "router", "type": "address"},
+ {"name": "priceRegistry", "type": "address"},
+ {"name": "maxTokensLength", "type": "uint16"},
+ {"name": "maxDataSize", "type": "uint32"}
+ ],
+ "type": "tuple"
+ }
+ ]`
+}
+
+func (d ExecOnchainConfig) Validate() error {
+ if d.PermissionLessExecutionThresholdSeconds == 0 {
+ return errors.New("must set PermissionLessExecutionThresholdSeconds")
+ }
+ if d.Router == (common.Address{}) {
+ return errors.New("must set Router address")
+ }
+ if d.PriceRegistry == (common.Address{}) {
+ return errors.New("must set PriceRegistry address")
+ }
+ if d.MaxTokensLength == 0 {
+ return errors.New("must set MaxTokensLength")
+ }
+ if d.MaxDataSize == 0 {
+ return errors.New("must set MaxDataSize")
+ }
+ return nil
+}
+
+// ExecOffchainConfig is the configuration for nodes executing committed CCIP messages (v1.0–v1.2).
+// It comes from the OffchainConfig field of the corresponding OCR2 plugin configuration.
+// NOTE: do not change the JSON format of this struct without consulting with the RDD people first.
+type ExecOffchainConfig struct {
+ // SourceFinalityDepth indicates how many confirmations a transaction should get on the source chain event before we consider it finalized.
+ SourceFinalityDepth uint32
+ // See [ccipdata.ExecOffchainConfig.DestOptimisticConfirmations]
+ DestOptimisticConfirmations uint32
+ // DestFinalityDepth indicates how many confirmations a transaction should get on the destination chain event before we consider it finalized.
+ DestFinalityDepth uint32
+ // See [ccipdata.ExecOffchainConfig.BatchGasLimit]
+ BatchGasLimit uint32
+ // See [ccipdata.ExecOffchainConfig.RelativeBoostPerWaitHour]
+ RelativeBoostPerWaitHour float64
+ // See [ccipdata.ExecOffchainConfig.InflightCacheExpiry]
+ InflightCacheExpiry config.Duration
+ // See [ccipdata.ExecOffchainConfig.RootSnoozeTime]
+ RootSnoozeTime config.Duration
+ // See [ccipdata.ExecOffchainConfig.BatchingStrategyID]
+ BatchingStrategyID uint32
+ // See [ccipdata.ExecOffchainConfig.MessageVisibilityInterval]
+ MessageVisibilityInterval config.Duration
+}
+
+func (c ExecOffchainConfig) Validate() error {
+ if c.SourceFinalityDepth == 0 {
+ return errors.New("must set SourceFinalityDepth")
+ }
+ if c.DestFinalityDepth == 0 {
+ return errors.New("must set DestFinalityDepth")
+ }
+ if c.DestOptimisticConfirmations == 0 {
+ return errors.New("must set DestOptimisticConfirmations")
+ }
+ if c.BatchGasLimit == 0 {
+ return errors.New("must set BatchGasLimit")
+ }
+ if c.RelativeBoostPerWaitHour == 0 {
+ return errors.New("must set RelativeBoostPerWaitHour")
+ }
+ if c.InflightCacheExpiry.Duration() == 0 {
+ return errors.New("must set InflightCacheExpiry")
+ }
+ if c.RootSnoozeTime.Duration() == 0 {
+ return errors.New("must set RootSnoozeTime")
+ }
+
+ return nil
+}
+
+type OffRamp struct {
+ offRampV100 evm_2_evm_offramp_1_0_0.EVM2EVMOffRampInterface
+ addr common.Address
+ lp logpoller.LogPoller
+ Logger logger.Logger
+ Client client.Client
+ evmBatchCaller rpclib.EvmBatchCaller
+ filters []logpoller.Filter
+ Estimator gas.EvmFeeEstimator
+ DestMaxGasPrice *big.Int
+ ExecutionReportArgs abi.Arguments
+ eventIndex int
+ eventSig common.Hash
+ cachedOffRampTokens cache.AutoSync[cciptypes.OffRampTokens]
+ sourceToDestTokensCache sync.Map
+
+ // Dynamic config
+ // configMu guards all the dynamic config fields.
+ configMu sync.RWMutex
+ gasPriceEstimator prices.GasPriceEstimatorExec
+ offchainConfig cciptypes.ExecOffchainConfig
+ onchainConfig cciptypes.ExecOnchainConfig
+}
+
+func (o *OffRamp) GetStaticConfig(ctx context.Context) (cciptypes.OffRampStaticConfig, error) {
+ if o.offRampV100 == nil {
+ return cciptypes.OffRampStaticConfig{}, fmt.Errorf("offramp not initialized")
+ }
+ c, err := o.offRampV100.GetStaticConfig(&bind.CallOpts{Context: ctx})
+ if err != nil {
+ return cciptypes.OffRampStaticConfig{}, fmt.Errorf("error while retrieving offramp config: %w", err)
+ }
+ return cciptypes.OffRampStaticConfig{
+ CommitStore: cciptypes.Address(c.CommitStore.String()),
+ ChainSelector: c.ChainSelector,
+ SourceChainSelector: c.SourceChainSelector,
+ OnRamp: cciptypes.Address(c.OnRamp.String()),
+ PrevOffRamp: cciptypes.Address(c.PrevOffRamp.String()),
+ ArmProxy: cciptypes.Address(c.ArmProxy.String()),
+ }, nil
+}
+
+// GetExecutionState returns the on-chain execution state for the given
+// message sequence number, propagating ctx to the contract call.
+func (o *OffRamp) GetExecutionState(ctx context.Context, sequenceNumber uint64) (uint8, error) {
+	return o.offRampV100.GetExecutionState(&bind.CallOpts{Context: ctx}, sequenceNumber)
+}
+
+// GetSenderNonce returns the offramp-tracked nonce for a single sender.
+// The generic address must be convertible to an EVM address.
+func (o *OffRamp) GetSenderNonce(ctx context.Context, sender cciptypes.Address) (uint64, error) {
+	evmAddr, err := ccipcalc.GenericAddrToEvm(sender)
+	if err != nil {
+		return 0, err
+	}
+	return o.offRampV100.GetSenderNonce(&bind.CallOpts{Context: ctx}, evmAddr)
+}
+
+// ListSenderNonces fetches the offramp nonce for each sender in one batched
+// RPC round trip. Returns an empty (non-nil) map for empty input, and errors
+// if the batch call fails, any result cannot be parsed as uint64, or the
+// number of results does not match the number of senders.
+func (o *OffRamp) ListSenderNonces(ctx context.Context, senders []cciptypes.Address) (map[cciptypes.Address]uint64, error) {
+	if len(senders) == 0 {
+		return make(map[cciptypes.Address]uint64), nil
+	}
+
+	evmSenders, err := ccipcalc.GenericAddrsToEvm(senders...)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to convert generic addresses to evm addresses")
+	}
+
+	// One getSenderNonce call per sender, executed as a single batch.
+	evmCalls := make([]rpclib.EvmCall, 0, len(evmSenders))
+	for _, evmAddr := range evmSenders {
+		evmCalls = append(evmCalls, rpclib.NewEvmCall(
+			abiOffRamp,
+			"getSenderNonce",
+			o.addr,
+			evmAddr,
+		))
+	}
+
+	results, err := o.evmBatchCaller.BatchCall(ctx, 0, evmCalls)
+	if err != nil {
+		o.Logger.Errorw("error while batch fetching sender nonces", "err", err, "senders", evmSenders)
+		return nil, err
+	}
+
+	nonces, err := rpclib.ParseOutputs[uint64](results, func(d rpclib.DataAndErr) (uint64, error) {
+		return rpclib.ParseOutput[uint64](d, 0)
+	})
+	if err != nil {
+		o.Logger.Errorw("error while parsing sender nonces", "err", err, "senders", evmSenders)
+		return nil, err
+	}
+
+	// Results are positional; a length mismatch means we cannot attribute
+	// nonces to senders, so fail rather than guess.
+	if len(senders) != len(nonces) {
+		o.Logger.Errorw("unexpected number of nonces returned", "senders", evmSenders, "nonces", nonces)
+		return nil, errors.New("unexpected number of nonces returned")
+	}
+
+	senderNonce := make(map[cciptypes.Address]uint64, len(senders))
+	for i, sender := range senders {
+		senderNonce[sender] = nonces[i]
+	}
+	return senderNonce, nil
+}
+
+// CurrentRateLimiterState reads the offramp's aggregate token-bucket rate
+// limiter state and converts it to the generic cciptypes representation.
+func (o *OffRamp) CurrentRateLimiterState(ctx context.Context) (cciptypes.TokenBucketRateLimit, error) {
+	state, err := o.offRampV100.CurrentRateLimiterState(&bind.CallOpts{Context: ctx})
+	if err != nil {
+		return cciptypes.TokenBucketRateLimit{}, err
+	}
+	return cciptypes.TokenBucketRateLimit{
+		Tokens:      state.Tokens,
+		LastUpdated: state.LastUpdated,
+		IsEnabled:   state.IsEnabled,
+		Capacity:    state.Capacity,
+		Rate:        state.Rate,
+	}, nil
+}
+
+// getDestinationTokensFromSourceTokens resolves each source token to its
+// destination token, serving from sourceToDestTokensCache where possible and
+// batch-fetching only the cache misses. The returned slice is positional:
+// destTokens[i] corresponds to tokenAddresses[i]. Errors if any two source
+// tokens map to the same destination token (an offramp misconfiguration).
+func (o *OffRamp) getDestinationTokensFromSourceTokens(ctx context.Context, tokenAddresses []cciptypes.Address) ([]cciptypes.Address, error) {
+	destTokens := make([]cciptypes.Address, len(tokenAddresses))
+	found := make(map[cciptypes.Address]bool)
+
+	// First pass: serve what we can from the cache.
+	for i, tokenAddress := range tokenAddresses {
+		if v, exists := o.sourceToDestTokensCache.Load(tokenAddress); exists {
+			if destToken, isAddr := v.(cciptypes.Address); isAddr {
+				destTokens[i] = destToken
+				found[tokenAddress] = true
+			} else {
+				// Unexpected cache entry type: log and fall through to an RPC fetch.
+				o.Logger.Errorf("source to dest cache contains invalid type %T", v)
+			}
+		}
+	}
+
+	if len(found) == len(tokenAddresses) {
+		return destTokens, nil
+	}
+
+	evmAddrs, err := ccipcalc.GenericAddrsToEvm(tokenAddresses...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Batch-fetch only the tokens that missed the cache.
+	evmCalls := make([]rpclib.EvmCall, 0, len(tokenAddresses))
+	for i, sourceTk := range tokenAddresses {
+		if !found[sourceTk] {
+			evmCalls = append(evmCalls, rpclib.NewEvmCall(abiOffRamp, "getDestinationToken", o.addr, evmAddrs[i]))
+		}
+	}
+
+	results, err := o.evmBatchCaller.BatchCall(ctx, 0, evmCalls)
+	if err != nil {
+		return nil, fmt.Errorf("batch call limit: %w", err)
+	}
+
+	destTokensFromRpc, err := rpclib.ParseOutputs[common.Address](results, func(d rpclib.DataAndErr) (common.Address, error) {
+		return rpclib.ParseOutput[common.Address](d, 0)
+	})
+	if err != nil {
+		return nil, fmt.Errorf("parse outputs: %w", err)
+	}
+
+	// Merge RPC results back into the positional slice; j walks the RPC
+	// results in the same order the misses were enqueued above.
+	j := 0
+	for i, sourceToken := range tokenAddresses {
+		if !found[sourceToken] {
+			destTokens[i] = cciptypes.Address(destTokensFromRpc[j].String())
+			o.sourceToDestTokensCache.Store(sourceToken, destTokens[i])
+			j++
+		}
+	}
+
+	// Sanity check: the source->dest mapping must be injective.
+	seenDestTokens := mapset.NewSet[cciptypes.Address]()
+	for _, destToken := range destTokens {
+		if seenDestTokens.Contains(destToken) {
+			return nil, fmt.Errorf("offRamp misconfig, destination token %s already exists", destToken)
+		}
+		seenDestTokens.Add(destToken)
+	}
+
+	return destTokens, nil
+}
+
+// GetSourceToDestTokensMapping builds the full source-token -> dest-token map
+// for this offramp, using the (cached) supported token list and the cached
+// destination-token resolution.
+func (o *OffRamp) GetSourceToDestTokensMapping(ctx context.Context) (map[cciptypes.Address]cciptypes.Address, error) {
+	tokens, err := o.GetTokens(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	destTokens, err := o.getDestinationTokensFromSourceTokens(ctx, tokens.SourceTokens)
+	if err != nil {
+		return nil, fmt.Errorf("get destination tokens from source tokens: %w", err)
+	}
+
+	// destTokens is positional with respect to tokens.SourceTokens.
+	srcToDstTokenMapping := make(map[cciptypes.Address]cciptypes.Address, len(tokens.SourceTokens))
+	for i, sourceToken := range tokens.SourceTokens {
+		srcToDstTokenMapping[sourceToken] = destTokens[i]
+	}
+	return srcToDstTokenMapping, nil
+}
+
+// GetTokens returns the offramp's destination and supported source tokens.
+// Results are cached and refreshed when pool added/removed events are
+// observed by the log poller (see cachedOffRampTokens).
+func (o *OffRamp) GetTokens(ctx context.Context) (cciptypes.OffRampTokens, error) {
+	return o.cachedOffRampTokens.Get(ctx, func(ctx context.Context) (cciptypes.OffRampTokens, error) {
+		destTokens, err := o.offRampV100.GetDestinationTokens(&bind.CallOpts{Context: ctx})
+		if err != nil {
+			return cciptypes.OffRampTokens{}, fmt.Errorf("get destination tokens: %w", err)
+		}
+		sourceTokens, err := o.offRampV100.GetSupportedTokens(&bind.CallOpts{Context: ctx})
+		if err != nil {
+			return cciptypes.OffRampTokens{}, err
+		}
+
+		return cciptypes.OffRampTokens{
+			DestinationTokens: ccipcalc.EvmAddrsToGeneric(destTokens...),
+			SourceTokens:      ccipcalc.EvmAddrsToGeneric(sourceTokens...),
+		}, nil
+	})
+}
+
+// GetRouter returns the router address from the offramp's on-chain dynamic
+// config as a generic address.
+func (o *OffRamp) GetRouter(ctx context.Context) (cciptypes.Address, error) {
+	dynamicConfig, err := o.offRampV100.GetDynamicConfig(&bind.CallOpts{Context: ctx})
+	if err != nil {
+		return "", err
+	}
+	return ccipcalc.EvmAddrToGeneric(dynamicConfig.Router), nil
+}
+
+// OffchainConfig returns the last exec offchain config installed via
+// UpdateDynamicConfig/ChangeConfig, under a read lock.
+func (o *OffRamp) OffchainConfig(ctx context.Context) (cciptypes.ExecOffchainConfig, error) {
+	o.configMu.RLock()
+	defer o.configMu.RUnlock()
+	return o.offchainConfig, nil
+}
+
+// OnchainConfig returns the last exec onchain config installed via
+// UpdateDynamicConfig/ChangeConfig, under a read lock.
+func (o *OffRamp) OnchainConfig(ctx context.Context) (cciptypes.ExecOnchainConfig, error) {
+	o.configMu.RLock()
+	defer o.configMu.RUnlock()
+	return o.onchainConfig, nil
+}
+
+// GasPriceEstimator returns the current exec gas price estimator installed
+// via UpdateDynamicConfig/ChangeConfig, under a read lock.
+func (o *OffRamp) GasPriceEstimator(ctx context.Context) (cciptypes.GasPriceEstimatorExec, error) {
+	o.configMu.RLock()
+	defer o.configMu.RUnlock()
+	return o.gasPriceEstimator, nil
+}
+
+// Address returns the offramp contract address as a generic address.
+func (o *OffRamp) Address(ctx context.Context) (cciptypes.Address, error) {
+	return cciptypes.Address(o.addr.String()), nil
+}
+
+// UpdateDynamicConfig atomically replaces the onchain config, offchain config
+// and gas price estimator under the config write lock, so readers never see a
+// partially-updated triple.
+func (o *OffRamp) UpdateDynamicConfig(onchainConfig cciptypes.ExecOnchainConfig, offchainConfig cciptypes.ExecOffchainConfig, gasPriceEstimator prices.GasPriceEstimatorExec) {
+	o.configMu.Lock()
+	o.onchainConfig = onchainConfig
+	o.offchainConfig = offchainConfig
+	o.gasPriceEstimator = gasPriceEstimator
+	o.configMu.Unlock()
+}
+
+// ChangeConfig decodes new onchain/offchain exec plugin configs, resolves the
+// destination router's wrapped native token, and atomically installs the new
+// dynamic config together with a fresh gas price estimator.
+// It returns the price registry address and the destination wrapped-native
+// token address.
+func (o *OffRamp) ChangeConfig(ctx context.Context, onchainConfigBytes []byte, offchainConfigBytes []byte) (cciptypes.Address, cciptypes.Address, error) {
+	onchainConfigParsed, err := abihelpers.DecodeAbiStruct[ExecOnchainConfig](onchainConfigBytes)
+	if err != nil {
+		return "", "", err
+	}
+
+	offchainConfigParsed, err := ccipconfig.DecodeOffchainConfig[ExecOffchainConfig](offchainConfigBytes)
+	if err != nil {
+		return "", "", err
+	}
+	destRouter, err := router.NewRouter(onchainConfigParsed.Router, o.Client)
+	if err != nil {
+		return "", "", err
+	}
+	// Fix: propagate the caller's context to the RPC call instead of nil opts,
+	// so cancellation/deadlines apply to this read as well.
+	destWrappedNative, err := destRouter.GetWrappedNative(&bind.CallOpts{Context: ctx})
+	if err != nil {
+		return "", "", err
+	}
+
+	offchainConfig := cciptypes.ExecOffchainConfig{
+		DestOptimisticConfirmations: offchainConfigParsed.DestOptimisticConfirmations,
+		BatchGasLimit:               offchainConfigParsed.BatchGasLimit,
+		RelativeBoostPerWaitHour:    offchainConfigParsed.RelativeBoostPerWaitHour,
+		InflightCacheExpiry:         offchainConfigParsed.InflightCacheExpiry,
+		RootSnoozeTime:              offchainConfigParsed.RootSnoozeTime,
+		MessageVisibilityInterval:   offchainConfigParsed.MessageVisibilityInterval,
+		BatchingStrategyID:          offchainConfigParsed.BatchingStrategyID,
+	}
+	onchainConfig := cciptypes.ExecOnchainConfig{
+		PermissionLessExecutionThresholdSeconds: time.Second * time.Duration(onchainConfigParsed.PermissionLessExecutionThresholdSeconds),
+		Router:                                  cciptypes.Address(onchainConfigParsed.Router.String()),
+	}
+	gasPriceEstimator := prices.NewExecGasPriceEstimator(o.Estimator, o.DestMaxGasPrice, 0)
+
+	o.UpdateDynamicConfig(onchainConfig, offchainConfig, gasPriceEstimator)
+
+	// Fix: log fields were swapped — "offchainConfig" logged the onchain
+	// struct and vice versa.
+	o.Logger.Infow("Starting exec plugin",
+		"offchainConfig", offchainConfigParsed,
+		"onchainConfig", onchainConfigParsed)
+	return cciptypes.Address(onchainConfigParsed.PriceRegistry.String()),
+		cciptypes.Address(destWrappedNative.String()), nil
+}
+
+// Close unregisters this reader's log poller filters (the inverse of
+// RegisterFilters).
+func (o *OffRamp) Close() error {
+	return logpollerutil.UnregisterLpFilters(o.lp, o.filters)
+}
+
+// GetExecutionStateChangesBetweenSeqNums returns ExecutionStateChanged events
+// whose sequence number lies in [seqNumMin, seqNumMax] with at least confs
+// confirmations. Each result's TxMeta is annotated with a finality status
+// derived from the log poller's latest finalized block number.
+func (o *OffRamp) GetExecutionStateChangesBetweenSeqNums(ctx context.Context, seqNumMin, seqNumMax uint64, confs int) ([]cciptypes.ExecutionStateChangedWithTxMeta, error) {
+	latestBlock, err := o.lp.LatestBlock(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("get lp latest block: %w", err)
+	}
+
+	// Query by the indexed sequence-number topic (position o.eventIndex).
+	logs, err := o.lp.IndexedLogsTopicRange(
+		ctx,
+		o.eventSig,
+		o.addr,
+		o.eventIndex,
+		logpoller.EvmWord(seqNumMin),
+		logpoller.EvmWord(seqNumMax),
+		evmtypes.Confirmations(confs),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	parsedLogs, err := ccipdata.ParseLogs[cciptypes.ExecutionStateChanged](
+		logs,
+		o.Logger,
+		func(log types.Log) (*cciptypes.ExecutionStateChanged, error) {
+			sc, err1 := o.offRampV100.ParseExecutionStateChanged(log)
+			if err1 != nil {
+				return nil, err1
+			}
+
+			return &cciptypes.ExecutionStateChanged{
+				SequenceNumber: sc.SequenceNumber,
+			}, nil
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("parse logs: %w", err)
+	}
+
+	res := make([]cciptypes.ExecutionStateChangedWithTxMeta, 0, len(parsedLogs))
+	for _, log := range parsedLogs {
+		res = append(res, cciptypes.ExecutionStateChangedWithTxMeta{
+			TxMeta:                log.TxMeta.WithFinalityStatus(uint64(latestBlock.FinalizedBlockNumber)),
+			ExecutionStateChanged: log.Data,
+		})
+	}
+	return res, nil
+}
+
+// encodeExecutionReport converts a generic exec report into the v1.0.0
+// InternalExecutionReport struct and ABI-packs it with the supplied argument
+// definition (the report parameter of manuallyExecute). Errors if any message
+// address is not a valid EVM address.
+func encodeExecutionReport(args abi.Arguments, report cciptypes.ExecReport) ([]byte, error) {
+	var msgs []evm_2_evm_offramp_1_0_0.InternalEVM2EVMMessage
+	for _, msg := range report.Messages {
+		// Convert each token amount's address to its EVM form.
+		var ta []evm_2_evm_offramp_1_0_0.ClientEVMTokenAmount
+		for _, tokenAndAmount := range msg.TokenAmounts {
+			evmTokenAddr, err := ccipcalc.GenericAddrToEvm(tokenAndAmount.Token)
+			if err != nil {
+				return nil, err
+			}
+
+			ta = append(ta, evm_2_evm_offramp_1_0_0.ClientEVMTokenAmount{
+				Token:  evmTokenAddr,
+				Amount: tokenAndAmount.Amount,
+			})
+		}
+
+		senderEvmAddr, err := ccipcalc.GenericAddrToEvm(msg.Sender)
+		if err != nil {
+			return nil, fmt.Errorf("msg sender is not evm addr: %w", err)
+		}
+
+		receiverEvmAddr, err := ccipcalc.GenericAddrToEvm(msg.Receiver)
+		if err != nil {
+			return nil, fmt.Errorf("msg receiver is not evm addr: %w", err)
+		}
+
+		feeTokenEvmAddr, err := ccipcalc.GenericAddrToEvm(msg.FeeToken)
+		if err != nil {
+			return nil, fmt.Errorf("fee token is not evm addr: %w", err)
+		}
+
+		msgs = append(msgs, evm_2_evm_offramp_1_0_0.InternalEVM2EVMMessage{
+			SourceChainSelector: msg.SourceChainSelector,
+			Sender:              senderEvmAddr,
+			Receiver:            receiverEvmAddr,
+			SequenceNumber:      msg.SequenceNumber,
+			GasLimit:            msg.GasLimit,
+			Strict:              msg.Strict,
+			Nonce:               msg.Nonce,
+			FeeToken:            feeTokenEvmAddr,
+			FeeTokenAmount:      msg.FeeTokenAmount,
+			Data:                msg.Data,
+			TokenAmounts:        ta,
+			MessageId:           msg.MessageID,
+		})
+	}
+
+	rep := evm_2_evm_offramp_1_0_0.InternalExecutionReport{
+		Messages:          msgs,
+		OffchainTokenData: report.OffchainTokenData,
+		Proofs:            report.Proofs,
+		ProofFlagBits:     report.ProofFlagBits,
+	}
+	return args.PackValues([]interface{}{&rep})
+}
+
+// EncodeExecutionReport ABI-encodes the report using this offramp's
+// manuallyExecute report argument definition.
+func (o *OffRamp) EncodeExecutionReport(ctx context.Context, report cciptypes.ExecReport) ([]byte, error) {
+	return encodeExecutionReport(o.ExecutionReportArgs, report)
+}
+
+// DecodeExecReport ABI-decodes an execution report encoded with the given
+// argument definition (the inverse of encodeExecutionReport). The anonymous
+// struct below must mirror the ABI tuple layout exactly — field order and
+// tags are what abi.Arguments.Unpack produces.
+func DecodeExecReport(ctx context.Context, args abi.Arguments, report []byte) (cciptypes.ExecReport, error) {
+	unpacked, err := args.Unpack(report)
+	if err != nil {
+		return cciptypes.ExecReport{}, err
+	}
+	if len(unpacked) == 0 {
+		return cciptypes.ExecReport{}, errors.New("assumptionViolation: expected at least one element")
+	}
+
+	erStruct, ok := unpacked[0].(struct {
+		Messages []struct {
+			SourceChainSelector uint64         `json:"sourceChainSelector"`
+			SequenceNumber      uint64         `json:"sequenceNumber"`
+			FeeTokenAmount      *big.Int       `json:"feeTokenAmount"`
+			Sender              common.Address `json:"sender"`
+			Nonce               uint64         `json:"nonce"`
+			GasLimit            *big.Int       `json:"gasLimit"`
+			Strict              bool           `json:"strict"`
+			Receiver            common.Address `json:"receiver"`
+			Data                []uint8        `json:"data"`
+			TokenAmounts        []struct {
+				Token  common.Address `json:"token"`
+				Amount *big.Int       `json:"amount"`
+			} `json:"tokenAmounts"`
+			FeeToken  common.Address `json:"feeToken"`
+			MessageId [32]uint8      `json:"messageId"`
+		} `json:"messages"`
+		OffchainTokenData [][][]uint8 `json:"offchainTokenData"`
+		Proofs            [][32]uint8 `json:"proofs"`
+		ProofFlagBits     *big.Int    `json:"proofFlagBits"`
+	})
+
+	if !ok {
+		return cciptypes.ExecReport{}, fmt.Errorf("got %T", unpacked[0])
+	}
+	messages := make([]cciptypes.EVM2EVMMessage, 0, len(erStruct.Messages))
+	for _, msg := range erStruct.Messages {
+		var tokensAndAmounts []cciptypes.TokenAmount
+		for _, tokenAndAmount := range msg.TokenAmounts {
+			tokensAndAmounts = append(tokensAndAmounts, cciptypes.TokenAmount{
+				Token:  cciptypes.Address(tokenAndAmount.Token.String()),
+				Amount: tokenAndAmount.Amount,
+			})
+		}
+		messages = append(messages, cciptypes.EVM2EVMMessage{
+			SequenceNumber:      msg.SequenceNumber,
+			GasLimit:            msg.GasLimit,
+			Nonce:               msg.Nonce,
+			MessageID:           msg.MessageId,
+			SourceChainSelector: msg.SourceChainSelector,
+			Sender:              cciptypes.Address(msg.Sender.String()),
+			Receiver:            cciptypes.Address(msg.Receiver.String()),
+			Strict:              msg.Strict,
+			FeeToken:            cciptypes.Address(msg.FeeToken.String()),
+			FeeTokenAmount:      msg.FeeTokenAmount,
+			Data:                msg.Data,
+			TokenAmounts:        tokensAndAmounts,
+			// TODO: Not needed for plugins, but should be recomputed for consistency.
+			// Requires the offramp knowing about onramp version
+			Hash: [32]byte{},
+		})
+	}
+
+	// Unpack will populate with big.Int{false, } for 0 values,
+	// which is different from the expected big.NewInt(0). Rebuild to the expected value for this case.
+	return cciptypes.ExecReport{
+		Messages:          messages,
+		OffchainTokenData: erStruct.OffchainTokenData,
+		Proofs:            erStruct.Proofs,
+		ProofFlagBits:     new(big.Int).SetBytes(erStruct.ProofFlagBits.Bytes()),
+	}, nil
+}
+
+// DecodeExecutionReport decodes a report using this offramp's
+// manuallyExecute report argument definition.
+func (o *OffRamp) DecodeExecutionReport(ctx context.Context, report []byte) (cciptypes.ExecReport, error) {
+	return DecodeExecReport(ctx, o.ExecutionReportArgs, report)
+}
+
+// RegisterFilters registers this reader's log poller filters (the inverse of
+// Close).
+func (o *OffRamp) RegisterFilters() error {
+	return logpollerutil.RegisterLpFilters(o.lp, o.filters)
+}
+
+// NewOffRamp constructs an OffRamp reader for a v1.0.0 EVM2EVMOffRamp at
+// addr, wiring the log poller filters for execution-state-change and
+// pool-added/removed events. Dynamic config fields are zero-valued until
+// ChangeConfig/UpdateDynamicConfig is called.
+func NewOffRamp(lggr logger.Logger, addr common.Address, ec client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int) (*OffRamp, error) {
+	offRamp, err := evm_2_evm_offramp_1_0_0.NewEVM2EVMOffRamp(addr, ec)
+	if err != nil {
+		return nil, err
+	}
+
+	// Sequence number is the indexed topic at position 1 of ExecutionStateChanged.
+	executionStateChangedSequenceNumberIndex := 1
+	// ABI arguments for manuallyExecute's first input: the execution report.
+	executionReportArgs := abihelpers.MustGetMethodInputs("manuallyExecute", abiOffRamp)[:1]
+	filters := []logpoller.Filter{
+		{
+			Name:      logpoller.FilterName(EXEC_EXECUTION_STATE_CHANGES, addr.String()),
+			EventSigs: []common.Hash{ExecutionStateChangedEvent},
+			Addresses: []common.Address{addr},
+			Retention: ccipdata.CommitExecLogsRetention,
+		},
+		{
+			Name:      logpoller.FilterName(EXEC_TOKEN_POOL_ADDED, addr.String()),
+			EventSigs: []common.Hash{PoolAddedEvent},
+			Addresses: []common.Address{addr},
+			Retention: ccipdata.CacheEvictionLogsRetention,
+		},
+		{
+			Name:      logpoller.FilterName(EXEC_TOKEN_POOL_REMOVED, addr.String()),
+			EventSigs: []common.Hash{PoolRemovedEvent},
+			Addresses: []common.Address{addr},
+			Retention: ccipdata.CacheEvictionLogsRetention,
+		},
+	}
+
+	return &OffRamp{
+		offRampV100:         offRamp,
+		Client:              ec,
+		addr:                addr,
+		Logger:              lggr,
+		lp:                  lp,
+		filters:             filters,
+		Estimator:           estimator,
+		DestMaxGasPrice:     destMaxGasPrice,
+		ExecutionReportArgs: executionReportArgs,
+		eventSig:            ExecutionStateChangedEvent,
+		eventIndex:          executionStateChangedSequenceNumberIndex,
+		configMu:            sync.RWMutex{},
+		evmBatchCaller: rpclib.NewDynamicLimitedBatchCaller(
+			lggr,
+			ec,
+			rpclib.DefaultRpcBatchSizeLimit,
+			rpclib.DefaultRpcBatchBackOffMultiplier,
+			rpclib.DefaultMaxParallelRpcCalls,
+		),
+		// Token cache is invalidated when pool added/removed logs appear.
+		cachedOffRampTokens: cache.NewLogpollerEventsBased[cciptypes.OffRampTokens](
+			lp,
+			offRamp_poolAddedPoolRemovedEvents,
+			offRamp.Address(),
+		),
+		// values set on the fly after ChangeConfig is called
+		gasPriceEstimator: prices.ExecGasPriceEstimator{},
+		offchainConfig:    cciptypes.ExecOffchainConfig{},
+		onchainConfig:     cciptypes.ExecOnchainConfig{},
+	}, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_test.go
new file mode 100644
index 00000000000..d834b792ce4
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_test.go
@@ -0,0 +1,38 @@
+package v1_0_0_test
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+)
+
+// TestExecutionReportEncodingV100 round-trips an exec report through
+// EncodeExecutionReport/DecodeExecutionReport and requires equality.
+func TestExecutionReportEncodingV100(t *testing.T) {
+	// Note could consider some fancier testing here (fuzz/property)
+	// but I think that would essentially be testing geth's abi library
+	// as our encode/decode is a thin wrapper around that.
+	report := cciptypes.ExecReport{
+		Messages:          []cciptypes.EVM2EVMMessage{},
+		OffchainTokenData: [][][]byte{{}},
+		Proofs:            [][32]byte{testutils.Random32Byte()},
+		ProofFlagBits:     big.NewInt(133),
+	}
+
+	offRamp, err := v1_0_0.NewOffRamp(logger.TestLogger(t), utils.RandomAddress(), nil, lpmocks.NewLogPoller(t), nil, nil)
+	require.NoError(t, err)
+
+	ctx := testutils.Context(t)
+	encodeExecutionReport, err := offRamp.EncodeExecutionReport(ctx, report)
+	require.NoError(t, err)
+	decodeCommitReport, err := offRamp.DecodeExecutionReport(ctx, encodeExecutionReport)
+	require.NoError(t, err)
+	require.Equal(t, report.Proofs, decodeCommitReport.Proofs)
+	require.Equal(t, report, decodeCommitReport)
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_unit_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_unit_test.go
new file mode 100644
index 00000000000..f8b1dc4e615
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_unit_test.go
@@ -0,0 +1,231 @@
+package v1_0_0
+
+import (
+ "fmt"
+ "math/rand"
+ "slices"
+ "testing"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ evmclimocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_0_0"
+ mock_contracts "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/mocks/v1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+)
+
+// TestOffRampGetDestinationTokensFromSourceTokens exercises
+// getDestinationTokensFromSourceTokens against a mocked batch caller,
+// mutating the mocked RPC outputs per case to cover happy path, RPC errors,
+// and compatible/incompatible output types.
+func TestOffRampGetDestinationTokensFromSourceTokens(t *testing.T) {
+	ctx := testutils.Context(t)
+	const numSrcTokens = 20
+
+	testCases := []struct {
+		name           string
+		outputChangeFn func(outputs []rpclib.DataAndErr) []rpclib.DataAndErr
+		expErr         bool
+	}{
+		{
+			name:           "happy path",
+			outputChangeFn: func(outputs []rpclib.DataAndErr) []rpclib.DataAndErr { return outputs },
+			expErr:         false,
+		},
+		{
+			name: "rpc error",
+			outputChangeFn: func(outputs []rpclib.DataAndErr) []rpclib.DataAndErr {
+				outputs[2].Err = fmt.Errorf("some error")
+				return outputs
+			},
+			expErr: true,
+		},
+		{
+			name: "unexpected outputs length should be fine if the type is correct",
+			outputChangeFn: func(outputs []rpclib.DataAndErr) []rpclib.DataAndErr {
+				outputs[0].Outputs = append(outputs[0].Outputs, "unexpected", 123)
+				return outputs
+			},
+			expErr: false,
+		},
+		{
+			name: "different compatible type",
+			outputChangeFn: func(outputs []rpclib.DataAndErr) []rpclib.DataAndErr {
+				outputs[0].Outputs = []any{outputs[0].Outputs[0].(common.Address)}
+				return outputs
+			},
+			expErr: false,
+		},
+		{
+			name: "different incompatible type",
+			outputChangeFn: func(outputs []rpclib.DataAndErr) []rpclib.DataAndErr {
+				outputs[0].Outputs = []any{outputs[0].Outputs[0].(common.Address).Bytes()}
+				return outputs
+			},
+			expErr: true,
+		},
+	}
+
+	lp := mocks.NewLogPoller(t)
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			batchCaller := rpclibmocks.NewEvmBatchCaller(t)
+			o := &OffRamp{evmBatchCaller: batchCaller, lp: lp}
+			srcTks, dstTks, outputs := generateTokensAndOutputs(numSrcTokens)
+			outputs = tc.outputChangeFn(outputs)
+			batchCaller.On("BatchCall", mock.Anything, mock.Anything, mock.Anything).Return(outputs, nil)
+			genericAddrs := ccipcalc.EvmAddrsToGeneric(srcTks...)
+			actualDstTokens, err := o.getDestinationTokensFromSourceTokens(ctx, genericAddrs)
+
+			if tc.expErr {
+				assert.Error(t, err)
+				return
+			}
+
+			assert.NoError(t, err)
+			assert.Equal(t, ccipcalc.EvmAddrsToGeneric(dstTks...), actualDstTokens)
+		})
+	}
+}
+
+// TestCachedOffRampTokens verifies GetTokens returns the mocked destination
+// and source tokens via the logpoller-events-based cache.
+func TestCachedOffRampTokens(t *testing.T) {
+	// Test data.
+	srcTks, dstTks, _ := generateTokensAndOutputs(3)
+
+	// Mock contract wrapper.
+	mockOffRamp := mock_contracts.NewEVM2EVMOffRampInterface(t)
+	mockOffRamp.On("GetDestinationTokens", mock.Anything).Return(dstTks, nil)
+	mockOffRamp.On("GetSupportedTokens", mock.Anything).Return(srcTks, nil)
+	mockOffRamp.On("Address").Return(utils.RandomAddress())
+
+	lp := mocks.NewLogPoller(t)
+	lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: rand.Int63()}, nil)
+
+	offRamp := OffRamp{
+		offRampV100:    mockOffRamp,
+		lp:             lp,
+		Logger:         logger.TestLogger(t),
+		Client:         evmclimocks.NewClient(t),
+		evmBatchCaller: rpclibmocks.NewEvmBatchCaller(t),
+		cachedOffRampTokens: cache.NewLogpollerEventsBased[cciptypes.OffRampTokens](
+			lp,
+			offRamp_poolAddedPoolRemovedEvents,
+			mockOffRamp.Address(),
+		),
+	}
+
+	ctx := testutils.Context(t)
+	tokens, err := offRamp.GetTokens(ctx)
+	require.NoError(t, err)
+
+	// Fix: removed a dead `expectedPools` map that was built from dstTks but
+	// never asserted against anything.
+	// Verify data is properly loaded in the cache.
+	require.Equal(t, cciptypes.OffRampTokens{
+		DestinationTokens: ccipcalc.EvmAddrsToGeneric(dstTks...),
+		SourceTokens:      ccipcalc.EvmAddrsToGeneric(srcTks...),
+	}, tokens)
+}
+
+// generateTokensAndOutputs builds parallel slices of nbTokens random source
+// and destination token addresses, plus matching batch-call outputs where
+// each entry carries the corresponding destination token and no error.
+func generateTokensAndOutputs(nbTokens uint) ([]common.Address, []common.Address, []rpclib.DataAndErr) {
+	sourceTokens := make([]common.Address, nbTokens)
+	destTokens := make([]common.Address, nbTokens)
+	callOutputs := make([]rpclib.DataAndErr, nbTokens)
+	for i := uint(0); i < nbTokens; i++ {
+		sourceTokens[i] = utils.RandomAddress()
+		destTokens[i] = utils.RandomAddress()
+		callOutputs[i] = rpclib.DataAndErr{Outputs: []any{destTokens[i]}, Err: nil}
+	}
+	return sourceTokens, destTokens, callOutputs
+}
+
+// Test_LogsAreProperlyMarkedAsFinalized checks that
+// GetExecutionStateChangesBetweenSeqNums marks each returned log finalized
+// exactly when its block number is at or below the poller's finalized block.
+func Test_LogsAreProperlyMarkedAsFinalized(t *testing.T) {
+	minSeqNr := uint64(10)
+	maxSeqNr := uint64(14)
+	// Logs at (seqNr, blockNumber): (10,2) (11,3) (12,5) (14,7).
+	inputLogs := []logpoller.Log{
+		CreateExecutionStateChangeEventLog(t, 10, 2, utils.RandomBytes32()),
+		CreateExecutionStateChangeEventLog(t, 11, 3, utils.RandomBytes32()),
+		CreateExecutionStateChangeEventLog(t, 12, 5, utils.RandomBytes32()),
+		CreateExecutionStateChangeEventLog(t, 14, 7, utils.RandomBytes32()),
+	}
+
+	tests := []struct {
+		name                        string
+		lastFinalizedBlock          uint64
+		expectedFinalizedSequenceNr []uint64
+	}{
+		{
+			"all logs are finalized",
+			10,
+			[]uint64{10, 11, 12, 14},
+		},
+		{
+			"some logs are finalized",
+			5,
+			[]uint64{10, 11, 12},
+		},
+		{
+			"no logs are finalized",
+			1,
+			[]uint64{},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			offrampAddress := utils.RandomAddress()
+
+			lp := mocks.NewLogPoller(t)
+			lp.On("LatestBlock", mock.Anything).
+				Return(logpoller.LogPollerBlock{FinalizedBlockNumber: int64(tt.lastFinalizedBlock)}, nil)
+			lp.On("IndexedLogsTopicRange", mock.Anything, ExecutionStateChangedEvent, offrampAddress, 1, logpoller.EvmWord(minSeqNr), logpoller.EvmWord(maxSeqNr), evmtypes.Confirmations(0)).
+				Return(inputLogs, nil)
+
+			offRamp, err := NewOffRamp(logger.TestLogger(t), offrampAddress, evmclimocks.NewClient(t), lp, nil, nil)
+			require.NoError(t, err)
+			logs, err := offRamp.GetExecutionStateChangesBetweenSeqNums(testutils.Context(t), minSeqNr, maxSeqNr, 0)
+			require.NoError(t, err)
+			assert.Len(t, logs, len(inputLogs))
+
+			for _, log := range logs {
+				assert.Equal(t, slices.Contains(tt.expectedFinalizedSequenceNr, log.SequenceNumber), log.IsFinalized())
+			}
+		})
+	}
+}
+
+// TestGetRouter verifies GetRouter returns the router address from the
+// mocked dynamic config, round-tripped through the generic address type.
+func TestGetRouter(t *testing.T) {
+	routerAddr := utils.RandomAddress()
+
+	mockOffRamp := mock_contracts.NewEVM2EVMOffRampInterface(t)
+	mockOffRamp.On("GetDynamicConfig", mock.Anything).Return(evm_2_evm_offramp_1_0_0.EVM2EVMOffRampDynamicConfig{
+		Router: routerAddr,
+	}, nil)
+
+	offRamp := OffRamp{
+		offRampV100: mockOffRamp,
+	}
+
+	ctx := testutils.Context(t)
+	gotRouterAddr, err := offRamp.GetRouter(ctx)
+	require.NoError(t, err)
+
+	gotRouterEvmAddr, err := ccipcalc.GenericAddrToEvm(gotRouterAddr)
+	require.NoError(t, err)
+	assert.Equal(t, routerAddr, gotRouterEvmAddr)
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_test.go
new file mode 100644
index 00000000000..44fb6ca0630
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_test.go
@@ -0,0 +1,232 @@
+package v1_0_0
+
+import (
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks"
+
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+)
+
+// TestExecOffchainConfig100_Encoding round-trips ExecOffchainConfig through
+// EncodeOffchainConfig/DecodeOffchainConfig; decode must fail (with a
+// "must set" validation error) when required fields are zero or missing.
+func TestExecOffchainConfig100_Encoding(t *testing.T) {
+	tests := []struct {
+		name      string
+		want      ExecOffchainConfig
+		expectErr bool
+	}{
+		{
+			name: "encodes and decodes config with all fields set",
+			want: ExecOffchainConfig{
+				SourceFinalityDepth:         3,
+				DestOptimisticConfirmations: 6,
+				DestFinalityDepth:           3,
+				BatchGasLimit:               5_000_000,
+				RelativeBoostPerWaitHour:    0.07,
+				InflightCacheExpiry:         *config.MustNewDuration(64 * time.Second),
+				RootSnoozeTime:              *config.MustNewDuration(128 * time.Minute),
+				MessageVisibilityInterval:   *config.MustNewDuration(6 * time.Hour),
+			},
+		},
+		{
+			name: "fails decoding when all fields present but with 0 values",
+			want: ExecOffchainConfig{
+				SourceFinalityDepth:         0,
+				DestFinalityDepth:           0,
+				DestOptimisticConfirmations: 0,
+				BatchGasLimit:               0,
+				RelativeBoostPerWaitHour:    0,
+				InflightCacheExpiry:         *config.MustNewDuration(0),
+				RootSnoozeTime:              *config.MustNewDuration(0),
+				MessageVisibilityInterval:   *config.MustNewDuration(0),
+			},
+			expectErr: true,
+		},
+		{
+			name:      "fails decoding when all fields are missing",
+			want:      ExecOffchainConfig{},
+			expectErr: true,
+		},
+		{
+			name: "fails decoding when some fields are missing",
+			want: ExecOffchainConfig{
+				SourceFinalityDepth: 99999999,
+				InflightCacheExpiry: *config.MustNewDuration(64 * time.Second),
+			},
+			expectErr: true,
+		},
+	}
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			exp := tc.want
+			encode, err := ccipconfig.EncodeOffchainConfig(&exp)
+			require.NoError(t, err)
+			got, err := ccipconfig.DecodeOffchainConfig[ExecOffchainConfig](encode)
+
+			if tc.expectErr {
+				require.ErrorContains(t, err, "must set")
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, tc.want, got)
+			}
+		})
+	}
+}
+
+// TestExecOffchainConfig100_AllFieldsRequired deletes each field of an
+// encoded config in turn and asserts decoding fails naming that field —
+// except MessageVisibilityInterval (optional, skipped) and
+// BatchingStrategyID (defaults, so decode still succeeds).
+func TestExecOffchainConfig100_AllFieldsRequired(t *testing.T) {
+	cfg := ExecOffchainConfig{
+		SourceFinalityDepth:         3,
+		DestOptimisticConfirmations: 6,
+		DestFinalityDepth:           3,
+		BatchGasLimit:               5_000_000,
+		RelativeBoostPerWaitHour:    0.07,
+		InflightCacheExpiry:         *config.MustNewDuration(64 * time.Second),
+		RootSnoozeTime:              *config.MustNewDuration(128 * time.Minute),
+		BatchingStrategyID:          0,
+	}
+	encoded, err := ccipconfig.EncodeOffchainConfig(&cfg)
+	require.NoError(t, err)
+
+	var configAsMap map[string]any
+	err = json.Unmarshal(encoded, &configAsMap)
+	require.NoError(t, err)
+	for keyToDelete := range configAsMap {
+		if keyToDelete == "MessageVisibilityInterval" {
+			continue // this field is optional
+		}
+
+		// Re-encode the config with keyToDelete removed.
+		partialConfig := make(map[string]any)
+		for k, v := range configAsMap {
+			if k != keyToDelete {
+				partialConfig[k] = v
+			}
+		}
+		encodedPartialConfig, err := json.Marshal(partialConfig)
+		require.NoError(t, err)
+		_, err = ccipconfig.DecodeOffchainConfig[ExecOffchainConfig](encodedPartialConfig)
+		if keyToDelete == "BatchingStrategyID" {
+			require.NoError(t, err)
+		} else {
+			require.ErrorContains(t, err, keyToDelete)
+		}
+	}
+}
+
+// Test_GetSendersNonce exercises ListSenderNonces against mocked batch
+// callers: empty input, batch-call failure, result-count mismatch, a single
+// failed sub-request, and the happy path mapping each sender to its nonce.
+func Test_GetSendersNonce(t *testing.T) {
+	sender1 := cciptypes.Address(utils.RandomAddress().String())
+	sender2 := cciptypes.Address(utils.RandomAddress().String())
+
+	tests := []struct {
+		name           string
+		addresses      []cciptypes.Address
+		batchCaller    *rpclibmocks.EvmBatchCaller
+		expectedResult map[cciptypes.Address]uint64
+		expectedError  bool
+	}{
+		{
+			name:           "return empty map when input is empty",
+			addresses:      []cciptypes.Address{},
+			batchCaller:    rpclibmocks.NewEvmBatchCaller(t),
+			expectedResult: map[cciptypes.Address]uint64{},
+		},
+		{
+			name:      "return error when batch call fails",
+			addresses: []cciptypes.Address{sender1},
+			batchCaller: func() *rpclibmocks.EvmBatchCaller {
+				mockBatchCaller := rpclibmocks.NewEvmBatchCaller(t)
+				mockBatchCaller.On("BatchCall", mock.Anything, mock.Anything, mock.Anything).
+					Return(nil, errors.New("batch call error"))
+				return mockBatchCaller
+			}(),
+			expectedError: true,
+		},
+		{
+			name:      "return error when nonces dont match senders",
+			addresses: []cciptypes.Address{sender1, sender2},
+			batchCaller: func() *rpclibmocks.EvmBatchCaller {
+				mockBatchCaller := rpclibmocks.NewEvmBatchCaller(t)
+				// Two senders but only one result: triggers the length check.
+				results := []rpclib.DataAndErr{
+					{
+						Outputs: []any{uint64(1)},
+						Err:     nil,
+					},
+				}
+				mockBatchCaller.On("BatchCall", mock.Anything, mock.Anything, mock.Anything).
+					Return(results, nil)
+				return mockBatchCaller
+			}(),
+			expectedError: true,
+		},
+		{
+			name:      "return error when single request from batch fails",
+			addresses: []cciptypes.Address{sender1, sender2},
+			batchCaller: func() *rpclibmocks.EvmBatchCaller {
+				mockBatchCaller := rpclibmocks.NewEvmBatchCaller(t)
+				results := []rpclib.DataAndErr{
+					{
+						Outputs: []any{uint64(1)},
+						Err:     nil,
+					},
+					{
+						Outputs: []any{},
+						Err:     errors.New("request failed"),
+					},
+				}
+				mockBatchCaller.On("BatchCall", mock.Anything, mock.Anything, mock.Anything).
+					Return(results, nil)
+				return mockBatchCaller
+			}(),
+			expectedError: true,
+		},
+		{
+			name:      "return map of nonce per sender",
+			addresses: []cciptypes.Address{sender1, sender2},
+			batchCaller: func() *rpclibmocks.EvmBatchCaller {
+				mockBatchCaller := rpclibmocks.NewEvmBatchCaller(t)
+				results := []rpclib.DataAndErr{
+					{
+						Outputs: []any{uint64(1)},
+						Err:     nil,
+					},
+					{
+						Outputs: []any{uint64(2)},
+						Err:     nil,
+					},
+				}
+				mockBatchCaller.On("BatchCall", mock.Anything, mock.Anything, mock.Anything).
+					Return(results, nil)
+				return mockBatchCaller
+			}(),
+			expectedResult: map[cciptypes.Address]uint64{
+				sender1: uint64(1),
+				sender2: uint64(2),
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			offramp := OffRamp{evmBatchCaller: test.batchCaller, Logger: logger.TestLogger(t)}
+			nonce, err := offramp.ListSenderNonces(testutils.Context(t), test.addresses)
+
+			if test.expectedError {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, test.expectedResult, nonce)
+			}
+		})
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/onramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/onramp.go
new file mode 100644
index 00000000000..29cb357223b
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/onramp.go
@@ -0,0 +1,240 @@
+package v1_0_0
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/arm_contract"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/logpollerutil"
+)
+
// Event names used to resolve topic IDs from the v1.0.0 onramp ABI.
const (
	CCIPSendRequestedEventName = "CCIPSendRequested"
	ConfigSetEventName         = "ConfigSet"
)

// Compile-time check that OnRamp implements the reader interface.
var _ ccipdata.OnRampReader = &OnRamp{}

// OnRamp reads CCIP send requests and config from a v1.0.0 EVM2EVMOnRamp,
// backed by the log poller for historical queries.
type OnRamp struct {
	address                    common.Address
	onRamp                     *evm_2_evm_onramp_1_0_0.EVM2EVMOnRamp
	lp                         logpoller.LogPoller
	lggr                       logger.Logger
	client                     client.Client
	leafHasher                 ccipdata.LeafHasherInterface[[32]byte]
	sendRequestedEventSig      common.Hash
	// Index of the data word holding the sequence number in CCIPSendRequested logs.
	sendRequestedSeqNumberWord int
	filters                    []logpoller.Filter
	// Price registry address, refreshed whenever a ConfigSet log is observed.
	cachedSourcePriceRegistryAddress cache.AutoSync[cciptypes.Address]
	// Static config can be cached, because it's never expected to change.
	// The only way to change that is through the contract's constructor (redeployment)
	cachedStaticConfig cache.OnceCtxFunction[evm_2_evm_onramp_1_0_0.EVM2EVMOnRampStaticConfig]
	cachedRmnContract  cache.OnceCtxFunction[*arm_contract.ARMContract]
}
+
// NewOnRamp constructs a v1.0.0 OnRamp reader. It resolves the
// CCIPSendRequested and ConfigSet event IDs from the onramp ABI, prepares
// (but does not register) the corresponding log poller filters, and wires up
// once-only caches for the static config and the RMN (ARM) contract handle.
func NewOnRamp(lggr logger.Logger, sourceSelector, destSelector uint64, onRampAddress common.Address, sourceLP logpoller.LogPoller, source client.Client) (*OnRamp, error) {
	onRamp, err := evm_2_evm_onramp_1_0_0.NewEVM2EVMOnRamp(onRampAddress, source)
	if err != nil {
		return nil, err
	}
	onRampABI := abihelpers.MustParseABI(evm_2_evm_onramp_1_0_0.EVM2EVMOnRampABI)
	eventSig := abihelpers.MustGetEventID(CCIPSendRequestedEventName, onRampABI)
	configSetEventSig := abihelpers.MustGetEventID(ConfigSetEventName, onRampABI)
	// Registered later via RegisterFilters; removed again in Close.
	filters := []logpoller.Filter{
		{
			Name:      logpoller.FilterName(ccipdata.COMMIT_CCIP_SENDS, onRampAddress),
			EventSigs: []common.Hash{eventSig},
			Addresses: []common.Address{onRampAddress},
			Retention: ccipdata.CommitExecLogsRetention,
		},
		{
			Name:      logpoller.FilterName(ccipdata.CONFIG_CHANGED, onRampAddress),
			EventSigs: []common.Hash{configSetEventSig},
			Addresses: []common.Address{onRampAddress},
			Retention: ccipdata.CacheEvictionLogsRetention,
		},
	}
	// Static config is immutable after deployment, so it is fetched at most once.
	cachedStaticConfig := cache.OnceCtxFunction[evm_2_evm_onramp_1_0_0.EVM2EVMOnRampStaticConfig](func(ctx context.Context) (evm_2_evm_onramp_1_0_0.EVM2EVMOnRampStaticConfig, error) {
		return onRamp.GetStaticConfig(&bind.CallOpts{Context: ctx})
	})
	// The RMN contract handle is derived lazily from the static config's ArmProxy.
	cachedRmnContract := cache.OnceCtxFunction[*arm_contract.ARMContract](func(ctx context.Context) (*arm_contract.ARMContract, error) {
		staticConfig, err := cachedStaticConfig(ctx)
		if err != nil {
			return nil, err
		}

		return arm_contract.NewARMContract(staticConfig.ArmProxy, source)
	})
	return &OnRamp{
		lggr:       lggr,
		address:    onRampAddress,
		onRamp:     onRamp,
		client:     source,
		filters:    filters,
		lp:         sourceLP,
		leafHasher: NewLeafHasher(sourceSelector, destSelector, onRampAddress, hashutil.NewKeccak(), onRamp),
		// offset || sourceChainID || seqNum || ...
		sendRequestedSeqNumberWord: 2,
		sendRequestedEventSig:      eventSig,
		cachedSourcePriceRegistryAddress: cache.NewLogpollerEventsBased[cciptypes.Address](
			sourceLP,
			[]common.Hash{configSetEventSig},
			onRampAddress,
		),
		cachedStaticConfig: cache.CallOnceOnNoError(cachedStaticConfig),
		cachedRmnContract:  cache.CallOnceOnNoError(cachedRmnContract),
	}, nil
}
+
+func (o *OnRamp) Address(context.Context) (cciptypes.Address, error) {
+ return cciptypes.Address(o.onRamp.Address().String()), nil
+}
+
+func (o *OnRamp) GetDynamicConfig(context.Context) (cciptypes.OnRampDynamicConfig, error) {
+ if o.onRamp == nil {
+ return cciptypes.OnRampDynamicConfig{}, fmt.Errorf("onramp not initialized")
+ }
+ legacyDynamicConfig, err := o.onRamp.GetDynamicConfig(nil)
+ if err != nil {
+ return cciptypes.OnRampDynamicConfig{}, err
+ }
+ return cciptypes.OnRampDynamicConfig{
+ Router: cciptypes.Address(legacyDynamicConfig.Router.String()),
+ MaxNumberOfTokensPerMsg: legacyDynamicConfig.MaxTokensLength,
+ DestGasOverhead: 0,
+ DestGasPerPayloadByte: 0,
+ DestDataAvailabilityOverheadGas: 0,
+ DestGasPerDataAvailabilityByte: 0,
+ DestDataAvailabilityMultiplierBps: 0,
+ PriceRegistry: cciptypes.Address(legacyDynamicConfig.PriceRegistry.String()),
+ MaxDataBytes: legacyDynamicConfig.MaxDataSize,
+ MaxPerMsgGasLimit: uint32(legacyDynamicConfig.MaxGasLimit),
+ }, nil
+}
+
// SourcePriceRegistryAddress returns the price registry address from the
// onramp's dynamic config. The value is cached and re-fetched only after a
// ConfigSet log is observed by the log poller.
func (o *OnRamp) SourcePriceRegistryAddress(ctx context.Context) (cciptypes.Address, error) {
	return o.cachedSourcePriceRegistryAddress.Get(ctx, func(ctx context.Context) (cciptypes.Address, error) {
		c, err := o.GetDynamicConfig(ctx)
		if err != nil {
			return "", err
		}
		return c.PriceRegistry, nil
	})
}
+
// GetSendRequestsBetweenSeqNums returns the CCIPSendRequested messages whose
// sequence numbers lie in [seqNumMin, seqNumMax], each paired with its tx
// metadata. When finalized is true, only logs at finalized confirmation depth
// are considered.
func (o *OnRamp) GetSendRequestsBetweenSeqNums(ctx context.Context, seqNumMin, seqNumMax uint64, finalized bool) ([]cciptypes.EVM2EVMMessageWithTxMeta, error) {
	// Filter on the raw log data word at sendRequestedSeqNumberWord so the
	// range query happens in the log poller, not after decoding.
	logs, err := o.lp.LogsDataWordRange(
		ctx,
		o.sendRequestedEventSig,
		o.address,
		o.sendRequestedSeqNumberWord,
		logpoller.EvmWord(seqNumMin),
		logpoller.EvmWord(seqNumMax),
		ccipdata.LogsConfirmations(finalized),
	)
	if err != nil {
		return nil, err
	}

	parsedLogs, err := ccipdata.ParseLogs[cciptypes.EVM2EVMMessage](logs, o.lggr, o.logToMessage)
	if err != nil {
		return nil, err
	}

	res := make([]cciptypes.EVM2EVMMessageWithTxMeta, 0, len(parsedLogs))
	for _, log := range parsedLogs {
		res = append(res, cciptypes.EVM2EVMMessageWithTxMeta{
			TxMeta:         log.TxMeta,
			EVM2EVMMessage: log.Data,
		})
	}
	return res, nil
}
+
+func (o *OnRamp) RouterAddress(context.Context) (cciptypes.Address, error) {
+ config, err := o.onRamp.GetDynamicConfig(nil)
+ if err != nil {
+ return "", err
+ }
+ return cciptypes.Address(config.Router.String()), nil
+}
+
// IsSourceChainHealthy reports whether the source chain's log poller is
// healthy. NOTE(review): the underlying Healthy() error is deliberately
// dropped and mapped to false with a nil error — callers only see the
// boolean; confirm this matches the interface contract before surfacing it.
func (o *OnRamp) IsSourceChainHealthy(context.Context) (bool, error) {
	if err := o.lp.Healthy(); err != nil {
		return false, nil
	}
	return true, nil
}
+
+func (o *OnRamp) IsSourceCursed(ctx context.Context) (bool, error) {
+ arm, err := o.cachedRmnContract(ctx)
+ if err != nil {
+ return false, fmt.Errorf("intializing Arm contract through the ArmProxy: %w", err)
+ }
+
+ cursed, err := arm.IsCursed0(&bind.CallOpts{Context: ctx})
+ if err != nil {
+ return false, fmt.Errorf("checking if source Arm is cursed: %w", err)
+ }
+ return cursed, nil
+}
+
// GetUSDCMessagePriorToLogIndexInTx always fails on this version: USDC token
// data is only available from onramp v1.2.0 onwards.
func (o *OnRamp) GetUSDCMessagePriorToLogIndexInTx(ctx context.Context, logIndex, offsetFromFinal int64, txHash common.Hash) ([]byte, error) {
	return nil, errors.New("USDC not supported in < 1.2.0")
}
+
// Close unregisters this reader's log poller filters.
func (o *OnRamp) Close() error {
	return logpollerutil.UnregisterLpFilters(o.lp, o.filters)
}

// RegisterFilters registers the CCIPSendRequested and ConfigSet filters built
// in NewOnRamp with the log poller.
func (o *OnRamp) RegisterFilters() error {
	return logpollerutil.RegisterLpFilters(o.lp, o.filters)
}
+
// logToMessage decodes a raw CCIPSendRequested log into an EVM2EVMMessage,
// including the leaf hash computed for merkle-proof construction.
func (o *OnRamp) logToMessage(log types.Log) (*cciptypes.EVM2EVMMessage, error) {
	msg, err := o.onRamp.ParseCCIPSendRequested(log)
	if err != nil {
		return nil, err
	}
	// Hash the leaf up front so downstream consumers don't need the hasher.
	h, err := o.leafHasher.HashLeaf(log)
	if err != nil {
		return nil, err
	}
	tokensAndAmounts := make([]cciptypes.TokenAmount, len(msg.Message.TokenAmounts))
	for i, tokenAndAmount := range msg.Message.TokenAmounts {
		tokensAndAmounts[i] = cciptypes.TokenAmount{
			Token:  cciptypes.Address(tokenAndAmount.Token.String()),
			Amount: tokenAndAmount.Amount,
		}
	}
	return &cciptypes.EVM2EVMMessage{
		SequenceNumber:      msg.Message.SequenceNumber,
		GasLimit:            msg.Message.GasLimit,
		Nonce:               msg.Message.Nonce,
		MessageID:           msg.Message.MessageId,
		SourceChainSelector: msg.Message.SourceChainSelector,
		Sender:              cciptypes.Address(msg.Message.Sender.String()),
		Receiver:            cciptypes.Address(msg.Message.Receiver.String()),
		Strict:              msg.Message.Strict,
		FeeToken:            cciptypes.Address(msg.Message.FeeToken.String()),
		FeeTokenAmount:      msg.Message.FeeTokenAmount,
		Data:                msg.Message.Data,
		TokenAmounts:        tokensAndAmounts,
		SourceTokenData:     make([][]byte, len(msg.Message.TokenAmounts)), // Always empty in 1.0
		Hash:                h,
	}, nil
}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/price_registry.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/price_registry.go
new file mode 100644
index 00000000000..d2104f985b9
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/price_registry.go
@@ -0,0 +1,310 @@
+package v1_0_0
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "sync"
+ "time"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/shared/generated/erc20"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/logpollerutil"
+)
+
var (
	// Minimal ERC20 ABI used to batch-read token decimals.
	abiERC20 = abihelpers.MustParseABI(erc20.ERC20ABI)
	_        ccipdata.PriceRegistryReader = &PriceRegistry{}
	// Exposed only for backwards compatibility with tests.
	UsdPerUnitGasUpdated = abihelpers.MustGetEventID("UsdPerUnitGasUpdated", abihelpers.MustParseABI(price_registry_1_0_0.PriceRegistryABI))
)

// PriceRegistry reads token/gas price updates and fee token membership from a
// v1.0.0 price registry contract, backed by the log poller.
type PriceRegistry struct {
	priceRegistry  price_registry_1_0_0.PriceRegistryInterface
	address        common.Address
	lp             logpoller.LogPoller
	evmBatchCaller rpclib.EvmBatchCaller
	lggr           logger.Logger
	filters        []logpoller.Filter
	// Topic IDs of the price/fee-token events, resolved once at construction.
	tokenUpdated    common.Hash
	gasUpdated      common.Hash
	feeTokenAdded   common.Hash
	feeTokenRemoved common.Hash

	// Refreshed when FeeTokenAdded / FeeTokenRemoved logs are observed.
	feeTokensCache cache.AutoSync[[]common.Address]
	// Maps token address -> uint8 decimals; entries are cached indefinitely.
	tokenDecimalsCache sync.Map
}
+
// NewPriceRegistry constructs a v1.0.0 price registry reader. It resolves the
// price-update and fee-token event IDs from the ABI, builds the log poller
// filters for them, and (optionally) registers those filters immediately when
// registerFilters is true.
func NewPriceRegistry(lggr logger.Logger, priceRegistryAddr common.Address, lp logpoller.LogPoller, ec client.Client, registerFilters bool) (*PriceRegistry, error) {
	priceRegistry, err := price_registry_1_0_0.NewPriceRegistry(priceRegistryAddr, ec)
	if err != nil {
		return nil, err
	}
	priceRegABI := abihelpers.MustParseABI(price_registry_1_0_0.PriceRegistryABI)
	usdPerTokenUpdated := abihelpers.MustGetEventID("UsdPerTokenUpdated", priceRegABI)
	feeTokenRemoved := abihelpers.MustGetEventID("FeeTokenRemoved", priceRegABI)
	feeTokenAdded := abihelpers.MustGetEventID("FeeTokenAdded", priceRegABI)
	var filters = []logpoller.Filter{
		{
			Name:      logpoller.FilterName(ccipdata.COMMIT_PRICE_UPDATES, priceRegistryAddr.String()),
			EventSigs: []common.Hash{UsdPerUnitGasUpdated, usdPerTokenUpdated},
			Addresses: []common.Address{priceRegistryAddr},
			Retention: ccipdata.PriceUpdatesLogsRetention,
		},
		{
			Name:      logpoller.FilterName(ccipdata.FEE_TOKEN_ADDED, priceRegistryAddr.String()),
			EventSigs: []common.Hash{feeTokenAdded},
			Addresses: []common.Address{priceRegistryAddr},
			Retention: ccipdata.CacheEvictionLogsRetention,
		},
		{
			Name:      logpoller.FilterName(ccipdata.FEE_TOKEN_REMOVED, priceRegistryAddr.String()),
			EventSigs: []common.Hash{feeTokenRemoved},
			Addresses: []common.Address{priceRegistryAddr},
			Retention: ccipdata.CacheEvictionLogsRetention,
		}}
	if registerFilters {
		err = logpollerutil.RegisterLpFilters(lp, filters)
		if err != nil {
			return nil, err
		}
	}
	return &PriceRegistry{
		priceRegistry: priceRegistry,
		address:       priceRegistryAddr,
		lp:            lp,
		// Batched eth_calls (used for token decimals) with default limits.
		evmBatchCaller: rpclib.NewDynamicLimitedBatchCaller(
			lggr,
			ec,
			rpclib.DefaultRpcBatchSizeLimit,
			rpclib.DefaultRpcBatchBackOffMultiplier,
			rpclib.DefaultMaxParallelRpcCalls,
		),
		lggr:            lggr,
		gasUpdated:      UsdPerUnitGasUpdated,
		tokenUpdated:    usdPerTokenUpdated,
		feeTokenRemoved: feeTokenRemoved,
		feeTokenAdded:   feeTokenAdded,
		filters:         filters,
		feeTokensCache: cache.NewLogpollerEventsBased[[]common.Address](
			lp,
			[]common.Hash{feeTokenAdded, feeTokenRemoved},
			priceRegistryAddr,
		),
	}, nil
}
+
+func (p *PriceRegistry) GetTokenPrices(ctx context.Context, wantedTokens []cciptypes.Address) ([]cciptypes.TokenPriceUpdate, error) {
+ evmAddrs, err := ccipcalc.GenericAddrsToEvm(wantedTokens...)
+ if err != nil {
+ return nil, err
+ }
+
+ tps, err := p.priceRegistry.GetTokenPrices(&bind.CallOpts{Context: ctx}, evmAddrs)
+ if err != nil {
+ return nil, err
+ }
+ var tpu []cciptypes.TokenPriceUpdate
+ for i, tp := range tps {
+ tpu = append(tpu, cciptypes.TokenPriceUpdate{
+ TokenPrice: cciptypes.TokenPrice{
+ Token: cciptypes.Address(evmAddrs[i].String()),
+ Value: tp.Value,
+ },
+ TimestampUnixSec: big.NewInt(int64(tp.Timestamp)),
+ })
+ }
+ return tpu, nil
+}
+
+func (p *PriceRegistry) Address(ctx context.Context) (cciptypes.Address, error) {
+ return cciptypes.Address(p.address.String()), nil
+}
+
// GetFeeTokens returns the registry's fee tokens, served from a cache that is
// re-fetched only after FeeTokenAdded / FeeTokenRemoved logs are observed.
func (p *PriceRegistry) GetFeeTokens(ctx context.Context) ([]cciptypes.Address, error) {
	feeTokens, err := p.feeTokensCache.Get(ctx, func(ctx context.Context) ([]common.Address, error) {
		return p.priceRegistry.GetFeeTokens(&bind.CallOpts{Context: ctx})
	})
	if err != nil {
		return nil, fmt.Errorf("get fee tokens: %w", err)
	}

	return ccipcalc.EvmAddrsToGeneric(feeTokens...), nil
}
+
// Close unregisters this reader's log poller filters.
func (p *PriceRegistry) Close() error {
	return logpollerutil.UnregisterLpFilters(p.lp, p.filters)
}
+
// GetTokenPriceUpdatesCreatedAfter returns UsdPerTokenUpdated events created
// after ts with at least confs confirmations, parsed into token price updates
// with their tx metadata.
func (p *PriceRegistry) GetTokenPriceUpdatesCreatedAfter(ctx context.Context, ts time.Time, confs int) ([]cciptypes.TokenPriceUpdateWithTxMeta, error) {
	logs, err := p.lp.LogsCreatedAfter(
		ctx,
		p.tokenUpdated,
		p.address,
		ts,
		evmtypes.Confirmations(confs),
	)
	if err != nil {
		return nil, err
	}

	// Unparsable logs are handled (and logged) by ParseLogs via p.lggr.
	parsedLogs, err := ccipdata.ParseLogs[cciptypes.TokenPriceUpdate](
		logs,
		p.lggr,
		func(log types.Log) (*cciptypes.TokenPriceUpdate, error) {
			tp, err1 := p.priceRegistry.ParseUsdPerTokenUpdated(log)
			if err1 != nil {
				return nil, err1
			}
			return &cciptypes.TokenPriceUpdate{
				TokenPrice: cciptypes.TokenPrice{
					Token: cciptypes.Address(tp.Token.String()),
					Value: tp.Value,
				},
				TimestampUnixSec: tp.Timestamp,
			}, nil
		},
	)
	if err != nil {
		return nil, err
	}

	res := make([]cciptypes.TokenPriceUpdateWithTxMeta, 0, len(parsedLogs))
	for _, log := range parsedLogs {
		res = append(res, cciptypes.TokenPriceUpdateWithTxMeta{
			TxMeta:           log.TxMeta,
			TokenPriceUpdate: log.Data,
		})
	}
	return res, nil
}
+
// GetGasPriceUpdatesCreatedAfter returns UsdPerUnitGasUpdated events for the
// given destination chain selector created after ts. The selector is matched
// against the event's first indexed topic.
func (p *PriceRegistry) GetGasPriceUpdatesCreatedAfter(ctx context.Context, chainSelector uint64, ts time.Time, confs int) ([]cciptypes.GasPriceUpdateWithTxMeta, error) {
	logs, err := p.lp.IndexedLogsCreatedAfter(
		ctx,
		p.gasUpdated,
		p.address,
		1,
		[]common.Hash{abihelpers.EvmWord(chainSelector)},
		ts,
		evmtypes.Confirmations(confs),
	)
	if err != nil {
		return nil, err
	}
	return p.parseGasPriceUpdatesLogs(logs)
}
+
// GetAllGasPriceUpdatesCreatedAfter returns UsdPerUnitGasUpdated events for
// every destination chain created after ts with at least confs confirmations.
func (p *PriceRegistry) GetAllGasPriceUpdatesCreatedAfter(ctx context.Context, ts time.Time, confs int) ([]cciptypes.GasPriceUpdateWithTxMeta, error) {
	logs, err := p.lp.LogsCreatedAfter(
		ctx,
		p.gasUpdated,
		p.address,
		ts,
		evmtypes.Confirmations(confs),
	)
	if err != nil {
		return nil, err
	}
	return p.parseGasPriceUpdatesLogs(logs)
}
+
+func (p *PriceRegistry) parseGasPriceUpdatesLogs(logs []logpoller.Log) ([]cciptypes.GasPriceUpdateWithTxMeta, error) {
+ parsedLogs, err := ccipdata.ParseLogs[cciptypes.GasPriceUpdate](
+ logs,
+ p.lggr,
+ func(log types.Log) (*cciptypes.GasPriceUpdate, error) {
+ p, err1 := p.priceRegistry.ParseUsdPerUnitGasUpdated(log)
+ if err1 != nil {
+ return nil, err1
+ }
+ return &cciptypes.GasPriceUpdate{
+ GasPrice: cciptypes.GasPrice{
+ DestChainSelector: p.DestChain,
+ Value: p.Value,
+ },
+ TimestampUnixSec: p.Timestamp,
+ }, nil
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ res := make([]cciptypes.GasPriceUpdateWithTxMeta, 0, len(parsedLogs))
+ for _, log := range parsedLogs {
+ res = append(res, cciptypes.GasPriceUpdateWithTxMeta{
+ TxMeta: log.TxMeta,
+ GasPriceUpdate: log.Data,
+ })
+ }
+ return res, nil
+}
+
// GetTokensDecimals returns the ERC20 decimals for each token address, in
// input order. Decimals already seen are served from tokenDecimalsCache; the
// remainder are fetched in a single batched eth_call and then cached.
func (p *PriceRegistry) GetTokensDecimals(ctx context.Context, tokenAddresses []cciptypes.Address) ([]uint8, error) {
	evmAddrs, err := ccipcalc.GenericAddrsToEvm(tokenAddresses...)
	if err != nil {
		return nil, err
	}

	// First pass: satisfy as many entries as possible from the cache.
	found := make(map[common.Address]bool)
	tokenDecimals := make([]uint8, len(evmAddrs))
	for i, tokenAddress := range evmAddrs {
		if v, ok := p.tokenDecimalsCache.Load(tokenAddress); ok {
			if decimals, isUint8 := v.(uint8); isUint8 {
				tokenDecimals[i] = decimals
				found[tokenAddress] = true
			} else {
				// Bad cache entry: log and fall through to a fresh fetch below.
				p.lggr.Errorf("token decimals cache contains invalid type %T", v)
			}
		}
	}
	if len(found) == len(evmAddrs) {
		return tokenDecimals, nil
	}

	// Second pass: batch-fetch decimals for the cache misses only.
	evmCalls := make([]rpclib.EvmCall, 0, len(evmAddrs))
	for _, tokenAddress := range evmAddrs {
		if !found[tokenAddress] {
			evmCalls = append(evmCalls, rpclib.NewEvmCall(abiERC20, "decimals", tokenAddress))
		}
	}

	results, err := p.evmBatchCaller.BatchCall(ctx, 0, evmCalls)
	if err != nil {
		return nil, fmt.Errorf("batch call limit: %w", err)
	}

	decimals, err := rpclib.ParseOutputs[uint8](results, func(d rpclib.DataAndErr) (uint8, error) {
		return rpclib.ParseOutput[uint8](d, 0)
	})
	if err != nil {
		return nil, fmt.Errorf("parse outputs: %w", err)
	}

	// Merge batch results back into the misses (results are in call order)
	// and populate the cache for subsequent lookups.
	j := 0
	for i, tokenAddress := range evmAddrs {
		if !found[tokenAddress] {
			tokenDecimals[i] = decimals[j]
			p.tokenDecimalsCache.Store(tokenAddress, tokenDecimals[i])
			j++
		}
	}
	return tokenDecimals, nil
}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/test_helpers.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/test_helpers.go
new file mode 100644
index 00000000000..34f832e17fc
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/test_helpers.go
@@ -0,0 +1,90 @@
+package v1_0_0
+
+import (
+ "encoding/binary"
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+)
+
+// ApplyPriceRegistryUpdate is a helper function used in tests only.
+func ApplyPriceRegistryUpdate(t *testing.T, user *bind.TransactOpts, addr common.Address, ec client.Client, gasPrice []cciptypes.GasPrice, tokenPrices []cciptypes.TokenPrice) {
+ require.True(t, len(gasPrice) <= 2)
+ pr, err := price_registry_1_0_0.NewPriceRegistry(addr, ec)
+ require.NoError(t, err)
+ var tps []price_registry_1_0_0.InternalTokenPriceUpdate
+ for _, tp := range tokenPrices {
+ evmAddrs, err1 := ccipcalc.GenericAddrsToEvm(tp.Token)
+ assert.NoError(t, err1)
+ tps = append(tps, price_registry_1_0_0.InternalTokenPriceUpdate{
+ SourceToken: evmAddrs[0],
+ UsdPerToken: tp.Value,
+ })
+ }
+ dest := uint64(0)
+ gas := big.NewInt(0)
+ if len(gasPrice) >= 1 {
+ dest = gasPrice[0].DestChainSelector
+ gas = gasPrice[0].Value
+ }
+ _, err = pr.UpdatePrices(user, price_registry_1_0_0.InternalPriceUpdates{
+ TokenPriceUpdates: tps,
+ DestChainSelector: dest,
+ UsdPerUnitGas: gas,
+ })
+ require.NoError(t, err)
+
+ for i := 1; i < len(gasPrice); i++ {
+ dest = gasPrice[i].DestChainSelector
+ gas = gasPrice[i].Value
+ _, err = pr.UpdatePrices(user, price_registry_1_0_0.InternalPriceUpdates{
+ TokenPriceUpdates: []price_registry_1_0_0.InternalTokenPriceUpdate{},
+ DestChainSelector: dest,
+ UsdPerUnitGas: gas,
+ })
+ require.NoError(t, err)
+ }
+}
+
// CreateExecutionStateChangeEventLog builds a synthetic log poller entry for
// an EVM2EVMOffRamp ExecutionStateChanged event (state=1, fixed return data)
// with the given sequence number, block number, and message ID. Test use only.
func CreateExecutionStateChangeEventLog(t *testing.T, seqNr uint64, blockNumber int64, messageID common.Hash) logpoller.Log {
	tAbi, err := evm_2_evm_offramp.EVM2EVMOffRampMetaData.GetAbi()
	require.NoError(t, err)
	eseEvent, ok := tAbi.Events["ExecutionStateChanged"]
	require.True(t, ok)

	// Pack only the non-indexed inputs; indexed inputs become topics below.
	logData, err := eseEvent.Inputs.NonIndexed().Pack(uint8(1), []byte("some return data"))
	require.NoError(t, err)
	// Sequence number is an indexed topic, big-endian encoded into 32 bytes.
	seqNrBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(seqNrBytes, seqNr)
	seqNrTopic := common.BytesToHash(seqNrBytes)
	topic0 := evm_2_evm_offramp.EVM2EVMOffRampExecutionStateChanged{}.Topic()

	return logpoller.Log{
		Topics: [][]byte{
			topic0[:],
			seqNrTopic[:],
			messageID[:],
		},
		Data:        logData,
		LogIndex:    1,
		BlockHash:   utils.RandomBytes32(),
		BlockNumber: blockNumber,
		EventSig:    topic0,
		Address:     testutils.NewAddress(),
		TxHash:      utils.RandomBytes32(),
	}
}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_1_0/onramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_1_0/onramp.go
new file mode 100644
index 00000000000..d4d73219fc0
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_1_0/onramp.go
@@ -0,0 +1,70 @@
+package v1_1_0
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_1_0"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+)
+
// Compile-time check that OnRamp implements the reader interface.
var _ ccipdata.OnRampReader = &OnRamp{}

// OnRamp The only difference that the plugins care about in 1.1 is that the dynamic config struct has changed.
type OnRamp struct {
	*v1_0_0.OnRamp
	// 1.1.0 contract binding; used for the dynamic-config overrides below.
	onRamp *evm_2_evm_onramp_1_1_0.EVM2EVMOnRamp
}
+
+func NewOnRamp(lggr logger.Logger, sourceSelector, destSelector uint64, onRampAddress common.Address, sourceLP logpoller.LogPoller, source client.Client) (*OnRamp, error) {
+ onRamp, err := evm_2_evm_onramp_1_1_0.NewEVM2EVMOnRamp(onRampAddress, source)
+ if err != nil {
+ return nil, err
+ }
+ onRamp100, err := v1_0_0.NewOnRamp(lggr, sourceSelector, destSelector, onRampAddress, sourceLP, source)
+ if err != nil {
+ return nil, err
+ }
+ return &OnRamp{
+ OnRamp: onRamp100,
+ onRamp: onRamp,
+ }, nil
+}
+
// RouterAddress returns the router from the 1.1.0 dynamic config, overriding
// the embedded v1.0.0 implementation (the config struct layout changed).
// NOTE(review): nil CallOpts means the caller's context is not propagated to
// the eth_call — confirm whether that is acceptable here.
func (o *OnRamp) RouterAddress(context.Context) (cciptypes.Address, error) {
	config, err := o.onRamp.GetDynamicConfig(nil)
	if err != nil {
		return "", err
	}
	return cciptypes.Address(config.Router.String()), nil
}
+
// GetDynamicConfig returns the 1.1.0 dynamic config mapped onto the
// version-agnostic struct. Unlike 1.0.0, this version carries DestGasOverhead
// and DestGasPerPayloadByte; data-availability fields are still absent and
// reported as zero.
func (o *OnRamp) GetDynamicConfig(context.Context) (cciptypes.OnRampDynamicConfig, error) {
	if o.onRamp == nil {
		return cciptypes.OnRampDynamicConfig{}, fmt.Errorf("onramp not initialized")
	}
	legacyDynamicConfig, err := o.onRamp.GetDynamicConfig(nil)
	if err != nil {
		return cciptypes.OnRampDynamicConfig{}, err
	}
	return cciptypes.OnRampDynamicConfig{
		Router:                            cciptypes.Address(legacyDynamicConfig.Router.String()),
		MaxNumberOfTokensPerMsg:           legacyDynamicConfig.MaxTokensLength,
		DestGasOverhead:                   legacyDynamicConfig.DestGasOverhead,
		DestGasPerPayloadByte:             legacyDynamicConfig.DestGasPerPayloadByte,
		DestDataAvailabilityOverheadGas:   0,
		DestGasPerDataAvailabilityByte:    0,
		DestDataAvailabilityMultiplierBps: 0,
		PriceRegistry:                     cciptypes.Address(legacyDynamicConfig.PriceRegistry.String()),
		MaxDataBytes:                      legacyDynamicConfig.MaxDataSize,
		MaxPerMsgGasLimit:                 uint32(legacyDynamicConfig.MaxGasLimit),
	}, nil
}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store.go
new file mode 100644
index 00000000000..7612e544195
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store.go
@@ -0,0 +1,469 @@
+package v1_2_0
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/pkg/errors"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink-common/pkg/types/query"
+ "github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/logpollerutil"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices"
+)
+
// Compile-time check that CommitStore implements the reader interface.
var _ ccipdata.CommitStoreReader = &CommitStore{}

// CommitStore reads commit reports and config from a v1.2.0 commit store.
type CommitStore struct {
	// Static config
	commitStore               *commit_store_1_2_0.CommitStore
	lggr                      logger.Logger
	lp                        logpoller.LogPoller
	address                   common.Address
	estimator                 *gas.EvmFeeEstimator
	sourceMaxGasPrice         *big.Int
	filters                   []logpoller.Filter
	reportAcceptedSig         common.Hash
	reportAcceptedMaxSeqIndex int
	// ABI arguments used to (de)serialize commit reports.
	commitReportArgs abi.Arguments

	// Dynamic config
	// configMu guards the fields below against concurrent ChangeConfig calls.
	configMu          sync.RWMutex
	gasPriceEstimator *prices.DAGasPriceEstimator
	offchainConfig    cciptypes.CommitOffchainConfig
}
+
// GetCommitStoreStaticConfig fetches the contract's static config and maps it
// onto the version-agnostic cciptypes struct.
func (c *CommitStore) GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error) {
	staticConfig, err := c.commitStore.GetStaticConfig(&bind.CallOpts{Context: ctx})
	if err != nil {
		return cciptypes.CommitStoreStaticConfig{}, err
	}
	return cciptypes.CommitStoreStaticConfig{
		ChainSelector:       staticConfig.ChainSelector,
		SourceChainSelector: staticConfig.SourceChainSelector,
		OnRamp:              cciptypes.Address(staticConfig.OnRamp.String()),
		ArmProxy:            cciptypes.Address(staticConfig.ArmProxy.String()),
	}, nil
}
+
// EncodeCommitReport ABI-encodes the report using this store's report schema.
func (c *CommitStore) EncodeCommitReport(_ context.Context, report cciptypes.CommitStoreReport) ([]byte, error) {
	return EncodeCommitReport(c.commitReportArgs, report)
}
+
// EncodeCommitReport ABI-encodes a commit store report (token prices, gas
// prices, interval, merkle root) into the 1.2.0 on-chain wire format.
func EncodeCommitReport(commitReportArgs abi.Arguments, report cciptypes.CommitStoreReport) ([]byte, error) {
	var tokenPriceUpdates []commit_store_1_2_0.InternalTokenPriceUpdate
	for _, tokenPriceUpdate := range report.TokenPrices {
		tokenAddressEvm, err := ccipcalc.GenericAddrToEvm(tokenPriceUpdate.Token)
		if err != nil {
			return nil, fmt.Errorf("token price update address to evm: %w", err)
		}

		tokenPriceUpdates = append(tokenPriceUpdates, commit_store_1_2_0.InternalTokenPriceUpdate{
			SourceToken: tokenAddressEvm,
			UsdPerToken: tokenPriceUpdate.Value,
		})
	}

	var gasPriceUpdates []commit_store_1_2_0.InternalGasPriceUpdate
	for _, gasPriceUpdate := range report.GasPrices {
		gasPriceUpdates = append(gasPriceUpdates, commit_store_1_2_0.InternalGasPriceUpdate{
			DestChainSelector: gasPriceUpdate.DestChainSelector,
			UsdPerUnitGas:     gasPriceUpdate.Value,
		})
	}

	rep := commit_store_1_2_0.CommitStoreCommitReport{
		PriceUpdates: commit_store_1_2_0.InternalPriceUpdates{
			TokenPriceUpdates: tokenPriceUpdates,
			GasPriceUpdates:   gasPriceUpdates,
		},
		Interval:   commit_store_1_2_0.CommitStoreInterval{Min: report.Interval.Min, Max: report.Interval.Max},
		MerkleRoot: report.MerkleRoot,
	}
	// The report is encoded as a single tuple value.
	return commitReportArgs.PackValues([]interface{}{rep})
}
+
// DecodeCommitReport unpacks a 1.2.0 ABI-encoded commit report into the
// version-agnostic struct. The anonymous struct below must exactly mirror the
// tuple layout produced by abi.Unpack for the report schema.
func DecodeCommitReport(commitReportArgs abi.Arguments, report []byte) (cciptypes.CommitStoreReport, error) {
	unpacked, err := commitReportArgs.Unpack(report)
	if err != nil {
		return cciptypes.CommitStoreReport{}, err
	}
	if len(unpacked) != 1 {
		return cciptypes.CommitStoreReport{}, errors.New("expected single struct value")
	}

	commitReport, ok := unpacked[0].(struct {
		PriceUpdates struct {
			TokenPriceUpdates []struct {
				SourceToken common.Address `json:"sourceToken"`
				UsdPerToken *big.Int       `json:"usdPerToken"`
			} `json:"tokenPriceUpdates"`
			GasPriceUpdates []struct {
				DestChainSelector uint64   `json:"destChainSelector"`
				UsdPerUnitGas     *big.Int `json:"usdPerUnitGas"`
			} `json:"gasPriceUpdates"`
		} `json:"priceUpdates"`
		Interval struct {
			Min uint64 `json:"min"`
			Max uint64 `json:"max"`
		} `json:"interval"`
		MerkleRoot [32]byte `json:"merkleRoot"`
	})
	if !ok {
		return cciptypes.CommitStoreReport{}, errors.Errorf("invalid commit report got %T", unpacked[0])
	}

	var tokenPriceUpdates []cciptypes.TokenPrice
	for _, u := range commitReport.PriceUpdates.TokenPriceUpdates {
		tokenPriceUpdates = append(tokenPriceUpdates, cciptypes.TokenPrice{
			Token: cciptypes.Address(u.SourceToken.String()),
			Value: u.UsdPerToken,
		})
	}

	var gasPrices []cciptypes.GasPrice
	for _, u := range commitReport.PriceUpdates.GasPriceUpdates {
		gasPrices = append(gasPrices, cciptypes.GasPrice{
			DestChainSelector: u.DestChainSelector,
			Value:             u.UsdPerUnitGas,
		})
	}

	return cciptypes.CommitStoreReport{
		TokenPrices: tokenPriceUpdates,
		GasPrices:   gasPrices,
		Interval: cciptypes.CommitStoreInterval{
			Min: commitReport.Interval.Min,
			Max: commitReport.Interval.Max,
		},
		MerkleRoot: commitReport.MerkleRoot,
	}, nil
}
+
// DecodeCommitReport decodes the report using this store's report schema.
func (c *CommitStore) DecodeCommitReport(_ context.Context, report []byte) (cciptypes.CommitStoreReport, error) {
	return DecodeCommitReport(c.commitReportArgs, report)
}
+
// IsBlessed reports whether the given merkle root has been blessed on-chain.
func (c *CommitStore) IsBlessed(ctx context.Context, root [32]byte) (bool, error) {
	return c.commitStore.IsBlessed(&bind.CallOpts{Context: ctx}, root)
}
+
// OffchainConfig returns the currently-set offchain config under a read lock.
func (c *CommitStore) OffchainConfig(context.Context) (cciptypes.CommitOffchainConfig, error) {
	c.configMu.RLock()
	defer c.configMu.RUnlock()
	return c.offchainConfig, nil
}

// GasPriceEstimator returns the currently-set gas price estimator under a
// read lock.
func (c *CommitStore) GasPriceEstimator(context.Context) (cciptypes.GasPriceEstimatorCommit, error) {
	c.configMu.RLock()
	defer c.configMu.RUnlock()
	return c.gasPriceEstimator, nil
}
+
+// SetGasEstimator stores the EVM fee estimator that ChangeConfig later uses
+// to build the commit gas price estimator. It must be called before
+// ChangeConfig (which errors if the estimator is still nil).
+func (c *CommitStore) SetGasEstimator(ctx context.Context, gpe gas.EvmFeeEstimator) error {
+	// This is a write to c.estimator, so take the exclusive lock: the
+	// original RLock only guarded against writers, allowing concurrent
+	// SetGasEstimator calls to race on the field.
+	c.configMu.Lock()
+	defer c.configMu.Unlock()
+	c.estimator = &gpe
+	return nil
+}
+
+// SetSourceMaxGasPrice stores the source-chain gas price cap that ChangeConfig
+// later passes to the DA gas price estimator. It must be called before
+// ChangeConfig (which errors if the value is still nil).
+func (c *CommitStore) SetSourceMaxGasPrice(ctx context.Context, sourceMaxGasPrice *big.Int) error {
+	// This is a write to c.sourceMaxGasPrice, so take the exclusive lock;
+	// the original RLock did not exclude other writers.
+	c.configMu.Lock()
+	defer c.configMu.Unlock()
+	c.sourceMaxGasPrice = sourceMaxGasPrice
+	return nil
+}
+
+// JSONCommitOffchainConfig is the commit plugin offchain configuration as it
+// appears in the OCR2 offchain config blob for v1.2 commit stores.
+// Do not change the JSON format of this struct without consulting with the RDD people first.
+type JSONCommitOffchainConfig struct {
+	// SourceFinalityDepth and DestFinalityDepth are not checked by Validate
+	// and may be zero (see the "can omit finality depth" test case).
+	SourceFinalityDepth      uint32
+	DestFinalityDepth        uint32
+	GasPriceHeartBeat        config.Duration
+	// DAGasPriceDeviationPPB may be zero on non-rollup chains (see Validate).
+	DAGasPriceDeviationPPB   uint32
+	ExecGasPriceDeviationPPB uint32
+	TokenPriceHeartBeat      config.Duration
+	TokenPriceDeviationPPB   uint32
+	InflightCacheExpiry      config.Duration
+	// PriceReportingDisabled defaults to false when absent from the JSON.
+	PriceReportingDisabled   bool
+}
+
+// Validate checks that every required commit offchain setting is non-zero and
+// returns an error naming the first missing field.
+// DAGasPriceDeviationPPB is deliberately not checked: it can be 0 on non-rollups.
+func (c JSONCommitOffchainConfig) Validate() error {
+	required := []struct {
+		set bool
+		msg string
+	}{
+		{c.GasPriceHeartBeat.Duration() != 0, "must set GasPriceHeartBeat"},
+		{c.ExecGasPriceDeviationPPB != 0, "must set ExecGasPriceDeviationPPB"},
+		{c.TokenPriceHeartBeat.Duration() != 0, "must set TokenPriceHeartBeat"},
+		{c.TokenPriceDeviationPPB != 0, "must set TokenPriceDeviationPPB"},
+		{c.InflightCacheExpiry.Duration() != 0, "must set InflightCacheExpiry"},
+	}
+	for _, r := range required {
+		if !r.set {
+			return errors.New(r.msg)
+		}
+	}
+	return nil
+}
+
+// ChangeConfig parses the new onchain/offchain OCR2 configs, rebuilds the gas
+// price estimator and offchain config under the exclusive config lock, and
+// returns the price registry address from the onchain config.
+// SetGasEstimator and SetSourceMaxGasPrice must have been called first.
+func (c *CommitStore) ChangeConfig(_ context.Context, onchainConfig []byte, offchainConfig []byte) (cciptypes.Address, error) {
+	onchainConfigParsed, err := abihelpers.DecodeAbiStruct[ccipdata.CommitOnchainConfig](onchainConfig)
+	if err != nil {
+		return "", err
+	}
+
+	offchainConfigParsed, err := ccipconfig.DecodeOffchainConfig[JSONCommitOffchainConfig](offchainConfig)
+	if err != nil {
+		return "", err
+	}
+	// Exclusive lock: both gasPriceEstimator and offchainConfig are swapped below.
+	c.configMu.Lock()
+	defer c.configMu.Unlock()
+
+	if c.estimator == nil {
+		return "", fmt.Errorf("this CommitStore estimator is nil. SetGasEstimator should be called before ChangeConfig")
+	}
+
+	if c.sourceMaxGasPrice == nil {
+		return "", fmt.Errorf("this CommitStore sourceMaxGasPrice is nil. SetSourceMaxGasPrice should be called before ChangeConfig")
+	}
+
+	c.gasPriceEstimator = prices.NewDAGasPriceEstimator(
+		*c.estimator,
+		c.sourceMaxGasPrice,
+		int64(offchainConfigParsed.ExecGasPriceDeviationPPB),
+		int64(offchainConfigParsed.DAGasPriceDeviationPPB),
+	)
+	c.offchainConfig = ccipdata.NewCommitOffchainConfig(
+		offchainConfigParsed.ExecGasPriceDeviationPPB,
+		offchainConfigParsed.GasPriceHeartBeat.Duration(),
+		offchainConfigParsed.TokenPriceDeviationPPB,
+		offchainConfigParsed.TokenPriceHeartBeat.Duration(),
+		offchainConfigParsed.InflightCacheExpiry.Duration(),
+		offchainConfigParsed.PriceReportingDisabled,
+	)
+
+	c.lggr.Infow("ChangeConfig",
+		"offchainConfig", offchainConfigParsed,
+		"onchainConfig", onchainConfigParsed,
+	)
+	return cciptypes.Address(onchainConfigParsed.PriceRegistry.String()), nil
+}
+
+// Close unregisters this commit store's log poller filters.
+func (c *CommitStore) Close() error {
+	return logpollerutil.UnregisterLpFilters(c.lp, c.filters)
+}
+
+// parseReport translates a raw ReportAccepted log into the chain-agnostic
+// cciptypes.CommitStoreReport representation.
+func (c *CommitStore) parseReport(log types.Log) (*cciptypes.CommitStoreReport, error) {
+	accepted, err := c.commitStore.ParseReportAccepted(log)
+	if err != nil {
+		return nil, err
+	}
+	rep := accepted.Report
+
+	// Slices intentionally stay nil when there are no price updates.
+	var tokenPrices []cciptypes.TokenPrice
+	for _, upd := range rep.PriceUpdates.TokenPriceUpdates {
+		tokenPrices = append(tokenPrices, cciptypes.TokenPrice{
+			Token: cciptypes.Address(upd.SourceToken.String()),
+			Value: upd.UsdPerToken,
+		})
+	}
+	var gasPrices []cciptypes.GasPrice
+	for _, upd := range rep.PriceUpdates.GasPriceUpdates {
+		gasPrices = append(gasPrices, cciptypes.GasPrice{
+			DestChainSelector: upd.DestChainSelector,
+			Value:             upd.UsdPerUnitGas,
+		})
+	}
+
+	return &cciptypes.CommitStoreReport{
+		TokenPrices: tokenPrices,
+		GasPrices:   gasPrices,
+		MerkleRoot:  rep.MerkleRoot,
+		Interval:    cciptypes.CommitStoreInterval{Min: rep.Interval.Min, Max: rep.Interval.Max},
+	}, nil
+}
+
+// GetCommitReportMatchingSeqNum returns the accepted commit report whose
+// sequence-number interval covers seqNr, matched via a log-poller data-word
+// range query on the report's min/max sequence words. At most one report is
+// returned; finding more than one is logged as an error and truncated.
+func (c *CommitStore) GetCommitReportMatchingSeqNum(ctx context.Context, seqNr uint64, confs int) ([]cciptypes.CommitStoreReportWithTxMeta, error) {
+	logs, err := c.lp.LogsDataWordBetween(
+		ctx,
+		c.reportAcceptedSig,
+		c.address,
+		// minSeqNum word immediately precedes maxSeqNum (see reportAcceptedMaxSeqIndex).
+		c.reportAcceptedMaxSeqIndex-1,
+		c.reportAcceptedMaxSeqIndex,
+		logpoller.EvmWord(seqNr),
+		evmtypes.Confirmations(confs),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	parsedLogs, err := ccipdata.ParseLogs[cciptypes.CommitStoreReport](
+		logs,
+		c.lggr,
+		c.parseReport,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([]cciptypes.CommitStoreReportWithTxMeta, 0, len(parsedLogs))
+	for _, log := range parsedLogs {
+		res = append(res, cciptypes.CommitStoreReportWithTxMeta{
+			TxMeta:            log.TxMeta,
+			CommitStoreReport: log.Data,
+		})
+	}
+
+	// Intervals should never overlap, so a given seqNr belongs to exactly one report.
+	if len(res) > 1 {
+		c.lggr.Errorw("More than one report found for seqNr", "seqNr", seqNr, "commitReports", parsedLogs)
+		return res[:1], nil
+	}
+	return res, nil
+}
+
+// GetAcceptedCommitReportsGteTimestamp returns all ReportAccepted commit
+// reports emitted at or after ts with at least confs confirmations, in
+// ascending sequence order, each annotated with its finality status relative
+// to the latest finalized block.
+func (c *CommitStore) GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confs int) ([]cciptypes.CommitStoreReportWithTxMeta, error) {
+	latestBlock, err := c.lp.LatestBlock(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	reportsQuery, err := query.Where(
+		c.address.String(),
+		logpoller.NewAddressFilter(c.address),
+		logpoller.NewEventSigFilter(c.reportAcceptedSig),
+		query.Timestamp(uint64(ts.Unix()), primitives.Gte),
+		logpoller.NewConfirmationsFilter(evmtypes.Confirmations(confs)),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	logs, err := c.lp.FilteredLogs(
+		ctx,
+		reportsQuery,
+		query.NewLimitAndSort(query.Limit{}, query.NewSortBySequence(query.Asc)),
+		"GetAcceptedCommitReportsGteTimestamp",
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	parsedLogs, err := ccipdata.ParseLogs[cciptypes.CommitStoreReport](logs, c.lggr, c.parseReport)
+	if err != nil {
+		return nil, fmt.Errorf("parse logs: %w", err)
+	}
+
+	res := make([]cciptypes.CommitStoreReportWithTxMeta, 0, len(parsedLogs))
+	for _, log := range parsedLogs {
+		res = append(res, cciptypes.CommitStoreReportWithTxMeta{
+			TxMeta:            log.TxMeta.WithFinalityStatus(uint64(latestBlock.FinalizedBlockNumber)),
+			CommitStoreReport: log.Data,
+		})
+	}
+	return res, nil
+}
+
+// GetExpectedNextSequenceNumber returns the next sequence number the onchain
+// commit store expects.
+func (c *CommitStore) GetExpectedNextSequenceNumber(ctx context.Context) (uint64, error) {
+	return c.commitStore.GetExpectedNextSequenceNumber(&bind.CallOpts{Context: ctx})
+}
+
+// GetLatestPriceEpochAndRound returns the latest price epoch-and-round value
+// recorded by the onchain commit store.
+func (c *CommitStore) GetLatestPriceEpochAndRound(ctx context.Context) (uint64, error) {
+	return c.commitStore.GetLatestPriceEpochAndRound(&bind.CallOpts{Context: ctx})
+}
+
+// IsDestChainHealthy reports whether the destination chain log poller is
+// healthy. The underlying health error is deliberately discarded; callers
+// only receive the boolean.
+func (c *CommitStore) IsDestChainHealthy(context.Context) (bool, error) {
+	return c.lp.Healthy() == nil, nil
+}
+
+// IsDown reports whether the commit store is paused or the ARM is unhealthy.
+// On RPC failure it conservatively reports down alongside the error.
+func (c *CommitStore) IsDown(ctx context.Context) (bool, error) {
+	healthy, err := c.commitStore.IsUnpausedAndARMHealthy(&bind.CallOpts{Context: ctx})
+	if err != nil {
+		return true, err
+	}
+	return !healthy, nil
+}
+
+// VerifyExecutionReport checks the report's Merkle proofs against the onchain
+// commit store. Both a failed RPC call and a non-verifying root are reported
+// as (false, nil) after logging — verification failure is not surfaced as an
+// error to callers.
+func (c *CommitStore) VerifyExecutionReport(ctx context.Context, report cciptypes.ExecReport) (bool, error) {
+	var hashes [][32]byte
+	for _, msg := range report.Messages {
+		hashes = append(hashes, msg.Hash)
+	}
+	res, err := c.commitStore.Verify(&bind.CallOpts{Context: ctx}, hashes, report.Proofs, report.ProofFlagBits)
+	if err != nil {
+		c.lggr.Errorw("Unable to call verify", "messages", report.Messages, "err", err)
+		return false, nil
+	}
+	// No timestamp, means failed to verify root.
+	if res.Cmp(big.NewInt(0)) == 0 {
+		c.lggr.Errorw("Root does not verify", "messages", report.Messages)
+		return false, nil
+	}
+	return true, nil
+}
+
+// RegisterFilters registers this commit store's log poller filters.
+func (c *CommitStore) RegisterFilters() error {
+	return logpollerutil.RegisterLpFilters(c.lp, c.filters)
+}
+
+// NewCommitStore builds a v1.2 CommitStore reader bound to the contract at
+// addr. It wires up the ReportAccepted log filter and ABI arguments; the gas
+// estimator, source max gas price, and offchain config are populated later
+// via SetGasEstimator, SetSourceMaxGasPrice, and ChangeConfig.
+func NewCommitStore(lggr logger.Logger, addr common.Address, ec client.Client, lp logpoller.LogPoller) (*CommitStore, error) {
+	commitStore, err := commit_store_1_2_0.NewCommitStore(addr, ec)
+	if err != nil {
+		return nil, err
+	}
+	commitStoreABI := abihelpers.MustParseABI(commit_store_1_2_0.CommitStoreABI)
+	eventSig := abihelpers.MustGetEventID(v1_0_0.ReportAccepted, commitStoreABI)
+	commitReportArgs := abihelpers.MustGetEventInputs(v1_0_0.ReportAccepted, commitStoreABI)
+	filters := []logpoller.Filter{
+		{
+			Name:      logpoller.FilterName(v1_0_0.EXEC_REPORT_ACCEPTS, addr.String()),
+			EventSigs: []common.Hash{eventSig},
+			Addresses: []common.Address{addr},
+			Retention: ccipdata.CommitExecLogsRetention,
+		},
+	}
+
+	return &CommitStore{
+		commitStore: commitStore,
+		address:     addr,
+		lggr:        lggr,
+		lp:          lp,
+
+		// Note that sourceMaxGasPrice and estimator now have explicit setters (CCIP-2493)
+
+		filters:           filters,
+		commitReportArgs:  commitReportArgs,
+		reportAcceptedSig: eventSig,
+		// offset || priceUpdatesOffset || minSeqNum || maxSeqNum || merkleRoot
+		reportAcceptedMaxSeqIndex: 3,
+		configMu:                  sync.RWMutex{},
+
+		// The fields below are initially empty and set on ChangeConfig method
+		offchainConfig:    cciptypes.CommitOffchainConfig{},
+		gasPriceEstimator: nil,
+	}, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store_test.go
new file mode 100644
index 00000000000..8b293096339
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store_test.go
@@ -0,0 +1,224 @@
+package v1_2_0
+
+import (
+ "math/big"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+)
+
+// TestCommitReportEncoding round-trips a commit report through the v1.2
+// encoder/decoder and asserts the result equals the input.
+func TestCommitReportEncoding(t *testing.T) {
+	t.Parallel()
+	ctx := testutils.Context(t)
+	report := cciptypes.CommitStoreReport{
+		TokenPrices: []cciptypes.TokenPrice{
+			{
+				Token: cciptypes.Address(utils.RandomAddress().String()),
+				Value: big.NewInt(9e18),
+			},
+			{
+				Token: cciptypes.Address(utils.RandomAddress().String()),
+				Value: big.NewInt(1e18),
+			},
+		},
+		GasPrices: []cciptypes.GasPrice{
+			{
+				DestChainSelector: rand.Uint64(),
+				Value:             big.NewInt(2000e9),
+			},
+			{
+				DestChainSelector: rand.Uint64(),
+				Value:             big.NewInt(3000e9),
+			},
+		},
+		MerkleRoot: [32]byte{123},
+		Interval:   cciptypes.CommitStoreInterval{Min: 1, Max: 10},
+	}
+
+	// No onchain calls are made for encoding, so a nil client suffices.
+	c, err := NewCommitStore(logger.TestLogger(t), utils.RandomAddress(), nil, mocks.NewLogPoller(t))
+	assert.NoError(t, err)
+
+	encodedReport, err := c.EncodeCommitReport(ctx, report)
+	require.NoError(t, err)
+	assert.Greater(t, len(encodedReport), 0)
+
+	decodedReport, err := c.DecodeCommitReport(ctx, encodedReport)
+	require.NoError(t, err)
+	require.Equal(t, report, decodedReport)
+}
+
+// TestCommitStoreV120OffchainConfigEncoding round-trips JSONCommitOffchainConfig
+// through encode/decode and checks Validate's required-field errors.
+// (Function name previously had a typo: "V120ffchain" -> "V120Offchain".)
+func TestCommitStoreV120OffchainConfigEncoding(t *testing.T) {
+	t.Parallel()
+	validConfig := JSONCommitOffchainConfig{
+		SourceFinalityDepth:      3,
+		DestFinalityDepth:        4,
+		GasPriceHeartBeat:        *config.MustNewDuration(1 * time.Minute),
+		DAGasPriceDeviationPPB:   10,
+		ExecGasPriceDeviationPPB: 11,
+		TokenPriceHeartBeat:      *config.MustNewDuration(2 * time.Minute),
+		TokenPriceDeviationPPB:   12,
+		InflightCacheExpiry:      *config.MustNewDuration(3 * time.Minute),
+	}
+
+	require.NoError(t, validConfig.Validate())
+
+	tests := []struct {
+		name       string
+		want       JSONCommitOffchainConfig
+		errPattern string
+	}{
+		{
+			name: "legacy offchain config format parses",
+			want: validConfig,
+		},
+		{
+			name: "can omit finality depth",
+			want: modifyCopy(validConfig, func(c *JSONCommitOffchainConfig) {
+				c.SourceFinalityDepth = 0
+				c.DestFinalityDepth = 0
+			}),
+		},
+		{
+			name: "can set PriceReportingDisabled",
+			want: modifyCopy(validConfig, func(c *JSONCommitOffchainConfig) {
+				c.PriceReportingDisabled = true
+			}),
+		},
+		{
+			name: "must set GasPriceHeartBeat",
+			want: modifyCopy(validConfig, func(c *JSONCommitOffchainConfig) {
+				c.GasPriceHeartBeat = *config.MustNewDuration(0)
+			}),
+			errPattern: "GasPriceHeartBeat",
+		},
+		{
+			name: "must set ExecGasPriceDeviationPPB",
+			want: modifyCopy(validConfig, func(c *JSONCommitOffchainConfig) {
+				c.ExecGasPriceDeviationPPB = 0
+			}),
+			errPattern: "ExecGasPriceDeviationPPB",
+		},
+		{
+			name: "must set TokenPriceHeartBeat",
+			want: modifyCopy(validConfig, func(c *JSONCommitOffchainConfig) {
+				c.TokenPriceHeartBeat = *config.MustNewDuration(0)
+			}),
+			errPattern: "TokenPriceHeartBeat",
+		},
+		{
+			name: "must set TokenPriceDeviationPPB",
+			want: modifyCopy(validConfig, func(c *JSONCommitOffchainConfig) {
+				c.TokenPriceDeviationPPB = 0
+			}),
+			errPattern: "TokenPriceDeviationPPB",
+		},
+		{
+			name: "must set InflightCacheExpiry",
+			want: modifyCopy(validConfig, func(c *JSONCommitOffchainConfig) {
+				c.InflightCacheExpiry = *config.MustNewDuration(0)
+			}),
+			errPattern: "InflightCacheExpiry",
+		},
+	}
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			// Copy before encoding: EncodeOffchainConfig takes a pointer.
+			exp := tc.want
+			encode, err := ccipconfig.EncodeOffchainConfig(&exp)
+			require.NoError(t, err)
+			got, err := ccipconfig.DecodeOffchainConfig[JSONCommitOffchainConfig](encode)
+
+			if tc.errPattern != "" {
+				require.ErrorContains(t, err, tc.errPattern)
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, tc.want, got)
+			}
+		})
+	}
+}
+
+// TestCommitStoreV120OffchainConfigDecodingCompatibility checks that legacy
+// JSON blobs (with or without the removed MaxGasPrice fields, and with the
+// optional PriceReportingDisabled flag) still decode to the expected config.
+// (Function name previously had a typo: "V120ffchain" -> "V120Offchain".)
+func TestCommitStoreV120OffchainConfigDecodingCompatibility(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name                   string
+		config                 []byte
+		priceReportingDisabled bool
+	}{
+		{
+			name: "with MaxGasPrice",
+			config: []byte(`{
+				"SourceFinalityDepth": 3,
+				"DestFinalityDepth": 4,
+				"GasPriceHeartBeat": "60s",
+				"DAGasPriceDeviationPPB": 10,
+				"ExecGasPriceDeviationPPB": 11,
+				"TokenPriceHeartBeat": "120s",
+				"TokenPriceDeviationPPB": 12,
+				"MaxGasPrice": 100000000,
+				"SourceMaxGasPrice": 100000000,
+				"InflightCacheExpiry": "180s"
+			}`),
+			priceReportingDisabled: false,
+		},
+		{
+			name: "without MaxGasPrice",
+			config: []byte(`{
+				"SourceFinalityDepth": 3,
+				"DestFinalityDepth": 4,
+				"GasPriceHeartBeat": "60s",
+				"DAGasPriceDeviationPPB": 10,
+				"ExecGasPriceDeviationPPB": 11,
+				"TokenPriceHeartBeat": "120s",
+				"TokenPriceDeviationPPB": 12,
+				"InflightCacheExpiry": "180s"
+			}`),
+			priceReportingDisabled: false,
+		},
+		{
+			name: "with PriceReportingDisabled",
+			config: []byte(`{
+				"SourceFinalityDepth": 3,
+				"DestFinalityDepth": 4,
+				"GasPriceHeartBeat": "60s",
+				"DAGasPriceDeviationPPB": 10,
+				"ExecGasPriceDeviationPPB": 11,
+				"TokenPriceHeartBeat": "120s",
+				"TokenPriceDeviationPPB": 12,
+				"InflightCacheExpiry": "180s",
+				"PriceReportingDisabled": true
+			}`),
+			priceReportingDisabled: true,
+		},
+	}
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			decoded, err := ccipconfig.DecodeOffchainConfig[JSONCommitOffchainConfig](tc.config)
+			require.NoError(t, err)
+			require.Equal(t, JSONCommitOffchainConfig{
+				SourceFinalityDepth:      3,
+				DestFinalityDepth:        4,
+				GasPriceHeartBeat:        *config.MustNewDuration(1 * time.Minute),
+				DAGasPriceDeviationPPB:   10,
+				ExecGasPriceDeviationPPB: 11,
+				TokenPriceHeartBeat:      *config.MustNewDuration(2 * time.Minute),
+				TokenPriceDeviationPPB:   12,
+				InflightCacheExpiry:      *config.MustNewDuration(3 * time.Minute),
+				PriceReportingDisabled:   tc.priceReportingDisabled,
+			}, decoded)
+		})
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher.go
new file mode 100644
index 00000000000..4739c946c36
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher.go
@@ -0,0 +1,101 @@
+package v1_2_0
+
+import (
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+)
+
+const (
+	// MetaDataHashPrefix is the domain-separation prefix hashed into the v1.2
+	// message metadata hash (see NewLeafHasher).
+	MetaDataHashPrefix = "EVM2EVMMessageHashV2"
+)
+
+// LeafHasher computes v1.2 CCIP Merkle leaf hashes from CCIPSendRequested logs.
+type LeafHasher struct {
+	metaDataHash [32]byte
+	ctx          hashutil.Hasher[[32]byte]
+	onRamp       *evm_2_evm_onramp_1_2_0.EVM2EVMOnRamp
+}
+
+// NewLeafHasher builds a LeafHasher whose metadata hash binds the source and
+// destination chain selectors and the onRamp address under the v1.2 prefix.
+func NewLeafHasher(sourceChainSelector uint64, destChainSelector uint64, onRampId common.Address, ctx hashutil.Hasher[[32]byte], onRamp *evm_2_evm_onramp_1_2_0.EVM2EVMOnRamp) *LeafHasher {
+	return &LeafHasher{
+		metaDataHash: v1_0_0.GetMetaDataHash(ctx, ctx.Hash([]byte(MetaDataHashPrefix)), sourceChainSelector, onRampId, destChainSelector),
+		ctx:          ctx,
+		onRamp:       onRamp,
+	}
+}
+
+// HashLeaf parses a CCIPSendRequested log and computes its Merkle leaf hash
+// per the v1.2 spec: fixed-size fields, data, token amounts, and source token
+// data are each hashed separately, then combined under the leaf domain
+// separator and the precomputed metadata hash. The ABI encodings below must
+// not be altered — the result has to match the onchain hashing byte-for-byte.
+func (t *LeafHasher) HashLeaf(log types.Log) ([32]byte, error) {
+	msg, err := t.onRamp.ParseCCIPSendRequested(log)
+	if err != nil {
+		return [32]byte{}, err
+	}
+	message := msg.Message
+	encodedTokens, err := abihelpers.ABIEncode(
+		`[
+{"components": [{"name":"token","type":"address"},{"name":"amount","type":"uint256"}], "type":"tuple[]"}]`, message.TokenAmounts)
+	if err != nil {
+		return [32]byte{}, err
+	}
+
+	bytesArray, err := abi.NewType("bytes[]", "bytes[]", nil)
+	if err != nil {
+		return [32]byte{}, err
+	}
+
+	// SourceTokenData is new in v1.2 and hashed as a separate bytes[] value.
+	encodedSourceTokenData, err := abi.Arguments{abi.Argument{Type: bytesArray}}.PackValues([]interface{}{message.SourceTokenData})
+	if err != nil {
+		return [32]byte{}, err
+	}
+
+	packedFixedSizeValues, err := abihelpers.ABIEncode(
+		`[
+{"name": "sender", "type":"address"},
+{"name": "receiver", "type":"address"},
+{"name": "sequenceNumber", "type":"uint64"},
+{"name": "gasLimit", "type":"uint256"},
+{"name": "strict", "type":"bool"},
+{"name": "nonce", "type":"uint64"},
+{"name": "feeToken","type": "address"},
+{"name": "feeTokenAmount","type": "uint256"}
+]`,
+		message.Sender,
+		message.Receiver,
+		message.SequenceNumber,
+		message.GasLimit,
+		message.Strict,
+		message.Nonce,
+		message.FeeToken,
+		message.FeeTokenAmount,
+	)
+	if err != nil {
+		return [32]byte{}, err
+	}
+	fixedSizeValuesHash := t.ctx.Hash(packedFixedSizeValues)
+
+	packedValues, err := abihelpers.ABIEncode(
+		`[
+{"name": "leafDomainSeparator","type":"bytes1"},
+{"name": "metadataHash", "type":"bytes32"},
+{"name": "fixedSizeValuesHash", "type":"bytes32"},
+{"name": "dataHash", "type":"bytes32"},
+{"name": "tokenAmountsHash", "type":"bytes32"},
+{"name": "sourceTokenDataHash", "type":"bytes32"}
+]`,
+		v1_0_0.LeafDomainSeparator,
+		t.metaDataHash,
+		fixedSizeValuesHash,
+		t.ctx.Hash(message.Data),
+		t.ctx.Hash(encodedTokens),
+		t.ctx.Hash(encodedSourceTokenData),
+	)
+	if err != nil {
+		return [32]byte{}, err
+	}
+	return t.ctx.Hash(packedValues), nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher_test.go
new file mode 100644
index 00000000000..4bfbf7295e6
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher_test.go
@@ -0,0 +1,78 @@
+package v1_2_0
+
+import (
+ "encoding/hex"
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+)
+
+// TestHasherV1_2_0 pins the v1.2 leaf hash against two spec vectors: a
+// minimal message and one with data, multiple token amounts, and source token
+// data. The expected hex strings must not change.
+func TestHasherV1_2_0(t *testing.T) {
+	sourceChainSelector, destChainSelector := uint64(1), uint64(4)
+	onRampAddress := common.HexToAddress("0x5550000000000000000000000000000000000001")
+	onRampABI := abihelpers.MustParseABI(evm_2_evm_onramp_1_2_0.EVM2EVMOnRampABI)
+
+	hashingCtx := hashutil.NewKeccak()
+	// Nil backend is fine: only ABI log parsing is exercised, no RPC calls.
+	ramp, err := evm_2_evm_onramp_1_2_0.NewEVM2EVMOnRamp(onRampAddress, nil)
+	require.NoError(t, err)
+	hasher := NewLeafHasher(sourceChainSelector, destChainSelector, onRampAddress, hashingCtx, ramp)
+
+	message := evm_2_evm_onramp_1_2_0.InternalEVM2EVMMessage{
+		SourceChainSelector: sourceChainSelector,
+		Sender:              common.HexToAddress("0x1110000000000000000000000000000000000001"),
+		Receiver:            common.HexToAddress("0x2220000000000000000000000000000000000001"),
+		SequenceNumber:      1337,
+		GasLimit:            big.NewInt(100),
+		Strict:              false,
+		Nonce:               1337,
+		FeeToken:            common.Address{},
+		FeeTokenAmount:      big.NewInt(1),
+		Data:                []byte{},
+		TokenAmounts:        []evm_2_evm_onramp_1_2_0.ClientEVMTokenAmount{{Token: common.HexToAddress("0x4440000000000000000000000000000000000001"), Amount: big.NewInt(12345678900)}},
+		SourceTokenData:     [][]byte{},
+		MessageId:           [32]byte{},
+	}
+
+	data, err := onRampABI.Events[CCIPSendRequestedEventName].Inputs.Pack(message)
+	require.NoError(t, err)
+	hash, err := hasher.HashLeaf(types.Log{Topics: []common.Hash{CCIPSendRequestEventSig}, Data: data})
+	require.NoError(t, err)
+
+	// NOTE: Must match spec
+	require.Equal(t, "46ad031bfb052db2e4a2514fed8dc480b98e5ce4acb55d5640d91407e0d8a3e9", hex.EncodeToString(hash[:]))
+
+	message = evm_2_evm_onramp_1_2_0.InternalEVM2EVMMessage{
+		SourceChainSelector: sourceChainSelector,
+		Sender:              common.HexToAddress("0x1110000000000000000000000000000000000001"),
+		Receiver:            common.HexToAddress("0x2220000000000000000000000000000000000001"),
+		SequenceNumber:      1337,
+		GasLimit:            big.NewInt(100),
+		Strict:              false,
+		Nonce:               1337,
+		FeeToken:            common.Address{},
+		FeeTokenAmount:      big.NewInt(1e12),
+		Data:                []byte("foo bar baz"),
+		TokenAmounts: []evm_2_evm_onramp_1_2_0.ClientEVMTokenAmount{
+			{Token: common.HexToAddress("0x4440000000000000000000000000000000000001"), Amount: big.NewInt(12345678900)},
+			{Token: common.HexToAddress("0x6660000000000000000000000000000000000001"), Amount: big.NewInt(4204242)},
+		},
+		SourceTokenData: [][]byte{{0x2, 0x1}},
+		MessageId:       [32]byte{},
+	}
+
+	data, err = onRampABI.Events[CCIPSendRequestedEventName].Inputs.Pack(message)
+	require.NoError(t, err)
+	hash, err = hasher.HashLeaf(types.Log{Topics: []common.Hash{CCIPSendRequestEventSig}, Data: data})
+	require.NoError(t, err)
+
+	// NOTE: Must match spec
+	require.Equal(t, "4362a13a42e52ff5ce4324e7184dc7aa41704c3146bc842d35d95b94b32a78b6", hex.EncodeToString(hash[:]))
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp.go
new file mode 100644
index 00000000000..fa00894b380
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp.go
@@ -0,0 +1,340 @@
+package v1_2_0
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/pkg/errors"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices"
+)
+
+var (
+ abiOffRamp = abihelpers.MustParseABI(evm_2_evm_offramp_1_2_0.EVM2EVMOffRampABI)
+ _ ccipdata.OffRampReader = &OffRamp{}
+)
+
+// ExecOnchainConfig is the v1.2 offramp dynamic (onchain) config, aliased so
+// it can carry the ABI decoding helpers below.
+type ExecOnchainConfig evm_2_evm_offramp_1_2_0.EVM2EVMOffRampDynamicConfig
+
+// AbiString returns the tuple ABI used to decode the onchain config blob.
+// Field order must match the contract's dynamic config struct.
+func (d ExecOnchainConfig) AbiString() string {
+	return `
+	[
+		{
+			"components": [
+				{"name": "permissionLessExecutionThresholdSeconds", "type": "uint32"},
+				{"name": "router", "type": "address"},
+				{"name": "priceRegistry", "type": "address"},
+				{"name": "maxNumberOfTokensPerMsg", "type": "uint16"},
+				{"name": "maxDataBytes", "type": "uint32"},
+				{"name": "maxPoolReleaseOrMintGas", "type": "uint32"}
+			],
+			"type": "tuple"
+		}
+	]`
+}
+
+// Validate checks the required onchain exec config fields are set.
+// NOTE(review): MaxDataBytes is not validated here — presumably zero is
+// acceptable; confirm against the contract's expectations.
+func (d ExecOnchainConfig) Validate() error {
+	switch {
+	case d.PermissionLessExecutionThresholdSeconds == 0:
+		return errors.New("must set PermissionLessExecutionThresholdSeconds")
+	case d.Router == (common.Address{}):
+		return errors.New("must set Router address")
+	case d.PriceRegistry == (common.Address{}):
+		return errors.New("must set PriceRegistry address")
+	case d.MaxNumberOfTokensPerMsg == 0:
+		return errors.New("must set MaxNumberOfTokensPerMsg")
+	case d.MaxPoolReleaseOrMintGas == 0:
+		return errors.New("must set MaxPoolReleaseOrMintGas")
+	}
+	return nil
+}
+
+// JSONExecOffchainConfig is the configuration for nodes executing committed CCIP messages (v1.2).
+// It comes from the OffchainConfig field of the corresponding OCR2 plugin configuration.
+// NOTE: do not change the JSON format of this struct without consulting with the RDD people first.
+type JSONExecOffchainConfig struct {
+	// SourceFinalityDepth indicates how many confirmations a transaction should get on the source chain event before we consider it finalized.
+	//
+	// Deprecated: we now use the source chain finality instead.
+	SourceFinalityDepth uint32
+	// See [ccipdata.ExecOffchainConfig.DestOptimisticConfirmations]
+	DestOptimisticConfirmations uint32
+	// DestFinalityDepth indicates how many confirmations a transaction should get on the destination chain event before we consider it finalized.
+	//
+	// Deprecated: we now use the destination chain finality instead.
+	DestFinalityDepth uint32
+	// See [ccipdata.ExecOffchainConfig.BatchGasLimit]
+	BatchGasLimit uint32
+	// See [ccipdata.ExecOffchainConfig.RelativeBoostPerWaitHour]
+	RelativeBoostPerWaitHour float64
+	// See [ccipdata.ExecOffchainConfig.InflightCacheExpiry]
+	InflightCacheExpiry config.Duration
+	// See [ccipdata.ExecOffchainConfig.RootSnoozeTime]
+	RootSnoozeTime config.Duration
+	// See [ccipdata.ExecOffchainConfig.BatchingStrategyID]
+	// Not checked by Validate; zero is an acceptable value.
+	BatchingStrategyID uint32
+	// See [ccipdata.ExecOffchainConfig.MessageVisibilityInterval]
+	// Not checked by Validate; zero is an acceptable value.
+	MessageVisibilityInterval config.Duration
+}
+
+// Validate checks the required exec offchain settings are non-zero and
+// returns an error naming the first missing field. The deprecated finality
+// depths, BatchingStrategyID, and MessageVisibilityInterval are not checked.
+func (c JSONExecOffchainConfig) Validate() error {
+	switch {
+	case c.DestOptimisticConfirmations == 0:
+		return errors.New("must set DestOptimisticConfirmations")
+	case c.BatchGasLimit == 0:
+		return errors.New("must set BatchGasLimit")
+	case c.RelativeBoostPerWaitHour == 0:
+		return errors.New("must set RelativeBoostPerWaitHour")
+	case c.InflightCacheExpiry.Duration() == 0:
+		return errors.New("must set InflightCacheExpiry")
+	case c.RootSnoozeTime.Duration() == 0:
+		return errors.New("must set RootSnoozeTime")
+	}
+	return nil
+}
+
+// OffRamp is the v1.2 offramp reader. It embeds the v1.0.0 reader and
+// overrides the pieces that changed in 1.2 — notably a different gas price
+// estimator implementation and the SourceTokenData message field.
+type OffRamp struct {
+	*v1_0_0.OffRamp
+	// offRampV120 is the 1.2-generated binding used for the overridden calls.
+	offRampV120 evm_2_evm_offramp_1_2_0.EVM2EVMOffRampInterface
+}
+
+// CurrentRateLimiterState reads the onchain token-bucket rate limiter state
+// and translates it to the chain-agnostic type.
+func (o *OffRamp) CurrentRateLimiterState(ctx context.Context) (cciptypes.TokenBucketRateLimit, error) {
+	state, err := o.offRampV120.CurrentRateLimiterState(&bind.CallOpts{Context: ctx})
+	if err != nil {
+		return cciptypes.TokenBucketRateLimit{}, err
+	}
+	out := cciptypes.TokenBucketRateLimit{
+		Tokens:      state.Tokens,
+		LastUpdated: state.LastUpdated,
+		IsEnabled:   state.IsEnabled,
+		Capacity:    state.Capacity,
+		Rate:        state.Rate,
+	}
+	return out, nil
+}
+
+// GetRouter returns the router address from the offramp's onchain dynamic config.
+func (o *OffRamp) GetRouter(ctx context.Context) (cciptypes.Address, error) {
+	dynamicConfig, err := o.offRampV120.GetDynamicConfig(&bind.CallOpts{Context: ctx})
+	if err != nil {
+		return "", err
+	}
+	return ccipcalc.EvmAddrToGeneric(dynamicConfig.Router), nil
+}
+
+// ChangeConfig parses the new onchain/offchain exec configs, installs them on
+// the embedded v1.0.0 offramp with a v1.2 DA gas price estimator, and returns
+// the price registry and destination wrapped-native addresses.
+func (o *OffRamp) ChangeConfig(ctx context.Context, onchainConfigBytes []byte, offchainConfigBytes []byte) (cciptypes.Address, cciptypes.Address, error) {
+	// Same as the v1.0.0 method, except for the ExecOnchainConfig type.
+	onchainConfigParsed, err := abihelpers.DecodeAbiStruct[ExecOnchainConfig](onchainConfigBytes)
+	if err != nil {
+		return "", "", err
+	}
+
+	offchainConfigParsed, err := ccipconfig.DecodeOffchainConfig[JSONExecOffchainConfig](offchainConfigBytes)
+	if err != nil {
+		return "", "", err
+	}
+	destRouter, err := router.NewRouter(onchainConfigParsed.Router, o.Client)
+	if err != nil {
+		return "", "", err
+	}
+	destWrappedNative, err := destRouter.GetWrappedNative(nil)
+	if err != nil {
+		return "", "", err
+	}
+	offchainConfig := cciptypes.ExecOffchainConfig{
+		DestOptimisticConfirmations: offchainConfigParsed.DestOptimisticConfirmations,
+		BatchGasLimit:               offchainConfigParsed.BatchGasLimit,
+		RelativeBoostPerWaitHour:    offchainConfigParsed.RelativeBoostPerWaitHour,
+		InflightCacheExpiry:         offchainConfigParsed.InflightCacheExpiry,
+		RootSnoozeTime:              offchainConfigParsed.RootSnoozeTime,
+		MessageVisibilityInterval:   offchainConfigParsed.MessageVisibilityInterval,
+		BatchingStrategyID:          offchainConfigParsed.BatchingStrategyID,
+	}
+	onchainConfig := cciptypes.ExecOnchainConfig{
+		PermissionLessExecutionThresholdSeconds: time.Second * time.Duration(onchainConfigParsed.PermissionLessExecutionThresholdSeconds),
+		Router:                                  cciptypes.Address(onchainConfigParsed.Router.String()),
+	}
+	priceEstimator := prices.NewDAGasPriceEstimator(o.Estimator, o.DestMaxGasPrice, 0, 0)
+
+	o.UpdateDynamicConfig(onchainConfig, offchainConfig, priceEstimator)
+
+	// Bug fix: the log keys were previously swapped — "offchainConfig" logged
+	// the parsed onchain struct and vice versa.
+	o.Logger.Infow("Starting exec plugin",
+		"offchainConfig", offchainConfigParsed,
+		"onchainConfig", onchainConfigParsed)
+	return cciptypes.Address(onchainConfigParsed.PriceRegistry.String()),
+		cciptypes.Address(destWrappedNative.String()), nil
+}
+
+// EncodeExecutionReport converts a chain-agnostic exec report into the v1.2
+// InternalExecutionReport struct and ABI-packs it with the given arguments.
+func EncodeExecutionReport(ctx context.Context, args abi.Arguments, report cciptypes.ExecReport) ([]byte, error) {
+	var msgs []evm_2_evm_offramp_1_2_0.InternalEVM2EVMMessage
+	for _, msg := range report.Messages {
+		var ta []evm_2_evm_offramp_1_2_0.ClientEVMTokenAmount
+		for _, tokenAndAmount := range msg.TokenAmounts {
+			evmAddrs, err := ccipcalc.GenericAddrsToEvm(tokenAndAmount.Token)
+			if err != nil {
+				return nil, err
+			}
+			ta = append(ta, evm_2_evm_offramp_1_2_0.ClientEVMTokenAmount{
+				Token:  evmAddrs[0],
+				Amount: tokenAndAmount.Amount,
+			})
+		}
+
+		// Convert sender, receiver, and fee token addresses in one call;
+		// evmAddrs is ordered to match the arguments.
+		evmAddrs, err := ccipcalc.GenericAddrsToEvm(msg.Sender, msg.Receiver, msg.FeeToken)
+		if err != nil {
+			return nil, err
+		}
+
+		msgs = append(msgs, evm_2_evm_offramp_1_2_0.InternalEVM2EVMMessage{
+			SourceChainSelector: msg.SourceChainSelector,
+			Sender:              evmAddrs[0],
+			Receiver:            evmAddrs[1],
+			SequenceNumber:      msg.SequenceNumber,
+			GasLimit:            msg.GasLimit,
+			Strict:              msg.Strict,
+			Nonce:               msg.Nonce,
+			FeeToken:            evmAddrs[2],
+			FeeTokenAmount:      msg.FeeTokenAmount,
+			Data:                msg.Data,
+			TokenAmounts:        ta,
+			MessageId:           msg.MessageID,
+			// NOTE: this field is new in v1.2.
+			SourceTokenData: msg.SourceTokenData,
+		})
+	}
+
+	rep := evm_2_evm_offramp_1_2_0.InternalExecutionReport{
+		Messages:          msgs,
+		OffchainTokenData: report.OffchainTokenData,
+		Proofs:            report.Proofs,
+		ProofFlagBits:     report.ProofFlagBits,
+	}
+	return args.PackValues([]interface{}{&rep})
+}
+
+// EncodeExecutionReport ABI-encodes an exec report using this offramp's
+// execution report arguments.
+func (o *OffRamp) EncodeExecutionReport(ctx context.Context, report cciptypes.ExecReport) ([]byte, error) {
+	return EncodeExecutionReport(ctx, o.ExecutionReportArgs, report)
+}
+
+// DecodeExecReport ABI-unpacks an encoded execution report and translates it
+// to the chain-agnostic cciptypes.ExecReport. The anonymous struct in the
+// type assertion must mirror the generated ABI tuple exactly — any drift
+// makes the assertion fail at runtime.
+func DecodeExecReport(ctx context.Context, args abi.Arguments, report []byte) (cciptypes.ExecReport, error) {
+	unpacked, err := args.Unpack(report)
+	if err != nil {
+		return cciptypes.ExecReport{}, err
+	}
+	if len(unpacked) == 0 {
+		return cciptypes.ExecReport{}, errors.New("assumptionViolation: expected at least one element")
+	}
+	// Must be anonymous struct here
+	erStruct, ok := unpacked[0].(struct {
+		Messages []struct {
+			SourceChainSelector uint64         `json:"sourceChainSelector"`
+			Sender              common.Address `json:"sender"`
+			Receiver            common.Address `json:"receiver"`
+			SequenceNumber      uint64         `json:"sequenceNumber"`
+			GasLimit            *big.Int       `json:"gasLimit"`
+			Strict              bool           `json:"strict"`
+			Nonce               uint64         `json:"nonce"`
+			FeeToken            common.Address `json:"feeToken"`
+			FeeTokenAmount      *big.Int       `json:"feeTokenAmount"`
+			Data                []uint8        `json:"data"`
+			TokenAmounts        []struct {
+				Token  common.Address `json:"token"`
+				Amount *big.Int       `json:"amount"`
+			} `json:"tokenAmounts"`
+			SourceTokenData [][]uint8 `json:"sourceTokenData"`
+			MessageId       [32]uint8 `json:"messageId"`
+		} `json:"messages"`
+		OffchainTokenData [][][]uint8 `json:"offchainTokenData"`
+		Proofs            [][32]uint8 `json:"proofs"`
+		ProofFlagBits     *big.Int    `json:"proofFlagBits"`
+	})
+	if !ok {
+		return cciptypes.ExecReport{}, fmt.Errorf("got %T", unpacked[0])
+	}
+	messages := make([]cciptypes.EVM2EVMMessage, 0, len(erStruct.Messages))
+	for _, msg := range erStruct.Messages {
+		var tokensAndAmounts []cciptypes.TokenAmount
+		for _, tokenAndAmount := range msg.TokenAmounts {
+			tokensAndAmounts = append(tokensAndAmounts, cciptypes.TokenAmount{
+				Token:  cciptypes.Address(tokenAndAmount.Token.String()),
+				Amount: tokenAndAmount.Amount,
+			})
+		}
+		messages = append(messages, cciptypes.EVM2EVMMessage{
+			SequenceNumber:      msg.SequenceNumber,
+			GasLimit:            msg.GasLimit,
+			Nonce:               msg.Nonce,
+			MessageID:           msg.MessageId,
+			SourceChainSelector: msg.SourceChainSelector,
+			Sender:              cciptypes.Address(msg.Sender.String()),
+			Receiver:            cciptypes.Address(msg.Receiver.String()),
+			Strict:              msg.Strict,
+			FeeToken:            cciptypes.Address(msg.FeeToken.String()),
+			FeeTokenAmount:      msg.FeeTokenAmount,
+			Data:                msg.Data,
+			TokenAmounts:        tokensAndAmounts,
+			SourceTokenData:     msg.SourceTokenData,
+			// TODO: Not needed for plugins, but should be recomputed for consistency.
+			// Requires the offramp knowing about onramp version
+			Hash: [32]byte{},
+		})
+	}
+
+	// Unpack will populate with big.Int{false, } for 0 values,
+	// which is different from the expected big.NewInt(0). Rebuild to the expected value for this case.
+	return cciptypes.ExecReport{
+		Messages:          messages,
+		OffchainTokenData: erStruct.OffchainTokenData,
+		Proofs:            erStruct.Proofs,
+		ProofFlagBits:     new(big.Int).SetBytes(erStruct.ProofFlagBits.Bytes()),
+	}, nil
+}
+
+// DecodeExecutionReport decodes an ABI-encoded exec report using this
+// offramp's execution report arguments.
+func (o *OffRamp) DecodeExecutionReport(ctx context.Context, report []byte) (cciptypes.ExecReport, error) {
+	return DecodeExecReport(ctx, o.ExecutionReportArgs, report)
+}
+
+// NewOffRamp builds a v1.2 offramp reader on top of the v1.0.0 reader,
+// swapping in the 1.2 contract binding and the 1.2 execution-report ABI.
+func NewOffRamp(lggr logger.Logger, addr common.Address, ec client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int) (*OffRamp, error) {
+	v100, err := v1_0_0.NewOffRamp(lggr, addr, ec, lp, estimator, destMaxGasPrice)
+	if err != nil {
+		return nil, err
+	}
+
+	offRamp, err := evm_2_evm_offramp_1_2_0.NewEVM2EVMOffRamp(addr, ec)
+	if err != nil {
+		return nil, err
+	}
+
+	// Override the embedded reader's report args with the 1.2 ABI: the first
+	// input of manuallyExecute is the execution report tuple.
+	v100.ExecutionReportArgs = abihelpers.MustGetMethodInputs("manuallyExecute", abiOffRamp)[:1]
+
+	return &OffRamp{
+		OffRamp:     v100,
+		offRampV120: offRamp,
+	}, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_test.go
new file mode 100644
index 00000000000..f87fc8842f6
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_test.go
@@ -0,0 +1,38 @@
+package v1_2_0_test
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+)
+
+func TestExecutionReportEncodingV120(t *testing.T) {
+ // Note could consider some fancier testing here (fuzz/property)
+ // but I think that would essentially be testing geth's abi library
+ // as our encode/decode is a thin wrapper around that.
+ report := cciptypes.ExecReport{
+ Messages: []cciptypes.EVM2EVMMessage{},
+ OffchainTokenData: [][][]byte{{}},
+ Proofs: [][32]byte{testutils.Random32Byte()},
+ ProofFlagBits: big.NewInt(133),
+ }
+
+ offRamp, err := v1_2_0.NewOffRamp(logger.TestLogger(t), utils.RandomAddress(), nil, lpmocks.NewLogPoller(t), nil, nil)
+ require.NoError(t, err)
+
+ ctx := testutils.Context(t)
+ encodeExecutionReport, err := offRamp.EncodeExecutionReport(ctx, report)
+ require.NoError(t, err)
+ decodeCommitReport, err := offRamp.DecodeExecutionReport(ctx, encodeExecutionReport)
+ require.NoError(t, err)
+ require.Equal(t, report.Proofs, decodeCommitReport.Proofs)
+ require.Equal(t, report, decodeCommitReport)
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_unit_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_unit_test.go
new file mode 100644
index 00000000000..98454ce59b2
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_unit_test.go
@@ -0,0 +1,36 @@
+package v1_2_0
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_2_0"
+ mock_contracts "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/mocks/v1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+)
+
+func TestGetRouter(t *testing.T) {
+ routerAddr := utils.RandomAddress()
+
+ mockOffRamp := mock_contracts.NewEVM2EVMOffRampInterface(t)
+ mockOffRamp.On("GetDynamicConfig", mock.Anything).Return(evm_2_evm_offramp_1_2_0.EVM2EVMOffRampDynamicConfig{
+ Router: routerAddr,
+ }, nil)
+
+ offRamp := OffRamp{
+ offRampV120: mockOffRamp,
+ }
+
+ ctx := testutils.Context(t)
+ gotRouterAddr, err := offRamp.GetRouter(ctx)
+ require.NoError(t, err)
+
+ gotRouterEvmAddr, err := ccipcalc.GenericAddrToEvm(gotRouterAddr)
+ require.NoError(t, err)
+ assert.Equal(t, routerAddr, gotRouterEvmAddr)
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_test.go
new file mode 100644
index 00000000000..7d174d5db71
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_test.go
@@ -0,0 +1,173 @@
+package v1_2_0
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+)
+
+func modifyCopy[T any](c T, f func(c *T)) T {
+ f(&c)
+ return c
+}
+
+func TestExecOffchainConfig120_Encoding(t *testing.T) {
+ t.Parallel()
+ validConfig := JSONExecOffchainConfig{
+ SourceFinalityDepth: 3,
+ DestOptimisticConfirmations: 6,
+ DestFinalityDepth: 3,
+ BatchGasLimit: 5_000_000,
+ RelativeBoostPerWaitHour: 0.07,
+ InflightCacheExpiry: *config.MustNewDuration(64 * time.Second),
+ RootSnoozeTime: *config.MustNewDuration(128 * time.Minute),
+ BatchingStrategyID: 0,
+ }
+
+ tests := []struct {
+ name string
+ want JSONExecOffchainConfig
+ errPattern string
+ }{
+ {
+ name: "legacy offchain config format parses",
+ want: validConfig,
+ },
+ {
+ name: "can omit finality depth",
+ want: modifyCopy(validConfig, func(c *JSONExecOffchainConfig) {
+ c.SourceFinalityDepth = 0
+ c.DestFinalityDepth = 0
+ }),
+ },
+ {
+ name: "must set BatchGasLimit",
+ want: modifyCopy(validConfig, func(c *JSONExecOffchainConfig) {
+ c.BatchGasLimit = 0
+ }),
+ errPattern: "BatchGasLimit",
+ },
+ {
+ name: "must set DestOptimisticConfirmations",
+ want: modifyCopy(validConfig, func(c *JSONExecOffchainConfig) {
+ c.DestOptimisticConfirmations = 0
+ }),
+ errPattern: "DestOptimisticConfirmations",
+ },
+ {
+ name: "must set RelativeBoostPerWaitHour",
+ want: modifyCopy(validConfig, func(c *JSONExecOffchainConfig) {
+ c.RelativeBoostPerWaitHour = 0
+ }),
+ errPattern: "RelativeBoostPerWaitHour",
+ },
+ {
+ name: "must set InflightCacheExpiry",
+ want: modifyCopy(validConfig, func(c *JSONExecOffchainConfig) {
+ c.InflightCacheExpiry = *config.MustNewDuration(0)
+ }),
+ errPattern: "InflightCacheExpiry",
+ },
+ {
+ name: "must set RootSnoozeTime",
+ want: modifyCopy(validConfig, func(c *JSONExecOffchainConfig) {
+ c.RootSnoozeTime = *config.MustNewDuration(0)
+ }),
+ errPattern: "RootSnoozeTime",
+ },
+ {
+ name: "must set BatchingStrategyId",
+ want: modifyCopy(validConfig, func(c *JSONExecOffchainConfig) {
+ c.BatchingStrategyID = 1
+ }),
+ },
+ }
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ exp := tc.want
+ encode, err := ccipconfig.EncodeOffchainConfig(&exp)
+ require.NoError(t, err)
+ got, err := ccipconfig.DecodeOffchainConfig[JSONExecOffchainConfig](encode)
+
+ if tc.errPattern != "" {
+ require.ErrorContains(t, err, tc.errPattern)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tc.want, got)
+ }
+ })
+ }
+}
+
+func TestExecOffchainConfig120_ParseRawJson(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ config []byte
+ }{
+ {
+ name: "with MaxGasPrice",
+ config: []byte(`{
+ "DestOptimisticConfirmations": 6,
+ "BatchGasLimit": 5000000,
+ "RelativeBoostPerWaitHour": 0.07,
+ "MaxGasPrice": 200000000000,
+ "InflightCacheExpiry": "64s",
+ "RootSnoozeTime": "128m"
+ }`),
+ },
+ {
+ name: "without MaxGasPrice",
+ config: []byte(`{
+ "DestOptimisticConfirmations": 6,
+ "BatchGasLimit": 5000000,
+ "RelativeBoostPerWaitHour": 0.07,
+ "InflightCacheExpiry": "64s",
+ "RootSnoozeTime": "128m"
+ }`),
+ },
+ {
+ name: "with BatchingStrategyId",
+ config: []byte(`{
+ "DestOptimisticConfirmations": 6,
+ "BatchGasLimit": 5000000,
+ "RelativeBoostPerWaitHour": 0.07,
+ "InflightCacheExpiry": "64s",
+ "RootSnoozeTime": "128m",
+ "BatchingStrategyId": 1
+ }`),
+ },
+ }
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ decoded, err := ccipconfig.DecodeOffchainConfig[JSONExecOffchainConfig](tc.config)
+ require.NoError(t, err)
+
+ if tc.name == "with BatchingStrategyId" {
+ require.Equal(t, JSONExecOffchainConfig{
+ DestOptimisticConfirmations: 6,
+ BatchGasLimit: 5_000_000,
+ RelativeBoostPerWaitHour: 0.07,
+ InflightCacheExpiry: *config.MustNewDuration(64 * time.Second),
+ RootSnoozeTime: *config.MustNewDuration(128 * time.Minute),
+ BatchingStrategyID: 1, // Actual value
+ }, decoded)
+ } else {
+ require.Equal(t, JSONExecOffchainConfig{
+ DestOptimisticConfirmations: 6,
+ BatchGasLimit: 5_000_000,
+ RelativeBoostPerWaitHour: 0.07,
+ InflightCacheExpiry: *config.MustNewDuration(64 * time.Second),
+ RootSnoozeTime: *config.MustNewDuration(128 * time.Minute),
+ BatchingStrategyID: 0, // Default
+ }, decoded)
+ }
+ })
+ }
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp.go
new file mode 100644
index 00000000000..f727d7fd5fa
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp.go
@@ -0,0 +1,255 @@
+package v1_2_0
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/arm_contract"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/logpollerutil"
+)
+
+var (
+ // Backwards compat for integration tests
+ CCIPSendRequestEventSig common.Hash
+ ConfigSetEventSig common.Hash
+)
+
+const (
+ CCIPSendRequestSeqNumIndex = 4
+ CCIPSendRequestedEventName = "CCIPSendRequested"
+ ConfigSetEventName = "ConfigSet"
+)
+
+func init() {
+ onRampABI, err := abi.JSON(strings.NewReader(evm_2_evm_onramp_1_2_0.EVM2EVMOnRampABI))
+ if err != nil {
+ panic(err)
+ }
+ CCIPSendRequestEventSig = abihelpers.MustGetEventID(CCIPSendRequestedEventName, onRampABI)
+ ConfigSetEventSig = abihelpers.MustGetEventID(ConfigSetEventName, onRampABI)
+}
+
+var _ ccipdata.OnRampReader = &OnRamp{}
+
+// Significant change in 1.2:
+// - CCIPSendRequested event signature has changed
+type OnRamp struct {
+ onRamp *evm_2_evm_onramp_1_2_0.EVM2EVMOnRamp
+ address common.Address
+ lggr logger.Logger
+ lp logpoller.LogPoller
+ leafHasher ccipdata.LeafHasherInterface[[32]byte]
+ client client.Client
+ sendRequestedEventSig common.Hash
+ sendRequestedSeqNumberWord int
+ filters []logpoller.Filter
+ cachedSourcePriceRegistryAddress cache.AutoSync[cciptypes.Address]
+ // Static config can be cached, because it's never expected to change.
+ // The only way to change that is through the contract's constructor (redeployment)
+ cachedStaticConfig cache.OnceCtxFunction[evm_2_evm_onramp_1_2_0.EVM2EVMOnRampStaticConfig]
+ cachedRmnContract cache.OnceCtxFunction[*arm_contract.ARMContract]
+}
+
+func NewOnRamp(lggr logger.Logger, sourceSelector, destSelector uint64, onRampAddress common.Address, sourceLP logpoller.LogPoller, source client.Client) (*OnRamp, error) {
+ onRamp, err := evm_2_evm_onramp_1_2_0.NewEVM2EVMOnRamp(onRampAddress, source)
+ if err != nil {
+ return nil, err
+ }
+ // Subscribe to the relevant logs
+ // Note we can keep the same prefix across 1.0/1.1 and 1.2 because the onramp addresses will be different
+ filters := []logpoller.Filter{
+ {
+ Name: logpoller.FilterName(ccipdata.COMMIT_CCIP_SENDS, onRampAddress),
+ EventSigs: []common.Hash{CCIPSendRequestEventSig},
+ Addresses: []common.Address{onRampAddress},
+ Retention: ccipdata.CommitExecLogsRetention,
+ },
+ {
+ Name: logpoller.FilterName(ccipdata.CONFIG_CHANGED, onRampAddress),
+ EventSigs: []common.Hash{ConfigSetEventSig},
+ Addresses: []common.Address{onRampAddress},
+ Retention: ccipdata.CacheEvictionLogsRetention,
+ },
+ }
+ cachedStaticConfig := cache.OnceCtxFunction[evm_2_evm_onramp_1_2_0.EVM2EVMOnRampStaticConfig](func(ctx context.Context) (evm_2_evm_onramp_1_2_0.EVM2EVMOnRampStaticConfig, error) {
+ return onRamp.GetStaticConfig(&bind.CallOpts{Context: ctx})
+ })
+ cachedRmnContract := cache.OnceCtxFunction[*arm_contract.ARMContract](func(ctx context.Context) (*arm_contract.ARMContract, error) {
+ staticConfig, err := cachedStaticConfig(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return arm_contract.NewARMContract(staticConfig.ArmProxy, source)
+ })
+ return &OnRamp{
+ lggr: lggr,
+ client: source,
+ lp: sourceLP,
+ leafHasher: NewLeafHasher(sourceSelector, destSelector, onRampAddress, hashutil.NewKeccak(), onRamp),
+ onRamp: onRamp,
+ filters: filters,
+ address: onRampAddress,
+ sendRequestedSeqNumberWord: CCIPSendRequestSeqNumIndex,
+ sendRequestedEventSig: CCIPSendRequestEventSig,
+ cachedSourcePriceRegistryAddress: cache.NewLogpollerEventsBased[cciptypes.Address](
+ sourceLP,
+ []common.Hash{ConfigSetEventSig},
+ onRampAddress,
+ ),
+ cachedStaticConfig: cache.CallOnceOnNoError(cachedStaticConfig),
+ cachedRmnContract: cache.CallOnceOnNoError(cachedRmnContract),
+ }, nil
+}
+
+func (o *OnRamp) Address(context.Context) (cciptypes.Address, error) {
+ return cciptypes.Address(o.onRamp.Address().String()), nil
+}
+
+func (o *OnRamp) GetDynamicConfig(ctx context.Context) (cciptypes.OnRampDynamicConfig, error) {
+	if o.onRamp == nil {
+		return cciptypes.OnRampDynamicConfig{}, fmt.Errorf("onramp not initialized")
+	}
+	config, err := o.onRamp.GetDynamicConfig(&bind.CallOpts{Context: ctx}) // propagate ctx so the RPC call honors caller cancellation/deadlines
+	if err != nil {
+		return cciptypes.OnRampDynamicConfig{}, fmt.Errorf("get dynamic config v1.2: %w", err)
+	}
+	return cciptypes.OnRampDynamicConfig{
+		Router:                            cciptypes.Address(config.Router.String()),
+		MaxNumberOfTokensPerMsg:           config.MaxNumberOfTokensPerMsg,
+		DestGasOverhead:                   config.DestGasOverhead,
+		DestGasPerPayloadByte:             config.DestGasPerPayloadByte,
+		DestDataAvailabilityOverheadGas:   config.DestDataAvailabilityOverheadGas,
+		DestGasPerDataAvailabilityByte:    config.DestGasPerDataAvailabilityByte,
+		DestDataAvailabilityMultiplierBps: config.DestDataAvailabilityMultiplierBps,
+		PriceRegistry:                     cciptypes.Address(config.PriceRegistry.String()),
+		MaxDataBytes:                      config.MaxDataBytes,
+		MaxPerMsgGasLimit:                 config.MaxPerMsgGasLimit,
+	}, nil
+}
+
+func (o *OnRamp) SourcePriceRegistryAddress(ctx context.Context) (cciptypes.Address, error) {
+ return o.cachedSourcePriceRegistryAddress.Get(ctx, func(ctx context.Context) (cciptypes.Address, error) {
+ c, err := o.GetDynamicConfig(ctx)
+ if err != nil {
+ return "", err
+ }
+ return c.PriceRegistry, nil
+ })
+}
+
+func (o *OnRamp) GetSendRequestsBetweenSeqNums(ctx context.Context, seqNumMin, seqNumMax uint64, finalized bool) ([]cciptypes.EVM2EVMMessageWithTxMeta, error) {
+ logs, err := o.lp.LogsDataWordRange(
+ ctx,
+ o.sendRequestedEventSig,
+ o.address,
+ o.sendRequestedSeqNumberWord,
+ logpoller.EvmWord(seqNumMin),
+ logpoller.EvmWord(seqNumMax),
+ ccipdata.LogsConfirmations(finalized),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ parsedLogs, err := ccipdata.ParseLogs[cciptypes.EVM2EVMMessage](logs, o.lggr, o.logToMessage)
+ if err != nil {
+ return nil, err
+ }
+
+ res := make([]cciptypes.EVM2EVMMessageWithTxMeta, 0, len(logs))
+ for _, log := range parsedLogs {
+ res = append(res, cciptypes.EVM2EVMMessageWithTxMeta{
+ TxMeta: log.TxMeta,
+ EVM2EVMMessage: log.Data,
+ })
+ }
+
+ return res, nil
+}
+
+func (o *OnRamp) RouterAddress(ctx context.Context) (cciptypes.Address, error) {
+	config, err := o.onRamp.GetDynamicConfig(&bind.CallOpts{Context: ctx}) // was nil opts, which dropped the caller's ctx
+	if err != nil {
+		return "", err
+	}
+	return cciptypes.Address(config.Router.String()), nil
+}
+
+func (o *OnRamp) IsSourceChainHealthy(context.Context) (bool, error) {
+ if err := o.lp.Healthy(); err != nil {
+ return false, nil
+ }
+ return true, nil
+}
+
+func (o *OnRamp) IsSourceCursed(ctx context.Context) (bool, error) {
+	arm, err := o.cachedRmnContract(ctx)
+	if err != nil {
+		return false, fmt.Errorf("initializing Arm contract through the ArmProxy: %w", err)
+	}
+
+	cursed, err := arm.IsCursed0(&bind.CallOpts{Context: ctx})
+	if err != nil {
+		return false, fmt.Errorf("checking if source Arm is cursed: %w", err)
+	}
+	return cursed, nil
+}
+
+func (o *OnRamp) Close() error {
+ return logpollerutil.UnregisterLpFilters(o.lp, o.filters)
+}
+
+func (o *OnRamp) RegisterFilters() error {
+ return logpollerutil.RegisterLpFilters(o.lp, o.filters)
+}
+
+func (o *OnRamp) logToMessage(log types.Log) (*cciptypes.EVM2EVMMessage, error) {
+ msg, err := o.onRamp.ParseCCIPSendRequested(log)
+ if err != nil {
+ return nil, err
+ }
+ h, err := o.leafHasher.HashLeaf(log)
+ if err != nil {
+ return nil, err
+ }
+ tokensAndAmounts := make([]cciptypes.TokenAmount, len(msg.Message.TokenAmounts))
+ for i, tokenAndAmount := range msg.Message.TokenAmounts {
+ tokensAndAmounts[i] = cciptypes.TokenAmount{
+ Token: cciptypes.Address(tokenAndAmount.Token.String()),
+ Amount: tokenAndAmount.Amount,
+ }
+ }
+
+ return &cciptypes.EVM2EVMMessage{
+ SequenceNumber: msg.Message.SequenceNumber,
+ GasLimit: msg.Message.GasLimit,
+ Nonce: msg.Message.Nonce,
+ MessageID: msg.Message.MessageId,
+ SourceChainSelector: msg.Message.SourceChainSelector,
+ Sender: cciptypes.Address(msg.Message.Sender.String()),
+ Receiver: cciptypes.Address(msg.Message.Receiver.String()),
+ Strict: msg.Message.Strict,
+ FeeToken: cciptypes.Address(msg.Message.FeeToken.String()),
+ FeeTokenAmount: msg.Message.FeeTokenAmount,
+ Data: msg.Message.Data,
+ TokenAmounts: tokensAndAmounts,
+ SourceTokenData: msg.Message.SourceTokenData, // Breaking change 1.2
+ Hash: h,
+ }, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp_test.go
new file mode 100644
index 00000000000..ec912667ac7
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp_test.go
@@ -0,0 +1,57 @@
+package v1_2_0
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+)
+
+func TestLogPollerClient_GetSendRequestsBetweenSeqNumsV1_2_0(t *testing.T) {
+ onRampAddr := utils.RandomAddress()
+ seqNum := uint64(100)
+ limit := uint64(10)
+ lggr := logger.TestLogger(t)
+
+ tests := []struct {
+ name string
+ finalized bool
+ confirmations evmtypes.Confirmations
+ }{
+ {"finalized", true, evmtypes.Finalized},
+ {"unfinalized", false, evmtypes.Confirmations(0)},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ lp := mocks.NewLogPoller(t)
+ onRampV2, err := NewOnRamp(lggr, 1, 1, onRampAddr, lp, nil)
+ require.NoError(t, err)
+
+ lp.On("LogsDataWordRange",
+ mock.Anything,
+ onRampV2.sendRequestedEventSig,
+ onRampAddr,
+ onRampV2.sendRequestedSeqNumberWord,
+ abihelpers.EvmWord(seqNum),
+ abihelpers.EvmWord(seqNum+limit),
+ tt.confirmations,
+ ).Once().Return([]logpoller.Log{}, nil)
+
+ events, err1 := onRampV2.GetSendRequestsBetweenSeqNums(context.Background(), seqNum, seqNum+limit, tt.finalized)
+ assert.NoError(t, err1)
+ assert.Empty(t, events)
+
+ lp.AssertExpectations(t)
+ })
+ }
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/price_registry.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/price_registry.go
new file mode 100644
index 00000000000..9aac30e6123
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/price_registry.go
@@ -0,0 +1,68 @@
+package v1_2_0
+
+import (
+ "context"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+)
+
+var (
+ _ ccipdata.PriceRegistryReader = &PriceRegistry{}
+)
+
+type PriceRegistry struct {
+ *v1_0_0.PriceRegistry
+ pr *price_registry_1_2_0.PriceRegistry
+}
+
+func NewPriceRegistry(lggr logger.Logger, priceRegistryAddr common.Address, lp logpoller.LogPoller, ec client.Client, registerFilters bool) (*PriceRegistry, error) {
+ v100, err := v1_0_0.NewPriceRegistry(lggr, priceRegistryAddr, lp, ec, registerFilters)
+ if err != nil {
+ return nil, err
+ }
+ priceRegistry, err := price_registry_1_2_0.NewPriceRegistry(priceRegistryAddr, ec)
+ if err != nil {
+ return nil, err
+ }
+ return &PriceRegistry{
+ PriceRegistry: v100,
+ pr: priceRegistry,
+ }, nil
+}
+
+// GetTokenPrices must be overridden to use the 1.2 ABI (return parameter changed from uint192 to uint224)
+// See https://github.com/smartcontractkit/ccip/blob/ccip-develop/contracts/src/v0.8/ccip/PriceRegistry.sol#L141
+func (p *PriceRegistry) GetTokenPrices(ctx context.Context, wantedTokens []cciptypes.Address) ([]cciptypes.TokenPriceUpdate, error) {
+ evmAddrs, err := ccipcalc.GenericAddrsToEvm(wantedTokens...)
+ if err != nil {
+ return nil, err
+ }
+
+	// Make the call using the uint224 ABI.
+ tps, err := p.pr.GetTokenPrices(&bind.CallOpts{Context: ctx}, evmAddrs)
+ if err != nil {
+ return nil, err
+ }
+ var tpu []cciptypes.TokenPriceUpdate
+ for i, tp := range tps {
+ tpu = append(tpu, cciptypes.TokenPriceUpdate{
+ TokenPrice: cciptypes.TokenPrice{
+ Token: wantedTokens[i],
+ Value: tp.Value,
+ },
+ TimestampUnixSec: big.NewInt(int64(tp.Timestamp)),
+ })
+ }
+ return tpu, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/test_helpers.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/test_helpers.go
new file mode 100644
index 00000000000..e7972d5f5fe
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/test_helpers.go
@@ -0,0 +1,48 @@
+package v1_2_0
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+)
+
+// ApplyPriceRegistryUpdate is a helper function used in tests only.
+func ApplyPriceRegistryUpdate(t *testing.T, user *bind.TransactOpts, addr common.Address, ec client.Client, gasPrices []cciptypes.GasPrice, tokenPrices []cciptypes.TokenPrice) common.Hash {
+ require.True(t, len(gasPrices) <= 2)
+ pr, err := price_registry.NewPriceRegistry(addr, ec)
+ require.NoError(t, err)
+ o, err := pr.Owner(nil)
+ require.NoError(t, err)
+ require.Equal(t, user.From, o)
+ var tps []price_registry.InternalTokenPriceUpdate
+ for _, tp := range tokenPrices {
+ evmAddrs, err1 := ccipcalc.GenericAddrsToEvm(tp.Token)
+ assert.NoError(t, err1)
+ tps = append(tps, price_registry.InternalTokenPriceUpdate{
+ SourceToken: evmAddrs[0],
+ UsdPerToken: tp.Value,
+ })
+ }
+ var gps []price_registry.InternalGasPriceUpdate
+ for _, gp := range gasPrices {
+ gps = append(gps, price_registry.InternalGasPriceUpdate{
+ DestChainSelector: gp.DestChainSelector,
+ UsdPerUnitGas: gp.Value,
+ })
+ }
+ tx, err := pr.UpdatePrices(user, price_registry.InternalPriceUpdates{
+ TokenPriceUpdates: tps,
+ GasPriceUpdates: gps,
+ })
+ require.NoError(t, err)
+ return tx.Hash()
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool.go
new file mode 100644
index 00000000000..a0850ebb2e9
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool.go
@@ -0,0 +1,48 @@
+package v1_2_0
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib"
+
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/burn_mint_token_pool_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+)
+
+var (
+ poolABI = abihelpers.MustParseABI(burn_mint_token_pool_1_2_0.BurnMintTokenPoolABI)
+)
+
+var _ ccipdata.TokenPoolReader = &TokenPool{}
+
+type TokenPool struct {
+ addr common.Address
+ OffRampAddress common.Address
+ poolType string
+}
+
+func NewTokenPool(poolType string, addr common.Address, offRampAddress common.Address) *TokenPool {
+ return &TokenPool{
+ addr: addr,
+ OffRampAddress: offRampAddress,
+ poolType: poolType,
+ }
+}
+
+func (p *TokenPool) Address() common.Address {
+ return p.addr
+}
+
+func (p *TokenPool) Type() string {
+ return p.poolType
+}
+
+func GetInboundTokenPoolRateLimitCall(tokenPoolAddress common.Address, offRampAddress common.Address) rpclib.EvmCall {
+ return rpclib.NewEvmCall(
+ poolABI,
+ "currentOffRampRateLimiterState",
+ tokenPoolAddress,
+ offRampAddress,
+ )
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool_test.go
new file mode 100644
index 00000000000..3308ab05cec
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool_test.go
@@ -0,0 +1,24 @@
+package v1_2_0
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+)
+
+func TestTokenPool(t *testing.T) {
+ addr := utils.RandomAddress()
+ offRamp := utils.RandomAddress()
+ poolType := "BurnMint"
+
+ tokenPool := NewTokenPool(poolType, addr, offRamp)
+
+ assert.Equal(t, addr, tokenPool.Address())
+ assert.Equal(t, poolType, tokenPool.Type())
+
+ inboundRateLimitCall := GetInboundTokenPoolRateLimitCall(addr, offRamp)
+
+ assert.Equal(t, "currentOffRampRateLimiterState", inboundRateLimitCall.MethodName())
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool.go
new file mode 100644
index 00000000000..caf652b9e4e
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool.go
@@ -0,0 +1,48 @@
+package v1_4_0
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib"
+
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/burn_mint_token_pool_1_4_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+)
+
+var (
+ poolABI = abihelpers.MustParseABI(burn_mint_token_pool_1_4_0.BurnMintTokenPoolABI)
+)
+
+var _ ccipdata.TokenPoolReader = &TokenPool{}
+
+type TokenPool struct {
+ addr common.Address
+ RemoteChainSelector uint64
+ poolType string
+}
+
+func NewTokenPool(poolType string, addr common.Address, remoteChainSelector uint64) *TokenPool {
+ return &TokenPool{
+ addr: addr,
+ RemoteChainSelector: remoteChainSelector,
+ poolType: poolType,
+ }
+}
+
+func (p *TokenPool) Address() common.Address {
+ return p.addr
+}
+
+func (p *TokenPool) Type() string {
+ return p.poolType
+}
+
+func GetInboundTokenPoolRateLimitCall(tokenPoolAddress common.Address, remoteChainSelector uint64) rpclib.EvmCall {
+ return rpclib.NewEvmCall(
+ poolABI,
+ "getCurrentInboundRateLimiterState",
+ tokenPoolAddress,
+ remoteChainSelector,
+ )
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool_test.go
new file mode 100644
index 00000000000..8aaddc3312e
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool_test.go
@@ -0,0 +1,24 @@
+package v1_4_0
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+)
+
+func TestTokenPool(t *testing.T) {
+ addr := utils.RandomAddress()
+ chainSelector := uint64(2000)
+ poolType := "BurnMint"
+
+ tokenPool := NewTokenPool(poolType, addr, chainSelector)
+
+ assert.Equal(t, addr, tokenPool.Address())
+ assert.Equal(t, poolType, tokenPool.Type())
+
+ inboundRateLimitCall := GetInboundTokenPoolRateLimitCall(addr, chainSelector)
+
+ assert.Equal(t, "getCurrentInboundRateLimiterState", inboundRateLimitCall.MethodName())
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/commit_store.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/commit_store.go
new file mode 100644
index 00000000000..3bb582f3a21
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/commit_store.go
@@ -0,0 +1,59 @@
+package v1_5_0
+
+import (
+ "context"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+)
+
+type CommitStore struct {
+ *v1_2_0.CommitStore
+ commitStore *commit_store.CommitStore
+}
+
+func (c *CommitStore) GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error) {
+ staticConfig, err := c.commitStore.GetStaticConfig(&bind.CallOpts{Context: ctx})
+ if err != nil {
+ return cciptypes.CommitStoreStaticConfig{}, err
+ }
+ return cciptypes.CommitStoreStaticConfig{
+ ChainSelector: staticConfig.ChainSelector,
+ SourceChainSelector: staticConfig.SourceChainSelector,
+ OnRamp: cciptypes.Address(staticConfig.OnRamp.String()),
+ ArmProxy: cciptypes.Address(staticConfig.RmnProxy.String()),
+ }, nil
+}
+
+func (c *CommitStore) IsDown(ctx context.Context) (bool, error) {
+ unPausedAndNotCursed, err := c.commitStore.IsUnpausedAndNotCursed(&bind.CallOpts{Context: ctx})
+ if err != nil {
+ return true, err
+ }
+ return !unPausedAndNotCursed, nil
+}
+
+func NewCommitStore(lggr logger.Logger, addr common.Address, ec client.Client, lp logpoller.LogPoller) (*CommitStore, error) {
+ v120, err := v1_2_0.NewCommitStore(lggr, addr, ec, lp)
+ if err != nil {
+ return nil, err
+ }
+
+ commitStore, err := commit_store.NewCommitStore(addr, ec)
+ if err != nil {
+ return nil, err
+ }
+
+ return &CommitStore{
+ commitStore: commitStore,
+ CommitStore: v120,
+ }, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher.go
new file mode 100644
index 00000000000..a00ec376cdb
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher.go
@@ -0,0 +1,101 @@
+package v1_5_0
+
+import (
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+)
+
+const (
+	// MetaDataHashPrefix is the domain-separation prefix folded into every
+	// leaf's metadata hash; it must match the on-chain constant.
+	MetaDataHashPrefix = "EVM2EVMMessageHashV2"
+)
+
+// LeafHasher computes the Merkle leaf hash of a CCIPSendRequested log,
+// mirroring the on-chain hashing scheme of the v1.5.0 onramp.
+type LeafHasher struct {
+	metaDataHash [32]byte                        // precomputed, lane-specific metadata hash
+	ctx          hashutil.Hasher[[32]byte]       // hashing context (keccak in production)
+	onRamp       *evm_2_evm_onramp.EVM2EVMOnRamp // used to parse logs into messages
+}
+
+// NewLeafHasher builds a LeafHasher for one lane, precomputing the metadata
+// hash from the (source, dest, onramp) triple so HashLeaf only hashes
+// per-message data.
+func NewLeafHasher(sourceChainSelector uint64, destChainSelector uint64, onRampId common.Address, ctx hashutil.Hasher[[32]byte], onRamp *evm_2_evm_onramp.EVM2EVMOnRamp) *LeafHasher {
+	return &LeafHasher{
+		metaDataHash: v1_0_0.GetMetaDataHash(ctx, ctx.Hash([]byte(MetaDataHashPrefix)), sourceChainSelector, onRampId, destChainSelector),
+		ctx:          ctx,
+		onRamp:       onRamp,
+	}
+}
+
+// HashLeaf parses a CCIPSendRequested log and returns its Merkle leaf hash.
+// The encoding order and field layout must match the on-chain hashing exactly;
+// do not reorder any of the ABI-encode steps below.
+func (t *LeafHasher) HashLeaf(log types.Log) ([32]byte, error) {
+	msg, err := t.onRamp.ParseCCIPSendRequested(log)
+	if err != nil {
+		return [32]byte{}, err
+	}
+	message := msg.Message
+	// Token transfers are hashed separately from the fixed-size fields.
+	encodedTokens, err := abihelpers.ABIEncode(
+		`[
+{"components": [{"name":"token","type":"address"},{"name":"amount","type":"uint256"}], "type":"tuple[]"}]`, message.TokenAmounts)
+	if err != nil {
+		return [32]byte{}, err
+	}
+
+	bytesArray, err := abi.NewType("bytes[]", "bytes[]", nil)
+	if err != nil {
+		return [32]byte{}, err
+	}
+
+	encodedSourceTokenData, err := abi.Arguments{abi.Argument{Type: bytesArray}}.PackValues([]interface{}{message.SourceTokenData})
+	if err != nil {
+		return [32]byte{}, err
+	}
+
+	// Fixed-size scalar fields are packed and hashed as one word.
+	packedFixedSizeValues, err := abihelpers.ABIEncode(
+		`[
+{"name": "sender", "type":"address"},
+{"name": "receiver", "type":"address"},
+{"name": "sequenceNumber", "type":"uint64"},
+{"name": "gasLimit", "type":"uint256"},
+{"name": "strict", "type":"bool"},
+{"name": "nonce", "type":"uint64"},
+{"name": "feeToken","type": "address"},
+{"name": "feeTokenAmount","type": "uint256"}
+]`,
+		message.Sender,
+		message.Receiver,
+		message.SequenceNumber,
+		message.GasLimit,
+		message.Strict,
+		message.Nonce,
+		message.FeeToken,
+		message.FeeTokenAmount,
+	)
+	if err != nil {
+		return [32]byte{}, err
+	}
+	fixedSizeValuesHash := t.ctx.Hash(packedFixedSizeValues)
+
+	// Final leaf: domain separator + metadata hash + the three sub-hashes.
+	packedValues, err := abihelpers.ABIEncode(
+		`[
+{"name": "leafDomainSeparator","type":"bytes1"},
+{"name": "metadataHash", "type":"bytes32"},
+{"name": "fixedSizeValuesHash", "type":"bytes32"},
+{"name": "dataHash", "type":"bytes32"},
+{"name": "tokenAmountsHash", "type":"bytes32"},
+{"name": "sourceTokenDataHash", "type":"bytes32"}
+]`,
+		v1_0_0.LeafDomainSeparator,
+		t.metaDataHash,
+		fixedSizeValuesHash,
+		t.ctx.Hash(message.Data),
+		t.ctx.Hash(encodedTokens),
+		t.ctx.Hash(encodedSourceTokenData),
+	)
+	if err != nil {
+		return [32]byte{}, err
+	}
+	return t.ctx.Hash(packedValues), nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher_test.go
new file mode 100644
index 00000000000..2a585f7bd1e
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher_test.go
@@ -0,0 +1,78 @@
+package v1_5_0
+
+import (
+ "encoding/hex"
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+)
+
+// TestHasherV1_5_0 pins the leaf hash of two reference messages against the
+// spec vectors. Renamed from TestHasherV1_4_0: this package tests v1.5.0.
+func TestHasherV1_5_0(t *testing.T) {
+	sourceChainSelector, destChainSelector := uint64(1), uint64(4)
+	onRampAddress := common.HexToAddress("0x5550000000000000000000000000000000000001")
+	onRampABI := abihelpers.MustParseABI(evm_2_evm_onramp.EVM2EVMOnRampABI)
+
+	hashingCtx := hashutil.NewKeccak()
+	ramp, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampAddress, nil)
+	require.NoError(t, err)
+	hasher := NewLeafHasher(sourceChainSelector, destChainSelector, onRampAddress, hashingCtx, ramp)
+
+	message := evm_2_evm_onramp.InternalEVM2EVMMessage{
+		SourceChainSelector: sourceChainSelector,
+		Sender:              common.HexToAddress("0x1110000000000000000000000000000000000001"),
+		Receiver:            common.HexToAddress("0x2220000000000000000000000000000000000001"),
+		SequenceNumber:      1337,
+		GasLimit:            big.NewInt(100),
+		Strict:              false,
+		Nonce:               1337,
+		FeeToken:            common.Address{},
+		FeeTokenAmount:      big.NewInt(1),
+		Data:                []byte{},
+		TokenAmounts:        []evm_2_evm_onramp.ClientEVMTokenAmount{{Token: common.HexToAddress("0x4440000000000000000000000000000000000001"), Amount: big.NewInt(12345678900)}},
+		SourceTokenData:     [][]byte{},
+		MessageId:           [32]byte{},
+	}
+
+	data, err := onRampABI.Events[CCIPSendRequestedEventName].Inputs.Pack(message)
+	require.NoError(t, err)
+	hash, err := hasher.HashLeaf(types.Log{Topics: []common.Hash{CCIPSendRequestEventSig}, Data: data})
+	require.NoError(t, err)
+
+	// NOTE: Must match spec
+	require.Equal(t, "46ad031bfb052db2e4a2514fed8dc480b98e5ce4acb55d5640d91407e0d8a3e9", hex.EncodeToString(hash[:]))
+
+	message = evm_2_evm_onramp.InternalEVM2EVMMessage{
+		SourceChainSelector: sourceChainSelector,
+		Sender:              common.HexToAddress("0x1110000000000000000000000000000000000001"),
+		Receiver:            common.HexToAddress("0x2220000000000000000000000000000000000001"),
+		SequenceNumber:      1337,
+		GasLimit:            big.NewInt(100),
+		Strict:              false,
+		Nonce:               1337,
+		FeeToken:            common.Address{},
+		FeeTokenAmount:      big.NewInt(1e12),
+		Data:                []byte("foo bar baz"),
+		TokenAmounts: []evm_2_evm_onramp.ClientEVMTokenAmount{
+			{Token: common.HexToAddress("0x4440000000000000000000000000000000000001"), Amount: big.NewInt(12345678900)},
+			{Token: common.HexToAddress("0x6660000000000000000000000000000000000001"), Amount: big.NewInt(4204242)},
+		},
+		SourceTokenData: [][]byte{{0x2, 0x1}},
+		MessageId:       [32]byte{},
+	}
+
+	data, err = onRampABI.Events[CCIPSendRequestedEventName].Inputs.Pack(message)
+	require.NoError(t, err)
+	hash, err = hasher.HashLeaf(types.Log{Topics: []common.Hash{CCIPSendRequestEventSig}, Data: data})
+	require.NoError(t, err)
+
+	// NOTE: Must match spec
+	require.Equal(t, "4362a13a42e52ff5ce4324e7184dc7aa41704c3146bc842d35d95b94b32a78b6", hex.EncodeToString(hash[:]))
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp.go
new file mode 100644
index 00000000000..cac61c67878
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp.go
@@ -0,0 +1,199 @@
+package v1_5_0
+
+import (
+ "context"
+ "math/big"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/pkg/errors"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices"
+)
+
+var (
+	abiOffRamp = abihelpers.MustParseABI(evm_2_evm_offramp.EVM2EVMOffRampABI)
+	// Compile-time assertion that OffRamp implements the generic reader interface.
+	_ ccipdata.OffRampReader = &OffRamp{}
+	// Event IDs used to invalidate the rate-limit token cache.
+	RateLimitTokenAddedEvent   = abihelpers.MustGetEventID("TokenAggregateRateLimitAdded", abiOffRamp)
+	RateLimitTokenRemovedEvent = abihelpers.MustGetEventID("TokenAggregateRateLimitRemoved", abiOffRamp)
+)
+
+// ExecOnchainConfig is the v1.5.0 offramp dynamic config, aliased so that
+// abihelpers.DecodeAbiStruct can target it with the ABI below.
+type ExecOnchainConfig evm_2_evm_offramp.EVM2EVMOffRampDynamicConfig
+
+// AbiString returns the ABI fragment used to decode the onchain exec config;
+// field order must match the contract's dynamic config struct.
+func (d ExecOnchainConfig) AbiString() string {
+	return `
+	[
+		{
+			"components": [
+				{"name": "permissionLessExecutionThresholdSeconds", "type": "uint32"},
+				{"name": "maxDataBytes", "type": "uint32"},
+				{"name": "maxNumberOfTokensPerMsg", "type": "uint16"},
+				{"name": "router", "type": "address"},
+				{"name": "priceRegistry", "type": "address"},
+				{"name": "maxPoolReleaseOrMintGas", "type": "uint32"},
+				{"name": "maxTokenTransferGas", "type": "uint32"}
+			],
+			"type": "tuple"
+		}
+	]`
+}
+
+// Validate checks that every required field of the decoded onchain exec
+// config is set, returning the first violation found (check order matches
+// the struct's required fields).
+func (d ExecOnchainConfig) Validate() error {
+	required := []struct {
+		missing bool
+		msg     string
+	}{
+		{d.PermissionLessExecutionThresholdSeconds == 0, "must set PermissionLessExecutionThresholdSeconds"},
+		{d.Router == (common.Address{}), "must set Router address"},
+		{d.PriceRegistry == (common.Address{}), "must set PriceRegistry address"},
+		{d.MaxNumberOfTokensPerMsg == 0, "must set MaxNumberOfTokensPerMsg"},
+		{d.MaxPoolReleaseOrMintGas == 0, "must set MaxPoolReleaseOrMintGas"},
+		{d.MaxTokenTransferGas == 0, "must set MaxTokenTransferGas"},
+	}
+	for _, check := range required {
+		if check.missing {
+			return errors.New(check.msg)
+		}
+	}
+	return nil
+}
+
+// OffRamp is the v1.5.0 offramp reader. It embeds the v1.2.0 reader and
+// overrides the token-related queries, which changed in v1.5.0.
+type OffRamp struct {
+	*v1_2_0.OffRamp
+	offRampV150 evm_2_evm_offramp.EVM2EVMOffRampInterface
+	// cachedRateLimitTokens is invalidated by the token added/removed log events.
+	cachedRateLimitTokens cache.AutoSync[cciptypes.OffRampTokens]
+}
+
+// GetTokens returns the offramp's aggregate-rate-limit token sets. The
+// v1.5.0 offramp no longer tracks a full source/dest token mapping; only
+// the rate-limited tokens are available, served from the log-backed cache.
+func (o *OffRamp) GetTokens(ctx context.Context) (cciptypes.OffRampTokens, error) {
+	sourceTokens, destTokens, err := o.GetSourceAndDestRateLimitTokens(ctx)
+	if err != nil {
+		return cciptypes.OffRampTokens{}, err
+	}
+	return cciptypes.OffRampTokens{
+		SourceTokens:      sourceTokens,
+		DestinationTokens: destTokens,
+	}, nil
+}
+
+// GetSourceAndDestRateLimitTokens fetches the rate-limited token pairs from
+// the contract (via the event-invalidated cache). The two slices are parallel:
+// sourceTokens[i] maps to destTokens[i], and a length mismatch is an error.
+func (o *OffRamp) GetSourceAndDestRateLimitTokens(ctx context.Context) (sourceTokens []cciptypes.Address, destTokens []cciptypes.Address, err error) {
+	cachedTokens, err := o.cachedRateLimitTokens.Get(ctx, func(ctx context.Context) (cciptypes.OffRampTokens, error) {
+		tokens, err2 := o.offRampV150.GetAllRateLimitTokens(&bind.CallOpts{Context: ctx})
+		if err2 != nil {
+			return cciptypes.OffRampTokens{}, err2
+		}
+
+		if len(tokens.SourceTokens) != len(tokens.DestTokens) {
+			return cciptypes.OffRampTokens{}, errors.New("source and destination tokens are not the same length")
+		}
+
+		return cciptypes.OffRampTokens{
+			DestinationTokens: ccipcalc.EvmAddrsToGeneric(tokens.DestTokens...),
+			SourceTokens:      ccipcalc.EvmAddrsToGeneric(tokens.SourceTokens...),
+		}, nil
+	})
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "failed to get rate limit tokens, if token set is large (~400k) batching may be needed")
+	}
+	return cachedTokens.SourceTokens, cachedTokens.DestinationTokens, nil
+}
+
+// GetSourceToDestTokensMapping builds a source->dest token map from the
+// parallel rate-limit token slices. Equal lengths are guaranteed by
+// GetSourceAndDestRateLimitTokens, so indexing destTokens[i] is safe.
+func (o *OffRamp) GetSourceToDestTokensMapping(ctx context.Context) (map[cciptypes.Address]cciptypes.Address, error) {
+	sourceTokens, destTokens, err := o.GetSourceAndDestRateLimitTokens(ctx)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get rate limit tokens, if token set is large (~400k) batching may be needed")
+	}
+
+	if sourceTokens == nil || destTokens == nil {
+		return nil, errors.New("source or destination tokens are nil")
+	}
+
+	mapping := make(map[cciptypes.Address]cciptypes.Address)
+	for i, sourceToken := range sourceTokens {
+		mapping[sourceToken] = destTokens[i]
+	}
+	return mapping, nil
+}
+
+// ChangeConfig decodes and applies a new onchain/offchain exec config.
+// Same as the v1.2.0 method, except for the ExecOnchainConfig type.
+// It returns the new price registry address and the destination chain's
+// wrapped-native token address.
+func (o *OffRamp) ChangeConfig(ctx context.Context, onchainConfigBytes []byte, offchainConfigBytes []byte) (cciptypes.Address, cciptypes.Address, error) {
+	onchainConfigParsed, err := abihelpers.DecodeAbiStruct[ExecOnchainConfig](onchainConfigBytes)
+	if err != nil {
+		return "", "", err
+	}
+
+	offchainConfigParsed, err := ccipconfig.DecodeOffchainConfig[v1_2_0.JSONExecOffchainConfig](offchainConfigBytes)
+	if err != nil {
+		return "", "", err
+	}
+	destRouter, err := router.NewRouter(onchainConfigParsed.Router, o.Client)
+	if err != nil {
+		return "", "", err
+	}
+	destWrappedNative, err := destRouter.GetWrappedNative(nil)
+	if err != nil {
+		return "", "", err
+	}
+	offchainConfig := cciptypes.ExecOffchainConfig{
+		DestOptimisticConfirmations: offchainConfigParsed.DestOptimisticConfirmations,
+		BatchGasLimit:               offchainConfigParsed.BatchGasLimit,
+		RelativeBoostPerWaitHour:    offchainConfigParsed.RelativeBoostPerWaitHour,
+		InflightCacheExpiry:         offchainConfigParsed.InflightCacheExpiry,
+		RootSnoozeTime:              offchainConfigParsed.RootSnoozeTime,
+		MessageVisibilityInterval:   offchainConfigParsed.MessageVisibilityInterval,
+		BatchingStrategyID:          offchainConfigParsed.BatchingStrategyID,
+	}
+	onchainConfig := cciptypes.ExecOnchainConfig{
+		PermissionLessExecutionThresholdSeconds: time.Second * time.Duration(onchainConfigParsed.PermissionLessExecutionThresholdSeconds),
+		Router:                                  cciptypes.Address(onchainConfigParsed.Router.String()),
+	}
+	priceEstimator := prices.NewDAGasPriceEstimator(o.Estimator, o.DestMaxGasPrice, 0, 0)
+
+	o.UpdateDynamicConfig(onchainConfig, offchainConfig, priceEstimator)
+
+	// Fix: the structured-log keys were previously swapped, labeling the
+	// onchain config as "offchainConfig" and vice versa.
+	o.Logger.Infow("Starting exec plugin",
+		"onchainConfig", onchainConfigParsed,
+		"offchainConfig", offchainConfigParsed)
+	return cciptypes.Address(onchainConfigParsed.PriceRegistry.String()),
+		cciptypes.Address(destWrappedNative.String()), nil
+}
+
+// NewOffRamp constructs a v1.5.0 offramp reader on top of the v1.2.0
+// implementation, wiring up the v1.5.0 binding and the event-invalidated
+// rate-limit token cache.
+func NewOffRamp(lggr logger.Logger, addr common.Address, ec client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int) (*OffRamp, error) {
+	v120, err := v1_2_0.NewOffRamp(lggr, addr, ec, lp, estimator, destMaxGasPrice)
+	if err != nil {
+		return nil, err
+	}
+
+	offRamp, err := evm_2_evm_offramp.NewEVM2EVMOffRamp(addr, ec)
+	if err != nil {
+		return nil, err
+	}
+
+	// Override the v1.2.0 report args with the v1.5.0 manuallyExecute input.
+	v120.ExecutionReportArgs = abihelpers.MustGetMethodInputs("manuallyExecute", abiOffRamp)[:1]
+
+	return &OffRamp{
+		OffRamp:     v120,
+		offRampV150: offRamp,
+		cachedRateLimitTokens: cache.NewLogpollerEventsBased[cciptypes.OffRampTokens](
+			lp,
+			[]common.Hash{RateLimitTokenAddedEvent, RateLimitTokenRemovedEvent},
+			offRamp.Address(),
+		),
+	}, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp_test.go
new file mode 100644
index 00000000000..a95445ec028
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp_test.go
@@ -0,0 +1 @@
+package v1_5_0
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp.go
new file mode 100644
index 00000000000..481933f89ad
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp.go
@@ -0,0 +1,259 @@
+package v1_5_0
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/arm_contract"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/logpollerutil"
+)
+
+var (
+	// Backwards compat for integration tests; populated from the onramp ABI in init().
+	CCIPSendRequestEventSig common.Hash
+	ConfigSetEventSig       common.Hash
+)
+
+const (
+	// CCIPSendRequestSeqNumIndex is the log-data word index holding the sequence number.
+	CCIPSendRequestSeqNumIndex = 4
+	CCIPSendRequestedEventName = "CCIPSendRequested"
+	ConfigSetEventName         = "ConfigSet"
+)
+
+// init derives the event signatures from the generated onramp ABI; a parse
+// failure here means the generated bindings are broken, so panicking is fine.
+func init() {
+	onRampABI, err := abi.JSON(strings.NewReader(evm_2_evm_onramp.EVM2EVMOnRampABI))
+	if err != nil {
+		panic(err)
+	}
+	CCIPSendRequestEventSig = abihelpers.MustGetEventID(CCIPSendRequestedEventName, onRampABI)
+	ConfigSetEventSig = abihelpers.MustGetEventID(ConfigSetEventName, onRampABI)
+}
+
+// Compile-time assertion that OnRamp implements the generic reader interface.
+var _ ccipdata.OnRampReader = &OnRamp{}
+
+// OnRamp is the v1.5.0 onramp reader: it parses CCIPSendRequested logs from
+// the source-chain log poller and answers config/curse queries.
+type OnRamp struct {
+	onRamp                           *evm_2_evm_onramp.EVM2EVMOnRamp
+	address                          common.Address
+	destChainSelectorBytes           [16]byte // dest selector encoding used by RMN curse subjects
+	lggr                             logger.Logger
+	lp                               logpoller.LogPoller
+	leafHasher                       ccipdata.LeafHasherInterface[[32]byte]
+	client                           client.Client
+	sendRequestedEventSig            common.Hash
+	sendRequestedSeqNumberWord       int
+	filters                          []logpoller.Filter
+	cachedSourcePriceRegistryAddress cache.AutoSync[cciptypes.Address]
+	// Static config can be cached, because it's never expected to change.
+	// The only way to change that is through the contract's constructor (redeployment)
+	cachedStaticConfig cache.OnceCtxFunction[evm_2_evm_onramp.EVM2EVMOnRampStaticConfig]
+	cachedRmnContract  cache.OnceCtxFunction[*arm_contract.ARMContract]
+}
+
+// NewOnRamp builds the v1.5.0 onramp reader for one lane, declaring the log
+// poller filters it depends on (registered later via RegisterFilters) and
+// setting up once-per-process caches for the static config and RMN contract.
+func NewOnRamp(lggr logger.Logger, sourceSelector, destSelector uint64, onRampAddress common.Address, sourceLP logpoller.LogPoller, source client.Client) (*OnRamp, error) {
+	onRamp, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampAddress, source)
+	if err != nil {
+		return nil, err
+	}
+
+	// Subscribe to the relevant logs
+	// Note we can keep the same prefix across 1.0/1.1 and 1.2 because the onramp addresses will be different
+	filters := []logpoller.Filter{
+		{
+			Name:      logpoller.FilterName(ccipdata.COMMIT_CCIP_SENDS, onRampAddress),
+			EventSigs: []common.Hash{CCIPSendRequestEventSig},
+			Addresses: []common.Address{onRampAddress},
+			Retention: ccipdata.CommitExecLogsRetention,
+		},
+		{
+			Name:      logpoller.FilterName(ccipdata.CONFIG_CHANGED, onRampAddress),
+			EventSigs: []common.Hash{ConfigSetEventSig},
+			Addresses: []common.Address{onRampAddress},
+			Retention: ccipdata.CacheEvictionLogsRetention,
+		},
+	}
+	cachedStaticConfig := cache.OnceCtxFunction[evm_2_evm_onramp.EVM2EVMOnRampStaticConfig](func(ctx context.Context) (evm_2_evm_onramp.EVM2EVMOnRampStaticConfig, error) {
+		return onRamp.GetStaticConfig(&bind.CallOpts{Context: ctx})
+	})
+	// The RMN contract address comes from the static config, so it can be
+	// resolved once and reused as well.
+	cachedRmnContract := cache.OnceCtxFunction[*arm_contract.ARMContract](func(ctx context.Context) (*arm_contract.ARMContract, error) {
+		staticConfig, err := cachedStaticConfig(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		return arm_contract.NewARMContract(staticConfig.RmnProxy, source)
+	})
+
+	return &OnRamp{
+		lggr:                       lggr,
+		client:                     source,
+		destChainSelectorBytes:     ccipcommon.SelectorToBytes(destSelector),
+		lp:                         sourceLP,
+		leafHasher:                 NewLeafHasher(sourceSelector, destSelector, onRampAddress, hashutil.NewKeccak(), onRamp),
+		onRamp:                     onRamp,
+		filters:                    filters,
+		address:                    onRampAddress,
+		sendRequestedSeqNumberWord: CCIPSendRequestSeqNumIndex,
+		sendRequestedEventSig:      CCIPSendRequestEventSig,
+		cachedSourcePriceRegistryAddress: cache.NewLogpollerEventsBased[cciptypes.Address](
+			sourceLP,
+			[]common.Hash{ConfigSetEventSig},
+			onRampAddress,
+		),
+		cachedStaticConfig: cache.CallOnceOnNoError(cachedStaticConfig),
+		cachedRmnContract:  cache.CallOnceOnNoError(cachedRmnContract),
+	}, nil
+}
+
+// Address returns the onramp contract address in generic form; it never errors.
+func (o *OnRamp) Address(context.Context) (cciptypes.Address, error) {
+	addr := o.onRamp.Address()
+	return ccipcalc.EvmAddrToGeneric(addr), nil
+}
+
+// GetDynamicConfig reads the onramp's dynamic config and maps it to the
+// generic cciptypes form.
+func (o *OnRamp) GetDynamicConfig(ctx context.Context) (cciptypes.OnRampDynamicConfig, error) {
+	if o.onRamp == nil {
+		return cciptypes.OnRampDynamicConfig{}, fmt.Errorf("onramp not initialized")
+	}
+	// Propagate the caller's context so the eth_call honors cancellation and
+	// deadlines (previously an empty CallOpts dropped ctx).
+	config, err := o.onRamp.GetDynamicConfig(&bind.CallOpts{Context: ctx})
+	if err != nil {
+		return cciptypes.OnRampDynamicConfig{}, fmt.Errorf("get dynamic config v1.5: %w", err)
+	}
+	return cciptypes.OnRampDynamicConfig{
+		Router:                            ccipcalc.EvmAddrToGeneric(config.Router),
+		MaxNumberOfTokensPerMsg:           config.MaxNumberOfTokensPerMsg,
+		DestGasOverhead:                   config.DestGasOverhead,
+		DestGasPerPayloadByte:             config.DestGasPerPayloadByte,
+		DestDataAvailabilityOverheadGas:   config.DestDataAvailabilityOverheadGas,
+		DestGasPerDataAvailabilityByte:    config.DestGasPerDataAvailabilityByte,
+		DestDataAvailabilityMultiplierBps: config.DestDataAvailabilityMultiplierBps,
+		PriceRegistry:                     ccipcalc.EvmAddrToGeneric(config.PriceRegistry),
+		MaxDataBytes:                      config.MaxDataBytes,
+		MaxPerMsgGasLimit:                 config.MaxPerMsgGasLimit,
+	}, nil
+}
+
+// SourcePriceRegistryAddress returns the price registry from the dynamic
+// config, cached until a ConfigSet event invalidates it.
+func (o *OnRamp) SourcePriceRegistryAddress(ctx context.Context) (cciptypes.Address, error) {
+	return o.cachedSourcePriceRegistryAddress.Get(ctx, func(ctx context.Context) (cciptypes.Address, error) {
+		c, err := o.GetDynamicConfig(ctx)
+		if err != nil {
+			return "", err
+		}
+		return c.PriceRegistry, nil
+	})
+}
+
+// GetSendRequestsBetweenSeqNums returns the parsed CCIPSendRequested
+// messages whose sequence numbers fall in [seqNumMin, seqNumMax], querying
+// the log poller by the seq-num data word. When finalized is true only
+// finalized logs are considered.
+func (o *OnRamp) GetSendRequestsBetweenSeqNums(ctx context.Context, seqNumMin, seqNumMax uint64, finalized bool) ([]cciptypes.EVM2EVMMessageWithTxMeta, error) {
+	logs, err := o.lp.LogsDataWordRange(
+		ctx,
+		o.sendRequestedEventSig,
+		o.address,
+		o.sendRequestedSeqNumberWord,
+		logpoller.EvmWord(seqNumMin),
+		logpoller.EvmWord(seqNumMax),
+		ccipdata.LogsConfirmations(finalized),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	parsedLogs, err := ccipdata.ParseLogs[cciptypes.EVM2EVMMessage](logs, o.lggr, o.logToMessage)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([]cciptypes.EVM2EVMMessageWithTxMeta, 0, len(logs))
+	for _, log := range parsedLogs {
+		res = append(res, cciptypes.EVM2EVMMessageWithTxMeta{
+			TxMeta:         log.TxMeta,
+			EVM2EVMMessage: log.Data,
+		})
+	}
+	return res, nil
+}
+
+// RouterAddress returns the router from the onramp's dynamic config.
+func (o *OnRamp) RouterAddress(ctx context.Context) (cciptypes.Address, error) {
+	// Propagate ctx so the eth_call honors cancellation (was GetDynamicConfig(nil)).
+	config, err := o.onRamp.GetDynamicConfig(&bind.CallOpts{Context: ctx})
+	if err != nil {
+		return "", err
+	}
+	return ccipcalc.EvmAddrToGeneric(config.Router), nil
+}
+
+// IsSourceChainHealthy reports the source log poller's health. A poller
+// error is deliberately mapped to (false, nil): unhealthiness is the answer,
+// not a failure of this call, so the underlying error is not propagated.
+func (o *OnRamp) IsSourceChainHealthy(context.Context) (bool, error) {
+	if err := o.lp.Healthy(); err != nil {
+		return false, nil
+	}
+	return true, nil
+}
+
+// IsSourceCursed asks the RMN (resolved once via the cached static config)
+// whether this lane's destination selector is cursed on the source chain.
+func (o *OnRamp) IsSourceCursed(ctx context.Context) (bool, error) {
+	arm, err := o.cachedRmnContract(ctx)
+	if err != nil {
+		return false, fmt.Errorf("initializing RMN contract through the RmnProxy: %w", err)
+	}
+
+	cursed, err := arm.IsCursed(&bind.CallOpts{Context: ctx}, o.destChainSelectorBytes)
+	if err != nil {
+		return false, fmt.Errorf("checking if source is cursed by RMN: %w", err)
+	}
+	return cursed, nil
+}
+
+// Close unregisters this reader's log poller filters.
+func (o *OnRamp) Close() error {
+	return logpollerutil.UnregisterLpFilters(o.lp, o.filters)
+}
+
+// RegisterFilters registers the filters declared in NewOnRamp with the log
+// poller; must be called before querying send requests.
+func (o *OnRamp) RegisterFilters() error {
+	return logpollerutil.RegisterLpFilters(o.lp, o.filters)
+}
+
+// logToMessage converts one CCIPSendRequested log into the generic message
+// form, computing its Merkle leaf hash along the way.
+func (o *OnRamp) logToMessage(log types.Log) (*cciptypes.EVM2EVMMessage, error) {
+	msg, err := o.onRamp.ParseCCIPSendRequested(log)
+	if err != nil {
+		return nil, err
+	}
+	h, err := o.leafHasher.HashLeaf(log)
+	if err != nil {
+		return nil, err
+	}
+	tokensAndAmounts := make([]cciptypes.TokenAmount, len(msg.Message.TokenAmounts))
+	for i, tokenAndAmount := range msg.Message.TokenAmounts {
+		tokensAndAmounts[i] = cciptypes.TokenAmount{
+			Token:  ccipcalc.EvmAddrToGeneric(tokenAndAmount.Token),
+			Amount: tokenAndAmount.Amount,
+		}
+	}
+
+	return &cciptypes.EVM2EVMMessage{
+		SequenceNumber:      msg.Message.SequenceNumber,
+		GasLimit:            msg.Message.GasLimit,
+		Nonce:               msg.Message.Nonce,
+		MessageID:           msg.Message.MessageId,
+		SourceChainSelector: msg.Message.SourceChainSelector,
+		Sender:              ccipcalc.EvmAddrToGeneric(msg.Message.Sender),
+		Receiver:            ccipcalc.EvmAddrToGeneric(msg.Message.Receiver),
+		Strict:              msg.Message.Strict,
+		FeeToken:            ccipcalc.EvmAddrToGeneric(msg.Message.FeeToken),
+		FeeTokenAmount:      msg.Message.FeeTokenAmount,
+		Data:                msg.Message.Data,
+		TokenAmounts:        tokensAndAmounts,
+		SourceTokenData:     msg.Message.SourceTokenData, // Breaking change 1.2
+		Hash:                h,
+	}, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp_test.go
new file mode 100644
index 00000000000..3ca360c8ff2
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp_test.go
@@ -0,0 +1,210 @@
+package v1_5_0
+
+import (
+ "context"
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_arm_contract"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+)
+
+// TestLogPollerClient_GetSendRequestsBetweenSeqNums1_5_0 verifies that the
+// reader queries the log poller with the expected confirmations for both the
+// finalized and unfinalized paths. Renamed from ...1_4_0: this package tests
+// the v1.5.0 reader.
+func TestLogPollerClient_GetSendRequestsBetweenSeqNums1_5_0(t *testing.T) {
+	onRampAddr := utils.RandomAddress()
+	seqNum := uint64(100)
+	limit := uint64(10)
+	lggr := logger.TestLogger(t)
+
+	tests := []struct {
+		name          string
+		finalized     bool
+		confirmations evmtypes.Confirmations
+	}{
+		{"finalized", true, evmtypes.Finalized},
+		{"unfinalized", false, evmtypes.Confirmations(0)},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			lp := mocks.NewLogPoller(t)
+			onRampV2, err := NewOnRamp(lggr, 1, 1, onRampAddr, lp, nil)
+			require.NoError(t, err)
+
+			lp.On("LogsDataWordRange",
+				mock.Anything,
+				onRampV2.sendRequestedEventSig,
+				onRampAddr,
+				onRampV2.sendRequestedSeqNumberWord,
+				abihelpers.EvmWord(seqNum),
+				abihelpers.EvmWord(seqNum+limit),
+				tt.confirmations,
+			).Once().Return([]logpoller.Log{}, nil)
+
+			events, err1 := onRampV2.GetSendRequestsBetweenSeqNums(context.Background(), seqNum, seqNum+limit, tt.finalized)
+			assert.NoError(t, err1)
+			assert.Empty(t, events)
+
+			lp.AssertExpectations(t)
+		})
+	}
+}
+
+// Test_ProperlyRecognizesPerLaneCurses checks against a simulated backend
+// that IsSourceCursed reacts only to curses on this lane's destination
+// selector, and clears once the curse is unvoted.
+func Test_ProperlyRecognizesPerLaneCurses(t *testing.T) {
+	user, bc := ccipdata.NewSimulation(t)
+	ctx := testutils.Context(t)
+	destChainSelector := uint64(100)
+	sourceChainSelector := uint64(200)
+	onRampAddress, mockRMN, mockRMNAddress := setupOnRampV1_5_0(t, user, bc)
+
+	onRamp, err := NewOnRamp(logger.TestLogger(t), 1, destChainSelector, onRampAddress, mocks.NewLogPoller(t), bc)
+	require.NoError(t, err)
+
+	// Point the cached static config at the mock RMN without hitting the chain.
+	onRamp.cachedStaticConfig = func(ctx context.Context) (evm_2_evm_onramp.EVM2EVMOnRampStaticConfig, error) {
+		return evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{
+			RmnProxy: mockRMNAddress,
+		}, nil
+	}
+
+	// Lane is not cursed right after deployment
+	isCursed, err := onRamp.IsSourceCursed(ctx)
+	require.NoError(t, err)
+	assert.False(t, isCursed)
+
+	// Cursing different chain selector
+	_, err = mockRMN.VoteToCurse0(user, [32]byte{}, ccipcommon.SelectorToBytes(sourceChainSelector))
+	require.NoError(t, err)
+	bc.Commit()
+
+	isCursed, err = onRamp.IsSourceCursed(ctx)
+	require.NoError(t, err)
+	assert.False(t, isCursed)
+
+	// Cursing the correct chain selector
+	_, err = mockRMN.VoteToCurse0(user, [32]byte{}, ccipcommon.SelectorToBytes(destChainSelector))
+	require.NoError(t, err)
+	bc.Commit()
+
+	isCursed, err = onRamp.IsSourceCursed(ctx)
+	require.NoError(t, err)
+	assert.True(t, isCursed)
+
+	// Uncursing the chain selector
+	_, err = mockRMN.OwnerUnvoteToCurse(user, []mock_arm_contract.RMNUnvoteToCurseRecord{}, ccipcommon.SelectorToBytes(destChainSelector))
+	require.NoError(t, err)
+	bc.Commit()
+
+	isCursed, err = onRamp.IsSourceCursed(ctx)
+	require.NoError(t, err)
+	assert.False(t, isCursed)
+}
+
+// BenchmarkIsSourceCursedWithCache was written to benchmark IsSourceCursed
+// before and after the caching of StaticConfig and RMNContract; errors are
+// intentionally ignored since only the call cost is measured.
+func BenchmarkIsSourceCursedWithCache(b *testing.B) {
+	user, bc := ccipdata.NewSimulation(b)
+	ctx := testutils.Context(b)
+	destChainSelector := uint64(100)
+	onRampAddress, _, _ := setupOnRampV1_5_0(b, user, bc)
+
+	onRamp, err := NewOnRamp(logger.TestLogger(b), 1, destChainSelector, onRampAddress, mocks.NewLogPoller(b), bc)
+	require.NoError(b, err)
+
+	for i := 0; i < b.N; i++ {
+		_, _ = onRamp.IsSourceCursed(ctx)
+	}
+}
+
+// setupOnRampV1_5_0 deploys a mock RMN and a v1.5.0 onramp (wired to that
+// RMN via RmnProxy) on the simulated backend, returning the onramp address,
+// the mock RMN binding, and the RMN address.
+func setupOnRampV1_5_0(t testing.TB, user *bind.TransactOpts, bc *client.SimulatedBackendClient) (common.Address, *mock_arm_contract.MockARMContract, common.Address) {
+	rmnAddress, transaction, rmnContract, err := mock_arm_contract.DeployMockARMContract(user, bc)
+	bc.Commit()
+	require.NoError(t, err)
+	ccipdata.AssertNonRevert(t, transaction, bc, user)
+
+	linkTokenAddress := common.HexToAddress("0x000011")
+	staticConfig := evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{
+		LinkToken:          linkTokenAddress,
+		ChainSelector:      testutils.SimulatedChainID.Uint64(),
+		DestChainSelector:  testutils.SimulatedChainID.Uint64(),
+		DefaultTxGasLimit:  30000,
+		MaxNopFeesJuels:    big.NewInt(1000000),
+		PrevOnRamp:         common.Address{},
+		RmnProxy:           rmnAddress,
+		TokenAdminRegistry: utils.RandomAddress(),
+	}
+	dynamicConfig := evm_2_evm_onramp.EVM2EVMOnRampDynamicConfig{
+		Router:                            common.HexToAddress("0x0000000000000000000000000000000000000150"),
+		MaxNumberOfTokensPerMsg:           0,
+		DestGasOverhead:                   0,
+		DestGasPerPayloadByte:             0,
+		DestDataAvailabilityOverheadGas:   0,
+		DestGasPerDataAvailabilityByte:    0,
+		DestDataAvailabilityMultiplierBps: 0,
+		PriceRegistry:                     utils.RandomAddress(),
+		MaxDataBytes:                      0,
+		MaxPerMsgGasLimit:                 0,
+		DefaultTokenFeeUSDCents:           50,
+		DefaultTokenDestGasOverhead:       34_000,
+		DefaultTokenDestBytesOverhead:     500,
+	}
+	rateLimiterConfig := evm_2_evm_onramp.RateLimiterConfig{
+		IsEnabled: false,
+		Capacity:  big.NewInt(5),
+		Rate:      big.NewInt(5),
+	}
+	feeTokenConfigs := []evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs{
+		{
+			Token:                      linkTokenAddress,
+			NetworkFeeUSDCents:         0,
+			GasMultiplierWeiPerEth:     0,
+			PremiumMultiplierWeiPerEth: 0,
+			Enabled:                    false,
+		},
+	}
+	tokenTransferConfigArgs := []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{
+		{
+			Token:                     linkTokenAddress,
+			MinFeeUSDCents:            0,
+			MaxFeeUSDCents:            0,
+			DeciBps:                   0,
+			DestGasOverhead:           0,
+			DestBytesOverhead:         32,
+			AggregateRateLimitEnabled: true,
+		},
+	}
+	nopsAndWeights := []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{
+		{
+			Nop:    utils.RandomAddress(),
+			Weight: 1,
+		},
+	}
+	onRampAddress, transaction, _, err := evm_2_evm_onramp.DeployEVM2EVMOnRamp(
+		user,
+		bc,
+		staticConfig,
+		dynamicConfig,
+		rateLimiterConfig,
+		feeTokenConfigs,
+		tokenTransferConfigArgs,
+		nopsAndWeights,
+	)
+	bc.Commit()
+	require.NoError(t, err)
+	ccipdata.AssertNonRevert(t, transaction, bc, user)
+
+	return onRampAddress, rmnContract, rmnAddress
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdb/mocks/price_service_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdb/mocks/price_service_mock.go
new file mode 100644
index 00000000000..39ba632aff9
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdb/mocks/price_service_mock.go
@@ -0,0 +1,250 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package mocks
+
+import (
+ big "math/big"
+
+ ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ ccipdata "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+
+ context "context"
+
+ mock "github.com/stretchr/testify/mock"
+
+ prices "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices"
+)
+
+// NOTE(review): this file is generated by mockery (see header) — do not
+// hand-edit behavior here; change the PriceService interface and regenerate.
+// PriceService is an autogenerated mock type for the PriceService type
+type PriceService struct {
+	mock.Mock
+}
+
+type PriceService_Expecter struct {
+	mock *mock.Mock
+}
+
+func (_m *PriceService) EXPECT() *PriceService_Expecter {
+	return &PriceService_Expecter{mock: &_m.Mock}
+}
+
+// Close provides a mock function with given fields:
+func (_m *PriceService) Close() error {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for Close")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func() error); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// PriceService_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
+type PriceService_Close_Call struct {
+	*mock.Call
+}
+
+// Close is a helper method to define mock.On call
+func (_e *PriceService_Expecter) Close() *PriceService_Close_Call {
+	return &PriceService_Close_Call{Call: _e.mock.On("Close")}
+}
+
+func (_c *PriceService_Close_Call) Run(run func()) *PriceService_Close_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run()
+	})
+	return _c
+}
+
+func (_c *PriceService_Close_Call) Return(_a0 error) *PriceService_Close_Call {
+	_c.Call.Return(_a0)
+	return _c
+}
+
+func (_c *PriceService_Close_Call) RunAndReturn(run func() error) *PriceService_Close_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
+// GetGasAndTokenPrices provides a mock function with given fields: ctx, destChainSelector
+func (_m *PriceService) GetGasAndTokenPrices(ctx context.Context, destChainSelector uint64) (map[uint64]*big.Int, map[ccip.Address]*big.Int, error) {
+	ret := _m.Called(ctx, destChainSelector)
+
+	if len(ret) == 0 {
+		panic("no return value specified for GetGasAndTokenPrices")
+	}
+
+	var r0 map[uint64]*big.Int
+	var r1 map[ccip.Address]*big.Int
+	var r2 error
+	if rf, ok := ret.Get(0).(func(context.Context, uint64) (map[uint64]*big.Int, map[ccip.Address]*big.Int, error)); ok {
+		return rf(ctx, destChainSelector)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, uint64) map[uint64]*big.Int); ok {
+		r0 = rf(ctx, destChainSelector)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(map[uint64]*big.Int)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, uint64) map[ccip.Address]*big.Int); ok {
+		r1 = rf(ctx, destChainSelector)
+	} else {
+		if ret.Get(1) != nil {
+			r1 = ret.Get(1).(map[ccip.Address]*big.Int)
+		}
+	}
+
+	if rf, ok := ret.Get(2).(func(context.Context, uint64) error); ok {
+		r2 = rf(ctx, destChainSelector)
+	} else {
+		r2 = ret.Error(2)
+	}
+
+	return r0, r1, r2
+}
+
+// PriceService_GetGasAndTokenPrices_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGasAndTokenPrices'
+type PriceService_GetGasAndTokenPrices_Call struct {
+	*mock.Call
+}
+
+// GetGasAndTokenPrices is a helper method to define mock.On call
+// - ctx context.Context
+// - destChainSelector uint64
+func (_e *PriceService_Expecter) GetGasAndTokenPrices(ctx interface{}, destChainSelector interface{}) *PriceService_GetGasAndTokenPrices_Call {
+	return &PriceService_GetGasAndTokenPrices_Call{Call: _e.mock.On("GetGasAndTokenPrices", ctx, destChainSelector)}
+}
+
+func (_c *PriceService_GetGasAndTokenPrices_Call) Run(run func(ctx context.Context, destChainSelector uint64)) *PriceService_GetGasAndTokenPrices_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(uint64))
+	})
+	return _c
+}
+
+func (_c *PriceService_GetGasAndTokenPrices_Call) Return(_a0 map[uint64]*big.Int, _a1 map[ccip.Address]*big.Int, _a2 error) *PriceService_GetGasAndTokenPrices_Call {
+	_c.Call.Return(_a0, _a1, _a2)
+	return _c
+}
+
+func (_c *PriceService_GetGasAndTokenPrices_Call) RunAndReturn(run func(context.Context, uint64) (map[uint64]*big.Int, map[ccip.Address]*big.Int, error)) *PriceService_GetGasAndTokenPrices_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
+// Start provides a mock function with given fields: _a0
+func (_m *PriceService) Start(_a0 context.Context) error {
+	ret := _m.Called(_a0)
+
+	if len(ret) == 0 {
+		panic("no return value specified for Start")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context) error); ok {
+		r0 = rf(_a0)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// PriceService_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'
+type PriceService_Start_Call struct {
+	*mock.Call
+}
+
+// Start is a helper method to define mock.On call
+// - _a0 context.Context
+func (_e *PriceService_Expecter) Start(_a0 interface{}) *PriceService_Start_Call {
+	return &PriceService_Start_Call{Call: _e.mock.On("Start", _a0)}
+}
+
+func (_c *PriceService_Start_Call) Run(run func(_a0 context.Context)) *PriceService_Start_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context))
+	})
+	return _c
+}
+
+func (_c *PriceService_Start_Call) Return(_a0 error) *PriceService_Start_Call {
+	_c.Call.Return(_a0)
+	return _c
+}
+
+func (_c *PriceService_Start_Call) RunAndReturn(run func(context.Context) error) *PriceService_Start_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
+// UpdateDynamicConfig provides a mock function with given fields: ctx, gasPriceEstimator, destPriceRegistryReader
+func (_m *PriceService) UpdateDynamicConfig(ctx context.Context, gasPriceEstimator prices.GasPriceEstimatorCommit, destPriceRegistryReader ccipdata.PriceRegistryReader) error {
+	ret := _m.Called(ctx, gasPriceEstimator, destPriceRegistryReader)
+
+	if len(ret) == 0 {
+		panic("no return value specified for UpdateDynamicConfig")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, prices.GasPriceEstimatorCommit, ccipdata.PriceRegistryReader) error); ok {
+		r0 = rf(ctx, gasPriceEstimator, destPriceRegistryReader)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// PriceService_UpdateDynamicConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateDynamicConfig'
+type PriceService_UpdateDynamicConfig_Call struct {
+	*mock.Call
+}
+
+// UpdateDynamicConfig is a helper method to define mock.On call
+// - ctx context.Context
+// - gasPriceEstimator prices.GasPriceEstimatorCommit
+// - destPriceRegistryReader ccipdata.PriceRegistryReader
+func (_e *PriceService_Expecter) UpdateDynamicConfig(ctx interface{}, gasPriceEstimator interface{}, destPriceRegistryReader interface{}) *PriceService_UpdateDynamicConfig_Call {
+	return &PriceService_UpdateDynamicConfig_Call{Call: _e.mock.On("UpdateDynamicConfig", ctx, gasPriceEstimator, destPriceRegistryReader)}
+}
+
+func (_c *PriceService_UpdateDynamicConfig_Call) Run(run func(ctx context.Context, gasPriceEstimator prices.GasPriceEstimatorCommit, destPriceRegistryReader ccipdata.PriceRegistryReader)) *PriceService_UpdateDynamicConfig_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(prices.GasPriceEstimatorCommit), args[2].(ccipdata.PriceRegistryReader))
+	})
+	return _c
+}
+
+func (_c *PriceService_UpdateDynamicConfig_Call) Return(_a0 error) *PriceService_UpdateDynamicConfig_Call {
+	_c.Call.Return(_a0)
+	return _c
+}
+
+func (_c *PriceService_UpdateDynamicConfig_Call) RunAndReturn(run func(context.Context, prices.GasPriceEstimatorCommit, ccipdata.PriceRegistryReader) error) *PriceService_UpdateDynamicConfig_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
+// NewPriceService creates a new instance of PriceService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewPriceService(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *PriceService {
+	mock := &PriceService{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdb/price_service.go b/core/services/ocr2/plugins/ccip/internal/ccipdb/price_service.go
new file mode 100644
index 00000000000..7d7d5bda3ad
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdb/price_service.go
@@ -0,0 +1,400 @@
+package db
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "sort"
+ "sync"
+ "time"
+
+ "golang.org/x/sync/errgroup"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
+ "github.com/smartcontractkit/chainlink-common/pkg/services"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
+ cciporm "github.com/smartcontractkit/chainlink/v2/core/services/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices"
+)
+
+// PriceService manages DB access for gas and token price data.
+// In the background, PriceService periodically inserts latest gas and token prices into the DB.
+// During `Observation` phase, Commit plugin calls PriceService to fetch the latest prices from DB.
+// This enables all lanes connected to a chain to feed price data to the leader lane's Commit plugin for that chain.
+type PriceService interface {
+	job.ServiceCtx
+
+	// UpdateDynamicConfig updates gasPriceEstimator and destPriceRegistryReader during Commit plugin dynamic config change.
+	UpdateDynamicConfig(ctx context.Context, gasPriceEstimator prices.GasPriceEstimatorCommit, destPriceRegistryReader ccipdata.PriceRegistryReader) error
+
+	// GetGasAndTokenPrices fetches source chain gas prices and relevant token prices from all lanes that touch the given dest chain.
+	// The prices have been written into the DB by each lane's PriceService in the background. The prices are denoted in USD.
+	GetGasAndTokenPrices(ctx context.Context, destChainSelector uint64) (map[uint64]*big.Int, map[cciptypes.Address]*big.Int, error)
+}
+
+// Compile-time check that priceService implements PriceService.
+var _ PriceService = (*priceService)(nil)
+
+const (
+	// Prices should expire after 10 minutes in DB. Prices should be fresh in the Commit plugin.
+	// 10 min provides sufficient buffer for the Commit plugin to withstand transient price update outages, while
+	// surfacing price update outages quickly enough.
+	priceExpireSec = 600
+	// Cleanups are called every 10 minutes. For a given job, on average we may expect 3 token prices and 1 gas price.
+	// 10 minutes should result in 40 rows being cleaned up per job, it is not a heavy load on DB, so there is no need
+	// to run cleanup more frequently. We shouldn't clean up less frequently than `priceExpireSec`.
+	priceCleanupInterval = 600 * time.Second
+
+	// Prices are refreshed every 1 minute, they are sufficiently accurate, and consistent with Commit OCR round time.
+	priceUpdateInterval = 60 * time.Second
+)
+
+// priceService is the background implementation of PriceService for a single
+// source->dest lane.
+type priceService struct {
+	// Tunables; initialized from the package consts in NewPriceService (the
+	// intervals are jittered there to spread DB load across services).
+	priceExpireSec  int
+	cleanupInterval time.Duration
+	updateInterval  time.Duration
+
+	lggr              logger.Logger
+	orm               cciporm.ORM
+	jobId             int32
+	destChainSelector uint64
+
+	sourceChainSelector uint64
+	sourceNative        cciptypes.Address
+	priceGetter         pricegetter.PriceGetter
+	offRampReader       ccipdata.OffRampReader
+	// gasPriceEstimator and destPriceRegistryReader remain nil until
+	// UpdateDynamicConfig is called; both are guarded by dynamicConfigMu.
+	gasPriceEstimator       prices.GasPriceEstimatorCommit
+	destPriceRegistryReader ccipdata.PriceRegistryReader
+
+	services.StateMachine
+	// wg tracks the background loop goroutine; backgroundCancel stops it.
+	wg               *sync.WaitGroup
+	backgroundCtx    context.Context //nolint:containedctx
+	backgroundCancel context.CancelFunc
+	dynamicConfigMu  *sync.RWMutex
+}
+
+// NewPriceService constructs a PriceService for the given lane. The background
+// update/cleanup loop does not run until Start is called; the internal
+// background context created here is canceled by Close. gasPriceEstimator and
+// destPriceRegistryReader start unset and must be provided later via
+// UpdateDynamicConfig before price updates can be produced.
+func NewPriceService(
+	lggr logger.Logger,
+	orm cciporm.ORM,
+	jobId int32,
+	destChainSelector uint64,
+	sourceChainSelector uint64,
+
+	sourceNative cciptypes.Address,
+	priceGetter pricegetter.PriceGetter,
+	offRampReader ccipdata.OffRampReader,
+) PriceService {
+	ctx, cancel := context.WithCancel(context.Background())
+
+	pw := &priceService{
+		priceExpireSec:  priceExpireSec,
+		cleanupInterval: utils.WithJitter(priceCleanupInterval), // use WithJitter to avoid multiple services impacting DB at same time
+		updateInterval:  utils.WithJitter(priceUpdateInterval),
+
+		lggr:              lggr,
+		orm:               orm,
+		jobId:             jobId,
+		destChainSelector: destChainSelector,
+
+		sourceChainSelector: sourceChainSelector,
+		sourceNative:        sourceNative,
+		priceGetter:         priceGetter,
+		offRampReader:       offRampReader,
+
+		wg:               new(sync.WaitGroup),
+		backgroundCtx:    ctx,
+		backgroundCancel: cancel,
+		dynamicConfigMu:  &sync.RWMutex{},
+	}
+	return pw
+}
+
+// Start launches the background cleanup/update loop. StartOnce guarantees it
+// runs at most once. The passed context is intentionally unused: the loop runs
+// on the internal background context created in NewPriceService and canceled
+// by Close. wg.Add(1) here is paired with wg.Done in run's goroutine.
+func (p *priceService) Start(context.Context) error {
+	return p.StateMachine.StartOnce("PriceService", func() error {
+		p.lggr.Info("Starting PriceService")
+		p.wg.Add(1)
+		p.run()
+		return nil
+	})
+}
+
+// Close cancels the background context, stopping the loop started by Start,
+// and blocks until the goroutine has exited. StopOnce guarantees the shutdown
+// runs at most once.
+func (p *priceService) Close() error {
+	return p.StateMachine.StopOnce("PriceService", func() error {
+		p.lggr.Info("Closing PriceService")
+		p.backgroundCancel()
+		p.wg.Wait()
+		return nil
+	})
+}
+
+// run starts the background goroutine that periodically cleans up expired
+// prices from the DB (every cleanupInterval) and refreshes the latest prices
+// (every updateInterval). The goroutine exits when the service's background
+// context is canceled (see Close). Callers must have done p.wg.Add(1) first
+// (Start does); the goroutine signals p.wg.Done on exit.
+func (p *priceService) run() {
+	cleanupTicker := time.NewTicker(p.cleanupInterval)
+	updateTicker := time.NewTicker(p.updateInterval)
+
+	go func() {
+		defer p.wg.Done()
+		// Tickers must be stopped to release their resources; previously they
+		// leaked when the goroutine returned on shutdown.
+		defer cleanupTicker.Stop()
+		defer updateTicker.Stop()
+
+		for {
+			select {
+			case <-p.backgroundCtx.Done():
+				return
+			case <-cleanupTicker.C:
+				if err := p.runCleanup(p.backgroundCtx); err != nil {
+					p.lggr.Errorw("Error when cleaning up in-db prices in the background", "err", err)
+				}
+			case <-updateTicker.C:
+				if err := p.runUpdate(p.backgroundCtx); err != nil {
+					p.lggr.Errorw("Error when updating prices in the background", "err", err)
+				}
+			}
+		}
+	}()
+}
+
+// UpdateDynamicConfig swaps in a new gas price estimator and dest price
+// registry reader (under dynamicConfigMu), then refreshes prices immediately.
+// It always returns nil: a failed refresh is only logged, since the background
+// loop retries on the next update tick.
+func (p *priceService) UpdateDynamicConfig(ctx context.Context, gasPriceEstimator prices.GasPriceEstimatorCommit, destPriceRegistryReader ccipdata.PriceRegistryReader) error {
+	p.dynamicConfigMu.Lock()
+	p.gasPriceEstimator = gasPriceEstimator
+	p.destPriceRegistryReader = destPriceRegistryReader
+	p.dynamicConfigMu.Unlock()
+
+	// Config update may substantially change the prices, refresh the prices immediately, this also makes testing easier
+	// for not having to wait to the full update interval.
+	if err := p.runUpdate(ctx); err != nil {
+		p.lggr.Errorw("Error when updating prices after dynamic config update", "err", err)
+	}
+
+	return nil
+}
+
+// GetGasAndTokenPrices loads the latest USD-denominated gas prices (keyed by
+// source chain selector) and token prices (keyed by token address) for the
+// given dest chain from the DB. The two queries run concurrently; rows with a
+// nil price are silently skipped.
+func (p *priceService) GetGasAndTokenPrices(ctx context.Context, destChainSelector uint64) (map[uint64]*big.Int, map[cciptypes.Address]*big.Int, error) {
+	eg := new(errgroup.Group)
+
+	var gasPricesInDB []cciporm.GasPrice
+	var tokenPricesInDB []cciporm.TokenPrice
+
+	eg.Go(func() error {
+		gasPrices, err := p.orm.GetGasPricesByDestChain(ctx, destChainSelector)
+		if err != nil {
+			return fmt.Errorf("failed to get gas prices from db: %w", err)
+		}
+		gasPricesInDB = gasPrices
+		return nil
+	})
+
+	eg.Go(func() error {
+		tokenPrices, err := p.orm.GetTokenPricesByDestChain(ctx, destChainSelector)
+		if err != nil {
+			return fmt.Errorf("failed to get token prices from db: %w", err)
+		}
+		tokenPricesInDB = tokenPrices
+		return nil
+	})
+
+	if err := eg.Wait(); err != nil {
+		return nil, nil, err
+	}
+
+	gasPrices := make(map[uint64]*big.Int, len(gasPricesInDB))
+	tokenPrices := make(map[cciptypes.Address]*big.Int, len(tokenPricesInDB))
+
+	for _, gasPrice := range gasPricesInDB {
+		if gasPrice.GasPrice != nil {
+			gasPrices[gasPrice.SourceChainSelector] = gasPrice.GasPrice.ToInt()
+		}
+	}
+
+	for _, tokenPrice := range tokenPricesInDB {
+		if tokenPrice.TokenPrice != nil {
+			tokenPrices[cciptypes.Address(tokenPrice.TokenAddr)] = tokenPrice.TokenPrice.ToInt()
+		}
+	}
+
+	return gasPrices, tokenPrices, nil
+}
+
+// runCleanup deletes expired gas and token price rows for this destination
+// chain from the DB. The two deletions run concurrently; the first error (if
+// any) is returned.
+func (p *priceService) runCleanup(ctx context.Context) error {
+	eg := new(errgroup.Group)
+
+	eg.Go(func() error {
+		if err := p.orm.ClearGasPricesByDestChain(ctx, p.destChainSelector, p.priceExpireSec); err != nil {
+			return fmt.Errorf("error clearing gas prices: %w", err)
+		}
+		return nil
+	})
+
+	eg.Go(func() error {
+		if err := p.orm.ClearTokenPricesByDestChain(ctx, p.destChainSelector, p.priceExpireSec); err != nil {
+			return fmt.Errorf("error clearing token prices: %w", err)
+		}
+		return nil
+	})
+
+	return eg.Wait()
+}
+
+// runUpdate observes the latest source gas price and token prices and writes
+// them to the DB. It is a no-op (returning nil) until UpdateDynamicConfig has
+// supplied a gas price estimator and dest price registry reader.
+func (p *priceService) runUpdate(ctx context.Context) error {
+	// Protect against concurrent updates of `gasPriceEstimator` and `destPriceRegistryReader`
+	// Price updates happen infrequently - once every `priceUpdateInterval` seconds.
+	// It does not happen on any code path that is performance sensitive.
+	// We can afford to have non-performant unlocks here that is simple and safe.
+	p.dynamicConfigMu.RLock()
+	defer p.dynamicConfigMu.RUnlock()
+
+	// There may be a period of time between service is started and dynamic config is updated
+	if p.gasPriceEstimator == nil || p.destPriceRegistryReader == nil {
+		p.lggr.Info("Skipping price update due to gasPriceEstimator and/or destPriceRegistry not ready")
+		return nil
+	}
+
+	sourceGasPriceUSD, tokenPricesUSD, err := p.observePriceUpdates(ctx, p.lggr)
+	if err != nil {
+		return fmt.Errorf("failed to observe price updates: %w", err)
+	}
+
+	err = p.writePricesToDB(ctx, sourceGasPriceUSD, tokenPricesUSD)
+	if err != nil {
+		return fmt.Errorf("failed to write prices to db: %w", err)
+	}
+
+	return nil
+}
+
+// observePriceUpdates fetches the lane tokens relevant to this lane and
+// produces the latest source gas price and token prices, denominated in USD.
+// Requires gasPriceEstimator and destPriceRegistryReader to have been set via
+// UpdateDynamicConfig; returns an error otherwise.
+func (p *priceService) observePriceUpdates(
+	ctx context.Context,
+	lggr logger.Logger,
+) (sourceGasPriceUSD *big.Int, tokenPricesUSD map[cciptypes.Address]*big.Int, err error) {
+	if p.gasPriceEstimator == nil || p.destPriceRegistryReader == nil {
+		return nil, nil, fmt.Errorf("gasPriceEstimator and/or destPriceRegistry is not set yet")
+	}
+
+	sortedLaneTokens, filteredLaneTokens, err := ccipcommon.GetFilteredSortedLaneTokens(ctx, p.offRampReader, p.destPriceRegistryReader, p.priceGetter)
+	// Check the error before using the results; previously filteredLaneTokens
+	// was logged before knowing whether the call succeeded.
+	if err != nil {
+		return nil, nil, fmt.Errorf("get destination tokens: %w", err)
+	}
+
+	lggr.Debugw("Filtered bridgeable tokens with no configured price getter", "filteredLaneTokens", filteredLaneTokens)
+
+	return p.generatePriceUpdates(ctx, lggr, sortedLaneTokens)
+}
+
+// All prices are USD ($1=1e18) denominated. All prices must be not nil.
+// Return token prices should contain the exact same tokens as in tokenDecimals.
+func (p *priceService) generatePriceUpdates(
+	ctx context.Context,
+	lggr logger.Logger,
+	sortedLaneTokens []cciptypes.Address,
+) (sourceGasPriceUSD *big.Int, tokenPricesUSD map[cciptypes.Address]*big.Int, err error) {
+	// Include wrapped native in our token query as way to identify the source native USD price.
+	// notice USD is in 1e18 scale, i.e. $1 = 1e18
+	queryTokens := ccipcommon.FlattenUniqueSlice([]cciptypes.Address{p.sourceNative}, sortedLaneTokens)
+
+	rawTokenPricesUSD, err := p.priceGetter.TokenPricesUSD(ctx, queryTokens)
+	if err != nil {
+		return nil, nil, err
+	}
+	lggr.Infow("Raw token prices", "rawTokenPrices", rawTokenPricesUSD)
+
+	// make sure that we got prices for all the tokens of our query
+	for _, token := range queryTokens {
+		if rawTokenPricesUSD[token] == nil {
+			return nil, nil, fmt.Errorf("missing token price: %+v", token)
+		}
+	}
+
+	sourceNativePriceUSD, exists := rawTokenPricesUSD[p.sourceNative]
+	if !exists {
+		return nil, nil, fmt.Errorf("missing source native (%s) price", p.sourceNative)
+	}
+
+	destTokensDecimals, err := p.destPriceRegistryReader.GetTokensDecimals(ctx, sortedLaneTokens)
+	if err != nil {
+		return nil, nil, fmt.Errorf("get tokens decimals: %w", err)
+	}
+
+	// NOTE(review): destTokensDecimals[i] is paired with sortedLaneTokens[i];
+	// this assumes GetTokensDecimals preserves input order — confirm against
+	// the PriceRegistryReader contract.
+	tokenPricesUSD = make(map[cciptypes.Address]*big.Int, len(rawTokenPricesUSD))
+	for i, token := range sortedLaneTokens {
+		tokenPricesUSD[token] = calculateUsdPer1e18TokenAmount(rawTokenPricesUSD[token], destTokensDecimals[i])
+	}
+
+	sourceGasPrice, err := p.gasPriceEstimator.GetGasPrice(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+	if sourceGasPrice == nil {
+		return nil, nil, fmt.Errorf("missing gas price")
+	}
+	sourceGasPriceUSD, err = p.gasPriceEstimator.DenoteInUSD(sourceGasPrice, sourceNativePriceUSD)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	lggr.Infow("PriceService observed latest price",
+		"sourceChainSelector", p.sourceChainSelector,
+		"destChainSelector", p.destChainSelector,
+		"gasPriceWei", sourceGasPrice,
+		"sourceNativePriceUSD", sourceNativePriceUSD,
+		"sourceGasPriceUSD", sourceGasPriceUSD,
+		"tokenPricesUSD", tokenPricesUSD,
+	)
+	return sourceGasPriceUSD, tokenPricesUSD, nil
+}
+
+// writePricesToDB persists the observed gas price and token prices for this
+// lane. A nil sourceGasPriceUSD skips the gas price insert; a nil
+// tokenPricesUSD map skips the token price insert. The two inserts run
+// concurrently and the first error (if any) is returned.
+func (p *priceService) writePricesToDB(
+	ctx context.Context,
+	sourceGasPriceUSD *big.Int,
+	tokenPricesUSD map[cciptypes.Address]*big.Int,
+) (err error) {
+	eg := new(errgroup.Group)
+
+	if sourceGasPriceUSD != nil {
+		eg.Go(func() error {
+			return p.orm.InsertGasPricesForDestChain(ctx, p.destChainSelector, p.jobId, []cciporm.GasPriceUpdate{
+				{
+					SourceChainSelector: p.sourceChainSelector,
+					GasPrice:            assets.NewWei(sourceGasPriceUSD),
+				},
+			})
+		})
+	}
+
+	if tokenPricesUSD != nil {
+		var tokenPrices []cciporm.TokenPriceUpdate
+
+		for token, price := range tokenPricesUSD {
+			tokenPrices = append(tokenPrices, cciporm.TokenPriceUpdate{
+				TokenAddr:  string(token),
+				TokenPrice: assets.NewWei(price),
+			})
+		}
+
+		// Sort token by addr to make price updates ordering deterministic, easier to testing and debugging
+		sort.Slice(tokenPrices, func(i, j int) bool {
+			return tokenPrices[i].TokenAddr < tokenPrices[j].TokenAddr
+		})
+
+		eg.Go(func() error {
+			return p.orm.InsertTokenPricesForDestChain(ctx, p.destChainSelector, p.jobId, tokenPrices)
+		})
+	}
+
+	return eg.Wait()
+}
+
+// calculateUsdPer1e18TokenAmount converts a USD price quoted per full token
+// (18-decimal fixed point) into a USD price per 1e18 of the token's smallest
+// denomination, also with 18-decimal precision.
+// Example: 1 USDC = 1.00 USD per full token, each full token is 6 decimals
+// -> 1 * 1e18 * 1e18 / 1e6 = 1e30.
+func calculateUsdPer1e18TokenAmount(price *big.Int, decimals uint8) *big.Int {
+	scaled := new(big.Int).Mul(price, big.NewInt(1e18))
+	divisor := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(decimals)), nil)
+	return scaled.Div(scaled, divisor)
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdb/price_service_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdb/price_service_test.go
new file mode 100644
index 00000000000..0bea8af9a19
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/ccipdb/price_service_test.go
@@ -0,0 +1,755 @@
+package db
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "reflect"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ cciporm "github.com/smartcontractkit/chainlink/v2/core/services/ccip"
+ ccipmocks "github.com/smartcontractkit/chainlink/v2/core/services/ccip/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon"
+ ccipdatamocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices"
+)
+
+// TestPriceService_priceCleanup verifies that runCleanup invokes both ORM
+// cleanup methods exactly once and surfaces an error when either (or both)
+// fail.
+func TestPriceService_priceCleanup(t *testing.T) {
+	lggr := logger.TestLogger(t)
+	jobId := int32(1)
+	destChainSelector := uint64(12345)
+	sourceChainSelector := uint64(67890)
+
+	testCases := []struct {
+		name            string
+		gasPriceError   bool
+		tokenPriceError bool
+		expectedErr     bool
+	}{
+		{
+			name:            "ORM called successfully",
+			gasPriceError:   false,
+			tokenPriceError: false,
+			expectedErr:     false,
+		},
+		{
+			name:            "gasPrice clear failed",
+			gasPriceError:   true,
+			tokenPriceError: false,
+			expectedErr:     true,
+		},
+		{
+			name:            "tokenPrice clear failed",
+			gasPriceError:   false,
+			tokenPriceError: true,
+			expectedErr:     true,
+		},
+		{
+			name:            "both ORM calls failed",
+			gasPriceError:   true,
+			tokenPriceError: true,
+			expectedErr:     true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			ctx := tests.Context(t)
+
+			var gasPricesError error
+			var tokenPricesError error
+			if tc.gasPriceError {
+				gasPricesError = fmt.Errorf("gas prices error")
+			}
+			if tc.tokenPriceError {
+				tokenPricesError = fmt.Errorf("token prices error")
+			}
+
+			mockOrm := ccipmocks.NewORM(t)
+			mockOrm.On("ClearGasPricesByDestChain", ctx, destChainSelector, priceExpireSec).Return(gasPricesError).Once()
+			mockOrm.On("ClearTokenPricesByDestChain", ctx, destChainSelector, priceExpireSec).Return(tokenPricesError).Once()
+
+			priceService := NewPriceService(
+				lggr,
+				mockOrm,
+				jobId,
+				destChainSelector,
+				sourceChainSelector,
+				"",
+				nil,
+				nil,
+			).(*priceService)
+			err := priceService.runCleanup(ctx)
+			if tc.expectedErr {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+			}
+		})
+	}
+}
+
+// TestPriceService_priceWrite verifies that writePricesToDB inserts the
+// expected gas and token price rows (token rows sorted by address) and
+// surfaces an error when either insert fails.
+func TestPriceService_priceWrite(t *testing.T) {
+	lggr := logger.TestLogger(t)
+	jobId := int32(1)
+	destChainSelector := uint64(12345)
+	sourceChainSelector := uint64(67890)
+
+	gasPrice := big.NewInt(1e18)
+	tokenPrices := map[cciptypes.Address]*big.Int{
+		"0x123": big.NewInt(2e18),
+		"0x234": big.NewInt(3e18),
+	}
+
+	expectedGasPriceUpdate := []cciporm.GasPriceUpdate{
+		{
+			SourceChainSelector: sourceChainSelector,
+			GasPrice:            assets.NewWei(gasPrice),
+		},
+	}
+	expectedTokenPriceUpdate := []cciporm.TokenPriceUpdate{
+		{
+			TokenAddr:  "0x123",
+			TokenPrice: assets.NewWei(big.NewInt(2e18)),
+		},
+		{
+			TokenAddr:  "0x234",
+			TokenPrice: assets.NewWei(big.NewInt(3e18)),
+		},
+	}
+
+	testCases := []struct {
+		name            string
+		gasPriceError   bool
+		tokenPriceError bool
+		expectedErr     bool
+	}{
+		{
+			name:            "ORM called successfully",
+			gasPriceError:   false,
+			tokenPriceError: false,
+			expectedErr:     false,
+		},
+		{
+			// Names previously said "clear failed" (copy-pasted from the
+			// cleanup test); this test exercises inserts.
+			name:            "gasPrice insert failed",
+			gasPriceError:   true,
+			tokenPriceError: false,
+			expectedErr:     true,
+		},
+		{
+			name:            "tokenPrice insert failed",
+			gasPriceError:   false,
+			tokenPriceError: true,
+			expectedErr:     true,
+		},
+		{
+			name:            "both ORM calls failed",
+			gasPriceError:   true,
+			tokenPriceError: true,
+			expectedErr:     true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			ctx := tests.Context(t)
+
+			var gasPricesError error
+			var tokenPricesError error
+			if tc.gasPriceError {
+				gasPricesError = fmt.Errorf("gas prices error")
+			}
+			if tc.tokenPriceError {
+				tokenPricesError = fmt.Errorf("token prices error")
+			}
+
+			mockOrm := ccipmocks.NewORM(t)
+			mockOrm.On("InsertGasPricesForDestChain", ctx, destChainSelector, jobId, expectedGasPriceUpdate).Return(gasPricesError).Once()
+			mockOrm.On("InsertTokenPricesForDestChain", ctx, destChainSelector, jobId, expectedTokenPriceUpdate).Return(tokenPricesError).Once()
+
+			priceService := NewPriceService(
+				lggr,
+				mockOrm,
+				jobId,
+				destChainSelector,
+				sourceChainSelector,
+				"",
+				nil,
+				nil,
+			).(*priceService)
+			err := priceService.writePricesToDB(ctx, gasPrice, tokenPrices)
+			if tc.expectedErr {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+			}
+		})
+	}
+}
+
+// TestPriceService_generatePriceUpdates exercises generatePriceUpdates across
+// the happy path, missing token/native prices, decimal scaling, and gas
+// estimator failures, using mocked price getter / estimator / registry.
+func TestPriceService_generatePriceUpdates(t *testing.T) {
+	lggr := logger.TestLogger(t)
+	jobId := int32(1)
+	destChainSelector := uint64(12345)
+	sourceChainSelector := uint64(67890)
+
+	const nTokens = 10
+	tokens := make([]cciptypes.Address, nTokens)
+	for i := range tokens {
+		tokens[i] = cciptypes.Address(utils.RandomAddress().String())
+	}
+	sort.Slice(tokens, func(i, j int) bool { return tokens[i] < tokens[j] })
+
+	testCases := []struct {
+		name                 string
+		tokenDecimals        map[cciptypes.Address]uint8
+		sourceNativeToken    cciptypes.Address
+		priceGetterRespData  map[cciptypes.Address]*big.Int
+		priceGetterRespErr   error
+		feeEstimatorRespFee  *big.Int
+		feeEstimatorRespErr  error
+		maxGasPrice          uint64
+		expSourceGasPriceUSD *big.Int
+		expTokenPricesUSD    map[cciptypes.Address]*big.Int
+		expErr               bool
+	}{
+		{
+			name: "base",
+			tokenDecimals: map[cciptypes.Address]uint8{
+				tokens[0]: 18,
+				tokens[1]: 12,
+			},
+			sourceNativeToken: tokens[0],
+			priceGetterRespData: map[cciptypes.Address]*big.Int{
+				tokens[0]: val1e18(100),
+				tokens[1]: val1e18(200),
+				tokens[2]: val1e18(300), // price getter returned a price for this token even though we didn't request it (should be skipped)
+			},
+			priceGetterRespErr:   nil,
+			feeEstimatorRespFee:  big.NewInt(10),
+			feeEstimatorRespErr:  nil,
+			maxGasPrice:          1e18,
+			expSourceGasPriceUSD: big.NewInt(1000),
+			expTokenPricesUSD: map[cciptypes.Address]*big.Int{
+				tokens[0]: val1e18(100),
+				tokens[1]: val1e18(200 * 1e6),
+			},
+			expErr: false,
+		},
+		{
+			name: "price getter returned an error",
+			tokenDecimals: map[cciptypes.Address]uint8{
+				tokens[0]: 18,
+				tokens[1]: 18,
+			},
+			sourceNativeToken:   tokens[0],
+			priceGetterRespData: nil,
+			priceGetterRespErr:  fmt.Errorf("some random network error"),
+			expErr:              true,
+		},
+		{
+			name: "price getter skipped a requested price",
+			tokenDecimals: map[cciptypes.Address]uint8{
+				tokens[0]: 18,
+				tokens[1]: 18,
+			},
+			sourceNativeToken: tokens[0],
+			priceGetterRespData: map[cciptypes.Address]*big.Int{
+				tokens[0]: val1e18(100),
+			},
+			priceGetterRespErr: nil,
+			expErr:             true,
+		},
+		{
+			name: "price getter skipped source native price",
+			tokenDecimals: map[cciptypes.Address]uint8{
+				tokens[0]: 18,
+				tokens[1]: 18,
+			},
+			sourceNativeToken: tokens[2],
+			priceGetterRespData: map[cciptypes.Address]*big.Int{
+				tokens[0]: val1e18(100),
+				tokens[1]: val1e18(200),
+			},
+			priceGetterRespErr: nil,
+			expErr:             true,
+		},
+		{
+			// NOTE(review): this name looks copy-pasted from gas-estimator
+			// tests; the case actually just checks a different gas price
+			// (20 vs 10) — confirm intent and consider renaming.
+			name: "dynamic fee cap overrides legacy",
+			tokenDecimals: map[cciptypes.Address]uint8{
+				tokens[0]: 18,
+				tokens[1]: 18,
+			},
+			sourceNativeToken: tokens[0],
+			priceGetterRespData: map[cciptypes.Address]*big.Int{
+				tokens[0]: val1e18(100),
+				tokens[1]: val1e18(200),
+				tokens[2]: val1e18(300), // price getter returned a price for this token even though we didn't request it (should be skipped)
+			},
+			priceGetterRespErr:   nil,
+			feeEstimatorRespFee:  big.NewInt(20),
+			feeEstimatorRespErr:  nil,
+			maxGasPrice:          1e18,
+			expSourceGasPriceUSD: big.NewInt(2000),
+			expTokenPricesUSD: map[cciptypes.Address]*big.Int{
+				tokens[0]: val1e18(100),
+				tokens[1]: val1e18(200),
+			},
+			expErr: false,
+		},
+		{
+			name: "nil gas price",
+			tokenDecimals: map[cciptypes.Address]uint8{
+				tokens[0]: 18,
+				tokens[1]: 18,
+			},
+			sourceNativeToken: tokens[0],
+			priceGetterRespData: map[cciptypes.Address]*big.Int{
+				tokens[0]: val1e18(100),
+				tokens[1]: val1e18(200),
+				tokens[2]: val1e18(300), // price getter returned a price for this token even though we didn't request it (should be skipped)
+			},
+			feeEstimatorRespFee: nil,
+			maxGasPrice:         1e18,
+			expErr:              true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			priceGetter := pricegetter.NewMockPriceGetter(t)
+			defer priceGetter.AssertExpectations(t)
+
+			gasPriceEstimator := prices.NewMockGasPriceEstimatorCommit(t)
+			defer gasPriceEstimator.AssertExpectations(t)
+
+			var destTokens []cciptypes.Address
+			for tk := range tc.tokenDecimals {
+				destTokens = append(destTokens, tk)
+			}
+			sort.Slice(destTokens, func(i, j int) bool {
+				return destTokens[i] < destTokens[j]
+			})
+			var destDecimals []uint8
+			for _, token := range destTokens {
+				destDecimals = append(destDecimals, tc.tokenDecimals[token])
+			}
+
+			queryTokens := ccipcommon.FlattenUniqueSlice([]cciptypes.Address{tc.sourceNativeToken}, destTokens)
+
+			if len(queryTokens) > 0 {
+				priceGetter.On("TokenPricesUSD", mock.Anything, queryTokens).Return(tc.priceGetterRespData, tc.priceGetterRespErr)
+			}
+
+			if tc.maxGasPrice > 0 {
+				gasPriceEstimator.On("GetGasPrice", mock.Anything).Return(tc.feeEstimatorRespFee, tc.feeEstimatorRespErr)
+				if tc.feeEstimatorRespFee != nil {
+					pUSD := ccipcalc.CalculateUsdPerUnitGas(tc.feeEstimatorRespFee, tc.expTokenPricesUSD[tc.sourceNativeToken])
+					gasPriceEstimator.On("DenoteInUSD", mock.Anything, mock.Anything).Return(pUSD, nil)
+				}
+			}
+
+			destPriceReg := ccipdatamocks.NewPriceRegistryReader(t)
+			destPriceReg.On("GetTokensDecimals", mock.Anything, destTokens).Return(destDecimals, nil).Maybe()
+
+			priceService := NewPriceService(
+				lggr,
+				nil,
+				jobId,
+				destChainSelector,
+				sourceChainSelector,
+				tc.sourceNativeToken,
+				priceGetter,
+				nil,
+			).(*priceService)
+			priceService.gasPriceEstimator = gasPriceEstimator
+			priceService.destPriceRegistryReader = destPriceReg
+
+			sourceGasPriceUSD, tokenPricesUSD, err := priceService.generatePriceUpdates(context.Background(), lggr, destTokens)
+			if tc.expErr {
+				assert.Error(t, err)
+				return
+			}
+			assert.NoError(t, err)
+			assert.True(t, tc.expSourceGasPriceUSD.Cmp(sourceGasPriceUSD) == 0)
+			assert.True(t, reflect.DeepEqual(tc.expTokenPricesUSD, tokenPricesUSD))
+		})
+	}
+}
+
+func TestPriceService_calculateUsdPer1e18TokenAmount(t *testing.T) {
+ testCases := []struct {
+ name string
+ price *big.Int
+ decimal uint8
+ wantResult *big.Int
+ }{
+ {
+ name: "18-decimal token, $6.5 per token",
+ price: big.NewInt(65e17),
+ decimal: 18,
+ wantResult: big.NewInt(65e17),
+ },
+ {
+ name: "6-decimal token, $1 per token",
+ price: big.NewInt(1e18),
+ decimal: 6,
+ wantResult: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(1e12)), // 1e30
+ },
+ {
+ name: "0-decimal token, $1 per token",
+ price: big.NewInt(1e18),
+ decimal: 0,
+ wantResult: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(1e18)), // 1e36
+ },
+ {
+ name: "36-decimal token, $1 per token",
+ price: big.NewInt(1e18),
+ decimal: 36,
+ wantResult: big.NewInt(1),
+ },
+ }
+ for _, tt := range testCases {
+ t.Run(tt.name, func(t *testing.T) {
+ got := calculateUsdPer1e18TokenAmount(tt.price, tt.decimal)
+ assert.Equal(t, tt.wantResult, got)
+ })
+ }
+}
+
// TestPriceService_GetGasAndTokenPrices verifies that GetGasAndTokenPrices
// maps ORM rows into result maps keyed by source chain selector / token
// address, silently drops rows whose price column is nil, and propagates
// errors from either ORM read.
func TestPriceService_GetGasAndTokenPrices(t *testing.T) {
	lggr := logger.TestLogger(t)
	jobId := int32(1)
	destChainSelector := uint64(12345)
	sourceChainSelector := uint64(67890)

	token1 := ccipcalc.HexToAddress("0x123")
	token2 := ccipcalc.HexToAddress("0x234")

	gasPrice := big.NewInt(1e18)
	tokenPrices := map[cciptypes.Address]*big.Int{
		token1: big.NewInt(2e18),
		token2: big.NewInt(3e18),
	}

	testCases := []struct {
		name                 string
		ormGasPricesResult   []cciporm.GasPrice
		ormTokenPricesResult []cciporm.TokenPrice

		expectedGasPrices   map[uint64]*big.Int
		expectedTokenPrices map[cciptypes.Address]*big.Int

		gasPriceError   bool
		tokenPriceError bool
		expectedErr     bool
	}{
		{
			name: "ORM called successfully",
			ormGasPricesResult: []cciporm.GasPrice{
				{
					SourceChainSelector: sourceChainSelector,
					GasPrice:            assets.NewWei(gasPrice),
				},
			},
			ormTokenPricesResult: []cciporm.TokenPrice{
				{
					TokenAddr:  string(token1),
					TokenPrice: assets.NewWei(tokenPrices[token1]),
				},
				{
					TokenAddr:  string(token2),
					TokenPrice: assets.NewWei(tokenPrices[token2]),
				},
			},
			expectedGasPrices: map[uint64]*big.Int{
				sourceChainSelector: gasPrice,
			},
			expectedTokenPrices: tokenPrices,
			gasPriceError:       false,
			tokenPriceError:     false,
			expectedErr:         false,
		},
		{
			name: "multiple gas prices with nil token price",
			ormGasPricesResult: []cciporm.GasPrice{
				{
					SourceChainSelector: sourceChainSelector,
					GasPrice:            assets.NewWei(gasPrice),
				},
				{
					SourceChainSelector: sourceChainSelector + 1,
					GasPrice:            assets.NewWei(big.NewInt(200)),
				},
				{
					SourceChainSelector: sourceChainSelector + 2,
					GasPrice:            assets.NewWei(big.NewInt(300)),
				},
			},
			ormTokenPricesResult: nil,
			expectedGasPrices: map[uint64]*big.Int{
				sourceChainSelector:     gasPrice,
				sourceChainSelector + 1: big.NewInt(200),
				sourceChainSelector + 2: big.NewInt(300),
			},
			// nil token rows from the ORM become an empty (non-nil) map.
			expectedTokenPrices: map[cciptypes.Address]*big.Int{},
			gasPriceError:       false,
			tokenPriceError:     false,
			expectedErr:         false,
		},
		{
			name:               "multiple token prices with nil gas price",
			ormGasPricesResult: nil,
			ormTokenPricesResult: []cciporm.TokenPrice{
				{
					TokenAddr:  string(token1),
					TokenPrice: assets.NewWei(tokenPrices[token1]),
				},
				{
					TokenAddr:  string(token2),
					TokenPrice: assets.NewWei(tokenPrices[token2]),
				},
			},
			expectedGasPrices:   map[uint64]*big.Int{},
			expectedTokenPrices: tokenPrices,
			gasPriceError:       false,
			tokenPriceError:     false,
			expectedErr:         false,
		},
		{
			// Rows with a nil price must be excluded from the output maps.
			name: "nil prices filtered out",
			ormGasPricesResult: []cciporm.GasPrice{
				{
					SourceChainSelector: sourceChainSelector,
					GasPrice:            nil,
				},
				{
					SourceChainSelector: sourceChainSelector + 1,
					GasPrice:            assets.NewWei(gasPrice),
				},
			},
			ormTokenPricesResult: []cciporm.TokenPrice{
				{
					TokenAddr:  string(token1),
					TokenPrice: assets.NewWei(tokenPrices[token1]),
				},
				{
					TokenAddr:  string(token2),
					TokenPrice: nil,
				},
			},
			expectedGasPrices: map[uint64]*big.Int{
				sourceChainSelector + 1: gasPrice,
			},
			expectedTokenPrices: map[cciptypes.Address]*big.Int{
				token1: tokenPrices[token1],
			},
			gasPriceError:   false,
			tokenPriceError: false,
			expectedErr:     false,
		},
		{
			name:            "gasPrice clear failed",
			gasPriceError:   true,
			tokenPriceError: false,
			expectedErr:     true,
		},
		{
			name:            "tokenPrice clear failed",
			gasPriceError:   false,
			tokenPriceError: true,
			expectedErr:     true,
		},
		{
			name:            "both ORM calls failed",
			gasPriceError:   true,
			tokenPriceError: true,
			expectedErr:     true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := tests.Context(t)

			// Stub both ORM reads; each is expected exactly once per case.
			mockOrm := ccipmocks.NewORM(t)
			if tc.gasPriceError {
				mockOrm.On("GetGasPricesByDestChain", ctx, destChainSelector).Return(nil, fmt.Errorf("gas prices error")).Once()
			} else {
				mockOrm.On("GetGasPricesByDestChain", ctx, destChainSelector).Return(tc.ormGasPricesResult, nil).Once()
			}
			if tc.tokenPriceError {
				mockOrm.On("GetTokenPricesByDestChain", ctx, destChainSelector).Return(nil, fmt.Errorf("token prices error")).Once()
			} else {
				mockOrm.On("GetTokenPricesByDestChain", ctx, destChainSelector).Return(tc.ormTokenPricesResult, nil).Once()
			}

			priceService := NewPriceService(
				lggr,
				mockOrm,
				jobId,
				destChainSelector,
				sourceChainSelector,
				"",
				nil,
				nil,
			).(*priceService)
			gasPricesResult, tokenPricesResult, err := priceService.GetGasAndTokenPrices(ctx, destChainSelector)
			if tc.expectedErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tc.expectedGasPrices, gasPricesResult)
				assert.Equal(t, tc.expectedTokenPrices, tokenPricesResult)
			}
		})
	}
}
+
// val1e18 returns val scaled by 1e18, i.e. val expressed in 18-decimal base units.
func val1e18(val int64) *big.Int {
	return new(big.Int).Mul(big.NewInt(val), big.NewInt(1e18))
}
+
+func setupORM(t *testing.T) cciporm.ORM {
+ t.Helper()
+
+ db := pgtest.NewSqlxDB(t)
+ orm, err := cciporm.NewORM(db)
+
+ require.NoError(t, err)
+
+ return orm
+}
+
+func checkResultLen(t *testing.T, priceService PriceService, destChainSelector uint64, gasCount int, tokenCount int) error {
+ ctx := tests.Context(t)
+ dbGasResult, dbTokenResult, err := priceService.GetGasAndTokenPrices(ctx, destChainSelector)
+ if err != nil {
+ return nil
+ }
+ if len(dbGasResult) != gasCount {
+ return fmt.Errorf("expected %d gas prices, got %d", gasCount, len(dbGasResult))
+ }
+ if len(dbTokenResult) != tokenCount {
+ return fmt.Errorf("expected %d token prices, got %d", tokenCount, len(dbTokenResult))
+ }
+ return nil
+}
+
// TestPriceService_priceWriteAndCleanupInBackground runs the PriceService
// against a real test database and checks the background write/cleanup loops:
// prices appear after the dynamic config is set, are expired by cleanup
// (priceExpireSec = 0 expires everything), reappear on the next update tick,
// and stop being written once the service is closed.
// NOTE(review): timing-based; relies on Eventually polling, not exact ticks.
func TestPriceService_priceWriteAndCleanupInBackground(t *testing.T) {
	lggr := logger.TestLogger(t)
	jobId := int32(1)
	destChainSelector := uint64(12345)
	sourceChainSelector := uint64(67890)
	ctx := tests.Context(t)

	sourceNative := cciptypes.Address("0x123")
	feeTokens := []cciptypes.Address{"0x234"}
	rampTokens := []cciptypes.Address{"0x345", "0x456"}
	rampFilteredTokens := []cciptypes.Address{"0x345"}
	rampFilterOutTokens := []cciptypes.Address{"0x456"}

	// Lane tokens = fee tokens + filtered ramp tokens; these are the tokens
	// whose prices end up in the DB.
	laneTokens := []cciptypes.Address{"0x234", "0x345"}
	laneTokenDecimals := []uint8{18, 18}

	tokens := []cciptypes.Address{sourceNative, "0x234", "0x345"}
	tokenPrices := []int64{2, 3, 4}
	gasPrice := big.NewInt(10)

	orm := setupORM(t)

	priceGetter := pricegetter.NewMockPriceGetter(t)
	defer priceGetter.AssertExpectations(t)

	gasPriceEstimator := prices.NewMockGasPriceEstimatorCommit(t)
	defer gasPriceEstimator.AssertExpectations(t)

	priceGetter.On("TokenPricesUSD", mock.Anything, tokens).Return(map[cciptypes.Address]*big.Int{
		tokens[0]: val1e18(tokenPrices[0]),
		tokens[1]: val1e18(tokenPrices[1]),
		tokens[2]: val1e18(tokenPrices[2]),
	}, nil)
	priceGetter.On("FilterConfiguredTokens", mock.Anything, rampTokens).Return(rampFilteredTokens, rampFilterOutTokens, nil)

	offRampReader := ccipdatamocks.NewOffRampReader(t)
	offRampReader.On("GetTokens", mock.Anything).Return(cciptypes.OffRampTokens{
		DestinationTokens: rampTokens,
	}, nil).Maybe()

	gasPriceEstimator.On("GetGasPrice", mock.Anything).Return(gasPrice, nil)
	pUSD := ccipcalc.CalculateUsdPerUnitGas(gasPrice, val1e18(tokenPrices[0]))
	gasPriceEstimator.On("DenoteInUSD", mock.Anything, mock.Anything).Return(pUSD, nil)

	destPriceReg := ccipdatamocks.NewPriceRegistryReader(t)
	destPriceReg.On("GetTokensDecimals", mock.Anything, laneTokens).Return(laneTokenDecimals, nil).Maybe()
	destPriceReg.On("GetFeeTokens", mock.Anything).Return(feeTokens, nil).Maybe()

	priceService := NewPriceService(
		lggr,
		orm,
		jobId,
		destChainSelector,
		sourceChainSelector,
		tokens[0],
		priceGetter,
		offRampReader,
	).(*priceService)

	updateInterval := 2000 * time.Millisecond
	cleanupInterval := 3000 * time.Millisecond

	// run write task every 2 second
	priceService.updateInterval = updateInterval
	// run cleanup every 3 seconds
	priceService.cleanupInterval = cleanupInterval
	// expire all prices during every cleanup
	priceService.priceExpireSec = 0

	// initially, db is empty
	assert.NoError(t, checkResultLen(t, priceService, destChainSelector, 0, 0))

	// starts PriceService in the background
	assert.NoError(t, priceService.Start(ctx))

	// setting dynamicConfig triggers initial price update
	err := priceService.UpdateDynamicConfig(ctx, gasPriceEstimator, destPriceReg)
	assert.NoError(t, err)
	assert.NoError(t, checkResultLen(t, priceService, destChainSelector, 1, len(laneTokens)))

	// eventually prices will be cleaned
	assert.Eventually(t, func() bool {
		err := checkResultLen(t, priceService, destChainSelector, 0, 0)
		return err == nil
	}, testutils.WaitTimeout(t), testutils.TestInterval)

	// then prices will be updated again
	assert.Eventually(t, func() bool {
		err := checkResultLen(t, priceService, destChainSelector, 1, len(laneTokens))
		return err == nil
	}, testutils.WaitTimeout(t), testutils.TestInterval)

	assert.NoError(t, priceService.Close())
	assert.NoError(t, priceService.runCleanup(ctx))

	// after stopping PriceService and runCleanup, no more updates are inserted
	for i := 0; i < 5; i++ {
		time.Sleep(time.Second)
		assert.NoError(t, checkResultLen(t, priceService, destChainSelector, 0, 0))
	}
}
diff --git a/core/services/ocr2/plugins/ccip/internal/logpollerutil/filters.go b/core/services/ocr2/plugins/ccip/internal/logpollerutil/filters.go
new file mode 100644
index 00000000000..e42dd8c154d
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/logpollerutil/filters.go
@@ -0,0 +1,73 @@
+package logpollerutil
+
+import (
+ "context"
+
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+)
+
+func RegisterLpFilters(lp logpoller.LogPoller, filters []logpoller.Filter) error {
+ for _, lpFilter := range filters {
+ if filterContainsZeroAddress(lpFilter.Addresses) {
+ continue
+ }
+ // FIXME Dim pgOpts removed from LogPoller
+ if err := lp.RegisterFilter(context.Background(), lpFilter); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func UnregisterLpFilters(lp logpoller.LogPoller, filters []logpoller.Filter) error {
+ for _, lpFilter := range filters {
+ if filterContainsZeroAddress(lpFilter.Addresses) {
+ continue
+ }
+ // FIXME Dim pgOpts removed from LogPoller
+ if err := lp.UnregisterFilter(context.Background(), lpFilter.Name); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func FiltersDiff(filtersBefore, filtersNow []logpoller.Filter) (created, deleted []logpoller.Filter) {
+ created = make([]logpoller.Filter, 0, len(filtersNow))
+ deleted = make([]logpoller.Filter, 0, len(filtersBefore))
+
+ for _, f := range filtersNow {
+ if !containsFilter(filtersBefore, f) {
+ created = append(created, f)
+ }
+ }
+
+ for _, f := range filtersBefore {
+ if !containsFilter(filtersNow, f) {
+ deleted = append(deleted, f)
+ }
+ }
+
+ return created, deleted
+}
+
+func containsFilter(filters []logpoller.Filter, f logpoller.Filter) bool {
+ for _, existing := range filters {
+ if existing.Name == f.Name {
+ return true
+ }
+ }
+ return false
+}
+
+func filterContainsZeroAddress(addrs []common.Address) bool {
+ for _, addr := range addrs {
+ if addr == utils.ZeroAddress {
+ return true
+ }
+ }
+ return false
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/logpollerutil/filters_test.go b/core/services/ocr2/plugins/ccip/internal/logpollerutil/filters_test.go
new file mode 100644
index 00000000000..9ea08ec1421
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/logpollerutil/filters_test.go
@@ -0,0 +1,156 @@
+package logpollerutil
+
+import (
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/assert"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+)
+
+func Test_FiltersDiff(t *testing.T) {
+ type args struct {
+ filtersBefore []logpoller.Filter
+ filtersNow []logpoller.Filter
+ }
+ tests := []struct {
+ name string
+ args args
+ wantCreated []logpoller.Filter
+ wantDeleted []logpoller.Filter
+ }{
+ {
+ name: "no diff, both empty",
+ args: args{
+ filtersBefore: []logpoller.Filter{},
+ filtersNow: []logpoller.Filter{},
+ },
+ wantCreated: []logpoller.Filter{},
+ wantDeleted: []logpoller.Filter{},
+ },
+ {
+ name: "no diff, both non-empty",
+ args: args{
+ filtersBefore: []logpoller.Filter{{Name: "a"}},
+ filtersNow: []logpoller.Filter{{Name: "a"}},
+ },
+ wantCreated: []logpoller.Filter{},
+ wantDeleted: []logpoller.Filter{},
+ },
+ {
+ name: "no diff, only name matters",
+ args: args{
+ filtersBefore: []logpoller.Filter{{Name: "a", Retention: time.Minute}},
+ filtersNow: []logpoller.Filter{{Name: "a", Retention: time.Second}},
+ },
+ wantCreated: []logpoller.Filter{},
+ wantDeleted: []logpoller.Filter{},
+ },
+ {
+ name: "diff for both created and deleted",
+ args: args{
+ filtersBefore: []logpoller.Filter{{Name: "e"}, {Name: "a"}, {Name: "b"}},
+ filtersNow: []logpoller.Filter{{Name: "a"}, {Name: "c"}, {Name: "d"}},
+ },
+ wantCreated: []logpoller.Filter{{Name: "c"}, {Name: "d"}},
+ wantDeleted: []logpoller.Filter{{Name: "e"}, {Name: "b"}},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotCreated, gotDeleted := FiltersDiff(tt.args.filtersBefore, tt.args.filtersNow)
+ assert.Equalf(t, tt.wantCreated, gotCreated, "filtersDiff(%v, %v)", tt.args.filtersBefore, tt.args.filtersNow)
+ assert.Equalf(t, tt.wantDeleted, gotDeleted, "filtersDiff(%v, %v)", tt.args.filtersBefore, tt.args.filtersNow)
+ })
+ }
+}
+
+func Test_filterContainsZeroAddress(t *testing.T) {
+ type args struct {
+ addrs []common.Address
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "non-zero addrs",
+ args: args{
+ addrs: []common.Address{
+ common.HexToAddress("1"),
+ common.HexToAddress("2"),
+ common.HexToAddress("3"),
+ },
+ },
+ want: false,
+ },
+ {
+ name: "empty",
+ args: args{addrs: []common.Address{}},
+ want: false,
+ },
+ {
+ name: "zero addr",
+ args: args{
+ addrs: []common.Address{
+ common.HexToAddress("1"),
+ common.HexToAddress("0"),
+ common.HexToAddress("2"),
+ common.HexToAddress("3"),
+ },
+ },
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.Equalf(t, tt.want, filterContainsZeroAddress(tt.args.addrs), "filterContainsZeroAddress(%v)", tt.args.addrs)
+ })
+ }
+}
+
+func Test_containsFilter(t *testing.T) {
+ type args struct {
+ filters []logpoller.Filter
+ f logpoller.Filter
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "empty",
+ args: args{
+ filters: []logpoller.Filter{},
+ f: logpoller.Filter{},
+ },
+ want: false,
+ },
+ {
+ name: "contains",
+ args: args{
+ filters: []logpoller.Filter{{Name: "a"}, {Name: "b"}},
+ f: logpoller.Filter{Name: "b"},
+ },
+ want: true,
+ },
+ {
+ name: "does not contain",
+ args: args{
+ filters: []logpoller.Filter{{Name: "a"}, {Name: "b"}},
+ f: logpoller.Filter{Name: "c"},
+ },
+ want: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.Equalf(t, tt.want,
+ containsFilter(tt.args.filters, tt.args.f), "containsFilter(%v, %v)", tt.args.filters, tt.args.f)
+ })
+ }
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/observability/commit_store.go b/core/services/ocr2/plugins/ccip/internal/observability/commit_store.go
new file mode 100644
index 00000000000..6a1fb48f498
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/observability/commit_store.go
@@ -0,0 +1,75 @@
+package observability
+
+import (
+ "context"
+ "time"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+)
+
// ObservedCommitStoreReader wraps a CommitStoreReader and records per-call
// latency (and, for slice-returning calls, result-set size) metrics.
// Methods not overridden here pass through to the embedded reader unobserved.
type ObservedCommitStoreReader struct {
	ccipdata.CommitStoreReader
	metric metricDetails // shared metric vectors plus static label values
}
+
+func NewObservedCommitStoreReader(origin ccipdata.CommitStoreReader, chainID int64, pluginName string) *ObservedCommitStoreReader {
+ return &ObservedCommitStoreReader{
+ CommitStoreReader: origin,
+ metric: metricDetails{
+ interactionDuration: readerHistogram,
+ resultSetSize: readerDatasetSize,
+ pluginName: pluginName,
+ readerName: "CommitStoreReader",
+ chainId: chainID,
+ },
+ }
+}
+
+func (o *ObservedCommitStoreReader) GetExpectedNextSequenceNumber(context context.Context) (uint64, error) {
+ return withObservedInteraction(o.metric, "GetExpectedNextSequenceNumber", func() (uint64, error) {
+ return o.CommitStoreReader.GetExpectedNextSequenceNumber(context)
+ })
+}
+
+func (o *ObservedCommitStoreReader) GetLatestPriceEpochAndRound(context context.Context) (uint64, error) {
+ return withObservedInteraction(o.metric, "GetLatestPriceEpochAndRound", func() (uint64, error) {
+ return o.CommitStoreReader.GetLatestPriceEpochAndRound(context)
+ })
+}
+
// GetCommitReportMatchingSeqNum delegates to the wrapped reader, recording
// call duration and result-set size.
func (o *ObservedCommitStoreReader) GetCommitReportMatchingSeqNum(ctx context.Context, seqNum uint64, confs int) ([]cciptypes.CommitStoreReportWithTxMeta, error) {
	return withObservedInteractionAndResults(o.metric, "GetCommitReportMatchingSeqNum", func() ([]cciptypes.CommitStoreReportWithTxMeta, error) {
		return o.CommitStoreReader.GetCommitReportMatchingSeqNum(ctx, seqNum, confs)
	})
}

// GetAcceptedCommitReportsGteTimestamp delegates to the wrapped reader,
// recording call duration and result-set size.
func (o *ObservedCommitStoreReader) GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confs int) ([]cciptypes.CommitStoreReportWithTxMeta, error) {
	return withObservedInteractionAndResults(o.metric, "GetAcceptedCommitReportsGteTimestamp", func() ([]cciptypes.CommitStoreReportWithTxMeta, error) {
		return o.CommitStoreReader.GetAcceptedCommitReportsGteTimestamp(ctx, ts, confs)
	})
}

// IsDown delegates to the wrapped reader, recording call duration.
func (o *ObservedCommitStoreReader) IsDown(ctx context.Context) (bool, error) {
	return withObservedInteraction(o.metric, "IsDown", func() (bool, error) {
		return o.CommitStoreReader.IsDown(ctx)
	})
}

// IsBlessed delegates to the wrapped reader, recording call duration.
func (o *ObservedCommitStoreReader) IsBlessed(ctx context.Context, root [32]byte) (bool, error) {
	return withObservedInteraction(o.metric, "IsBlessed", func() (bool, error) {
		return o.CommitStoreReader.IsBlessed(ctx, root)
	})
}

// VerifyExecutionReport delegates to the wrapped reader, recording call duration.
func (o *ObservedCommitStoreReader) VerifyExecutionReport(ctx context.Context, report cciptypes.ExecReport) (bool, error) {
	return withObservedInteraction(o.metric, "VerifyExecutionReport", func() (bool, error) {
		return o.CommitStoreReader.VerifyExecutionReport(ctx, report)
	})
}

// GetCommitStoreStaticConfig delegates to the wrapped reader, recording call duration.
func (o *ObservedCommitStoreReader) GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error) {
	return withObservedInteraction(o.metric, "GetCommitStoreStaticConfig", func() (cciptypes.CommitStoreStaticConfig, error) {
		return o.CommitStoreReader.GetCommitStoreStaticConfig(ctx)
	})
}
diff --git a/core/services/ocr2/plugins/ccip/internal/observability/metrics.go b/core/services/ocr2/plugins/ccip/internal/observability/metrics.go
new file mode 100644
index 00000000000..9e161fdd9ae
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/observability/metrics.go
@@ -0,0 +1,75 @@
+package observability
+
+import (
+ "strconv"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+)
+
var (
	// latencyBuckets are the histogram bucket upper bounds. Each value is a
	// time.Duration converted to float64 (i.e. nanoseconds), matching what
	// withObservedInteraction records via Observe(float64(time.Since(...))).
	latencyBuckets = []float64{
		float64(10 * time.Millisecond),
		float64(25 * time.Millisecond),
		float64(50 * time.Millisecond),
		float64(75 * time.Millisecond),
		float64(100 * time.Millisecond),
		float64(200 * time.Millisecond),
		float64(300 * time.Millisecond),
		float64(400 * time.Millisecond),
		float64(500 * time.Millisecond),
		float64(750 * time.Millisecond),
		float64(1 * time.Second),
		float64(2 * time.Second),
		float64(3 * time.Second),
		float64(4 * time.Second),
	}
	// labels is the label set shared by both reader metrics below.
	labels          = []string{"evmChainID", "plugin", "reader", "function", "success"}
	readerHistogram = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "ccip_reader_duration",
		Help:    "Duration of calls to Reader instance",
		Buckets: latencyBuckets,
	}, labels)
	readerDatasetSize = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "ccip_reader_dataset_size",
		Help: "Size of the dataset returned from the Reader instance",
	}, labels)
)

// metricDetails bundles the shared metric vectors with the static label
// values identifying one observed reader instance.
type metricDetails struct {
	interactionDuration *prometheus.HistogramVec // duration of each observed call
	resultSetSize       *prometheus.GaugeVec     // size of slice-returning results
	pluginName          string
	readerName          string
	chainId             int64
}
+
+func withObservedInteraction[T any](metric metricDetails, function string, f func() (T, error)) (T, error) {
+ contractExecutionStarted := time.Now()
+ value, err := f()
+ metric.interactionDuration.
+ WithLabelValues(
+ strconv.FormatInt(metric.chainId, 10),
+ metric.pluginName,
+ metric.readerName,
+ function,
+ strconv.FormatBool(err == nil),
+ ).
+ Observe(float64(time.Since(contractExecutionStarted)))
+ return value, err
+}
+
+func withObservedInteractionAndResults[T any](metric metricDetails, function string, f func() ([]T, error)) ([]T, error) {
+ results, err := withObservedInteraction(metric, function, f)
+ if err == nil {
+ metric.resultSetSize.WithLabelValues(
+ strconv.FormatInt(metric.chainId, 10),
+ metric.pluginName,
+ metric.readerName,
+ function,
+ strconv.FormatBool(err == nil),
+ ).Set(float64(len(results)))
+ }
+ return results, err
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/observability/metrics_test.go b/core/services/ocr2/plugins/ccip/internal/observability/metrics_test.go
new file mode 100644
index 00000000000..3d84acf9616
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/observability/metrics_test.go
@@ -0,0 +1,87 @@
+package observability
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/prometheus/client_golang/prometheus"
+ io_prometheus_client "github.com/prometheus/client_model/go"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ ccipdatamocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks"
+)
+
+func TestProperLabelsArePassed(t *testing.T) {
+ histogram := readerHistogram
+ successCounter := 10
+ failedCounter := 5
+
+ details := metricDetails{
+ interactionDuration: histogram,
+ pluginName: "plugin",
+ readerName: "reader",
+ chainId: 123,
+ }
+
+ for i := 0; i < successCounter; i++ {
+ _, err := withObservedInteraction[string](details, "successFun", successfulContract)
+ require.NoError(t, err)
+ }
+
+ for i := 0; i < failedCounter; i++ {
+ _, err := withObservedInteraction[string](details, "failedFun", failedContract)
+ require.Error(t, err)
+ }
+
+ assert.Equal(t, successCounter, counterFromHistogramByLabels(t, histogram, "123", "plugin", "reader", "successFun", "true"))
+ assert.Equal(t, failedCounter, counterFromHistogramByLabels(t, histogram, "123", "plugin", "reader", "failedFun", "false"))
+
+ assert.Equal(t, 0, counterFromHistogramByLabels(t, histogram, "123", "plugin", "reader", "failedFun", "true"))
+ assert.Equal(t, 0, counterFromHistogramByLabels(t, histogram, "123", "plugin", "reader", "successFun", "false"))
+}
+
+func TestMetricsSendFromContractDirectly(t *testing.T) {
+ expectedCounter := 4
+ ctx := testutils.Context(t)
+ chainId := int64(420)
+
+ mockedOfframp := ccipdatamocks.NewOffRampReader(t)
+ mockedOfframp.On("GetTokens", ctx).Return(cciptypes.OffRampTokens{}, fmt.Errorf("execution error"))
+
+ observedOfframp := NewObservedOffRampReader(mockedOfframp, chainId, "plugin")
+
+ for i := 0; i < expectedCounter; i++ {
+ _, _ = observedOfframp.GetTokens(ctx)
+ }
+
+ assert.Equal(t, expectedCounter, counterFromHistogramByLabels(t, observedOfframp.metric.interactionDuration, "420", "plugin", "OffRampReader", "GetTokens", "false"))
+ assert.Equal(t, 0, counterFromHistogramByLabels(t, observedOfframp.metric.interactionDuration, "420", "plugin", "OffRampReader", "GetPoolByDestToken", "false"))
+ assert.Equal(t, 0, counterFromHistogramByLabels(t, observedOfframp.metric.interactionDuration, "420", "plugin", "OffRampReader", "GetPoolByDestToken", "true"))
+}
+
+func counterFromHistogramByLabels(t *testing.T, histogramVec *prometheus.HistogramVec, labels ...string) int {
+ observer, err := histogramVec.GetMetricWithLabelValues(labels...)
+ require.NoError(t, err)
+
+ metricCh := make(chan prometheus.Metric, 1)
+ observer.(prometheus.Histogram).Collect(metricCh)
+ close(metricCh)
+
+ metric := <-metricCh
+ pb := &io_prometheus_client.Metric{}
+ err = metric.Write(pb)
+ require.NoError(t, err)
+
+ return int(pb.GetHistogram().GetSampleCount())
+}
+
// successfulContract is a stub interaction that always succeeds.
func successfulContract() (string, error) {
	return "success", nil
}

// failedContract is a stub interaction that always fails.
func failedContract() (string, error) {
	return "", fmt.Errorf("just error")
}
diff --git a/core/services/ocr2/plugins/ccip/internal/observability/offramp.go b/core/services/ocr2/plugins/ccip/internal/observability/offramp.go
new file mode 100644
index 00000000000..b426bc8c91d
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/observability/offramp.go
@@ -0,0 +1,69 @@
+package observability
+
+import (
+ "context"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+)
+
// ObservedOffRampReader wraps an OffRampReader and records per-call latency
// metrics. Methods not overridden here pass through unobserved.
type ObservedOffRampReader struct {
	ccipdata.OffRampReader
	metric metricDetails // shared metric vectors plus static label values
}
+
+func NewObservedOffRampReader(origin ccipdata.OffRampReader, chainID int64, pluginName string) *ObservedOffRampReader {
+ return &ObservedOffRampReader{
+ OffRampReader: origin,
+ metric: metricDetails{
+ interactionDuration: readerHistogram,
+ resultSetSize: readerDatasetSize,
+ pluginName: pluginName,
+ readerName: "OffRampReader",
+ chainId: chainID,
+ },
+ }
+}
+
// GetExecutionStateChangesBetweenSeqNums delegates to the wrapped reader,
// recording call duration.
func (o *ObservedOffRampReader) GetExecutionStateChangesBetweenSeqNums(ctx context.Context, seqNumMin, seqNumMax uint64, confs int) ([]cciptypes.ExecutionStateChangedWithTxMeta, error) {
	return withObservedInteraction(o.metric, "GetExecutionStateChangesBetweenSeqNums", func() ([]cciptypes.ExecutionStateChangedWithTxMeta, error) {
		return o.OffRampReader.GetExecutionStateChangesBetweenSeqNums(ctx, seqNumMin, seqNumMax, confs)
	})
}

// CurrentRateLimiterState delegates to the wrapped reader, recording call duration.
func (o *ObservedOffRampReader) CurrentRateLimiterState(ctx context.Context) (cciptypes.TokenBucketRateLimit, error) {
	return withObservedInteraction(o.metric, "CurrentRateLimiterState", func() (cciptypes.TokenBucketRateLimit, error) {
		return o.OffRampReader.CurrentRateLimiterState(ctx)
	})
}

// GetExecutionState delegates to the wrapped reader, recording call duration.
func (o *ObservedOffRampReader) GetExecutionState(ctx context.Context, sequenceNumber uint64) (uint8, error) {
	return withObservedInteraction(o.metric, "GetExecutionState", func() (uint8, error) {
		return o.OffRampReader.GetExecutionState(ctx, sequenceNumber)
	})
}

// GetStaticConfig delegates to the wrapped reader, recording call duration.
func (o *ObservedOffRampReader) GetStaticConfig(ctx context.Context) (cciptypes.OffRampStaticConfig, error) {
	return withObservedInteraction(o.metric, "GetStaticConfig", func() (cciptypes.OffRampStaticConfig, error) {
		return o.OffRampReader.GetStaticConfig(ctx)
	})
}

// GetSourceToDestTokensMapping delegates to the wrapped reader, recording call duration.
func (o *ObservedOffRampReader) GetSourceToDestTokensMapping(ctx context.Context) (map[cciptypes.Address]cciptypes.Address, error) {
	return withObservedInteraction(o.metric, "GetSourceToDestTokensMapping", func() (map[cciptypes.Address]cciptypes.Address, error) {
		return o.OffRampReader.GetSourceToDestTokensMapping(ctx)
	})
}

// GetTokens delegates to the wrapped reader, recording call duration.
func (o *ObservedOffRampReader) GetTokens(ctx context.Context) (cciptypes.OffRampTokens, error) {
	return withObservedInteraction(o.metric, "GetTokens", func() (cciptypes.OffRampTokens, error) {
		return o.OffRampReader.GetTokens(ctx)
	})
}

// GetSendersNonce delegates to the wrapped reader's ListSenderNonces call.
// NOTE(review): the metric label ("ListSenderNonces") intentionally follows
// the underlying call rather than this method's name — confirm this is wanted.
func (o *ObservedOffRampReader) GetSendersNonce(ctx context.Context, senders []cciptypes.Address) (map[cciptypes.Address]uint64, error) {
	return withObservedInteraction(o.metric, "ListSenderNonces", func() (map[cciptypes.Address]uint64, error) {
		return o.OffRampReader.ListSenderNonces(ctx, senders)
	})
}
diff --git a/core/services/ocr2/plugins/ccip/internal/observability/onramp.go b/core/services/ocr2/plugins/ccip/internal/observability/onramp.go
new file mode 100644
index 00000000000..b167bd57b06
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/observability/onramp.go
@@ -0,0 +1,63 @@
+package observability
+
+import (
+ "context"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+)
+
// ObservedOnRampReader wraps an OnRampReader and records per-call latency
// (and, for slice-returning calls, result-set size) metrics.
type ObservedOnRampReader struct {
	ccipdata.OnRampReader
	metric metricDetails // shared metric vectors plus static label values
}
+
+func NewObservedOnRampReader(origin ccipdata.OnRampReader, chainID int64, pluginName string) *ObservedOnRampReader {
+ return &ObservedOnRampReader{
+ OnRampReader: origin,
+ metric: metricDetails{
+ interactionDuration: readerHistogram,
+ resultSetSize: readerDatasetSize,
+ pluginName: pluginName,
+ readerName: "OnRampReader",
+ chainId: chainID,
+ },
+ }
+}
+
// NOTE(review): these methods use value receivers while the other observed
// readers in this package use pointer receivers; left as-is because changing
// the receiver kind would alter the type's method set.

// GetSendRequestsBetweenSeqNums delegates to the wrapped reader, recording
// call duration and result-set size.
func (o ObservedOnRampReader) GetSendRequestsBetweenSeqNums(ctx context.Context, seqNumMin, seqNumMax uint64, finalized bool) ([]cciptypes.EVM2EVMMessageWithTxMeta, error) {
	return withObservedInteractionAndResults(o.metric, "GetSendRequestsBetweenSeqNums", func() ([]cciptypes.EVM2EVMMessageWithTxMeta, error) {
		return o.OnRampReader.GetSendRequestsBetweenSeqNums(ctx, seqNumMin, seqNumMax, finalized)
	})
}

// RouterAddress delegates to the wrapped reader, recording call duration.
func (o ObservedOnRampReader) RouterAddress(ctx context.Context) (cciptypes.Address, error) {
	return withObservedInteraction(o.metric, "RouterAddress", func() (cciptypes.Address, error) {
		return o.OnRampReader.RouterAddress(ctx)
	})
}

// GetDynamicConfig delegates to the wrapped reader, recording call duration.
func (o ObservedOnRampReader) GetDynamicConfig(ctx context.Context) (cciptypes.OnRampDynamicConfig, error) {
	return withObservedInteraction(o.metric, "GetDynamicConfig", func() (cciptypes.OnRampDynamicConfig, error) {
		return o.OnRampReader.GetDynamicConfig(ctx)
	})
}

// IsSourceCursed delegates to the wrapped reader, recording call duration.
func (o ObservedOnRampReader) IsSourceCursed(ctx context.Context) (bool, error) {
	return withObservedInteraction(o.metric, "IsSourceCursed", func() (bool, error) {
		return o.OnRampReader.IsSourceCursed(ctx)
	})
}

// IsSourceChainHealthy delegates to the wrapped reader, recording call duration.
func (o ObservedOnRampReader) IsSourceChainHealthy(ctx context.Context) (bool, error) {
	return withObservedInteraction(o.metric, "IsSourceChainHealthy", func() (bool, error) {
		return o.OnRampReader.IsSourceChainHealthy(ctx)
	})
}

// SourcePriceRegistryAddress delegates to the wrapped reader, recording call duration.
func (o ObservedOnRampReader) SourcePriceRegistryAddress(ctx context.Context) (cciptypes.Address, error) {
	return withObservedInteraction(o.metric, "SourcePriceRegistryAddress", func() (cciptypes.Address, error) {
		return o.OnRampReader.SourcePriceRegistryAddress(ctx)
	})
}
diff --git a/core/services/ocr2/plugins/ccip/internal/observability/onramp_observed_test.go b/core/services/ocr2/plugins/ccip/internal/observability/onramp_observed_test.go
new file mode 100644
index 00000000000..1918f632b94
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/observability/onramp_observed_test.go
@@ -0,0 +1,155 @@
+package observability
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks"
+)
+
+type MethodCall struct {
+ MethodName string
+ Arguments []interface{}
+ Returns []interface{}
+}
+
+// The class expected to override the observed methods.
+const expectedWrapper = "core/services/ocr2/plugins/ccip/internal/observability.ObservedOnRampReader"
+
+// TestOnRampObservedMethods tests that all methods of OnRampReader are observed by a wrapper.
+// It uses the runtime to detect if the call stack contains the wrapper class.
+func TestOnRampObservedMethods(t *testing.T) {
+ // Methods not expected to be observed.
+ // Add a method name here to exclude it from the test.
+ excludedMethods := []string{
+ "Address",
+ "Close",
+ }
+
+ // Defines the overridden method calls to test.
+ // Not defining a non-excluded method here will cause the test to fail with an explicit error.
+ methodCalls := make(map[string]MethodCall)
+ methodCalls["GetDynamicConfig"] = MethodCall{
+ MethodName: "GetDynamicConfig",
+ Arguments: []interface{}{testutils.Context(t)},
+ Returns: []interface{}{cciptypes.OnRampDynamicConfig{}, nil},
+ }
+ methodCalls["GetSendRequestsBetweenSeqNums"] = MethodCall{
+ MethodName: "GetSendRequestsBetweenSeqNums",
+ Arguments: []interface{}{testutils.Context(t), uint64(0), uint64(100), true},
+ Returns: []interface{}{nil, nil},
+ }
+ methodCalls["IsSourceChainHealthy"] = MethodCall{
+ MethodName: "IsSourceChainHealthy",
+ Arguments: []interface{}{testutils.Context(t)},
+ Returns: []interface{}{false, nil},
+ }
+ methodCalls["IsSourceCursed"] = MethodCall{
+ MethodName: "IsSourceCursed",
+ Arguments: []interface{}{testutils.Context(t)},
+ Returns: []interface{}{false, nil},
+ }
+ methodCalls["RouterAddress"] = MethodCall{
+ MethodName: "RouterAddress",
+ Arguments: []interface{}{testutils.Context(t)},
+ Returns: []interface{}{cciptypes.Address("0x0"), nil},
+ }
+ methodCalls["SourcePriceRegistryAddress"] = MethodCall{
+ MethodName: "SourcePriceRegistryAddress",
+ Arguments: []interface{}{testutils.Context(t)},
+ Returns: []interface{}{cciptypes.Address("0x0"), nil},
+ }
+
+ // Test each method defined in the embedded type.
+ observed, reader := buildReader(t)
+ observedType := reflect.TypeOf(observed)
+ for i := 0; i < observedType.NumMethod(); i++ {
+ method := observedType.Method(i)
+ testMethod(t, method, methodCalls, excludedMethods, reader, observed)
+ }
+}
+
+func testMethod(t *testing.T, method reflect.Method, methodCalls map[string]MethodCall, excludedMethods []string, reader *mocks.OnRampReader, observed ObservedOnRampReader) {
+ t.Run(fmt.Sprintf("observability_wrapper_%s", method.Name), func(t *testing.T) {
+ // Skip excluded methods.
+ for _, em := range excludedMethods {
+ if method.Name == em {
+			// Skipping excluded method (not an error).
+ return
+ }
+ }
+
+ // Retrieve method call from definition (fail if not present).
+ mc := methodCalls[method.Name]
+ if mc.MethodName == "" {
+ assert.Fail(t, fmt.Sprintf("method %s not defined in methodCalls, please define it or exclude it.", method.Name))
+ return
+ }
+
+ assertCallByWrapper(t, reader, mc)
+
+ // Perform call on observed object.
+ callParams := buildCallParams(mc)
+ methodc := reflect.ValueOf(&observed).MethodByName(mc.MethodName)
+ methodc.Call(callParams)
+ })
+}
+
+// Set the mock to fail if not called by the wrapper.
+func assertCallByWrapper(t *testing.T, reader *mocks.OnRampReader, mc MethodCall) {
+ reader.On(mc.MethodName, mc.Arguments...).Maybe().Return(mc.Returns...).Run(func(args mock.Arguments) {
+ var i = 0
+ var pc uintptr
+ var ok = true
+ for ok {
+ pc, _, _, ok = runtime.Caller(i)
+ f := runtime.FuncForPC(pc)
+ if strings.Contains(f.Name(), expectedWrapper) {
+ // Found the expected wrapper in the call stack.
+ return
+ }
+ i++
+ }
+ assert.Fail(t, fmt.Sprintf("method %s not observed by wrapper. Please implement the method or add it to the excluded list.", mc.MethodName))
+ })
+}
+
+func buildCallParams(mc MethodCall) []reflect.Value {
+ callParams := make([]reflect.Value, len(mc.Arguments))
+ for i, arg := range mc.Arguments {
+ callParams[i] = reflect.ValueOf(arg)
+ }
+ return callParams
+}
+
+// Build a mock reader and an observed wrapper to be used in the tests.
+func buildReader(t *testing.T) (ObservedOnRampReader, *mocks.OnRampReader) {
+ labels = []string{"evmChainID", "plugin", "reader", "function", "success"}
+ ph := promauto.NewHistogramVec(prometheus.HistogramOpts{
+ Name: "test_histogram",
+ }, labels)
+ pg := promauto.NewGaugeVec(prometheus.GaugeOpts{
+ Name: "test_gauge",
+ }, labels)
+ metric := metricDetails{
+ interactionDuration: ph,
+ resultSetSize: pg,
+ pluginName: "test plugin",
+ readerName: "test reader",
+ chainId: 1337,
+ }
+ reader := mocks.NewOnRampReader(t)
+ observed := ObservedOnRampReader{reader, metric}
+ return observed, reader
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/observability/price_registry.go b/core/services/ocr2/plugins/ccip/internal/observability/price_registry.go
new file mode 100644
index 00000000000..f5b87686d35
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/observability/price_registry.go
@@ -0,0 +1,64 @@
+package observability
+
+import (
+ "context"
+ "time"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+)
+
+type ObservedPriceRegistryReader struct {
+ ccipdata.PriceRegistryReader
+ metric metricDetails
+}
+
+func NewPriceRegistryReader(origin ccipdata.PriceRegistryReader, chainID int64, pluginName string) *ObservedPriceRegistryReader {
+ return &ObservedPriceRegistryReader{
+ PriceRegistryReader: origin,
+ metric: metricDetails{
+ interactionDuration: readerHistogram,
+ resultSetSize: readerDatasetSize,
+ pluginName: pluginName,
+ readerName: "PriceRegistryReader",
+ chainId: chainID,
+ },
+ }
+}
+
+func (o *ObservedPriceRegistryReader) GetTokenPriceUpdatesCreatedAfter(ctx context.Context, ts time.Time, confs int) ([]cciptypes.TokenPriceUpdateWithTxMeta, error) {
+ return withObservedInteractionAndResults(o.metric, "GetTokenPriceUpdatesCreatedAfter", func() ([]cciptypes.TokenPriceUpdateWithTxMeta, error) {
+ return o.PriceRegistryReader.GetTokenPriceUpdatesCreatedAfter(ctx, ts, confs)
+ })
+}
+
+func (o *ObservedPriceRegistryReader) GetGasPriceUpdatesCreatedAfter(ctx context.Context, chainSelector uint64, ts time.Time, confs int) ([]cciptypes.GasPriceUpdateWithTxMeta, error) {
+ return withObservedInteractionAndResults(o.metric, "GetGasPriceUpdatesCreatedAfter", func() ([]cciptypes.GasPriceUpdateWithTxMeta, error) {
+ return o.PriceRegistryReader.GetGasPriceUpdatesCreatedAfter(ctx, chainSelector, ts, confs)
+ })
+}
+
+func (o *ObservedPriceRegistryReader) GetAllGasPriceUpdatesCreatedAfter(ctx context.Context, ts time.Time, confs int) ([]cciptypes.GasPriceUpdateWithTxMeta, error) {
+ return withObservedInteractionAndResults(o.metric, "GetAllGasPriceUpdatesCreatedAfter", func() ([]cciptypes.GasPriceUpdateWithTxMeta, error) {
+ return o.PriceRegistryReader.GetAllGasPriceUpdatesCreatedAfter(ctx, ts, confs)
+ })
+}
+
+func (o *ObservedPriceRegistryReader) GetFeeTokens(ctx context.Context) ([]cciptypes.Address, error) {
+ return withObservedInteraction(o.metric, "GetFeeTokens", func() ([]cciptypes.Address, error) {
+ return o.PriceRegistryReader.GetFeeTokens(ctx)
+ })
+}
+
+func (o *ObservedPriceRegistryReader) GetTokenPrices(ctx context.Context, wantedTokens []cciptypes.Address) ([]cciptypes.TokenPriceUpdate, error) {
+ return withObservedInteractionAndResults(o.metric, "GetTokenPrices", func() ([]cciptypes.TokenPriceUpdate, error) {
+ return o.PriceRegistryReader.GetTokenPrices(ctx, wantedTokens)
+ })
+}
+
+func (o *ObservedPriceRegistryReader) GetTokensDecimals(ctx context.Context, tokenAddresses []cciptypes.Address) ([]uint8, error) {
+ return withObservedInteractionAndResults(o.metric, "GetTokensDecimals", func() ([]uint8, error) {
+ return o.PriceRegistryReader.GetTokensDecimals(ctx, tokenAddresses)
+ })
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle.go b/core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle.go
new file mode 100644
index 00000000000..d2851e3a079
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle.go
@@ -0,0 +1,218 @@
+package oraclelib
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services"
+
+ "go.uber.org/multierr"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+)
+
+type BackfilledOracle struct {
+ srcStartBlock, dstStartBlock uint64
+ oracleStarted atomic.Bool
+ cancelFn context.CancelFunc
+ src, dst logpoller.LogPoller
+ oracle job.ServiceCtx
+ lggr logger.Logger
+}
+
+func NewBackfilledOracle(lggr logger.Logger, src, dst logpoller.LogPoller, srcStartBlock, dstStartBlock uint64, oracle job.ServiceCtx) *BackfilledOracle {
+ return &BackfilledOracle{
+ srcStartBlock: srcStartBlock,
+ dstStartBlock: dstStartBlock,
+ oracleStarted: atomic.Bool{},
+ cancelFn: nil,
+ src: src,
+ dst: dst,
+ oracle: oracle,
+ lggr: lggr,
+ }
+}
+
+func (r *BackfilledOracle) Start(_ context.Context) error {
+ go r.Run()
+ return nil
+}
+
+func (r *BackfilledOracle) IsRunning() bool {
+ return r.oracleStarted.Load()
+}
+
+func (r *BackfilledOracle) Run() {
+ ctx, cancelFn := context.WithCancel(context.Background())
+ r.cancelFn = cancelFn
+ var err error
+ var errMu sync.Mutex
+ var wg sync.WaitGroup
+ // Replay in parallel if both requested.
+ if r.srcStartBlock != 0 {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ s := time.Now()
+ r.lggr.Infow("start replaying src chain", "fromBlock", r.srcStartBlock)
+ srcReplayErr := r.src.Replay(ctx, int64(r.srcStartBlock))
+ errMu.Lock()
+ err = multierr.Combine(err, srcReplayErr)
+ errMu.Unlock()
+ r.lggr.Infow("finished replaying src chain", "time", time.Since(s))
+ }()
+ }
+ if r.dstStartBlock != 0 {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ s := time.Now()
+ r.lggr.Infow("start replaying dst chain", "fromBlock", r.dstStartBlock)
+ dstReplayErr := r.dst.Replay(ctx, int64(r.dstStartBlock))
+ errMu.Lock()
+ err = multierr.Combine(err, dstReplayErr)
+ errMu.Unlock()
+ r.lggr.Infow("finished replaying dst chain", "time", time.Since(s))
+ }()
+ }
+ wg.Wait()
+ if err != nil {
+ r.lggr.Criticalw("unexpected error replaying, continuing plugin boot without all the logs backfilled", "err", err)
+ }
+ if err := ctx.Err(); err != nil {
+ r.lggr.Errorw("context already cancelled", "err", err)
+ return
+ }
+ // Start oracle with all logs present from dstStartBlock on dst and
+ // all logs from srcStartBlock on src.
+ if err := r.oracle.Start(ctx); err != nil {
+ // Should never happen.
+ r.lggr.Errorw("unexpected error starting oracle", "err", err)
+ } else {
+ r.oracleStarted.Store(true)
+ }
+}
+
+func (r *BackfilledOracle) Close() error {
+ if r.oracleStarted.Load() {
+ // If the oracle is running, it must be Closed/stopped
+ if err := r.oracle.Close(); err != nil {
+ r.lggr.Errorw("unexpected error stopping oracle", "err", err)
+ return err
+ }
+ // Flag the oracle as closed with our internal variable that keeps track
+ // of its state. This will allow to re-start the process
+ r.oracleStarted.Store(false)
+ }
+ if r.cancelFn != nil {
+		// This is useful to stop the previous tasks that are spawned in
+		// parallel before starting the Oracle. It uses the context to
+		// signal them to exit immediately.
+		//
+		// This can be the only way to stop the Start() async
+		// flow, especially when the previously spawned tasks (the replays)
+		// are still running and `oracleStarted` is false. Calling `cancelFn()`
+		// stops the replays and prevents the oracle from starting.
+ r.cancelFn()
+ }
+ return nil
+}
+
+func NewChainAgnosticBackFilledOracle(lggr logger.Logger, srcProvider services.ServiceCtx, dstProvider services.ServiceCtx, oracle job.ServiceCtx) *ChainAgnosticBackFilledOracle {
+ return &ChainAgnosticBackFilledOracle{
+ srcProvider: srcProvider,
+ dstProvider: dstProvider,
+ oracle: oracle,
+ lggr: lggr,
+ }
+}
+
+type ChainAgnosticBackFilledOracle struct {
+ srcProvider services.ServiceCtx
+ dstProvider services.ServiceCtx
+ oracle job.ServiceCtx
+ lggr logger.Logger
+ oracleStarted atomic.Bool
+ cancelFn context.CancelFunc
+}
+
+func (r *ChainAgnosticBackFilledOracle) Start(_ context.Context) error {
+ go r.run()
+ return nil
+}
+
+func (r *ChainAgnosticBackFilledOracle) run() {
+ ctx, cancelFn := context.WithCancel(context.Background())
+ r.cancelFn = cancelFn
+ var err error
+ var errMu sync.Mutex
+ var wg sync.WaitGroup
+ // Replay in parallel if both requested.
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ s := time.Now()
+ srcReplayErr := r.srcProvider.Start(ctx)
+ errMu.Lock()
+ err = multierr.Combine(err, srcReplayErr)
+ errMu.Unlock()
+ r.lggr.Infow("finished replaying src chain", "time", time.Since(s))
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ s := time.Now()
+ dstReplayErr := r.dstProvider.Start(ctx)
+ errMu.Lock()
+ err = multierr.Combine(err, dstReplayErr)
+ errMu.Unlock()
+ r.lggr.Infow("finished replaying dst chain", "time", time.Since(s))
+ }()
+
+ wg.Wait()
+ if err != nil {
+ r.lggr.Criticalw("unexpected error replaying, continuing plugin boot without all the logs backfilled", "err", err)
+ }
+ if err := ctx.Err(); err != nil {
+ r.lggr.Errorw("context already cancelled", "err", err)
+ }
+ // Start oracle with all logs present from dstStartBlock on dst and
+ // all logs from srcStartBlock on src.
+ if err := r.oracle.Start(ctx); err != nil {
+ // Should never happen.
+ r.lggr.Errorw("unexpected error starting oracle", "err", err)
+ } else {
+ r.oracleStarted.Store(true)
+ }
+}
+
+func (r *ChainAgnosticBackFilledOracle) Close() error {
+ if r.oracleStarted.Load() {
+ // If the oracle is running, it must be Closed/stopped
+ // TODO: Close should be safe to call in either case?
+ if err := r.oracle.Close(); err != nil {
+ r.lggr.Errorw("unexpected error stopping oracle", "err", err)
+ return err
+ }
+ // Flag the oracle as closed with our internal variable that keeps track
+ // of its state. This will allow to re-start the process
+ r.oracleStarted.Store(false)
+ }
+ if r.cancelFn != nil {
+		// This is useful to stop the previous tasks that are spawned in
+		// parallel before starting the Oracle. It uses the context to
+		// signal them to exit immediately.
+		//
+		// This can be the only way to stop the Start() async
+		// flow, especially when the previously spawned tasks (the replays)
+		// are still running and `oracleStarted` is false. Calling `cancelFn()`
+		// stops the replays and prevents the oracle from starting.
+ r.cancelFn()
+ }
+ return nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle_test.go b/core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle_test.go
new file mode 100644
index 00000000000..6db1ebbadd9
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle_test.go
@@ -0,0 +1,56 @@
+package oraclelib
+
+import (
+ "testing"
+
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ jobmocks "github.com/smartcontractkit/chainlink/v2/core/services/job/mocks"
+)
+
+func TestBackfilledOracle(t *testing.T) {
+ // First scenario: Start() fails, check that all Replay are being called.
+ lp1 := lpmocks.NewLogPoller(t)
+ lp2 := lpmocks.NewLogPoller(t)
+ lp1.On("Replay", mock.Anything, int64(1)).Return(nil)
+ lp2.On("Replay", mock.Anything, int64(2)).Return(nil)
+ oracle1 := jobmocks.NewServiceCtx(t)
+ oracle1.On("Start", mock.Anything).Return(errors.New("Failed to start")).Twice()
+ job := NewBackfilledOracle(logger.TestLogger(t), lp1, lp2, 1, 2, oracle1)
+
+ job.Run()
+ assert.False(t, job.IsRunning())
+ job.Run()
+ assert.False(t, job.IsRunning())
+
+ /// Start -> Stop -> Start
+ oracle2 := jobmocks.NewServiceCtx(t)
+ oracle2.On("Start", mock.Anything).Return(nil).Twice()
+ oracle2.On("Close").Return(nil).Once()
+
+ job2 := NewBackfilledOracle(logger.TestLogger(t), lp1, lp2, 1, 2, oracle2)
+ job2.Run()
+ assert.True(t, job2.IsRunning())
+ assert.Nil(t, job2.Close())
+ assert.False(t, job2.IsRunning())
+ assert.Nil(t, job2.Close())
+ assert.False(t, job2.IsRunning())
+ job2.Run()
+ assert.True(t, job2.IsRunning())
+
+ /// Replay fails, but it starts anyway
+ lp11 := lpmocks.NewLogPoller(t)
+ lp12 := lpmocks.NewLogPoller(t)
+ lp11.On("Replay", mock.Anything, int64(1)).Return(errors.New("Replay failed")).Once()
+ lp12.On("Replay", mock.Anything, int64(2)).Return(errors.New("Replay failed")).Once()
+
+ oracle := jobmocks.NewServiceCtx(t)
+ oracle.On("Start", mock.Anything).Return(nil).Once()
+ job3 := NewBackfilledOracle(logger.NullLogger, lp11, lp12, 1, 2, oracle)
+ job3.Run()
+ assert.True(t, job3.IsRunning())
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/parseutil/bigint.go b/core/services/ocr2/plugins/ccip/internal/parseutil/bigint.go
new file mode 100644
index 00000000000..48d0d261653
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/parseutil/bigint.go
@@ -0,0 +1,44 @@
+package parseutil
+
+import (
+ "math/big"
+
+ "github.com/pkg/errors"
+ "github.com/shopspring/decimal"
+)
+
+func ParseBigIntFromAny(val any) (*big.Int, error) {
+ if val == nil {
+ return nil, errors.Errorf("nil value passed")
+ }
+
+ switch v := val.(type) {
+ case decimal.Decimal:
+ return ParseBigIntFromString(v.String())
+ case *decimal.Decimal:
+ return ParseBigIntFromString(v.String())
+ case *big.Int:
+ return v, nil
+ case string:
+ return ParseBigIntFromString(v)
+ case int:
+ return big.NewInt(int64(v)), nil
+ case int64:
+ return big.NewInt(v), nil
+ case float64:
+ i := new(big.Int)
+ big.NewFloat(v).Int(i)
+ return i, nil
+ default:
+ return nil, errors.Errorf("unsupported big int type %T", val)
+ }
+}
+
+func ParseBigIntFromString(v string) (*big.Int, error) {
+ valBigInt, success := new(big.Int).SetString(v, 10)
+ if !success {
+ return nil, errors.Errorf("unable to convert to integer %s", v)
+ }
+
+ return valBigInt, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/parseutil/bigint_test.go b/core/services/ocr2/plugins/ccip/internal/parseutil/bigint_test.go
new file mode 100644
index 00000000000..cea2f8cc19c
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/parseutil/bigint_test.go
@@ -0,0 +1,42 @@
+package parseutil
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/shopspring/decimal"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParseBigIntFromAny(t *testing.T) {
+ decimalVal := decimal.New(123, 0)
+
+ testCases := []struct {
+ name string
+ val any
+ res *big.Int
+ expErr bool
+ }{
+ {name: "nil", val: nil, expErr: true},
+ {name: "string", val: "123", res: big.NewInt(123)},
+ {name: "decimal", val: decimal.New(123, 0), res: big.NewInt(123)},
+ {name: "decimal pointer", val: &decimalVal, res: big.NewInt(123)},
+ {name: "int64", val: int64(123), res: big.NewInt(123)},
+ {name: "int", val: 123, res: big.NewInt(123)},
+ {name: "float", val: 123.12, res: big.NewInt(123)},
+ {name: "uint8", val: uint8(12), expErr: true},
+ {name: "struct", val: struct{ name string }{name: "asd"}, expErr: true},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ res, err := ParseBigIntFromAny(tc.val)
+ if tc.expErr {
+ assert.Error(t, err)
+ return
+ }
+ assert.NoError(t, err)
+ assert.Equal(t, tc.res, res)
+ })
+ }
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/pricegetter/evm.go b/core/services/ocr2/plugins/ccip/internal/pricegetter/evm.go
new file mode 100644
index 00000000000..ed54428bd9b
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/pricegetter/evm.go
@@ -0,0 +1,239 @@
+package pricegetter
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "strings"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/common"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/internal/gethwrappers2/generated/offchainaggregator"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+)
+
+const decimalsMethodName = "decimals"
+const latestRoundDataMethodName = "latestRoundData"
+
+func init() {
+ // Ensure existence of latestRoundData method on the Aggregator contract.
+ aggregatorABI, err := abi.JSON(strings.NewReader(offchainaggregator.OffchainAggregatorABI))
+ if err != nil {
+ panic(err)
+ }
+ ensureMethodOnContract(aggregatorABI, decimalsMethodName)
+ ensureMethodOnContract(aggregatorABI, latestRoundDataMethodName)
+}
+
+func ensureMethodOnContract(abi abi.ABI, methodName string) {
+ if _, ok := abi.Methods[methodName]; !ok {
+ panic(fmt.Errorf("method %s not found on ABI: %+v", methodName, abi.Methods))
+ }
+}
+
+type DynamicPriceGetterClient struct {
+ BatchCaller rpclib.EvmBatchCaller
+}
+
+func NewDynamicPriceGetterClient(batchCaller rpclib.EvmBatchCaller) DynamicPriceGetterClient {
+ return DynamicPriceGetterClient{
+ BatchCaller: batchCaller,
+ }
+}
+
+type DynamicPriceGetter struct {
+ cfg config.DynamicPriceGetterConfig
+ evmClients map[uint64]DynamicPriceGetterClient
+ aggregatorAbi abi.ABI
+}
+
+func NewDynamicPriceGetterConfig(configJson string) (config.DynamicPriceGetterConfig, error) {
+ priceGetterConfig := config.DynamicPriceGetterConfig{}
+ err := json.Unmarshal([]byte(configJson), &priceGetterConfig)
+ if err != nil {
+ return config.DynamicPriceGetterConfig{}, fmt.Errorf("parsing dynamic price getter config: %w", err)
+ }
+ err = priceGetterConfig.Validate()
+ if err != nil {
+ return config.DynamicPriceGetterConfig{}, fmt.Errorf("validating price getter config: %w", err)
+ }
+ return priceGetterConfig, nil
+}
+
+// NewDynamicPriceGetter builds a DynamicPriceGetter from a configuration and a map of chain ID to batch callers.
+// A batch caller should be provided for all retrieved prices.
+func NewDynamicPriceGetter(cfg config.DynamicPriceGetterConfig, evmClients map[uint64]DynamicPriceGetterClient) (*DynamicPriceGetter, error) {
+ if err := cfg.Validate(); err != nil {
+ return nil, fmt.Errorf("validating dynamic price getter config: %w", err)
+ }
+ aggregatorAbi, err := abi.JSON(strings.NewReader(offchainaggregator.OffchainAggregatorABI))
+ if err != nil {
+ return nil, fmt.Errorf("parsing offchainaggregator abi: %w", err)
+ }
+ priceGetter := DynamicPriceGetter{cfg, evmClients, aggregatorAbi}
+ return &priceGetter, nil
+}
+
+// FilterConfiguredTokens implements the PriceGetter interface.
+// It filters a list of token addresses for only those that have a price resolution rule configured on the PriceGetterConfig
+func (d *DynamicPriceGetter) FilterConfiguredTokens(ctx context.Context, tokens []cciptypes.Address) (configured []cciptypes.Address, unconfigured []cciptypes.Address, err error) {
+ configured = []cciptypes.Address{}
+ unconfigured = []cciptypes.Address{}
+ for _, tk := range tokens {
+ evmAddr, err := ccipcalc.GenericAddrToEvm(tk)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if _, isAgg := d.cfg.AggregatorPrices[evmAddr]; isAgg {
+ configured = append(configured, tk)
+ } else if _, isStatic := d.cfg.StaticPrices[evmAddr]; isStatic {
+ configured = append(configured, tk)
+ } else {
+ unconfigured = append(unconfigured, tk)
+ }
+ }
+ return configured, unconfigured, nil
+}
+
+// TokenPricesUSD implements the PriceGetter interface.
+// It returns static prices stored in the price getter, and batch calls aggregators (one per chain) to retrieve aggregator-based prices.
+func (d *DynamicPriceGetter) TokenPricesUSD(ctx context.Context, tokens []cciptypes.Address) (map[cciptypes.Address]*big.Int, error) {
+ prices, batchCallsPerChain, err := d.preparePricesAndBatchCallsPerChain(tokens)
+ if err != nil {
+ return nil, err
+ }
+ if err = d.performBatchCalls(ctx, batchCallsPerChain, prices); err != nil {
+ return nil, err
+ }
+ return prices, nil
+}
+
+// performBatchCalls performs batch calls on all chains to retrieve token prices.
+func (d *DynamicPriceGetter) performBatchCalls(ctx context.Context, batchCallsPerChain map[uint64]*batchCallsForChain, prices map[cciptypes.Address]*big.Int) error {
+ for chainID, batchCalls := range batchCallsPerChain {
+ if err := d.performBatchCall(ctx, chainID, batchCalls, prices); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// performBatchCall performs a batch call on a given chain to retrieve token prices.
+func (d *DynamicPriceGetter) performBatchCall(ctx context.Context, chainID uint64, batchCalls *batchCallsForChain, prices map[cciptypes.Address]*big.Int) error {
+	// Retrieve the EVM caller for the chain.
+	client, exists := d.evmClients[chainID]
+	if !exists {
+		return fmt.Errorf("evm caller for chain %d not found", chainID)
+	}
+	evmCaller := client.BatchCaller
+
+	nbDecimalCalls := len(batchCalls.decimalCalls)
+	nbLatestRoundDataCalls := len(batchCalls.latestRoundDataCalls)
+
+	// Perform batched call (all decimals calls followed by latest round data calls).
+	calls := make([]rpclib.EvmCall, 0, nbDecimalCalls+nbLatestRoundDataCalls)
+	calls = append(calls, batchCalls.decimalCalls...)
+	calls = append(calls, batchCalls.latestRoundDataCalls...)
+
+	results, err := evmCaller.BatchCall(ctx, 0, calls)
+	if err != nil {
+		return fmt.Errorf("batch call on chain %d failed: %w", chainID, err)
+	}
+
+	// Extract results.
+	decimals := make([]uint8, 0, nbDecimalCalls)
+	latestRounds := make([]*big.Int, 0, nbLatestRoundDataCalls)
+
+	for i, res := range results[0:nbDecimalCalls] {
+		v, err1 := rpclib.ParseOutput[uint8](res, 0)
+		if err1 != nil {
+			callSignature := batchCalls.decimalCalls[i].String()
+			return fmt.Errorf("parse contract output while calling %v on chain %d: %w", callSignature, chainID, err1)
+		}
+		decimals = append(decimals, v)
+	}
+
+	for i, res := range results[nbDecimalCalls : nbDecimalCalls+nbLatestRoundDataCalls] {
+		// latestRoundData function has multiple outputs (roundId,answer,startedAt,updatedAt,answeredInRound).
+		// we want the second one (answer, at idx=1).
+		v, err1 := rpclib.ParseOutput[*big.Int](res, 1)
+		if err1 != nil {
+			callSignature := batchCalls.latestRoundDataCalls[i].String()
+			return fmt.Errorf("parse contract output while calling %v on chain %d: %w", callSignature, chainID, err1)
+		}
+		latestRounds = append(latestRounds, v)
+	}
+
+	// Normalize and store prices.
+	for i := range batchCalls.tokenOrder {
+		// Normalize to 1e18.
+		if decimals[i] < 18 {
+			latestRounds[i].Mul(latestRounds[i], big.NewInt(0).Exp(big.NewInt(10), big.NewInt(18-int64(decimals[i])), nil))
+		} else if decimals[i] > 18 {
+			latestRounds[i].Div(latestRounds[i], big.NewInt(0).Exp(big.NewInt(10), big.NewInt(int64(decimals[i])-18), nil))
+		}
+		prices[ccipcalc.EvmAddrToGeneric(batchCalls.tokenOrder[i])] = latestRounds[i]
+	}
+	return nil
+}
+
+// preparePricesAndBatchCallsPerChain uses this price getter to prepare for a list of tokens:
+// - the map of token address to their prices (static prices)
+// - the map of and batch calls per chain for the given tokens (dynamic prices)
+func (d *DynamicPriceGetter) preparePricesAndBatchCallsPerChain(tokens []cciptypes.Address) (map[cciptypes.Address]*big.Int, map[uint64]*batchCallsForChain, error) {
+ prices := make(map[cciptypes.Address]*big.Int, len(tokens))
+ batchCallsPerChain := make(map[uint64]*batchCallsForChain)
+ evmAddrs, err := ccipcalc.GenericAddrsToEvm(tokens...)
+ if err != nil {
+ return nil, nil, err
+ }
+ for _, tk := range evmAddrs {
+ if aggCfg, isAgg := d.cfg.AggregatorPrices[tk]; isAgg {
+ // Batch calls for aggregator-based token prices (one per chain).
+ if _, exists := batchCallsPerChain[aggCfg.ChainID]; !exists {
+ batchCallsPerChain[aggCfg.ChainID] = &batchCallsForChain{
+ decimalCalls: []rpclib.EvmCall{},
+ latestRoundDataCalls: []rpclib.EvmCall{},
+ tokenOrder: []common.Address{},
+ }
+ }
+ chainCalls := batchCallsPerChain[aggCfg.ChainID]
+ chainCalls.decimalCalls = append(chainCalls.decimalCalls, rpclib.NewEvmCall(
+ d.aggregatorAbi,
+ decimalsMethodName,
+ aggCfg.AggregatorContractAddress,
+ ))
+ chainCalls.latestRoundDataCalls = append(chainCalls.latestRoundDataCalls, rpclib.NewEvmCall(
+ d.aggregatorAbi,
+ latestRoundDataMethodName,
+ aggCfg.AggregatorContractAddress,
+ ))
+ chainCalls.tokenOrder = append(chainCalls.tokenOrder, tk)
+ } else if staticCfg, isStatic := d.cfg.StaticPrices[tk]; isStatic {
+ // Fill static prices.
+ prices[ccipcalc.EvmAddrToGeneric(tk)] = staticCfg.Price
+ } else {
+ return nil, nil, fmt.Errorf("no price resolution rule for token %s", tk.Hex())
+ }
+ }
+ return prices, batchCallsPerChain, nil
+}
+
+// batchCallsForChain Defines the batch calls to perform on a given chain.
+type batchCallsForChain struct {
+ decimalCalls []rpclib.EvmCall
+ latestRoundDataCalls []rpclib.EvmCall
+ tokenOrder []common.Address // required to maintain the order of the batched rpc calls for mapping the results.
+}
+
+func (d *DynamicPriceGetter) Close() error {
+ return nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/pricegetter/evm_test.go b/core/services/ocr2/plugins/ccip/internal/pricegetter/evm_test.go
new file mode 100644
index 00000000000..673b9776c79
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/pricegetter/evm_test.go
@@ -0,0 +1,546 @@
+package pricegetter
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/aggregator_v3_interface"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks"
+)
+
+type testParameters struct {
+ cfg config.DynamicPriceGetterConfig
+ evmClients map[uint64]DynamicPriceGetterClient
+ tokens []common.Address
+ expectedTokenPrices map[common.Address]big.Int
+ evmCallErr bool
+ invalidConfigErrorExpected bool
+ priceResolutionErrorExpected bool
+}
+
+func TestDynamicPriceGetter(t *testing.T) {
+ tests := []struct {
+ name string
+ param testParameters
+ }{
+ {
+ name: "aggregator_only_valid",
+ param: testParamAggregatorOnly(t),
+ },
+ {
+ name: "aggregator_only_valid_multi",
+ param: testParamAggregatorOnlyMulti(t),
+ },
+ {
+ name: "static_only_valid",
+ param: testParamStaticOnly(),
+ },
+ {
+ name: "aggregator_and_static_valid",
+ param: testParamAggregatorAndStaticValid(t),
+ },
+ {
+ name: "aggregator_and_static_token_collision",
+ param: testParamAggregatorAndStaticTokenCollision(t),
+ },
+ {
+ name: "no_aggregator_for_token",
+ param: testParamNoAggregatorForToken(t),
+ },
+ {
+ name: "batchCall_returns_err",
+ param: testParamBatchCallReturnsErr(t),
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ pg, err := NewDynamicPriceGetter(test.param.cfg, test.param.evmClients)
+ if test.param.invalidConfigErrorExpected {
+ require.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+ ctx := testutils.Context(t)
+ // Check configured token
+ unconfiguredTk := cciptypes.Address(utils.RandomAddress().String())
+ cfgTokens, uncfgTokens, err := pg.FilterConfiguredTokens(ctx, []cciptypes.Address{unconfiguredTk})
+ require.NoError(t, err)
+ assert.Equal(t, []cciptypes.Address{}, cfgTokens)
+ assert.Equal(t, []cciptypes.Address{unconfiguredTk}, uncfgTokens)
+ // Build list of tokens to query.
+ tokens := make([]cciptypes.Address, 0, len(test.param.tokens))
+ for _, tk := range test.param.tokens {
+ tokenAddr := ccipcalc.EvmAddrToGeneric(tk)
+ tokens = append(tokens, tokenAddr)
+ }
+ prices, err := pg.TokenPricesUSD(ctx, tokens)
+
+ if test.param.evmCallErr {
+ require.Error(t, err)
+ return
+ }
+
+ if test.param.priceResolutionErrorExpected {
+ require.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+			// We expect prices for at least all queried tokens (additional tokens may also be returned).
+ assert.True(t, len(prices) >= len(test.param.expectedTokenPrices))
+ // Check prices are matching expected result.
+ for tk, expectedPrice := range test.param.expectedTokenPrices {
+ if prices[cciptypes.Address(tk.String())] == nil {
+ assert.Fail(t, "Token price not found")
+ }
+ assert.Equal(t, 0, expectedPrice.Cmp(prices[cciptypes.Address(tk.String())]),
+ "Token price mismatch: expected price %v, got %v", expectedPrice, *prices[cciptypes.Address(tk.String())])
+ }
+ })
+ }
+}
+
+func testParamAggregatorOnly(t *testing.T) testParameters {
+ tk1 := utils.RandomAddress()
+ tk2 := utils.RandomAddress()
+ tk3 := utils.RandomAddress()
+ tk4 := utils.RandomAddress()
+ cfg := config.DynamicPriceGetterConfig{
+ AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{
+ tk1: {
+ ChainID: 101,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ tk2: {
+ ChainID: 102,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ tk3: {
+ ChainID: 103,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ tk4: {
+ ChainID: 104,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ },
+ StaticPrices: map[common.Address]config.StaticPriceConfig{},
+ }
+ // Real LINK/USD example from OP.
+ round1 := aggregator_v3_interface.LatestRoundData{
+ RoundId: big.NewInt(1000),
+ Answer: big.NewInt(1396818990),
+ StartedAt: big.NewInt(1704896575),
+ UpdatedAt: big.NewInt(1704896575),
+ AnsweredInRound: big.NewInt(1000),
+ }
+ // Real ETH/USD example from OP.
+ round2 := aggregator_v3_interface.LatestRoundData{
+ RoundId: big.NewInt(2000),
+ Answer: big.NewInt(238879815123),
+ StartedAt: big.NewInt(1704897197),
+ UpdatedAt: big.NewInt(1704897197),
+ AnsweredInRound: big.NewInt(2000),
+ }
+ // Real LINK/ETH example from OP.
+ round3 := aggregator_v3_interface.LatestRoundData{
+ RoundId: big.NewInt(3000),
+ Answer: big.NewInt(4468862777874802),
+ StartedAt: big.NewInt(1715743907),
+ UpdatedAt: big.NewInt(1715743907),
+ AnsweredInRound: big.NewInt(3000),
+ }
+ // Fake data for a token with more than 18 decimals.
+ round4 := aggregator_v3_interface.LatestRoundData{
+ RoundId: big.NewInt(4000),
+ Answer: multExp(big.NewInt(1234567890), 10), // 20 digits.
+ StartedAt: big.NewInt(1715753907),
+ UpdatedAt: big.NewInt(1715753907),
+ AnsweredInRound: big.NewInt(4000),
+ }
+ evmClients := map[uint64]DynamicPriceGetterClient{
+ uint64(101): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round1}),
+ uint64(102): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round2}),
+ uint64(103): mockClient(t, []uint8{18}, []aggregator_v3_interface.LatestRoundData{round3}),
+ uint64(104): mockClient(t, []uint8{20}, []aggregator_v3_interface.LatestRoundData{round4}),
+ }
+ expectedTokenPrices := map[common.Address]big.Int{
+ tk1: *multExp(round1.Answer, 10), // expected in 1e18 format.
+ tk2: *multExp(round2.Answer, 10), // expected in 1e18 format.
+ tk3: *round3.Answer, // already in 1e18 format (contract decimals==18).
+ tk4: *multExp(big.NewInt(1234567890), 8), // expected in 1e18 format.
+ }
+ return testParameters{
+ cfg: cfg,
+ evmClients: evmClients,
+ tokens: []common.Address{tk1, tk2, tk3, tk4},
+ expectedTokenPrices: expectedTokenPrices,
+ invalidConfigErrorExpected: false,
+ }
+}
+
+// testParamAggregatorOnlyMulti tests with several tokens on chain 102.
+func testParamAggregatorOnlyMulti(t *testing.T) testParameters {
+ tk1 := utils.RandomAddress()
+ tk2 := utils.RandomAddress()
+ tk3 := utils.RandomAddress()
+ cfg := config.DynamicPriceGetterConfig{
+ AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{
+ tk1: {
+ ChainID: 101,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ tk2: {
+ ChainID: 102,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ tk3: {
+ ChainID: 102,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ },
+ StaticPrices: map[common.Address]config.StaticPriceConfig{},
+ }
+ // Real LINK/USD example from OP.
+ round1 := aggregator_v3_interface.LatestRoundData{
+ RoundId: big.NewInt(1000),
+ Answer: big.NewInt(1396818990),
+ StartedAt: big.NewInt(1704896575),
+ UpdatedAt: big.NewInt(1704896575),
+ AnsweredInRound: big.NewInt(1000),
+ }
+ // Real ETH/USD example from OP.
+ round2 := aggregator_v3_interface.LatestRoundData{
+ RoundId: big.NewInt(2000),
+ Answer: big.NewInt(238879815123),
+ StartedAt: big.NewInt(1704897197),
+ UpdatedAt: big.NewInt(1704897197),
+ AnsweredInRound: big.NewInt(2000),
+ }
+ round3 := aggregator_v3_interface.LatestRoundData{
+ RoundId: big.NewInt(3000),
+ Answer: big.NewInt(238879815125),
+ StartedAt: big.NewInt(1704897198),
+ UpdatedAt: big.NewInt(1704897198),
+ AnsweredInRound: big.NewInt(3000),
+ }
+ evmClients := map[uint64]DynamicPriceGetterClient{
+ uint64(101): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round1}),
+ uint64(102): mockClient(t, []uint8{8, 8}, []aggregator_v3_interface.LatestRoundData{round2, round3}),
+ }
+ expectedTokenPrices := map[common.Address]big.Int{
+ tk1: *multExp(round1.Answer, 10),
+ tk2: *multExp(round2.Answer, 10),
+ tk3: *multExp(round3.Answer, 10),
+ }
+ return testParameters{
+ cfg: cfg,
+ evmClients: evmClients,
+ invalidConfigErrorExpected: false,
+ tokens: []common.Address{tk1, tk2, tk3},
+ expectedTokenPrices: expectedTokenPrices,
+ }
+}
+
+func testParamStaticOnly() testParameters {
+ tk1 := utils.RandomAddress()
+ tk2 := utils.RandomAddress()
+ tk3 := utils.RandomAddress()
+ cfg := config.DynamicPriceGetterConfig{
+ AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{},
+ StaticPrices: map[common.Address]config.StaticPriceConfig{
+ tk1: {
+ ChainID: 101,
+ Price: big.NewInt(1_234_000),
+ },
+ tk2: {
+ ChainID: 102,
+ Price: big.NewInt(2_234_000),
+ },
+ tk3: {
+ ChainID: 103,
+ Price: big.NewInt(3_234_000),
+ },
+ },
+ }
+	// No aggregator clients are needed here; all prices are static.
+ evmClients := map[uint64]DynamicPriceGetterClient{}
+ expectedTokenPrices := map[common.Address]big.Int{
+ tk1: *cfg.StaticPrices[tk1].Price,
+ tk2: *cfg.StaticPrices[tk2].Price,
+ tk3: *cfg.StaticPrices[tk3].Price,
+ }
+ return testParameters{
+ cfg: cfg,
+ evmClients: evmClients,
+ tokens: []common.Address{tk1, tk2, tk3},
+ expectedTokenPrices: expectedTokenPrices,
+ }
+}
+
+func testParamAggregatorAndStaticValid(t *testing.T) testParameters {
+ tk1 := utils.RandomAddress()
+ tk2 := utils.RandomAddress()
+ tk3 := utils.RandomAddress()
+ cfg := config.DynamicPriceGetterConfig{
+ AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{
+ tk1: {
+ ChainID: 101,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ tk2: {
+ ChainID: 102,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ },
+ StaticPrices: map[common.Address]config.StaticPriceConfig{
+ tk3: {
+ ChainID: 103,
+ Price: big.NewInt(1_234_000),
+ },
+ },
+ }
+ // Real LINK/USD example from OP.
+ round1 := aggregator_v3_interface.LatestRoundData{
+ RoundId: big.NewInt(1000),
+ Answer: big.NewInt(1396818990),
+ StartedAt: big.NewInt(1704896575),
+ UpdatedAt: big.NewInt(1704896575),
+ AnsweredInRound: big.NewInt(1000),
+ }
+ // Real ETH/USD example from OP.
+ round2 := aggregator_v3_interface.LatestRoundData{
+ RoundId: big.NewInt(2000),
+ Answer: big.NewInt(238879815123),
+ StartedAt: big.NewInt(1704897197),
+ UpdatedAt: big.NewInt(1704897197),
+ AnsweredInRound: big.NewInt(2000),
+ }
+ evmClients := map[uint64]DynamicPriceGetterClient{
+ uint64(101): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round1}),
+ uint64(102): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round2}),
+ }
+ expectedTokenPrices := map[common.Address]big.Int{
+ tk1: *multExp(round1.Answer, 10),
+ tk2: *multExp(round2.Answer, 10),
+ tk3: *cfg.StaticPrices[tk3].Price,
+ }
+ return testParameters{
+ cfg: cfg,
+ evmClients: evmClients,
+ tokens: []common.Address{tk1, tk2, tk3},
+ expectedTokenPrices: expectedTokenPrices,
+ }
+}
+
+func testParamAggregatorAndStaticTokenCollision(t *testing.T) testParameters {
+ tk1 := utils.RandomAddress()
+ tk2 := utils.RandomAddress()
+ tk3 := utils.RandomAddress()
+ cfg := config.DynamicPriceGetterConfig{
+ AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{
+ tk1: {
+ ChainID: 101,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ tk2: {
+ ChainID: 102,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ tk3: {
+ ChainID: 103,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ },
+ StaticPrices: map[common.Address]config.StaticPriceConfig{
+ tk3: {
+ ChainID: 103,
+ Price: big.NewInt(1_234_000),
+ },
+ },
+ }
+ // Real LINK/USD example from OP.
+ round1 := aggregator_v3_interface.LatestRoundData{
+ RoundId: big.NewInt(1000),
+ Answer: big.NewInt(1396818990),
+ StartedAt: big.NewInt(1704896575),
+ UpdatedAt: big.NewInt(1704896575),
+ AnsweredInRound: big.NewInt(1000),
+ }
+ // Real ETH/USD example from OP.
+ round2 := aggregator_v3_interface.LatestRoundData{
+ RoundId: big.NewInt(2000),
+ Answer: big.NewInt(238879815123),
+ StartedAt: big.NewInt(1704897197),
+ UpdatedAt: big.NewInt(1704897197),
+ AnsweredInRound: big.NewInt(2000),
+ }
+ round3 := aggregator_v3_interface.LatestRoundData{
+ RoundId: big.NewInt(3000),
+ Answer: big.NewInt(238879815124),
+ StartedAt: big.NewInt(1704897198),
+ UpdatedAt: big.NewInt(1704897198),
+ AnsweredInRound: big.NewInt(3000),
+ }
+ evmClients := map[uint64]DynamicPriceGetterClient{
+ uint64(101): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round1}),
+ uint64(102): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round2}),
+ uint64(103): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round3}),
+ }
+ return testParameters{
+ cfg: cfg,
+ evmClients: evmClients,
+ tokens: []common.Address{tk1, tk2, tk3},
+ invalidConfigErrorExpected: true,
+ }
+}
+
+func testParamNoAggregatorForToken(t *testing.T) testParameters {
+ tk1 := utils.RandomAddress()
+ tk2 := utils.RandomAddress()
+ tk3 := utils.RandomAddress()
+ tk4 := utils.RandomAddress()
+ cfg := config.DynamicPriceGetterConfig{
+ AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{
+ tk1: {
+ ChainID: 101,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ tk2: {
+ ChainID: 102,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ },
+ StaticPrices: map[common.Address]config.StaticPriceConfig{
+ tk3: {
+ ChainID: 103,
+ Price: big.NewInt(1_234_000),
+ },
+ },
+ }
+ // Real LINK/USD example from OP.
+ round1 := aggregator_v3_interface.LatestRoundData{
+ RoundId: big.NewInt(1000),
+ Answer: big.NewInt(1396818990),
+ StartedAt: big.NewInt(1704896575),
+ UpdatedAt: big.NewInt(1704896575),
+ AnsweredInRound: big.NewInt(1000),
+ }
+ // Real ETH/USD example from OP.
+ round2 := aggregator_v3_interface.LatestRoundData{
+ RoundId: big.NewInt(2000),
+ Answer: big.NewInt(238879815123),
+ StartedAt: big.NewInt(1704897197),
+ UpdatedAt: big.NewInt(1704897197),
+ AnsweredInRound: big.NewInt(2000),
+ }
+ evmClients := map[uint64]DynamicPriceGetterClient{
+ uint64(101): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round1}),
+ uint64(102): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round2}),
+ }
+ expectedTokenPrices := map[common.Address]big.Int{
+ tk1: *round1.Answer,
+ tk2: *round2.Answer,
+ tk3: *cfg.StaticPrices[tk3].Price,
+ tk4: *big.NewInt(0),
+ }
+ return testParameters{
+ cfg: cfg,
+ evmClients: evmClients,
+ tokens: []common.Address{tk1, tk2, tk3, tk4},
+ expectedTokenPrices: expectedTokenPrices,
+ priceResolutionErrorExpected: true,
+ }
+}
+
+func testParamBatchCallReturnsErr(t *testing.T) testParameters {
+ tk1 := utils.RandomAddress()
+ tk2 := utils.RandomAddress()
+ tk3 := utils.RandomAddress()
+ cfg := config.DynamicPriceGetterConfig{
+ AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{
+ tk1: {
+ ChainID: 101,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ tk2: {
+ ChainID: 102,
+ AggregatorContractAddress: utils.RandomAddress(),
+ },
+ },
+ StaticPrices: map[common.Address]config.StaticPriceConfig{
+ tk3: {
+ ChainID: 103,
+ Price: big.NewInt(1_234_000),
+ },
+ },
+ }
+ // Real LINK/USD example from OP.
+ round1 := aggregator_v3_interface.LatestRoundData{
+ RoundId: big.NewInt(1000),
+ Answer: big.NewInt(1396818990),
+ StartedAt: big.NewInt(1704896575),
+ UpdatedAt: big.NewInt(1704896575),
+ AnsweredInRound: big.NewInt(1000),
+ }
+ evmClients := map[uint64]DynamicPriceGetterClient{
+ uint64(101): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round1}),
+ uint64(102): {
+ BatchCaller: mockErrCaller(t),
+ },
+ }
+ return testParameters{
+ cfg: cfg,
+ evmClients: evmClients,
+ tokens: []common.Address{tk1, tk2, tk3},
+ evmCallErr: true,
+ }
+}
+
+func mockClient(t *testing.T, decimals []uint8, rounds []aggregator_v3_interface.LatestRoundData) DynamicPriceGetterClient {
+ return DynamicPriceGetterClient{
+ BatchCaller: mockCaller(t, decimals, rounds),
+ }
+}
+
+func mockCaller(t *testing.T, decimals []uint8, rounds []aggregator_v3_interface.LatestRoundData) *rpclibmocks.EvmBatchCaller {
+ caller := rpclibmocks.NewEvmBatchCaller(t)
+
+ // Mock batch calls per chain: all decimals calls then all latestRoundData calls.
+ dataAndErrs := make([]rpclib.DataAndErr, 0, len(decimals)+len(rounds))
+ for _, d := range decimals {
+ dataAndErrs = append(dataAndErrs, rpclib.DataAndErr{
+ Outputs: []any{d},
+ })
+ }
+ for _, round := range rounds {
+ dataAndErrs = append(dataAndErrs, rpclib.DataAndErr{
+ Outputs: []any{round.RoundId, round.Answer, round.StartedAt, round.UpdatedAt, round.AnsweredInRound},
+ })
+ }
+ caller.On("BatchCall", mock.Anything, uint64(0), mock.Anything).Return(dataAndErrs, nil).Maybe()
+ return caller
+}
+
+func mockErrCaller(t *testing.T) *rpclibmocks.EvmBatchCaller {
+ caller := rpclibmocks.NewEvmBatchCaller(t)
+ caller.On("BatchCall", mock.Anything, uint64(0), mock.Anything).Return(nil, assert.AnError).Maybe()
+ return caller
+}
+
+// multExp returns the result of multiplying x by 10^e.
+func multExp(x *big.Int, e int64) *big.Int {
+ return big.NewInt(0).Mul(x, big.NewInt(0).Exp(big.NewInt(10), big.NewInt(e), nil))
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/pricegetter/mock.go b/core/services/ocr2/plugins/ccip/internal/pricegetter/mock.go
new file mode 100644
index 00000000000..195649685b2
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/pricegetter/mock.go
@@ -0,0 +1,211 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package pricegetter
+
+import (
+ context "context"
+ big "math/big"
+
+ ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// MockPriceGetter is an autogenerated mock type for the PriceGetter type
+type MockPriceGetter struct {
+ mock.Mock
+}
+
+type MockPriceGetter_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *MockPriceGetter) EXPECT() *MockPriceGetter_Expecter {
+ return &MockPriceGetter_Expecter{mock: &_m.Mock}
+}
+
+// Close provides a mock function with given fields:
+func (_m *MockPriceGetter) Close() error {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for Close")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func() error); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// MockPriceGetter_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
+type MockPriceGetter_Close_Call struct {
+ *mock.Call
+}
+
+// Close is a helper method to define mock.On call
+func (_e *MockPriceGetter_Expecter) Close() *MockPriceGetter_Close_Call {
+ return &MockPriceGetter_Close_Call{Call: _e.mock.On("Close")}
+}
+
+func (_c *MockPriceGetter_Close_Call) Run(run func()) *MockPriceGetter_Close_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *MockPriceGetter_Close_Call) Return(_a0 error) *MockPriceGetter_Close_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *MockPriceGetter_Close_Call) RunAndReturn(run func() error) *MockPriceGetter_Close_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// FilterConfiguredTokens provides a mock function with given fields: ctx, tokens
+func (_m *MockPriceGetter) FilterConfiguredTokens(ctx context.Context, tokens []ccip.Address) ([]ccip.Address, []ccip.Address, error) {
+ ret := _m.Called(ctx, tokens)
+
+ if len(ret) == 0 {
+ panic("no return value specified for FilterConfiguredTokens")
+ }
+
+ var r0 []ccip.Address
+ var r1 []ccip.Address
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) ([]ccip.Address, []ccip.Address, error)); ok {
+ return rf(ctx, tokens)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) []ccip.Address); ok {
+ r0 = rf(ctx, tokens)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]ccip.Address)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, []ccip.Address) []ccip.Address); ok {
+ r1 = rf(ctx, tokens)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).([]ccip.Address)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, []ccip.Address) error); ok {
+ r2 = rf(ctx, tokens)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// MockPriceGetter_FilterConfiguredTokens_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FilterConfiguredTokens'
+type MockPriceGetter_FilterConfiguredTokens_Call struct {
+ *mock.Call
+}
+
+// FilterConfiguredTokens is a helper method to define mock.On call
+// - ctx context.Context
+// - tokens []ccip.Address
+func (_e *MockPriceGetter_Expecter) FilterConfiguredTokens(ctx interface{}, tokens interface{}) *MockPriceGetter_FilterConfiguredTokens_Call {
+ return &MockPriceGetter_FilterConfiguredTokens_Call{Call: _e.mock.On("FilterConfiguredTokens", ctx, tokens)}
+}
+
+func (_c *MockPriceGetter_FilterConfiguredTokens_Call) Run(run func(ctx context.Context, tokens []ccip.Address)) *MockPriceGetter_FilterConfiguredTokens_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].([]ccip.Address))
+ })
+ return _c
+}
+
+func (_c *MockPriceGetter_FilterConfiguredTokens_Call) Return(configured []ccip.Address, unconfigured []ccip.Address, err error) *MockPriceGetter_FilterConfiguredTokens_Call {
+ _c.Call.Return(configured, unconfigured, err)
+ return _c
+}
+
+func (_c *MockPriceGetter_FilterConfiguredTokens_Call) RunAndReturn(run func(context.Context, []ccip.Address) ([]ccip.Address, []ccip.Address, error)) *MockPriceGetter_FilterConfiguredTokens_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// TokenPricesUSD provides a mock function with given fields: ctx, tokens
+func (_m *MockPriceGetter) TokenPricesUSD(ctx context.Context, tokens []ccip.Address) (map[ccip.Address]*big.Int, error) {
+ ret := _m.Called(ctx, tokens)
+
+ if len(ret) == 0 {
+ panic("no return value specified for TokenPricesUSD")
+ }
+
+ var r0 map[ccip.Address]*big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) (map[ccip.Address]*big.Int, error)); ok {
+ return rf(ctx, tokens)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) map[ccip.Address]*big.Int); ok {
+ r0 = rf(ctx, tokens)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(map[ccip.Address]*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, []ccip.Address) error); ok {
+ r1 = rf(ctx, tokens)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// MockPriceGetter_TokenPricesUSD_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TokenPricesUSD'
+type MockPriceGetter_TokenPricesUSD_Call struct {
+ *mock.Call
+}
+
+// TokenPricesUSD is a helper method to define mock.On call
+// - ctx context.Context
+// - tokens []ccip.Address
+func (_e *MockPriceGetter_Expecter) TokenPricesUSD(ctx interface{}, tokens interface{}) *MockPriceGetter_TokenPricesUSD_Call {
+ return &MockPriceGetter_TokenPricesUSD_Call{Call: _e.mock.On("TokenPricesUSD", ctx, tokens)}
+}
+
+func (_c *MockPriceGetter_TokenPricesUSD_Call) Run(run func(ctx context.Context, tokens []ccip.Address)) *MockPriceGetter_TokenPricesUSD_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].([]ccip.Address))
+ })
+ return _c
+}
+
+func (_c *MockPriceGetter_TokenPricesUSD_Call) Return(_a0 map[ccip.Address]*big.Int, _a1 error) *MockPriceGetter_TokenPricesUSD_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *MockPriceGetter_TokenPricesUSD_Call) RunAndReturn(run func(context.Context, []ccip.Address) (map[ccip.Address]*big.Int, error)) *MockPriceGetter_TokenPricesUSD_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewMockPriceGetter creates a new instance of MockPriceGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewMockPriceGetter(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *MockPriceGetter {
+ mock := &MockPriceGetter{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline.go b/core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline.go
new file mode 100644
index 00000000000..ae9a10deb65
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline.go
@@ -0,0 +1,114 @@
+package pricegetter
+
+import (
+ "context"
+ "math/big"
+ "strings"
+ "time"
+
+ mapset "github.com/deckarep/golang-set/v2"
+ "github.com/google/uuid"
+ "github.com/pkg/errors"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/parseutil"
+ "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
+)
+
+var _ PriceGetter = &PipelineGetter{}
+
+type PipelineGetter struct {
+ source string
+ runner pipeline.Runner
+ jobID int32
+ externalJobID uuid.UUID
+ name string
+ lggr logger.Logger
+}
+
+func NewPipelineGetter(source string, runner pipeline.Runner, jobID int32, externalJobID uuid.UUID, name string, lggr logger.Logger) (*PipelineGetter, error) {
+ _, err := pipeline.Parse(source)
+ if err != nil {
+ return nil, err
+ }
+
+ return &PipelineGetter{
+ source: source,
+ runner: runner,
+ jobID: jobID,
+ externalJobID: externalJobID,
+ name: name,
+ lggr: lggr,
+ }, nil
+}
+
+// FilterConfiguredTokens implements the PriceGetter interface.
+// It filters a list of token addresses for only those that have a pipeline job configured on the TokenPricesUSDPipeline.
+func (d *PipelineGetter) FilterConfiguredTokens(ctx context.Context, tokens []cciptypes.Address) (configured []cciptypes.Address, unconfigured []cciptypes.Address, err error) {
+ lcSource := strings.ToLower(d.source)
+ for _, tk := range tokens {
+ lcToken := strings.ToLower(string(tk))
+ if strings.Contains(lcSource, lcToken) {
+ configured = append(configured, tk)
+ } else {
+ unconfigured = append(unconfigured, tk)
+ }
+ }
+ return configured, unconfigured, nil
+}
+
+func (d *PipelineGetter) TokenPricesUSD(ctx context.Context, tokens []cciptypes.Address) (map[cciptypes.Address]*big.Int, error) {
+ _, trrs, err := d.runner.ExecuteRun(ctx, pipeline.Spec{
+ ID: d.jobID,
+ DotDagSource: d.source,
+ CreatedAt: time.Now(),
+ JobID: d.jobID,
+ JobName: d.name,
+ JobType: "",
+ }, pipeline.NewVarsFrom(map[string]interface{}{}))
+ if err != nil {
+ return nil, err
+ }
+ finalResult := trrs.FinalResult()
+ if finalResult.HasErrors() {
+ return nil, errors.Errorf("error getting prices %v", finalResult.AllErrors)
+ }
+ if len(finalResult.Values) != 1 {
+ return nil, errors.Errorf("invalid number of price results, expected 1 got %v", len(finalResult.Values))
+ }
+ prices, ok := finalResult.Values[0].(map[string]interface{})
+ if !ok {
+ return nil, errors.Errorf("expected map output of price pipeline, got %T", finalResult.Values[0])
+ }
+
+ providedTokensSet := mapset.NewSet(tokens...)
+ tokenPrices := make(map[cciptypes.Address]*big.Int)
+ for tokenAddressStr, rawPrice := range prices {
+ tokenAddressStr := ccipcalc.HexToAddress(tokenAddressStr)
+ castedPrice, err := parseutil.ParseBigIntFromAny(rawPrice)
+ if err != nil {
+ return nil, err
+ }
+
+ if providedTokensSet.Contains(tokenAddressStr) {
+ tokenPrices[tokenAddressStr] = castedPrice
+ }
+ }
+
+ // The mapping of token address to source of token price has to live offchain.
+ // Best we can do is sanity check that the token price spec covers all our desired execution token prices.
+ for _, token := range tokens {
+ if _, ok = tokenPrices[token]; !ok {
+ return nil, errors.Errorf("missing token %s from tokensForFeeCoin spec, got %v", token, prices)
+ }
+ }
+
+ return tokenPrices, nil
+}
+
+func (d *PipelineGetter) Close() error {
+ return d.runner.Close()
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline_test.go b/core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline_test.go
new file mode 100644
index 00000000000..37970750732
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline_test.go
@@ -0,0 +1,178 @@
+package pricegetter_test
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/google/uuid"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ config2 "github.com/smartcontractkit/chainlink-common/pkg/config"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/bridges"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter"
+ "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
+
+ pipelinemocks "github.com/smartcontractkit/chainlink/v2/core/services/pipeline/mocks"
+
+ config "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
+)
+
+func TestDataSource(t *testing.T) { // End-to-end: two HTTP price sources merged by the pipeline into one token->price map.
+	linkEth := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+		_, err := w.Write([]byte(`{"JuelsPerETH": "200000000000000000000"}`))
+		require.NoError(t, err)
+	}))
+	defer linkEth.Close()
+	usdcEth := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+		_, err := w.Write([]byte(`{"USDCWeiPerETH": "1000000000000000000000"}`)) // 1000 USDC / ETH
+		require.NoError(t, err)
+	}))
+	defer usdcEth.Close()
+	linkTokenAddress := ccipcalc.HexToAddress("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05")
+	usdcTokenAddress := ccipcalc.HexToAddress("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e10")
+	source := fmt.Sprintf(`
+	// Price 1
+	link [type=http method=GET url="%s"];
+	link_parse [type=jsonparse path="JuelsPerETH"];
+	link->link_parse;
+	// Price 2
+	usdc [type=http method=GET url="%s"];
+	usdc_parse [type=jsonparse path="USDCWeiPerETH"];
+	usdc->usdc_parse;
+	merge [type=merge left="{}" right="{\"%s\":$(link_parse), \"%s\":$(usdc_parse)}"];
+`, linkEth.URL, usdcEth.URL, linkTokenAddress, usdcTokenAddress)
+
+	priceGetter := newTestPipelineGetter(t, source)
+
+	// USDC & LINK are configured
+	confTokens, _, err := priceGetter.FilterConfiguredTokens(context.Background(), []cciptypes.Address{linkTokenAddress, usdcTokenAddress})
+	require.NoError(t, err)
+	assert.Equal(t, linkTokenAddress, confTokens[0])
+	assert.Equal(t, usdcTokenAddress, confTokens[1])
+
+	// Ask for all prices present in spec.
+	prices, err := priceGetter.TokenPricesUSD(context.Background(), []cciptypes.Address{
+		linkTokenAddress,
+		usdcTokenAddress,
+	})
+	require.NoError(t, err)
+	assert.Equal(t, prices, map[cciptypes.Address]*big.Int{
+		linkTokenAddress: big.NewInt(0).Mul(big.NewInt(200), big.NewInt(1000000000000000000)), // 200 * 1e18
+		usdcTokenAddress: big.NewInt(0).Mul(big.NewInt(1000), big.NewInt(1000000000000000000)),
+	})
+
+	// Ask a non-existent price.
+	_, err = priceGetter.TokenPricesUSD(context.Background(), []cciptypes.Address{
+		ccipcalc.HexToAddress("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e11"),
+	})
+	require.Error(t, err) // a token the spec does not cover must be rejected, not silently omitted
+
+	// Ask only one price
+	prices, err = priceGetter.TokenPricesUSD(context.Background(), []cciptypes.Address{linkTokenAddress})
+	require.NoError(t, err)
+	assert.Equal(t, prices, map[cciptypes.Address]*big.Int{
+		linkTokenAddress: big.NewInt(0).Mul(big.NewInt(200), big.NewInt(1000000000000000000)),
+	})
+}
+
+func TestParsingDifferentFormats(t *testing.T) { // Table test for how different JSON encodings of a price are parsed.
+	tests := []struct {
+		name          string
+		inputValue    string // raw JSON fragment served for the "MyCoin" key
+		expectedValue *big.Int
+		expectedError bool
+	}{
+		{
+			name:          "number as string",
+			inputValue:    "\"200000000000000000000\"",
+			expectedValue: new(big.Int).Mul(big.NewInt(200), big.NewInt(1e18)),
+		},
+		{
+			name:          "number as big number",
+			inputValue:    "500000000000000000000",
+			expectedValue: new(big.Int).Mul(big.NewInt(500), big.NewInt(1e18)),
+		},
+		{
+			name:          "number as int64",
+			inputValue:    "150",
+			expectedValue: big.NewInt(150),
+		},
+		{
+			name:          "number in scientific notation",
+			inputValue:    "3e22",
+			expectedValue: new(big.Int).Mul(big.NewInt(30000), big.NewInt(1e18)),
+		},
+		{
+			name:          "number as string in scientific notation returns error",
+			inputValue:    "\"3e22\"",
+			expectedError: true,
+		},
+		{
+			name:          "invalid value should return error",
+			inputValue:    "\"NaN\"",
+			expectedError: true,
+		},
+		{
+			name:          "null should return error",
+			inputValue:    "null",
+			expectedError: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			token := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+				_, err := fmt.Fprintf(w, `{"MyCoin": %s}`, tt.inputValue)
+				require.NoError(t, err)
+			}))
+			defer token.Close()
+
+			address := common.HexToAddress("0x94025780a1aB58868D9B2dBBB775f44b32e8E6e5")
+			source := fmt.Sprintf(`
+			// Price 1
+			coin [type=http method=GET url="%s"];
+			coin_parse [type=jsonparse path="MyCoin"];
+			coin->coin_parse;
+			merge [type=merge left="{}" right="{\"%s\":$(coin_parse)}"];
+			`, token.URL, strings.ToLower(address.String()))
+
+			prices, err := newTestPipelineGetter(t, source).
+				TokenPricesUSD(context.Background(), []cciptypes.Address{ccipcalc.EvmAddrToGeneric(address)})
+
+			if tt.expectedError {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, prices[ccipcalc.EvmAddrToGeneric(address)], tt.expectedValue)
+			}
+		})
+	}
+}
+
+func newTestPipelineGetter(t *testing.T, source string) *pricegetter.PipelineGetter { // Builds a PipelineGetter backed by a real pipeline runner and a test DB.
+	lggr, _ := logger.NewLogger()
+	cfg := pipelinemocks.NewConfig(t)
+	cfg.On("MaxRunDuration").Return(time.Second)
+	cfg.On("DefaultHTTPTimeout").Return(*config2.MustNewDuration(time.Second))
+	cfg.On("DefaultHTTPLimit").Return(int64(1024 * 10))
+	cfg.On("VerboseLogging").Return(true)
+	db := pgtest.NewSqlxDB(t)
+	bridgeORM := bridges.NewORM(db)
+	runner := pipeline.NewRunner(pipeline.NewORM(db, lggr, config.NewTestGeneralConfig(t).JobPipeline().MaxSuccessfulRuns()),
+		bridgeORM, cfg, nil, nil, nil, nil, lggr, &http.Client{}, &http.Client{}) // nils: unused keystores/chain deps for these tests
+	ds, err := pricegetter.NewPipelineGetter(source, runner, 1, uuid.New(), "test", lggr)
+	require.NoError(t, err)
+	return ds
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/pricegetter/pricegetter.go b/core/services/ocr2/plugins/ccip/internal/pricegetter/pricegetter.go
new file mode 100644
index 00000000000..9ee0e8f3d0a
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/pricegetter/pricegetter.go
@@ -0,0 +1,7 @@
+package pricegetter
+
+import cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+type PriceGetter interface { // Local alias of the chainlink-common CCIP PriceGetter interface.
+	cciptypes.PriceGetter
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/rpclib/evm.go b/core/services/ocr2/plugins/ccip/internal/rpclib/evm.go
new file mode 100644
index 00000000000..71357029dd2
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/rpclib/evm.go
@@ -0,0 +1,337 @@
+package rpclib
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "reflect"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/pkg/errors"
+ "golang.org/x/sync/errgroup"
+
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+var ErrEmptyOutput = errors.New("rpc call output is empty (make sure that the contract method exists and rpc is healthy)")
+
+type EvmBatchCaller interface {
+	// BatchCall executes all the provided EvmCall and returns the results in the same order
+	// of the calls. Pass blockNumber=0 to use the latest block.
+	BatchCall(ctx context.Context, blockNumber uint64, calls []EvmCall) ([]DataAndErr, error)
+}
+
+type BatchSender interface { // Minimal surface of a JSON-RPC client able to send batched requests.
+	BatchCallContext(ctx context.Context, calls []rpc.BatchElem) error
+}
+
+const (
+	// DefaultRpcBatchSizeLimit defines the maximum number of rpc requests to be included in a batch.
+	DefaultRpcBatchSizeLimit = 100
+
+	// DefaultRpcBatchBackOffMultiplier defines the rate of reducing the batch size limit for retried calls.
+	// For example if limit is 20 and multiplier is 5:
+	// 1. 20
+	// 2. 20/5 = 4
+	// 3. 4/5 = 0 -> clamped to 1
+	DefaultRpcBatchBackOffMultiplier = 5
+
+	// DefaultMaxParallelRpcCalls defines the default maximum number of individual in-parallel rpc calls.
+	DefaultMaxParallelRpcCalls = 10
+)
+
+// DynamicLimitedBatchCaller makes batched rpc calls and perform retries by reducing the batch size on each retry.
+type DynamicLimitedBatchCaller struct {
+	bc *defaultEvmBatchCaller // all calls delegate to the default caller's retry loop
+}
+
+func NewDynamicLimitedBatchCaller(
+	lggr logger.Logger, batchSender BatchSender, batchSizeLimit, backOffMultiplier, parallelRpcCallsLimit uint,
+) *DynamicLimitedBatchCaller { // Pass 0 for any limit to use the package defaults.
+	return &DynamicLimitedBatchCaller{
+		bc: newDefaultEvmBatchCaller(lggr, batchSender, batchSizeLimit, backOffMultiplier, parallelRpcCallsLimit),
+	}
+}
+
+func (c *DynamicLimitedBatchCaller) BatchCall(ctx context.Context, blockNumber uint64, calls []EvmCall) ([]DataAndErr, error) {
+	return c.bc.batchCallDynamicLimitRetries(ctx, blockNumber, calls)
+}
+
+type defaultEvmBatchCaller struct {
+	lggr                  logger.Logger
+	batchSender           BatchSender
+	batchSizeLimit        uint // max calls per rpc batch
+	parallelRpcCallsLimit uint // max batches in flight concurrently
+	backOffMultiplier     uint // divisor applied to the batch size on each retry
+}
+
+// newDefaultEvmBatchCaller returns a new batch caller instance.
+// batchSizeLimit defines the maximum number of calls per batch for batchCallLimit, pass 0 to keep the default.
+// backOffMultiplier defines the back-off strategy for retries on batchCallDynamicLimitRetries, pass 0 to keep the default.
+func newDefaultEvmBatchCaller(
+	lggr logger.Logger, batchSender BatchSender, batchSizeLimit, backOffMultiplier, parallelRpcCallsLimit uint,
+) *defaultEvmBatchCaller {
+	batchSize := uint(DefaultRpcBatchSizeLimit)
+	if batchSizeLimit > 0 {
+		batchSize = batchSizeLimit
+	}
+
+	multiplier := uint(DefaultRpcBatchBackOffMultiplier)
+	if backOffMultiplier > 0 {
+		multiplier = backOffMultiplier
+	}
+
+	parallelRpcCalls := uint(DefaultMaxParallelRpcCalls)
+	if parallelRpcCallsLimit > 0 {
+		parallelRpcCalls = parallelRpcCallsLimit
+	}
+
+	return &defaultEvmBatchCaller{
+		lggr:                  lggr,
+		batchSender:           batchSender,
+		batchSizeLimit:        batchSize,
+		parallelRpcCallsLimit: parallelRpcCalls,
+		backOffMultiplier:     multiplier,
+	}
+}
+
+func (c *defaultEvmBatchCaller) batchCall(ctx context.Context, blockNumber uint64, calls []EvmCall) ([]DataAndErr, error) { // Sends all calls in one JSON-RPC batch; per-call failures land in DataAndErr.Err, transport failures abort the batch.
+	if len(calls) == 0 {
+		return nil, nil
+	}
+
+	packedOutputs := make([]string, len(calls))
+	rpcBatchCalls := make([]rpc.BatchElem, len(calls))
+
+	for i, call := range calls {
+		packedInputs, err := call.abi.Pack(call.methodName, call.args...)
+		if err != nil {
+			return nil, fmt.Errorf("pack %s(%+v): %w", call.methodName, call.args, err)
+		}
+
+		blockNumStr := "latest" // blockNumber == 0 means latest block
+		if blockNumber > 0 {
+			blockNumStr = hexutil.EncodeBig(big.NewInt(0).SetUint64(blockNumber))
+		}
+
+		rpcBatchCalls[i] = rpc.BatchElem{
+			Method: "eth_call",
+			Args: []any{
+				map[string]interface{}{
+					"from": common.Address{},
+					"to":   call.contractAddress,
+					"data": hexutil.Bytes(packedInputs),
+				},
+				blockNumStr,
+			},
+			Result: &packedOutputs[i], // rpc client writes the hex-encoded return data here
+		}
+	}
+
+	err := c.batchSender.BatchCallContext(ctx, rpcBatchCalls)
+	if err != nil { // transport-level failure: fail the whole batch
+		return nil, fmt.Errorf("batch call context: %w", err)
+	}
+
+	results := make([]DataAndErr, len(calls))
+	for i, call := range calls {
+		if rpcBatchCalls[i].Error != nil { // per-element rpc error: record and keep processing the rest
+			results[i].Err = rpcBatchCalls[i].Error
+			continue
+		}
+
+		if packedOutputs[i] == "" {
+			// Some RPCs instead of returning "0x" are returning an empty string.
+			// We are overriding this behaviour for consistent handling of this scenario.
+			packedOutputs[i] = "0x"
+		}
+
+		b, err := hexutil.Decode(packedOutputs[i])
+		if err != nil {
+			return nil, fmt.Errorf("decode result %s: packedOutputs %s: %w", call, packedOutputs[i], err)
+		}
+
+		unpackedOutputs, err := call.abi.Unpack(call.methodName, b)
+		if err != nil {
+			if len(b) == 0 { // empty return data usually means a missing method or an unhealthy rpc
+				results[i].Err = fmt.Errorf("unpack result %s: %s: %w", call, err.Error(), ErrEmptyOutput)
+			} else {
+				results[i].Err = fmt.Errorf("unpack result %s: %w", call, err)
+			}
+			continue
+		}
+
+		results[i].Outputs = unpackedOutputs
+	}
+
+	return results, nil
+}
+
+// batchCallDynamicLimitRetries retries failed batches with a shrinking batch size
+// (divided by backOffMultiplier each round, floored at 1) until success or until a
+// single-call batch also fails.
+func (c *defaultEvmBatchCaller) batchCallDynamicLimitRetries(ctx context.Context, blockNumber uint64, calls []EvmCall) ([]DataAndErr, error) {
+	lim := c.batchSizeLimit
+	// Limit the batch size to the number of calls
+	if uint(len(calls)) < lim {
+		lim = uint(len(calls))
+	}
+	for {
+		results, err := c.batchCallLimit(ctx, blockNumber, calls, lim)
+		if err == nil {
+			return results, nil
+		}
+
+		if lim <= 1 { // already at the minimum batch size; give up
+			return nil, errors.Wrapf(err, "calls %+v", EVMCallsToString(calls))
+		}
+
+		newLim := lim / c.backOffMultiplier
+		if newLim == 0 || newLim == lim { // integer division bottomed out (or multiplier is 1): floor at 1
+			newLim = 1
+		}
+		// Log BEFORE reassigning lim so the failing limit is reported, not the reduced one.
+		c.lggr.Errorf("batch call with %d calls and limit=%d failed with error=%s, retrying with limit=%d",
+			len(calls), lim, err, newLim)
+		lim = newLim
+	}
+}
+
+func (c *defaultEvmBatchCaller) batchCallLimit(ctx context.Context, blockNumber uint64, calls []EvmCall, batchSizeLimit uint) ([]DataAndErr, error) { // Chunks calls into batches of batchSizeLimit and runs them, optionally in parallel; output order matches input order.
+	if batchSizeLimit <= 0 { // no limit: send everything as a single batch
+		return c.batchCall(ctx, blockNumber, calls)
+	}
+
+	type job struct {
+		blockNumber uint64
+		calls       []EvmCall
+		results     []DataAndErr
+	}
+
+	jobs := make([]job, 0)
+	for i := 0; i < len(calls); i += int(batchSizeLimit) { // chunk calls; the final chunk may be smaller
+		idxFrom := i
+		idxTo := idxFrom + int(batchSizeLimit)
+		if idxTo > len(calls) {
+			idxTo = len(calls)
+		}
+		jobs = append(jobs, job{blockNumber: blockNumber, calls: calls[idxFrom:idxTo], results: nil})
+	}
+
+	if c.parallelRpcCallsLimit > 1 {
+		eg := new(errgroup.Group)
+		eg.SetLimit(int(c.parallelRpcCallsLimit))
+		for jobIdx := range jobs {
+			jobIdx := jobIdx // capture loop variable for the goroutine closure
+			eg.Go(func() error {
+				res, err := c.batchCall(ctx, jobs[jobIdx].blockNumber, jobs[jobIdx].calls)
+				if err != nil {
+					return err
+				}
+				jobs[jobIdx].results = res
+				return nil
+			})
+		}
+		if err := eg.Wait(); err != nil {
+			return nil, err
+		}
+	} else { // serial fallback: no goroutine overhead, fails fast on first bad chunk
+		var err error
+		for jobIdx := range jobs {
+			jobs[jobIdx].results, err = c.batchCall(ctx, jobs[jobIdx].blockNumber, jobs[jobIdx].calls)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	results := make([]DataAndErr, 0)
+	for _, jb := range jobs { // flatten in job order so outputs line up with the input calls
+		results = append(results, jb.results...)
+	}
+	return results, nil
+}
+
+type AbiPackerUnpacker interface { // Narrow view of abi.ABI, kept small for testability.
+	Pack(name string, args ...interface{}) ([]byte, error)
+	Unpack(name string, data []byte) ([]interface{}, error)
+}
+
+type EvmCall struct { // One eth_call: ABI codec + method + target contract + arguments.
+	abi             AbiPackerUnpacker
+	methodName      string
+	contractAddress common.Address
+	args            []any
+}
+
+func NewEvmCall(abi AbiPackerUnpacker, methodName string, contractAddress common.Address, args ...any) EvmCall {
+	return EvmCall{
+		abi:             abi,
+		methodName:      methodName,
+		contractAddress: contractAddress,
+		args:            args,
+	}
+}
+
+func (c EvmCall) MethodName() string { // Accessor for the wrapped method name.
+	return c.methodName
+}
+
+func (c EvmCall) String() string { // Human-readable form used in error and log messages.
+	return fmt.Sprintf("%s: %s(%+v)", c.contractAddress.String(), c.methodName, c.args)
+}
+
+func EVMCallsToString(calls []EvmCall) string { // Newline-separated rendering of every call.
+	callString := ""
+	for _, call := range calls {
+		callString += fmt.Sprintf("%s\n", call.String())
+	}
+	return callString
+}
+
+type DataAndErr struct { // Per-call result: decoded outputs, or the call's own error (not both).
+	Outputs []any
+	Err     error
+}
+
+func ParseOutputs[T any](results []DataAndErr, parseFunc func(d DataAndErr) (T, error)) ([]T, error) { // Maps parseFunc over all results, failing fast on the first parse error.
+	parsed := make([]T, 0, len(results))
+
+	for _, res := range results {
+		v, err := parseFunc(res)
+		if err != nil {
+			return nil, fmt.Errorf("parse contract output: %w", err)
+		}
+		parsed = append(parsed, v)
+	}
+
+	return parsed, nil
+}
+
+func ParseOutput[T any](dataAndErr DataAndErr, idx int) (T, error) { // Extracts output idx as type T, falling back to a JSON round-trip for loosely-typed rpc values.
+	var parsed T
+
+	if dataAndErr.Err != nil {
+		return parsed, fmt.Errorf("rpc call error: %w", dataAndErr.Err)
+	}
+
+	if idx < 0 || idx >= len(dataAndErr.Outputs) {
+		return parsed, fmt.Errorf("idx %d is out of bounds for %d outputs", idx, len(dataAndErr.Outputs))
+	}
+
+	res, is := dataAndErr.Outputs[idx].(T)
+	if !is {
+		// some rpc types are not strictly defined
+		// for that reason we try to manually map the fields using json encoding
+		b, err := json.Marshal(dataAndErr.Outputs[idx])
+		if err == nil {
+			var empty T
+			if err := json.Unmarshal(b, &parsed); err == nil && !reflect.DeepEqual(parsed, empty) { // NOTE(review): a value that legitimately equals T's zero value is treated as a failed conversion here
+				return parsed, nil
+			}
+		}
+
+		return parsed, fmt.Errorf("the result type is: %T, expected: %T", dataAndErr.Outputs[idx], parsed)
+	}
+
+	return res, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/rpclib/evm_test.go b/core/services/ocr2/plugins/ccip/internal/rpclib/evm_test.go
new file mode 100644
index 00000000000..1a3d7baf0fc
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/rpclib/evm_test.go
@@ -0,0 +1,223 @@
+package rpclib_test
+
+import (
+ "fmt"
+ "strconv"
+ "testing"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib"
+
+ "github.com/cometbft/cometbft/libs/rand"
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+)
+
+func TestDefaultEvmBatchCaller_BatchCallDynamicLimit(t *testing.T) { // Asserts the retry loop shrinks batch sizes as expected when every batch fails.
+	testCases := []struct {
+		name                          string
+		maxBatchSize                  uint
+		backOffMultiplier             uint
+		numCalls                      int
+		expectedBatchSizesOnEachRetry []int
+	}{
+		{
+			name:                          "defaults",
+			maxBatchSize:                  rpclib.DefaultRpcBatchSizeLimit,
+			backOffMultiplier:             rpclib.DefaultRpcBatchBackOffMultiplier,
+			numCalls:                      200,
+			expectedBatchSizesOnEachRetry: []int{100, 20, 4, 1},
+		},
+		{
+			name:                          "base simple scenario",
+			maxBatchSize:                  20,
+			backOffMultiplier:             2,
+			numCalls:                      100,
+			expectedBatchSizesOnEachRetry: []int{20, 10, 5, 2, 1},
+		},
+		{
+			name:                          "remainder",
+			maxBatchSize:                  99,
+			backOffMultiplier:             5,
+			numCalls:                      100,
+			expectedBatchSizesOnEachRetry: []int{99, 19, 3, 1},
+		},
+		{
+			name:                          "large back off multiplier",
+			maxBatchSize:                  20,
+			backOffMultiplier:             18,
+			numCalls:                      100,
+			expectedBatchSizesOnEachRetry: []int{20, 1},
+		},
+		{
+			name:                          "back off equal to batch size",
+			maxBatchSize:                  20,
+			backOffMultiplier:             20,
+			numCalls:                      100,
+			expectedBatchSizesOnEachRetry: []int{20, 1},
+		},
+		{
+			name:                          "back off larger than batch size",
+			maxBatchSize:                  20,
+			backOffMultiplier:             220,
+			numCalls:                      100,
+			expectedBatchSizesOnEachRetry: []int{20, 1},
+		},
+		{
+			name:                          "back off 1",
+			maxBatchSize:                  20,
+			backOffMultiplier:             1,
+			numCalls:                      100,
+			expectedBatchSizesOnEachRetry: []int{20, 1},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			batchSizes := make([]int, 0)
+
+			ec := mocks.NewClient(t)
+			bc := rpclib.NewDynamicLimitedBatchCaller(logger.TestLogger(t), ec, tc.maxBatchSize, tc.backOffMultiplier, 1) // parallelism 1 keeps the recorded batch order deterministic
+			ctx := testutils.Context(t)
+			calls := make([]rpclib.EvmCall, tc.numCalls)
+			emptyAbi := abihelpers.MustParseABI("[]")
+			for i := range calls {
+				calls[i] = rpclib.NewEvmCall(emptyAbi, "", common.Address{})
+			}
+			ec.On("BatchCallContext", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
+				evmCalls := args.Get(1).([]rpc.BatchElem)
+				batchSizes = append(batchSizes, len(evmCalls)) // record the size of each attempted batch
+			}).Return(errors.New("some error")) // every batch fails, forcing the back-off all the way down to 1
+			_, _ = bc.BatchCall(ctx, 123, calls)
+
+			assert.Equal(t, tc.expectedBatchSizesOnEachRetry, batchSizes)
+		})
+	}
+}
+
+func TestDefaultEvmBatchCaller_batchCallLimit(t *testing.T) { // Verifies results come back in input order across chunking/parallelism settings.
+	ctx := testutils.Context(t)
+
+	testCases := []struct {
+		numCalls              uint
+		batchSize             uint
+		parallelRpcCallsLimit uint
+	}{
+		{numCalls: 100, batchSize: 10, parallelRpcCallsLimit: 5},
+		{numCalls: 10, batchSize: 100, parallelRpcCallsLimit: 10},
+		{numCalls: 1, batchSize: 100, parallelRpcCallsLimit: 10},
+		{numCalls: 1000, batchSize: 10, parallelRpcCallsLimit: 2},
+		{numCalls: rand.Uint() % 1000, batchSize: rand.Uint() % 500, parallelRpcCallsLimit: rand.Uint() % 500}, // fuzz-ish case; zero values fall back to defaults
+	}
+
+	for _, tc := range testCases {
+		t.Run(fmt.Sprintf("%v", tc), func(t *testing.T) {
+			ec := mocks.NewClient(t)
+			bc := rpclib.NewDynamicLimitedBatchCaller(logger.TestLogger(t), ec, tc.batchSize, 99999, tc.parallelRpcCallsLimit)
+
+			// generate the abi and the rpc calls
+			intTyp, err := abi.NewType("uint64", "uint64", nil)
+			assert.NoError(t, err)
+			calls := make([]rpclib.EvmCall, tc.numCalls)
+			mockAbi := abihelpers.MustParseABI("[]")
+			for i := range calls {
+				name := fmt.Sprintf("method_%d", i)
+				meth := abi.NewMethod(name, name, abi.Function, "nonpayable", true, false, abi.Arguments{abi.Argument{Name: "a", Type: intTyp}}, abi.Arguments{abi.Argument{Name: "b", Type: intTyp}})
+				mockAbi.Methods[name] = meth
+				calls[i] = rpclib.NewEvmCall(mockAbi, name, common.Address{}, uint64(i)) // argument i doubles as the call's identity
+			}
+
+			// mock the rpc call to batch call context
+			// for simplicity we just set an error
+			ec.On("BatchCallContext", mock.Anything, mock.Anything).
+				Run(func(args mock.Arguments) {
+					evmCalls := args.Get(1).([]rpc.BatchElem)
+					for i := range evmCalls {
+						arg := evmCalls[i].Args[0].(map[string]interface{})["data"].(hexutil.Bytes)
+						arg = arg[len(arg)-10:] // tail of the calldata encodes the uint64 argument
+						evmCalls[i].Error = fmt.Errorf("%s", arg)
+					}
+				}).Return(nil)
+
+			// make the call and make sure the results are received in order
+			results, _ := bc.BatchCall(ctx, 0, calls)
+			assert.Len(t, results, len(calls))
+			for i, res := range results {
+				resNum, err := strconv.ParseInt(res.Err.Error()[2:], 16, 64) // strip "0x" and decode the echoed argument
+				assert.NoError(t, err)
+				assert.Equal(t, int64(i), resNum)
+			}
+		})
+	}
+}
+
+func TestParseOutput(t *testing.T) { // Table test covering index bounds, type mismatch, and wrapped-error paths of ParseOutput.
+	type testCase[T any] struct {
+		name       string
+		dataAndErr rpclib.DataAndErr
+		outputIdx  int
+		expRes     T
+		expErr     bool
+	}
+
+	testCases := []testCase[string]{
+		{
+			name:       "success",
+			dataAndErr: rpclib.DataAndErr{Outputs: []any{"abc"}, Err: nil},
+			outputIdx:  0,
+			expRes:     "abc",
+			expErr:     false,
+		},
+		{
+			name:       "index error on empty list",
+			dataAndErr: rpclib.DataAndErr{Outputs: []any{}, Err: nil},
+			outputIdx:  0,
+			expErr:     true,
+		},
+		{
+			name:       "index error on non-empty list",
+			dataAndErr: rpclib.DataAndErr{Outputs: []any{"a", "b"}, Err: nil},
+			outputIdx:  2,
+			expErr:     true,
+		},
+		{
+			name:       "negative index",
+			dataAndErr: rpclib.DataAndErr{Outputs: []any{"a", "b"}, Err: nil},
+			outputIdx:  -1,
+			expErr:     true,
+		},
+		{
+			name:       "wrong type",
+			dataAndErr: rpclib.DataAndErr{Outputs: []any{1234}, Err: nil},
+			outputIdx:  0,
+			expErr:     true,
+		},
+		{
+			name:       "has err",
+			dataAndErr: rpclib.DataAndErr{Outputs: []any{"abc"}, Err: fmt.Errorf("some err")},
+			outputIdx:  0,
+			expErr:     true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			res, err := rpclib.ParseOutput[string](tc.dataAndErr, tc.outputIdx)
+			if tc.expErr {
+				assert.Error(t, err)
+				return
+			}
+			assert.NoError(t, err)
+			assert.Equal(t, tc.expRes, res)
+		})
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks/evm_mock.go b/core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks/evm_mock.go
new file mode 100644
index 00000000000..aa42814186e
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks/evm_mock.go
@@ -0,0 +1,97 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package rpclibmocks
+
+import (
+ context "context"
+
+ rpclib "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// EvmBatchCaller is an autogenerated mock type for the EvmBatchCaller type
+type EvmBatchCaller struct {
+ mock.Mock
+}
+
+type EvmBatchCaller_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *EvmBatchCaller) EXPECT() *EvmBatchCaller_Expecter {
+ return &EvmBatchCaller_Expecter{mock: &_m.Mock}
+}
+
+// BatchCall provides a mock function with given fields: ctx, blockNumber, calls
+func (_m *EvmBatchCaller) BatchCall(ctx context.Context, blockNumber uint64, calls []rpclib.EvmCall) ([]rpclib.DataAndErr, error) {
+ ret := _m.Called(ctx, blockNumber, calls)
+
+ if len(ret) == 0 {
+ panic("no return value specified for BatchCall")
+ }
+
+ var r0 []rpclib.DataAndErr
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, []rpclib.EvmCall) ([]rpclib.DataAndErr, error)); ok {
+ return rf(ctx, blockNumber, calls)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, []rpclib.EvmCall) []rpclib.DataAndErr); ok {
+ r0 = rf(ctx, blockNumber, calls)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]rpclib.DataAndErr)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64, []rpclib.EvmCall) error); ok {
+ r1 = rf(ctx, blockNumber, calls)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// EvmBatchCaller_BatchCall_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BatchCall'
+type EvmBatchCaller_BatchCall_Call struct {
+ *mock.Call
+}
+
+// BatchCall is a helper method to define mock.On call
+// - ctx context.Context
+// - blockNumber uint64
+// - calls []rpclib.EvmCall
+func (_e *EvmBatchCaller_Expecter) BatchCall(ctx interface{}, blockNumber interface{}, calls interface{}) *EvmBatchCaller_BatchCall_Call {
+ return &EvmBatchCaller_BatchCall_Call{Call: _e.mock.On("BatchCall", ctx, blockNumber, calls)}
+}
+
+func (_c *EvmBatchCaller_BatchCall_Call) Run(run func(ctx context.Context, blockNumber uint64, calls []rpclib.EvmCall)) *EvmBatchCaller_BatchCall_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint64), args[2].([]rpclib.EvmCall))
+ })
+ return _c
+}
+
+func (_c *EvmBatchCaller_BatchCall_Call) Return(_a0 []rpclib.DataAndErr, _a1 error) *EvmBatchCaller_BatchCall_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *EvmBatchCaller_BatchCall_Call) RunAndReturn(run func(context.Context, uint64, []rpclib.EvmCall) ([]rpclib.DataAndErr, error)) *EvmBatchCaller_BatchCall_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewEvmBatchCaller creates a new instance of EvmBatchCaller. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewEvmBatchCaller(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *EvmBatchCaller {
+ mock := &EvmBatchCaller{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/metrics.go b/core/services/ocr2/plugins/ccip/metrics.go
new file mode 100644
index 00000000000..f481b5d447d
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/metrics.go
@@ -0,0 +1,99 @@
+package ccip
+
+import (
+ "strconv"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+)
+
+var (
+	unexpiredCommitRoots = promauto.NewGaugeVec(prometheus.GaugeOpts{
+		Name: "ccip_unexpired_commit_roots",
+		Help: "Number of unexpired commit roots processed by the plugin",
+	}, []string{"plugin", "source", "dest"})
+	messagesProcessed = promauto.NewGaugeVec(prometheus.GaugeOpts{
+		Name: "ccip_number_of_messages_processed",
+		Help: "Number of messages processed by the plugin during different OCR phases",
+	}, []string{"plugin", "source", "dest", "ocrPhase"})
+	sequenceNumberCounter = promauto.NewGaugeVec(prometheus.GaugeOpts{
+		Name: "ccip_sequence_number_counter",
+		Help: "Sequence number of the last message processed by the plugin",
+	}, []string{"plugin", "source", "dest", "ocrPhase"})
+)
+
+type ocrPhase string // Label value identifying which OCR phase produced a metric.
+
+const (
+	Observation  ocrPhase = "observation"
+	Report       ocrPhase = "report"
+	ShouldAccept ocrPhase = "shouldAccept"
+)
+
+type PluginMetricsCollector interface { // Implemented by pluginMetricsCollector and the noop collector below.
+	NumberOfMessagesProcessed(phase ocrPhase, count int)
+	NumberOfMessagesBasedOnInterval(phase ocrPhase, seqNrMin, seqNrMax uint64)
+	UnexpiredCommitRoots(count int)
+	SequenceNumber(phase ocrPhase, seqNr uint64)
+}
+
+type pluginMetricsCollector struct {
+	pluginName   string
+	source, dest string // chain IDs rendered as base-10 strings for Prometheus labels
+}
+
+func NewPluginMetricsCollector(pluginLabel string, sourceChainId, destChainId int64) *pluginMetricsCollector {
+	return &pluginMetricsCollector{
+		pluginName: pluginLabel,
+		source:     strconv.FormatInt(sourceChainId, 10),
+		dest:       strconv.FormatInt(destChainId, 10),
+	}
+}
+
+func (p *pluginMetricsCollector) NumberOfMessagesProcessed(phase ocrPhase, count int) { // Gauge is overwritten, not accumulated.
+	messagesProcessed.
+		WithLabelValues(p.pluginName, p.source, p.dest, string(phase)).
+		Set(float64(count))
+}
+
+func (p *pluginMetricsCollector) NumberOfMessagesBasedOnInterval(phase ocrPhase, seqNrMin, seqNrMax uint64) { // Inclusive interval: max - min + 1 messages.
+	messagesProcessed.
+		WithLabelValues(p.pluginName, p.source, p.dest, string(phase)).
+		Set(float64(seqNrMax - seqNrMin + 1))
+}
+
+func (p *pluginMetricsCollector) UnexpiredCommitRoots(count int) {
+	unexpiredCommitRoots.
+		WithLabelValues(p.pluginName, p.source, p.dest).
+		Set(float64(count))
+}
+
+func (p *pluginMetricsCollector) SequenceNumber(phase ocrPhase, seqNr uint64) {
+	// Don't publish price reports
+	if seqNr == 0 { // seqNr == 0 marks a price-only report; keep the gauge at the last real message
+		return
+	}
+
+	sequenceNumberCounter.
+		WithLabelValues(p.pluginName, p.source, p.dest, string(phase)).
+		Set(float64(seqNr))
+}
+
+var (
+	// NoopMetricsCollector is a no-op implementation of PluginMetricsCollector
+	NoopMetricsCollector PluginMetricsCollector = noop{}
+)
+
+type noop struct{} // Discards every metric; parameters are unnamed because they are unused.
+
+func (d noop) NumberOfMessagesProcessed(ocrPhase, int) {
+}
+
+func (d noop) NumberOfMessagesBasedOnInterval(ocrPhase, uint64, uint64) {
+}
+
+func (d noop) UnexpiredCommitRoots(int) {
+}
+
+func (d noop) SequenceNumber(ocrPhase, uint64) {
+}
diff --git a/core/services/ocr2/plugins/ccip/metrics_test.go b/core/services/ocr2/plugins/ccip/metrics_test.go
new file mode 100644
index 00000000000..eec67db7dd0
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/metrics_test.go
@@ -0,0 +1,47 @@
+package ccip
+
+import (
+ "testing"
+
+ "github.com/prometheus/client_golang/prometheus/testutil"
+ "github.com/stretchr/testify/assert"
+)
+
+const (
+ sourceChainId = 1337
+ destChainId = 2337
+)
+
+func Test_SequenceNumbers(t *testing.T) {
+	collector := NewPluginMetricsCollector("test", sourceChainId, destChainId)
+
+	collector.SequenceNumber(Report, 10)
+	assert.Equal(t, float64(10), testutil.ToFloat64(sequenceNumberCounter.WithLabelValues("test", "1337", "2337", "report")))
+
+	collector.SequenceNumber(Report, 0) // zero is ignored: the gauge must keep its previous value
+	assert.Equal(t, float64(10), testutil.ToFloat64(sequenceNumberCounter.WithLabelValues("test", "1337", "2337", "report")))
+}
+
+func Test_NumberOfMessages(t *testing.T) {
+	collector := NewPluginMetricsCollector("test", sourceChainId, destChainId)
+	collector2 := NewPluginMetricsCollector("test2", destChainId, sourceChainId) // reversed lane: must not share label sets with collector
+
+	collector.NumberOfMessagesBasedOnInterval(Observation, 1, 10)
+	assert.Equal(t, float64(10), testutil.ToFloat64(messagesProcessed.WithLabelValues("test", "1337", "2337", "observation")))
+
+	collector.NumberOfMessagesBasedOnInterval(Report, 5, 30) // inclusive interval: 30 - 5 + 1 = 26
+	assert.Equal(t, float64(26), testutil.ToFloat64(messagesProcessed.WithLabelValues("test", "1337", "2337", "report")))
+
+	collector2.NumberOfMessagesProcessed(Report, 15)
+	assert.Equal(t, float64(15), testutil.ToFloat64(messagesProcessed.WithLabelValues("test2", "2337", "1337", "report")))
+}
+
+func Test_UnexpiredCommitRoots(t *testing.T) {
+	collector := NewPluginMetricsCollector("test", sourceChainId, destChainId)
+
+	collector.UnexpiredCommitRoots(10)
+	assert.Equal(t, float64(10), testutil.ToFloat64(unexpiredCommitRoots.WithLabelValues("test", "1337", "2337")))
+
+	collector.UnexpiredCommitRoots(5) // gauge overwrites, it does not accumulate
+	assert.Equal(t, float64(5), testutil.ToFloat64(unexpiredCommitRoots.WithLabelValues("test", "1337", "2337")))
+}
diff --git a/core/services/ocr2/plugins/ccip/observations.go b/core/services/ocr2/plugins/ccip/observations.go
new file mode 100644
index 00000000000..f79d667a550
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/observations.go
@@ -0,0 +1,149 @@
+package ccip
+
+import (
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "strings"
+
+ "github.com/smartcontractkit/libocr/commontypes"
+
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+)
+
+// CommitObservation is the commit plugin's per-round observation payload.
+//
+// Note if a breaking change is introduced to this struct nodes running different versions
+// will not be able to unmarshal each other's observations. Do not modify unless you
+// know what you are doing.
+type CommitObservation struct {
+	Interval cciptypes.CommitStoreInterval `json:"interval"`
+	// Token prices keyed by token address; Marshal lower-cases the keys on the wire.
+	TokenPricesUSD map[cciptypes.Address]*big.Int `json:"tokensPerFeeCoin"`
+	// Deprecated — kept for wire compatibility; presumably superseded by SourceGasPriceUSDPerChain (confirm with readers).
+	SourceGasPriceUSD         *big.Int            `json:"sourceGasPrice"`
+	SourceGasPriceUSDPerChain map[uint64]*big.Int `json:"sourceGasPriceUSDPerChain"`
+}
+
+// Marshal MUST be used instead of raw json.Marshal(o) since it contains backwards compatibility related changes.
+// It lower-cases all TokenPricesUSD keys on the wire and never mutates the receiver.
+func (o CommitObservation) Marshal() ([]byte, error) {
+	// Shallow copy so the caller's observation (and its maps) stay untouched.
+	obsCopy := o
+
+	// Similar to: commitObservationJSONBackComp but for commit observation marshaling.
+	tokenPricesUSD := make(map[cciptypes.Address]*big.Int, len(obsCopy.TokenPricesUSD))
+	for k, v := range obsCopy.TokenPricesUSD {
+		tokenPricesUSD[cciptypes.Address(strings.ToLower(string(k)))] = v
+	}
+	obsCopy.TokenPricesUSD = tokenPricesUSD
+
+	return json.Marshal(&obsCopy)
+}
+
+// ExecutionObservation stores messages as a map pointing from a sequence number (uint) to the message payload (MsgData)
+// Having it structured this way is critical because:
+// * it prevents having duplicated sequence numbers within a single ExecutionObservation (compared to the list representation)
+// * prevents malicious actors from passing multiple messages with the same sequence number
+// Note if a breaking change is introduced to this struct nodes running different versions
+// will not be able to unmarshal each other's observations. Do not modify unless you
+// know what you are doing.
+type ExecutionObservation struct {
+	Messages map[uint64]MsgData `json:"messages"`
+}
+
+// MsgData is the per-message payload carried in an ExecutionObservation.
+type MsgData struct {
+	TokenData [][]byte `json:"tokenData"`
+}
+
+// ObservedMessage is a transient struct used for processing convenience within the plugin. It's easier to process observed messages
+// when all properties are flattened into a single structure.
+// It should not be serialized and returned from types.ReportingPlugin functions, please serialize/deserialize to/from ExecutionObservation instead using NewObservedMessage
+type ObservedMessage struct {
+	SeqNr uint64
+	MsgData
+}
+
+// NewExecutionObservation normalizes a list of observed messages into an
+// ExecutionObservation keyed by sequence number. If the input contains
+// duplicate sequence numbers, the last occurrence wins.
+func NewExecutionObservation(observations []ObservedMessage) ExecutionObservation {
+	msgs := make(map[uint64]MsgData, len(observations))
+	for _, obs := range observations {
+		msgs[obs.SeqNr] = MsgData{TokenData: obs.TokenData}
+	}
+	return ExecutionObservation{Messages: msgs}
+}
+
+// NewObservedMessage builds the plugin-internal flattened representation of a
+// message from its sequence number and token data.
+func NewObservedMessage(seqNr uint64, tokenData [][]byte) ObservedMessage {
+	data := MsgData{TokenData: tokenData}
+	return ObservedMessage{SeqNr: seqNr, MsgData: data}
+}
+
+// Marshal serializes the observation with the standard JSON encoder; unlike
+// CommitObservation.Marshal, no key normalization is needed here.
+func (o ExecutionObservation) Marshal() ([]byte, error) {
+	return json.Marshal(&o)
+}
+
+// GetParsableObservations checks the given observations for formatting and value errors.
+// It returns all valid observations, potentially being an empty list. It will log
+// malformed observations but never error.
+//
+// GetParsableObservations MUST be used instead of raw json.Unmarshal(o) since it contains backwards compatibility changes.
+func GetParsableObservations[O CommitObservation | ExecutionObservation](l logger.Logger, observations []types.AttributedObservation) []O {
+	var parseableObservations []O
+	var observers []commontypes.OracleID
+	for _, ao := range observations {
+		if len(ao.Observation) == 0 {
+			// Empty observation
+			l.Infow("Discarded empty observation", "observer", ao.Observer)
+			continue
+		}
+		var ob O
+		var err error
+		obsJSON := ao.Observation
+
+		switch any(ob).(type) {
+		case CommitObservation:
+			commitObservation, err1 := commitObservationJSONBackComp(ao.Observation)
+			if err1 != nil {
+				// Bug fix: log err1 (the actual back-compat unmarshal error);
+				// the outer err is always nil at this point.
+				l.Errorw("commit observation json backwards compatibility format failed", "err", err1,
+					"observation", string(ao.Observation), "observer", ao.Observer)
+				continue
+			}
+			ob = any(commitObservation).(O)
+		default:
+			err = json.Unmarshal(obsJSON, &ob)
+			if err != nil {
+				l.Errorw("Received unmarshallable observation", "err", err, "observation", string(ao.Observation), "observer", ao.Observer)
+				continue
+			}
+		}
+
+		parseableObservations = append(parseableObservations, ob)
+		observers = append(observers, ao.Observer)
+	}
+	l.Infow(
+		"Parsed observations",
+		"observers", observers,
+		"observersLength", len(observers),
+		"observationsLength", len(parseableObservations),
+		"rawObservationLength", len(observations),
+	)
+	return parseableObservations
+}
+
+// commitObservationJSONBackComp unmarshals a commit observation and, for
+// backwards compatibility, converts token prices to eip55.
+// Prior to cciptypes.Address we were using go-ethereum common.Address type which is
+// marshalled to lower-case while the string representation we used was eip55.
+// Nodes that run different ccip version should generate the same observations.
+func commitObservationJSONBackComp(obsJson []byte) (CommitObservation, error) {
+	var obs CommitObservation
+	err := json.Unmarshal(obsJson, &obs)
+	if err != nil {
+		return CommitObservation{}, fmt.Errorf("unmarshal observation: %w", err)
+	}
+	// Re-key the token price map with checksummed (EIP-55) addresses.
+	tokenPricesUSD := make(map[cciptypes.Address]*big.Int, len(obs.TokenPricesUSD))
+	for k, v := range obs.TokenPricesUSD {
+		tokenPricesUSD[ccipcalc.HexToAddress(string(k))] = v
+	}
+	obs.TokenPricesUSD = tokenPricesUSD
+	return obs, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/observations_test.go b/core/services/ocr2/plugins/ccip/observations_test.go
new file mode 100644
index 00000000000..a3143f157d7
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/observations_test.go
@@ -0,0 +1,305 @@
+package ccip
+
+import (
+ "encoding/json"
+ "math/big"
+ "strings"
+ "testing"
+
+ "github.com/leanovate/gopter"
+ "github.com/leanovate/gopter/gen"
+ "github.com/leanovate/gopter/prop"
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers"
+)
+
+// TestObservationFilter checks that empty observations are dropped by
+// GetParsableObservations while valid ones are returned intact.
+func TestObservationFilter(t *testing.T) {
+	lggr := logger.TestLogger(t)
+	obs1 := CommitObservation{Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 10}}
+	b1, err := obs1.Marshal()
+	require.NoError(t, err)
+	nonEmpty := GetParsableObservations[CommitObservation](lggr, []types.AttributedObservation{{Observation: b1}, {Observation: []byte{}}})
+	require.Equal(t, 1, len(nonEmpty))
+	assert.Equal(t, nonEmpty[0].Interval, obs1.Interval)
+}
+
+// CommitObservationLegacy is the observation format up to 1.4.16 release.
+// It is kept here only to verify that current nodes can still parse payloads
+// produced by older nodes (see TestObservationCompat_MultiChainGas).
+type CommitObservationLegacy struct {
+	Interval          cciptypes.CommitStoreInterval  `json:"interval"`
+	TokenPricesUSD    map[cciptypes.Address]*big.Int `json:"tokensPerFeeCoin"`
+	SourceGasPriceUSD *big.Int                       `json:"sourceGasPrice"`
+}
+
+// TestObservationCompat_MultiChainGas asserts that a legacy (<=1.4.16)
+// observation and a current one carrying the same data deserialize to
+// identical CommitObservation values.
+func TestObservationCompat_MultiChainGas(t *testing.T) {
+	obsLegacy := CommitObservationLegacy{
+		Interval: cciptypes.CommitStoreInterval{
+			Min: 1,
+			Max: 12,
+		},
+		TokenPricesUSD:    map[cciptypes.Address]*big.Int{ccipcalc.HexToAddress("0x1"): big.NewInt(1)},
+		SourceGasPriceUSD: big.NewInt(3)}
+	bL, err := json.Marshal(obsLegacy)
+	require.NoError(t, err)
+	obsNew := CommitObservation{
+		Interval: cciptypes.CommitStoreInterval{
+			Min: 1,
+			Max: 12,
+		},
+		TokenPricesUSD:    map[cciptypes.Address]*big.Int{ccipcalc.HexToAddress("0x1"): big.NewInt(1)},
+		SourceGasPriceUSD: big.NewInt(3),
+	}
+	bN, err := json.Marshal(obsNew)
+	require.NoError(t, err)
+
+	observations := GetParsableObservations[CommitObservation](logger.TestLogger(t), []types.AttributedObservation{{Observation: bL}, {Observation: bN}})
+
+	// Both payloads must parse, and parse to the same value.
+	assert.Equal(t, 2, len(observations))
+	assert.Equal(t, observations[0], observations[1])
+}
+
+// TestCommitObservationJsonDeserialization checks that a hand-written JSON
+// payload (with a lower-case token address) parses into the expected
+// CommitObservation, including EIP-55 re-keying of the token price map.
+func TestCommitObservationJsonDeserialization(t *testing.T) {
+	expectedObservation := CommitObservation{
+		Interval: cciptypes.CommitStoreInterval{
+			Min: 1,
+			Max: 12,
+		},
+		TokenPricesUSD: map[cciptypes.Address]*big.Int{
+			ccipcalc.HexToAddress("0x1"): big.NewInt(1)},
+		SourceGasPriceUSD: big.NewInt(3),
+	}
+
+	json := `{
+		"interval": {
+			"Min":1,
+			"Max":12
+		},
+		"tokensPerFeeCoin": {
+			"0x0000000000000000000000000000000000000001": 1
+		},
+		"sourceGasPrice": 3
+	}`
+
+	observations := GetParsableObservations[CommitObservation](logger.TestLogger(t), []types.AttributedObservation{{Observation: []byte(json)}})
+	assert.Equal(t, 1, len(observations))
+	assert.Equal(t, expectedObservation, observations[0])
+}
+
+// TestCommitObservationMarshal checks that Marshal lower-cases token price
+// addresses on the wire and does not mutate the original observation.
+func TestCommitObservationMarshal(t *testing.T) {
+	obs := CommitObservation{
+		Interval: cciptypes.CommitStoreInterval{
+			Min: 1,
+			Max: 12,
+		},
+		TokenPricesUSD:            map[cciptypes.Address]*big.Int{"0xAaAaAa": big.NewInt(1)},
+		SourceGasPriceUSD:         big.NewInt(3),
+		SourceGasPriceUSDPerChain: map[uint64]*big.Int{123: big.NewInt(3)},
+	}
+
+	b, err := obs.Marshal()
+	require.NoError(t, err)
+	assert.Equal(t, `{"interval":{"Min":1,"Max":12},"tokensPerFeeCoin":{"0xaaaaaa":1},"sourceGasPrice":3,"sourceGasPriceUSDPerChain":{"123":3}}`, string(b))
+
+	// Make sure that the call to Marshal did not alter the original observation object.
+	assert.Len(t, obs.TokenPricesUSD, 1)
+	_, exists := obs.TokenPricesUSD["0xAaAaAa"]
+	assert.True(t, exists)
+	_, exists = obs.TokenPricesUSD["0xaaaaaa"]
+	assert.False(t, exists)
+
+	assert.Len(t, obs.SourceGasPriceUSDPerChain, 1)
+	_, exists = obs.SourceGasPriceUSDPerChain[123]
+	assert.True(t, exists)
+}
+
+// TestExecutionObservationJsonDeserialization checks that messages keyed by
+// sequence number parse correctly and that a duplicate JSON key ("2" appears
+// twice) resolves to the last value, per encoding/json behavior.
+func TestExecutionObservationJsonDeserialization(t *testing.T) {
+	expectedObservation := ExecutionObservation{Messages: map[uint64]MsgData{
+		2: {TokenData: tokenData("c")},
+		1: {TokenData: tokenData("c")},
+	}}
+
+	// ["YQ=="] is "a"
+	// ["Yw=="] is "c"
+	json := `{
+		"messages": {
+			"2":{"tokenData":["YQ=="]},
+			"1":{"tokenData":["Yw=="]},
+			"2":{"tokenData":["Yw=="]}
+		}
+	}`
+
+	observations := GetParsableObservations[ExecutionObservation](logger.TestLogger(t), []types.AttributedObservation{{Observation: []byte(json)}})
+	assert.Equal(t, 1, len(observations))
+	assert.Equal(t, 2, len(observations[0].Messages))
+	assert.Equal(t, expectedObservation, observations[0])
+}
+
+// TestObservationSize is a property-based test (gopter) asserting that a
+// marshaled two-message ExecutionObservation never exceeds
+// MaxObservationLength, for arbitrary uint64 sequence numbers.
+func TestObservationSize(t *testing.T) {
+	testParams := gopter.DefaultTestParameters()
+	testParams.MinSuccessfulTests = 100
+	p := gopter.NewProperties(testParams)
+	p.Property("bounded observation size", prop.ForAll(func(min, max uint64) bool {
+		o := NewExecutionObservation(
+			[]ObservedMessage{
+				{
+					SeqNr:   min,
+					MsgData: MsgData{},
+				},
+				{
+					SeqNr:   max,
+					MsgData: MsgData{},
+				},
+			},
+		)
+		b, err := o.Marshal()
+		require.NoError(t, err)
+		return len(b) <= MaxObservationLength
+	}, gen.UInt64(), gen.UInt64()))
+	p.TestingRun(t)
+}
+
+// TestNewExecutionObservation is a table test covering nil/empty input,
+// distinct sequence numbers, and the last-one-wins semantics for duplicates.
+func TestNewExecutionObservation(t *testing.T) {
+	tests := []struct {
+		name         string
+		observations []ObservedMessage
+		want         ExecutionObservation
+	}{
+		{
+			name:         "nil observations",
+			observations: nil,
+			want:         ExecutionObservation{Messages: map[uint64]MsgData{}},
+		},
+		{
+			name:         "empty observations",
+			observations: []ObservedMessage{},
+			want:         ExecutionObservation{Messages: map[uint64]MsgData{}},
+		},
+		{
+			name: "observations with different sequence numbers",
+			observations: []ObservedMessage{
+				NewObservedMessage(1, tokenData("a")),
+				NewObservedMessage(2, tokenData("b")),
+				NewObservedMessage(3, tokenData("c")),
+			},
+			want: ExecutionObservation{
+				Messages: map[uint64]MsgData{
+					1: {TokenData: tokenData("a")},
+					2: {TokenData: tokenData("b")},
+					3: {TokenData: tokenData("c")},
+				},
+			},
+		},
+		{
+			name: "last one wins in case of duplicates",
+			observations: []ObservedMessage{
+				NewObservedMessage(1, tokenData("a")),
+				NewObservedMessage(1, tokenData("b")),
+				NewObservedMessage(1, tokenData("c")),
+			},
+			want: ExecutionObservation{
+				Messages: map[uint64]MsgData{
+					1: {TokenData: tokenData("c")},
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			assert.Equalf(t, tt.want, NewExecutionObservation(tt.observations), "NewExecutionObservation(%v)", tt.observations)
+		})
+	}
+}
+
+// tokenData wraps a string into the [][]byte shape used by MsgData.TokenData.
+func tokenData(value string) [][]byte {
+	return [][]byte{[]byte(value)}
+}
+
+// TestCommitObservationJsonSerializationDeserialization round-trips a commit
+// observation: parsing re-keys token addresses to EIP-55, marshaling emits
+// lower-case addresses, and re-parsing the lower-case form yields the same value.
+func TestCommitObservationJsonSerializationDeserialization(t *testing.T) {
+	jsonEncoded := `{
+		"interval": {
+			"Min":1,
+			"Max":12
+		},
+		"tokensPerFeeCoin": {
+			"0x0000000000000000000000000000000000000001": 1,
+			"0x507877C2E26f1387432D067D2DaAfa7d0420d90a": 2,
+			"0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": 3
+		},
+		"sourceGasPrice": 3,
+		"sourceGasPriceUSDPerChain": {
+			"123":3
+		}
+	}`
+
+	expectedObservation := CommitObservation{
+		Interval: cciptypes.CommitStoreInterval{
+			Min: 1,
+			Max: 12,
+		},
+		TokenPricesUSD: map[cciptypes.Address]*big.Int{
+			cciptypes.Address("0x0000000000000000000000000000000000000001"): big.NewInt(1),
+			cciptypes.Address("0x507877C2E26f1387432D067D2DaAfa7d0420d90a"): big.NewInt(2), // json eip55->eip55 parsed
+			cciptypes.Address("0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"): big.NewInt(3), // json lower->eip55 parsed
+		},
+		SourceGasPriceUSD: big.NewInt(3),
+		SourceGasPriceUSDPerChain: map[uint64]*big.Int{
+			123: big.NewInt(3),
+		},
+	}
+
+	observations := GetParsableObservations[CommitObservation](logger.TestLogger(t), []types.AttributedObservation{
+		{Observation: []byte(jsonEncoded)},
+	})
+	assert.Equal(t, 1, len(observations))
+	assert.Equal(t, expectedObservation, observations[0])
+
+	backToJson, err := expectedObservation.Marshal()
+	// we expect the json encoded addresses to be lower-case
+	exp := strings.ReplaceAll(
+		jsonEncoded, "0x507877C2E26f1387432D067D2DaAfa7d0420d90a", strings.ToLower("0x507877C2E26f1387432D067D2DaAfa7d0420d90a"))
+	assert.NoError(t, err)
+	assert.JSONEq(t, exp, string(backToJson))
+
+	// and we expect to get the same results after we parse the lower-case addresses
+	observations = GetParsableObservations[CommitObservation](logger.TestLogger(t), []types.AttributedObservation{
+		{Observation: []byte(jsonEncoded)},
+	})
+	assert.Equal(t, 1, len(observations))
+	assert.Equal(t, expectedObservation, observations[0])
+}
+
+// TestAddressEncodingBackwardsCompatibility guards the set of cciptypes.Address
+// fields in serialized structs, so adding/removing one forces the author to
+// revisit address formatting.
+//
+// The intention of this test is to remind including proper formatting of addresses after config is updated.
+//
+// The following tests will fail when a new cciptypes.Address field is added or removed.
+// If you notice that the test is failing, make sure to apply proper address formatting
+// after the struct is marshalled/unmarshalled and then include your new field in the expected fields slice to
+// make this test pass or if you removed a field, remove it from the expected fields slice.
+func TestAddressEncodingBackwardsCompatibility(t *testing.T) {
+	t.Run("job spec config", func(t *testing.T) {
+		exp := []string{"ccip.Address OffRamp"}
+
+		fields := testhelpers.FindStructFieldsOfCertainType(
+			"ccip.Address",
+			config.CommitPluginJobSpecConfig{PriceGetterConfig: &config.DynamicPriceGetterConfig{}},
+		)
+		assert.Equal(t, exp, fields)
+	})
+
+	t.Run("commit observation", func(t *testing.T) {
+		exp := []string{"map[ccip.Address]*big.Int TokenPricesUSD"}
+
+		fields := testhelpers.FindStructFieldsOfCertainType(
+			"ccip.Address",
+			CommitObservation{SourceGasPriceUSD: big.NewInt(0)},
+		)
+		assert.Equal(t, exp, fields)
+	})
+}
diff --git a/core/services/ocr2/plugins/ccip/pkg/leafer/leafer.go b/core/services/ocr2/plugins/ccip/pkg/leafer/leafer.go
new file mode 100644
index 00000000000..c334f159fd2
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/pkg/leafer/leafer.go
@@ -0,0 +1,61 @@
+package leafer
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0"
+)
+
+// LeafHasher converts a CCIPSendRequested event into something that can be hashed and hashes it.
+type LeafHasher interface {
+	HashLeaf(log types.Log) ([32]byte, error)
+}
+
+// Version is the contract to use.
+type Version string
+
+// Onramp contract versions supported by MakeLeafHasher.
+const (
+	V1_0_0 Version = "v1_0_0"
+	V1_2_0 Version = "v1_2_0"
+	V1_5_0 Version = "v1_5_0"
+)
+
+// MakeLeafHasher is a factory function to construct the onramp implementing the HashLeaf function for a given version.
+// It binds the onramp contract at onRampId via cl and returns an error for an
+// unrecognized version or a failed contract binding.
+func MakeLeafHasher(ver Version, cl bind.ContractBackend, sourceChainSelector uint64, destChainSelector uint64, onRampId common.Address, ctx hashutil.Hasher[[32]byte]) (LeafHasher, error) {
+	switch ver {
+	case V1_0_0:
+		or, err := evm_2_evm_onramp_1_0_0.NewEVM2EVMOnRamp(onRampId, cl)
+		if err != nil {
+			return nil, err
+		}
+		h := v1_0_0.NewLeafHasher(sourceChainSelector, destChainSelector, onRampId, ctx, or)
+		return h, nil
+	case V1_2_0:
+		or, err := evm_2_evm_onramp_1_2_0.NewEVM2EVMOnRamp(onRampId, cl)
+		if err != nil {
+			return nil, err
+		}
+		h := v1_2_0.NewLeafHasher(sourceChainSelector, destChainSelector, onRampId, ctx, or)
+		return h, nil
+	case V1_5_0:
+		or, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampId, cl)
+		if err != nil {
+			return nil, err
+		}
+		h := v1_5_0.NewLeafHasher(sourceChainSelector, destChainSelector, onRampId, ctx, or)
+		return h, nil
+	default:
+		return nil, fmt.Errorf("unknown version %q", ver)
+	}
+}
diff --git a/core/services/ocr2/plugins/ccip/prices/da_price_estimator.go b/core/services/ocr2/plugins/ccip/prices/da_price_estimator.go
new file mode 100644
index 00000000000..7c75b9bdd99
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/prices/da_price_estimator.go
@@ -0,0 +1,176 @@
+package prices
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/rollups"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+)
+
+// DAGasPriceEstimator estimates gas prices for chains that carry a
+// data-availability (DA) cost component. Prices are packed into one big.Int:
+// the DA gas price occupies the high bits (left-shifted by priceEncodingLength)
+// and the execution gas price the low bits.
+type DAGasPriceEstimator struct {
+	execEstimator       GasPriceEstimator // estimator for the execution-layer component
+	l1Oracle            rollups.L1Oracle  // source of the DA (L1) gas price; may be nil
+	priceEncodingLength uint              // bit width reserved for each packed price component
+	daDeviationPPB      int64             // allowed DA price deviation, parts-per-billion
+	daOverheadGas       int64             // fixed DA gas overhead per message
+	gasPerDAByte        int64             // DA gas charged per byte of message data
+	daMultiplier        int64             // DA cost multiplier, scaled by daMultiplierBase
+}
+
+// NewDAGasPriceEstimator constructs a DAGasPriceEstimator wrapping an
+// ExecGasPriceEstimator for the execution component and the estimator's
+// L1Oracle for the DA component.
+//
+// NOTE(review): daOverheadGas, gasPerDAByte and daMultiplier are left at their
+// zero values here, which makes estimateDACostUSD compute a zero DA cost —
+// confirm these fields are populated elsewhere before relying on
+// EstimateMsgCostUSD from instances built via this constructor.
+func NewDAGasPriceEstimator(
+	estimator gas.EvmFeeEstimator,
+	maxGasPrice *big.Int,
+	deviationPPB int64,
+	daDeviationPPB int64,
+) *DAGasPriceEstimator {
+	return &DAGasPriceEstimator{
+		execEstimator:       NewExecGasPriceEstimator(estimator, maxGasPrice, deviationPPB),
+		l1Oracle:            estimator.L1Oracle(),
+		priceEncodingLength: daGasPriceEncodingLength,
+		daDeviationPPB:      daDeviationPPB,
+	}
+}
+
+// GetGasPrice returns the packed gas price: the execution gas price in the low
+// priceEncodingLength bits plus, when an L1 oracle is configured and reports a
+// positive price, the DA gas price shifted into the high bits. Either component
+// exceeding priceEncodingLength bits is an error.
+func (g DAGasPriceEstimator) GetGasPrice(ctx context.Context) (*big.Int, error) {
+	execGasPrice, err := g.execEstimator.GetGasPrice(ctx)
+	if err != nil {
+		return nil, err
+	}
+	var gasPrice *big.Int = execGasPrice
+	// The exec component must fit in its reserved bit field.
+	if gasPrice.BitLen() > int(g.priceEncodingLength) {
+		return nil, fmt.Errorf("native gas price exceeded max range %+v", gasPrice)
+	}
+
+	// No DA oracle: the price is the execution component alone.
+	if g.l1Oracle == nil {
+		return gasPrice, nil
+	}
+
+	daGasPriceWei, err := g.l1Oracle.GasPrice(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if daGasPrice := daGasPriceWei.ToInt(); daGasPrice.Cmp(big.NewInt(0)) > 0 {
+		if daGasPrice.BitLen() > int(g.priceEncodingLength) {
+			return nil, fmt.Errorf("data availability gas price exceeded max range %+v", daGasPrice)
+		}
+
+		// Pack: DA price into the high bits, exec price stays in the low bits.
+		daGasPrice = new(big.Int).Lsh(daGasPrice, g.priceEncodingLength)
+		gasPrice = new(big.Int).Add(gasPrice, daGasPrice)
+	}
+
+	return gasPrice, nil
+}
+
+// DenoteInUSD converts a packed (DA|exec) gas price into USD terms using the
+// wrapped-native token price, converting each component separately and
+// re-packing the result in the same bit layout.
+func (g DAGasPriceEstimator) DenoteInUSD(p *big.Int, wrappedNativePrice *big.Int) (*big.Int, error) {
+	daGasPrice, execGasPrice, err := g.parseEncodedGasPrice(p)
+	if err != nil {
+		return nil, err
+	}
+
+	// This assumes l1GasPrice is priced using the same native token as l2 native
+	daUSD := ccipcalc.CalculateUsdPerUnitGas(daGasPrice, wrappedNativePrice)
+	if daUSD.BitLen() > int(g.priceEncodingLength) {
+		return nil, fmt.Errorf("data availability gas price USD exceeded max range %+v", daUSD)
+	}
+	execUSD := ccipcalc.CalculateUsdPerUnitGas(execGasPrice, wrappedNativePrice)
+	if execUSD.BitLen() > int(g.priceEncodingLength) {
+		return nil, fmt.Errorf("exec gas price USD exceeded max range %+v", execUSD)
+	}
+
+	// Re-pack the USD components into the same (DA|exec) layout.
+	daUSD = new(big.Int).Lsh(daUSD, g.priceEncodingLength)
+	return new(big.Int).Add(daUSD, execUSD), nil
+}
+
+// Median computes the median of each price component independently (DA and
+// exec), then re-packs the two medians into one value. Taking component-wise
+// medians avoids one outlier component dragging the other along.
+func (g DAGasPriceEstimator) Median(gasPrices []*big.Int) (*big.Int, error) {
+	daPrices := make([]*big.Int, len(gasPrices))
+	execPrices := make([]*big.Int, len(gasPrices))
+
+	for i := range gasPrices {
+		daGasPrice, execGasPrice, err := g.parseEncodedGasPrice(gasPrices[i])
+		if err != nil {
+			return nil, err
+		}
+
+		daPrices[i] = daGasPrice
+		execPrices[i] = execGasPrice
+	}
+
+	daMedian := ccipcalc.BigIntSortedMiddle(daPrices)
+	execMedian := ccipcalc.BigIntSortedMiddle(execPrices)
+
+	daMedian = new(big.Int).Lsh(daMedian, g.priceEncodingLength)
+	return new(big.Int).Add(daMedian, execMedian), nil
+}
+
+// Deviates reports whether two packed gas prices differ beyond the configured
+// thresholds: the exec components are compared first via the exec estimator,
+// and only if they do not deviate are the DA components compared against
+// daDeviationPPB.
+func (g DAGasPriceEstimator) Deviates(p1, p2 *big.Int) (bool, error) {
+	p1DAGasPrice, p1ExecGasPrice, err := g.parseEncodedGasPrice(p1)
+	if err != nil {
+		return false, err
+	}
+	p2DAGasPrice, p2ExecGasPrice, err := g.parseEncodedGasPrice(p2)
+	if err != nil {
+		return false, err
+	}
+
+	execDeviates, err := g.execEstimator.Deviates(p1ExecGasPrice, p2ExecGasPrice)
+	if err != nil {
+		return false, err
+	}
+	// Short-circuit: exec deviation alone is sufficient.
+	if execDeviates {
+		return execDeviates, nil
+	}
+
+	return ccipcalc.Deviates(p1DAGasPrice, p2DAGasPrice, g.daDeviationPPB), nil
+}
+
+// EstimateMsgCostUSD estimates the total USD cost of a message: the exec
+// estimator's cost for the execution component, plus the DA cost when the
+// packed price carries a positive DA component.
+func (g DAGasPriceEstimator) EstimateMsgCostUSD(p *big.Int, wrappedNativePrice *big.Int, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error) {
+	daGasPrice, execGasPrice, err := g.parseEncodedGasPrice(p)
+	if err != nil {
+		return nil, err
+	}
+
+	execCostUSD, err := g.execEstimator.EstimateMsgCostUSD(execGasPrice, wrappedNativePrice, msg)
+	if err != nil {
+		return nil, err
+	}
+
+	// If there is data availability price component, then include data availability cost in fee estimation
+	if daGasPrice.Cmp(big.NewInt(0)) > 0 {
+		daGasCostUSD := g.estimateDACostUSD(daGasPrice, wrappedNativePrice, msg)
+		execCostUSD = new(big.Int).Add(daGasCostUSD, execCostUSD)
+	}
+	return execCostUSD, nil
+}
+
+// parseEncodedGasPrice splits a packed price into (daGasPrice, execGasPrice):
+// the DA component is the high bits (right-shift by priceEncodingLength), the
+// exec component the low bits (mod 2^priceEncodingLength). Errors if p does not
+// fit in 2*priceEncodingLength bits.
+func (g DAGasPriceEstimator) parseEncodedGasPrice(p *big.Int) (*big.Int, *big.Int, error) {
+	if p.BitLen() > int(g.priceEncodingLength*2) {
+		return nil, nil, fmt.Errorf("encoded gas price exceeded max range %+v", p)
+	}
+
+	daGasPrice := new(big.Int).Rsh(p, g.priceEncodingLength)
+
+	// daStart = 2^priceEncodingLength; p mod daStart isolates the low bits.
+	daStart := new(big.Int).Lsh(big.NewInt(1), g.priceEncodingLength)
+	execGasPrice := new(big.Int).Mod(p, daStart)
+
+	return daGasPrice, execGasPrice, nil
+}
+
+// estimateDACostUSD prices a message's data-availability gas in USD: DA bytes
+// are the fixed message overhead plus payload, per-token overhead, and source
+// token data; gas = bytes*gasPerDAByte + daOverheadGas, scaled by
+// daMultiplier/daMultiplierBase, then converted via the wrapped-native price.
+func (g DAGasPriceEstimator) estimateDACostUSD(daGasPrice *big.Int, wrappedNativePrice *big.Int, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) *big.Int {
+	var sourceTokenDataLen int
+	for _, tokenData := range msg.SourceTokenData {
+		sourceTokenDataLen += len(tokenData)
+	}
+
+	dataLen := evmMessageFixedBytes + len(msg.Data) + len(msg.TokenAmounts)*evmMessageBytesPerToken + sourceTokenDataLen
+	dataGas := big.NewInt(int64(dataLen)*g.gasPerDAByte + g.daOverheadGas)
+
+	dataGasEstimate := new(big.Int).Mul(dataGas, daGasPrice)
+	dataGasEstimate = new(big.Int).Div(new(big.Int).Mul(dataGasEstimate, big.NewInt(g.daMultiplier)), big.NewInt(daMultiplierBase))
+
+	return ccipcalc.CalculateUsdPerUnitGas(dataGasEstimate, wrappedNativePrice)
+}
diff --git a/core/services/ocr2/plugins/ccip/prices/da_price_estimator_test.go b/core/services/ocr2/plugins/ccip/prices/da_price_estimator_test.go
new file mode 100644
index 00000000000..2f8616a8669
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/prices/da_price_estimator_test.go
@@ -0,0 +1,440 @@
+package prices
+
+import (
+ "context"
+ "math/big"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/rollups/mocks"
+)
+
+// encodeGasPrice packs a DA price into the high bits (left-shifted by
+// daGasPriceEncodingLength) and an exec price into the low bits, mirroring the
+// estimator's wire format.
+func encodeGasPrice(daPrice, execPrice *big.Int) *big.Int {
+	return new(big.Int).Add(new(big.Int).Lsh(daPrice, daGasPriceEncodingLength), execPrice)
+}
+
+// TestDAPriceEstimator_GetGasPrice covers packing of DA/exec components,
+// zero components, an out-of-range DA price, and the nil-L1-oracle path where
+// only the exec price is returned.
+func TestDAPriceEstimator_GetGasPrice(t *testing.T) {
+	ctx := context.Background()
+
+	testCases := []struct {
+		name         string
+		daGasPrice   *big.Int
+		execGasPrice *big.Int
+		expPrice     *big.Int
+		expErr       bool
+	}{
+		{
+			name:         "base",
+			daGasPrice:   big.NewInt(1),
+			execGasPrice: big.NewInt(0),
+			expPrice:     encodeGasPrice(big.NewInt(1), big.NewInt(0)),
+			expErr:       false,
+		},
+		{
+			name:         "large values",
+			daGasPrice:   big.NewInt(1e9),   // 1 gwei
+			execGasPrice: big.NewInt(200e9), // 200 gwei
+			expPrice:     encodeGasPrice(big.NewInt(1e9), big.NewInt(200e9)),
+			expErr:       false,
+		},
+		{
+			name:         "zero DA price",
+			daGasPrice:   big.NewInt(0),
+			execGasPrice: big.NewInt(200e9),
+			expPrice:     encodeGasPrice(big.NewInt(0), big.NewInt(200e9)),
+			expErr:       false,
+		},
+		{
+			name:         "zero exec price",
+			daGasPrice:   big.NewInt(1e9),
+			execGasPrice: big.NewInt(0),
+			expPrice:     encodeGasPrice(big.NewInt(1e9), big.NewInt(0)),
+			expErr:       false,
+		},
+		{
+			// DA price of exactly 2^daGasPriceEncodingLength overflows its bit field.
+			name:         "price out of bounds",
+			daGasPrice:   new(big.Int).Lsh(big.NewInt(1), daGasPriceEncodingLength),
+			execGasPrice: big.NewInt(1),
+			expPrice:     nil,
+			expErr:       true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			execEstimator := NewMockGasPriceEstimator(t)
+			execEstimator.On("GetGasPrice", ctx).Return(tc.execGasPrice, nil)
+
+			l1Oracle := mocks.NewL1Oracle(t)
+			l1Oracle.On("GasPrice", ctx).Return(assets.NewWei(tc.daGasPrice), nil)
+
+			g := DAGasPriceEstimator{
+				execEstimator:       execEstimator,
+				l1Oracle:            l1Oracle,
+				priceEncodingLength: daGasPriceEncodingLength,
+			}
+
+			gasPrice, err := g.GetGasPrice(ctx)
+			if tc.expErr {
+				assert.Error(t, err)
+				return
+			}
+			assert.NoError(t, err)
+			assert.Equal(t, tc.expPrice, gasPrice)
+		})
+	}
+
+	t.Run("nil L1 oracle", func(t *testing.T) {
+		expPrice := big.NewInt(1)
+
+		execEstimator := NewMockGasPriceEstimator(t)
+		execEstimator.On("GetGasPrice", ctx).Return(expPrice, nil)
+
+		g := DAGasPriceEstimator{
+			execEstimator:       execEstimator,
+			l1Oracle:            nil,
+			priceEncodingLength: daGasPriceEncodingLength,
+		}
+
+		gasPrice, err := g.GetGasPrice(ctx)
+		assert.NoError(t, err)
+		assert.Equal(t, expPrice, gasPrice)
+	})
+}
+
+// TestDAPriceEstimator_DenoteInUSD checks USD conversion of packed prices,
+// including integer-division truncation to 0 for tiny native prices and large
+// 1e18-scaled values.
+func TestDAPriceEstimator_DenoteInUSD(t *testing.T) {
+	val1e18 := func(val int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(val)) }
+
+	testCases := []struct {
+		name        string
+		gasPrice    *big.Int
+		nativePrice *big.Int
+		expPrice    *big.Int
+	}{
+		{
+			name:        "base",
+			gasPrice:    encodeGasPrice(big.NewInt(1e9), big.NewInt(10e9)),
+			nativePrice: val1e18(2_000),
+			expPrice:    encodeGasPrice(big.NewInt(2000e9), big.NewInt(20000e9)),
+		},
+		{
+			name:        "low price truncates to 0",
+			gasPrice:    encodeGasPrice(big.NewInt(1e9), big.NewInt(10e9)),
+			nativePrice: big.NewInt(1),
+			expPrice:    big.NewInt(0),
+		},
+		{
+			name:        "high price",
+			gasPrice:    encodeGasPrice(val1e18(1), val1e18(10)),
+			nativePrice: val1e18(2000),
+			expPrice:    encodeGasPrice(val1e18(2_000), val1e18(20_000)),
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			g := DAGasPriceEstimator{
+				priceEncodingLength: daGasPriceEncodingLength,
+			}
+
+			gasPrice, err := g.DenoteInUSD(tc.gasPrice, tc.nativePrice)
+			assert.NoError(t, err)
+			// Compare with Cmp so numerically-equal big.Ints match regardless of representation.
+			assert.True(t, tc.expPrice.Cmp(gasPrice) == 0)
+		})
+	}
+}
+
+// TestDAPriceEstimator_Median checks that the median is taken per component
+// (DA and exec independently), including unsorted inputs, even-length slices,
+// and all-zero prices.
+func TestDAPriceEstimator_Median(t *testing.T) {
+	val1e18 := func(val int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(val)) }
+
+	testCases := []struct {
+		name      string
+		gasPrices []*big.Int
+		expMedian *big.Int
+	}{
+		{
+			name: "base",
+			gasPrices: []*big.Int{
+				encodeGasPrice(big.NewInt(1), big.NewInt(1)),
+				encodeGasPrice(big.NewInt(2), big.NewInt(2)),
+				encodeGasPrice(big.NewInt(3), big.NewInt(3)),
+			},
+			expMedian: encodeGasPrice(big.NewInt(2), big.NewInt(2)),
+		},
+		{
+			name: "median 2",
+			gasPrices: []*big.Int{
+				encodeGasPrice(big.NewInt(1), big.NewInt(1)),
+				encodeGasPrice(big.NewInt(2), big.NewInt(2)),
+			},
+			expMedian: encodeGasPrice(big.NewInt(2), big.NewInt(2)),
+		},
+		{
+			name: "large values",
+			gasPrices: []*big.Int{
+				encodeGasPrice(val1e18(5), val1e18(5)),
+				encodeGasPrice(val1e18(4), val1e18(4)),
+				encodeGasPrice(val1e18(3), val1e18(3)),
+				encodeGasPrice(val1e18(2), val1e18(2)),
+				encodeGasPrice(val1e18(1), val1e18(1)),
+			},
+			expMedian: encodeGasPrice(val1e18(3), val1e18(3)),
+		},
+		{
+			name:      "zeros",
+			gasPrices: []*big.Int{big.NewInt(0), big.NewInt(0), big.NewInt(0)},
+			expMedian: big.NewInt(0),
+		},
+		{
+			name: "picks median of each price component individually",
+			gasPrices: []*big.Int{
+				encodeGasPrice(val1e18(1), val1e18(3)),
+				encodeGasPrice(val1e18(2), val1e18(2)),
+				encodeGasPrice(val1e18(3), val1e18(1)),
+			},
+			expMedian: encodeGasPrice(val1e18(2), val1e18(2)),
+		},
+		{
+			name: "unsorted even number of price components",
+			gasPrices: []*big.Int{
+				encodeGasPrice(val1e18(1), val1e18(22)),
+				encodeGasPrice(val1e18(4), val1e18(33)),
+				encodeGasPrice(val1e18(2), val1e18(44)),
+				encodeGasPrice(val1e18(3), val1e18(11)),
+			},
+			expMedian: encodeGasPrice(val1e18(3), val1e18(33)),
+		},
+		{
+			name: "equal DA price components",
+			gasPrices: []*big.Int{
+				encodeGasPrice(val1e18(2), val1e18(22)),
+				encodeGasPrice(val1e18(2), val1e18(33)),
+				encodeGasPrice(val1e18(2), val1e18(44)),
+				encodeGasPrice(val1e18(2), val1e18(11)),
+			},
+			expMedian: encodeGasPrice(val1e18(2), val1e18(33)),
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			g := DAGasPriceEstimator{
+				priceEncodingLength: daGasPriceEncodingLength,
+			}
+
+			gasPrice, err := g.Median(tc.gasPrices)
+			assert.NoError(t, err)
+			assert.True(t, tc.expMedian.Cmp(gasPrice) == 0)
+		})
+	}
+}
+
+// TestDAPriceEstimator_Deviates checks deviation detection per component:
+// either component deviating (in either direction) is enough, and neither
+// deviating within threshold reports false.
+func TestDAPriceEstimator_Deviates(t *testing.T) {
+	testCases := []struct {
+		name             string
+		gasPrice1        *big.Int
+		gasPrice2        *big.Int
+		daDeviationPPB   int64
+		execDeviationPPB int64
+		expDeviates      bool
+	}{
+		{
+			name:             "base",
+			gasPrice1:        encodeGasPrice(big.NewInt(100e8), big.NewInt(100e8)),
+			gasPrice2:        encodeGasPrice(big.NewInt(79e8), big.NewInt(79e8)),
+			daDeviationPPB:   2e8,
+			execDeviationPPB: 2e8,
+			expDeviates:      true,
+		},
+		{
+			name:             "negative difference also deviates",
+			gasPrice1:        encodeGasPrice(big.NewInt(100e8), big.NewInt(100e8)),
+			gasPrice2:        encodeGasPrice(big.NewInt(121e8), big.NewInt(121e8)),
+			daDeviationPPB:   2e8,
+			execDeviationPPB: 2e8,
+			expDeviates:      true,
+		},
+		{
+			name:             "only DA component deviates",
+			gasPrice1:        encodeGasPrice(big.NewInt(100e8), big.NewInt(100e8)),
+			gasPrice2:        encodeGasPrice(big.NewInt(150e8), big.NewInt(110e8)),
+			daDeviationPPB:   2e8,
+			execDeviationPPB: 2e8,
+			expDeviates:      true,
+		},
+		{
+			name:             "only exec component deviates",
+			gasPrice1:        encodeGasPrice(big.NewInt(100e8), big.NewInt(100e8)),
+			gasPrice2:        encodeGasPrice(big.NewInt(110e8), big.NewInt(150e8)),
+			daDeviationPPB:   2e8,
+			execDeviationPPB: 2e8,
+			expDeviates:      true,
+		},
+		{
+			name:             "both do not deviate",
+			gasPrice1:        encodeGasPrice(big.NewInt(100e8), big.NewInt(100e8)),
+			gasPrice2:        encodeGasPrice(big.NewInt(110e8), big.NewInt(110e8)),
+			daDeviationPPB:   2e8,
+			execDeviationPPB: 2e8,
+			expDeviates:      false,
+		},
+		{
+			name:             "zero DA price and exec deviates",
+			gasPrice1:        encodeGasPrice(big.NewInt(0), big.NewInt(100e8)),
+			gasPrice2:        encodeGasPrice(big.NewInt(0), big.NewInt(121e8)),
+			daDeviationPPB:   2e8,
+			execDeviationPPB: 2e8,
+			expDeviates:      true,
+		},
+		{
+			name:             "zero DA price and exec does not deviate",
+			gasPrice1:        encodeGasPrice(big.NewInt(0), big.NewInt(100e8)),
+			gasPrice2:        encodeGasPrice(big.NewInt(0), big.NewInt(110e8)),
+			daDeviationPPB:   2e8,
+			execDeviationPPB: 2e8,
+			expDeviates:      false,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			g := DAGasPriceEstimator{
+				execEstimator: ExecGasPriceEstimator{
+					deviationPPB: tc.execDeviationPPB,
+				},
+				daDeviationPPB:      tc.daDeviationPPB,
+				priceEncodingLength: daGasPriceEncodingLength,
+			}
+
+			deviated, err := g.Deviates(tc.gasPrice1, tc.gasPrice2)
+			assert.NoError(t, err)
+			if tc.expDeviates {
+				assert.True(t, deviated)
+			} else {
+				assert.False(t, deviated)
+			}
+		})
+	}
+}
+
// TestDAPriceEstimator_EstimateMsgCostUSD checks that the DA estimator adds
// the DA (data availability) cost on top of the exec cost reported by the
// wrapped exec estimator. The packed gas price carries the DA component
// first and the exec component second (see encodeGasPrice usage above).
func TestDAPriceEstimator_EstimateMsgCostUSD(t *testing.T) {
	// Fixed exec-side cost returned by the mocked exec estimator in every case.
	execCostUSD := big.NewInt(100_000)

	testCases := []struct {
		name               string
		gasPrice           *big.Int
		wrappedNativePrice *big.Int
		msg                cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta
		daOverheadGas      int64
		gasPerDAByte       int64
		daMultiplier       int64
		expUSD             *big.Int
	}{
		{
			name:               "only DA overhead",
			gasPrice:           encodeGasPrice(big.NewInt(1e9), big.NewInt(0)), // 1 gwei DA price, 0 exec price
			wrappedNativePrice: big.NewInt(1e18),                               // $1
			msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				EVM2EVMMessage: cciptypes.EVM2EVMMessage{
					Data:            []byte{},
					TokenAmounts:    []cciptypes.TokenAmount{},
					SourceTokenData: [][]byte{},
				},
			},
			daOverheadGas: 100_000,
			gasPerDAByte:  0,
			daMultiplier:  10_000, // 1x multiplier
			expUSD:        new(big.Int).Add(execCostUSD, big.NewInt(100_000e9)),
		},
		{
			name:               "include message data gas",
			gasPrice:           encodeGasPrice(big.NewInt(1e9), big.NewInt(0)), // 1 gwei DA price, 0 exec price
			wrappedNativePrice: big.NewInt(1e18),                               // $1
			msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				EVM2EVMMessage: cciptypes.EVM2EVMMessage{
					Data:         make([]byte, 1_000),
					TokenAmounts: make([]cciptypes.TokenAmount, 5),
					SourceTokenData: [][]byte{
						make([]byte, 10), make([]byte, 10), make([]byte, 10), make([]byte, 10), make([]byte, 10),
					},
				},
			},
			daOverheadGas: 100_000,
			gasPerDAByte:  16,
			daMultiplier:  10_000, // 1x multiplier
			// 100_000 overhead + 16 gas/byte * (fixed msg bytes + 1_000 data
			// + 5 tokens + 50 source token data bytes) at 1 gwei, $1 native.
			expUSD: new(big.Int).Add(execCostUSD, big.NewInt(134_208e9)),
		},
		{
			name:               "zero DA price",
			gasPrice:           big.NewInt(0),    // 0 DA price, 0 exec price
			wrappedNativePrice: big.NewInt(1e18), // $1
			msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				EVM2EVMMessage: cciptypes.EVM2EVMMessage{
					Data:            []byte{},
					TokenAmounts:    []cciptypes.TokenAmount{},
					SourceTokenData: [][]byte{},
				},
			},
			daOverheadGas: 100_000,
			gasPerDAByte:  16,
			daMultiplier:  10_000, // 1x multiplier
			expUSD:        execCostUSD,
		},
		{
			name:               "double native price",
			gasPrice:           encodeGasPrice(big.NewInt(1e9), big.NewInt(0)), // 1 gwei DA price, 0 exec price
			wrappedNativePrice: big.NewInt(2e18),                               // $2
			msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				EVM2EVMMessage: cciptypes.EVM2EVMMessage{
					Data:            []byte{},
					TokenAmounts:    []cciptypes.TokenAmount{},
					SourceTokenData: [][]byte{},
				},
			},
			daOverheadGas: 100_000,
			gasPerDAByte:  0,
			daMultiplier:  10_000, // 1x multiplier
			expUSD:        new(big.Int).Add(execCostUSD, big.NewInt(200_000e9)),
		},
		{
			name:               "half multiplier",
			gasPrice:           encodeGasPrice(big.NewInt(1e9), big.NewInt(0)), // 1 gwei DA price, 0 exec price
			wrappedNativePrice: big.NewInt(1e18),                               // $1
			msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				EVM2EVMMessage: cciptypes.EVM2EVMMessage{
					Data:            []byte{},
					TokenAmounts:    []cciptypes.TokenAmount{},
					SourceTokenData: [][]byte{},
				},
			},
			daOverheadGas: 100_000,
			gasPerDAByte:  0,
			daMultiplier:  5_000, // 0.5x multiplier
			expUSD:        new(big.Int).Add(execCostUSD, big.NewInt(50_000e9)),
		},
	}

	for _, tc := range testCases {
		// The mock is built per-case (outside t.Run) so expectations are
		// asserted by the mock's cleanup hook.
		execEstimator := NewMockGasPriceEstimator(t)
		execEstimator.On("EstimateMsgCostUSD", mock.Anything, tc.wrappedNativePrice, tc.msg).Return(execCostUSD, nil)

		t.Run(tc.name, func(t *testing.T) {
			g := DAGasPriceEstimator{
				execEstimator:       execEstimator,
				l1Oracle:            nil,
				priceEncodingLength: daGasPriceEncodingLength,
				daOverheadGas:       tc.daOverheadGas,
				gasPerDAByte:        tc.gasPerDAByte,
				daMultiplier:        tc.daMultiplier,
			}

			costUSD, err := g.EstimateMsgCostUSD(tc.gasPrice, tc.wrappedNativePrice, tc.msg)
			assert.NoError(t, err)
			assert.Equal(t, tc.expUSD, costUSD)
		})
	}
}
diff --git a/core/services/ocr2/plugins/ccip/prices/exec_price_estimator.go b/core/services/ocr2/plugins/ccip/prices/exec_price_estimator.go
new file mode 100644
index 00000000000..56e1ddb583e
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/prices/exec_price_estimator.go
@@ -0,0 +1,65 @@
+package prices
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+)
+
// ExecGasPriceEstimator estimates the execution gas price via the chain's
// EVM fee estimator, capped at maxGasPrice.
type ExecGasPriceEstimator struct {
	estimator    gas.EvmFeeEstimator // underlying fee estimator queried by GetGasPrice
	maxGasPrice  *big.Int            // upper bound passed through to the fee estimator
	deviationPPB int64               // allowed price deviation in parts-per-billion (see Deviates)
}

// NewExecGasPriceEstimator constructs an ExecGasPriceEstimator from its parts.
func NewExecGasPriceEstimator(estimator gas.EvmFeeEstimator, maxGasPrice *big.Int, deviationPPB int64) ExecGasPriceEstimator {
	return ExecGasPriceEstimator{
		estimator:    estimator,
		maxGasPrice:  maxGasPrice,
		deviationPPB: deviationPPB,
	}
}
+
+func (g ExecGasPriceEstimator) GetGasPrice(ctx context.Context) (*big.Int, error) {
+ gasPriceWei, _, err := g.estimator.GetFee(ctx, nil, 0, assets.NewWei(g.maxGasPrice))
+ if err != nil {
+ return nil, err
+ }
+ // Use legacy if no dynamic is available.
+ gasPrice := gasPriceWei.Legacy.ToInt()
+ if gasPriceWei.DynamicFeeCap != nil {
+ gasPrice = gasPriceWei.DynamicFeeCap.ToInt()
+ }
+ if gasPrice == nil {
+ return nil, fmt.Errorf("missing gas price %+v", gasPriceWei)
+ }
+
+ return gasPrice, nil
+}
+
// DenoteInUSD converts gas price p into USD terms using the USD price of the
// wrapped native token. The result truncates toward zero (see the
// "low price truncates to 0" test case). Never errors.
func (g ExecGasPriceEstimator) DenoteInUSD(p *big.Int, wrappedNativePrice *big.Int) (*big.Int, error) {
	return ccipcalc.CalculateUsdPerUnitGas(p, wrappedNativePrice), nil
}
+
// Median returns the sorted-middle of gasPrices; for an even count the upper
// of the two middle values is used (per ccipcalc.BigIntSortedMiddle).
// Never errors.
func (g ExecGasPriceEstimator) Median(gasPrices []*big.Int) (*big.Int, error) {
	return ccipcalc.BigIntSortedMiddle(gasPrices), nil
}
+
// Deviates reports whether p1 and p2 differ by more than the configured
// deviationPPB (parts-per-billion) threshold, in either direction.
// Never errors.
func (g ExecGasPriceEstimator) Deviates(p1 *big.Int, p2 *big.Int) (bool, error) {
	return ccipcalc.Deviates(p1, p2, g.deviationPPB), nil
}
+
+func (g ExecGasPriceEstimator) EstimateMsgCostUSD(p *big.Int, wrappedNativePrice *big.Int, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error) {
+ execGasAmount := new(big.Int).Add(big.NewInt(feeBoostingOverheadGas), msg.GasLimit)
+ execGasAmount = new(big.Int).Add(execGasAmount, new(big.Int).Mul(big.NewInt(int64(len(msg.Data))), big.NewInt(execGasPerPayloadByte)))
+ execGasAmount = new(big.Int).Add(execGasAmount, new(big.Int).Mul(big.NewInt(int64(len(msg.TokenAmounts))), big.NewInt(execGasPerToken)))
+
+ execGasCost := new(big.Int).Mul(execGasAmount, p)
+
+ return ccipcalc.CalculateUsdPerUnitGas(execGasCost, wrappedNativePrice), nil
+}
diff --git a/core/services/ocr2/plugins/ccip/prices/exec_price_estimator_test.go b/core/services/ocr2/plugins/ccip/prices/exec_price_estimator_test.go
new file mode 100644
index 00000000000..e1c2fa03981
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/prices/exec_price_estimator_test.go
@@ -0,0 +1,351 @@
+package prices
+
+import (
+ "context"
+ "math/big"
+ "testing"
+
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/assert"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/mocks"
+)
+
// TestExecPriceEstimator_GetGasPrice covers legacy/dynamic fee selection and
// error propagation in ExecGasPriceEstimator.GetGasPrice.
func TestExecPriceEstimator_GetGasPrice(t *testing.T) {
	ctx := context.Background()

	testCases := []struct {
		name                      string
		sourceFeeEstimatorRespFee gas.EvmFee
		sourceFeeEstimatorRespErr error
		maxGasPrice               *big.Int
		expPrice                  *big.Int
		expErr                    bool
	}{
		{
			name: "gets legacy gas price",
			sourceFeeEstimatorRespFee: gas.EvmFee{
				Legacy:        assets.NewWei(big.NewInt(10)),
				DynamicFeeCap: nil,
			},
			sourceFeeEstimatorRespErr: nil,
			maxGasPrice:               big.NewInt(1),
			expPrice:                  big.NewInt(10),
			expErr:                    false,
		},
		{
			name: "gets dynamic gas price",
			sourceFeeEstimatorRespFee: gas.EvmFee{
				Legacy:        nil,
				DynamicFeeCap: assets.NewWei(big.NewInt(20)),
			},
			sourceFeeEstimatorRespErr: nil,
			maxGasPrice:               big.NewInt(1),
			expPrice:                  big.NewInt(20),
			expErr:                    false,
		},
		{
			// DynamicFeeCap is preferred when both fee forms are present.
			name: "gets dynamic gas price over legacy gas price",
			sourceFeeEstimatorRespFee: gas.EvmFee{
				Legacy:        assets.NewWei(big.NewInt(10)),
				DynamicFeeCap: assets.NewWei(big.NewInt(20)),
			},
			sourceFeeEstimatorRespErr: nil,
			maxGasPrice:               big.NewInt(1),
			expPrice:                  big.NewInt(20),
			expErr:                    false,
		},
		{
			name: "fee estimator error",
			sourceFeeEstimatorRespFee: gas.EvmFee{
				Legacy:        assets.NewWei(big.NewInt(10)),
				DynamicFeeCap: nil,
			},
			sourceFeeEstimatorRespErr: errors.New("fee estimator error"),
			maxGasPrice:               big.NewInt(1),
			expPrice:                  nil,
			expErr:                    true,
		},
		{
			// Neither fee form available -> "missing gas price" error.
			name: "nil gas price error",
			sourceFeeEstimatorRespFee: gas.EvmFee{
				Legacy:        nil,
				DynamicFeeCap: nil,
			},
			sourceFeeEstimatorRespErr: nil,
			maxGasPrice:               big.NewInt(1),
			expPrice:                  nil,
			expErr:                    true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			sourceFeeEstimator := mocks.NewEvmFeeEstimator(t)
			sourceFeeEstimator.On("GetFee", ctx, []byte(nil), uint64(0), assets.NewWei(tc.maxGasPrice)).Return(
				tc.sourceFeeEstimatorRespFee, uint64(0), tc.sourceFeeEstimatorRespErr)

			g := ExecGasPriceEstimator{
				estimator:   sourceFeeEstimator,
				maxGasPrice: tc.maxGasPrice,
			}

			gasPrice, err := g.GetGasPrice(ctx)
			if tc.expErr {
				assert.Nil(t, gasPrice)
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)
			assert.Equal(t, tc.expPrice, gasPrice)
		})
	}
}
+
// TestExecPriceEstimator_DenoteInUSD checks USD conversion of gas prices,
// including truncation to zero when the native price is tiny.
func TestExecPriceEstimator_DenoteInUSD(t *testing.T) {
	val1e18 := func(val int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(val)) }

	testCases := []struct {
		name        string
		gasPrice    *big.Int
		nativePrice *big.Int
		expPrice    *big.Int
	}{
		{
			name:        "base",
			gasPrice:    big.NewInt(1e9),
			nativePrice: val1e18(2_000),
			expPrice:    big.NewInt(2_000e9),
		},
		{
			// Integer division truncates the sub-unit result to zero.
			name:        "low price truncates to 0",
			gasPrice:    big.NewInt(1e9),
			nativePrice: big.NewInt(1),
			expPrice:    big.NewInt(0),
		},
		{
			name:        "high price",
			gasPrice:    val1e18(1),
			nativePrice: val1e18(2_000),
			expPrice:    val1e18(2_000),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			g := ExecGasPriceEstimator{}

			gasPrice, err := g.DenoteInUSD(tc.gasPrice, tc.nativePrice)
			assert.NoError(t, err)
			assert.True(t, tc.expPrice.Cmp(gasPrice) == 0)
		})
	}
}
+
// TestExecPriceEstimator_Median checks the sorted-middle median: for an even
// number of prices the UPPER of the two middle values is expected (e.g. the
// median of [1, 2] is 2, and of [1, 2, 3, 4] is 3).
func TestExecPriceEstimator_Median(t *testing.T) {
	val1e18 := func(val int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(val)) }

	testCases := []struct {
		name      string
		gasPrices []*big.Int
		expMedian *big.Int
	}{
		{
			name:      "base",
			gasPrices: []*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
			expMedian: big.NewInt(2),
		},
		{
			name:      "median 1",
			gasPrices: []*big.Int{big.NewInt(1)},
			expMedian: big.NewInt(1),
		},
		{
			// Even count: upper-middle element is chosen.
			name:      "median 2",
			gasPrices: []*big.Int{big.NewInt(1), big.NewInt(2)},
			expMedian: big.NewInt(2),
		},
		{
			name:      "large values",
			gasPrices: []*big.Int{val1e18(5), val1e18(4), val1e18(3), val1e18(2), val1e18(1)},
			expMedian: val1e18(3),
		},
		{
			name:      "zeros",
			gasPrices: []*big.Int{big.NewInt(0), big.NewInt(0), big.NewInt(0)},
			expMedian: big.NewInt(0),
		},
		{
			name:      "unsorted even number of prices",
			gasPrices: []*big.Int{big.NewInt(4), big.NewInt(2), big.NewInt(3), big.NewInt(1)},
			expMedian: big.NewInt(3),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			g := ExecGasPriceEstimator{}

			gasPrice, err := g.Median(tc.gasPrices)
			assert.NoError(t, err)
			assert.True(t, tc.expMedian.Cmp(gasPrice) == 0)
		})
	}
}
+
+func TestExecPriceEstimator_Deviates(t *testing.T) {
+ testCases := []struct {
+ name string
+ gasPrice1 *big.Int
+ gasPrice2 *big.Int
+ deviationPPB int64
+ expDeviates bool
+ }{
+ {
+ name: "base",
+ gasPrice1: big.NewInt(100e8),
+ gasPrice2: big.NewInt(79e8),
+ deviationPPB: 2e8,
+ expDeviates: true,
+ },
+ {
+ name: "negative difference also deviates",
+ gasPrice1: big.NewInt(100e8),
+ gasPrice2: big.NewInt(121e8),
+ deviationPPB: 2e8,
+ expDeviates: true,
+ },
+ {
+ name: "larger difference deviates",
+ gasPrice1: big.NewInt(100e8),
+ gasPrice2: big.NewInt(70e8),
+ deviationPPB: 2e8,
+ expDeviates: true,
+ },
+ {
+ name: "smaller difference does not deviate",
+ gasPrice1: big.NewInt(100e8),
+ gasPrice2: big.NewInt(90e8),
+ deviationPPB: 2e8,
+ expDeviates: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ g := ExecGasPriceEstimator{
+ deviationPPB: tc.deviationPPB,
+ }
+
+ deviated, err := g.Deviates(tc.gasPrice1, tc.gasPrice2)
+ assert.NoError(t, err)
+ if tc.expDeviates {
+ assert.True(t, deviated)
+ } else {
+ assert.False(t, deviated)
+ }
+ })
+ }
+}
+
// TestExecPriceEstimator_EstimateMsgCostUSD checks the USD cost formula:
// (200_000 boosting overhead + gas limit + 16 gas/data byte
// + 10_000 gas/token) * gas price, denominated via the native USD price.
func TestExecPriceEstimator_EstimateMsgCostUSD(t *testing.T) {
	testCases := []struct {
		name               string
		gasPrice           *big.Int
		wrappedNativePrice *big.Int
		msg                cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta
		expUSD             *big.Int
	}{
		{
			name:               "base",
			gasPrice:           big.NewInt(1e9),  // 1 gwei
			wrappedNativePrice: big.NewInt(1e18), // $1
			msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				EVM2EVMMessage: cciptypes.EVM2EVMMessage{
					GasLimit:     big.NewInt(100_000),
					Data:         []byte{},
					TokenAmounts: []cciptypes.TokenAmount{},
				},
			},
			expUSD: big.NewInt(300_000e9),
		},
		{
			name:               "base with data",
			gasPrice:           big.NewInt(1e9),  // 1 gwei
			wrappedNativePrice: big.NewInt(1e18), // $1
			msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				EVM2EVMMessage: cciptypes.EVM2EVMMessage{
					GasLimit:     big.NewInt(100_000),
					Data:         make([]byte, 1_000),
					TokenAmounts: []cciptypes.TokenAmount{},
				},
			},
			// base case + 1_000 bytes * 16 gas/byte = 16_000 extra gas.
			expUSD: big.NewInt(316_000e9),
		},
		{
			name:               "base with data and tokens",
			gasPrice:           big.NewInt(1e9),  // 1 gwei
			wrappedNativePrice: big.NewInt(1e18), // $1
			msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				EVM2EVMMessage: cciptypes.EVM2EVMMessage{
					GasLimit:     big.NewInt(100_000),
					Data:         make([]byte, 1_000),
					TokenAmounts: make([]cciptypes.TokenAmount, 5),
				},
			},
			// previous case + 5 tokens * 10_000 gas/token = 50_000 extra gas.
			expUSD: big.NewInt(366_000e9),
		},
		{
			// Only the fixed fee-boosting overhead remains.
			name:               "empty msg",
			gasPrice:           big.NewInt(1e9),  // 1 gwei
			wrappedNativePrice: big.NewInt(1e18), // $1
			msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				EVM2EVMMessage: cciptypes.EVM2EVMMessage{
					GasLimit:     big.NewInt(0),
					Data:         []byte{},
					TokenAmounts: []cciptypes.TokenAmount{},
				},
			},
			expUSD: big.NewInt(200_000e9),
		},
		{
			name:               "double native price",
			gasPrice:           big.NewInt(1e9),  // 1 gwei
			wrappedNativePrice: big.NewInt(2e18), // $2
			msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				EVM2EVMMessage: cciptypes.EVM2EVMMessage{
					GasLimit:     big.NewInt(0),
					Data:         []byte{},
					TokenAmounts: []cciptypes.TokenAmount{},
				},
			},
			expUSD: big.NewInt(400_000e9),
		},
		{
			name:               "zero gas price",
			gasPrice:           big.NewInt(0),    // 0 gas price
			wrappedNativePrice: big.NewInt(1e18), // $1
			msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
				EVM2EVMMessage: cciptypes.EVM2EVMMessage{
					GasLimit:     big.NewInt(0),
					Data:         []byte{},
					TokenAmounts: []cciptypes.TokenAmount{},
				},
			},
			expUSD: big.NewInt(0),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			g := ExecGasPriceEstimator{}

			costUSD, err := g.EstimateMsgCostUSD(tc.gasPrice, tc.wrappedNativePrice, tc.msg)
			assert.NoError(t, err)
			assert.Equal(t, tc.expUSD, costUSD)
		})
	}
}
diff --git a/core/services/ocr2/plugins/ccip/prices/gas_price_estimator.go b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator.go
new file mode 100644
index 00000000000..49a6fbcc4ad
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator.go
@@ -0,0 +1,59 @@
+package prices
+
+import (
+ "math/big"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/pkg/errors"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+)
+
const (
	// feeBoostingOverheadGas is a fixed gas overhead added to every message's
	// exec cost estimate to cover fee boosting.
	feeBoostingOverheadGas = 200_000
	// execGasPerToken is lower-bound estimation of ERC20 releaseOrMint gas cost (Mint with static minter).
	// Use this in per-token gas cost calc as heuristic to simplify estimation logic.
	execGasPerToken = 10_000
	// execGasPerPayloadByte is gas charged for passing each byte of `data` payload to CCIP receiver, ignores 4 gas per 0-byte rule.
	// This can be a constant as it is part of EVM spec. Changes should be rare.
	execGasPerPayloadByte = 16
	// evmMessageFixedBytes is byte size of fixed-size fields in EVM2EVMMessage.
	// Updating EVM2EVMMessage involves an offchain upgrade, safe to keep this as constant in code.
	evmMessageFixedBytes = 448
	evmMessageBytesPerToken = 128 // Byte size of each token transfer, consisting of 1 EVMTokenAmount and 1 bytes, excl length of bytes
	daMultiplierBase        = int64(10000) // DA multiplier is expressed in multiples of 1/daMultiplierBase, i.e. 10_000 == 1x
	daGasPriceEncodingLength = 112 // Each packed gas price component occupies at most this many bits
)

// GasPriceEstimatorCommit provides gasPriceEstimatorCommon + features needed in commit plugin, e.g. price deviation check.
type GasPriceEstimatorCommit interface {
	cciptypes.GasPriceEstimatorCommit
}

// GasPriceEstimatorExec provides gasPriceEstimatorCommon + features needed in exec plugin, e.g. message cost estimation.
type GasPriceEstimatorExec interface {
	cciptypes.GasPriceEstimatorExec
}

// GasPriceEstimator provides complete gas price estimator functions.
type GasPriceEstimator interface {
	cciptypes.GasPriceEstimator
}
+
+func NewGasPriceEstimatorForCommitPlugin(
+ commitStoreVersion semver.Version,
+ estimator gas.EvmFeeEstimator,
+ maxExecGasPrice *big.Int,
+ daDeviationPPB int64,
+ execDeviationPPB int64,
+) (GasPriceEstimatorCommit, error) {
+ switch commitStoreVersion.String() {
+ case "1.0.0", "1.1.0":
+ return NewExecGasPriceEstimator(estimator, maxExecGasPrice, execDeviationPPB), nil
+ case "1.2.0":
+ return NewDAGasPriceEstimator(estimator, maxExecGasPrice, execDeviationPPB, daDeviationPPB), nil
+ default:
+ return nil, errors.Errorf("Invalid commitStore version: %s", commitStoreVersion)
+ }
+}
diff --git a/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_commit_mock.go b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_commit_mock.go
new file mode 100644
index 00000000000..0a366a66ac2
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_commit_mock.go
@@ -0,0 +1,269 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package prices
+
+import (
+ context "context"
+ big "math/big"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// MockGasPriceEstimatorCommit is an autogenerated mock type for the GasPriceEstimatorCommit type
+type MockGasPriceEstimatorCommit struct {
+ mock.Mock
+}
+
+type MockGasPriceEstimatorCommit_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *MockGasPriceEstimatorCommit) EXPECT() *MockGasPriceEstimatorCommit_Expecter {
+ return &MockGasPriceEstimatorCommit_Expecter{mock: &_m.Mock}
+}
+
+// DenoteInUSD provides a mock function with given fields: p, wrappedNativePrice
+func (_m *MockGasPriceEstimatorCommit) DenoteInUSD(p *big.Int, wrappedNativePrice *big.Int) (*big.Int, error) {
+ ret := _m.Called(p, wrappedNativePrice)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DenoteInUSD")
+ }
+
+ var r0 *big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) (*big.Int, error)); ok {
+ return rf(p, wrappedNativePrice)
+ }
+ if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) *big.Int); ok {
+ r0 = rf(p, wrappedNativePrice)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(*big.Int, *big.Int) error); ok {
+ r1 = rf(p, wrappedNativePrice)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// MockGasPriceEstimatorCommit_DenoteInUSD_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DenoteInUSD'
+type MockGasPriceEstimatorCommit_DenoteInUSD_Call struct {
+ *mock.Call
+}
+
+// DenoteInUSD is a helper method to define mock.On call
+// - p *big.Int
+// - wrappedNativePrice *big.Int
+func (_e *MockGasPriceEstimatorCommit_Expecter) DenoteInUSD(p interface{}, wrappedNativePrice interface{}) *MockGasPriceEstimatorCommit_DenoteInUSD_Call {
+ return &MockGasPriceEstimatorCommit_DenoteInUSD_Call{Call: _e.mock.On("DenoteInUSD", p, wrappedNativePrice)}
+}
+
+func (_c *MockGasPriceEstimatorCommit_DenoteInUSD_Call) Run(run func(p *big.Int, wrappedNativePrice *big.Int)) *MockGasPriceEstimatorCommit_DenoteInUSD_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(*big.Int), args[1].(*big.Int))
+ })
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorCommit_DenoteInUSD_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimatorCommit_DenoteInUSD_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorCommit_DenoteInUSD_Call) RunAndReturn(run func(*big.Int, *big.Int) (*big.Int, error)) *MockGasPriceEstimatorCommit_DenoteInUSD_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Deviates provides a mock function with given fields: p1, p2
+func (_m *MockGasPriceEstimatorCommit) Deviates(p1 *big.Int, p2 *big.Int) (bool, error) {
+ ret := _m.Called(p1, p2)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Deviates")
+ }
+
+ var r0 bool
+ var r1 error
+ if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) (bool, error)); ok {
+ return rf(p1, p2)
+ }
+ if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) bool); ok {
+ r0 = rf(p1, p2)
+ } else {
+ r0 = ret.Get(0).(bool)
+ }
+
+ if rf, ok := ret.Get(1).(func(*big.Int, *big.Int) error); ok {
+ r1 = rf(p1, p2)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// MockGasPriceEstimatorCommit_Deviates_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Deviates'
+type MockGasPriceEstimatorCommit_Deviates_Call struct {
+ *mock.Call
+}
+
+// Deviates is a helper method to define mock.On call
+// - p1 *big.Int
+// - p2 *big.Int
+func (_e *MockGasPriceEstimatorCommit_Expecter) Deviates(p1 interface{}, p2 interface{}) *MockGasPriceEstimatorCommit_Deviates_Call {
+ return &MockGasPriceEstimatorCommit_Deviates_Call{Call: _e.mock.On("Deviates", p1, p2)}
+}
+
+func (_c *MockGasPriceEstimatorCommit_Deviates_Call) Run(run func(p1 *big.Int, p2 *big.Int)) *MockGasPriceEstimatorCommit_Deviates_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(*big.Int), args[1].(*big.Int))
+ })
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorCommit_Deviates_Call) Return(_a0 bool, _a1 error) *MockGasPriceEstimatorCommit_Deviates_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorCommit_Deviates_Call) RunAndReturn(run func(*big.Int, *big.Int) (bool, error)) *MockGasPriceEstimatorCommit_Deviates_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetGasPrice provides a mock function with given fields: ctx
+func (_m *MockGasPriceEstimatorCommit) GetGasPrice(ctx context.Context) (*big.Int, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetGasPrice")
+ }
+
+ var r0 *big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// MockGasPriceEstimatorCommit_GetGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGasPrice'
+type MockGasPriceEstimatorCommit_GetGasPrice_Call struct {
+ *mock.Call
+}
+
+// GetGasPrice is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *MockGasPriceEstimatorCommit_Expecter) GetGasPrice(ctx interface{}) *MockGasPriceEstimatorCommit_GetGasPrice_Call {
+ return &MockGasPriceEstimatorCommit_GetGasPrice_Call{Call: _e.mock.On("GetGasPrice", ctx)}
+}
+
+func (_c *MockGasPriceEstimatorCommit_GetGasPrice_Call) Run(run func(ctx context.Context)) *MockGasPriceEstimatorCommit_GetGasPrice_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorCommit_GetGasPrice_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimatorCommit_GetGasPrice_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorCommit_GetGasPrice_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *MockGasPriceEstimatorCommit_GetGasPrice_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Median provides a mock function with given fields: gasPrices
+func (_m *MockGasPriceEstimatorCommit) Median(gasPrices []*big.Int) (*big.Int, error) {
+ ret := _m.Called(gasPrices)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Median")
+ }
+
+ var r0 *big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func([]*big.Int) (*big.Int, error)); ok {
+ return rf(gasPrices)
+ }
+ if rf, ok := ret.Get(0).(func([]*big.Int) *big.Int); ok {
+ r0 = rf(gasPrices)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func([]*big.Int) error); ok {
+ r1 = rf(gasPrices)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// MockGasPriceEstimatorCommit_Median_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Median'
+type MockGasPriceEstimatorCommit_Median_Call struct {
+ *mock.Call
+}
+
+// Median is a helper method to define mock.On call
+// - gasPrices []*big.Int
+func (_e *MockGasPriceEstimatorCommit_Expecter) Median(gasPrices interface{}) *MockGasPriceEstimatorCommit_Median_Call {
+ return &MockGasPriceEstimatorCommit_Median_Call{Call: _e.mock.On("Median", gasPrices)}
+}
+
+func (_c *MockGasPriceEstimatorCommit_Median_Call) Run(run func(gasPrices []*big.Int)) *MockGasPriceEstimatorCommit_Median_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].([]*big.Int))
+ })
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorCommit_Median_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimatorCommit_Median_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorCommit_Median_Call) RunAndReturn(run func([]*big.Int) (*big.Int, error)) *MockGasPriceEstimatorCommit_Median_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewMockGasPriceEstimatorCommit creates a new instance of MockGasPriceEstimatorCommit. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewMockGasPriceEstimatorCommit(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *MockGasPriceEstimatorCommit {
+ mock := &MockGasPriceEstimatorCommit{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_exec_mock.go b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_exec_mock.go
new file mode 100644
index 00000000000..8f778555b17
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_exec_mock.go
@@ -0,0 +1,274 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package prices
+
+import (
+ context "context"
+ big "math/big"
+
+ ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// MockGasPriceEstimatorExec is an autogenerated mock type for the GasPriceEstimatorExec type
+type MockGasPriceEstimatorExec struct {
+ mock.Mock
+}
+
+type MockGasPriceEstimatorExec_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *MockGasPriceEstimatorExec) EXPECT() *MockGasPriceEstimatorExec_Expecter {
+ return &MockGasPriceEstimatorExec_Expecter{mock: &_m.Mock}
+}
+
+// DenoteInUSD provides a mock function with given fields: p, wrappedNativePrice
+func (_m *MockGasPriceEstimatorExec) DenoteInUSD(p *big.Int, wrappedNativePrice *big.Int) (*big.Int, error) {
+ ret := _m.Called(p, wrappedNativePrice)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DenoteInUSD")
+ }
+
+ var r0 *big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) (*big.Int, error)); ok {
+ return rf(p, wrappedNativePrice)
+ }
+ if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) *big.Int); ok {
+ r0 = rf(p, wrappedNativePrice)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(*big.Int, *big.Int) error); ok {
+ r1 = rf(p, wrappedNativePrice)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// MockGasPriceEstimatorExec_DenoteInUSD_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DenoteInUSD'
+type MockGasPriceEstimatorExec_DenoteInUSD_Call struct {
+ *mock.Call
+}
+
+// DenoteInUSD is a helper method to define mock.On call
+// - p *big.Int
+// - wrappedNativePrice *big.Int
+func (_e *MockGasPriceEstimatorExec_Expecter) DenoteInUSD(p interface{}, wrappedNativePrice interface{}) *MockGasPriceEstimatorExec_DenoteInUSD_Call {
+ return &MockGasPriceEstimatorExec_DenoteInUSD_Call{Call: _e.mock.On("DenoteInUSD", p, wrappedNativePrice)}
+}
+
+func (_c *MockGasPriceEstimatorExec_DenoteInUSD_Call) Run(run func(p *big.Int, wrappedNativePrice *big.Int)) *MockGasPriceEstimatorExec_DenoteInUSD_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(*big.Int), args[1].(*big.Int))
+ })
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorExec_DenoteInUSD_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimatorExec_DenoteInUSD_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorExec_DenoteInUSD_Call) RunAndReturn(run func(*big.Int, *big.Int) (*big.Int, error)) *MockGasPriceEstimatorExec_DenoteInUSD_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// EstimateMsgCostUSD provides a mock function with given fields: p, wrappedNativePrice, msg
+func (_m *MockGasPriceEstimatorExec) EstimateMsgCostUSD(p *big.Int, wrappedNativePrice *big.Int, msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error) {
+ ret := _m.Called(p, wrappedNativePrice, msg)
+
+ if len(ret) == 0 {
+ panic("no return value specified for EstimateMsgCostUSD")
+ }
+
+ var r0 *big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error)); ok {
+ return rf(p, wrappedNativePrice, msg)
+ }
+ if rf, ok := ret.Get(0).(func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) *big.Int); ok {
+ r0 = rf(p, wrappedNativePrice, msg)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) error); ok {
+ r1 = rf(p, wrappedNativePrice, msg)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EstimateMsgCostUSD'
+type MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call struct {
+ *mock.Call
+}
+
+// EstimateMsgCostUSD is a helper method to define mock.On call
+// - p *big.Int
+// - wrappedNativePrice *big.Int
+// - msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta
+func (_e *MockGasPriceEstimatorExec_Expecter) EstimateMsgCostUSD(p interface{}, wrappedNativePrice interface{}, msg interface{}) *MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call {
+ return &MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call{Call: _e.mock.On("EstimateMsgCostUSD", p, wrappedNativePrice, msg)}
+}
+
+func (_c *MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call) Run(run func(p *big.Int, wrappedNativePrice *big.Int, msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta)) *MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(*big.Int), args[1].(*big.Int), args[2].(ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta))
+ })
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call) RunAndReturn(run func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error)) *MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetGasPrice provides a mock function with given fields: ctx
+func (_m *MockGasPriceEstimatorExec) GetGasPrice(ctx context.Context) (*big.Int, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetGasPrice")
+ }
+
+ var r0 *big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// MockGasPriceEstimatorExec_GetGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGasPrice'
+type MockGasPriceEstimatorExec_GetGasPrice_Call struct {
+ *mock.Call
+}
+
+// GetGasPrice is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *MockGasPriceEstimatorExec_Expecter) GetGasPrice(ctx interface{}) *MockGasPriceEstimatorExec_GetGasPrice_Call {
+ return &MockGasPriceEstimatorExec_GetGasPrice_Call{Call: _e.mock.On("GetGasPrice", ctx)}
+}
+
+func (_c *MockGasPriceEstimatorExec_GetGasPrice_Call) Run(run func(ctx context.Context)) *MockGasPriceEstimatorExec_GetGasPrice_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorExec_GetGasPrice_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimatorExec_GetGasPrice_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorExec_GetGasPrice_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *MockGasPriceEstimatorExec_GetGasPrice_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Median provides a mock function with given fields: gasPrices
+func (_m *MockGasPriceEstimatorExec) Median(gasPrices []*big.Int) (*big.Int, error) {
+ ret := _m.Called(gasPrices)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Median")
+ }
+
+ var r0 *big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func([]*big.Int) (*big.Int, error)); ok {
+ return rf(gasPrices)
+ }
+ if rf, ok := ret.Get(0).(func([]*big.Int) *big.Int); ok {
+ r0 = rf(gasPrices)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func([]*big.Int) error); ok {
+ r1 = rf(gasPrices)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// MockGasPriceEstimatorExec_Median_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Median'
+type MockGasPriceEstimatorExec_Median_Call struct {
+ *mock.Call
+}
+
+// Median is a helper method to define mock.On call
+// - gasPrices []*big.Int
+func (_e *MockGasPriceEstimatorExec_Expecter) Median(gasPrices interface{}) *MockGasPriceEstimatorExec_Median_Call {
+ return &MockGasPriceEstimatorExec_Median_Call{Call: _e.mock.On("Median", gasPrices)}
+}
+
+func (_c *MockGasPriceEstimatorExec_Median_Call) Run(run func(gasPrices []*big.Int)) *MockGasPriceEstimatorExec_Median_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].([]*big.Int))
+ })
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorExec_Median_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimatorExec_Median_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *MockGasPriceEstimatorExec_Median_Call) RunAndReturn(run func([]*big.Int) (*big.Int, error)) *MockGasPriceEstimatorExec_Median_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewMockGasPriceEstimatorExec creates a new instance of MockGasPriceEstimatorExec. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewMockGasPriceEstimatorExec(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *MockGasPriceEstimatorExec {
+ mock := &MockGasPriceEstimatorExec{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_mock.go b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_mock.go
new file mode 100644
index 00000000000..a513083319d
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_mock.go
@@ -0,0 +1,331 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package prices
+
+import (
+ context "context"
+ big "math/big"
+
+ ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// MockGasPriceEstimator is an autogenerated mock type for the GasPriceEstimator type
+type MockGasPriceEstimator struct {
+ mock.Mock
+}
+
+type MockGasPriceEstimator_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *MockGasPriceEstimator) EXPECT() *MockGasPriceEstimator_Expecter {
+ return &MockGasPriceEstimator_Expecter{mock: &_m.Mock}
+}
+
+// DenoteInUSD provides a mock function with given fields: p, wrappedNativePrice
+func (_m *MockGasPriceEstimator) DenoteInUSD(p *big.Int, wrappedNativePrice *big.Int) (*big.Int, error) {
+ ret := _m.Called(p, wrappedNativePrice)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DenoteInUSD")
+ }
+
+ var r0 *big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) (*big.Int, error)); ok {
+ return rf(p, wrappedNativePrice)
+ }
+ if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) *big.Int); ok {
+ r0 = rf(p, wrappedNativePrice)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(*big.Int, *big.Int) error); ok {
+ r1 = rf(p, wrappedNativePrice)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// MockGasPriceEstimator_DenoteInUSD_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DenoteInUSD'
+type MockGasPriceEstimator_DenoteInUSD_Call struct {
+ *mock.Call
+}
+
+// DenoteInUSD is a helper method to define mock.On call
+// - p *big.Int
+// - wrappedNativePrice *big.Int
+func (_e *MockGasPriceEstimator_Expecter) DenoteInUSD(p interface{}, wrappedNativePrice interface{}) *MockGasPriceEstimator_DenoteInUSD_Call {
+ return &MockGasPriceEstimator_DenoteInUSD_Call{Call: _e.mock.On("DenoteInUSD", p, wrappedNativePrice)}
+}
+
+func (_c *MockGasPriceEstimator_DenoteInUSD_Call) Run(run func(p *big.Int, wrappedNativePrice *big.Int)) *MockGasPriceEstimator_DenoteInUSD_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(*big.Int), args[1].(*big.Int))
+ })
+ return _c
+}
+
+func (_c *MockGasPriceEstimator_DenoteInUSD_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimator_DenoteInUSD_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *MockGasPriceEstimator_DenoteInUSD_Call) RunAndReturn(run func(*big.Int, *big.Int) (*big.Int, error)) *MockGasPriceEstimator_DenoteInUSD_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Deviates provides a mock function with given fields: p1, p2
+func (_m *MockGasPriceEstimator) Deviates(p1 *big.Int, p2 *big.Int) (bool, error) {
+ ret := _m.Called(p1, p2)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Deviates")
+ }
+
+ var r0 bool
+ var r1 error
+ if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) (bool, error)); ok {
+ return rf(p1, p2)
+ }
+ if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) bool); ok {
+ r0 = rf(p1, p2)
+ } else {
+ r0 = ret.Get(0).(bool)
+ }
+
+ if rf, ok := ret.Get(1).(func(*big.Int, *big.Int) error); ok {
+ r1 = rf(p1, p2)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// MockGasPriceEstimator_Deviates_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Deviates'
+type MockGasPriceEstimator_Deviates_Call struct {
+ *mock.Call
+}
+
+// Deviates is a helper method to define mock.On call
+// - p1 *big.Int
+// - p2 *big.Int
+func (_e *MockGasPriceEstimator_Expecter) Deviates(p1 interface{}, p2 interface{}) *MockGasPriceEstimator_Deviates_Call {
+ return &MockGasPriceEstimator_Deviates_Call{Call: _e.mock.On("Deviates", p1, p2)}
+}
+
+func (_c *MockGasPriceEstimator_Deviates_Call) Run(run func(p1 *big.Int, p2 *big.Int)) *MockGasPriceEstimator_Deviates_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(*big.Int), args[1].(*big.Int))
+ })
+ return _c
+}
+
+func (_c *MockGasPriceEstimator_Deviates_Call) Return(_a0 bool, _a1 error) *MockGasPriceEstimator_Deviates_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *MockGasPriceEstimator_Deviates_Call) RunAndReturn(run func(*big.Int, *big.Int) (bool, error)) *MockGasPriceEstimator_Deviates_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// EstimateMsgCostUSD provides a mock function with given fields: p, wrappedNativePrice, msg
+func (_m *MockGasPriceEstimator) EstimateMsgCostUSD(p *big.Int, wrappedNativePrice *big.Int, msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error) {
+ ret := _m.Called(p, wrappedNativePrice, msg)
+
+ if len(ret) == 0 {
+ panic("no return value specified for EstimateMsgCostUSD")
+ }
+
+ var r0 *big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error)); ok {
+ return rf(p, wrappedNativePrice, msg)
+ }
+ if rf, ok := ret.Get(0).(func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) *big.Int); ok {
+ r0 = rf(p, wrappedNativePrice, msg)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) error); ok {
+ r1 = rf(p, wrappedNativePrice, msg)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// MockGasPriceEstimator_EstimateMsgCostUSD_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EstimateMsgCostUSD'
+type MockGasPriceEstimator_EstimateMsgCostUSD_Call struct {
+ *mock.Call
+}
+
+// EstimateMsgCostUSD is a helper method to define mock.On call
+// - p *big.Int
+// - wrappedNativePrice *big.Int
+// - msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta
+func (_e *MockGasPriceEstimator_Expecter) EstimateMsgCostUSD(p interface{}, wrappedNativePrice interface{}, msg interface{}) *MockGasPriceEstimator_EstimateMsgCostUSD_Call {
+ return &MockGasPriceEstimator_EstimateMsgCostUSD_Call{Call: _e.mock.On("EstimateMsgCostUSD", p, wrappedNativePrice, msg)}
+}
+
+func (_c *MockGasPriceEstimator_EstimateMsgCostUSD_Call) Run(run func(p *big.Int, wrappedNativePrice *big.Int, msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta)) *MockGasPriceEstimator_EstimateMsgCostUSD_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(*big.Int), args[1].(*big.Int), args[2].(ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta))
+ })
+ return _c
+}
+
+func (_c *MockGasPriceEstimator_EstimateMsgCostUSD_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimator_EstimateMsgCostUSD_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *MockGasPriceEstimator_EstimateMsgCostUSD_Call) RunAndReturn(run func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error)) *MockGasPriceEstimator_EstimateMsgCostUSD_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetGasPrice provides a mock function with given fields: ctx
+func (_m *MockGasPriceEstimator) GetGasPrice(ctx context.Context) (*big.Int, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetGasPrice")
+ }
+
+ var r0 *big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// MockGasPriceEstimator_GetGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGasPrice'
+type MockGasPriceEstimator_GetGasPrice_Call struct {
+ *mock.Call
+}
+
+// GetGasPrice is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *MockGasPriceEstimator_Expecter) GetGasPrice(ctx interface{}) *MockGasPriceEstimator_GetGasPrice_Call {
+ return &MockGasPriceEstimator_GetGasPrice_Call{Call: _e.mock.On("GetGasPrice", ctx)}
+}
+
+func (_c *MockGasPriceEstimator_GetGasPrice_Call) Run(run func(ctx context.Context)) *MockGasPriceEstimator_GetGasPrice_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *MockGasPriceEstimator_GetGasPrice_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimator_GetGasPrice_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *MockGasPriceEstimator_GetGasPrice_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *MockGasPriceEstimator_GetGasPrice_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Median provides a mock function with given fields: gasPrices
+func (_m *MockGasPriceEstimator) Median(gasPrices []*big.Int) (*big.Int, error) {
+ ret := _m.Called(gasPrices)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Median")
+ }
+
+ var r0 *big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func([]*big.Int) (*big.Int, error)); ok {
+ return rf(gasPrices)
+ }
+ if rf, ok := ret.Get(0).(func([]*big.Int) *big.Int); ok {
+ r0 = rf(gasPrices)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func([]*big.Int) error); ok {
+ r1 = rf(gasPrices)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// MockGasPriceEstimator_Median_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Median'
+type MockGasPriceEstimator_Median_Call struct {
+ *mock.Call
+}
+
+// Median is a helper method to define mock.On call
+// - gasPrices []*big.Int
+func (_e *MockGasPriceEstimator_Expecter) Median(gasPrices interface{}) *MockGasPriceEstimator_Median_Call {
+ return &MockGasPriceEstimator_Median_Call{Call: _e.mock.On("Median", gasPrices)}
+}
+
+func (_c *MockGasPriceEstimator_Median_Call) Run(run func(gasPrices []*big.Int)) *MockGasPriceEstimator_Median_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].([]*big.Int))
+ })
+ return _c
+}
+
+func (_c *MockGasPriceEstimator_Median_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimator_Median_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *MockGasPriceEstimator_Median_Call) RunAndReturn(run func([]*big.Int) (*big.Int, error)) *MockGasPriceEstimator_Median_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewMockGasPriceEstimator creates a new instance of MockGasPriceEstimator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewMockGasPriceEstimator(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *MockGasPriceEstimator {
+ mock := &MockGasPriceEstimator{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/proxycommitstore.go b/core/services/ocr2/plugins/ccip/proxycommitstore.go
new file mode 100644
index 00000000000..b06f957bd58
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/proxycommitstore.go
@@ -0,0 +1,135 @@
+package ccip
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math/big"
+ "time"
+
+ "go.uber.org/multierr"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+)
+
+// The disjoint method sets of IncompleteSourceCommitStoreReader and IncompleteDestCommitStoreReader together
+// satisfy the full CommitStoreReader interface.
+var _ cciptypes.CommitStoreReader = (*ProviderProxyCommitStoreReader)(nil)
+
+// ProviderProxyCommitStoreReader is a CommitStoreReader that proxies to two custom provider grpc backed implementations
+// of a CommitStoreReader.
+// [ProviderProxyCommitStoreReader] lives in the memory space of the reporting plugin factory and reporting plugin, and should have no chain-specific details.
+// Why? Historical commit store implementations consumed in reporting plugins mixed use of a gas estimator from the
+// source relayer with contract read/write access on the dest relayer, which is not valid in the LOOP world.
+type ProviderProxyCommitStoreReader struct {
+ srcCommitStoreReader IncompleteSourceCommitStoreReader
+ dstCommitStoreReader IncompleteDestCommitStoreReader
+}
+
+// IncompleteSourceCommitStoreReader contains only the methods of CommitStoreReader that are serviced by the source chain/relayer.
+type IncompleteSourceCommitStoreReader interface {
+ ChangeConfig(ctx context.Context, onchainConfig []byte, offchainConfig []byte) (cciptypes.Address, error)
+ GasPriceEstimator(ctx context.Context) (cciptypes.GasPriceEstimatorCommit, error)
+ OffchainConfig(ctx context.Context) (cciptypes.CommitOffchainConfig, error)
+ io.Closer
+}
+
+// IncompleteDestCommitStoreReader contains only the methods of CommitStoreReader that are serviced by the dest chain/relayer.
+type IncompleteDestCommitStoreReader interface {
+ DecodeCommitReport(ctx context.Context, report []byte) (cciptypes.CommitStoreReport, error)
+ EncodeCommitReport(ctx context.Context, report cciptypes.CommitStoreReport) ([]byte, error)
+ GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error)
+ GetCommitReportMatchingSeqNum(ctx context.Context, seqNum uint64, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error)
+ GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error)
+ GetExpectedNextSequenceNumber(ctx context.Context) (uint64, error)
+ GetLatestPriceEpochAndRound(ctx context.Context) (uint64, error)
+ IsBlessed(ctx context.Context, root [32]byte) (bool, error)
+ IsDestChainHealthy(ctx context.Context) (bool, error)
+ IsDown(ctx context.Context) (bool, error)
+ VerifyExecutionReport(ctx context.Context, report cciptypes.ExecReport) (bool, error)
+ io.Closer
+}
+
+func NewProviderProxyCommitStoreReader(srcReader cciptypes.CommitStoreReader, dstReader cciptypes.CommitStoreReader) *ProviderProxyCommitStoreReader {
+ return &ProviderProxyCommitStoreReader{
+ srcCommitStoreReader: srcReader,
+ dstCommitStoreReader: dstReader,
+ }
+}
+
+// ChangeConfig updates the offchain config values for the source relayer gas estimator by delegating ChangeConfig
+// to the source relayer. Once this has been called, GasPriceEstimator and OffchainConfig become usable.
+func (p *ProviderProxyCommitStoreReader) ChangeConfig(ctx context.Context, onchainConfig []byte, offchainConfig []byte) (cciptypes.Address, error) {
+ return p.srcCommitStoreReader.ChangeConfig(ctx, onchainConfig, offchainConfig)
+}
+
+func (p *ProviderProxyCommitStoreReader) DecodeCommitReport(ctx context.Context, report []byte) (cciptypes.CommitStoreReport, error) {
+ return p.dstCommitStoreReader.DecodeCommitReport(ctx, report)
+}
+
+func (p *ProviderProxyCommitStoreReader) EncodeCommitReport(ctx context.Context, report cciptypes.CommitStoreReport) ([]byte, error) {
+ return p.dstCommitStoreReader.EncodeCommitReport(ctx, report)
+}
+
+// GasPriceEstimator constructs a gas price estimator on the source relayer
+func (p *ProviderProxyCommitStoreReader) GasPriceEstimator(ctx context.Context) (cciptypes.GasPriceEstimatorCommit, error) {
+ return p.srcCommitStoreReader.GasPriceEstimator(ctx)
+}
+
+func (p *ProviderProxyCommitStoreReader) GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error) {
+ return p.dstCommitStoreReader.GetAcceptedCommitReportsGteTimestamp(ctx, ts, confirmations)
+}
+
+func (p *ProviderProxyCommitStoreReader) GetCommitReportMatchingSeqNum(ctx context.Context, seqNum uint64, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error) {
+ return p.dstCommitStoreReader.GetCommitReportMatchingSeqNum(ctx, seqNum, confirmations)
+}
+
+func (p *ProviderProxyCommitStoreReader) GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error) {
+ return p.dstCommitStoreReader.GetCommitStoreStaticConfig(ctx)
+}
+
+func (p *ProviderProxyCommitStoreReader) GetExpectedNextSequenceNumber(ctx context.Context) (uint64, error) {
+ return p.dstCommitStoreReader.GetExpectedNextSequenceNumber(ctx)
+}
+
+func (p *ProviderProxyCommitStoreReader) GetLatestPriceEpochAndRound(ctx context.Context) (uint64, error) {
+ return p.dstCommitStoreReader.GetLatestPriceEpochAndRound(ctx)
+}
+
+func (p *ProviderProxyCommitStoreReader) IsBlessed(ctx context.Context, root [32]byte) (bool, error) {
+ return p.dstCommitStoreReader.IsBlessed(ctx, root)
+}
+
+func (p *ProviderProxyCommitStoreReader) IsDestChainHealthy(ctx context.Context) (bool, error) {
+ return p.dstCommitStoreReader.IsDestChainHealthy(ctx)
+}
+
+func (p *ProviderProxyCommitStoreReader) IsDown(ctx context.Context) (bool, error) {
+ return p.dstCommitStoreReader.IsDown(ctx)
+}
+
+func (p *ProviderProxyCommitStoreReader) OffchainConfig(ctx context.Context) (cciptypes.CommitOffchainConfig, error) {
+ return p.srcCommitStoreReader.OffchainConfig(ctx)
+}
+
+func (p *ProviderProxyCommitStoreReader) VerifyExecutionReport(ctx context.Context, report cciptypes.ExecReport) (bool, error) {
+ return p.dstCommitStoreReader.VerifyExecutionReport(ctx, report)
+}
+
+// SetGasEstimator is invalid on ProviderProxyCommitStoreReader. The provider-based implementations do not define
+// SetGasEstimator, so this method exists only to satisfy the interface.
+func (p *ProviderProxyCommitStoreReader) SetGasEstimator(ctx context.Context, gpe gas.EvmFeeEstimator) error {
+ return fmt.Errorf("invalid usage of ProviderProxyCommitStoreReader")
+}
+
+// SetSourceMaxGasPrice is invalid on ProviderProxyCommitStoreReader. The provider-based implementations do not define
+// SetSourceMaxGasPrice, so this method exists only to satisfy the interface.
+func (p *ProviderProxyCommitStoreReader) SetSourceMaxGasPrice(ctx context.Context, sourceMaxGasPrice *big.Int) error {
+ return fmt.Errorf("invalid usage of ProviderProxyCommitStoreReader")
+}
+
+func (p *ProviderProxyCommitStoreReader) Close() error {
+ return multierr.Append(p.srcCommitStoreReader.Close(), p.dstCommitStoreReader.Close())
+}
diff --git a/core/services/ocr2/plugins/ccip/testhelpers/ccip_contracts.go b/core/services/ocr2/plugins/ccip/testhelpers/ccip_contracts.go
new file mode 100644
index 00000000000..805c49d91aa
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/testhelpers/ccip_contracts.go
@@ -0,0 +1,1580 @@
+package testhelpers
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/pkg/errors"
+ "github.com/rs/zerolog/log"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/libocr/offchainreporting2/confighelper"
+ ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2/types"
+ ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+ "github.com/smartcontractkit/chainlink-common/pkg/merklemulti"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/arm_proxy_contract"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_helper"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_helper_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/lock_release_token_pool"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/maybe_revert_message_receiver"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_arm_contract"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/token_admin_registry"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/weth9"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0"
+)
+
+var (
+	// Source chain fixture labels, used as BalanceReq/BalanceAssertion names.
+	SourcePool = "source Link pool"
+	SourcePriceRegistry = "source PriceRegistry"
+	OnRamp = "onramp"
+	OnRampNative = "onramp-native"
+	SourceRouter = "source router"
+
+	// Dest chain fixture labels.
+	OffRamp = "offramp"
+	DestPool = "dest Link pool"
+
+	Receiver = "receiver"
+	Sender = "sender"
+	// Link converts a whole-LINK amount into its 1e18 base-unit (juels) representation.
+	Link = func(amount int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(amount)) }
+	HundredLink = Link(100)
+	// LinkUSDValue scales a USD amount by 1e18 (on-chain fixed-point units).
+	LinkUSDValue = func(amount int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(amount)) }
+	// Chain IDs and CCIP chain selectors for the simulated source/dest chains.
+	SourceChainID = uint64(1000)
+	SourceChainSelector = uint64(11787463284727550157)
+	DestChainID = uint64(1337)
+	DestChainSelector = uint64(3379446385462418246)
+)
+
+// Backwards compat, in principle these statuses are version dependent
+// TODO: Adjust integration tests to be version agnostic using readers
+var (
+	// Execution states re-exported under the local wrapper type for older tests.
+	ExecutionStateSuccess = MessageExecutionState(cciptypes.ExecutionStateSuccess)
+	ExecutionStateFailure = MessageExecutionState(cciptypes.ExecutionStateFailure)
+)
+
+// MessageExecutionState aliases the common CCIP execution-state type for test use.
+type MessageExecutionState cciptypes.MessageExecutionState
+
+// CommitOffchainConfig wraps the v1.2.0 JSON commit offchain config so tests
+// can build and encode it.
+type CommitOffchainConfig struct {
+	v1_2_0.JSONCommitOffchainConfig
+}
+
+// Encode serializes the embedded JSON commit offchain config.
+func (c CommitOffchainConfig) Encode() ([]byte, error) {
+	return ccipconfig.EncodeOffchainConfig(c.JSONCommitOffchainConfig)
+}
+
+// NewCommitOffchainConfig constructs a CommitOffchainConfig wrapping the
+// v1.2.0 JSON offchain config with the supplied heartbeat, deviation and
+// cache-expiry settings.
+func NewCommitOffchainConfig(
+	GasPriceHeartBeat config.Duration,
+	DAGasPriceDeviationPPB uint32,
+	ExecGasPriceDeviationPPB uint32,
+	TokenPriceHeartBeat config.Duration,
+	TokenPriceDeviationPPB uint32,
+	InflightCacheExpiry config.Duration) CommitOffchainConfig {
+	inner := v1_2_0.JSONCommitOffchainConfig{
+		GasPriceHeartBeat:        GasPriceHeartBeat,
+		DAGasPriceDeviationPPB:   DAGasPriceDeviationPPB,
+		ExecGasPriceDeviationPPB: ExecGasPriceDeviationPPB,
+		TokenPriceHeartBeat:      TokenPriceHeartBeat,
+		TokenPriceDeviationPPB:   TokenPriceDeviationPPB,
+		InflightCacheExpiry:      InflightCacheExpiry,
+	}
+	return CommitOffchainConfig{inner}
+}
+
+// CommitOnchainConfig wraps ccipdata.CommitOnchainConfig for test construction.
+type CommitOnchainConfig struct {
+	ccipdata.CommitOnchainConfig
+}
+
+// NewCommitOnchainConfig builds a commit onchain config pointing at the given
+// price registry address.
+func NewCommitOnchainConfig(
+	PriceRegistry common.Address,
+) CommitOnchainConfig {
+	return CommitOnchainConfig{ccipdata.CommitOnchainConfig{
+		PriceRegistry: PriceRegistry,
+	}}
+}
+
+// ExecOnchainConfig wraps the v1.5.0 exec onchain config.
+type ExecOnchainConfig struct {
+	v1_5_0.ExecOnchainConfig
+}
+
+// NewExecOnchainConfig builds an exec onchain config from the supplied
+// execution limits and contract addresses.
+func NewExecOnchainConfig(
+	PermissionLessExecutionThresholdSeconds uint32,
+	Router common.Address,
+	PriceRegistry common.Address,
+	MaxNumberOfTokensPerMsg uint16,
+	MaxDataBytes uint32,
+	MaxPoolReleaseOrMintGas uint32,
+	MaxTokenTransferGas uint32,
+) ExecOnchainConfig {
+	inner := v1_5_0.ExecOnchainConfig{
+		PermissionLessExecutionThresholdSeconds: PermissionLessExecutionThresholdSeconds,
+		Router:                                  Router,
+		PriceRegistry:                           PriceRegistry,
+		MaxNumberOfTokensPerMsg:                 MaxNumberOfTokensPerMsg,
+		MaxDataBytes:                            MaxDataBytes,
+		MaxPoolReleaseOrMintGas:                 MaxPoolReleaseOrMintGas,
+		MaxTokenTransferGas:                     MaxTokenTransferGas,
+	}
+	return ExecOnchainConfig{inner}
+}
+
+// ExecOffchainConfig wraps the v1.2.0 JSON exec offchain config for encoding in tests.
+type ExecOffchainConfig struct {
+	v1_2_0.JSONExecOffchainConfig
+}
+
+// Encode serializes the embedded JSON exec offchain config.
+func (c ExecOffchainConfig) Encode() ([]byte, error) {
+	return ccipconfig.EncodeOffchainConfig(c.JSONExecOffchainConfig)
+}
+
+// NewExecOffchainConfig builds an ExecOffchainConfig from the supplied
+// batching, boost and cache settings.
+func NewExecOffchainConfig(
+	DestOptimisticConfirmations uint32,
+	BatchGasLimit uint32,
+	RelativeBoostPerWaitHour float64,
+	InflightCacheExpiry config.Duration,
+	RootSnoozeTime config.Duration,
+) ExecOffchainConfig {
+	return ExecOffchainConfig{v1_2_0.JSONExecOffchainConfig{
+		DestOptimisticConfirmations: DestOptimisticConfirmations,
+		BatchGasLimit:               BatchGasLimit,
+		RelativeBoostPerWaitHour:    RelativeBoostPerWaitHour,
+		InflightCacheExpiry:         InflightCacheExpiry,
+		RootSnoozeTime:              RootSnoozeTime,
+	}}
+}
+
+// MaybeRevertReceiver pairs a test receiver contract with whether it is in
+// strict (reverting) mode.
+type MaybeRevertReceiver struct {
+	Receiver *maybe_revert_message_receiver.MaybeRevertMessageReceiver
+	Strict   bool
+}
+
+// Common holds the per-chain contracts and accounts shared by source and
+// destination test chains.
+type Common struct {
+	ChainID            uint64
+	ChainSelector      uint64
+	User               *bind.TransactOpts
+	Chain              *backends.SimulatedBackend
+	LinkToken          *link_token_interface.LinkToken
+	LinkTokenPool      *lock_release_token_pool.LockReleaseTokenPool
+	CustomToken        *link_token_interface.LinkToken
+	WrappedNative      *weth9.WETH9
+	WrappedNativePool  *lock_release_token_pool.LockReleaseTokenPool
+	ARM                *mock_arm_contract.MockARMContract
+	ARMProxy           *arm_proxy_contract.ARMProxyContract
+	PriceRegistry      *price_registry_1_2_0.PriceRegistry
+	TokenAdminRegistry *token_admin_registry.TokenAdminRegistry
+}
+
+// SourceChain extends Common with the source-side router and onramp.
+type SourceChain struct {
+	Common
+	Router *router.Router
+	OnRamp *evm_2_evm_onramp.EVM2EVMOnRamp
+}
+
+// DestinationChain extends Common with the destination-side commit store,
+// router, offramp and test receivers.
+type DestinationChain struct {
+	Common
+
+	CommitStoreHelper *commit_store_helper.CommitStoreHelper
+	CommitStore       *commit_store.CommitStore
+	Router            *router.Router
+	OffRamp           *evm_2_evm_offramp.EVM2EVMOffRamp
+	Receivers         []MaybeRevertReceiver
+}
+
+// OCR2Config is the fully-derived argument set for SetOCR2Config calls.
+type OCR2Config struct {
+	Signers               []common.Address
+	Transmitters          []common.Address
+	F                     uint8
+	OnchainConfig         []byte
+	OffchainConfigVersion uint64
+	OffchainConfig        []byte
+}
+
+// BalanceAssertion describes an expected balance, optionally with a
+// +/- tolerance (Within, a base-10 integer string).
+type BalanceAssertion struct {
+	Name     string
+	Address  common.Address
+	Expected string
+	Getter   func(t *testing.T, addr common.Address) *big.Int
+	Within   string
+}
+
+// BalanceReq names an address whose balance should be fetched via Getter.
+type BalanceReq struct {
+	Name   string
+	Addr   common.Address
+	Getter func(t *testing.T, addr common.Address) *big.Int
+}
+
+// CCIPContracts bundles the deployed source and destination chains plus the
+// OCR oracle identities and derived commit/exec OCR2 configs.
+type CCIPContracts struct {
+	Source  SourceChain
+	Dest    DestinationChain
+	Oracles []confighelper.OracleIdentityExtra
+
+	commitOCRConfig, execOCRConfig *OCR2Config
+}
+
+// DeployNewOffRamp deploys a fresh EVM2EVMOffRamp wired to the current commit
+// store and onramp. Any previously deployed offramp is recorded as PrevOffRamp
+// so upgrade/migration paths can be exercised.
+func (c *CCIPContracts) DeployNewOffRamp(t *testing.T) {
+	// Zero address when there is no prior offramp.
+	prevOffRamp := common.HexToAddress("")
+	if c.Dest.OffRamp != nil {
+		prevOffRamp = c.Dest.OffRamp.Address()
+	}
+	offRampAddress, _, _, err := evm_2_evm_offramp.DeployEVM2EVMOffRamp(
+		c.Dest.User,
+		c.Dest.Chain,
+		evm_2_evm_offramp.EVM2EVMOffRampStaticConfig{
+			CommitStore:         c.Dest.CommitStore.Address(),
+			ChainSelector:       c.Dest.ChainSelector,
+			SourceChainSelector: c.Source.ChainSelector,
+			OnRamp:              c.Source.OnRamp.Address(),
+			PrevOffRamp:         prevOffRamp,
+			RmnProxy:            c.Dest.ARMProxy.Address(), // RMN formerly ARM
+			TokenAdminRegistry:  c.Dest.TokenAdminRegistry.Address(),
+		},
+		evm_2_evm_offramp.RateLimiterConfig{
+			IsEnabled: true,
+			Capacity:  LinkUSDValue(100),
+			Rate:      LinkUSDValue(1),
+		},
+	)
+	require.NoError(t, err)
+	c.Dest.Chain.Commit()
+
+	c.Dest.OffRamp, err = evm_2_evm_offramp.NewEVM2EVMOffRamp(offRampAddress, c.Dest.Chain)
+	require.NoError(t, err)
+
+	c.Dest.Chain.Commit()
+	c.Source.Chain.Commit()
+}
+
+// EnableOffRamp registers the current offramp on the destination router and
+// then applies the default exec OCR2 on/offchain configs to it.
+func (c *CCIPContracts) EnableOffRamp(t *testing.T) {
+	_, err := c.Dest.Router.ApplyRampUpdates(c.Dest.User, nil, nil, []router.RouterOffRamp{{SourceChainSelector: SourceChainSelector, OffRamp: c.Dest.OffRamp.Address()}})
+	require.NoError(t, err)
+	c.Dest.Chain.Commit()
+
+	onChainConfig := c.CreateDefaultExecOnchainConfig(t)
+	offChainConfig := c.CreateDefaultExecOffchainConfig(t)
+
+	c.SetupExecOCR2Config(t, onChainConfig, offChainConfig)
+}
+
+// EnableCommitStore applies the default commit OCR2 configs to the commit
+// store and authorizes it as a price updater on the dest price registry.
+func (c *CCIPContracts) EnableCommitStore(t *testing.T) {
+	onChainConfig := c.CreateDefaultCommitOnchainConfig(t)
+	offChainConfig := c.CreateDefaultCommitOffchainConfig(t)
+
+	c.SetupCommitOCR2Config(t, onChainConfig, offChainConfig)
+
+	_, err := c.Dest.PriceRegistry.ApplyPriceUpdatersUpdates(c.Dest.User, []common.Address{c.Dest.CommitStore.Address()}, []common.Address{})
+	require.NoError(t, err)
+	c.Dest.Chain.Commit()
+}
+
+// DeployNewOnRamp deploys a fresh EVM2EVMOnRamp on the source chain with fixed
+// test fee/rate-limit parameters, chaining PrevOnRamp to any previously
+// deployed onramp, and rebinds c.Source.OnRamp to the new deployment.
+func (c *CCIPContracts) DeployNewOnRamp(t *testing.T) {
+	t.Log("Deploying new onRamp")
+	// find the last onRamp
+	prevOnRamp := common.HexToAddress("")
+	if c.Source.OnRamp != nil {
+		prevOnRamp = c.Source.OnRamp.Address()
+	}
+	onRampAddress, _, _, err := evm_2_evm_onramp.DeployEVM2EVMOnRamp(
+		c.Source.User,  // user
+		c.Source.Chain, // client
+		evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{
+			LinkToken:          c.Source.LinkToken.Address(),
+			ChainSelector:      c.Source.ChainSelector,
+			DestChainSelector:  c.Dest.ChainSelector,
+			DefaultTxGasLimit:  200_000,
+			MaxNopFeesJuels:    big.NewInt(0).Mul(big.NewInt(100_000_000), big.NewInt(1e18)),
+			PrevOnRamp:         prevOnRamp,
+			RmnProxy:           c.Source.ARM.Address(), // RMN, formerly ARM
+			TokenAdminRegistry: c.Source.TokenAdminRegistry.Address(),
+		},
+		evm_2_evm_onramp.EVM2EVMOnRampDynamicConfig{
+			Router:                            c.Source.Router.Address(),
+			MaxNumberOfTokensPerMsg:           5,
+			DestGasOverhead:                   350_000,
+			DestGasPerPayloadByte:             16,
+			DestDataAvailabilityOverheadGas:   33_596,
+			DestGasPerDataAvailabilityByte:    16,
+			DestDataAvailabilityMultiplierBps: 6840, // 0.684
+			PriceRegistry:                     c.Source.PriceRegistry.Address(),
+			MaxDataBytes:                      1e5,
+			MaxPerMsgGasLimit:                 4_000_000,
+			DefaultTokenFeeUSDCents:           50,
+			DefaultTokenDestGasOverhead:       34_000,
+			DefaultTokenDestBytesOverhead:     500,
+		},
+		evm_2_evm_onramp.RateLimiterConfig{
+			IsEnabled: true,
+			Capacity:  LinkUSDValue(100),
+			Rate:      LinkUSDValue(1),
+		},
+		// Fee token configs: LINK gets a 0.9x premium multiplier, wrapped native 1.0x.
+		[]evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs{
+			{
+				Token:                      c.Source.LinkToken.Address(),
+				NetworkFeeUSDCents:         1_00,
+				GasMultiplierWeiPerEth:     1e18,
+				PremiumMultiplierWeiPerEth: 9e17,
+				Enabled:                    true,
+			},
+			{
+				Token:                      c.Source.WrappedNative.Address(),
+				NetworkFeeUSDCents:         1_00,
+				GasMultiplierWeiPerEth:     1e18,
+				PremiumMultiplierWeiPerEth: 1e18,
+				Enabled:                    true,
+			},
+		},
+		[]evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{
+			{
+				Token:                     c.Source.LinkToken.Address(),
+				MinFeeUSDCents:            50,           // $0.5
+				MaxFeeUSDCents:            1_000_000_00, // $ 1 million
+				DeciBps:                   5_0,          // 5 bps
+				DestGasOverhead:           34_000,
+				DestBytesOverhead:         32,
+				AggregateRateLimitEnabled: true,
+			},
+		},
+		// No node operators configured in tests.
+		[]evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{},
+	)
+
+	require.NoError(t, err)
+	c.Source.Chain.Commit()
+	c.Dest.Chain.Commit()
+	c.Source.OnRamp, err = evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampAddress, c.Source.Chain)
+	require.NoError(t, err)
+	c.Source.Chain.Commit()
+	c.Dest.Chain.Commit()
+}
+
+// EnableOnRamp registers the current onramp for the dest chain selector on the
+// source router.
+func (c *CCIPContracts) EnableOnRamp(t *testing.T) {
+	t.Log("Setting onRamp on source router")
+	_, err := c.Source.Router.ApplyRampUpdates(c.Source.User, []router.RouterOnRamp{{DestChainSelector: c.Dest.ChainSelector, OnRamp: c.Source.OnRamp.Address()}}, nil, nil)
+	require.NoError(t, err)
+	c.Source.Chain.Commit()
+	c.Dest.Chain.Commit()
+}
+
+// DeployNewCommitStore deploys a v1.2.0 CommitStoreHelper on the dest chain
+// and binds both the helper and plain CommitStore wrappers to its address.
+func (c *CCIPContracts) DeployNewCommitStore(t *testing.T) {
+	commitStoreAddress, _, _, err := commit_store_helper_1_2_0.DeployCommitStoreHelper(
+		c.Dest.User,  // user
+		c.Dest.Chain, // client
+		commit_store_helper_1_2_0.CommitStoreStaticConfig{
+			ChainSelector:       c.Dest.ChainSelector,
+			SourceChainSelector: c.Source.ChainSelector,
+			OnRamp:              c.Source.OnRamp.Address(),
+			ArmProxy:            c.Dest.ARMProxy.Address(),
+		},
+	)
+	require.NoError(t, err)
+	c.Dest.Chain.Commit()
+	c.Dest.CommitStoreHelper, err = commit_store_helper.NewCommitStoreHelper(commitStoreAddress, c.Dest.Chain)
+	require.NoError(t, err)
+	// since CommitStoreHelper derives from CommitStore, it's safe to instantiate both on same address
+	c.Dest.CommitStore, err = commit_store.NewCommitStore(commitStoreAddress, c.Dest.Chain)
+	require.NoError(t, err)
+}
+
+// DeployNewPriceRegistry deploys a v1.2.0 PriceRegistry on the dest chain with
+// the commit store as price updater and LINK as fee token, then seeds initial
+// token (LINK $8, wrapped native $1) and gas prices.
+func (c *CCIPContracts) DeployNewPriceRegistry(t *testing.T) {
+	t.Log("Deploying new Price Registry")
+	destPricesAddress, _, _, err := price_registry_1_2_0.DeployPriceRegistry(
+		c.Dest.User,
+		c.Dest.Chain,
+		[]common.Address{c.Dest.CommitStore.Address()},
+		[]common.Address{c.Dest.LinkToken.Address()},
+		60*60*24*14, // two weeks
+	)
+	require.NoError(t, err)
+	c.Source.Chain.Commit()
+	c.Dest.Chain.Commit()
+	c.Dest.PriceRegistry, err = price_registry_1_2_0.NewPriceRegistry(destPricesAddress, c.Dest.Chain)
+	require.NoError(t, err)
+
+	priceUpdates := price_registry_1_2_0.InternalPriceUpdates{
+		TokenPriceUpdates: []price_registry_1_2_0.InternalTokenPriceUpdate{
+			{
+				SourceToken: c.Dest.LinkToken.Address(),
+				UsdPerToken: big.NewInt(8e18), // 8usd
+			},
+			{
+				SourceToken: c.Dest.WrappedNative.Address(),
+				UsdPerToken: big.NewInt(1e18), // 1usd
+			},
+		},
+		GasPriceUpdates: []price_registry_1_2_0.InternalGasPriceUpdate{
+			{
+				DestChainSelector: c.Source.ChainSelector,
+				UsdPerUnitGas:     big.NewInt(2000e9), // $2000 per eth * 1gwei = 2000e9
+			},
+		},
+	}
+	_, err = c.Dest.PriceRegistry.UpdatePrices(c.Dest.User, priceUpdates)
+	require.NoError(t, err)
+
+	c.Source.Chain.Commit()
+	c.Dest.Chain.Commit()
+
+	t.Logf("New Price Registry deployed at %s", destPricesAddress.String())
+}
+
+// SetNopsOnRamp sets the node operators and their weights on the source onramp
+// and waits for the transaction to be mined.
+func (c *CCIPContracts) SetNopsOnRamp(t *testing.T, nopsAndWeights []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight) {
+	tx, err := c.Source.OnRamp.SetNops(c.Source.User, nopsAndWeights)
+	require.NoError(t, err)
+	c.Source.Chain.Commit()
+	_, err = bind.WaitMined(context.Background(), c.Source.Chain, tx)
+	require.NoError(t, err)
+}
+
+// GetSourceLinkBalance returns addr's LINK balance on the source chain.
+func (c *CCIPContracts) GetSourceLinkBalance(t *testing.T, addr common.Address) *big.Int {
+	return GetBalance(t, c.Source.Chain, c.Source.LinkToken.Address(), addr)
+}
+
+// GetDestLinkBalance returns addr's LINK balance on the dest chain.
+func (c *CCIPContracts) GetDestLinkBalance(t *testing.T, addr common.Address) *big.Int {
+	return GetBalance(t, c.Dest.Chain, c.Dest.LinkToken.Address(), addr)
+}
+
+// GetSourceWrappedTokenBalance returns addr's wrapped-native balance on the source chain.
+func (c *CCIPContracts) GetSourceWrappedTokenBalance(t *testing.T, addr common.Address) *big.Int {
+	return GetBalance(t, c.Source.Chain, c.Source.WrappedNative.Address(), addr)
+}
+
+// GetDestWrappedTokenBalance returns addr's wrapped-native balance on the dest chain.
+func (c *CCIPContracts) GetDestWrappedTokenBalance(t *testing.T, addr common.Address) *big.Int {
+	return GetBalance(t, c.Dest.Chain, c.Dest.WrappedNative.Address(), addr)
+}
+
+// AssertBalances checks each BalanceAssertion: exact string equality when
+// Within is empty, otherwise that the actual balance lies strictly within
+// (Expected-Within, Expected+Within). Expected and Within are base-10 integer
+// strings; malformed strings now fail the test with a clear message instead of
+// producing a nil *big.Int and an opaque panic in big.Int.Add/Sub.
+func (c *CCIPContracts) AssertBalances(t *testing.T, bas []BalanceAssertion) {
+	for _, b := range bas {
+		actual := b.Getter(t, b.Address)
+		t.Log("Checking balance for", b.Name, "at", b.Address.Hex(), "got", actual)
+		require.NotNil(t, actual, "%v getter return nil", b.Name)
+		if b.Within == "" {
+			require.Equal(t, b.Expected, actual.String(), "wrong balance for %s got %s want %s", b.Name, actual, b.Expected)
+		} else {
+			bi, ok := big.NewInt(0).SetString(b.Expected, 10)
+			require.True(t, ok, "invalid Expected balance %q for %s", b.Expected, b.Name)
+			withinI, ok := big.NewInt(0).SetString(b.Within, 10)
+			require.True(t, ok, "invalid Within tolerance %q for %s", b.Within, b.Name)
+			high := big.NewInt(0).Add(bi, withinI)
+			low := big.NewInt(0).Sub(bi, withinI)
+			require.Equal(t, -1, actual.Cmp(high), "wrong balance for %s got %s outside expected range [%s, %s]", b.Name, actual, low, high)
+			require.Equal(t, 1, actual.Cmp(low), "wrong balance for %s got %s outside expected range [%s, %s]", b.Name, actual, low, high)
+		}
+	}
+}
+
+// AccountToAddress decodes each OCR2 account (a 0x-prefixed hex string) into a
+// 20-byte EVM address, erroring on malformed hex or wrong length.
+func AccountToAddress(accounts []ocr2types.Account) (addresses []common.Address, err error) {
+	for _, signer := range accounts {
+		bytes, err := hexutil.Decode(string(signer))
+		if err != nil {
+			// errors.Wrapf replaces the non-idiomatic Wrap(err, fmt.Sprintf(...)).
+			return []common.Address{}, errors.Wrapf(err, "given address is not valid %s", signer)
+		}
+		if len(bytes) != 20 {
+			return []common.Address{}, errors.Errorf("address is not 20 bytes %s", signer)
+		}
+		addresses = append(addresses, common.BytesToAddress(bytes))
+	}
+	return addresses, nil
+}
+
+// OnchainPublicKeyToAddress converts 20-byte OCR onchain public keys into EVM
+// addresses, erroring on any key of the wrong length.
+func OnchainPublicKeyToAddress(publicKeys []ocrtypes.OnchainPublicKey) (addresses []common.Address, err error) {
+	for _, pk := range publicKeys {
+		if len(pk) != 20 {
+			return []common.Address{}, errors.Errorf("address is not 20 bytes %s", pk)
+		}
+		addresses = append(addresses, common.BytesToAddress(pk))
+	}
+	return addresses, nil
+}
+
+// DeriveOCR2Config runs libocr's ContractSetConfigArgsForTests over the given
+// oracles and raw on/offchain configs and packages the result into the
+// argument set expected by the contracts' SetOCR2Config.
+func (c *CCIPContracts) DeriveOCR2Config(t *testing.T, oracles []confighelper.OracleIdentityExtra, rawOnchainConfig []byte, rawOffchainConfig []byte) *OCR2Config {
+	signers, transmitters, threshold, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper.ContractSetConfigArgsForTests(
+		2*time.Second,        // deltaProgress
+		1*time.Second,        // deltaResend
+		1*time.Second,        // deltaRound
+		500*time.Millisecond, // deltaGrace
+		2*time.Second,        // deltaStage
+		3,                    // NOTE(review): rMax per libocr confighelper — confirm
+		[]int{1, 1, 1, 1},    // transmission schedule s
+		oracles,
+		rawOffchainConfig,
+		50*time.Millisecond, // Max duration query
+		1*time.Second,       // Max duration observation
+		100*time.Millisecond,
+		100*time.Millisecond,
+		100*time.Millisecond,
+		1, // faults
+		rawOnchainConfig,
+	)
+	require.NoError(t, err)
+	lggr := logger.TestLogger(t)
+	lggr.Infow("Setting Config on Oracle Contract",
+		"signers", signers,
+		"transmitters", transmitters,
+		"threshold", threshold,
+		"onchainConfig", onchainConfig,
+		"encodedConfigVersion", offchainConfigVersion,
+	)
+	signerAddresses, err := OnchainPublicKeyToAddress(signers)
+	require.NoError(t, err)
+	transmitterAddresses, err := AccountToAddress(transmitters)
+	require.NoError(t, err)
+
+	return &OCR2Config{
+		Signers:               signerAddresses,
+		Transmitters:          transmitterAddresses,
+		F:                     threshold,
+		OnchainConfig:         onchainConfig,
+		OffchainConfigVersion: offchainConfigVersion,
+		OffchainConfig:        offchainConfig,
+	}
+}
+
+// SetupCommitOCR2Config derives the commit OCR2 config from c.Oracles and
+// applies it to the commit store on the dest chain.
+func (c *CCIPContracts) SetupCommitOCR2Config(t *testing.T, commitOnchainConfig, commitOffchainConfig []byte) {
+	c.commitOCRConfig = c.DeriveOCR2Config(t, c.Oracles, commitOnchainConfig, commitOffchainConfig)
+	// Set the DON on the commit store
+	_, err := c.Dest.CommitStore.SetOCR2Config(
+		c.Dest.User,
+		c.commitOCRConfig.Signers,
+		c.commitOCRConfig.Transmitters,
+		c.commitOCRConfig.F,
+		c.commitOCRConfig.OnchainConfig,
+		c.commitOCRConfig.OffchainConfigVersion,
+		c.commitOCRConfig.OffchainConfig,
+	)
+	require.NoError(t, err)
+	c.Dest.Chain.Commit()
+}
+
+// SetupExecOCR2Config derives the exec OCR2 config from c.Oracles and applies
+// it to the offramp on the dest chain.
+func (c *CCIPContracts) SetupExecOCR2Config(t *testing.T, execOnchainConfig, execOffchainConfig []byte) {
+	c.execOCRConfig = c.DeriveOCR2Config(t, c.Oracles, execOnchainConfig, execOffchainConfig)
+	// Same DON on the offramp
+	_, err := c.Dest.OffRamp.SetOCR2Config(
+		c.Dest.User,
+		c.execOCRConfig.Signers,
+		c.execOCRConfig.Transmitters,
+		c.execOCRConfig.F,
+		c.execOCRConfig.OnchainConfig,
+		c.execOCRConfig.OffchainConfigVersion,
+		c.execOCRConfig.OffchainConfig,
+	)
+	require.NoError(t, err)
+	c.Dest.Chain.Commit()
+}
+
+// SetupOnchainConfig applies both commit and exec OCR2 configs and returns the
+// dest-chain block number captured just before configuration, so callers can
+// replay logs from that point.
+func (c *CCIPContracts) SetupOnchainConfig(t *testing.T, commitOnchainConfig, commitOffchainConfig, execOnchainConfig, execOffchainConfig []byte) int64 {
+	// Note We do NOT set the payees, payment is done in the OCR2Base implementation
+	blockBeforeConfig, err := c.Dest.Chain.BlockByNumber(context.Background(), nil)
+	require.NoError(t, err)
+
+	c.SetupCommitOCR2Config(t, commitOnchainConfig, commitOffchainConfig)
+	c.SetupExecOCR2Config(t, execOnchainConfig, execOffchainConfig)
+
+	return blockBeforeConfig.Number().Int64()
+}
+
+// SendMessage builds a LINK-fee CCIP message carrying tokenAmount LINK and a
+// "hello" payload to receiverAddr, approves fee + token on the source router,
+// and submits it via SendRequest.
+func (c *CCIPContracts) SendMessage(t *testing.T, gasLimit, tokenAmount *big.Int, receiverAddr common.Address) {
+	extraArgs, err := GetEVMExtraArgsV1(gasLimit, false)
+	require.NoError(t, err)
+	msg := router.ClientEVM2AnyMessage{
+		Receiver: MustEncodeAddress(t, receiverAddr),
+		Data:     []byte("hello"),
+		TokenAmounts: []router.ClientEVMTokenAmount{
+			{
+				Token:  c.Source.LinkToken.Address(),
+				Amount: tokenAmount,
+			},
+		},
+		FeeToken:  c.Source.LinkToken.Address(),
+		ExtraArgs: extraArgs,
+	}
+	fee, err := c.Source.Router.GetFee(nil, c.Dest.ChainSelector, msg)
+	require.NoError(t, err)
+	// Currently no overhead and 1gwei dest gas price. So fee is simply gasLimit * gasPrice.
+	// require.Equal(t, new(big.Int).Mul(gasLimit, gasPrice).String(), fee.String())
+	// Approve the fee amount + the token amount
+	_, err = c.Source.LinkToken.Approve(c.Source.User, c.Source.Router.Address(), new(big.Int).Add(fee, tokenAmount))
+	require.NoError(t, err)
+	c.Source.Chain.Commit()
+	c.SendRequest(t, msg)
+}
+
+// GetBalances resolves every BalanceReq through its Getter and returns the
+// results keyed by request name, erroring if any getter yields nil.
+func GetBalances(t *testing.T, brs []BalanceReq) (map[string]*big.Int, error) {
+	balances := make(map[string]*big.Int, len(brs))
+	for _, req := range brs {
+		val := req.Getter(t, req.Addr)
+		if val == nil {
+			return nil, fmt.Errorf("%v getter return nil", req.Name)
+		}
+		balances[req.Name] = val
+	}
+	return balances, nil
+}
+
+// MustAddBigInt returns a + b, where b is a base-10 integer string.
+// Panics with a clear message if b is malformed; previously the ignored
+// SetString failure produced a nil *big.Int and an opaque panic inside Add.
+func MustAddBigInt(a *big.Int, b string) *big.Int {
+	bi, ok := big.NewInt(0).SetString(b, 10)
+	if !ok {
+		panic(fmt.Sprintf("MustAddBigInt: invalid base-10 integer %q", b))
+	}
+	return big.NewInt(0).Add(a, bi)
+}
+
+// MustSubBigInt returns a - b, where b is a base-10 integer string.
+// Panics with a clear message if b is malformed (see MustAddBigInt).
+func MustSubBigInt(a *big.Int, b string) *big.Int {
+	bi, ok := big.NewInt(0).SetString(b, 10)
+	if !ok {
+		panic(fmt.Sprintf("MustSubBigInt: invalid base-10 integer %q", b))
+	}
+	return big.NewInt(0).Sub(a, bi)
+}
+
+// MustEncodeAddress ABI-encodes a single address argument, failing the test on
+// any encoding error.
+func MustEncodeAddress(t *testing.T, address common.Address) []byte {
+	encoded, err := utils.ABIEncode(`[{"type":"address"}]`, address)
+	require.NoError(t, err)
+	return encoded
+}
+
+// SetAdminAndRegisterPool makes user the token administrator in the
+// TokenAdminRegistry (propose then accept) and registers poolAddress as the
+// token's pool, committing one block at the end.
+func SetAdminAndRegisterPool(t *testing.T,
+	chain *backends.SimulatedBackend,
+	user *bind.TransactOpts,
+	tokenAdminRegistry *token_admin_registry.TokenAdminRegistry,
+	tokenAddress common.Address,
+	poolAddress common.Address) {
+	_, err := tokenAdminRegistry.ProposeAdministrator(user, tokenAddress, user.From)
+	require.NoError(t, err)
+	_, err = tokenAdminRegistry.AcceptAdminRole(user, tokenAddress)
+	require.NoError(t, err)
+	_, err = tokenAdminRegistry.SetPool(user, tokenAddress, poolAddress)
+	require.NoError(t, err)
+
+	chain.Commit()
+}
+
+func SetupCCIPContracts(t *testing.T, sourceChainID, sourceChainSelector, destChainID, destChainSelector uint64) CCIPContracts {
+ sourceChain, sourceUser := SetupChain(t)
+ destChain, destUser := SetupChain(t)
+
+ // ================================================================
+ // │ Deploy RMN │
+ // ================================================================
+
+ armSourceAddress, _, _, err := mock_arm_contract.DeployMockARMContract(
+ sourceUser,
+ sourceChain,
+ )
+ require.NoError(t, err)
+ sourceARM, err := mock_arm_contract.NewMockARMContract(armSourceAddress, sourceChain)
+ require.NoError(t, err)
+ armProxySourceAddress, _, _, err := arm_proxy_contract.DeployARMProxyContract(
+ sourceUser,
+ sourceChain,
+ armSourceAddress,
+ )
+ require.NoError(t, err)
+ sourceARMProxy, err := arm_proxy_contract.NewARMProxyContract(armProxySourceAddress, sourceChain)
+ require.NoError(t, err)
+ sourceChain.Commit()
+
+ armDestAddress, _, _, err := mock_arm_contract.DeployMockARMContract(
+ destUser,
+ destChain,
+ )
+ require.NoError(t, err)
+ armProxyDestAddress, _, _, err := arm_proxy_contract.DeployARMProxyContract(
+ destUser,
+ destChain,
+ armDestAddress,
+ )
+ require.NoError(t, err)
+ destChain.Commit()
+ destARM, err := mock_arm_contract.NewMockARMContract(armDestAddress, destChain)
+ require.NoError(t, err)
+ destARMProxy, err := arm_proxy_contract.NewARMProxyContract(armProxyDestAddress, destChain)
+ require.NoError(t, err)
+
+ // ================================================================
+ // │ Deploy TokenAdminRegistry │
+ // ================================================================
+
+ sourceTokenAdminRegistryAddress, _, _, err := token_admin_registry.DeployTokenAdminRegistry(sourceUser, sourceChain)
+ require.NoError(t, err)
+ sourceTokenAdminRegistry, err := token_admin_registry.NewTokenAdminRegistry(sourceTokenAdminRegistryAddress, sourceChain)
+ require.NoError(t, err)
+ sourceChain.Commit()
+
+ destTokenAdminRegistryAddress, _, _, err := token_admin_registry.DeployTokenAdminRegistry(destUser, destChain)
+ require.NoError(t, err)
+ destTokenAdminRegistry, err := token_admin_registry.NewTokenAdminRegistry(destTokenAdminRegistryAddress, destChain)
+ require.NoError(t, err)
+ destChain.Commit()
+
+ // ================================================================
+ // │ Deploy Tokens │
+ // ================================================================
+
+ // Deploy link token and pool on source chain
+ sourceLinkTokenAddress, _, _, err := link_token_interface.DeployLinkToken(sourceUser, sourceChain)
+ require.NoError(t, err)
+ sourceChain.Commit()
+ sourceLinkToken, err := link_token_interface.NewLinkToken(sourceLinkTokenAddress, sourceChain)
+ require.NoError(t, err)
+ t.Logf("Deloyed LINK token on source chain at %s", sourceLinkTokenAddress.String())
+
+ sourceWeth9addr, _, _, err := weth9.DeployWETH9(sourceUser, sourceChain)
+ require.NoError(t, err)
+ sourceWrapped, err := weth9.NewWETH9(sourceWeth9addr, sourceChain)
+ require.NoError(t, err)
+ t.Logf("Deloyed WETH9 token on source chain at %s", sourceWeth9addr.String())
+
+ sourceCustomTokenAddress, _, _, err := link_token_interface.DeployLinkToken(sourceUser, sourceChain)
+ require.NoError(t, err)
+ sourceCustomToken, err := link_token_interface.NewLinkToken(sourceCustomTokenAddress, sourceChain)
+ require.NoError(t, err)
+ destChain.Commit()
+ t.Logf("Deloyed custom token on source chain at %s", sourceCustomTokenAddress.String())
+
+ // Dest chain
+
+ destLinkTokenAddress, _, _, err := link_token_interface.DeployLinkToken(destUser, destChain)
+ require.NoError(t, err)
+ destChain.Commit()
+ destLinkToken, err := link_token_interface.NewLinkToken(destLinkTokenAddress, destChain)
+ require.NoError(t, err)
+ t.Logf("Deloyed LINK token on dest chain at %s", destLinkTokenAddress.String())
+
+ destWeth9addr, _, _, err := weth9.DeployWETH9(destUser, destChain)
+ require.NoError(t, err)
+ destWrapped, err := weth9.NewWETH9(destWeth9addr, destChain)
+ require.NoError(t, err)
+ t.Logf("Deloyed WETH9 token on dest chain at %s", destWeth9addr.String())
+
+ destCustomTokenAddress, _, _, err := link_token_interface.DeployLinkToken(destUser, destChain)
+ require.NoError(t, err)
+ destCustomToken, err := link_token_interface.NewLinkToken(destCustomTokenAddress, destChain)
+ require.NoError(t, err)
+ destChain.Commit()
+ t.Logf("Deloyed custom token on dest chain at %s", destCustomTokenAddress.String())
+
+ // ================================================================
+ // │ Deploy Routers │
+ // ================================================================
+
+ sourceRouterAddress, _, _, err := router.DeployRouter(sourceUser, sourceChain, sourceWeth9addr, armProxySourceAddress)
+ require.NoError(t, err)
+ sourceRouter, err := router.NewRouter(sourceRouterAddress, sourceChain)
+ require.NoError(t, err)
+ sourceChain.Commit()
+
+ destRouterAddress, _, _, err := router.DeployRouter(destUser, destChain, destWeth9addr, armProxyDestAddress)
+ require.NoError(t, err)
+ destRouter, err := router.NewRouter(destRouterAddress, destChain)
+ require.NoError(t, err)
+ destChain.Commit()
+
+ // ================================================================
+ // │ Deploy Pools │
+ // ================================================================
+
+ sourcePoolLinkAddress, _, _, err := lock_release_token_pool.DeployLockReleaseTokenPool(
+ sourceUser,
+ sourceChain,
+ sourceLinkTokenAddress,
+ []common.Address{},
+ armProxySourceAddress,
+ true,
+ sourceRouterAddress,
+ )
+ require.NoError(t, err)
+ sourceChain.Commit()
+ SetAdminAndRegisterPool(t, sourceChain, sourceUser, sourceTokenAdminRegistry, sourceLinkTokenAddress, sourcePoolLinkAddress)
+
+ sourceLinkPool, err := lock_release_token_pool.NewLockReleaseTokenPool(sourcePoolLinkAddress, sourceChain)
+ require.NoError(t, err)
+
+ sourceWeth9PoolAddress, _, _, err := lock_release_token_pool.DeployLockReleaseTokenPool(
+ sourceUser,
+ sourceChain,
+ sourceWeth9addr,
+ []common.Address{},
+ armProxySourceAddress,
+ true,
+ sourceRouterAddress,
+ )
+ require.NoError(t, err)
+ sourceChain.Commit()
+ SetAdminAndRegisterPool(t, sourceChain, sourceUser, sourceTokenAdminRegistry, sourceWeth9addr, sourceWeth9PoolAddress)
+
+ sourceWeth9Pool, err := lock_release_token_pool.NewLockReleaseTokenPool(sourceWeth9PoolAddress, sourceChain)
+ require.NoError(t, err)
+
+ // dest
+
+ destPoolLinkAddress, _, _, err := lock_release_token_pool.DeployLockReleaseTokenPool(
+ destUser,
+ destChain,
+ destLinkTokenAddress,
+ []common.Address{},
+ armProxyDestAddress,
+ true,
+ destRouterAddress,
+ )
+ require.NoError(t, err)
+ destChain.Commit()
+ SetAdminAndRegisterPool(t, destChain, destUser, destTokenAdminRegistry, destLinkTokenAddress, destPoolLinkAddress)
+
+ destLinkPool, err := lock_release_token_pool.NewLockReleaseTokenPool(destPoolLinkAddress, destChain)
+ require.NoError(t, err)
+ destChain.Commit()
+
+ // Float the offramp pool
+ o, err := destLinkPool.Owner(nil)
+ require.NoError(t, err)
+ require.Equal(t, destUser.From.String(), o.String())
+ _, err = destLinkPool.SetRebalancer(destUser, destUser.From)
+ require.NoError(t, err)
+ _, err = destLinkToken.Approve(destUser, destPoolLinkAddress, Link(200))
+ require.NoError(t, err)
+ _, err = destLinkPool.ProvideLiquidity(destUser, Link(200))
+ require.NoError(t, err)
+ destChain.Commit()
+
+ destWrappedPoolAddress, _, _, err := lock_release_token_pool.DeployLockReleaseTokenPool(
+ destUser,
+ destChain,
+ destWeth9addr,
+ []common.Address{},
+ armProxyDestAddress,
+ true,
+ destRouterAddress,
+ )
+ require.NoError(t, err)
+ destChain.Commit()
+ SetAdminAndRegisterPool(t, destChain, destUser, destTokenAdminRegistry, destWeth9addr, destWrappedPoolAddress)
+
+ destWrappedPool, err := lock_release_token_pool.NewLockReleaseTokenPool(destWrappedPoolAddress, destChain)
+ require.NoError(t, err)
+
+ poolFloatValue := big.NewInt(1e18)
+
+ destUser.Value = poolFloatValue
+ _, err = destWrapped.Deposit(destUser)
+ require.NoError(t, err)
+ destChain.Commit()
+ destUser.Value = nil
+
+ _, err = destWrapped.Transfer(destUser, destWrappedPool.Address(), poolFloatValue)
+ require.NoError(t, err)
+ destChain.Commit()
+
+ // ================================================================
+ // │ Configure token pools │
+ // ================================================================
+
+ abiEncodedDestLinkPool, err := abihelpers.EncodeAddress(destLinkPool.Address())
+ require.NoError(t, err)
+ abiEncodedDestLinkTokenAddress, err := abihelpers.EncodeAddress(destLinkToken.Address())
+ require.NoError(t, err)
+ _, err = sourceLinkPool.ApplyChainUpdates(
+ sourceUser,
+ []lock_release_token_pool.TokenPoolChainUpdate{{
+ RemoteChainSelector: DestChainSelector,
+ RemotePoolAddress: abiEncodedDestLinkPool,
+ RemoteTokenAddress: abiEncodedDestLinkTokenAddress,
+ Allowed: true,
+ OutboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: HundredLink,
+ Rate: big.NewInt(1e18),
+ },
+ InboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: HundredLink,
+ Rate: big.NewInt(1e18),
+ },
+ }},
+ )
+ require.NoError(t, err)
+
+ abiEncodedDestWrappedPool, err := abihelpers.EncodeAddress(destWrappedPool.Address())
+ require.NoError(t, err)
+ abiEncodedDestWrappedTokenAddr, err := abihelpers.EncodeAddress(destWeth9addr)
+ require.NoError(t, err)
+ _, err = sourceWeth9Pool.ApplyChainUpdates(
+ sourceUser,
+ []lock_release_token_pool.TokenPoolChainUpdate{{
+ RemoteChainSelector: DestChainSelector,
+ RemotePoolAddress: abiEncodedDestWrappedPool,
+ RemoteTokenAddress: abiEncodedDestWrappedTokenAddr,
+ Allowed: true,
+ OutboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: HundredLink,
+ Rate: big.NewInt(1e18),
+ },
+ InboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: HundredLink,
+ Rate: big.NewInt(1e18),
+ },
+ }},
+ )
+ require.NoError(t, err)
+ sourceChain.Commit()
+
+ abiEncodedSourceLinkPool, err := abihelpers.EncodeAddress(sourceLinkPool.Address())
+ require.NoError(t, err)
+ abiEncodedSourceLinkTokenAddr, err := abihelpers.EncodeAddress(sourceLinkTokenAddress)
+ require.NoError(t, err)
+ _, err = destLinkPool.ApplyChainUpdates(
+ destUser,
+ []lock_release_token_pool.TokenPoolChainUpdate{{
+ RemoteChainSelector: SourceChainSelector,
+ RemotePoolAddress: abiEncodedSourceLinkPool,
+ RemoteTokenAddress: abiEncodedSourceLinkTokenAddr,
+ Allowed: true,
+ OutboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: HundredLink,
+ Rate: big.NewInt(1e18),
+ },
+ InboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: HundredLink,
+ Rate: big.NewInt(1e18),
+ },
+ }},
+ )
+ require.NoError(t, err)
+
+ abiEncodedSourceWrappedPool, err := abihelpers.EncodeAddress(sourceWeth9Pool.Address())
+ require.NoError(t, err)
+ abiEncodedSourceWrappedTokenAddr, err := abihelpers.EncodeAddress(sourceWrapped.Address())
+ require.NoError(t, err)
+ _, err = destWrappedPool.ApplyChainUpdates(
+ destUser,
+ []lock_release_token_pool.TokenPoolChainUpdate{{
+ RemoteChainSelector: SourceChainSelector,
+ RemotePoolAddress: abiEncodedSourceWrappedPool,
+ RemoteTokenAddress: abiEncodedSourceWrappedTokenAddr,
+ Allowed: true,
+ OutboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: HundredLink,
+ Rate: big.NewInt(1e18),
+ },
+ InboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: HundredLink,
+ Rate: big.NewInt(1e18),
+ },
+ }},
+ )
+ require.NoError(t, err)
+ destChain.Commit()
+
+ // ================================================================
+ // │ Deploy Price Registry │
+ // ================================================================
+
+ sourcePricesAddress, _, _, err := price_registry_1_2_0.DeployPriceRegistry(
+ sourceUser,
+ sourceChain,
+ nil,
+ []common.Address{sourceLinkTokenAddress, sourceWeth9addr},
+ 60*60*24*14, // two weeks
+ )
+ require.NoError(t, err)
+
+ srcPriceRegistry, err := price_registry_1_2_0.NewPriceRegistry(sourcePricesAddress, sourceChain)
+ require.NoError(t, err)
+
+ _, err = srcPriceRegistry.UpdatePrices(sourceUser, price_registry_1_2_0.InternalPriceUpdates{
+ TokenPriceUpdates: []price_registry_1_2_0.InternalTokenPriceUpdate{
+ {
+ SourceToken: sourceLinkTokenAddress,
+ UsdPerToken: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(20)),
+ },
+ {
+ SourceToken: sourceWeth9addr,
+ UsdPerToken: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(2000)),
+ },
+ },
+ GasPriceUpdates: []price_registry_1_2_0.InternalGasPriceUpdate{
+ {
+ DestChainSelector: destChainSelector,
+ UsdPerUnitGas: big.NewInt(20000e9),
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ // ================================================================
+ // │ Deploy Lane │
+ // ================================================================
+
+ onRampAddress, _, _, err := evm_2_evm_onramp.DeployEVM2EVMOnRamp(
+ sourceUser, // user
+ sourceChain, // client
+ evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{
+ LinkToken: sourceLinkTokenAddress,
+ ChainSelector: sourceChainSelector,
+ DestChainSelector: destChainSelector,
+ DefaultTxGasLimit: 200_000,
+ MaxNopFeesJuels: big.NewInt(0).Mul(big.NewInt(100_000_000), big.NewInt(1e18)),
+ PrevOnRamp: common.HexToAddress(""),
+ RmnProxy: armProxySourceAddress, // RMN, formerly ARM
+ TokenAdminRegistry: sourceTokenAdminRegistry.Address(),
+ },
+ evm_2_evm_onramp.EVM2EVMOnRampDynamicConfig{
+ Router: sourceRouterAddress,
+ MaxNumberOfTokensPerMsg: 5,
+ DestGasOverhead: 350_000,
+ DestGasPerPayloadByte: 16,
+ DestDataAvailabilityOverheadGas: 33_596,
+ DestGasPerDataAvailabilityByte: 16,
+ DestDataAvailabilityMultiplierBps: 6840, // 0.684
+ PriceRegistry: sourcePricesAddress,
+ MaxDataBytes: 1e5,
+ MaxPerMsgGasLimit: 4_000_000,
+ DefaultTokenFeeUSDCents: 50,
+ DefaultTokenDestGasOverhead: 34_000,
+ DefaultTokenDestBytesOverhead: 500,
+ },
+ evm_2_evm_onramp.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: LinkUSDValue(100),
+ Rate: LinkUSDValue(1),
+ },
+ []evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs{
+ {
+ Token: sourceLinkTokenAddress,
+ NetworkFeeUSDCents: 1_00,
+ GasMultiplierWeiPerEth: 1e18,
+ PremiumMultiplierWeiPerEth: 9e17,
+ Enabled: true,
+ },
+ {
+ Token: sourceWeth9addr,
+ NetworkFeeUSDCents: 1_00,
+ GasMultiplierWeiPerEth: 1e18,
+ PremiumMultiplierWeiPerEth: 1e18,
+ Enabled: true,
+ },
+ },
+ []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{
+ {
+ Token: sourceLinkTokenAddress,
+ MinFeeUSDCents: 50, // $0.5
+ MaxFeeUSDCents: 1_000_000_00, // $ 1 million
+ DeciBps: 5_0, // 5 bps
+ DestGasOverhead: 34_000,
+ DestBytesOverhead: 32,
+ AggregateRateLimitEnabled: true,
+ },
+ },
+ []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{},
+ )
+ require.NoError(t, err)
+ onRamp, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampAddress, sourceChain)
+ require.NoError(t, err)
+
+ _, err = sourceRouter.ApplyRampUpdates(sourceUser, []router.RouterOnRamp{{DestChainSelector: destChainSelector, OnRamp: onRampAddress}}, nil, nil)
+ require.NoError(t, err)
+ sourceChain.Commit()
+
+ destPriceRegistryAddress, _, _, err := price_registry_1_2_0.DeployPriceRegistry(
+ destUser,
+ destChain,
+ nil,
+ []common.Address{destLinkTokenAddress, destWeth9addr},
+ 60*60*24*14, // two weeks
+ )
+ require.NoError(t, err)
+ destPriceRegistry, err := price_registry_1_2_0.NewPriceRegistry(destPriceRegistryAddress, destChain)
+ require.NoError(t, err)
+
+ // Deploy commit store.
+ commitStoreAddress, _, _, err := commit_store_helper_1_2_0.DeployCommitStoreHelper(
+ destUser, // user
+ destChain, // client
+ commit_store_helper_1_2_0.CommitStoreStaticConfig{
+ ChainSelector: destChainSelector,
+ SourceChainSelector: sourceChainSelector,
+ OnRamp: onRamp.Address(),
+ ArmProxy: destARMProxy.Address(),
+ },
+ )
+ require.NoError(t, err)
+ destChain.Commit()
+ commitStore, err := commit_store.NewCommitStore(commitStoreAddress, destChain)
+ require.NoError(t, err)
+ commitStoreHelper, err := commit_store_helper.NewCommitStoreHelper(commitStoreAddress, destChain)
+ require.NoError(t, err)
+
+ offRampAddress, _, _, err := evm_2_evm_offramp.DeployEVM2EVMOffRamp(
+ destUser,
+ destChain,
+ evm_2_evm_offramp.EVM2EVMOffRampStaticConfig{
+ CommitStore: commitStore.Address(),
+ ChainSelector: destChainSelector,
+ SourceChainSelector: sourceChainSelector,
+ OnRamp: onRampAddress,
+ PrevOffRamp: common.HexToAddress(""),
+ RmnProxy: armProxyDestAddress, // RMN, formerly ARM
+ TokenAdminRegistry: destTokenAdminRegistryAddress,
+ },
+ evm_2_evm_offramp.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: LinkUSDValue(100),
+ Rate: LinkUSDValue(1),
+ },
+ )
+ require.NoError(t, err)
+ offRamp, err := evm_2_evm_offramp.NewEVM2EVMOffRamp(offRampAddress, destChain)
+ require.NoError(t, err)
+ destChain.Commit()
+
+ _, err = destPriceRegistry.ApplyPriceUpdatersUpdates(destUser, []common.Address{commitStoreAddress}, []common.Address{})
+ require.NoError(t, err)
+ _, err = destRouter.ApplyRampUpdates(
+ destUser,
+ nil,
+ nil,
+ []router.RouterOffRamp{{SourceChainSelector: sourceChainSelector, OffRamp: offRampAddress}},
+ )
+ require.NoError(t, err)
+
+ // Deploy 2 revertable (one SS one non-SS)
+ revertingMessageReceiver1Address, _, _, err := maybe_revert_message_receiver.DeployMaybeRevertMessageReceiver(destUser, destChain, false)
+ require.NoError(t, err)
+ revertingMessageReceiver1, _ := maybe_revert_message_receiver.NewMaybeRevertMessageReceiver(revertingMessageReceiver1Address, destChain)
+ revertingMessageReceiver2Address, _, _, err := maybe_revert_message_receiver.DeployMaybeRevertMessageReceiver(destUser, destChain, false)
+ require.NoError(t, err)
+ revertingMessageReceiver2, _ := maybe_revert_message_receiver.NewMaybeRevertMessageReceiver(revertingMessageReceiver2Address, destChain)
+ // Need to commit here, or we will hit the block gas limit when deploying the executor
+ sourceChain.Commit()
+ destChain.Commit()
+
+ // Ensure we have at least finality blocks.
+ for i := 0; i < 50; i++ {
+ sourceChain.Commit()
+ destChain.Commit()
+ }
+
+ source := SourceChain{
+ Common: Common{
+ ChainID: sourceChainID,
+ ChainSelector: sourceChainSelector,
+ User: sourceUser,
+ Chain: sourceChain,
+ LinkToken: sourceLinkToken,
+ LinkTokenPool: sourceLinkPool,
+ CustomToken: sourceCustomToken,
+ ARM: sourceARM,
+ ARMProxy: sourceARMProxy,
+ PriceRegistry: srcPriceRegistry,
+ WrappedNative: sourceWrapped,
+ WrappedNativePool: sourceWeth9Pool,
+ TokenAdminRegistry: sourceTokenAdminRegistry,
+ },
+ Router: sourceRouter,
+ OnRamp: onRamp,
+ }
+ dest := DestinationChain{
+ Common: Common{
+ ChainID: destChainID,
+ ChainSelector: destChainSelector,
+ User: destUser,
+ Chain: destChain,
+ LinkToken: destLinkToken,
+ LinkTokenPool: destLinkPool,
+ CustomToken: destCustomToken,
+ ARM: destARM,
+ ARMProxy: destARMProxy,
+ PriceRegistry: destPriceRegistry,
+ WrappedNative: destWrapped,
+ WrappedNativePool: destWrappedPool,
+ TokenAdminRegistry: destTokenAdminRegistry,
+ },
+ CommitStoreHelper: commitStoreHelper,
+ CommitStore: commitStore,
+ Router: destRouter,
+ OffRamp: offRamp,
+ Receivers: []MaybeRevertReceiver{{Receiver: revertingMessageReceiver1, Strict: false}, {Receiver: revertingMessageReceiver2, Strict: true}},
+ }
+
+ return CCIPContracts{
+ Source: source,
+ Dest: dest,
+ }
+}
+
+// SendRequest submits msg to the source router via CcipSend and waits for the
+// transaction to be confirmed on the source chain before returning it.
+func (c *CCIPContracts) SendRequest(t *testing.T, msg router.ClientEVM2AnyMessage) *types.Transaction {
+	sendTx, sendErr := c.Source.Router.CcipSend(c.Source.User, c.Dest.ChainSelector, msg)
+	require.NoError(t, sendErr)
+	ConfirmTxs(t, []*types.Transaction{sendTx}, c.Source.Chain)
+	return sendTx
+}
+
+// AssertExecState parses an ExecutionStateChanged event out of log and fails the
+// test if the reported state differs from the expected one. An optional offramp
+// address may be supplied via offRampOpts; otherwise c.Dest.OffRamp is used.
+func (c *CCIPContracts) AssertExecState(t *testing.T, log logpoller.Log, state MessageExecutionState, offRampOpts ...common.Address) {
+	offRamp := c.Dest.OffRamp
+	if len(offRampOpts) > 0 {
+		var err error
+		offRamp, err = evm_2_evm_offramp.NewEVM2EVMOffRamp(offRampOpts[0], c.Dest.Chain)
+		require.NoError(t, err)
+	} else {
+		require.NotNil(t, offRamp, "no offRamp configured")
+	}
+	event, err := offRamp.ParseExecutionStateChanged(log.ToGethLog())
+	require.NoError(t, err)
+	if state != MessageExecutionState(event.State) {
+		// Surface the revert payload to help diagnose the failed execution.
+		t.Log("Execution failed", hexutil.Encode(event.ReturnData))
+		t.Fail()
+	}
+}
+
+// GetEVMExtraArgsV1 ABI-encodes gasLimit and strict and prefixes the result
+// with the EVMExtraArgsV1 selector tag (see Client.sol).
+func GetEVMExtraArgsV1(gasLimit *big.Int, strict bool) ([]byte, error) {
+	// see Client.sol.
+	evmV1Tag := hexutil.MustDecode("0x97a657c9")
+
+	encoded, err := utils.ABIEncode(`[{"type":"uint256"},{"type":"bool"}]`, gasLimit, strict)
+	if err != nil {
+		return nil, err
+	}
+
+	return append(evmV1Tag, encoded...), nil
+}
+
+// GetEVMExtraArgsV2 ABI-encodes gasLimit and allowOutOfOrder and prefixes the
+// result with the EVMExtraArgsV2 selector tag (see Client.sol).
+func GetEVMExtraArgsV2(gasLimit *big.Int, allowOutOfOrder bool) ([]byte, error) {
+	EVMV2Tag := []byte{0x18, 0x1d, 0xcf, 0x10}
+
+	encodedArgs, err := utils.ABIEncode(`[{"type":"uint256"},{"type":"bool"}]`, gasLimit, allowOutOfOrder)
+	if err != nil {
+		return nil, err
+	}
+
+	return append(EVMV2Tag, encodedArgs...), nil
+}
+
+// ManualExecArgs bundles everything needed to manually re-execute a previously
+// failed CCIP message on the destination chain; see ExecuteManually.
+type ManualExecArgs struct {
+	SourceChainID, DestChainID uint64
+	DestUser                   *bind.TransactOpts
+	SourceChain, DestChain     bind.ContractBackend
+	SourceStartBlock           *big.Int // the block in/after which failed ccip-send transaction was triggered
+	DestStartBlock             uint64   // the start block for filtering ReportAccepted event (including the failed seq num)
+	// in destination chain. if not provided to be derived by ApproxDestStartBlock method
+	DestLatestBlockNum uint64 // current block number in destination
+	DestDeployedAt     uint64 // destination block number for the initial destination contract deployment.
+	// Can be any number before the tx was reverted in destination chain. Preferably this needs to be set up with
+	// a value greater than zero to avoid performance issue in locating approximate destination block
+	SendReqLogIndex uint   // log index of the CCIPSendRequested log in source chain
+	SendReqTxHash   string // tx hash of the ccip-send transaction for which execution was reverted
+	CommitStore     string // hex address of the destination commit store
+	OnRamp          string // hex address of the source onramp
+	OffRamp         string // hex address of the destination offramp
+	SeqNr           uint64 // sequence number of the message to execute; derived from logs when 0
+	GasLimit        *big.Int // optional gas-limit override applied to the manual execution only
+}
+
+// ApproxDestStartBlock attempts to locate a block in destination chain with timestamp closest to the timestamp of the block
+// in source chain in which ccip-send transaction was included.
+// It uses binary search to locate the block with the closest timestamp, then walks
+// backwards so the chosen block is not newer than the source block.
+// Providing a value of args.DestDeployedAt ensures better performance by reducing the range of block numbers to be traversed.
+// On success the result is stored in args.DestStartBlock.
+func (args *ManualExecArgs) ApproxDestStartBlock() error {
+	sourceBlockHdr, err := args.SourceChain.HeaderByNumber(context.Background(), args.SourceStartBlock)
+	if err != nil {
+		return err
+	}
+	sendTxTime := sourceBlockHdr.Time
+	maxBlockNum := args.DestLatestBlockNum
+	// Lower bound of the binary search; ideally the destination deployment block.
+	minBlockNum := args.DestDeployedAt
+	closestBlockNum := uint64(math.Floor((float64(maxBlockNum) + float64(minBlockNum)) / 2))
+	var closestBlockHdr *types.Header
+	closestBlockHdr, err = args.DestChain.HeaderByNumber(context.Background(), big.NewInt(int64(closestBlockNum)))
+	if err != nil {
+		return err
+	}
+	// to reduce the number of RPC calls increase the value of blockOffset
+	blockOffset := uint64(10)
+	for {
+		blockNum := closestBlockHdr.Number.Uint64()
+		if minBlockNum > maxBlockNum {
+			break
+		}
+		// Compute |blockTime - sendTxTime| branch-wise: subtracting uint64s and
+		// converting to float64 afterwards would wrap around when the block is
+		// older than the send tx, making the <60s early-exit unreachable.
+		var timeDiff uint64
+		if closestBlockHdr.Time > sendTxTime {
+			timeDiff = closestBlockHdr.Time - sendTxTime
+		} else {
+			timeDiff = sendTxTime - closestBlockHdr.Time
+		}
+		// break if the difference in timestamp is lesser than 1 minute
+		if timeDiff < 60 {
+			break
+		} else if closestBlockHdr.Time > sendTxTime {
+			maxBlockNum = blockNum - 1
+		} else {
+			minBlockNum = blockNum + 1
+		}
+		closestBlockNum = uint64(math.Floor((float64(maxBlockNum) + float64(minBlockNum)) / 2))
+		closestBlockHdr, err = args.DestChain.HeaderByNumber(context.Background(), big.NewInt(int64(closestBlockNum)))
+		if err != nil {
+			return err
+		}
+	}
+
+	// Step back until the candidate block is not newer than the send transaction.
+	// Check before subtracting: uint64 subtraction would wrap past zero and the
+	// old `closestBlockNum <= 0` guard could never fire on an unsigned value.
+	for closestBlockHdr.Time > sendTxTime {
+		if closestBlockNum <= blockOffset {
+			return fmt.Errorf("approx destination blocknumber not found")
+		}
+		closestBlockNum = closestBlockNum - blockOffset
+		closestBlockHdr, err = args.DestChain.HeaderByNumber(context.Background(), big.NewInt(int64(closestBlockNum)))
+		if err != nil {
+			return err
+		}
+	}
+	args.DestStartBlock = closestBlockHdr.Number.Uint64()
+	fmt.Println("using approx destination start block number", args.DestStartBlock)
+	return nil
+}
+
+// FindSeqNrFromCCIPSendRequested scans CCIPSendRequested logs emitted by the
+// onramp from args.SourceStartBlock onwards and returns the sequence number of
+// the message whose log index and tx hash match args.SendReqLogIndex and
+// args.SendReqTxHash. An error is returned when iteration fails or no matching
+// log is found.
+func (args *ManualExecArgs) FindSeqNrFromCCIPSendRequested() (uint64, error) {
+	var seqNr uint64
+	onRampContract, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(common.HexToAddress(args.OnRamp), args.SourceChain)
+	if err != nil {
+		return seqNr, err
+	}
+	iterator, err := onRampContract.FilterCCIPSendRequested(&bind.FilterOpts{
+		Start: args.SourceStartBlock.Uint64(),
+	})
+	if err != nil {
+		return seqNr, err
+	}
+	// Always release the underlying log stream; it was previously leaked.
+	defer iterator.Close()
+	for iterator.Next() {
+		if iterator.Event.Raw.Index == args.SendReqLogIndex &&
+			iterator.Event.Raw.TxHash.Hex() == args.SendReqTxHash {
+			seqNr = iterator.Event.Message.SequenceNumber
+			break
+		}
+	}
+	// Distinguish an iteration/RPC failure from a genuine "not found".
+	if err := iterator.Error(); err != nil {
+		return seqNr, err
+	}
+	if seqNr == 0 {
+		return seqNr,
+			fmt.Errorf("no CCIPSendRequested logs found for logIndex %d starting from block number %d", args.SendReqLogIndex, args.SourceStartBlock)
+	}
+	return seqNr, nil
+}
+
+// ExecuteManually validates args, locates the accepted commit report whose
+// sequence-number interval contains args.SeqNr, and manually executes that
+// message on the destination offramp via execute.
+//
+// SeqNr may be left as 0, in which case it is derived from the
+// CCIPSendRequested log identified by SendReqLogIndex/SendReqTxHash.
+// NOTE(review): a SendReqLogIndex of 0 is treated as "not provided", so a send
+// request that genuinely sits at log index 0 cannot be resolved this way —
+// callers should pass SeqNr explicitly in that case; confirm with callers.
+func (args *ManualExecArgs) ExecuteManually() (*types.Transaction, error) {
+	// Chain ids and the destination transactor are required to build the tx.
+	if args.SourceChainID == 0 ||
+		args.DestChainID == 0 ||
+		args.DestUser == nil {
+		return nil, fmt.Errorf("chain ids and owners are mandatory for source and dest chain")
+	}
+	if !common.IsHexAddress(args.CommitStore) ||
+		!common.IsHexAddress(args.OffRamp) ||
+		!common.IsHexAddress(args.OnRamp) {
+		return nil, fmt.Errorf("contract addresses must be valid hex address")
+	}
+	if args.SendReqTxHash == "" {
+		return nil, fmt.Errorf("tx hash of ccip-send request are required")
+	}
+	if args.SourceStartBlock == nil {
+		return nil, fmt.Errorf("must provide the value of source block in/after which ccip-send tx was included")
+	}
+	if args.SeqNr == 0 {
+		if args.SendReqLogIndex == 0 {
+			return nil, fmt.Errorf("must provide the value of log index of ccip-send request")
+		}
+		// locate seq nr from CCIPSendRequested log
+		seqNr, err := args.FindSeqNrFromCCIPSendRequested()
+		if err != nil {
+			return nil, err
+		}
+		args.SeqNr = seqNr
+	}
+	commitStore, err := commit_store.NewCommitStore(common.HexToAddress(args.CommitStore), args.DestChain)
+	if err != nil {
+		return nil, err
+	}
+	if args.DestStartBlock < 1 {
+		// No explicit start block given; approximate one from block timestamps.
+		err = args.ApproxDestStartBlock()
+		if err != nil {
+			return nil, err
+		}
+	}
+	iterator, err := commitStore.FilterReportAccepted(&bind.FilterOpts{Start: args.DestStartBlock})
+	if err != nil {
+		return nil, err
+	}
+
+	// Scan accepted reports for the one whose interval covers the target seq nr.
+	var commitReport *commit_store.CommitStoreCommitReport
+	for iterator.Next() {
+		if iterator.Event.Report.Interval.Min <= args.SeqNr && iterator.Event.Report.Interval.Max >= args.SeqNr {
+			commitReport = &iterator.Event.Report
+			fmt.Println("Found root")
+			break
+		}
+	}
+	if commitReport == nil {
+		return nil, fmt.Errorf("unable to find seq num %d in commit report", args.SeqNr)
+	}
+
+	return args.execute(commitReport)
+}
+
+// execute rebuilds the merkle tree for the given commit report from the
+// onramp's CCIPSendRequested logs, produces a proof for the message at
+// args.SeqNr, and submits a ManuallyExecute transaction to the destination
+// offramp.
+func (args *ManualExecArgs) execute(report *commit_store.CommitStoreCommitReport) (*types.Transaction, error) {
+	log.Info().Msg("Executing request manually")
+	seqNr := args.SeqNr
+	// Build a merkle tree for the report
+	mctx := hashutil.NewKeccak()
+	onRampContract, err := evm_2_evm_onramp_1_2_0.NewEVM2EVMOnRamp(common.HexToAddress(args.OnRamp), args.SourceChain)
+	if err != nil {
+		return nil, err
+	}
+	leafHasher := v1_2_0.NewLeafHasher(args.SourceChainID, args.DestChainID, common.HexToAddress(args.OnRamp), mctx, onRampContract)
+	if leafHasher == nil {
+		return nil, fmt.Errorf("unable to create leaf hasher")
+	}
+
+	var leaves [][32]byte
+	var curr, prove int
+	var msgs []evm_2_evm_offramp.InternalEVM2EVMMessage
+	var manualExecGasLimits []*big.Int
+	var tokenData [][][]byte
+	sendRequestedIterator, err := onRampContract.FilterCCIPSendRequested(&bind.FilterOpts{
+		Start: args.SourceStartBlock.Uint64(),
+	})
+	if err != nil {
+		return nil, err
+	}
+	// Hash every message inside the report's interval; the leaf order must match
+	// the order the root was committed with or the rebuilt root will not match.
+	for sendRequestedIterator.Next() {
+		if sendRequestedIterator.Event.Message.SequenceNumber <= report.Interval.Max &&
+			sendRequestedIterator.Event.Message.SequenceNumber >= report.Interval.Min {
+			fmt.Println("Found seq num", sendRequestedIterator.Event.Message.SequenceNumber, report.Interval)
+			hash, err2 := leafHasher.HashLeaf(sendRequestedIterator.Event.Raw)
+			if err2 != nil {
+				return nil, err2
+			}
+			leaves = append(leaves, hash)
+			if sendRequestedIterator.Event.Message.SequenceNumber == seqNr {
+				// This is the message being manually executed; remember its leaf index.
+				fmt.Printf("Found proving %d %+v\n", curr, sendRequestedIterator.Event.Message)
+				var tokensAndAmounts []evm_2_evm_offramp.ClientEVMTokenAmount
+				for _, tokenAndAmount := range sendRequestedIterator.Event.Message.TokenAmounts {
+					tokensAndAmounts = append(tokensAndAmounts, evm_2_evm_offramp.ClientEVMTokenAmount{
+						Token:  tokenAndAmount.Token,
+						Amount: tokenAndAmount.Amount,
+					})
+				}
+				msg := evm_2_evm_offramp.InternalEVM2EVMMessage{
+					SourceChainSelector: sendRequestedIterator.Event.Message.SourceChainSelector,
+					Sender:              sendRequestedIterator.Event.Message.Sender,
+					Receiver:            sendRequestedIterator.Event.Message.Receiver,
+					SequenceNumber:      sendRequestedIterator.Event.Message.SequenceNumber,
+					GasLimit:            sendRequestedIterator.Event.Message.GasLimit,
+					Strict:              sendRequestedIterator.Event.Message.Strict,
+					Nonce:               sendRequestedIterator.Event.Message.Nonce,
+					FeeToken:            sendRequestedIterator.Event.Message.FeeToken,
+					FeeTokenAmount:      sendRequestedIterator.Event.Message.FeeTokenAmount,
+					Data:                sendRequestedIterator.Event.Message.Data,
+					TokenAmounts:        tokensAndAmounts,
+					SourceTokenData:     sendRequestedIterator.Event.Message.SourceTokenData,
+					MessageId:           sendRequestedIterator.Event.Message.MessageId,
+				}
+				// NOTE: msg is appended (copied) before the GasLimit override below,
+				// so the executed message keeps its original gas limit while the
+				// override only feeds manualExecGasLimits.
+				msgs = append(msgs, msg)
+				if args.GasLimit != nil {
+					msg.GasLimit = args.GasLimit
+				}
+				manualExecGasLimits = append(manualExecGasLimits, msg.GasLimit)
+				var msgTokenData [][]byte
+				// One empty offchain-token-data entry per token transfer.
+				for range sendRequestedIterator.Event.Message.TokenAmounts {
+					msgTokenData = append(msgTokenData, []byte{})
+				}
+
+				tokenData = append(tokenData, msgTokenData)
+				prove = curr
+			}
+			curr++
+		}
+	}
+	sendRequestedIterator.Close()
+	if msgs == nil {
+		return nil, fmt.Errorf("unable to find msg with seqNr %d", seqNr)
+	}
+	tree, err := merklemulti.NewTree(mctx, leaves)
+	if err != nil {
+		return nil, err
+	}
+	// The locally rebuilt root must match the committed one or the proof is invalid.
+	if tree.Root() != report.MerkleRoot {
+		return nil, fmt.Errorf("root doesn't match")
+	}
+
+	proof, err := tree.Prove([]int{prove})
+	if err != nil {
+		return nil, err
+	}
+
+	offRampProof := evm_2_evm_offramp.InternalExecutionReport{
+		Messages:          msgs,
+		OffchainTokenData: tokenData,
+		Proofs:            proof.Hashes,
+		ProofFlagBits:     abihelpers.ProofFlagsToBits(proof.SourceFlags),
+	}
+	offRamp, err := evm_2_evm_offramp.NewEVM2EVMOffRamp(common.HexToAddress(args.OffRamp), args.DestChain)
+	if err != nil {
+		return nil, err
+	}
+	// Execute.
+	return offRamp.ManuallyExecute(args.DestUser, offRampProof, manualExecGasLimits)
+}
+
+// ExecuteMessage manually executes the CCIP message identified by the
+// send-request log req and its tx hash, asserts the execution transaction
+// succeeded on the destination chain, and returns the executed sequence number.
+// destStartBlock may be 0, in which case ExecuteManually approximates it.
+func (c *CCIPContracts) ExecuteMessage(
+	t *testing.T,
+	req logpoller.Log,
+	txHash common.Hash,
+	destStartBlock uint64,
+) uint64 {
+	t.Log("Executing request manually")
+	sendReqReceipt, err := c.Source.Chain.TransactionReceipt(context.Background(), txHash)
+	require.NoError(t, err)
+	args := ManualExecArgs{
+		SourceChainID:      c.Source.ChainID,
+		DestChainID:        c.Dest.ChainID,
+		DestUser:           c.Dest.User,
+		SourceChain:        c.Source.Chain,
+		DestChain:          c.Dest.Chain,
+		SourceStartBlock:   sendReqReceipt.BlockNumber,
+		DestStartBlock:     destStartBlock,
+		DestLatestBlockNum: c.Dest.Chain.Blockchain().CurrentBlock().Number.Uint64(),
+		SendReqLogIndex:    uint(req.LogIndex),
+		SendReqTxHash:      txHash.String(),
+		CommitStore:        c.Dest.CommitStore.Address().String(),
+		OnRamp:             c.Source.OnRamp.Address().String(),
+		OffRamp:            c.Dest.OffRamp.Address().String(),
+	}
+	tx, err := args.ExecuteManually()
+	require.NoError(t, err)
+	// Mine the manual-exec tx on both simulated chains before fetching the receipt.
+	c.Dest.Chain.Commit()
+	c.Source.Chain.Commit()
+	rec, err := c.Dest.Chain.TransactionReceipt(context.Background(), tx.Hash())
+	require.NoError(t, err)
+	require.Equal(t, uint64(1), rec.Status, "manual execution failed")
+	t.Logf("Manual Execution completed for seqNum %d", args.SeqNr)
+	return args.SeqNr
+}
+
+// GetBalance returns the ERC20 balance of addr for the token deployed at tokenAddr.
+func GetBalance(t *testing.T, chain bind.ContractBackend, tokenAddr common.Address, addr common.Address) *big.Int {
+	tokenContract, err := link_token_interface.NewLinkToken(tokenAddr, chain)
+	require.NoError(t, err)
+	balance, err := tokenContract.BalanceOf(nil, addr)
+	require.NoError(t, err)
+	return balance
+}
diff --git a/core/services/ocr2/plugins/ccip/testhelpers/config.go b/core/services/ocr2/plugins/ccip/testhelpers/config.go
new file mode 100644
index 00000000000..f70f1954f18
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/testhelpers/config.go
@@ -0,0 +1,73 @@
+// Package testhelpers provides a set of configs that should be used only within test suites.
+
+package testhelpers
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0"
+)
+
+var PermissionLessExecutionThresholdSeconds = uint32(FirstBlockAge.Seconds())
+
+// CreateDefaultCommitOnchainConfig ABI-encodes a CommitOnchainConfig pointing at
+// the destination price registry, failing the test on encoding errors.
+func (c *CCIPContracts) CreateDefaultCommitOnchainConfig(t *testing.T) []byte {
+	// Named "encoded" so the imported chainlink-common "config" package is not shadowed.
+	encoded, err := abihelpers.EncodeAbiStruct(ccipdata.CommitOnchainConfig{
+		PriceRegistry: c.Dest.PriceRegistry.Address(),
+	})
+	require.NoError(t, err)
+	return encoded
+}
+
+// CreateDefaultCommitOffchainConfig encodes a commit offchain config with a
+// 10s fee-update heartbeat and a 5s inflight cache expiry.
+func (c *CCIPContracts) CreateDefaultCommitOffchainConfig(t *testing.T) []byte {
+	return c.createCommitOffchainConfig(t, 10*time.Second, 5*time.Second)
+}
+
+// createCommitOffchainConfig builds and encodes a commit offchain config from
+// the given fee-update heartbeat and inflight cache expiry, failing the test on
+// encoding errors.
+func (c *CCIPContracts) createCommitOffchainConfig(t *testing.T, feeUpdateHeartBeat time.Duration, inflightCacheExpiry time.Duration) []byte {
+	// Named "encoded": the previous local "config" shadowed the chainlink-common
+	// "config" package in the very statement that called config.MustNewDuration.
+	encoded, err := NewCommitOffchainConfig(
+		*config.MustNewDuration(feeUpdateHeartBeat),
+		1,
+		1,
+		*config.MustNewDuration(feeUpdateHeartBeat),
+		1,
+		*config.MustNewDuration(inflightCacheExpiry),
+	).Encode()
+	require.NoError(t, err)
+	return encoded
+}
+
+// CreateDefaultExecOnchainConfig ABI-encodes a v1.5.0 ExecOnchainConfig wired to
+// the destination router and price registry, failing the test on encoding errors.
+func (c *CCIPContracts) CreateDefaultExecOnchainConfig(t *testing.T) []byte {
+	// Named "encoded" so the imported chainlink-common "config" package is not shadowed.
+	encoded, err := abihelpers.EncodeAbiStruct(v1_5_0.ExecOnchainConfig{
+		PermissionLessExecutionThresholdSeconds: PermissionLessExecutionThresholdSeconds,
+		Router:                                  c.Dest.Router.Address(),
+		PriceRegistry:                           c.Dest.PriceRegistry.Address(),
+		MaxDataBytes:                            1e5,
+		MaxNumberOfTokensPerMsg:                 5,
+		MaxPoolReleaseOrMintGas:                 200_000,
+		MaxTokenTransferGas:                     100_000,
+	})
+	require.NoError(t, err)
+	return encoded
+}
+
+// CreateDefaultExecOffchainConfig encodes an exec offchain config with a
+// 1-minute inflight cache expiry and a 1-minute root snooze time.
+func (c *CCIPContracts) CreateDefaultExecOffchainConfig(t *testing.T) []byte {
+	return c.createExecOffchainConfig(t, 1*time.Minute, 1*time.Minute)
+}
+
+// createExecOffchainConfig builds and encodes an exec offchain config from the
+// given inflight cache expiry and root snooze time, failing the test on
+// encoding errors.
+func (c *CCIPContracts) createExecOffchainConfig(t *testing.T, inflightCacheExpiry time.Duration, rootSnoozeTime time.Duration) []byte {
+	// Named "encoded": the previous local "config" shadowed the chainlink-common
+	// "config" package referenced by config.MustNewDuration in the same statement.
+	encoded, err := NewExecOffchainConfig(
+		1,
+		5_000_000,
+		0.07,
+		*config.MustNewDuration(inflightCacheExpiry),
+		*config.MustNewDuration(rootSnoozeTime),
+	).Encode()
+	require.NoError(t, err)
+	return encoded
+}
diff --git a/core/services/ocr2/plugins/ccip/testhelpers/integration/chainlink.go b/core/services/ocr2/plugins/ccip/testhelpers/integration/chainlink.go
new file mode 100644
index 00000000000..177ccf323b7
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/testhelpers/integration/chainlink.go
@@ -0,0 +1,1035 @@
+package integrationtesthelpers
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "math/big"
+ "net/http"
+ "net/http/httptest"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/common"
+ types3 "github.com/ethereum/go-ethereum/core/types"
+ "github.com/google/uuid"
+ "github.com/hashicorp/consul/sdk/freeport"
+ "github.com/jmoiron/sqlx"
+ "github.com/onsi/gomega"
+ "github.com/pkg/errors"
+
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+ "k8s.io/utils/pointer" //nolint:staticcheck
+
+ "github.com/smartcontractkit/libocr/commontypes"
+ "github.com/smartcontractkit/libocr/offchainreporting2/confighelper"
+ types4 "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+ "github.com/smartcontractkit/chainlink-common/pkg/loop"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ coretypes "github.com/smartcontractkit/chainlink-common/pkg/types/core/mocks"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ v2 "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ evmUtils "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm"
+ configv2 "github.com/smartcontractkit/chainlink/v2/core/config/toml"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/logger/audit"
+ "github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
+ feeds2 "github.com/smartcontractkit/chainlink/v2/core/services/feeds"
+ feedsMocks "github.com/smartcontractkit/chainlink/v2/core/services/feeds/mocks"
+ pb "github.com/smartcontractkit/chainlink/v2/core/services/feeds/proto"
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key"
+ ksMocks "github.com/smartcontractkit/chainlink/v2/core/services/keystore/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/validate"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocrbootstrap"
+ evmrelay "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
+ clutils "github.com/smartcontractkit/chainlink/v2/core/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/utils/crypto"
+ "github.com/smartcontractkit/chainlink/v2/plugins"
+)
+
+// OCR2 job spec TOML templates used when proposing CCIP jobs through the
+// feeds manager (see ApproveJobSpecs). Placeholders are filled positionally
+// via fmt.Sprintf.
+const (
+	// execSpecTemplate placeholders, in order: contractID, ocrKeyBundleID,
+	// transmitterID, USDC SourceMessageTransmitterAddress, SourceTokenAddress.
+	execSpecTemplate = `
+		type = "offchainreporting2"
+		schemaVersion = 1
+		name = "ccip-exec-1"
+		externalJobID = "67ffad71-d90f-4fe3-b4e4-494924b707fb"
+		forwardingAllowed = false
+		maxTaskDuration = "0s"
+		contractID = "%s"
+		contractConfigConfirmations = 1
+		contractConfigTrackerPollInterval = "20s"
+		ocrKeyBundleID = "%s"
+		relay = "evm"
+		pluginType = "ccip-execution"
+		transmitterID = "%s"
+
+		[relayConfig]
+		chainID = 1_337
+
+		[pluginConfig]
+		destStartBlock = 50
+
+		[pluginConfig.USDCConfig]
+		AttestationAPI = "http://blah.com"
+		SourceMessageTransmitterAddress = "%s"
+		SourceTokenAddress = "%s"
+		AttestationAPITimeoutSeconds = 10
+	`
+	// commitSpecTemplatePipeline placeholders, in order: contractID,
+	// ocrKeyBundleID, transmitterID, offRamp address, tokenPricesUSDPipeline.
+	commitSpecTemplatePipeline = `
+		type = "offchainreporting2"
+		schemaVersion = 1
+		name = "ccip-commit-1"
+		externalJobID = "13c997cf-1a14-4ab7-9068-07ee6d2afa55"
+		forwardingAllowed = false
+		maxTaskDuration = "0s"
+		contractID = "%s"
+		contractConfigConfirmations = 1
+		contractConfigTrackerPollInterval = "20s"
+		ocrKeyBundleID = "%s"
+		relay = "evm"
+		pluginType = "ccip-commit"
+		transmitterID = "%s"
+
+		[relayConfig]
+		chainID = 1_337
+
+		[pluginConfig]
+		destStartBlock = 50
+		offRamp = "%s"
+		tokenPricesUSDPipeline = """
+		%s
+		"""
+	`
+	// commitSpecTemplateDynamicPriceGetter placeholders, in order: contractID,
+	// ocrKeyBundleID, transmitterID, offRamp address, priceGetterConfig.
+	commitSpecTemplateDynamicPriceGetter = `
+		type = "offchainreporting2"
+		schemaVersion = 1
+		name = "ccip-commit-1"
+		externalJobID = "13c997cf-1a14-4ab7-9068-07ee6d2afa55"
+		forwardingAllowed = false
+		maxTaskDuration = "0s"
+		contractID = "%s"
+		contractConfigConfirmations = 1
+		contractConfigTrackerPollInterval = "20s"
+		ocrKeyBundleID = "%s"
+		relay = "evm"
+		pluginType = "ccip-commit"
+		transmitterID = "%s"
+
+		[relayConfig]
+		chainID = 1_337
+
+		[pluginConfig]
+		destStartBlock = 50
+		offRamp = "%s"
+		priceGetterConfig = """
+		%s
+		"""
+	`
+)
+
+// Node bundles a running in-memory chainlink application with the OCR2
+// identity it uses in CCIP integration tests.
+type Node struct {
+	App             chainlink.Application // node under test
+	Transmitter     common.Address        // funded sending key on the destination chain
+	PaymentReceiver common.Address        // not populated by the constructors in this file — TODO confirm usage
+	KeyBundle       ocr2key.KeyBundle     // OCR2 signing key bundle
+}
+
+// FindJobIDForContract returns the ID of the node's active OCR2 job whose
+// contract ID matches addr, failing the test when no such job exists.
+func (node *Node) FindJobIDForContract(t *testing.T, addr common.Address) int32 {
+	want := addr.Hex()
+	for _, spec := range node.App.JobSpawner().ActiveJobs() {
+		if spec.Type != job.OffchainReporting2 {
+			continue
+		}
+		if spec.OCR2OracleSpec.ContractID == want {
+			return spec.ID
+		}
+	}
+	t.Fatalf("Could not find job for contract %s", want)
+	return 0
+}
+
+// EventuallyNodeUsesUpdatedPriceRegistry polls (committing blocks on both
+// chains each attempt) until the node's log poller has observed a
+// UsdPerUnitGasUpdated event from the destination PriceRegistry, and returns
+// that log.
+func (node *Node) EventuallyNodeUsesUpdatedPriceRegistry(t *testing.T, ccipContracts CCIPIntegrationTestHarness) logpoller.Log {
+	c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10))
+	require.NoError(t, err)
+	var log logpoller.Log
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		ccipContracts.Source.Chain.Commit()
+		ccipContracts.Dest.Chain.Commit()
+		latest, err := c.LogPoller().LatestLogByEventSigWithConfs(
+			testutils.Context(t),
+			v1_0_0.UsdPerUnitGasUpdated,
+			ccipContracts.Dest.PriceRegistry.Address(),
+			0,
+		)
+		// err can be transient errors such as sql row set empty
+		if err != nil || latest == nil {
+			return false
+		}
+		// Assign to the outer variable. The original `log, err :=` shadowed it,
+		// so callers always received a zero-value log.
+		log = *latest
+		return true
+	}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "node is not using updated price registry %s", ccipContracts.Dest.PriceRegistry.Address().Hex())
+	return log
+}
+
+// EventuallyNodeUsesNewCommitConfig polls (committing blocks on both chains
+// each attempt) until the latest CommitStore config-change log seen by the
+// node's log poller decodes to commitCfg, and returns that log.
+func (node *Node) EventuallyNodeUsesNewCommitConfig(t *testing.T, ccipContracts CCIPIntegrationTestHarness, commitCfg ccipdata.CommitOnchainConfig) logpoller.Log {
+	c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10))
+	require.NoError(t, err)
+	var log logpoller.Log
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		ccipContracts.Source.Chain.Commit()
+		ccipContracts.Dest.Chain.Commit()
+		latest, err := c.LogPoller().LatestLogByEventSigWithConfs(
+			testutils.Context(t),
+			evmrelay.OCR2AggregatorLogDecoder.EventSig(),
+			ccipContracts.Dest.CommitStore.Address(),
+			0,
+		)
+		require.NoError(t, err)
+		if latest == nil {
+			return false
+		}
+		latestCfg, err := DecodeCommitOnChainConfig(latest.Data)
+		require.NoError(t, err)
+		if latestCfg != commitCfg {
+			return false
+		}
+		// Assign to the outer variable. The original `log, err :=` shadowed it,
+		// so the returned log was always zero-valued even on success.
+		log = *latest
+		return true
+	}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "node is using old cfg")
+	return log
+}
+
+// EventuallyNodeUsesNewExecConfig polls (committing blocks on both chains each
+// attempt) until the latest OffRamp config-change log seen by the node's log
+// poller decodes to execCfg, and returns that log.
+func (node *Node) EventuallyNodeUsesNewExecConfig(t *testing.T, ccipContracts CCIPIntegrationTestHarness, execCfg v1_5_0.ExecOnchainConfig) logpoller.Log {
+	c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10))
+	require.NoError(t, err)
+	var log logpoller.Log
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		ccipContracts.Source.Chain.Commit()
+		ccipContracts.Dest.Chain.Commit()
+		latest, err := c.LogPoller().LatestLogByEventSigWithConfs(
+			testutils.Context(t),
+			evmrelay.OCR2AggregatorLogDecoder.EventSig(),
+			ccipContracts.Dest.OffRamp.Address(),
+			0,
+		)
+		require.NoError(t, err)
+		if latest == nil {
+			return false
+		}
+		latestCfg, err := DecodeExecOnChainConfig(latest.Data)
+		require.NoError(t, err)
+		if latestCfg != execCfg {
+			return false
+		}
+		// Assign to the outer variable. The original `log, err :=` shadowed it,
+		// so the returned log was always zero-valued even on success.
+		log = *latest
+		return true
+	}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "node is using old cfg")
+	return log
+}
+
+// EventuallyHasReqSeqNum polls (committing blocks on both chains each attempt)
+// until the node's log poller holds exactly one CCIPSendRequested log on
+// onRamp with the given sequence number, and returns that log.
+func (node *Node) EventuallyHasReqSeqNum(t *testing.T, ccipContracts *CCIPIntegrationTestHarness, onRamp common.Address, seqNum int) logpoller.Log {
+	c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Source.ChainID, 10))
+	require.NoError(t, err)
+	var log logpoller.Log
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		ccipContracts.Source.Chain.Commit()
+		ccipContracts.Dest.Chain.Commit()
+		// Range query with min == max == seqNum selects exactly that seq number.
+		lgs, err := c.LogPoller().LogsDataWordRange(
+			testutils.Context(t),
+			v1_2_0.CCIPSendRequestEventSig,
+			onRamp,
+			v1_2_0.CCIPSendRequestSeqNumIndex,
+			abihelpers.EvmWord(uint64(seqNum)),
+			abihelpers.EvmWord(uint64(seqNum)),
+			1,
+		)
+		require.NoError(t, err)
+		t.Log("Send requested", len(lgs))
+		if len(lgs) == 1 {
+			log = lgs[0]
+			return true
+		}
+		return false
+	}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "eventually has seq num")
+	return log
+}
+
+// EventuallyHasExecutedSeqNums polls (committing blocks on both chains each
+// attempt) until the node's log poller holds ExecutionStateChanged logs on
+// offRamp for every sequence number in [minSeqNum, maxSeqNum], and returns them.
+func (node *Node) EventuallyHasExecutedSeqNums(t *testing.T, ccipContracts *CCIPIntegrationTestHarness, offRamp common.Address, minSeqNum int, maxSeqNum int) []logpoller.Log {
+	c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10))
+	require.NoError(t, err)
+	var logs []logpoller.Log
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		ccipContracts.Source.Chain.Commit()
+		ccipContracts.Dest.Chain.Commit()
+		lgs, err := c.LogPoller().IndexedLogsTopicRange(
+			testutils.Context(t),
+			v1_0_0.ExecutionStateChangedEvent,
+			offRamp,
+			v1_0_0.ExecutionStateChangedSeqNrIndex,
+			abihelpers.EvmWord(uint64(minSeqNum)),
+			abihelpers.EvmWord(uint64(maxSeqNum)),
+			1,
+		)
+		require.NoError(t, err)
+		t.Logf("Have executed logs %d want %d", len(lgs), maxSeqNum-minSeqNum+1)
+		// Succeed only when the whole inclusive range has been executed.
+		if len(lgs) == maxSeqNum-minSeqNum+1 {
+			logs = lgs
+			t.Logf("Seq Num %d-%d executed", minSeqNum, maxSeqNum)
+			return true
+		}
+		return false
+	}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "eventually has not executed seq num")
+	return logs
+}
+
+// ConsistentlySeqNumHasNotBeenExecuted asserts, over a 10s window, that no
+// ExecutionStateChanged log for seqNum appears on offRamp. Returns the
+// (zero-valued, when the assertion holds) last log observed.
+func (node *Node) ConsistentlySeqNumHasNotBeenExecuted(t *testing.T, ccipContracts *CCIPIntegrationTestHarness, offRamp common.Address, seqNum int) logpoller.Log {
+	c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10))
+	require.NoError(t, err)
+	var log logpoller.Log
+	// Consistently + Should(BeFalse): the test fails if the log ever shows up.
+	gomega.NewGomegaWithT(t).Consistently(func() bool {
+		ccipContracts.Source.Chain.Commit()
+		ccipContracts.Dest.Chain.Commit()
+		lgs, err := c.LogPoller().IndexedLogsTopicRange(
+			testutils.Context(t),
+			v1_0_0.ExecutionStateChangedEvent,
+			offRamp,
+			v1_0_0.ExecutionStateChangedSeqNrIndex,
+			abihelpers.EvmWord(uint64(seqNum)),
+			abihelpers.EvmWord(uint64(seqNum)),
+			1,
+		)
+		require.NoError(t, err)
+		t.Log("Executed logs", lgs)
+		if len(lgs) == 1 {
+			log = lgs[0]
+			return true
+		}
+		return false
+	}, 10*time.Second, 1*time.Second).Should(gomega.BeFalse(), "seq number got executed")
+	return log
+}
+
+// AddJob validates spec as an OCR2 oracle job spec and adds it to the node's
+// job spawner, failing the test on any error.
+func (node *Node) AddJob(t *testing.T, spec *OCR2TaskJobSpec) {
+	specString, err := spec.String()
+	require.NoError(t, err)
+	ccipJob, err := validate.ValidatedOracleSpecToml(
+		testutils.Context(t),
+		node.App.GetConfig().OCR2(),
+		node.App.GetConfig().Insecure(),
+		specString,
+		// FIXME Ani
+		nil,
+	)
+	require.NoError(t, err)
+	err = node.App.AddJobV2(context.Background(), &ccipJob)
+	require.NoError(t, err)
+}
+
+// AddBootstrapJob validates spec as an OCR2 bootstrap job spec and adds it to
+// the node's job spawner, failing the test on any error.
+func (node *Node) AddBootstrapJob(t *testing.T, spec *OCR2TaskJobSpec) {
+	specString, err := spec.String()
+	require.NoError(t, err)
+	ccipJob, err := ocrbootstrap.ValidatedBootstrapSpecToml(specString)
+	require.NoError(t, err)
+	err = node.App.AddJobV2(context.Background(), &ccipJob)
+	require.NoError(t, err)
+}
+
+// AddJobsWithSpec fills in this node's OCR key bundle ID and transmitter
+// address on jobSpec, then submits it via AddJob. Note: jobSpec is mutated,
+// so it is re-targeted per node when shared across nodes.
+func (node *Node) AddJobsWithSpec(t *testing.T, jobSpec *OCR2TaskJobSpec) {
+	// set node specific values
+	jobSpec.OCR2OracleSpec.OCRKeyBundleID.SetValid(node.KeyBundle.ID())
+	jobSpec.OCR2OracleSpec.TransmitterID.SetValid(node.Transmitter.Hex())
+	node.AddJob(t, jobSpec)
+}
+
+// setupNodeCCIP boots a full in-memory chainlink application wired to the two
+// simulated chains, creates P2P/ETH/OCR2 keys, funds the new transmitter key
+// with 1 ETH on the destination chain, and returns the app together with its
+// peer ID, transmitter address and OCR2 key bundle. When bootstrapPeerID is
+// non-empty the node is configured to use that peer as its V2 bootstrapper.
+func setupNodeCCIP(
+	t *testing.T,
+	owner *bind.TransactOpts,
+	port int64,
+	dbName string,
+	sourceChain *backends.SimulatedBackend, destChain *backends.SimulatedBackend,
+	sourceChainID *big.Int, destChainID *big.Int,
+	bootstrapPeerID string,
+	bootstrapPort int64,
+) (chainlink.Application, string, common.Address, ocr2key.KeyBundle) {
+	trueRef, falseRef := true, false
+
+	// Do not want to load fixtures as they contain a dummy chainID.
+	loglevel := configv2.LogLevel(zap.DebugLevel)
+	// NOTE: inside this callback `config` still refers to the chainlink-common
+	// config package; the `config` variable declared by this statement is only
+	// in scope after it.
+	config, db := heavyweight.FullTestDBNoFixturesV2(t, func(c *chainlink.Config, _ *chainlink.Secrets) {
+		p2pAddresses := []string{
+			fmt.Sprintf("127.0.0.1:%d", port),
+		}
+		c.Log.Level = &loglevel
+		c.Feature.UICSAKeys = &trueRef
+		c.Feature.FeedsManager = &trueRef
+		c.OCR.Enabled = &falseRef
+		c.OCR.DefaultTransactionQueueDepth = pointer.Uint32(200)
+		c.OCR2.Enabled = &trueRef
+		c.Feature.LogPoller = &trueRef
+		c.P2P.V2.Enabled = &trueRef
+
+		dur, err := config.NewDuration(500 * time.Millisecond)
+		if err != nil {
+			panic(err)
+		}
+		c.P2P.V2.DeltaDial = &dur
+
+		dur2, err := config.NewDuration(5 * time.Second)
+		if err != nil {
+			panic(err)
+		}
+
+		c.P2P.V2.DeltaReconcile = &dur2
+		c.P2P.V2.ListenAddresses = &p2pAddresses
+		c.P2P.V2.AnnounceAddresses = &p2pAddresses
+
+		c.EVM = []*v2.EVMConfig{createConfigV2Chain(sourceChainID), createConfigV2Chain(destChainID)}
+
+		if bootstrapPeerID != "" {
+			// Supply the bootstrap IP and port as a V2 peer address
+			c.P2P.V2.DefaultBootstrappers = &[]commontypes.BootstrapperLocator{
+				{
+					PeerID: bootstrapPeerID, Addrs: []string{
+						fmt.Sprintf("127.0.0.1:%d", bootstrapPort),
+					},
+				},
+			}
+		}
+	})
+
+	lggr := logger.TestLogger(t)
+	ctx := testutils.Context(t)
+
+	// The in-memory geth sim does not let you create a custom ChainID, it will always be 1337.
+	// In particular this means that if you sign an eip155 tx, the chainID used MUST be 1337
+	// and the CHAINID op code will always emit 1337. To work around this to simulate a "multichain"
+	// test, we fake different chainIDs using the wrapped sim cltest.SimulatedBackend so the RPC
+	// appears to operate on different chainIDs and we use an EthKeyStoreSim wrapper which always
+	// signs 1337 see https://github.com/smartcontractkit/chainlink-ccip/blob/a24dd436810250a458d27d8bb3fb78096afeb79c/core/services/ocr2/plugins/ccip/testhelpers/simulated_backend.go#L35
+	sourceClient := client.NewSimulatedBackendClient(t, sourceChain, sourceChainID)
+	destClient := client.NewSimulatedBackendClient(t, destChain, destChainID)
+	csaKeyStore := ksMocks.NewCSA(t)
+
+	key, err := csakey.NewV2()
+	require.NoError(t, err)
+	csaKeyStore.On("GetAll").Return([]csakey.KeyV2{key}, nil)
+	keyStore := NewKsa(db, lggr, csaKeyStore)
+
+	simEthKeyStore := testhelpers.EthKeyStoreSim{
+		ETHKS: keyStore.Eth(),
+		CSAKS: keyStore.CSA(),
+	}
+	mailMon := mailbox.NewMonitor("CCIP", lggr.Named("Mailbox"))
+	evmOpts := chainlink.EVMFactoryConfig{
+		ChainOpts: legacyevm.ChainOpts{
+			AppConfig: config,
+			// Route each chain ID to its simulated backend client.
+			GenEthClient: func(chainID *big.Int) client.Client {
+				if chainID.String() == sourceChainID.String() {
+					return sourceClient
+				} else if chainID.String() == destChainID.String() {
+					return destClient
+				}
+				t.Fatalf("invalid chain ID %v", chainID.String())
+				return nil
+			},
+			MailMon: mailMon,
+			DS:      db,
+		},
+		CSAETHKeystore: simEthKeyStore,
+	}
+	loopRegistry := plugins.NewLoopRegistry(lggr.Named("LoopRegistry"), config.Tracing())
+	relayerFactory := chainlink.RelayerFactory{
+		Logger:               lggr,
+		LoopRegistry:         loopRegistry,
+		GRPCOpts:             loop.GRPCOpts{},
+		CapabilitiesRegistry: coretypes.NewCapabilitiesRegistry(t),
+	}
+	testCtx := testutils.Context(t)
+	// EVM is always enabled for backward compatibility.
+	initOps := []chainlink.CoreRelayerChainInitFunc{
+		chainlink.InitEVM(testCtx, relayerFactory, evmOpts),
+	}
+
+	relayChainInterops, err := chainlink.NewCoreRelayerChainInteroperators(initOps...)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	app, err := chainlink.NewApplication(chainlink.ApplicationOpts{
+		Config:                     config,
+		DS:                         db,
+		KeyStore:                   keyStore,
+		RelayerChainInteroperators: relayChainInterops,
+		Logger:                     lggr,
+		ExternalInitiatorManager:   nil,
+		CloseLogger:                lggr.Sync,
+		UnrestrictedHTTPClient:     &http.Client{},
+		RestrictedHTTPClient:       &http.Client{},
+		AuditLogger:                audit.NoopLogger,
+		MailMon:                    mailMon,
+		LoopRegistry:               plugins.NewLoopRegistry(lggr, config.Tracing()),
+	})
+	require.NoError(t, err)
+	require.NoError(t, app.GetKeyStore().Unlock(ctx, "password"))
+	_, err = app.GetKeyStore().P2P().Create(ctx)
+	require.NoError(t, err)
+
+	p2pIDs, err := app.GetKeyStore().P2P().GetAll()
+	require.NoError(t, err)
+	require.Len(t, p2pIDs, 1)
+	peerID := p2pIDs[0].PeerID()
+
+	_, err = app.GetKeyStore().Eth().Create(testCtx, destChainID)
+	require.NoError(t, err)
+	sendingKeys, err := app.GetKeyStore().Eth().EnabledKeysForChain(testCtx, destChainID)
+	require.NoError(t, err)
+	require.Len(t, sendingKeys, 1)
+	transmitter := sendingKeys[0].Address
+	s, err := app.GetKeyStore().Eth().GetState(testCtx, sendingKeys[0].ID(), destChainID)
+	require.NoError(t, err)
+	lggr.Debug(fmt.Sprintf("Transmitter address %s chainID %s", transmitter, s.EVMChainID.String()))
+
+	// Fund the commitTransmitter address with some ETH
+	n, err := destChain.NonceAt(context.Background(), owner.From, nil)
+	require.NoError(t, err)
+
+	// 1 ETH (1e18 wei), legacy tx at 1 gwei, plain transfer gas limit of 21000.
+	tx := types3.NewTransaction(n, transmitter, big.NewInt(1000000000000000000), 21000, big.NewInt(1000000000), nil)
+	signedTx, err := owner.Signer(owner.From, tx)
+	require.NoError(t, err)
+	err = destChain.SendTransaction(context.Background(), signedTx)
+	require.NoError(t, err)
+	destChain.Commit()
+
+	kb, err := app.GetKeyStore().OCR2().Create(ctx, chaintype.EVM)
+	require.NoError(t, err)
+	return app, peerID.Raw(), transmitter, kb
+}
+
+// createConfigV2Chain returns an enabled EVM chain config for chainId based on
+// the TOML defaults, tweaked for the simulated backend: FixedPrice gas
+// estimation, a raised default gas limit, fast log polling and shallow finality.
+func createConfigV2Chain(chainId *big.Int) *v2.EVMConfig {
+	// NOTE: For the executor jobs, the default of 500k is insufficient for a 3 message batch
+	defaultGasLimit := uint64(5000000)
+	tr := true
+
+	sourceC := v2.Defaults((*evmUtils.Big)(chainId))
+	sourceC.GasEstimator.LimitDefault = &defaultGasLimit
+	fixedPrice := "FixedPrice"
+	sourceC.GasEstimator.Mode = &fixedPrice
+	// Error deliberately ignored: the constant duration cannot fail to parse.
+	d, _ := config.NewDuration(100 * time.Millisecond)
+	sourceC.LogPollInterval = &d
+	fd := uint32(2)
+	sourceC.FinalityDepth = &fd
+	return &v2.EVMConfig{
+		ChainID: (*evmUtils.Big)(chainId),
+		Enabled: &tr,
+		Chain:   sourceC,
+		Nodes:   v2.EVMNodes{&v2.Node{}},
+	}
+}
+
+// CCIPIntegrationTestHarness extends the deployed CCIP contract fixture with
+// the set of oracle nodes and the bootstrap node that run against it.
+type CCIPIntegrationTestHarness struct {
+	testhelpers.CCIPContracts
+	Nodes     []Node // oracle nodes (populated by SetupAndStartNodes)
+	Bootstrap Node   // bootstrap node (populated by SetupAndStartNodes)
+}
+
+// SetupCCIPIntegrationTH deploys the CCIP contracts for the given chain IDs
+// and selectors and wraps them in a harness; nodes are started separately via
+// SetupAndStartNodes.
+func SetupCCIPIntegrationTH(t *testing.T, sourceChainID, sourceChainSelector, destChainId, destChainSelector uint64) CCIPIntegrationTestHarness {
+	return CCIPIntegrationTestHarness{
+		CCIPContracts: testhelpers.SetupCCIPContracts(t, sourceChainID, sourceChainSelector, destChainId, destChainSelector),
+	}
+}
+
+// CreatePricesPipeline spins up two stub HTTP servers returning fixed
+// LINK/USD and ETH/USD prices and returns a token-prices pipeline DOT string
+// referencing them, plus the servers (also closed automatically via t.Cleanup).
+func (c *CCIPIntegrationTestHarness) CreatePricesPipeline(t *testing.T) (string, *httptest.Server, *httptest.Server) {
+	linkUSD := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+		_, err := w.Write([]byte(`{"UsdPerLink": "8000000000000000000"}`))
+		require.NoError(t, err)
+	}))
+	t.Cleanup(linkUSD.Close)
+
+	ethUSD := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+		_, err := w.Write([]byte(`{"UsdPerETH": "1700000000000000000000"}`))
+		require.NoError(t, err)
+	}))
+	t.Cleanup(ethUSD.Close)
+
+	sourceWrappedNative, err := c.Source.Router.GetWrappedNative(nil)
+	require.NoError(t, err)
+	destWrappedNative, err := c.Dest.Router.GetWrappedNative(nil)
+	require.NoError(t, err)
+	// The merge step maps dest LINK, source wrapped native and dest wrapped
+	// native token addresses to their USD prices (both natives use the ETH feed).
+	tokenPricesUSDPipeline := fmt.Sprintf(`
+// Price 1
+link [type=http method=GET url="%s"];
+link_parse [type=jsonparse path="UsdPerLink"];
+link->link_parse;
+eth [type=http method=GET url="%s"];
+eth_parse [type=jsonparse path="UsdPerETH"];
+eth->eth_parse;
+merge [type=merge left="{}" right="{\\\"%s\\\":$(link_parse), \\\"%s\\\":$(eth_parse), \\\"%s\\\":$(eth_parse)}"];`,
+		linkUSD.URL, ethUSD.URL, c.Dest.LinkToken.Address(), sourceWrappedNative, destWrappedNative)
+
+	return tokenPricesUSDPipeline, linkUSD, ethUSD
+}
+
+// AddAllJobs points jobParams at the harness's OffRamp and adds a commit job
+// and an execution job to every oracle node.
+func (c *CCIPIntegrationTestHarness) AddAllJobs(t *testing.T, jobParams CCIPJobSpecParams) {
+	jobParams.OffRamp = c.Dest.OffRamp.Address()
+
+	commitSpec, err := jobParams.CommitJobSpec()
+	require.NoError(t, err)
+	geExecutionSpec, err := jobParams.ExecutionJobSpec()
+	require.NoError(t, err)
+	nodes := c.Nodes
+	for _, node := range nodes {
+		node.AddJobsWithSpec(t, commitSpec)
+		node.AddJobsWithSpec(t, geExecutionSpec)
+	}
+}
+
+// jobSpecProposal builds a feeds-manager job proposal: it generates a spec via
+// f to obtain the contract ID, then renders specTemplate with the contract ID
+// followed by opts as positional Sprintf arguments.
+func (c *CCIPIntegrationTestHarness) jobSpecProposal(t *testing.T, specTemplate string, f func() (*OCR2TaskJobSpec, error), feedsManagerId int64, version int32, opts ...any) feeds2.ProposeJobArgs {
+	spec, err := f()
+	require.NoError(t, err)
+
+	args := []any{spec.OCR2OracleSpec.ContractID}
+	args = append(args, opts...)
+
+	return feeds2.ProposeJobArgs{
+		FeedsManagerID: feedsManagerId,
+		RemoteUUID:     uuid.New(),
+		Multiaddrs:     nil,
+		Version:        version,
+		Spec:           fmt.Sprintf(specTemplate, args...),
+	}
+}
+
+// SetupFeedsManager registers a single mocked feeds manager ("CCIP") on each
+// node that does not already have one, swapping in a no-op connections
+// manager so no real FMS connection is attempted.
+func (c *CCIPIntegrationTestHarness) SetupFeedsManager(t *testing.T) {
+	ctx := testutils.Context(t)
+	for _, node := range c.Nodes {
+		f := node.App.GetFeedsService()
+
+		managers, err := f.ListManagers(ctx)
+		require.NoError(t, err)
+		if len(managers) > 0 {
+			// Use at most one feeds manager, don't register if one already exists
+			continue
+		}
+
+		// Random bytes serve as a throwaway public key for the mock manager.
+		secret := utils.RandomBytes32()
+		pkey, err := crypto.PublicKeyFromHex(hex.EncodeToString(secret[:]))
+		require.NoError(t, err)
+
+		m := feeds2.RegisterManagerParams{
+			Name:      "CCIP",
+			URI:       "http://localhost:8080",
+			PublicKey: *pkey,
+		}
+
+		connManager := feedsMocks.NewConnectionsManager(t)
+		connManager.On("Connect", mock.Anything).Maybe()
+		connManager.On("GetClient", mock.Anything).Maybe().Return(NoopFeedsClient{}, nil)
+		connManager.On("Close").Maybe().Return()
+		connManager.On("IsConnected", mock.Anything).Maybe().Return(true)
+		f.Unsafe_SetConnectionsManager(connManager)
+
+		_, err = f.RegisterManager(testutils.Context(t), m)
+		require.NoError(t, err)
+	}
+}
+
+// ApproveJobSpecs proposes and force-approves, on every node's feeds service,
+// one exec job and one commit job. The commit spec template is chosen by
+// whether jobParams carries a token-prices pipeline or a dynamic price-getter
+// config. Requires SetupFeedsManager to have registered exactly one manager.
+func (c *CCIPIntegrationTestHarness) ApproveJobSpecs(t *testing.T, jobParams CCIPJobSpecParams) {
+	ctx := testutils.Context(t)
+
+	for _, node := range c.Nodes {
+		f := node.App.GetFeedsService()
+		managers, err := f.ListManagers(ctx)
+		require.NoError(t, err)
+		require.Len(t, managers, 1, "expected exactly one feeds manager")
+
+		// Template args after contractID: key bundle, transmitter, then two
+		// random addresses standing in for the USDC transmitter/token.
+		execSpec := c.jobSpecProposal(
+			t,
+			execSpecTemplate,
+			jobParams.ExecutionJobSpec,
+			managers[0].ID,
+			1,
+			node.KeyBundle.ID(),
+			node.Transmitter.Hex(),
+			utils.RandomAddress().String(),
+			utils.RandomAddress().String(),
+		)
+		execId, err := f.ProposeJob(ctx, &execSpec)
+		require.NoError(t, err)
+
+		err = f.ApproveSpec(ctx, execId, true)
+		require.NoError(t, err)
+
+		var commitSpec feeds2.ProposeJobArgs
+		if jobParams.TokenPricesUSDPipeline != "" {
+			commitSpec = c.jobSpecProposal(
+				t,
+				commitSpecTemplatePipeline,
+				jobParams.CommitJobSpec,
+				managers[0].ID,
+				2,
+				node.KeyBundle.ID(),
+				node.Transmitter.Hex(),
+				jobParams.OffRamp.String(),
+				jobParams.TokenPricesUSDPipeline,
+			)
+		} else {
+			commitSpec = c.jobSpecProposal(
+				t,
+				commitSpecTemplateDynamicPriceGetter,
+				jobParams.CommitJobSpec,
+				managers[0].ID,
+				2,
+				node.KeyBundle.ID(),
+				node.Transmitter.Hex(),
+				jobParams.OffRamp.String(),
+				jobParams.PriceGetterConfig,
+			)
+		}
+
+		commitId, err := f.ProposeJob(ctx, &commitSpec)
+		require.NoError(t, err)
+
+		err = f.ApproveSpec(ctx, commitId, true)
+		require.NoError(t, err)
+	}
+}
+
+// AllNodesHaveReqSeqNum waits until every node has observed the send request
+// with seqNum on the onRamp (defaulting to the harness's source OnRamp) and
+// returns the log from the last node checked.
+func (c *CCIPIntegrationTestHarness) AllNodesHaveReqSeqNum(t *testing.T, seqNum int, onRampOpts ...common.Address) logpoller.Log {
+	var log logpoller.Log
+	nodes := c.Nodes
+	var onRamp common.Address
+	if len(onRampOpts) > 0 {
+		onRamp = onRampOpts[0]
+	} else {
+		require.NotNil(t, c.Source.OnRamp, "no onramp configured")
+		onRamp = c.Source.OnRamp.Address()
+	}
+	for _, node := range nodes {
+		log = node.EventuallyHasReqSeqNum(t, c, onRamp, seqNum)
+	}
+	return log
+}
+
+// AllNodesHaveExecutedSeqNums waits until every node has observed execution
+// logs for the whole [minSeqNum, maxSeqNum] range on the offRamp (defaulting
+// to the harness's dest OffRamp) and returns the logs from the last node.
+func (c *CCIPIntegrationTestHarness) AllNodesHaveExecutedSeqNums(t *testing.T, minSeqNum int, maxSeqNum int, offRampOpts ...common.Address) []logpoller.Log {
+	var logs []logpoller.Log
+	nodes := c.Nodes
+	var offRamp common.Address
+
+	if len(offRampOpts) > 0 {
+		offRamp = offRampOpts[0]
+	} else {
+		require.NotNil(t, c.Dest.OffRamp, "no offramp configured")
+		offRamp = c.Dest.OffRamp.Address()
+	}
+	for _, node := range nodes {
+		logs = node.EventuallyHasExecutedSeqNums(t, c, offRamp, minSeqNum, maxSeqNum)
+	}
+	return logs
+}
+
+// NoNodesHaveExecutedSeqNum asserts, for every node in turn, that seqNum is
+// consistently NOT executed on the offRamp (defaulting to the harness's dest
+// OffRamp); returns the last node's (zero-valued on success) observed log.
+func (c *CCIPIntegrationTestHarness) NoNodesHaveExecutedSeqNum(t *testing.T, seqNum int, offRampOpts ...common.Address) logpoller.Log {
+	var log logpoller.Log
+	nodes := c.Nodes
+	var offRamp common.Address
+	if len(offRampOpts) > 0 {
+		offRamp = offRampOpts[0]
+	} else {
+		require.NotNil(t, c.Dest.OffRamp, "no offramp configured")
+		offRamp = c.Dest.OffRamp.Address()
+	}
+	for _, node := range nodes {
+		log = node.ConsistentlySeqNumHasNotBeenExecuted(t, c, offRamp, seqNum)
+	}
+	return log
+}
+
+// EventuallyCommitReportAccepted waits until a ReportAccepted event with a
+// non-empty merkle root is emitted by the commit store (an explicit address
+// may be supplied; defaults to the harness's dest CommitStore) starting from
+// currentBlock, and returns the accepted report.
+// NOTE(review): the g.Expect calls inside Eventually fail the test on the
+// first attempt with no event, rather than retrying — confirm this is intended.
+func (c *CCIPIntegrationTestHarness) EventuallyCommitReportAccepted(t *testing.T, currentBlock uint64, commitStoreOpts ...common.Address) commit_store.CommitStoreCommitReport {
+	var commitStore *commit_store.CommitStore
+	var err error
+	if len(commitStoreOpts) > 0 {
+		commitStore, err = commit_store.NewCommitStore(commitStoreOpts[0], c.Dest.Chain)
+		require.NoError(t, err)
+	} else {
+		require.NotNil(t, c.Dest.CommitStore, "no commitStore configured")
+		commitStore = c.Dest.CommitStore
+	}
+	g := gomega.NewGomegaWithT(t)
+	var report commit_store.CommitStoreCommitReport
+	g.Eventually(func() bool {
+		it, err := commitStore.FilterReportAccepted(&bind.FilterOpts{Start: currentBlock})
+		g.Expect(err).NotTo(gomega.HaveOccurred(), "Error filtering ReportAccepted event")
+		g.Expect(it.Next()).To(gomega.BeTrue(), "No ReportAccepted event found")
+		report = it.Event.Report
+		if report.MerkleRoot != [32]byte{} {
+			t.Log("Report Accepted by commitStore")
+			return true
+		}
+		return false
+	}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "report has not been committed")
+	return report
+}
+
+// EventuallyExecutionStateChangedToSuccess waits until any of the given
+// sequence numbers emits an ExecutionStateChanged event with state Success on
+// the offRamp (an explicit address may be supplied; defaults to the harness's
+// dest OffRamp), committing blocks on both chains between attempts.
+func (c *CCIPIntegrationTestHarness) EventuallyExecutionStateChangedToSuccess(t *testing.T, seqNum []uint64, blockNum uint64, offRampOpts ...common.Address) {
+	var offRamp *evm_2_evm_offramp.EVM2EVMOffRamp
+	var err error
+	if len(offRampOpts) > 0 {
+		offRamp, err = evm_2_evm_offramp.NewEVM2EVMOffRamp(offRampOpts[0], c.Dest.Chain)
+		require.NoError(t, err)
+	} else {
+		require.NotNil(t, c.Dest.OffRamp, "no offRamp configured")
+		offRamp = c.Dest.OffRamp
+	}
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		it, err := offRamp.FilterExecutionStateChanged(&bind.FilterOpts{Start: blockNum}, seqNum, [][32]byte{})
+		require.NoError(t, err)
+		for it.Next() {
+			if cciptypes.MessageExecutionState(it.Event.State) == cciptypes.ExecutionStateSuccess {
+				t.Logf("ExecutionStateChanged event found for seqNum %d", it.Event.SequenceNumber)
+				return true
+			}
+		}
+		c.Source.Chain.Commit()
+		c.Dest.Chain.Commit()
+		return false
+	}, testutils.WaitTimeout(t), time.Second).
+		Should(gomega.BeTrue(), "ExecutionStateChanged Event")
+}
+
+// EventuallyReportCommitted waits until the commit store's expected next
+// sequence number exceeds max (i.e. a report covering max was committed),
+// committing blocks on both chains between attempts, and returns the last
+// observed expected-next sequence number.
+func (c *CCIPIntegrationTestHarness) EventuallyReportCommitted(t *testing.T, max int, commitStoreOpts ...common.Address) uint64 {
+	var commitStore *commit_store.CommitStore
+	var err error
+	var committedSeqNum uint64
+	if len(commitStoreOpts) > 0 {
+		commitStore, err = commit_store.NewCommitStore(commitStoreOpts[0], c.Dest.Chain)
+		require.NoError(t, err)
+	} else {
+		require.NotNil(t, c.Dest.CommitStore, "no commitStore configured")
+		commitStore = c.Dest.CommitStore
+	}
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		minSeqNum, err := commitStore.GetExpectedNextSequenceNumber(nil)
+		require.NoError(t, err)
+		c.Source.Chain.Commit()
+		c.Dest.Chain.Commit()
+		t.Log("next expected seq num reported", minSeqNum)
+		committedSeqNum = minSeqNum
+		return minSeqNum > uint64(max)
+	}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue(), "report has not been committed")
+	return committedSeqNum
+}
+
+// EventuallySendRequested waits until the onRamp (an explicit address may be
+// supplied; defaults to the harness's source OnRamp) has emitted a
+// CCIPSendRequested event for seqNum, committing blocks between attempts.
+func (c *CCIPIntegrationTestHarness) EventuallySendRequested(t *testing.T, seqNum uint64, onRampOpts ...common.Address) {
+	var onRamp *evm_2_evm_onramp.EVM2EVMOnRamp
+	var err error
+	if len(onRampOpts) > 0 {
+		onRamp, err = evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampOpts[0], c.Source.Chain)
+		require.NoError(t, err)
+	} else {
+		require.NotNil(t, c.Source.OnRamp, "no onRamp configured")
+		onRamp = c.Source.OnRamp
+	}
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		it, err := onRamp.FilterCCIPSendRequested(nil)
+		require.NoError(t, err)
+		for it.Next() {
+			if it.Event.Message.SequenceNumber == seqNum {
+				t.Log("sendRequested generated for", seqNum)
+				return true
+			}
+		}
+		c.Source.Chain.Commit()
+		c.Dest.Chain.Commit()
+		return false
+	}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue(), "sendRequested has not been generated")
+}
+
+// ConsistentlyReportNotCommitted asserts that the commit store's expected next
+// sequence number never exceeds max during the polling window, i.e. no report
+// covering max gets committed. Blocks are committed on both chains between
+// attempts.
+func (c *CCIPIntegrationTestHarness) ConsistentlyReportNotCommitted(t *testing.T, max int, commitStoreOpts ...common.Address) {
+	var commitStore *commit_store.CommitStore
+	var err error
+	if len(commitStoreOpts) > 0 {
+		commitStore, err = commit_store.NewCommitStore(commitStoreOpts[0], c.Dest.Chain)
+		require.NoError(t, err)
+	} else {
+		require.NotNil(t, c.Dest.CommitStore, "no commitStore configured")
+		commitStore = c.Dest.CommitStore
+	}
+	gomega.NewGomegaWithT(t).Consistently(func() bool {
+		minSeqNum, err := commitStore.GetExpectedNextSequenceNumber(nil)
+		require.NoError(t, err)
+		c.Source.Chain.Commit()
+		c.Dest.Chain.Commit()
+		t.Log("min seq num reported", minSeqNum)
+		return minSeqNum > uint64(max)
+	}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeFalse(), "report has been committed")
+}
+
+// SetupAndStartNodes starts one bootstrap node plus four oracle nodes against
+// the harness's simulated chains, registers the oracle identities, encodes the
+// default commit/exec configs and writes them onchain via SetupOnchainConfig.
+// It returns the bootstrap node, the oracle nodes and the block at which the
+// config was set (used as the replay/start block for jobs).
+func (c *CCIPIntegrationTestHarness) SetupAndStartNodes(ctx context.Context, t *testing.T, bootstrapNodePort int64) (Node, []Node, int64) {
+	appBootstrap, bootstrapPeerID, bootstrapTransmitter, bootstrapKb := setupNodeCCIP(t, c.Dest.User, bootstrapNodePort,
+		"bootstrap_ccip", c.Source.Chain, c.Dest.Chain, big.NewInt(0).SetUint64(c.Source.ChainID),
+		big.NewInt(0).SetUint64(c.Dest.ChainID), "", 0)
+	var (
+		oracles []confighelper.OracleIdentityExtra
+		nodes   []Node
+	)
+	err := appBootstrap.Start(ctx)
+	require.NoError(t, err)
+	t.Cleanup(func() {
+		require.NoError(t, appBootstrap.Stop())
+	})
+	bootstrapNode := Node{
+		App:         appBootstrap,
+		Transmitter: bootstrapTransmitter,
+		KeyBundle:   bootstrapKb,
+	}
+	// Set up the minimum 4 oracles all funded with destination ETH
+	for i := int64(0); i < 4; i++ {
+		app, peerID, transmitter, kb := setupNodeCCIP(
+			t,
+			c.Dest.User,
+			int64(freeport.GetOne(t)),
+			fmt.Sprintf("oracle_ccip%d", i),
+			c.Source.Chain,
+			c.Dest.Chain,
+			big.NewInt(0).SetUint64(c.Source.ChainID),
+			big.NewInt(0).SetUint64(c.Dest.ChainID),
+			bootstrapPeerID,
+			bootstrapNodePort,
+		)
+		nodes = append(nodes, Node{
+			App:         app,
+			Transmitter: transmitter,
+			KeyBundle:   kb,
+		})
+		// NOTE(review): despite its name, this local holds the decoded
+		// ONCHAIN public key (kb.OnChainPublicKey) — the assignment below is
+		// correct, only the variable name is misleading.
+		offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x"))
+		oracles = append(oracles, confighelper.OracleIdentityExtra{
+			OracleIdentity: confighelper.OracleIdentity{
+				OnchainPublicKey:  offchainPublicKey,
+				TransmitAccount:   types4.Account(transmitter.String()),
+				OffchainPublicKey: kb.OffchainPublicKey(),
+				PeerID:            peerID,
+			},
+			ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(),
+		})
+		err = app.Start(ctx)
+		require.NoError(t, err)
+		t.Cleanup(func() {
+			require.NoError(t, app.Stop())
+		})
+	}
+
+	c.Oracles = oracles
+	commitOnchainConfig := c.CreateDefaultCommitOnchainConfig(t)
+	commitOffchainConfig := c.CreateDefaultCommitOffchainConfig(t)
+	execOnchainConfig := c.CreateDefaultExecOnchainConfig(t)
+	execOffchainConfig := c.CreateDefaultExecOffchainConfig(t)
+
+	configBlock := c.SetupOnchainConfig(t, commitOnchainConfig, commitOffchainConfig, execOnchainConfig, execOffchainConfig)
+	c.Nodes = nodes
+	c.Bootstrap = bootstrapNode
+	return bootstrapNode, nodes, configBlock
+}
+
+func (c *CCIPIntegrationTestHarness) SetUpNodesAndJobs(t *testing.T, pricePipeline string, priceGetterConfig string, usdcAttestationAPI string) CCIPJobSpecParams {
+ // setup Jobs
+ ctx := context.Background()
+ // Starts nodes and configures them in the OCR contracts.
+ bootstrapNode, _, configBlock := c.SetupAndStartNodes(ctx, t, int64(freeport.GetOne(t)))
+
+ jobParams := c.NewCCIPJobSpecParams(pricePipeline, priceGetterConfig, configBlock, usdcAttestationAPI)
+
+ // Add the bootstrap job
+ c.Bootstrap.AddBootstrapJob(t, jobParams.BootstrapJob(c.Dest.CommitStore.Address().Hex()))
+ c.AddAllJobs(t, jobParams)
+
+ // Replay for bootstrap.
+ bc, err := bootstrapNode.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(c.Dest.ChainID, 10))
+ require.NoError(t, err)
+ require.NoError(t, bc.LogPoller().Replay(context.Background(), configBlock))
+ c.Dest.Chain.Commit()
+
+ return jobParams
+}
+func DecodeCommitOnChainConfig(encoded []byte) (ccipdata.CommitOnchainConfig, error) {
+ var onchainConfig ccipdata.CommitOnchainConfig
+ unpacked, err := abihelpers.DecodeOCR2Config(encoded)
+ if err != nil {
+ return onchainConfig, err
+ }
+ onChainCfg := unpacked.OnchainConfig
+ onchainConfig, err = abihelpers.DecodeAbiStruct[ccipdata.CommitOnchainConfig](onChainCfg)
+ if err != nil {
+ return onchainConfig, err
+ }
+ return onchainConfig, nil
+}
+
+func DecodeExecOnChainConfig(encoded []byte) (v1_5_0.ExecOnchainConfig, error) {
+ var onchainConfig v1_5_0.ExecOnchainConfig
+ unpacked, err := abihelpers.DecodeOCR2Config(encoded)
+ if err != nil {
+ return onchainConfig, errors.Wrap(err, "failed to unpack log data")
+ }
+ onChainCfg := unpacked.OnchainConfig
+ onchainConfig, err = abihelpers.DecodeAbiStruct[v1_5_0.ExecOnchainConfig](onChainCfg)
+ if err != nil {
+ return onchainConfig, err
+ }
+ return onchainConfig, nil
+}
+
// ksa wraps a keystore.Master but serves a fixed CSA keystore, letting tests
// inject a pre-built CSA key set while delegating everything else to Master.
type ksa struct {
	keystore.Master
	csa keystore.CSA
}

// CSA returns the injected CSA keystore instead of the embedded Master's own.
func (k *ksa) CSA() keystore.CSA {
	return k.csa
}

// NewKsa builds a ksa backed by a fresh keystore (fast scrypt params for test
// speed) with the provided CSA keystore override.
func NewKsa(db *sqlx.DB, lggr logger.Logger, csa keystore.CSA) *ksa {
	return &ksa{
		Master: keystore.New(db, clutils.FastScryptParams, lggr),
		csa: csa,
	}
}
+
// NoopFeedsClient is a feeds-manager client stub whose RPCs all succeed with
// empty responses; used where a node requires a feeds connection the test
// does not exercise.
type NoopFeedsClient struct{}

// ApprovedJob acknowledges a job approval without doing anything.
func (n NoopFeedsClient) ApprovedJob(context.Context, *pb.ApprovedJobRequest) (*pb.ApprovedJobResponse, error) {
	return &pb.ApprovedJobResponse{}, nil
}

// Healthcheck always reports healthy.
func (n NoopFeedsClient) Healthcheck(context.Context, *pb.HealthcheckRequest) (*pb.HealthcheckResponse, error) {
	return &pb.HealthcheckResponse{}, nil
}

// UpdateNode accepts any node update without acting on it.
func (n NoopFeedsClient) UpdateNode(context.Context, *pb.UpdateNodeRequest) (*pb.UpdateNodeResponse, error) {
	return &pb.UpdateNodeResponse{}, nil
}

// RejectedJob acknowledges a job rejection without doing anything.
func (n NoopFeedsClient) RejectedJob(context.Context, *pb.RejectedJobRequest) (*pb.RejectedJobResponse, error) {
	return &pb.RejectedJobResponse{}, nil
}

// CancelledJob acknowledges a job cancellation without doing anything.
func (n NoopFeedsClient) CancelledJob(context.Context, *pb.CancelledJobRequest) (*pb.CancelledJobResponse, error) {
	return &pb.CancelledJobResponse{}, nil
}
diff --git a/core/services/ocr2/plugins/ccip/testhelpers/integration/jobspec.go b/core/services/ocr2/plugins/ccip/testhelpers/integration/jobspec.go
new file mode 100644
index 00000000000..961e26d1cef
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/testhelpers/integration/jobspec.go
@@ -0,0 +1,334 @@
+package integrationtesthelpers
+
+import (
+ "bytes"
+ "fmt"
+ "text/template"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/lib/pq"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/types"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay"
+ "github.com/smartcontractkit/chainlink/v2/core/store/models"
+)
+
// OCR2TaskJobSpec represents an OCR2 job that is given to other nodes, meant to communicate with the bootstrap node,
// and provide their answers
type OCR2TaskJobSpec struct {
	Name              string `toml:"name"`
	JobType           string `toml:"type"`            // e.g. "offchainreporting2" or "bootstrap"
	MaxTaskDuration   string `toml:"maxTaskDuration"` // Optional
	ForwardingAllowed bool   `toml:"forwardingAllowed"`
	OCR2OracleSpec    job.OCR2OracleSpec
	ObservationSource string `toml:"observationSource"` // List of commands for the Chainlink node
}

// Type returns the type of the job
func (o *OCR2TaskJobSpec) Type() string { return o.JobType }
+
// String renders the job spec as a TOML job definition suitable for submission
// to a Chainlink node, by executing an internal text/template over a flattened
// view of the spec. Returns an error if the template fails to parse or execute.
func (o *OCR2TaskJobSpec) String() (string, error) {
	var feedID string
	if o.OCR2OracleSpec.FeedID != nil {
		feedID = o.OCR2OracleSpec.FeedID.Hex()
	}
	// Flatten the nested OCR2OracleSpec into a single template context.
	// NOTE(review): TrackerSubscribeInterval is declared but never populated
	// below, so its template branch never renders — confirm whether it should
	// be wired to the oracle spec.
	specWrap := struct {
		Name                     string
		JobType                  string
		MaxTaskDuration          string
		ForwardingAllowed        bool
		ContractID               string
		FeedID                   string
		Relay                    string
		PluginType               string
		RelayConfig              map[string]interface{}
		PluginConfig             map[string]interface{}
		P2PV2Bootstrappers       []string
		OCRKeyBundleID           string
		MonitoringEndpoint       string
		TransmitterID            string
		BlockchainTimeout        time.Duration
		TrackerSubscribeInterval time.Duration
		TrackerPollInterval      time.Duration
		ContractConfirmations    uint16
		ObservationSource        string
	}{
		Name:                  o.Name,
		JobType:               o.JobType,
		ForwardingAllowed:     o.ForwardingAllowed,
		MaxTaskDuration:       o.MaxTaskDuration,
		ContractID:            o.OCR2OracleSpec.ContractID,
		FeedID:                feedID,
		Relay:                 o.OCR2OracleSpec.Relay,
		PluginType:            string(o.OCR2OracleSpec.PluginType),
		RelayConfig:           o.OCR2OracleSpec.RelayConfig,
		PluginConfig:          o.OCR2OracleSpec.PluginConfig,
		P2PV2Bootstrappers:    o.OCR2OracleSpec.P2PV2Bootstrappers,
		OCRKeyBundleID:        o.OCR2OracleSpec.OCRKeyBundleID.String,
		MonitoringEndpoint:    o.OCR2OracleSpec.MonitoringEndpoint.String,
		TransmitterID:         o.OCR2OracleSpec.TransmitterID.String,
		BlockchainTimeout:     o.OCR2OracleSpec.BlockchainTimeout.Duration(),
		ContractConfirmations: o.OCR2OracleSpec.ContractConfigConfirmations,
		TrackerPollInterval:   o.OCR2OracleSpec.ContractConfigTrackerPollInterval.Duration(),
		ObservationSource:     o.ObservationSource,
	}
	// Template text is emitted verbatim as TOML; optional keys are wrapped in
	// {{if ...}} guards so unset fields produce no output.
	ocr2TemplateString := `
type = "{{ .JobType }}"
name = "{{.Name}}"
forwardingAllowed = {{.ForwardingAllowed}}
{{if .MaxTaskDuration}}
maxTaskDuration = "{{ .MaxTaskDuration }}" {{end}}
{{if .PluginType}}
pluginType = "{{ .PluginType }}" {{end}}
relay = "{{.Relay}}"
schemaVersion = 1
contractID = "{{.ContractID}}"
{{if .FeedID}}
feedID = "{{.FeedID}}"
{{end}}
{{if eq .JobType "offchainreporting2" }}
ocrKeyBundleID = "{{.OCRKeyBundleID}}" {{end}}
{{if eq .JobType "offchainreporting2" }}
transmitterID = "{{.TransmitterID}}" {{end}}
{{if .BlockchainTimeout}}
blockchainTimeout = "{{.BlockchainTimeout}}"
{{end}}
{{if .ContractConfirmations}}
contractConfigConfirmations = {{.ContractConfirmations}}
{{end}}
{{if .TrackerPollInterval}}
contractConfigTrackerPollInterval = "{{.TrackerPollInterval}}"
{{end}}
{{if .TrackerSubscribeInterval}}
contractConfigTrackerSubscribeInterval = "{{.TrackerSubscribeInterval}}"
{{end}}
{{if .P2PV2Bootstrappers}}
p2pv2Bootstrappers = [{{range .P2PV2Bootstrappers}}"{{.}}",{{end}}]{{end}}
{{if .MonitoringEndpoint}}
monitoringEndpoint = "{{.MonitoringEndpoint}}" {{end}}
{{if .ObservationSource}}
observationSource = """
{{.ObservationSource}}
"""{{end}}
{{if eq .JobType "offchainreporting2" }}
[pluginConfig]{{range $key, $value := .PluginConfig}}
{{$key}} = {{$value}}{{end}}
{{end}}
[relayConfig]{{range $key, $value := .RelayConfig}}
{{$key}} = {{$value}}{{end}}
`
	return MarshallTemplate(specWrap, "OCR2 Job", ocr2TemplateString)
}
+
+// MarshallTemplate Helper to marshall templates
+func MarshallTemplate(jobSpec interface{}, name, templateString string) (string, error) {
+ var buf bytes.Buffer
+ tmpl, err := template.New(name).Parse(templateString)
+ if err != nil {
+ return "", err
+ }
+ err = tmpl.Execute(&buf, jobSpec)
+ if err != nil {
+ return "", err
+ }
+ return buf.String(), err
+}
+
+type JobType string
+
+const (
+ Commit JobType = "commit"
+ Execution JobType = "exec"
+ Boostrap JobType = "bootstrap"
+)
+
+func JobName(jobType JobType, source string, destination, version string) string {
+ if version != "" {
+ return fmt.Sprintf("ccip-%s-%s-%s-%s", jobType, source, destination, version)
+ }
+ return fmt.Sprintf("ccip-%s-%s-%s", jobType, source, destination)
+}
+
// CCIPJobSpecParams carries everything needed to render the commit, execution
// and bootstrap job specs for one source→destination lane.
type CCIPJobSpecParams struct {
	Name                   string
	Version                string         // optional; appended to generated job names
	OffRamp                common.Address // required for commit and exec jobs
	CommitStore            common.Address // required for all jobs (see Validate)
	SourceChainName        string
	DestChainName          string
	DestEvmChainId         uint64
	TokenPricesUSDPipeline string // pipeline-based token price source (legacy)
	PriceGetterConfig      string // dynamic price getter JSON config (newer path)
	SourceStartBlock       uint64 // optional log poller replay start (source)
	DestStartBlock         uint64 // optional log poller replay start (dest)
	USDCAttestationAPI     string // if set, fills USDC config with random addresses
	USDCConfig             *config.USDCConfig // explicit USDC config; overrides USDCAttestationAPI
	P2PV2Bootstrappers     pq.StringArray
}
+
+func (params CCIPJobSpecParams) Validate() error {
+ if params.CommitStore == common.HexToAddress("0x0") {
+ return fmt.Errorf("must set commit store address")
+ }
+ return nil
+}
+
+func (params CCIPJobSpecParams) ValidateCommitJobSpec() error {
+ commonErr := params.Validate()
+ if commonErr != nil {
+ return commonErr
+ }
+ if params.OffRamp == common.HexToAddress("0x0") {
+ return fmt.Errorf("OffRamp cannot be empty for execution job")
+ }
+ // Validate token prices config
+ // NB: only validate the dynamic price getter config if present since we could also be using the pipeline instead.
+ // NB: make this test mandatory once we switch to dynamic price getter only.
+ if params.PriceGetterConfig != "" {
+ if _, err := pricegetter.NewDynamicPriceGetterConfig(params.PriceGetterConfig); err != nil {
+ return fmt.Errorf("invalid price getter config: %w", err)
+ }
+ }
+ return nil
+}
+
+func (params CCIPJobSpecParams) ValidateExecJobSpec() error {
+ commonErr := params.Validate()
+ if commonErr != nil {
+ return commonErr
+ }
+ if params.OffRamp == common.HexToAddress("0x0") {
+ return fmt.Errorf("OffRamp cannot be empty for execution job")
+ }
+ return nil
+}
+
// CommitJobSpec generates template for CCIP-relay job spec.
// OCRKeyBundleID,TransmitterID need to be set from the calling function
func (params CCIPJobSpecParams) CommitJobSpec() (*OCR2TaskJobSpec, error) {
	err := params.ValidateCommitJobSpec()
	if err != nil {
		return nil, fmt.Errorf("invalid job spec params: %w", err)
	}

	// Plugin config values are rendered verbatim into TOML by the job
	// template, hence the explicit quoting: addresses get "..." and
	// multi-line pipelines get """...""".
	pluginConfig := map[string]interface{}{
		"offRamp": fmt.Sprintf(`"%s"`, params.OffRamp.Hex()),
	}
	if params.TokenPricesUSDPipeline != "" {
		pluginConfig["tokenPricesUSDPipeline"] = fmt.Sprintf(`"""
%s
"""`, params.TokenPricesUSDPipeline)
	}
	if params.PriceGetterConfig != "" {
		pluginConfig["priceGetterConfig"] = fmt.Sprintf(`"""
%s
"""`, params.PriceGetterConfig)
	}

	ocrSpec := job.OCR2OracleSpec{
		Relay:                             relay.NetworkEVM,
		PluginType:                        types.CCIPCommit,
		ContractID:                        params.CommitStore.Hex(),
		ContractConfigConfirmations:       1,
		ContractConfigTrackerPollInterval: models.Interval(20 * time.Second),
		P2PV2Bootstrappers:                params.P2PV2Bootstrappers,
		PluginConfig:                      pluginConfig,
		RelayConfig: map[string]interface{}{
			"chainID": params.DestEvmChainId,
		},
	}
	// Optional replay start blocks for the source/dest log pollers.
	if params.DestStartBlock > 0 {
		ocrSpec.PluginConfig["destStartBlock"] = params.DestStartBlock
	}
	if params.SourceStartBlock > 0 {
		ocrSpec.PluginConfig["sourceStartBlock"] = params.SourceStartBlock
	}
	return &OCR2TaskJobSpec{
		OCR2OracleSpec: ocrSpec,
		JobType:        "offchainreporting2",
		Name:           JobName(Commit, params.SourceChainName, params.DestChainName, params.Version),
	}, nil
}
+
+// ExecutionJobSpec generates template for CCIP-execution job spec.
+// OCRKeyBundleID,TransmitterID need to be set from the calling function
+func (params CCIPJobSpecParams) ExecutionJobSpec() (*OCR2TaskJobSpec, error) {
+ err := params.ValidateExecJobSpec()
+ if err != nil {
+ return nil, err
+ }
+ ocrSpec := job.OCR2OracleSpec{
+ Relay: relay.NetworkEVM,
+ PluginType: types.CCIPExecution,
+ ContractID: params.OffRamp.Hex(),
+ ContractConfigConfirmations: 1,
+ ContractConfigTrackerPollInterval: models.Interval(20 * time.Second),
+
+ P2PV2Bootstrappers: params.P2PV2Bootstrappers,
+ PluginConfig: map[string]interface{}{},
+ RelayConfig: map[string]interface{}{
+ "chainID": params.DestEvmChainId,
+ },
+ }
+ if params.DestStartBlock > 0 {
+ ocrSpec.PluginConfig["destStartBlock"] = params.DestStartBlock
+ }
+ if params.SourceStartBlock > 0 {
+ ocrSpec.PluginConfig["sourceStartBlock"] = params.SourceStartBlock
+ }
+ if params.USDCAttestationAPI != "" {
+ ocrSpec.PluginConfig["USDCConfig.AttestationAPI"] = fmt.Sprintf("\"%s\"", params.USDCAttestationAPI)
+ ocrSpec.PluginConfig["USDCConfig.SourceTokenAddress"] = fmt.Sprintf("\"%s\"", utils.RandomAddress().String())
+ ocrSpec.PluginConfig["USDCConfig.SourceMessageTransmitterAddress"] = fmt.Sprintf("\"%s\"", utils.RandomAddress().String())
+ ocrSpec.PluginConfig["USDCConfig.AttestationAPITimeoutSeconds"] = 5
+ }
+ if params.USDCConfig != nil {
+ ocrSpec.PluginConfig["USDCConfig.AttestationAPI"] = fmt.Sprintf(`"%s"`, params.USDCConfig.AttestationAPI)
+ ocrSpec.PluginConfig["USDCConfig.SourceTokenAddress"] = fmt.Sprintf(`"%s"`, params.USDCConfig.SourceTokenAddress)
+ ocrSpec.PluginConfig["USDCConfig.SourceMessageTransmitterAddress"] = fmt.Sprintf(`"%s"`, params.USDCConfig.SourceMessageTransmitterAddress)
+ ocrSpec.PluginConfig["USDCConfig.AttestationAPITimeoutSeconds"] = params.USDCConfig.AttestationAPITimeoutSeconds
+ }
+ return &OCR2TaskJobSpec{
+ OCR2OracleSpec: ocrSpec,
+ JobType: "offchainreporting2",
+ Name: JobName(Execution, params.SourceChainName, params.DestChainName, params.Version),
+ }, err
+}
+
// BootstrapJob builds the bootstrap job spec that other oracles' jobs point
// their p2pv2Bootstrappers at. contractID is the commit store address the
// bootstrap node tracks for OCR config.
func (params CCIPJobSpecParams) BootstrapJob(contractID string) *OCR2TaskJobSpec {
	bootstrapSpec := job.OCR2OracleSpec{
		ContractID:                        contractID,
		Relay:                             relay.NetworkEVM,
		ContractConfigConfirmations:       1,
		ContractConfigTrackerPollInterval: models.Interval(20 * time.Second),
		RelayConfig: map[string]interface{}{
			"chainID": params.DestEvmChainId,
		},
	}
	return &OCR2TaskJobSpec{
		Name:           fmt.Sprintf("%s-%s", Boostrap, params.DestChainName),
		JobType:        "bootstrap",
		OCR2OracleSpec: bootstrapSpec,
	}
}
+
// NewCCIPJobSpecParams derives job spec params from the harness's deployed
// contracts, using fixed simulated chain names and configBlock as the
// destination replay start block.
func (c *CCIPIntegrationTestHarness) NewCCIPJobSpecParams(tokenPricesUSDPipeline string, priceGetterConfig string, configBlock int64, usdcAttestationAPI string) CCIPJobSpecParams {
	return CCIPJobSpecParams{
		CommitStore:            c.Dest.CommitStore.Address(),
		OffRamp:                c.Dest.OffRamp.Address(),
		DestEvmChainId:         c.Dest.ChainID,
		SourceChainName:        "SimulatedSource",
		DestChainName:          "SimulatedDest",
		TokenPricesUSDPipeline: tokenPricesUSDPipeline,
		PriceGetterConfig:      priceGetterConfig,
		DestStartBlock:         uint64(configBlock),
		USDCAttestationAPI:     usdcAttestationAPI,
	}
}
diff --git a/core/services/ocr2/plugins/ccip/testhelpers/offramp.go b/core/services/ocr2/plugins/ccip/testhelpers/offramp.go
new file mode 100644
index 00000000000..d10e693325d
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/testhelpers/offramp.go
@@ -0,0 +1,119 @@
+package testhelpers
+
+import (
+ "sync"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/pkg/errors"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp"
+ mock_contracts "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+)
+
// FakeOffRamp is a test double for the EVM2EVMOffRamp contract: it embeds the
// generated mock for anything not overridden and serves the fields below from
// memory via the Set*/Get* accessors.
type FakeOffRamp struct {
	*mock_contracts.EVM2EVMOffRampInterface

	// All fields below are guarded by mu (see getOffRampVal/setOffRampVal).
	rateLimiterState   cciptypes.TokenBucketRateLimit
	senderNonces       map[common.Address]uint64
	tokenToPool        map[common.Address]common.Address
	dynamicConfig      evm_2_evm_offramp.EVM2EVMOffRampDynamicConfig
	sourceToDestTokens map[common.Address]common.Address

	mu sync.RWMutex
}
+
// NewFakeOffRamp builds a FakeOffRamp whose Address() returns a fresh random
// address, and returns both the fake and that address.
func NewFakeOffRamp(t *testing.T) (*FakeOffRamp, common.Address) {
	addr := utils.RandomAddress()
	mockOffRamp := mock_contracts.NewEVM2EVMOffRampInterface(t)
	mockOffRamp.On("Address").Return(addr).Maybe()

	offRamp := &FakeOffRamp{EVM2EVMOffRampInterface: mockOffRamp}
	return offRamp, addr
}
+
// CurrentRateLimiterState returns the state previously stored via
// SetRateLimiterState. opts is ignored.
func (o *FakeOffRamp) CurrentRateLimiterState(opts *bind.CallOpts) (cciptypes.TokenBucketRateLimit, error) {
	return getOffRampVal(o, func(o *FakeOffRamp) (cciptypes.TokenBucketRateLimit, error) { return o.rateLimiterState, nil })
}

// SetRateLimiterState stores the rate limiter state served by CurrentRateLimiterState.
func (o *FakeOffRamp) SetRateLimiterState(state cciptypes.TokenBucketRateLimit) {
	setOffRampVal(o, func(o *FakeOffRamp) { o.rateLimiterState = state })
}
+
// GetSenderNonce returns the nonce stored for sender; an unknown sender yields
// the zero value, never an error. opts is ignored.
func (o *FakeOffRamp) GetSenderNonce(opts *bind.CallOpts, sender common.Address) (uint64, error) {
	return getOffRampVal(o, func(o *FakeOffRamp) (uint64, error) { return o.senderNonces[sender], nil })
}

// SetSenderNonces converts the generic CCIP addresses to EVM addresses and
// replaces (not merges) the fake's sender nonce map.
func (o *FakeOffRamp) SetSenderNonces(senderNonces map[cciptypes.Address]uint64) {
	evmSenderNonces := make(map[common.Address]uint64)
	for k, v := range senderNonces {
		// NOTE(review): the conversion error is ignored and addrs[0] is assumed
		// to exist — an unconvertible address would panic here. Acceptable for
		// tests; confirm callers always pass valid EVM addresses.
		addrs, _ := ccipcalc.GenericAddrsToEvm(k)
		evmSenderNonces[addrs[0]] = v
	}

	setOffRampVal(o, func(o *FakeOffRamp) { o.senderNonces = evmSenderNonces })
}
+
+func (o *FakeOffRamp) GetPoolByDestToken(opts *bind.CallOpts, destToken common.Address) (common.Address, error) {
+ return getOffRampVal(o, func(o *FakeOffRamp) (common.Address, error) {
+ addr, exists := o.tokenToPool[destToken]
+ if !exists {
+ return common.Address{}, errors.New("not found")
+ }
+ return addr, nil
+ })
+}
+
// SetTokenPools replaces the dest-token → pool mapping served by GetPoolByDestToken.
func (o *FakeOffRamp) SetTokenPools(tokenToPool map[common.Address]common.Address) {
	setOffRampVal(o, func(o *FakeOffRamp) { o.tokenToPool = tokenToPool })
}
+
// GetDynamicConfig returns the config stored via SetDynamicConfig. opts is ignored.
func (o *FakeOffRamp) GetDynamicConfig(opts *bind.CallOpts) (evm_2_evm_offramp.EVM2EVMOffRampDynamicConfig, error) {
	return getOffRampVal(o, func(o *FakeOffRamp) (evm_2_evm_offramp.EVM2EVMOffRampDynamicConfig, error) {
		return o.dynamicConfig, nil
	})
}

// SetDynamicConfig stores the config served by GetDynamicConfig.
func (o *FakeOffRamp) SetDynamicConfig(cfg evm_2_evm_offramp.EVM2EVMOffRampDynamicConfig) {
	setOffRampVal(o, func(o *FakeOffRamp) { o.dynamicConfig = cfg })
}

// SetSourceToDestTokens replaces the source → dest token mapping used by
// GetSupportedTokens and GetDestinationTokens.
func (o *FakeOffRamp) SetSourceToDestTokens(m map[common.Address]common.Address) {
	setOffRampVal(o, func(o *FakeOffRamp) { o.sourceToDestTokens = m })
}
+
+func (o *FakeOffRamp) GetSupportedTokens(opts *bind.CallOpts) ([]common.Address, error) {
+ return getOffRampVal(o, func(o *FakeOffRamp) ([]common.Address, error) {
+ tks := make([]common.Address, 0, len(o.sourceToDestTokens))
+ for tk := range o.sourceToDestTokens {
+ tks = append(tks, tk)
+ }
+ return tks, nil
+ })
+}
+
+func (o *FakeOffRamp) GetDestinationTokens(opts *bind.CallOpts) ([]common.Address, error) {
+ return getOffRampVal(o, func(o *FakeOffRamp) ([]common.Address, error) {
+ tokens := make([]common.Address, 0, len(o.sourceToDestTokens))
+ for _, dst := range o.sourceToDestTokens {
+ tokens = append(tokens, dst)
+ }
+ return tokens, nil
+ })
+}
+
// getOffRampVal runs getter while holding the fake's read lock.
func getOffRampVal[T any](o *FakeOffRamp, getter func(o *FakeOffRamp) (T, error)) (T, error) {
	o.mu.RLock()
	defer o.mu.RUnlock()
	return getter(o)
}

// setOffRampVal runs setter while holding the fake's write lock.
func setOffRampVal(o *FakeOffRamp, setter func(o *FakeOffRamp)) {
	o.mu.Lock()
	defer o.mu.Unlock()
	setter(o)
}
diff --git a/core/services/ocr2/plugins/ccip/testhelpers/simulated_backend.go b/core/services/ocr2/plugins/ccip/testhelpers/simulated_backend.go
new file mode 100644
index 00000000000..ea91362aaae
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/testhelpers/simulated_backend.go
@@ -0,0 +1,75 @@
+package testhelpers
+
+import (
+ "context"
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core"
+ ethtypes "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/eth/ethconfig"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore"
+)
+
+// FirstBlockAge is used to compute first block's timestamp in SimulatedBackend (time.Now() - FirstBlockAge)
+const FirstBlockAge = 24 * time.Hour
+
+func SetupChain(t *testing.T) (*backends.SimulatedBackend, *bind.TransactOpts) {
+ key, err := crypto.GenerateKey()
+ require.NoError(t, err)
+ user, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
+ require.NoError(t, err)
+ chain := backends.NewSimulatedBackend(core.GenesisAlloc{
+ user.From: {Balance: new(big.Int).Mul(big.NewInt(1000), big.NewInt(1e18))}},
+ ethconfig.Defaults.Miner.GasCeil)
+ // CCIP relies on block timestamps, but SimulatedBackend uses by default clock starting from 1970-01-01
+ // This trick is used to move the clock closer to the current time. We set first block to be X hours ago.
+ // Tests create plenty of transactions so this number can't be too low, every new block mined will tick the clock,
+ // if you mine more than "X hours" transactions, SimulatedBackend will panic because generated timestamps will be in the future.
+ // IMPORTANT: Any adjustments to FirstBlockAge will automatically update PermissionLessExecutionThresholdSeconds in tests
+ blockTime := time.UnixMilli(int64(chain.Blockchain().CurrentHeader().Time))
+ err = chain.AdjustTime(time.Since(blockTime) - FirstBlockAge)
+ require.NoError(t, err)
+ chain.Commit()
+ return chain, user
+}
+
// EthKeyStoreSim bundles an Eth and a CSA keystore for simulated-backend tests
// and overrides SignTx to work around the single-chain-ID simulation.
type EthKeyStoreSim struct {
	ETHKS keystore.Eth
	CSAKS keystore.CSA
}

// CSA returns the wrapped CSA keystore.
func (ks EthKeyStoreSim) CSA() keystore.CSA {
	return ks.CSAKS
}

// Eth returns the wrapped Eth keystore.
func (ks EthKeyStoreSim) Eth() keystore.Eth {
	return ks.ETHKS
}

// SignTx signs tx with the key for address, rewriting chain ID 1000 to 1337.
func (ks EthKeyStoreSim) SignTx(address common.Address, tx *ethtypes.Transaction, chainID *big.Int) (*ethtypes.Transaction, error) {
	if chainID.String() == "1000" {
		// A terrible hack, just for the multichain test. All simulation clients run on chainID 1337.
		// We let the DestChainSelector actually use 1337 to make sure the offchainConfig digests are properly generated.
		return ks.ETHKS.SignTx(context.Background(), address, tx, big.NewInt(1337))
	}
	return ks.ETHKS.SignTx(context.Background(), address, tx, chainID)
}
+
+var _ keystore.Eth = EthKeyStoreSim{}.ETHKS
+
// ConfirmTxs mines a block and asserts every transaction in txs was mined with
// a successful (status 1) receipt.
// NOTE(review): bind.WaitMined is called with context.Background(), so a tx
// that never mines would block forever; acceptable here because Commit() mines
// immediately, but worth confirming.
func ConfirmTxs(t *testing.T, txs []*ethtypes.Transaction, chain *backends.SimulatedBackend) {
	chain.Commit()
	for _, tx := range txs {
		rec, err := bind.WaitMined(context.Background(), chain, tx)
		require.NoError(t, err)
		require.Equal(t, uint64(1), rec.Status)
	}
}
diff --git a/core/services/ocr2/plugins/ccip/testhelpers/structfields.go b/core/services/ocr2/plugins/ccip/testhelpers/structfields.go
new file mode 100644
index 00000000000..88e0fffa672
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/testhelpers/structfields.go
@@ -0,0 +1,44 @@
+package testhelpers
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// FindStructFieldsOfCertainType recursively iterates over struct fields and returns all the fields of the provided type.
+func FindStructFieldsOfCertainType(targetType string, v any) []string {
+ typesAndFields := TypesAndFields("", reflect.ValueOf(v))
+ results := make([]string, 0)
+ for _, field := range typesAndFields {
+ if strings.Contains(field, targetType) {
+ results = append(results, field)
+ }
+ }
+ return results
+}
+
+// TypesAndFields will find and return all the fields and their types of the provided value.
+// NOTE: This is not intended for production use, it's a helper method for tests.
+func TypesAndFields(prefix string, v reflect.Value) []string {
+ results := make([]string, 0)
+
+ s := v
+ typeOfT := s.Type()
+ for i := 0; i < s.NumField(); i++ {
+ f := s.Field(i)
+ typeAndName := fmt.Sprintf("%s%s %v", prefix, f.Type(), typeOfT.Field(i).Name)
+ results = append(results, typeAndName)
+
+ if f.Kind().String() == "ptr" {
+ results = append(results, TypesAndFields(typeOfT.Field(i).Name, f.Elem())...)
+ }
+
+ if f.Kind().String() == "struct" {
+ x1 := reflect.ValueOf(f.Interface())
+ results = append(results, TypesAndFields(typeOfT.Field(i).Name, x1)...)
+ }
+ }
+
+ return results
+}
diff --git a/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/ccip_contracts_1_4_0.go b/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/ccip_contracts_1_4_0.go
new file mode 100644
index 00000000000..4ea5bb18d7e
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/ccip_contracts_1_4_0.go
@@ -0,0 +1,1585 @@
+package testhelpers_1_4_0
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/pkg/errors"
+ "github.com/rs/zerolog/log"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/libocr/offchainreporting2/confighelper"
+ ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2/types"
+ ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+ "github.com/smartcontractkit/chainlink-common/pkg/hashutil"
+ "github.com/smartcontractkit/chainlink-common/pkg/merklemulti"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/arm_proxy_contract"
+ burn_mint_token_pool "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/burn_mint_token_pool_1_4_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_1_2_0"
+ evm_2_evm_offramp "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0"
+ evm_2_evm_onramp "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/lock_release_token_pool_1_0_0"
+ lock_release_token_pool "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/lock_release_token_pool_1_4_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/maybe_revert_message_receiver"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_arm_contract"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/weth9"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/shared/generated/burn_mint_erc677"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers"
+)
+
+// Well-known labels and constants shared by the 1.4.0 CCIP test helpers.
+var (
+	// Source
+	SourcePool          = "source Link pool"
+	SourcePriceRegistry = "source PriceRegistry"
+	OnRamp              = "onramp"
+	OnRampNative        = "onramp-native"
+	SourceRouter        = "source router"
+
+	// Dest
+	OffRamp  = "offramp"
+	DestPool = "dest Link pool"
+
+	Receiver = "receiver"
+	Sender   = "sender"
+	// Link converts a whole-token amount into its 1e18-denominated representation.
+	Link         = func(amount int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(amount)) }
+	HundredLink  = Link(100)
+	LinkUSDValue = func(amount int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(amount)) }
+	// Default chain IDs / selectors used by the simulated source and dest chains.
+	SourceChainID       = uint64(1000)
+	SourceChainSelector = uint64(11787463284727550157)
+	DestChainID         = uint64(1337)
+	DestChainSelector   = uint64(3379446385462418246)
+)
+
+// Backwards compat, in principle these statuses are version dependent
+// TODO: Adjust integration tests to be version agnostic using readers
+var (
+	ExecutionStateSuccess = MessageExecutionState(cciptypes.ExecutionStateSuccess)
+	ExecutionStateFailure = MessageExecutionState(cciptypes.ExecutionStateFailure)
+)
+
+// MessageExecutionState aliases cciptypes.MessageExecutionState for these helpers.
+type MessageExecutionState cciptypes.MessageExecutionState
+
+// CommitOffchainConfig wraps the v1.2.0 JSON commit offchain config so tests can
+// encode it for OCR2 configuration.
+type CommitOffchainConfig struct {
+	v1_2_0.JSONCommitOffchainConfig
+}
+
+// Encode serializes the embedded JSON config via ccipconfig.EncodeOffchainConfig.
+func (c CommitOffchainConfig) Encode() ([]byte, error) {
+	return ccipconfig.EncodeOffchainConfig(c.JSONCommitOffchainConfig)
+}
+
+// NewCommitOffchainConfig assembles a CommitOffchainConfig from its individual fields.
+func NewCommitOffchainConfig(
+	GasPriceHeartBeat config.Duration,
+	DAGasPriceDeviationPPB uint32,
+	ExecGasPriceDeviationPPB uint32,
+	TokenPriceHeartBeat config.Duration,
+	TokenPriceDeviationPPB uint32,
+	InflightCacheExpiry config.Duration) CommitOffchainConfig {
+	return CommitOffchainConfig{v1_2_0.JSONCommitOffchainConfig{
+		GasPriceHeartBeat:        GasPriceHeartBeat,
+		DAGasPriceDeviationPPB:   DAGasPriceDeviationPPB,
+		ExecGasPriceDeviationPPB: ExecGasPriceDeviationPPB,
+		TokenPriceHeartBeat:      TokenPriceHeartBeat,
+		TokenPriceDeviationPPB:   TokenPriceDeviationPPB,
+		InflightCacheExpiry:      InflightCacheExpiry,
+	}}
+}
+
+// CommitOnchainConfig wraps ccipdata.CommitOnchainConfig for test construction.
+type CommitOnchainConfig struct {
+	ccipdata.CommitOnchainConfig
+}
+
+// NewCommitOnchainConfig builds a CommitOnchainConfig pointing at the given PriceRegistry.
+func NewCommitOnchainConfig(
+	PriceRegistry common.Address,
+) CommitOnchainConfig {
+	return CommitOnchainConfig{ccipdata.CommitOnchainConfig{
+		PriceRegistry: PriceRegistry,
+	}}
+}
+
+// ExecOnchainConfig wraps the v1.2.0 exec onchain config for test construction.
+type ExecOnchainConfig struct {
+	v1_2_0.ExecOnchainConfig
+}
+
+// NewExecOnchainConfig assembles an ExecOnchainConfig from its individual fields.
+func NewExecOnchainConfig(
+	PermissionLessExecutionThresholdSeconds uint32,
+	Router common.Address,
+	PriceRegistry common.Address,
+	MaxNumberOfTokensPerMsg uint16,
+	MaxDataBytes uint32,
+	MaxPoolReleaseOrMintGas uint32,
+) ExecOnchainConfig {
+	return ExecOnchainConfig{v1_2_0.ExecOnchainConfig{
+		PermissionLessExecutionThresholdSeconds: PermissionLessExecutionThresholdSeconds,
+		Router:                                  Router,
+		PriceRegistry:                           PriceRegistry,
+		MaxNumberOfTokensPerMsg:                 MaxNumberOfTokensPerMsg,
+		MaxDataBytes:                            MaxDataBytes,
+		MaxPoolReleaseOrMintGas:                 MaxPoolReleaseOrMintGas,
+	}}
+}
+
+// ExecOffchainConfig wraps the v1.2.0 JSON exec offchain config so tests can
+// encode it for OCR2 configuration.
+type ExecOffchainConfig struct {
+	v1_2_0.JSONExecOffchainConfig
+}
+
+// Encode serializes the embedded JSON config via ccipconfig.EncodeOffchainConfig.
+func (c ExecOffchainConfig) Encode() ([]byte, error) {
+	return ccipconfig.EncodeOffchainConfig(c.JSONExecOffchainConfig)
+}
+
+// NewExecOffchainConfig assembles an ExecOffchainConfig from its individual fields.
+func NewExecOffchainConfig(
+	DestOptimisticConfirmations uint32,
+	BatchGasLimit uint32,
+	RelativeBoostPerWaitHour float64,
+	InflightCacheExpiry config.Duration,
+	RootSnoozeTime config.Duration,
+) ExecOffchainConfig {
+	return ExecOffchainConfig{v1_2_0.JSONExecOffchainConfig{
+		DestOptimisticConfirmations: DestOptimisticConfirmations,
+		BatchGasLimit:               BatchGasLimit,
+		RelativeBoostPerWaitHour:    RelativeBoostPerWaitHour,
+		InflightCacheExpiry:         InflightCacheExpiry,
+		RootSnoozeTime:              RootSnoozeTime,
+	}}
+}
+
+// MaybeRevertReceiver pairs a test receiver contract with whether it is configured
+// to revert (Strict) when receiving a message.
+type MaybeRevertReceiver struct {
+	Receiver *maybe_revert_message_receiver.MaybeRevertMessageReceiver
+	Strict   bool
+}
+
+// Common holds the contracts and accounts shared by both the source and dest sides
+// of a simulated CCIP lane.
+type Common struct {
+	ChainID           uint64
+	ChainSelector     uint64
+	User              *bind.TransactOpts
+	Chain             *backends.SimulatedBackend
+	LinkToken         *link_token_interface.LinkToken
+	LinkTokenPool     *lock_release_token_pool.LockReleaseTokenPool
+	CustomToken       *link_token_interface.LinkToken
+	WrappedNative     *weth9.WETH9
+	WrappedNativePool *lock_release_token_pool_1_0_0.LockReleaseTokenPool
+	ARM               *mock_arm_contract.MockARMContract
+	ARMProxy          *arm_proxy_contract.ARMProxyContract
+	PriceRegistry     *price_registry_1_2_0.PriceRegistry
+}
+
+// SourceChain is the sending side of the lane: Common plus the router and onRamp.
+type SourceChain struct {
+	Common
+	Router *router.Router
+	OnRamp *evm_2_evm_onramp.EVM2EVMOnRamp
+}
+
+// DestinationChain is the receiving side: Common plus commit store, router,
+// offRamp and the test receivers.
+type DestinationChain struct {
+	Common
+
+	CommitStore *commit_store_1_2_0.CommitStore
+	Router      *router.Router
+	OffRamp     *evm_2_evm_offramp.EVM2EVMOffRamp
+	Receivers   []MaybeRevertReceiver
+}
+
+// OCR2Config mirrors the arguments of the contracts' SetOCR2Config call.
+type OCR2Config struct {
+	Signers               []common.Address
+	Transmitters          []common.Address
+	F                     uint8
+	OnchainConfig         []byte
+	OffchainConfigVersion uint64
+	OffchainConfig        []byte
+}
+
+// BalanceAssertion describes an expected balance; Within, when non-empty,
+// is the allowed deviation around Expected (both base-10 integer strings).
+type BalanceAssertion struct {
+	Name     string
+	Address  common.Address
+	Expected string
+	Getter   func(t *testing.T, addr common.Address) *big.Int
+	Within   string
+}
+
+// BalanceReq names an address whose balance should be fetched via Getter.
+type BalanceReq struct {
+	Name   string
+	Addr   common.Address
+	Getter func(t *testing.T, addr common.Address) *big.Int
+}
+
+// CCIPContracts bundles a full simulated source+dest CCIP deployment and the
+// oracle identities used to derive OCR2 configs.
+type CCIPContracts struct {
+	Source  SourceChain
+	Dest    DestinationChain
+	Oracles []confighelper.OracleIdentityExtra
+
+	commitOCRConfig, execOCRConfig *OCR2Config
+}
+
+// DeployNewOffRamp deploys a fresh 1.2.0 OffRamp on the dest chain wired to the
+// existing CommitStore and source OnRamp, recording any previously deployed
+// OffRamp as PrevOffRamp (migration path). The new contract replaces c.Dest.OffRamp.
+// Note it is not yet enabled on the router — see EnableOffRamp.
+func (c *CCIPContracts) DeployNewOffRamp(t *testing.T) {
+	// Zero address unless an OffRamp already exists to migrate from.
+	prevOffRamp := common.HexToAddress("")
+	if c.Dest.OffRamp != nil {
+		prevOffRamp = c.Dest.OffRamp.Address()
+	}
+	offRampAddress, _, _, err := evm_2_evm_offramp.DeployEVM2EVMOffRamp(
+		c.Dest.User,
+		c.Dest.Chain,
+		evm_2_evm_offramp.EVM2EVMOffRampStaticConfig{
+			CommitStore:         c.Dest.CommitStore.Address(),
+			ChainSelector:       c.Dest.ChainSelector,
+			SourceChainSelector: c.Source.ChainSelector,
+			OnRamp:              c.Source.OnRamp.Address(),
+			PrevOffRamp:         prevOffRamp,
+			ArmProxy:            c.Dest.ARMProxy.Address(),
+		},
+		[]common.Address{c.Source.LinkToken.Address()}, // source tokens
+		[]common.Address{c.Dest.LinkTokenPool.Address()}, // pools
+		evm_2_evm_offramp.RateLimiterConfig{
+			IsEnabled: true,
+			Capacity:  LinkUSDValue(100),
+			Rate:      LinkUSDValue(1),
+		},
+	)
+	require.NoError(t, err)
+	c.Dest.Chain.Commit()
+
+	c.Dest.OffRamp, err = evm_2_evm_offramp.NewEVM2EVMOffRamp(offRampAddress, c.Dest.Chain)
+	require.NoError(t, err)
+
+	c.Dest.Chain.Commit()
+	c.Source.Chain.Commit()
+}
+
+// EnableOffRamp registers the current OffRamp on the dest router and then applies
+// default exec OCR2 on/offchain configs to it.
+func (c *CCIPContracts) EnableOffRamp(t *testing.T) {
+	_, err := c.Dest.Router.ApplyRampUpdates(c.Dest.User, nil, nil, []router.RouterOffRamp{{SourceChainSelector: SourceChainSelector, OffRamp: c.Dest.OffRamp.Address()}})
+	require.NoError(t, err)
+	c.Dest.Chain.Commit()
+
+	onChainConfig := c.CreateDefaultExecOnchainConfig(t)
+	offChainConfig := c.CreateDefaultExecOffchainConfig(t)
+
+	c.SetupExecOCR2Config(t, onChainConfig, offChainConfig)
+}
+
+// EnableCommitStore applies default commit OCR2 configs to the CommitStore and
+// authorizes it as a price updater on the dest PriceRegistry.
+func (c *CCIPContracts) EnableCommitStore(t *testing.T) {
+	onChainConfig := c.CreateDefaultCommitOnchainConfig(t)
+	offChainConfig := c.CreateDefaultCommitOffchainConfig(t)
+
+	c.SetupCommitOCR2Config(t, onChainConfig, offChainConfig)
+
+	_, err := c.Dest.PriceRegistry.ApplyPriceUpdatersUpdates(c.Dest.User, []common.Address{c.Dest.CommitStore.Address()}, []common.Address{})
+	require.NoError(t, err)
+	c.Dest.Chain.Commit()
+}
+
+// DeployNewOnRamp deploys a fresh 1.2.0 OnRamp on the source chain with fixed test
+// fee/rate-limit parameters, recording any existing OnRamp as PrevOnRamp (migration
+// path). The new contract replaces c.Source.OnRamp; it is not yet registered on the
+// router — see EnableOnRamp.
+func (c *CCIPContracts) DeployNewOnRamp(t *testing.T) {
+	t.Log("Deploying new onRamp")
+	// find the last onRamp
+	prevOnRamp := common.HexToAddress("")
+	if c.Source.OnRamp != nil {
+		prevOnRamp = c.Source.OnRamp.Address()
+	}
+	onRampAddress, _, _, err := evm_2_evm_onramp.DeployEVM2EVMOnRamp(
+		c.Source.User,  // user
+		c.Source.Chain, // client
+		evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{
+			LinkToken:         c.Source.LinkToken.Address(),
+			ChainSelector:     c.Source.ChainSelector,
+			DestChainSelector: c.Dest.ChainSelector,
+			DefaultTxGasLimit: 200_000,
+			MaxNopFeesJuels:   big.NewInt(0).Mul(big.NewInt(100_000_000), big.NewInt(1e18)),
+			PrevOnRamp:        prevOnRamp,
+			ArmProxy:          c.Source.ARM.Address(), // ARM
+		},
+		evm_2_evm_onramp.EVM2EVMOnRampDynamicConfig{
+			Router:                            c.Source.Router.Address(),
+			MaxNumberOfTokensPerMsg:           5,
+			DestGasOverhead:                   350_000,
+			DestGasPerPayloadByte:             16,
+			DestDataAvailabilityOverheadGas:   33_596,
+			DestGasPerDataAvailabilityByte:    16,
+			DestDataAvailabilityMultiplierBps: 6840, // 0.684
+			PriceRegistry:                     c.Source.PriceRegistry.Address(),
+			MaxDataBytes:                      1e5,
+			MaxPerMsgGasLimit:                 4_000_000,
+		},
+		// Initial token->pool mapping: only the source LINK pool.
+		[]evm_2_evm_onramp.InternalPoolUpdate{
+			{
+				Token: c.Source.LinkToken.Address(),
+				Pool:  c.Source.LinkTokenPool.Address(),
+			},
+		},
+		evm_2_evm_onramp.RateLimiterConfig{
+			IsEnabled: true,
+			Capacity:  LinkUSDValue(100),
+			Rate:      LinkUSDValue(1),
+		},
+		// Fee token configs: LINK and wrapped native are accepted as fee tokens.
+		[]evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs{
+			{
+				Token:                      c.Source.LinkToken.Address(),
+				NetworkFeeUSDCents:         1_00,
+				GasMultiplierWeiPerEth:     1e18,
+				PremiumMultiplierWeiPerEth: 9e17,
+				Enabled:                    true,
+			},
+			{
+				Token:                      c.Source.WrappedNative.Address(),
+				NetworkFeeUSDCents:         1_00,
+				GasMultiplierWeiPerEth:     1e18,
+				PremiumMultiplierWeiPerEth: 1e18,
+				Enabled:                    true,
+			},
+		},
+		[]evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{
+			{
+				Token:             c.Source.LinkToken.Address(),
+				MinFeeUSDCents:    50,           // $0.5
+				MaxFeeUSDCents:    1_000_000_00, // $ 1 million
+				DeciBps:           5_0,          // 5 bps
+				DestGasOverhead:   34_000,
+				DestBytesOverhead: 32,
+			},
+		},
+		[]evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{},
+	)
+
+	require.NoError(t, err)
+	c.Source.Chain.Commit()
+	c.Dest.Chain.Commit()
+	c.Source.OnRamp, err = evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampAddress, c.Source.Chain)
+	require.NoError(t, err)
+	c.Source.Chain.Commit()
+	c.Dest.Chain.Commit()
+}
+
+// EnableOnRamp registers the current OnRamp on the source router for the dest
+// chain selector, then commits both simulated chains.
+func (c *CCIPContracts) EnableOnRamp(t *testing.T) {
+	t.Log("Setting onRamp on source router")
+	_, err := c.Source.Router.ApplyRampUpdates(c.Source.User, []router.RouterOnRamp{{DestChainSelector: c.Dest.ChainSelector, OnRamp: c.Source.OnRamp.Address()}}, nil, nil)
+	require.NoError(t, err)
+	c.Source.Chain.Commit()
+	c.Dest.Chain.Commit()
+}
+
+// DeployNewCommitStore deploys a fresh 1.2.0 CommitStore on the dest chain wired
+// to the current source OnRamp, and replaces c.Dest.CommitStore with it.
+func (c *CCIPContracts) DeployNewCommitStore(t *testing.T) {
+	commitStoreAddress, _, _, err := commit_store_1_2_0.DeployCommitStore(
+		c.Dest.User,  // user
+		c.Dest.Chain, // client
+		commit_store_1_2_0.CommitStoreStaticConfig{
+			ChainSelector:       c.Dest.ChainSelector,
+			SourceChainSelector: c.Source.ChainSelector,
+			OnRamp:              c.Source.OnRamp.Address(),
+			ArmProxy:            c.Dest.ARMProxy.Address(),
+		},
+	)
+	require.NoError(t, err)
+	c.Dest.Chain.Commit()
+	// since CommitStoreHelper derives from CommitStore, it's safe to instantiate both on same address
+	c.Dest.CommitStore, err = commit_store_1_2_0.NewCommitStore(commitStoreAddress, c.Dest.Chain)
+	require.NoError(t, err)
+}
+
+// DeployNewPriceRegistry deploys a fresh 1.2.0 PriceRegistry on the dest chain
+// (with the CommitStore authorized as price updater and a two-week staleness
+// window), replaces c.Dest.PriceRegistry, and seeds initial token/gas prices:
+// dest LINK at 8 USD, dest wrapped native at 1 USD, and source-chain gas at 2000e9.
+func (c *CCIPContracts) DeployNewPriceRegistry(t *testing.T) {
+	t.Log("Deploying new Price Registry")
+	destPricesAddress, _, _, err := price_registry_1_2_0.DeployPriceRegistry(
+		c.Dest.User,
+		c.Dest.Chain,
+		[]common.Address{c.Dest.CommitStore.Address()},
+		[]common.Address{c.Dest.LinkToken.Address()},
+		60*60*24*14, // two weeks
+	)
+	require.NoError(t, err)
+	c.Source.Chain.Commit()
+	c.Dest.Chain.Commit()
+	c.Dest.PriceRegistry, err = price_registry_1_2_0.NewPriceRegistry(destPricesAddress, c.Dest.Chain)
+	require.NoError(t, err)
+
+	priceUpdates := price_registry_1_2_0.InternalPriceUpdates{
+		TokenPriceUpdates: []price_registry_1_2_0.InternalTokenPriceUpdate{
+			{
+				SourceToken: c.Dest.LinkToken.Address(),
+				UsdPerToken: big.NewInt(8e18), // 8usd
+			},
+			{
+				SourceToken: c.Dest.WrappedNative.Address(),
+				UsdPerToken: big.NewInt(1e18), // 1usd
+			},
+		},
+		GasPriceUpdates: []price_registry_1_2_0.InternalGasPriceUpdate{
+			{
+				DestChainSelector: c.Source.ChainSelector,
+				UsdPerUnitGas:     big.NewInt(2000e9), // $2000 per eth * 1gwei = 2000e9
+			},
+		},
+	}
+	_, err = c.Dest.PriceRegistry.UpdatePrices(c.Dest.User, priceUpdates)
+	require.NoError(t, err)
+
+	c.Source.Chain.Commit()
+	c.Dest.Chain.Commit()
+
+	t.Logf("New Price Registry deployed at %s", destPricesAddress.String())
+}
+
+// SetNopsOnRamp sets the node operators and their weights on the source OnRamp
+// and waits for the transaction to be mined.
+// NOTE(review): unlike other helpers in this package, the mined receipt's Status
+// is not asserted here, so a reverted SetNops would go unnoticed — consider
+// checking rec.Status == types.ReceiptStatusSuccessful.
+func (c *CCIPContracts) SetNopsOnRamp(t *testing.T, nopsAndWeights []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight) {
+	tx, err := c.Source.OnRamp.SetNops(c.Source.User, nopsAndWeights)
+	require.NoError(t, err)
+	c.Source.Chain.Commit()
+	_, err = bind.WaitMined(context.Background(), c.Source.Chain, tx)
+	require.NoError(t, err)
+}
+
+// GetSourceLinkBalance returns addr's LINK balance on the source chain.
+func (c *CCIPContracts) GetSourceLinkBalance(t *testing.T, addr common.Address) *big.Int {
+	return GetBalance(t, c.Source.Chain, c.Source.LinkToken.Address(), addr)
+}
+
+// GetDestLinkBalance returns addr's LINK balance on the dest chain.
+func (c *CCIPContracts) GetDestLinkBalance(t *testing.T, addr common.Address) *big.Int {
+	return GetBalance(t, c.Dest.Chain, c.Dest.LinkToken.Address(), addr)
+}
+
+// GetSourceWrappedTokenBalance returns addr's wrapped-native balance on the source chain.
+func (c *CCIPContracts) GetSourceWrappedTokenBalance(t *testing.T, addr common.Address) *big.Int {
+	return GetBalance(t, c.Source.Chain, c.Source.WrappedNative.Address(), addr)
+}
+
+// GetDestWrappedTokenBalance returns addr's wrapped-native balance on the dest chain.
+func (c *CCIPContracts) GetDestWrappedTokenBalance(t *testing.T, addr common.Address) *big.Int {
+	return GetBalance(t, c.Dest.Chain, c.Dest.WrappedNative.Address(), addr)
+}
+
+// AssertBalances checks each BalanceAssertion: an exact string match when Within
+// is empty, otherwise that the actual balance lies strictly inside
+// (Expected-Within, Expected+Within).
+// NOTE(review): the boolean results of big.Int SetString are discarded, so a
+// malformed Expected/Within silently becomes nil and the comparison misbehaves;
+// also the range check is exclusive at both bounds — an actual balance exactly
+// equal to Expected±Within fails.
+func (c *CCIPContracts) AssertBalances(t *testing.T, bas []BalanceAssertion) {
+	for _, b := range bas {
+		actual := b.Getter(t, b.Address)
+		t.Log("Checking balance for", b.Name, "at", b.Address.Hex(), "got", actual)
+		require.NotNil(t, actual, "%v getter return nil", b.Name)
+		if b.Within == "" {
+			require.Equal(t, b.Expected, actual.String(), "wrong balance for %s got %s want %s", b.Name, actual, b.Expected)
+		} else {
+			bi, _ := big.NewInt(0).SetString(b.Expected, 10)
+			withinI, _ := big.NewInt(0).SetString(b.Within, 10)
+			high := big.NewInt(0).Add(bi, withinI)
+			low := big.NewInt(0).Sub(bi, withinI)
+			require.Equal(t, -1, actual.Cmp(high), "wrong balance for %s got %s outside expected range [%s, %s]", b.Name, actual, low, high)
+			require.Equal(t, 1, actual.Cmp(low), "wrong balance for %s got %s outside expected range [%s, %s]", b.Name, actual, low, high)
+		}
+	}
+}
+
+// AccountToAddress decodes hex-encoded OCR2 account strings into EVM addresses,
+// returning an error if any entry is not valid hex or not exactly 20 bytes.
+func AccountToAddress(accounts []ocr2types.Account) (addresses []common.Address, err error) {
+	for _, signer := range accounts {
+		bytes, err := hexutil.Decode(string(signer))
+		if err != nil {
+			return []common.Address{}, errors.Wrap(err, fmt.Sprintf("given address is not valid %s", signer))
+		}
+		if len(bytes) != 20 {
+			return []common.Address{}, errors.Errorf("address is not 20 bytes %s", signer)
+		}
+		addresses = append(addresses, common.BytesToAddress(bytes))
+	}
+	return addresses, nil
+}
+
+// OnchainPublicKeyToAddress converts raw onchain public keys into EVM addresses,
+// returning an error if any key is not exactly 20 bytes.
+func OnchainPublicKeyToAddress(publicKeys []ocrtypes.OnchainPublicKey) (addresses []common.Address, err error) {
+	for _, signer := range publicKeys {
+		if len(signer) != 20 {
+			return []common.Address{}, errors.Errorf("address is not 20 bytes %s", signer)
+		}
+		addresses = append(addresses, common.BytesToAddress(signer))
+	}
+	return addresses, nil
+}
+
+// DeriveOCR2Config derives an OCR2Config from the given oracle identities and raw
+// on/offchain config bytes, using fixed fast test timings via
+// confighelper.ContractSetConfigArgsForTests (4 oracles, 1 fault tolerated),
+// and converting the resulting signer keys / transmitter accounts to addresses.
+func (c *CCIPContracts) DeriveOCR2Config(t *testing.T, oracles []confighelper.OracleIdentityExtra, rawOnchainConfig []byte, rawOffchainConfig []byte) *OCR2Config {
+	signers, transmitters, threshold, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper.ContractSetConfigArgsForTests(
+		2*time.Second,        // deltaProgress
+		1*time.Second,        // deltaResend
+		1*time.Second,        // deltaRound
+		500*time.Millisecond, // deltaGrace
+		2*time.Second,        // deltaStage
+		3,
+		[]int{1, 1, 1, 1},
+		oracles,
+		rawOffchainConfig,
+		50*time.Millisecond, // Max duration query
+		1*time.Second,       // Max duration observation
+		100*time.Millisecond,
+		100*time.Millisecond,
+		100*time.Millisecond,
+		1, // faults
+		rawOnchainConfig,
+	)
+	require.NoError(t, err)
+	lggr := logger.TestLogger(t)
+	lggr.Infow("Setting Config on Oracle Contract",
+		"signers", signers,
+		"transmitters", transmitters,
+		"threshold", threshold,
+		"onchainConfig", onchainConfig,
+		"encodedConfigVersion", offchainConfigVersion,
+	)
+	// OCR2 yields public keys / account strings; contracts want EVM addresses.
+	signerAddresses, err := OnchainPublicKeyToAddress(signers)
+	require.NoError(t, err)
+	transmitterAddresses, err := AccountToAddress(transmitters)
+	require.NoError(t, err)
+
+	return &OCR2Config{
+		Signers:               signerAddresses,
+		Transmitters:          transmitterAddresses,
+		F:                     threshold,
+		OnchainConfig:         onchainConfig,
+		OffchainConfigVersion: offchainConfigVersion,
+		OffchainConfig:        offchainConfig,
+	}
+}
+
+// SetupCommitOCR2Config derives and applies the commit OCR2 config to the dest
+// CommitStore, caching it on c.commitOCRConfig.
+func (c *CCIPContracts) SetupCommitOCR2Config(t *testing.T, commitOnchainConfig, commitOffchainConfig []byte) {
+	c.commitOCRConfig = c.DeriveOCR2Config(t, c.Oracles, commitOnchainConfig, commitOffchainConfig)
+	// Set the DON on the commit store
+	_, err := c.Dest.CommitStore.SetOCR2Config(
+		c.Dest.User,
+		c.commitOCRConfig.Signers,
+		c.commitOCRConfig.Transmitters,
+		c.commitOCRConfig.F,
+		c.commitOCRConfig.OnchainConfig,
+		c.commitOCRConfig.OffchainConfigVersion,
+		c.commitOCRConfig.OffchainConfig,
+	)
+	require.NoError(t, err)
+	c.Dest.Chain.Commit()
+}
+
+// SetupExecOCR2Config derives and applies the exec OCR2 config to the dest
+// OffRamp, caching it on c.execOCRConfig.
+func (c *CCIPContracts) SetupExecOCR2Config(t *testing.T, execOnchainConfig, execOffchainConfig []byte) {
+	c.execOCRConfig = c.DeriveOCR2Config(t, c.Oracles, execOnchainConfig, execOffchainConfig)
+	// Same DON on the offramp
+	_, err := c.Dest.OffRamp.SetOCR2Config(
+		c.Dest.User,
+		c.execOCRConfig.Signers,
+		c.execOCRConfig.Transmitters,
+		c.execOCRConfig.F,
+		c.execOCRConfig.OnchainConfig,
+		c.execOCRConfig.OffchainConfigVersion,
+		c.execOCRConfig.OffchainConfig,
+	)
+	require.NoError(t, err)
+	c.Dest.Chain.Commit()
+}
+
+// SetupOnchainConfig applies both the commit and exec OCR2 configs and returns the
+// dest-chain block number captured just before the config transactions, so callers
+// can start log scanning from there.
+func (c *CCIPContracts) SetupOnchainConfig(t *testing.T, commitOnchainConfig, commitOffchainConfig, execOnchainConfig, execOffchainConfig []byte) int64 {
+	// Note We do NOT set the payees, payment is done in the OCR2Base implementation
+	blockBeforeConfig, err := c.Dest.Chain.BlockByNumber(context.Background(), nil)
+	require.NoError(t, err)
+
+	c.SetupCommitOCR2Config(t, commitOnchainConfig, commitOffchainConfig)
+	c.SetupExecOCR2Config(t, execOnchainConfig, execOffchainConfig)
+
+	return blockBeforeConfig.Number().Int64()
+}
+
+// SetupLockAndMintTokenPool wires a lock-and-mint token lane for sourceTokenAddress:
+// it deploys a BurnMint ERC677 + pool on dest, a LockRelease pool on source,
+// cross-registers the chain selectors with rate limits, seeds a source token price,
+// enables wrapped native as a fee token, and registers the pools on the
+// OnRamp/OffRamp. Returns the source pool address and the dest token handle.
+func (c *CCIPContracts) SetupLockAndMintTokenPool(
+	sourceTokenAddress common.Address,
+	wrappedTokenName,
+	wrappedTokenSymbol string) (common.Address, *burn_mint_erc677.BurnMintERC677, error) {
+	// Deploy dest token & pool
+	destTokenAddress, _, _, err := burn_mint_erc677.DeployBurnMintERC677(c.Dest.User, c.Dest.Chain, wrappedTokenName, wrappedTokenSymbol, 18, big.NewInt(0))
+	if err != nil {
+		return [20]byte{}, nil, err
+	}
+	c.Dest.Chain.Commit()
+
+	destToken, err := burn_mint_erc677.NewBurnMintERC677(destTokenAddress, c.Dest.Chain)
+	if err != nil {
+		return [20]byte{}, nil, err
+	}
+
+	destPoolAddress, _, destPool, err := burn_mint_token_pool.DeployBurnMintTokenPool(
+		c.Dest.User,
+		c.Dest.Chain,
+		destTokenAddress,
+		[]common.Address{}, // pool originalSender allowList
+		c.Dest.ARMProxy.Address(),
+		c.Dest.Router.Address(),
+	)
+	if err != nil {
+		return [20]byte{}, nil, err
+	}
+	c.Dest.Chain.Commit()
+
+	// The dest pool must be able to mint/burn the dest token.
+	_, err = destToken.GrantMintAndBurnRoles(c.Dest.User, destPoolAddress)
+	if err != nil {
+		return [20]byte{}, nil, err
+	}
+
+	// Allow the source chain on the dest pool, with inbound/outbound rate limits.
+	_, err = destPool.ApplyChainUpdates(c.Dest.User,
+		[]burn_mint_token_pool.TokenPoolChainUpdate{
+			{
+				RemoteChainSelector: c.Source.ChainSelector,
+				Allowed:             true,
+				OutboundRateLimiterConfig: burn_mint_token_pool.RateLimiterConfig{
+					IsEnabled: true,
+					Capacity:  HundredLink,
+					Rate:      big.NewInt(1e18),
+				},
+				InboundRateLimiterConfig: burn_mint_token_pool.RateLimiterConfig{
+					IsEnabled: true,
+					Capacity:  HundredLink,
+					Rate:      big.NewInt(1e18),
+				},
+			},
+		})
+	if err != nil {
+		return [20]byte{}, nil, err
+	}
+	c.Dest.Chain.Commit()
+
+	sourcePoolAddress, _, sourcePool, err := lock_release_token_pool.DeployLockReleaseTokenPool(
+		c.Source.User,
+		c.Source.Chain,
+		sourceTokenAddress,
+		[]common.Address{}, // empty allowList at deploy time indicates pool has no original sender restrictions
+		c.Source.ARMProxy.Address(),
+		true,
+		c.Source.Router.Address(),
+	)
+	if err != nil {
+		return [20]byte{}, nil, err
+	}
+	c.Source.Chain.Commit()
+
+	// set onRamp as valid caller for source pool
+	_, err = sourcePool.ApplyChainUpdates(c.Source.User, []lock_release_token_pool.TokenPoolChainUpdate{
+		{
+			RemoteChainSelector: c.Dest.ChainSelector,
+			Allowed:             true,
+			OutboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{
+				IsEnabled: true,
+				Capacity:  HundredLink,
+				Rate:      big.NewInt(1e18),
+			},
+			InboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{
+				IsEnabled: true,
+				Capacity:  HundredLink,
+				Rate:      big.NewInt(1e18),
+			},
+		},
+	})
+	if err != nil {
+		return [20]byte{}, nil, err
+	}
+	c.Source.Chain.Commit()
+
+	wrappedNativeAddress, err := c.Source.Router.GetWrappedNative(nil)
+	if err != nil {
+		return [20]byte{}, nil, err
+	}
+
+	//native token is used as fee token
+	_, err = c.Source.PriceRegistry.UpdatePrices(c.Source.User, price_registry_1_2_0.InternalPriceUpdates{
+		TokenPriceUpdates: []price_registry_1_2_0.InternalTokenPriceUpdate{
+			{
+				SourceToken: sourceTokenAddress,
+				UsdPerToken: big.NewInt(5),
+			},
+		},
+		GasPriceUpdates: []price_registry_1_2_0.InternalGasPriceUpdate{},
+	})
+	if err != nil {
+		return [20]byte{}, nil, err
+	}
+	c.Source.Chain.Commit()
+
+	_, err = c.Source.PriceRegistry.ApplyFeeTokensUpdates(c.Source.User, []common.Address{wrappedNativeAddress}, nil)
+	if err != nil {
+		return [20]byte{}, nil, err
+	}
+	c.Source.Chain.Commit()
+
+	// add new token pool created above
+	_, err = c.Source.OnRamp.ApplyPoolUpdates(c.Source.User, nil, []evm_2_evm_onramp.InternalPoolUpdate{
+		{
+			Token: sourceTokenAddress,
+			Pool:  sourcePoolAddress,
+		},
+	})
+	if err != nil {
+		return [20]byte{}, nil, err
+	}
+
+	// Map the source token to the dest pool on the OffRamp side as well.
+	_, err = c.Dest.OffRamp.ApplyPoolUpdates(c.Dest.User, nil, []evm_2_evm_offramp.InternalPoolUpdate{
+		{
+			Token: sourceTokenAddress,
+			Pool:  destPoolAddress,
+		},
+	})
+	if err != nil {
+		return [20]byte{}, nil, err
+	}
+	c.Dest.Chain.Commit()
+
+	return sourcePoolAddress, destToken, err
+}
+
+// SendMessage builds a LINK-fee CCIP message carrying "hello" plus a LINK token
+// transfer to receiverAddr, approves fee+tokenAmount on the source router, and
+// submits it via SendRequest.
+func (c *CCIPContracts) SendMessage(t *testing.T, gasLimit, tokenAmount *big.Int, receiverAddr common.Address) {
+	extraArgs, err := GetEVMExtraArgsV1(gasLimit, false)
+	require.NoError(t, err)
+	msg := router.ClientEVM2AnyMessage{
+		Receiver: MustEncodeAddress(t, receiverAddr),
+		Data:     []byte("hello"),
+		TokenAmounts: []router.ClientEVMTokenAmount{
+			{
+				Token:  c.Source.LinkToken.Address(),
+				Amount: tokenAmount,
+			},
+		},
+		FeeToken:  c.Source.LinkToken.Address(),
+		ExtraArgs: extraArgs,
+	}
+	fee, err := c.Source.Router.GetFee(nil, c.Dest.ChainSelector, msg)
+	require.NoError(t, err)
+	// Currently no overhead and 1gwei dest gas price. So fee is simply gasLimit * gasPrice.
+	// require.Equal(t, new(big.Int).Mul(gasLimit, gasPrice).String(), fee.String())
+	// Approve the fee amount + the token amount
+	_, err = c.Source.LinkToken.Approve(c.Source.User, c.Source.Router.Address(), new(big.Int).Add(fee, tokenAmount))
+	require.NoError(t, err)
+	c.Source.Chain.Commit()
+	c.SendRequest(t, msg)
+}
+
+// GetBalances resolves each BalanceReq via its Getter, keyed by Name; it errors
+// if any getter returns nil.
+func GetBalances(t *testing.T, brs []BalanceReq) (map[string]*big.Int, error) {
+	m := make(map[string]*big.Int)
+	for _, br := range brs {
+		m[br.Name] = br.Getter(t, br.Addr)
+		if m[br.Name] == nil {
+			return nil, fmt.Errorf("%v getter return nil", br.Name)
+		}
+	}
+	return m, nil
+}
+
+// MustAddBigInt returns a + b, where b is a base-10 integer string.
+// NOTE(review): despite the "Must" name the SetString ok flag is discarded;
+// a malformed b yields a nil operand rather than an explicit failure.
+func MustAddBigInt(a *big.Int, b string) *big.Int {
+	bi, _ := big.NewInt(0).SetString(b, 10)
+	return big.NewInt(0).Add(a, bi)
+}
+
+// MustSubBigInt returns a - b, where b is a base-10 integer string.
+// NOTE(review): same discarded SetString ok flag as MustAddBigInt.
+func MustSubBigInt(a *big.Int, b string) *big.Int {
+	bi, _ := big.NewInt(0).SetString(b, 10)
+	return big.NewInt(0).Sub(a, bi)
+}
+
+// MustEncodeAddress ABI-encodes a single address, failing the test on error.
+func MustEncodeAddress(t *testing.T, address common.Address) []byte {
+	bts, err := utils.ABIEncode(`[{"type":"address"}]`, address)
+	require.NoError(t, err)
+	return bts
+}
+
+func SetupCCIPContracts(t *testing.T, sourceChainID, sourceChainSelector, destChainID, destChainSelector uint64) CCIPContracts {
+ sourceChain, sourceUser := testhelpers.SetupChain(t)
+ destChain, destUser := testhelpers.SetupChain(t)
+
+ armSourceAddress, _, _, err := mock_arm_contract.DeployMockARMContract(
+ sourceUser,
+ sourceChain,
+ )
+ require.NoError(t, err)
+ sourceARM, err := mock_arm_contract.NewMockARMContract(armSourceAddress, sourceChain)
+ require.NoError(t, err)
+ armProxySourceAddress, _, _, err := arm_proxy_contract.DeployARMProxyContract(
+ sourceUser,
+ sourceChain,
+ armSourceAddress,
+ )
+ require.NoError(t, err)
+ sourceARMProxy, err := arm_proxy_contract.NewARMProxyContract(armProxySourceAddress, sourceChain)
+ require.NoError(t, err)
+ sourceChain.Commit()
+
+ armDestAddress, _, _, err := mock_arm_contract.DeployMockARMContract(
+ destUser,
+ destChain,
+ )
+ require.NoError(t, err)
+ armProxyDestAddress, _, _, err := arm_proxy_contract.DeployARMProxyContract(
+ destUser,
+ destChain,
+ armDestAddress,
+ )
+ require.NoError(t, err)
+ destChain.Commit()
+ destARM, err := mock_arm_contract.NewMockARMContract(armDestAddress, destChain)
+ require.NoError(t, err)
+ destARMProxy, err := arm_proxy_contract.NewARMProxyContract(armProxyDestAddress, destChain)
+ require.NoError(t, err)
+
+ // Deploy link token and pool on source chain
+ sourceLinkTokenAddress, _, _, err := link_token_interface.DeployLinkToken(sourceUser, sourceChain)
+ require.NoError(t, err)
+ sourceChain.Commit()
+ sourceLinkToken, err := link_token_interface.NewLinkToken(sourceLinkTokenAddress, sourceChain)
+ require.NoError(t, err)
+
+ // Create router
+ sourceWeth9addr, _, _, err := weth9.DeployWETH9(sourceUser, sourceChain)
+ require.NoError(t, err)
+ sourceWrapped, err := weth9.NewWETH9(sourceWeth9addr, sourceChain)
+ require.NoError(t, err)
+
+ sourceRouterAddress, _, _, err := router.DeployRouter(sourceUser, sourceChain, sourceWeth9addr, armProxySourceAddress)
+ require.NoError(t, err)
+ sourceRouter, err := router.NewRouter(sourceRouterAddress, sourceChain)
+ require.NoError(t, err)
+ sourceChain.Commit()
+
+ sourceWeth9PoolAddress, _, _, err := lock_release_token_pool_1_0_0.DeployLockReleaseTokenPool(
+ sourceUser,
+ sourceChain,
+ sourceWeth9addr,
+ []common.Address{},
+ armProxySourceAddress,
+ )
+ require.NoError(t, err)
+ sourceChain.Commit()
+
+ sourceWeth9Pool, err := lock_release_token_pool_1_0_0.NewLockReleaseTokenPool(sourceWeth9PoolAddress, sourceChain)
+ require.NoError(t, err)
+
+ sourcePoolAddress, _, _, err := lock_release_token_pool.DeployLockReleaseTokenPool(
+ sourceUser,
+ sourceChain,
+ sourceLinkTokenAddress,
+ []common.Address{},
+ armProxySourceAddress,
+ true,
+ sourceRouterAddress,
+ )
+ require.NoError(t, err)
+ sourceChain.Commit()
+ sourcePool, err := lock_release_token_pool.NewLockReleaseTokenPool(sourcePoolAddress, sourceChain)
+ require.NoError(t, err)
+
+ // Deploy custom token pool source
+ sourceCustomTokenAddress, _, _, err := link_token_interface.DeployLinkToken(sourceUser, sourceChain) // Just re-use this, it's an ERC20.
+ require.NoError(t, err)
+ sourceCustomToken, err := link_token_interface.NewLinkToken(sourceCustomTokenAddress, sourceChain)
+ require.NoError(t, err)
+ destChain.Commit()
+
+ // Deploy custom token pool dest
+ destCustomTokenAddress, _, _, err := link_token_interface.DeployLinkToken(destUser, destChain) // Just re-use this, it's an ERC20.
+ require.NoError(t, err)
+ destCustomToken, err := link_token_interface.NewLinkToken(destCustomTokenAddress, destChain)
+ require.NoError(t, err)
+ destChain.Commit()
+
+ // Deploy and configure onramp
+ sourcePricesAddress, _, _, err := price_registry_1_2_0.DeployPriceRegistry(
+ sourceUser,
+ sourceChain,
+ nil,
+ []common.Address{sourceLinkTokenAddress, sourceWeth9addr},
+ 60*60*24*14, // two weeks
+ )
+ require.NoError(t, err)
+
+ srcPriceRegistry, err := price_registry_1_2_0.NewPriceRegistry(sourcePricesAddress, sourceChain)
+ require.NoError(t, err)
+
+ _, err = srcPriceRegistry.UpdatePrices(sourceUser, price_registry_1_2_0.InternalPriceUpdates{
+ TokenPriceUpdates: []price_registry_1_2_0.InternalTokenPriceUpdate{
+ {
+ SourceToken: sourceLinkTokenAddress,
+ UsdPerToken: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(20)),
+ },
+ {
+ SourceToken: sourceWeth9addr,
+ UsdPerToken: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(2000)),
+ },
+ },
+ GasPriceUpdates: []price_registry_1_2_0.InternalGasPriceUpdate{
+ {
+ DestChainSelector: destChainSelector,
+ UsdPerUnitGas: big.NewInt(20000e9),
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ onRampAddress, _, _, err := evm_2_evm_onramp.DeployEVM2EVMOnRamp(
+ sourceUser, // user
+ sourceChain, // client
+ evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{
+ LinkToken: sourceLinkTokenAddress,
+ ChainSelector: sourceChainSelector,
+ DestChainSelector: destChainSelector,
+ DefaultTxGasLimit: 200_000,
+ MaxNopFeesJuels: big.NewInt(0).Mul(big.NewInt(100_000_000), big.NewInt(1e18)),
+ PrevOnRamp: common.HexToAddress(""),
+ ArmProxy: armProxySourceAddress, // ARM
+ },
+ evm_2_evm_onramp.EVM2EVMOnRampDynamicConfig{
+ Router: sourceRouterAddress,
+ MaxNumberOfTokensPerMsg: 5,
+ DestGasOverhead: 350_000,
+ DestGasPerPayloadByte: 16,
+ DestDataAvailabilityOverheadGas: 33_596,
+ DestGasPerDataAvailabilityByte: 16,
+ DestDataAvailabilityMultiplierBps: 6840, // 0.684
+ PriceRegistry: sourcePricesAddress,
+ MaxDataBytes: 1e5,
+ MaxPerMsgGasLimit: 4_000_000,
+ },
+ []evm_2_evm_onramp.InternalPoolUpdate{
+ {
+ Token: sourceLinkTokenAddress,
+ Pool: sourcePoolAddress,
+ },
+ {
+ Token: sourceWeth9addr,
+ Pool: sourceWeth9PoolAddress,
+ },
+ },
+ evm_2_evm_onramp.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: LinkUSDValue(100),
+ Rate: LinkUSDValue(1),
+ },
+ []evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs{
+ {
+ Token: sourceLinkTokenAddress,
+ NetworkFeeUSDCents: 1_00,
+ GasMultiplierWeiPerEth: 1e18,
+ PremiumMultiplierWeiPerEth: 9e17,
+ Enabled: true,
+ },
+ {
+ Token: sourceWeth9addr,
+ NetworkFeeUSDCents: 1_00,
+ GasMultiplierWeiPerEth: 1e18,
+ PremiumMultiplierWeiPerEth: 1e18,
+ Enabled: true,
+ },
+ },
+ []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{
+ {
+ Token: sourceLinkTokenAddress,
+ MinFeeUSDCents: 50, // $0.5
+ MaxFeeUSDCents: 1_000_000_00, // $ 1 million
+ DeciBps: 5_0, // 5 bps
+ DestGasOverhead: 34_000,
+ DestBytesOverhead: 32,
+ },
+ },
+ []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{},
+ )
+ require.NoError(t, err)
+ onRamp, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampAddress, sourceChain)
+ require.NoError(t, err)
+ _, err = sourcePool.ApplyChainUpdates(
+ sourceUser,
+ []lock_release_token_pool.TokenPoolChainUpdate{{
+ RemoteChainSelector: DestChainSelector,
+ Allowed: true,
+ OutboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: HundredLink,
+ Rate: big.NewInt(1e18),
+ },
+ InboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: HundredLink,
+ Rate: big.NewInt(1e18),
+ },
+ }},
+ )
+ require.NoError(t, err)
+ _, err = sourceWeth9Pool.ApplyRampUpdates(sourceUser,
+ []lock_release_token_pool_1_0_0.TokenPoolRampUpdate{{Ramp: onRampAddress, Allowed: true,
+ RateLimiterConfig: lock_release_token_pool_1_0_0.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: HundredLink,
+ Rate: big.NewInt(1e18),
+ },
+ }},
+ []lock_release_token_pool_1_0_0.TokenPoolRampUpdate{},
+ )
+ require.NoError(t, err)
+ sourceChain.Commit()
+ _, err = sourceRouter.ApplyRampUpdates(sourceUser, []router.RouterOnRamp{{DestChainSelector: destChainSelector, OnRamp: onRampAddress}}, nil, nil)
+ require.NoError(t, err)
+ sourceChain.Commit()
+
+ destWethaddr, _, _, err := weth9.DeployWETH9(destUser, destChain)
+ require.NoError(t, err)
+ destWrapped, err := weth9.NewWETH9(destWethaddr, destChain)
+ require.NoError(t, err)
+
+ // Create dest router
+ destRouterAddress, _, _, err := router.DeployRouter(destUser, destChain, destWethaddr, armProxyDestAddress)
+ require.NoError(t, err)
+ destChain.Commit()
+ destRouter, err := router.NewRouter(destRouterAddress, destChain)
+ require.NoError(t, err)
+
+ // Deploy link token and pool on destination chain
+ destLinkTokenAddress, _, _, err := link_token_interface.DeployLinkToken(destUser, destChain)
+ require.NoError(t, err)
+ destChain.Commit()
+ destLinkToken, err := link_token_interface.NewLinkToken(destLinkTokenAddress, destChain)
+ require.NoError(t, err)
+ destPoolAddress, _, _, err := lock_release_token_pool.DeployLockReleaseTokenPool(
+ destUser,
+ destChain,
+ destLinkTokenAddress,
+ []common.Address{},
+ armProxyDestAddress,
+ true,
+ destRouterAddress,
+ )
+ require.NoError(t, err)
+ destChain.Commit()
+ destPool, err := lock_release_token_pool.NewLockReleaseTokenPool(destPoolAddress, destChain)
+ require.NoError(t, err)
+ destChain.Commit()
+
+ // Float the offramp pool
+ o, err := destPool.Owner(nil)
+ require.NoError(t, err)
+ require.Equal(t, destUser.From.String(), o.String())
+ _, err = destPool.SetRebalancer(destUser, destUser.From)
+ require.NoError(t, err)
+ _, err = destLinkToken.Approve(destUser, destPoolAddress, Link(200))
+ require.NoError(t, err)
+ _, err = destPool.ProvideLiquidity(destUser, Link(200))
+ require.NoError(t, err)
+ destChain.Commit()
+
+ destWrappedPoolAddress, _, _, err := lock_release_token_pool_1_0_0.DeployLockReleaseTokenPool(
+ destUser,
+ destChain,
+ destWethaddr,
+ []common.Address{},
+ armProxyDestAddress,
+ )
+ require.NoError(t, err)
+ destWrappedPool, err := lock_release_token_pool_1_0_0.NewLockReleaseTokenPool(destWrappedPoolAddress, destChain)
+ require.NoError(t, err)
+
+ poolFloatValue := big.NewInt(1e18)
+
+ destUser.Value = poolFloatValue
+ _, err = destWrapped.Deposit(destUser)
+ require.NoError(t, err)
+ destChain.Commit()
+ destUser.Value = nil
+
+ _, err = destWrapped.Transfer(destUser, destWrappedPool.Address(), poolFloatValue)
+ require.NoError(t, err)
+ destChain.Commit()
+
+ // Deploy and configure ge offramp.
+ destPricesAddress, _, _, err := price_registry_1_2_0.DeployPriceRegistry(
+ destUser,
+ destChain,
+ nil,
+ []common.Address{destLinkTokenAddress},
+ 60*60*24*14, // two weeks
+ )
+ require.NoError(t, err)
+ destPriceRegistry, err := price_registry_1_2_0.NewPriceRegistry(destPricesAddress, destChain)
+ require.NoError(t, err)
+
+ // Deploy commit store.
+ commitStoreAddress, _, _, err := commit_store_1_2_0.DeployCommitStore(
+ destUser, // user
+ destChain, // client
+ commit_store_1_2_0.CommitStoreStaticConfig{
+ ChainSelector: destChainSelector,
+ SourceChainSelector: sourceChainSelector,
+ OnRamp: onRamp.Address(),
+ ArmProxy: destARMProxy.Address(),
+ },
+ )
+ require.NoError(t, err)
+ destChain.Commit()
+ commitStore, err := commit_store_1_2_0.NewCommitStore(commitStoreAddress, destChain)
+ require.NoError(t, err)
+
+ offRampAddress, _, _, err := evm_2_evm_offramp.DeployEVM2EVMOffRamp(
+ destUser,
+ destChain,
+ evm_2_evm_offramp.EVM2EVMOffRampStaticConfig{
+ CommitStore: commitStore.Address(),
+ ChainSelector: destChainSelector,
+ SourceChainSelector: sourceChainSelector,
+ OnRamp: onRampAddress,
+ PrevOffRamp: common.HexToAddress(""),
+ ArmProxy: armProxyDestAddress,
+ },
+ []common.Address{sourceLinkTokenAddress, sourceWeth9addr},
+ []common.Address{destPoolAddress, destWrappedPool.Address()},
+ evm_2_evm_offramp.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: LinkUSDValue(100),
+ Rate: LinkUSDValue(1),
+ },
+ )
+ require.NoError(t, err)
+ offRamp, err := evm_2_evm_offramp.NewEVM2EVMOffRamp(offRampAddress, destChain)
+ require.NoError(t, err)
+ _, err = destPool.ApplyChainUpdates(destUser,
+ []lock_release_token_pool.TokenPoolChainUpdate{{
+ RemoteChainSelector: sourceChainSelector,
+ Allowed: true,
+ OutboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: HundredLink,
+ Rate: big.NewInt(1e18),
+ },
+ InboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: HundredLink,
+ Rate: big.NewInt(1e18),
+ },
+ }},
+ )
+ require.NoError(t, err)
+
+ _, err = destWrappedPool.ApplyRampUpdates(destUser,
+ []lock_release_token_pool_1_0_0.TokenPoolRampUpdate{},
+ []lock_release_token_pool_1_0_0.TokenPoolRampUpdate{{
+ Ramp: offRampAddress,
+ Allowed: true,
+ RateLimiterConfig: lock_release_token_pool_1_0_0.RateLimiterConfig{
+ IsEnabled: true,
+ Capacity: HundredLink,
+ Rate: big.NewInt(1e18),
+ },
+ }},
+ )
+ require.NoError(t, err)
+
+ destChain.Commit()
+ _, err = destPriceRegistry.ApplyPriceUpdatersUpdates(destUser, []common.Address{commitStoreAddress}, []common.Address{})
+ require.NoError(t, err)
+ _, err = destRouter.ApplyRampUpdates(destUser, nil,
+ nil, []router.RouterOffRamp{{SourceChainSelector: sourceChainSelector, OffRamp: offRampAddress}})
+ require.NoError(t, err)
+
+ // Deploy 2 revertable (one SS one non-SS)
+ revertingMessageReceiver1Address, _, _, err := maybe_revert_message_receiver.DeployMaybeRevertMessageReceiver(destUser, destChain, false)
+ require.NoError(t, err)
+ revertingMessageReceiver1, _ := maybe_revert_message_receiver.NewMaybeRevertMessageReceiver(revertingMessageReceiver1Address, destChain)
+ revertingMessageReceiver2Address, _, _, err := maybe_revert_message_receiver.DeployMaybeRevertMessageReceiver(destUser, destChain, false)
+ require.NoError(t, err)
+ revertingMessageReceiver2, _ := maybe_revert_message_receiver.NewMaybeRevertMessageReceiver(revertingMessageReceiver2Address, destChain)
+ // Need to commit here, or we will hit the block gas limit when deploying the executor
+ sourceChain.Commit()
+ destChain.Commit()
+
+ // Ensure we have at least finality blocks.
+ for i := 0; i < 50; i++ {
+ sourceChain.Commit()
+ destChain.Commit()
+ }
+
+ source := SourceChain{
+ Common: Common{
+ ChainID: sourceChainID,
+ ChainSelector: sourceChainSelector,
+ User: sourceUser,
+ Chain: sourceChain,
+ LinkToken: sourceLinkToken,
+ LinkTokenPool: sourcePool,
+ CustomToken: sourceCustomToken,
+ ARM: sourceARM,
+ ARMProxy: sourceARMProxy,
+ PriceRegistry: srcPriceRegistry,
+ WrappedNative: sourceWrapped,
+ WrappedNativePool: sourceWeth9Pool,
+ },
+ Router: sourceRouter,
+ OnRamp: onRamp,
+ }
+ dest := DestinationChain{
+ Common: Common{
+ ChainID: destChainID,
+ ChainSelector: destChainSelector,
+ User: destUser,
+ Chain: destChain,
+ LinkToken: destLinkToken,
+ LinkTokenPool: destPool,
+ CustomToken: destCustomToken,
+ ARM: destARM,
+ ARMProxy: destARMProxy,
+ PriceRegistry: destPriceRegistry,
+ WrappedNative: destWrapped,
+ WrappedNativePool: destWrappedPool,
+ },
+ CommitStore: commitStore,
+ Router: destRouter,
+ OffRamp: offRamp,
+ Receivers: []MaybeRevertReceiver{{Receiver: revertingMessageReceiver1, Strict: false}, {Receiver: revertingMessageReceiver2, Strict: true}},
+ }
+
+ return CCIPContracts{
+ Source: source,
+ Dest: dest,
+ }
+}
+
+// SendRequest submits msg through the source router via CcipSend toward the
+// destination chain selector, waits for the transaction to be confirmed on the
+// source chain, and returns it. Fails the test on any error.
+func (c *CCIPContracts) SendRequest(t *testing.T, msg router.ClientEVM2AnyMessage) *types.Transaction {
+	sendTx, sendErr := c.Source.Router.CcipSend(c.Source.User, c.Dest.ChainSelector, msg)
+	require.NoError(t, sendErr)
+	testhelpers.ConfirmTxs(t, []*types.Transaction{sendTx}, c.Source.Chain)
+	return sendTx
+}
+
+// AssertExecState parses an ExecutionStateChanged event out of log and fails
+// the test when its state differs from the expected one. An optional offramp
+// address may be supplied via offRampOpts; otherwise the configured
+// Dest.OffRamp is used.
+func (c *CCIPContracts) AssertExecState(t *testing.T, log logpoller.Log, state MessageExecutionState, offRampOpts ...common.Address) {
+	offRamp := c.Dest.OffRamp
+	if len(offRampOpts) > 0 {
+		var err error
+		offRamp, err = evm_2_evm_offramp.NewEVM2EVMOffRamp(offRampOpts[0], c.Dest.Chain)
+		require.NoError(t, err)
+	} else {
+		require.NotNil(t, offRamp, "no offRamp configured")
+	}
+	event, err := offRamp.ParseExecutionStateChanged(log.ToGethLog())
+	require.NoError(t, err)
+	if MessageExecutionState(event.State) != state {
+		t.Log("Execution failed", hexutil.Encode(event.ReturnData))
+		t.Fail()
+	}
+}
+
+// GetEVMExtraArgsV1 ABI-encodes (gasLimit, strict) and prepends the EVM
+// extra-args v1 selector tag (0x97a657c9), producing the extraArgs bytes for a
+// CCIP message.
+func GetEVMExtraArgsV1(gasLimit *big.Int, strict bool) ([]byte, error) {
+	encodedArgs, err := utils.ABIEncode(`[{"type":"uint256"},{"type":"bool"}]`, gasLimit, strict)
+	if err != nil {
+		return nil, err
+	}
+	evmV1Tag := []byte{0x97, 0xa6, 0x57, 0xc9}
+	return append(evmV1Tag, encodedArgs...), nil
+}
+
+// ManualExecArgs bundles everything needed to manually execute a single CCIP
+// message on the destination chain after automated execution failed or was
+// skipped. Mandatory fields are validated by ExecuteManually.
+type ManualExecArgs struct {
+	SourceChainID, DestChainID uint64               // EVM chain IDs of the source and destination chains
+	DestUser                   *bind.TransactOpts   // transactor submitting the manual execution on the destination chain
+	SourceChain, DestChain     bind.ContractBackend // RPC backends for the two chains
+	SourceStartBlock           *big.Int             // the block in/after which failed ccip-send transaction was triggered
+	DestStartBlock             uint64               // the start block for filtering ReportAccepted event (including the failed seq num)
+	// in destination chain. if not provided to be derived by ApproxDestStartBlock method
+	DestLatestBlockNum uint64 // current block number in destination
+	DestDeployedAt     uint64 // destination block number for the initial destination contract deployment.
+	// Can be any number before the tx was reverted in destination chain. Preferably this needs to be set up with
+	// a value greater than zero to avoid performance issue in locating approximate destination block
+	SendReqLogIndex uint     // log index of the CCIPSendRequested log in source chain
+	SendReqTxHash   string   // tx hash of the ccip-send transaction for which execution was reverted
+	CommitStore     string   // hex address of the destination commit store
+	OnRamp          string   // hex address of the source onramp
+	OffRamp         string   // hex address of the destination offramp
+	SeqNr           uint64   // sequence number to execute; derived from logs when zero
+	GasLimit        *big.Int // optional gas-limit override passed to ManuallyExecute
+}
+
+// ApproxDestStartBlock attempts to locate a block in destination chain with timestamp closest to the timestamp of the block
+// in source chain in which ccip-send transaction was included
+// it uses binary search to locate the block with the closest timestamp
+// if the block located has a timestamp greater than the timestamp of mentioned source block
+// it just returns the first block found with lesser timestamp of the source block
+// providing a value of args.DestDeployedAt ensures better performance by reducing the range of block numbers to be traversed
+func (args *ManualExecArgs) ApproxDestStartBlock() error {
+	sourceBlockHdr, err := args.SourceChain.HeaderByNumber(context.Background(), args.SourceStartBlock)
+	if err != nil {
+		return err
+	}
+	sendTxTime := sourceBlockHdr.Time
+	maxBlockNum := args.DestLatestBlockNum
+	// lower bound of the binary search; defaults to the destination deployment block
+	minBlockNum := args.DestDeployedAt
+	closestBlockNum := uint64(math.Floor((float64(maxBlockNum) + float64(minBlockNum)) / 2))
+	var closestBlockHdr *types.Header
+	closestBlockHdr, err = args.DestChain.HeaderByNumber(context.Background(), big.NewInt(int64(closestBlockNum)))
+	if err != nil {
+		return err
+	}
+	// to reduce the number of RPC calls increase the value of blockOffset
+	blockOffset := uint64(10)
+	for {
+		blockNum := closestBlockHdr.Number.Uint64()
+		if minBlockNum > maxBlockNum {
+			break
+		}
+		// Absolute timestamp difference computed without uint64 underflow.
+		// The original converted (a - b) to float64 AFTER the unsigned
+		// subtraction, which wrapped to a huge value whenever a < b and so
+		// the early exit below could never trigger on that side.
+		var timeDiff uint64
+		if closestBlockHdr.Time > sendTxTime {
+			timeDiff = closestBlockHdr.Time - sendTxTime
+		} else {
+			timeDiff = sendTxTime - closestBlockHdr.Time
+		}
+		// break if the difference in timestamp is lesser than 1 minute
+		if timeDiff < 60 {
+			break
+		} else if closestBlockHdr.Time > sendTxTime {
+			maxBlockNum = blockNum - 1
+		} else {
+			minBlockNum = blockNum + 1
+		}
+		closestBlockNum = uint64(math.Floor((float64(maxBlockNum) + float64(minBlockNum)) / 2))
+		closestBlockHdr, err = args.DestChain.HeaderByNumber(context.Background(), big.NewInt(int64(closestBlockNum)))
+		if err != nil {
+			return err
+		}
+	}
+
+	// Walk backwards until the candidate block is not newer than the send tx.
+	// Guard BEFORE subtracting so the unsigned counter cannot wrap around
+	// (the original subtracted first and its `<= 0` check only caught an
+	// exact zero, never the wraparound).
+	for closestBlockHdr.Time > sendTxTime {
+		if closestBlockNum <= blockOffset {
+			return fmt.Errorf("approx destination blocknumber not found")
+		}
+		closestBlockNum = closestBlockNum - blockOffset
+		closestBlockHdr, err = args.DestChain.HeaderByNumber(context.Background(), big.NewInt(int64(closestBlockNum)))
+		if err != nil {
+			return err
+		}
+	}
+	args.DestStartBlock = closestBlockHdr.Number.Uint64()
+	fmt.Println("using approx destination start block number", args.DestStartBlock)
+	return nil
+}
+
+// FindSeqNrFromCCIPSendRequested scans CCIPSendRequested logs emitted by the
+// onramp, starting at SourceStartBlock, and returns the sequence number of the
+// message whose log index and tx hash match SendReqLogIndex/SendReqTxHash.
+// Returns an error when no matching log is found.
+func (args *ManualExecArgs) FindSeqNrFromCCIPSendRequested() (uint64, error) {
+	var seqNr uint64
+	onRampContract, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(common.HexToAddress(args.OnRamp), args.SourceChain)
+	if err != nil {
+		return seqNr, err
+	}
+	iterator, err := onRampContract.FilterCCIPSendRequested(&bind.FilterOpts{
+		Start: args.SourceStartBlock.Uint64(),
+	})
+	if err != nil {
+		return seqNr, err
+	}
+	// Release the underlying log stream when done; the original leaked the
+	// iterator (the sibling execute() does close its iterator).
+	defer iterator.Close()
+	for iterator.Next() {
+		if iterator.Event.Raw.Index == args.SendReqLogIndex &&
+			iterator.Event.Raw.TxHash.Hex() == args.SendReqTxHash {
+			seqNr = iterator.Event.Message.SequenceNumber
+			break
+		}
+	}
+	if seqNr == 0 {
+		return seqNr,
+			fmt.Errorf("no CCIPSendRequested logs found for logIndex %d starting from block number %d", args.SendReqLogIndex, args.SourceStartBlock)
+	}
+	return seqNr, nil
+}
+
+// ExecuteManually validates the args, discovers the sequence number from logs
+// when SeqNr is unset, locates the commit report whose interval covers it, and
+// manually executes the message on the destination chain. Returns the manual
+// execution transaction, or an error when validation or any lookup fails.
+func (args *ManualExecArgs) ExecuteManually() (*types.Transaction, error) {
+	if args.SourceChainID == 0 ||
+		args.DestChainID == 0 ||
+		args.DestUser == nil {
+		return nil, fmt.Errorf("chain ids and owners are mandatory for source and dest chain")
+	}
+	if !common.IsHexAddress(args.CommitStore) ||
+		!common.IsHexAddress(args.OffRamp) ||
+		!common.IsHexAddress(args.OnRamp) {
+		return nil, fmt.Errorf("contract addresses must be valid hex address")
+	}
+	if args.SendReqTxHash == "" {
+		return nil, fmt.Errorf("tx hash of ccip-send request are required")
+	}
+	if args.SourceStartBlock == nil {
+		return nil, fmt.Errorf("must provide the value of source block in/after which ccip-send tx was included")
+	}
+	if args.SeqNr == 0 {
+		if args.SendReqLogIndex == 0 {
+			return nil, fmt.Errorf("must provide the value of log index of ccip-send request")
+		}
+		// locate seq nr from CCIPSendRequested log
+		seqNr, err := args.FindSeqNrFromCCIPSendRequested()
+		if err != nil {
+			return nil, err
+		}
+		args.SeqNr = seqNr
+	}
+	commitStore, err := commit_store_1_2_0.NewCommitStore(common.HexToAddress(args.CommitStore), args.DestChain)
+	if err != nil {
+		return nil, err
+	}
+	if args.DestStartBlock < 1 {
+		// no explicit start block given; binary-search for an approximate one
+		err = args.ApproxDestStartBlock()
+		if err != nil {
+			return nil, err
+		}
+	}
+	iterator, err := commitStore.FilterReportAccepted(&bind.FilterOpts{Start: args.DestStartBlock})
+	if err != nil {
+		return nil, err
+	}
+	// Release the underlying log stream when done; the original leaked it.
+	defer iterator.Close()
+
+	// Find the accepted commit report whose interval covers the target seq num.
+	var commitReport *commit_store_1_2_0.CommitStoreCommitReport
+	for iterator.Next() {
+		if iterator.Event.Report.Interval.Min <= args.SeqNr && iterator.Event.Report.Interval.Max >= args.SeqNr {
+			commitReport = &iterator.Event.Report
+			fmt.Println("Found root")
+			break
+		}
+	}
+	if commitReport == nil {
+		return nil, fmt.Errorf("unable to find seq num %d in commit report", args.SeqNr)
+	}
+
+	return args.execute(commitReport)
+}
+
+// execute rebuilds the merkle tree for all messages in the report's interval,
+// proves the leaf for args.SeqNr against the committed root, and submits a
+// manual execution to the offramp on the destination chain. Returns the
+// manual-execution transaction, or an error when the message cannot be found,
+// the rebuilt root does not match the report, or any contract call fails.
+func (args *ManualExecArgs) execute(report *commit_store_1_2_0.CommitStoreCommitReport) (*types.Transaction, error) {
+	log.Info().Msg("Executing request manually")
+	seqNr := args.SeqNr
+	// Build a merkle tree for the report
+	mctx := hashutil.NewKeccak()
+	onRampContract, err := evm_2_evm_onramp_1_2_0.NewEVM2EVMOnRamp(common.HexToAddress(args.OnRamp), args.SourceChain)
+	if err != nil {
+		return nil, err
+	}
+	leafHasher := v1_2_0.NewLeafHasher(args.SourceChainID, args.DestChainID, common.HexToAddress(args.OnRamp), mctx, onRampContract)
+	if leafHasher == nil {
+		return nil, fmt.Errorf("unable to create leaf hasher")
+	}
+
+	var leaves [][32]byte
+	var curr, prove int
+	var msgs []evm_2_evm_offramp.InternalEVM2EVMMessage
+	var manualExecGasLimits []*big.Int
+	var tokenData [][][]byte
+	sendRequestedIterator, err := onRampContract.FilterCCIPSendRequested(&bind.FilterOpts{
+		Start: args.SourceStartBlock.Uint64(),
+	})
+	if err != nil {
+		return nil, err
+	}
+	// Hash every raw CCIPSendRequested log inside the report's interval to
+	// rebuild the tree; remember in `prove` the leaf index of the target seqNr.
+	for sendRequestedIterator.Next() {
+		if sendRequestedIterator.Event.Message.SequenceNumber <= report.Interval.Max &&
+			sendRequestedIterator.Event.Message.SequenceNumber >= report.Interval.Min {
+			fmt.Println("Found seq num", sendRequestedIterator.Event.Message.SequenceNumber, report.Interval)
+			hash, err2 := leafHasher.HashLeaf(sendRequestedIterator.Event.Raw)
+			if err2 != nil {
+				return nil, err2
+			}
+			leaves = append(leaves, hash)
+			if sendRequestedIterator.Event.Message.SequenceNumber == seqNr {
+				fmt.Printf("Found proving %d %+v\n", curr, sendRequestedIterator.Event.Message)
+				// Convert the onramp's token amounts into the offramp's type.
+				var tokensAndAmounts []evm_2_evm_offramp.ClientEVMTokenAmount
+				for _, tokenAndAmount := range sendRequestedIterator.Event.Message.TokenAmounts {
+					tokensAndAmounts = append(tokensAndAmounts, evm_2_evm_offramp.ClientEVMTokenAmount{
+						Token:  tokenAndAmount.Token,
+						Amount: tokenAndAmount.Amount,
+					})
+				}
+				msg := evm_2_evm_offramp.InternalEVM2EVMMessage{
+					SourceChainSelector: sendRequestedIterator.Event.Message.SourceChainSelector,
+					Sender:              sendRequestedIterator.Event.Message.Sender,
+					Receiver:            sendRequestedIterator.Event.Message.Receiver,
+					SequenceNumber:      sendRequestedIterator.Event.Message.SequenceNumber,
+					GasLimit:            sendRequestedIterator.Event.Message.GasLimit,
+					Strict:              sendRequestedIterator.Event.Message.Strict,
+					Nonce:               sendRequestedIterator.Event.Message.Nonce,
+					FeeToken:            sendRequestedIterator.Event.Message.FeeToken,
+					FeeTokenAmount:      sendRequestedIterator.Event.Message.FeeTokenAmount,
+					Data:                sendRequestedIterator.Event.Message.Data,
+					TokenAmounts:        tokensAndAmounts,
+					SourceTokenData:     sendRequestedIterator.Event.Message.SourceTokenData,
+					MessageId:           sendRequestedIterator.Event.Message.MessageId,
+				}
+				msgs = append(msgs, msg)
+				// NOTE(review): the GasLimit override below happens AFTER msg was
+				// appended (msg is a value copy), so it only reaches the
+				// manualExecGasLimits slice, not the message in the report —
+				// confirm this ordering is intentional.
+				if args.GasLimit != nil {
+					msg.GasLimit = args.GasLimit
+				}
+				manualExecGasLimits = append(manualExecGasLimits, msg.GasLimit)
+				// One empty offchain-token-data entry per token transfer.
+				var msgTokenData [][]byte
+				for range sendRequestedIterator.Event.Message.TokenAmounts {
+					msgTokenData = append(msgTokenData, []byte{})
+				}
+
+				tokenData = append(tokenData, msgTokenData)
+				prove = curr
+			}
+			curr++
+		}
+	}
+	sendRequestedIterator.Close()
+	if msgs == nil {
+		return nil, fmt.Errorf("unable to find msg with seqNr %d", seqNr)
+	}
+	tree, err := merklemulti.NewTree(mctx, leaves)
+	if err != nil {
+		return nil, err
+	}
+	// The locally rebuilt root must equal the committed root or the proof
+	// would be rejected on-chain.
+	if tree.Root() != report.MerkleRoot {
+		return nil, fmt.Errorf("root doesn't match")
+	}
+
+	proof, err := tree.Prove([]int{prove})
+	if err != nil {
+		return nil, err
+	}
+
+	offRampProof := evm_2_evm_offramp.InternalExecutionReport{
+		Messages:          msgs,
+		OffchainTokenData: tokenData,
+		Proofs:            proof.Hashes,
+		ProofFlagBits:     abihelpers.ProofFlagsToBits(proof.SourceFlags),
+	}
+	offRamp, err := evm_2_evm_offramp.NewEVM2EVMOffRamp(common.HexToAddress(args.OffRamp), args.DestChain)
+	if err != nil {
+		return nil, err
+	}
+	// Execute.
+	return offRamp.ManuallyExecute(args.DestUser, offRampProof, manualExecGasLimits)
+}
+
+// ExecuteMessage manually executes the CCIP message identified by req/txHash
+// on the destination chain and returns its sequence number. The test fails if
+// the manual execution transaction does not succeed.
+func (c *CCIPContracts) ExecuteMessage(
+	t *testing.T,
+	req logpoller.Log,
+	txHash common.Hash,
+	destStartBlock uint64,
+) uint64 {
+	t.Log("Executing request manually")
+	sendReqReceipt, err := c.Source.Chain.TransactionReceipt(context.Background(), txHash)
+	require.NoError(t, err)
+	execArgs := ManualExecArgs{
+		SourceChainID:      c.Source.ChainID,
+		DestChainID:        c.Dest.ChainID,
+		DestUser:           c.Dest.User,
+		SourceChain:        c.Source.Chain,
+		DestChain:          c.Dest.Chain,
+		SourceStartBlock:   sendReqReceipt.BlockNumber,
+		DestStartBlock:     destStartBlock,
+		DestLatestBlockNum: c.Dest.Chain.Blockchain().CurrentBlock().Number.Uint64(),
+		SendReqLogIndex:    uint(req.LogIndex),
+		SendReqTxHash:      txHash.String(),
+		CommitStore:        c.Dest.CommitStore.Address().String(),
+		OnRamp:             c.Source.OnRamp.Address().String(),
+		OffRamp:            c.Dest.OffRamp.Address().String(),
+	}
+	execTx, err := execArgs.ExecuteManually()
+	require.NoError(t, err)
+	c.Dest.Chain.Commit()
+	c.Source.Chain.Commit()
+	rec, err := c.Dest.Chain.TransactionReceipt(context.Background(), execTx.Hash())
+	require.NoError(t, err)
+	require.Equal(t, uint64(1), rec.Status, "manual execution failed")
+	t.Logf("Manual Execution completed for seqNum %d", execArgs.SeqNr)
+	return execArgs.SeqNr
+}
+
+// GetBalance returns addr's balance of the LINK-interface token deployed at
+// tokenAddr on the given chain, failing the test on any contract-call error.
+func GetBalance(t *testing.T, chain bind.ContractBackend, tokenAddr common.Address, addr common.Address) *big.Int {
+	linkToken, err := link_token_interface.NewLinkToken(tokenAddr, chain)
+	require.NoError(t, err)
+	balance, err := linkToken.BalanceOf(nil, addr)
+	require.NoError(t, err)
+	return balance
+}
diff --git a/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/chainlink.go b/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/chainlink.go
new file mode 100644
index 00000000000..25be1c2a9a9
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/chainlink.go
@@ -0,0 +1,1045 @@
+package testhelpers_1_4_0
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "math/big"
+ "net/http"
+ "net/http/httptest"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/common"
+ types3 "github.com/ethereum/go-ethereum/core/types"
+ "github.com/google/uuid"
+ "github.com/hashicorp/consul/sdk/freeport"
+ "github.com/jmoiron/sqlx"
+ "github.com/onsi/gomega"
+ "github.com/pkg/errors"
+
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+ "k8s.io/utils/pointer" //nolint:staticcheck
+
+ "github.com/smartcontractkit/libocr/commontypes"
+ "github.com/smartcontractkit/libocr/offchainreporting2/confighelper"
+ types4 "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+ "github.com/smartcontractkit/chainlink-common/pkg/loop"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ coretypes "github.com/smartcontractkit/chainlink-common/pkg/types/core/mocks"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ v2 "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ evmUtils "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm"
+ configv2 "github.com/smartcontractkit/chainlink/v2/core/config/toml"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/logger/audit"
+ "github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
+ feeds2 "github.com/smartcontractkit/chainlink/v2/core/services/feeds"
+ feedsMocks "github.com/smartcontractkit/chainlink/v2/core/services/feeds/mocks"
+ pb "github.com/smartcontractkit/chainlink/v2/core/services/feeds/proto"
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key"
+ ksMocks "github.com/smartcontractkit/chainlink/v2/core/services/keystore/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers"
+ integrationtesthelpers "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers/integration"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/validate"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocrbootstrap"
+ evmrelay "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
+ clutils "github.com/smartcontractkit/chainlink/v2/core/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/utils/crypto"
+ "github.com/smartcontractkit/chainlink/v2/plugins"
+)
+
+// TOML job-spec templates used by the test harness to create CCIP OCR2 jobs on
+// in-process chainlink nodes; the %s placeholders are filled via fmt.Sprintf.
+const (
+	// execSpecTemplate: ccip-execution job spec. Placeholders: contractID,
+	// ocrKeyBundleID, transmitterID, USDC source message transmitter address,
+	// USDC source token address.
+	execSpecTemplate = `
+	type = "offchainreporting2"
+	schemaVersion = 1
+	name = "ccip-exec-1"
+	externalJobID = "67ffad71-d90f-4fe3-b4e4-494924b707fb"
+	forwardingAllowed = false
+	maxTaskDuration = "0s"
+	contractID = "%s"
+	contractConfigConfirmations = 1
+	contractConfigTrackerPollInterval = "20s"
+	ocrKeyBundleID = "%s"
+	relay = "evm"
+	pluginType = "ccip-execution"
+	transmitterID = "%s"
+
+	[relayConfig]
+	chainID = 1_337
+
+	[pluginConfig]
+	destStartBlock = 50
+
+	[pluginConfig.USDCConfig]
+	AttestationAPI = "http://blah.com"
+	SourceMessageTransmitterAddress = "%s"
+	SourceTokenAddress = "%s"
+	AttestationAPITimeoutSeconds = 10
+	`
+	// commitSpecTemplatePipeline: ccip-commit job spec using a pipeline-based
+	// token price source. Placeholders: contractID, ocrKeyBundleID,
+	// transmitterID, offRamp address, tokenPricesUSDPipeline body.
+	commitSpecTemplatePipeline = `
+	type = "offchainreporting2"
+	schemaVersion = 1
+	name = "ccip-commit-1"
+	externalJobID = "13c997cf-1a14-4ab7-9068-07ee6d2afa55"
+	forwardingAllowed = false
+	maxTaskDuration = "0s"
+	contractID = "%s"
+	contractConfigConfirmations = 1
+	contractConfigTrackerPollInterval = "20s"
+	ocrKeyBundleID = "%s"
+	relay = "evm"
+	pluginType = "ccip-commit"
+	transmitterID = "%s"
+
+	[relayConfig]
+	chainID = 1_337
+
+	[pluginConfig]
+	destStartBlock = 50
+	offRamp = "%s"
+	tokenPricesUSDPipeline = """
+	%s
+	"""
+	`
+	// commitSpecTemplateDynamicPriceGetter: ccip-commit job spec using a
+	// dynamic price getter config. Placeholders: contractID, ocrKeyBundleID,
+	// transmitterID, offRamp address, priceGetterConfig body.
+	commitSpecTemplateDynamicPriceGetter = `
+	type = "offchainreporting2"
+	schemaVersion = 1
+	name = "ccip-commit-1"
+	externalJobID = "13c997cf-1a14-4ab7-9068-07ee6d2afa55"
+	forwardingAllowed = false
+	maxTaskDuration = "0s"
+	contractID = "%s"
+	contractConfigConfirmations = 1
+	contractConfigTrackerPollInterval = "20s"
+	ocrKeyBundleID = "%s"
+	relay = "evm"
+	pluginType = "ccip-commit"
+	transmitterID = "%s"
+
+	[relayConfig]
+	chainID = 1_337
+
+	[pluginConfig]
+	destStartBlock = 50
+	offRamp = "%s"
+	priceGetterConfig = """
+	%s
+	"""
+	`
+)
+
+// Node wraps an in-process chainlink application together with the addresses
+// and OCR2 key bundle it uses when participating in a CCIP DON in tests.
+type Node struct {
+	App             chainlink.Application // the running chainlink node
+	Transmitter     common.Address        // address used to transmit OCR2 reports
+	PaymentReceiver common.Address        // address receiving transmission payment
+	KeyBundle       ocr2key.KeyBundle     // OCR2 key bundle used for signing
+}
+
+// FindJobIDForContract returns the ID of the node's active OCR2 job whose
+// contract ID matches addr, failing the test when no such job exists.
+func (node *Node) FindJobIDForContract(t *testing.T, addr common.Address) int32 {
+	for _, spec := range node.App.JobSpawner().ActiveJobs() {
+		if spec.Type == job.OffchainReporting2 && spec.OCR2OracleSpec.ContractID == addr.Hex() {
+			return spec.ID
+		}
+	}
+	t.Fatalf("Could not find job for contract %s", addr.Hex())
+	return 0
+}
+
+// EventuallyNodeUsesUpdatedPriceRegistry waits until the node's log poller has
+// picked up a UsdPerUnitGasUpdated log from the destination price registry and
+// returns that log. Fails the test on timeout.
+func (node *Node) EventuallyNodeUsesUpdatedPriceRegistry(t *testing.T, ccipContracts CCIPIntegrationTestHarness) logpoller.Log {
+	c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10))
+	require.NoError(t, err)
+	var log logpoller.Log
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		ccipContracts.Source.Chain.Commit()
+		ccipContracts.Dest.Chain.Commit()
+		latest, err := c.LogPoller().LatestLogByEventSigWithConfs(
+			testutils.Context(t),
+			v1_0_0.UsdPerUnitGasUpdated,
+			ccipContracts.Dest.PriceRegistry.Address(),
+			0,
+		)
+		// err can be transient errors such as sql row set empty
+		if err != nil {
+			return false
+		}
+		if latest == nil {
+			return false
+		}
+		// Assign to the captured variable; the original shadowed `log` with :=
+		// inside the closure and always returned the zero-value log.
+		log = *latest
+		return true
+	}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "node is not using updated price registry %s", ccipContracts.Dest.PriceRegistry.Address().Hex())
+	return log
+}
+
+// EventuallyNodeUsesNewCommitConfig waits until the latest OCR2 config log on
+// the destination commit store decodes to commitCfg, returning that log.
+// Fails the test on timeout.
+func (node *Node) EventuallyNodeUsesNewCommitConfig(t *testing.T, ccipContracts CCIPIntegrationTestHarness, commitCfg ccipdata.CommitOnchainConfig) logpoller.Log {
+	c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10))
+	require.NoError(t, err)
+	var log logpoller.Log
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		ccipContracts.Source.Chain.Commit()
+		ccipContracts.Dest.Chain.Commit()
+		latest, err := c.LogPoller().LatestLogByEventSigWithConfs(
+			testutils.Context(t),
+			evmrelay.OCR2AggregatorLogDecoder.EventSig(),
+			ccipContracts.Dest.CommitStore.Address(),
+			0,
+		)
+		require.NoError(t, err)
+		if latest == nil {
+			return false
+		}
+		latestCfg, err := DecodeCommitOnChainConfig(latest.Data)
+		require.NoError(t, err)
+		if latestCfg != commitCfg {
+			return false
+		}
+		// Capture the matching log; the original shadowed `log` with := inside
+		// the closure and always returned the zero-value log.
+		log = *latest
+		return true
+	}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "node is using old cfg")
+	return log
+}
+
+// EventuallyNodeUsesNewExecConfig waits until the latest OCR2 config log on the
+// destination offramp decodes to execCfg, returning that log. Fails the test
+// on timeout.
+func (node *Node) EventuallyNodeUsesNewExecConfig(t *testing.T, ccipContracts CCIPIntegrationTestHarness, execCfg v1_2_0.ExecOnchainConfig) logpoller.Log {
+	c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10))
+	require.NoError(t, err)
+	var log logpoller.Log
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		ccipContracts.Source.Chain.Commit()
+		ccipContracts.Dest.Chain.Commit()
+		latest, err := c.LogPoller().LatestLogByEventSigWithConfs(
+			testutils.Context(t),
+			evmrelay.OCR2AggregatorLogDecoder.EventSig(),
+			ccipContracts.Dest.OffRamp.Address(),
+			0,
+		)
+		require.NoError(t, err)
+		if latest == nil {
+			return false
+		}
+		latestCfg, err := DecodeExecOnChainConfig(latest.Data)
+		require.NoError(t, err)
+		if latestCfg != execCfg {
+			return false
+		}
+		// Capture the matching log; the original shadowed `log` with := inside
+		// the closure and always returned the zero-value log.
+		log = *latest
+		return true
+	}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "node is using old cfg")
+	return log
+}
+
+// EventuallyHasReqSeqNum waits until the node's log poller holds the
+// CCIPSendRequested log with the given sequence number from onRamp, returning
+// that log. Fails the test on timeout.
+func (node *Node) EventuallyHasReqSeqNum(t *testing.T, ccipContracts *CCIPIntegrationTestHarness, onRamp common.Address, seqNum int) logpoller.Log {
+	c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Source.ChainID, 10))
+	require.NoError(t, err)
+	var log logpoller.Log
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		ccipContracts.Source.Chain.Commit()
+		ccipContracts.Dest.Chain.Commit()
+		seqWord := abihelpers.EvmWord(uint64(seqNum))
+		lgs, err := c.LogPoller().LogsDataWordRange(
+			testutils.Context(t),
+			v1_2_0.CCIPSendRequestEventSig,
+			onRamp,
+			v1_2_0.CCIPSendRequestSeqNumIndex,
+			seqWord,
+			seqWord,
+			1,
+		)
+		require.NoError(t, err)
+		t.Log("Send requested", len(lgs))
+		if len(lgs) != 1 {
+			return false
+		}
+		log = lgs[0]
+		return true
+	}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "eventually has seq num")
+	return log
+}
+
+// EventuallyHasExecutedSeqNums polls the node's dest-chain log poller until an
+// ExecutionStateChanged log exists for every sequence number in the inclusive
+// range [minSeqNum, maxSeqNum] on the offRamp. Returns all matching logs.
+func (node *Node) EventuallyHasExecutedSeqNums(t *testing.T, ccipContracts *CCIPIntegrationTestHarness, offRamp common.Address, minSeqNum int, maxSeqNum int) []logpoller.Log {
+	c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10))
+	require.NoError(t, err)
+	var logs []logpoller.Log
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		// Commit on both chains so the simulated backends produce new blocks.
+		ccipContracts.Source.Chain.Commit()
+		ccipContracts.Dest.Chain.Commit()
+		lgs, err := c.LogPoller().IndexedLogsTopicRange(
+			testutils.Context(t),
+			v1_0_0.ExecutionStateChangedEvent,
+			offRamp,
+			v1_0_0.ExecutionStateChangedSeqNrIndex,
+			abihelpers.EvmWord(uint64(minSeqNum)),
+			abihelpers.EvmWord(uint64(maxSeqNum)),
+			1,
+		)
+		require.NoError(t, err)
+		t.Logf("Have executed logs %d want %d", len(lgs), maxSeqNum-minSeqNum+1)
+		if len(lgs) == maxSeqNum-minSeqNum+1 {
+			logs = lgs
+			t.Logf("Seq Num %d-%d executed", minSeqNum, maxSeqNum)
+			return true
+		}
+		return false
+	}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "eventually has not executed seq num")
+	return logs
+}
+
+// ConsistentlySeqNumHasNotBeenExecuted asserts for 10 seconds that no
+// ExecutionStateChanged log for the given sequence number appears on the
+// offRamp. Returns the log if one was (unexpectedly) observed.
+func (node *Node) ConsistentlySeqNumHasNotBeenExecuted(t *testing.T, ccipContracts *CCIPIntegrationTestHarness, offRamp common.Address, seqNum int) logpoller.Log {
+	c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10))
+	require.NoError(t, err)
+	var log logpoller.Log
+	gomega.NewGomegaWithT(t).Consistently(func() bool {
+		// Commit on both chains so the simulated backends produce new blocks.
+		ccipContracts.Source.Chain.Commit()
+		ccipContracts.Dest.Chain.Commit()
+		lgs, err := c.LogPoller().IndexedLogsTopicRange(
+			testutils.Context(t),
+			v1_0_0.ExecutionStateChangedEvent,
+			offRamp,
+			v1_0_0.ExecutionStateChangedSeqNrIndex,
+			abihelpers.EvmWord(uint64(seqNum)),
+			abihelpers.EvmWord(uint64(seqNum)),
+			1,
+		)
+		require.NoError(t, err)
+		t.Log("Executed logs", lgs)
+		if len(lgs) == 1 {
+			log = lgs[0]
+			return true
+		}
+		return false
+	}, 10*time.Second, 1*time.Second).Should(gomega.BeFalse(), "seq number got executed")
+	return log
+}
+
+// AddJob validates the given OCR2 job spec TOML and adds the resulting job to
+// the node's application.
+func (node *Node) AddJob(t *testing.T, spec *integrationtesthelpers.OCR2TaskJobSpec) {
+	specString, err := spec.String()
+	require.NoError(t, err)
+	ccipJob, err := validate.ValidatedOracleSpecToml(
+		testutils.Context(t),
+		node.App.GetConfig().OCR2(),
+		node.App.GetConfig().Insecure(),
+		specString,
+		// FIXME Ani
+		nil,
+	)
+	require.NoError(t, err)
+	// Use the test-scoped context for consistency with the other helpers;
+	// context.Background() would outlive the test.
+	err = node.App.AddJobV2(testutils.Context(t), &ccipJob)
+	require.NoError(t, err)
+}
+
+// AddBootstrapJob validates the given spec as an OCR2 bootstrap job and adds
+// it to the node's application.
+func (node *Node) AddBootstrapJob(t *testing.T, spec *integrationtesthelpers.OCR2TaskJobSpec) {
+	specString, err := spec.String()
+	require.NoError(t, err)
+	ccipJob, err := ocrbootstrap.ValidatedBootstrapSpecToml(specString)
+	require.NoError(t, err)
+	// Use the test-scoped context for consistency with the other helpers;
+	// context.Background() would outlive the test.
+	err = node.App.AddJobV2(testutils.Context(t), &ccipJob)
+	require.NoError(t, err)
+}
+
+// AddJobsWithSpec fills in this node's key bundle and transmitter on the shared
+// job spec, then adds the job via AddJob.
+func (node *Node) AddJobsWithSpec(t *testing.T, jobSpec *integrationtesthelpers.OCR2TaskJobSpec) {
+	// set node specific values
+	jobSpec.OCR2OracleSpec.OCRKeyBundleID.SetValid(node.KeyBundle.ID())
+	jobSpec.OCR2OracleSpec.TransmitterID.SetValid(node.Transmitter.Hex())
+	node.AddJob(t, jobSpec)
+}
+
+// setupNodeCCIP spins up a full in-memory chainlink application wired to the
+// two simulated chains, creates its P2P/OCR2/eth keys, and funds the
+// transmitter on the destination chain. Returns the app, its P2P peer ID, the
+// funded transmitter address, and the OCR2 key bundle.
+func setupNodeCCIP(
+	t *testing.T,
+	owner *bind.TransactOpts,
+	port int64,
+	dbName string,
+	sourceChain *backends.SimulatedBackend, destChain *backends.SimulatedBackend,
+	sourceChainID *big.Int, destChainID *big.Int,
+	bootstrapPeerID string,
+	bootstrapPort int64,
+) (chainlink.Application, string, common.Address, ocr2key.KeyBundle) {
+	trueRef, falseRef := true, false
+
+	// Do not want to load fixtures as they contain a dummy chainID.
+	loglevel := configv2.LogLevel(zap.DebugLevel)
+	config, db := heavyweight.FullTestDBNoFixturesV2(t, func(c *chainlink.Config, _ *chainlink.Secrets) {
+		p2pAddresses := []string{
+			fmt.Sprintf("127.0.0.1:%d", port),
+		}
+		c.Log.Level = &loglevel
+		c.Feature.UICSAKeys = &trueRef
+		c.Feature.FeedsManager = &trueRef
+		c.OCR.Enabled = &falseRef
+		c.OCR.DefaultTransactionQueueDepth = pointer.Uint32(200)
+		c.OCR2.Enabled = &trueRef
+		c.Feature.LogPoller = &trueRef
+		c.P2P.V2.Enabled = &trueRef
+
+		dur, err := config.NewDuration(500 * time.Millisecond)
+		if err != nil {
+			panic(err)
+		}
+		c.P2P.V2.DeltaDial = &dur
+
+		dur2, err := config.NewDuration(5 * time.Second)
+		if err != nil {
+			panic(err)
+		}
+
+		c.P2P.V2.DeltaReconcile = &dur2
+		c.P2P.V2.ListenAddresses = &p2pAddresses
+		c.P2P.V2.AnnounceAddresses = &p2pAddresses
+
+		c.EVM = []*v2.EVMConfig{createConfigV2Chain(sourceChainID), createConfigV2Chain(destChainID)}
+
+		if bootstrapPeerID != "" {
+			// Supply the bootstrap IP and port as a V2 peer address
+			c.P2P.V2.DefaultBootstrappers = &[]commontypes.BootstrapperLocator{
+				{
+					PeerID: bootstrapPeerID, Addrs: []string{
+						fmt.Sprintf("127.0.0.1:%d", bootstrapPort),
+					},
+				},
+			}
+		}
+	})
+
+	lggr := logger.TestLogger(t)
+
+	// The in-memory geth sim does not let you create a custom ChainID, it will always be 1337.
+	// In particular this means that if you sign an eip155 tx, the chainID used MUST be 1337
+	// and the CHAINID op code will always emit 1337. To work around this to simulate a "multichain"
+	// test, we fake different chainIDs using the wrapped sim cltest.SimulatedBackend so the RPC
+	// appears to operate on different chainIDs and we use an EthKeyStoreSim wrapper which always
+	// signs 1337 see https://github.com/smartcontractkit/chainlink-ccip/blob/a24dd436810250a458d27d8bb3fb78096afeb79c/core/services/ocr2/plugins/ccip/testhelpers/simulated_backend.go#L35
+	sourceClient := client.NewSimulatedBackendClient(t, sourceChain, sourceChainID)
+	destClient := client.NewSimulatedBackendClient(t, destChain, destChainID)
+	csaKeyStore := ksMocks.NewCSA(t)
+
+	key, err := csakey.NewV2()
+	require.NoError(t, err)
+	csaKeyStore.On("GetAll").Return([]csakey.KeyV2{key}, nil)
+	keyStore := NewKsa(db, lggr, csaKeyStore)
+
+	simEthKeyStore := testhelpers.EthKeyStoreSim{
+		ETHKS: keyStore.Eth(),
+		CSAKS: keyStore.CSA(),
+	}
+	mailMon := mailbox.NewMonitor("CCIP", lggr.Named("Mailbox"))
+	evmOpts := chainlink.EVMFactoryConfig{
+		ChainOpts: legacyevm.ChainOpts{
+			AppConfig: config,
+			GenEthClient: func(chainID *big.Int) client.Client {
+				// Route RPC traffic to the matching wrapped sim client.
+				if chainID.String() == sourceChainID.String() {
+					return sourceClient
+				} else if chainID.String() == destChainID.String() {
+					return destClient
+				}
+				t.Fatalf("invalid chain ID %v", chainID.String())
+				return nil
+			},
+			MailMon: mailMon,
+			DS:      db,
+		},
+		CSAETHKeystore: simEthKeyStore,
+	}
+	loopRegistry := plugins.NewLoopRegistry(lggr.Named("LoopRegistry"), config.Tracing())
+	relayerFactory := chainlink.RelayerFactory{
+		Logger:               lggr,
+		LoopRegistry:         loopRegistry,
+		GRPCOpts:             loop.GRPCOpts{},
+		CapabilitiesRegistry: coretypes.NewCapabilitiesRegistry(t),
+	}
+	testCtx := testutils.Context(t)
+	// evm alway enabled for backward compatibility
+	initOps := []chainlink.CoreRelayerChainInitFunc{
+		chainlink.InitEVM(testCtx, relayerFactory, evmOpts),
+	}
+
+	relayChainInterops, err := chainlink.NewCoreRelayerChainInteroperators(initOps...)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	app, err := chainlink.NewApplication(chainlink.ApplicationOpts{
+		Config:                     config,
+		DS:                         db,
+		KeyStore:                   keyStore,
+		RelayerChainInteroperators: relayChainInterops,
+		Logger:                     lggr,
+		ExternalInitiatorManager:   nil,
+		CloseLogger:                lggr.Sync,
+		UnrestrictedHTTPClient:     &http.Client{},
+		RestrictedHTTPClient:       &http.Client{},
+		AuditLogger:                audit.NoopLogger,
+		MailMon:                    mailMon,
+		LoopRegistry:               plugins.NewLoopRegistry(lggr, config.Tracing()),
+	})
+	ctx := testutils.Context(t)
+	require.NoError(t, err)
+	require.NoError(t, app.GetKeyStore().Unlock(ctx, "password"))
+	_, err = app.GetKeyStore().P2P().Create(ctx)
+	require.NoError(t, err)
+
+	p2pIDs, err := app.GetKeyStore().P2P().GetAll()
+	require.NoError(t, err)
+	require.Len(t, p2pIDs, 1)
+	peerID := p2pIDs[0].PeerID()
+
+	// Create and locate the single enabled sending key on the destination chain.
+	_, err = app.GetKeyStore().Eth().Create(testCtx, destChainID)
+	require.NoError(t, err)
+	sendingKeys, err := app.GetKeyStore().Eth().EnabledKeysForChain(testCtx, destChainID)
+	require.NoError(t, err)
+	require.Len(t, sendingKeys, 1)
+	transmitter := sendingKeys[0].Address
+	s, err := app.GetKeyStore().Eth().GetState(testCtx, sendingKeys[0].ID(), destChainID)
+	require.NoError(t, err)
+	lggr.Debug(fmt.Sprintf("Transmitter address %s chainID %s", transmitter, s.EVMChainID.String()))
+
+	// Fund the commitTransmitter address with some ETH
+	n, err := destChain.NonceAt(context.Background(), owner.From, nil)
+	require.NoError(t, err)
+
+	// 1 ETH at a 1 gwei gas price, plain 21k-gas transfer.
+	tx := types3.NewTransaction(n, transmitter, big.NewInt(1000000000000000000), 21000, big.NewInt(1000000000), nil)
+	signedTx, err := owner.Signer(owner.From, tx)
+	require.NoError(t, err)
+	err = destChain.SendTransaction(context.Background(), signedTx)
+	require.NoError(t, err)
+	destChain.Commit()
+
+	kb, err := app.GetKeyStore().OCR2().Create(ctx, chaintype.EVM)
+	require.NoError(t, err)
+	return app, peerID.Raw(), transmitter, kb
+}
+
+// createConfigV2Chain builds an enabled EVMConfig for the given simulated chain
+// ID with test-friendly defaults: fixed gas price, fast log polling, and a
+// shallow finality depth.
+func createConfigV2Chain(chainId *big.Int) *v2.EVMConfig {
+	// NOTE: For the executor jobs, the default of 500k is insufficient for a 3 message batch
+	defaultGasLimit := uint64(5000000)
+	tr := true
+
+	sourceC := v2.Defaults((*evmUtils.Big)(chainId))
+	sourceC.GasEstimator.LimitDefault = &defaultGasLimit
+	fixedPrice := "FixedPrice"
+	sourceC.GasEstimator.Mode = &fixedPrice
+	// MustNewDuration instead of discarding the error from NewDuration.
+	sourceC.LogPollInterval = config.MustNewDuration(100 * time.Millisecond)
+	fd := uint32(2)
+	sourceC.FinalityDepth = &fd
+	return &v2.EVMConfig{
+		ChainID: (*evmUtils.Big)(chainId),
+		Enabled: &tr,
+		Chain:   sourceC,
+		Nodes:   v2.EVMNodes{&v2.Node{}},
+	}
+}
+
+// CCIPIntegrationTestHarness bundles the deployed CCIP contracts with the set
+// of oracle nodes and the bootstrap node participating in a test.
+type CCIPIntegrationTestHarness struct {
+	CCIPContracts
+	// Nodes are the OCR2 oracle nodes.
+	Nodes []Node
+	// Bootstrap is the OCR2 bootstrap node.
+	Bootstrap Node
+}
+
+// SetupCCIPIntegrationTH deploys the CCIP contracts for the given chain
+// IDs/selectors and wraps them in a fresh test harness (nodes are added later).
+func SetupCCIPIntegrationTH(t *testing.T, sourceChainID, sourceChainSelector, destChainId, destChainSelector uint64) CCIPIntegrationTestHarness {
+	contracts := SetupCCIPContracts(t, sourceChainID, sourceChainSelector, destChainId, destChainSelector)
+	return CCIPIntegrationTestHarness{CCIPContracts: contracts}
+}
+
+// CreatePricesPipeline starts two stub HTTP servers serving fixed LINK/USD and
+// ETH/USD prices and returns a token-prices pipeline DSL string referencing
+// them, plus the servers (callers must Close them).
+func (c *CCIPIntegrationTestHarness) CreatePricesPipeline(t *testing.T) (string, *httptest.Server, *httptest.Server) {
+	linkUSD := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+		_, err := w.Write([]byte(`{"UsdPerLink": "8000000000000000000"}`))
+		require.NoError(t, err)
+	}))
+	ethUSD := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+		_, err := w.Write([]byte(`{"UsdPerETH": "1700000000000000000000"}`))
+		require.NoError(t, err)
+	}))
+	sourceWrappedNative, err := c.Source.Router.GetWrappedNative(nil)
+	require.NoError(t, err)
+	destWrappedNative, err := c.Dest.Router.GetWrappedNative(nil)
+	require.NoError(t, err)
+	// Pipeline fetches both feeds, parses them, and merges into a map keyed by
+	// token address (wrapped natives both use the ETH/USD price).
+	tokenPricesUSDPipeline := fmt.Sprintf(`
+// Price 1
+link [type=http method=GET url="%s"];
+link_parse [type=jsonparse path="UsdPerLink"];
+link->link_parse;
+eth [type=http method=GET url="%s"];
+eth_parse [type=jsonparse path="UsdPerETH"];
+eth->eth_parse;
+merge [type=merge left="{}" right="{\\\"%s\\\":$(link_parse), \\\"%s\\\":$(eth_parse), \\\"%s\\\":$(eth_parse)}"];`,
+		linkUSD.URL, ethUSD.URL, c.Dest.LinkToken.Address(), sourceWrappedNative, destWrappedNative)
+
+	return tokenPricesUSDPipeline, linkUSD, ethUSD
+}
+
+// AddAllJobs creates the commit and execution job specs from jobParams and adds
+// both to every oracle node in the harness.
+func (c *CCIPIntegrationTestHarness) AddAllJobs(t *testing.T, jobParams integrationtesthelpers.CCIPJobSpecParams) {
+	jobParams.OffRamp = c.Dest.OffRamp.Address()
+
+	commitSpec, err := jobParams.CommitJobSpec()
+	require.NoError(t, err)
+	geExecutionSpec, err := jobParams.ExecutionJobSpec()
+	require.NoError(t, err)
+	nodes := c.Nodes
+	for _, node := range nodes {
+		node.AddJobsWithSpec(t, commitSpec)
+		node.AddJobsWithSpec(t, geExecutionSpec)
+	}
+}
+
+// jobSpecProposal builds a feeds-manager job proposal by rendering specTemplate
+// with the spec's contract ID followed by any extra opts, for the given feeds
+// manager and proposal version.
+func (c *CCIPIntegrationTestHarness) jobSpecProposal(t *testing.T, specTemplate string, f func() (*integrationtesthelpers.OCR2TaskJobSpec, error), feedsManagerId int64, version int32, opts ...any) feeds2.ProposeJobArgs {
+	spec, err := f()
+	require.NoError(t, err)
+
+	// Template args: contract ID first, then caller-supplied values in order.
+	templateArgs := append([]any{spec.OCR2OracleSpec.ContractID}, opts...)
+	renderedSpec := fmt.Sprintf(specTemplate, templateArgs...)
+
+	return feeds2.ProposeJobArgs{
+		FeedsManagerID: feedsManagerId,
+		RemoteUUID:     uuid.New(),
+		Multiaddrs:     nil,
+		Version:        version,
+		Spec:           renderedSpec,
+	}
+}
+
+// SetupFeedsManager registers a single feeds manager on each oracle node
+// (skipping nodes that already have one) and swaps in a mocked connections
+// manager backed by NoopFeedsClient so no real FMS connection is made.
+func (c *CCIPIntegrationTestHarness) SetupFeedsManager(t *testing.T) {
+	ctx := testutils.Context(t)
+	for _, node := range c.Nodes {
+		f := node.App.GetFeedsService()
+
+		managers, err := f.ListManagers(ctx)
+		require.NoError(t, err)
+		if len(managers) > 0 {
+			// Use at most one feeds manager, don't register if one already exists
+			continue
+		}
+
+		// Derive a throwaway public key from random bytes for the manager record.
+		secret := utils.RandomBytes32()
+		pkey, err := crypto.PublicKeyFromHex(hex.EncodeToString(secret[:]))
+		require.NoError(t, err)
+
+		m := feeds2.RegisterManagerParams{
+			Name:      "CCIP",
+			URI:       "http://localhost:8080",
+			PublicKey: *pkey,
+		}
+
+		_, err = f.RegisterManager(testutils.Context(t), m)
+		require.NoError(t, err)
+
+		connManager := feedsMocks.NewConnectionsManager(t)
+		connManager.On("GetClient", mock.Anything).Maybe().Return(NoopFeedsClient{}, nil)
+		connManager.On("Close").Maybe().Return()
+		connManager.On("IsConnected", mock.Anything).Maybe().Return(true)
+		f.Unsafe_SetConnectionsManager(connManager)
+	}
+}
+
+// ApproveJobSpecs proposes and force-approves one execution job and one commit
+// job per oracle node through each node's feeds service. The commit job uses
+// the pipeline template when TokenPricesUSDPipeline is set, otherwise the
+// dynamic-price-getter template.
+func (c *CCIPIntegrationTestHarness) ApproveJobSpecs(t *testing.T, jobParams integrationtesthelpers.CCIPJobSpecParams) {
+	ctx := testutils.Context(t)
+
+	for _, node := range c.Nodes {
+		f := node.App.GetFeedsService()
+		managers, err := f.ListManagers(ctx)
+		require.NoError(t, err)
+		require.Len(t, managers, 1, "expected exactly one feeds manager")
+
+		execSpec := c.jobSpecProposal(
+			t,
+			execSpecTemplate,
+			jobParams.ExecutionJobSpec,
+			managers[0].ID,
+			1,
+			node.KeyBundle.ID(),
+			node.Transmitter.Hex(),
+			utils.RandomAddress().String(),
+			utils.RandomAddress().String(),
+		)
+		execId, err := f.ProposeJob(ctx, &execSpec)
+		require.NoError(t, err)
+
+		// Second arg true force-approves the spec.
+		err = f.ApproveSpec(ctx, execId, true)
+		require.NoError(t, err)
+
+		var commitSpec feeds2.ProposeJobArgs
+		if jobParams.TokenPricesUSDPipeline != "" {
+			commitSpec = c.jobSpecProposal(
+				t,
+				commitSpecTemplatePipeline,
+				jobParams.CommitJobSpec,
+				managers[0].ID,
+				2,
+				node.KeyBundle.ID(),
+				node.Transmitter.Hex(),
+				jobParams.OffRamp.String(),
+				jobParams.TokenPricesUSDPipeline,
+			)
+		} else {
+			commitSpec = c.jobSpecProposal(
+				t,
+				commitSpecTemplateDynamicPriceGetter,
+				jobParams.CommitJobSpec,
+				managers[0].ID,
+				2,
+				node.KeyBundle.ID(),
+				node.Transmitter.Hex(),
+				jobParams.OffRamp.String(),
+				jobParams.PriceGetterConfig,
+			)
+		}
+
+		commitId, err := f.ProposeJob(ctx, &commitSpec)
+		require.NoError(t, err)
+
+		err = f.ApproveSpec(ctx, commitId, true)
+		require.NoError(t, err)
+	}
+}
+
+// AllNodesHaveReqSeqNum waits until every oracle node has observed the
+// CCIPSendRequested log with the given sequence number on the onRamp
+// (explicitly provided, or defaulting to the harness's source onRamp).
+// Returns the log seen by the last node checked.
+func (c *CCIPIntegrationTestHarness) AllNodesHaveReqSeqNum(t *testing.T, seqNum int, onRampOpts ...common.Address) logpoller.Log {
+	// Resolve the onRamp address: explicit option wins, otherwise use the
+	// harness default (which must be configured).
+	var onRamp common.Address
+	if len(onRampOpts) == 0 {
+		require.NotNil(t, c.Source.OnRamp, "no onramp configured")
+		onRamp = c.Source.OnRamp.Address()
+	} else {
+		onRamp = onRampOpts[0]
+	}
+	var log logpoller.Log
+	for _, node := range c.Nodes {
+		log = node.EventuallyHasReqSeqNum(t, c, onRamp, seqNum)
+	}
+	return log
+}
+
+// AllNodesHaveExecutedSeqNums waits until every oracle node has observed
+// ExecutionStateChanged logs for the full inclusive range
+// [minSeqNum, maxSeqNum] on the offRamp (explicit option or harness default).
+// Returns the logs seen by the last node checked.
+func (c *CCIPIntegrationTestHarness) AllNodesHaveExecutedSeqNums(t *testing.T, minSeqNum int, maxSeqNum int, offRampOpts ...common.Address) []logpoller.Log {
+	// Resolve the offRamp address: explicit option wins, otherwise use the
+	// harness default (which must be configured).
+	var offRamp common.Address
+	if len(offRampOpts) == 0 {
+		require.NotNil(t, c.Dest.OffRamp, "no offramp configured")
+		offRamp = c.Dest.OffRamp.Address()
+	} else {
+		offRamp = offRampOpts[0]
+	}
+	var logs []logpoller.Log
+	for _, node := range c.Nodes {
+		logs = node.EventuallyHasExecutedSeqNums(t, c, offRamp, minSeqNum, maxSeqNum)
+	}
+	return logs
+}
+
+// NoNodesHaveExecutedSeqNum asserts, for every oracle node, that the given
+// sequence number is consistently NOT executed on the offRamp (explicit option
+// or harness default). Returns the last observed log, which is the zero value
+// when the assertion holds.
+func (c *CCIPIntegrationTestHarness) NoNodesHaveExecutedSeqNum(t *testing.T, seqNum int, offRampOpts ...common.Address) logpoller.Log {
+	// Resolve the offRamp address: explicit option wins, otherwise use the
+	// harness default (which must be configured).
+	var offRamp common.Address
+	if len(offRampOpts) == 0 {
+		require.NotNil(t, c.Dest.OffRamp, "no offramp configured")
+		offRamp = c.Dest.OffRamp.Address()
+	} else {
+		offRamp = offRampOpts[0]
+	}
+	var log logpoller.Log
+	for _, node := range c.Nodes {
+		log = node.ConsistentlySeqNumHasNotBeenExecuted(t, c, offRamp, seqNum)
+	}
+	return log
+}
+
+// EventuallyCommitReportAccepted waits until the commit store (explicit option
+// or harness default) emits a ReportAccepted event with a non-zero merkle root
+// starting from currentBlock, and returns that report.
+func (c *CCIPIntegrationTestHarness) EventuallyCommitReportAccepted(t *testing.T, currentBlock uint64, commitStoreOpts ...common.Address) commit_store_1_2_0.CommitStoreCommitReport {
+	var commitStore *commit_store_1_2_0.CommitStore
+	var err error
+	if len(commitStoreOpts) > 0 {
+		commitStore, err = commit_store_1_2_0.NewCommitStore(commitStoreOpts[0], c.Dest.Chain)
+		require.NoError(t, err)
+	} else {
+		require.NotNil(t, c.Dest.CommitStore, "no commitStore configured")
+		commitStore = c.Dest.CommitStore
+	}
+	g := gomega.NewGomegaWithT(t)
+	var report commit_store_1_2_0.CommitStoreCommitReport
+	g.Eventually(func() bool {
+		it, err := commitStore.FilterReportAccepted(&bind.FilterOpts{Start: currentBlock})
+		g.Expect(err).NotTo(gomega.HaveOccurred(), "Error filtering ReportAccepted event")
+		g.Expect(it.Next()).To(gomega.BeTrue(), "No ReportAccepted event found")
+		report = it.Event.Report
+		// A zero merkle root means no real report yet; keep polling.
+		if report.MerkleRoot != [32]byte{} {
+			t.Log("Report Accepted by commitStore")
+			return true
+		}
+		return false
+	}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "report has not been committed")
+	return report
+}
+
+// EventuallyExecutionStateChangedToSuccess waits until the offRamp (explicit
+// option or harness default) emits an ExecutionStateChanged event with state
+// Success for any of the given sequence numbers, starting at blockNum.
+func (c *CCIPIntegrationTestHarness) EventuallyExecutionStateChangedToSuccess(t *testing.T, seqNum []uint64, blockNum uint64, offRampOpts ...common.Address) {
+	var offRamp *evm_2_evm_offramp_1_2_0.EVM2EVMOffRamp
+	var err error
+	if len(offRampOpts) > 0 {
+		offRamp, err = evm_2_evm_offramp_1_2_0.NewEVM2EVMOffRamp(offRampOpts[0], c.Dest.Chain)
+		require.NoError(t, err)
+	} else {
+		require.NotNil(t, c.Dest.OffRamp, "no offRamp configured")
+		offRamp = c.Dest.OffRamp
+	}
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		it, err := offRamp.FilterExecutionStateChanged(&bind.FilterOpts{Start: blockNum}, seqNum, [][32]byte{})
+		require.NoError(t, err)
+		for it.Next() {
+			if cciptypes.MessageExecutionState(it.Event.State) == cciptypes.ExecutionStateSuccess {
+				t.Logf("ExecutionStateChanged event found for seqNum %d", it.Event.SequenceNumber)
+				return true
+			}
+		}
+		// Commit on both chains so the simulated backends keep producing blocks.
+		c.Source.Chain.Commit()
+		c.Dest.Chain.Commit()
+		return false
+	}, testutils.WaitTimeout(t), time.Second).
+		Should(gomega.BeTrue(), "ExecutionStateChanged Event")
+}
+
+// EventuallyReportCommitted waits until the commit store's expected next
+// sequence number exceeds max, i.e. a report covering seq num `max` has been
+// committed. Returns the last observed expected-next sequence number.
+func (c *CCIPIntegrationTestHarness) EventuallyReportCommitted(t *testing.T, max int, commitStoreOpts ...common.Address) uint64 {
+	var commitStore *commit_store_1_2_0.CommitStore
+	var err error
+	var committedSeqNum uint64
+	if len(commitStoreOpts) > 0 {
+		commitStore, err = commit_store_1_2_0.NewCommitStore(commitStoreOpts[0], c.Dest.Chain)
+		require.NoError(t, err)
+	} else {
+		require.NotNil(t, c.Dest.CommitStore, "no commitStore configured")
+		commitStore = c.Dest.CommitStore
+	}
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		minSeqNum, err := commitStore.GetExpectedNextSequenceNumber(nil)
+		require.NoError(t, err)
+		// Commit on both chains so the simulated backends keep producing blocks.
+		c.Source.Chain.Commit()
+		c.Dest.Chain.Commit()
+		t.Log("next expected seq num reported", minSeqNum)
+		committedSeqNum = minSeqNum
+		return minSeqNum > uint64(max)
+	}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue(), "report has not been committed")
+	return committedSeqNum
+}
+
+// EventuallySendRequested waits until the onRamp (explicit option or harness
+// default) has emitted a CCIPSendRequested event for the given sequence number.
+func (c *CCIPIntegrationTestHarness) EventuallySendRequested(t *testing.T, seqNum uint64, onRampOpts ...common.Address) {
+	var onRamp *evm_2_evm_onramp_1_2_0.EVM2EVMOnRamp
+	var err error
+	if len(onRampOpts) > 0 {
+		onRamp, err = evm_2_evm_onramp_1_2_0.NewEVM2EVMOnRamp(onRampOpts[0], c.Source.Chain)
+		require.NoError(t, err)
+	} else {
+		require.NotNil(t, c.Source.OnRamp, "no onRamp configured")
+		onRamp = c.Source.OnRamp
+	}
+	gomega.NewGomegaWithT(t).Eventually(func() bool {
+		it, err := onRamp.FilterCCIPSendRequested(nil)
+		require.NoError(t, err)
+		for it.Next() {
+			if it.Event.Message.SequenceNumber == seqNum {
+				t.Log("sendRequested generated for", seqNum)
+				return true
+			}
+		}
+		// Commit on both chains so the simulated backends keep producing blocks.
+		c.Source.Chain.Commit()
+		c.Dest.Chain.Commit()
+		return false
+	}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue(), "sendRequested has not been generated")
+}
+
+// ConsistentlyReportNotCommitted asserts over the whole wait window that the
+// commit store's expected next sequence number never exceeds max, i.e. no
+// report covering seq num `max` gets committed.
+func (c *CCIPIntegrationTestHarness) ConsistentlyReportNotCommitted(t *testing.T, max int, commitStoreOpts ...common.Address) {
+	var commitStore *commit_store_1_2_0.CommitStore
+	var err error
+	if len(commitStoreOpts) > 0 {
+		commitStore, err = commit_store_1_2_0.NewCommitStore(commitStoreOpts[0], c.Dest.Chain)
+		require.NoError(t, err)
+	} else {
+		require.NotNil(t, c.Dest.CommitStore, "no commitStore configured")
+		commitStore = c.Dest.CommitStore
+	}
+	gomega.NewGomegaWithT(t).Consistently(func() bool {
+		minSeqNum, err := commitStore.GetExpectedNextSequenceNumber(nil)
+		require.NoError(t, err)
+		// Commit on both chains so the simulated backends keep producing blocks.
+		c.Source.Chain.Commit()
+		c.Dest.Chain.Commit()
+		t.Log("min seq num reported", minSeqNum)
+		return minSeqNum > uint64(max)
+	}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeFalse(), "report has been committed")
+}
+
+// SetupAndStartNodes starts one bootstrap node plus the minimum four oracle
+// nodes, registers their OCR identities on the harness, and writes the default
+// commit/exec onchain+offchain configs to the contracts. Returns the bootstrap
+// node, the oracle nodes, and the block at which configuration was set.
+func (c *CCIPIntegrationTestHarness) SetupAndStartNodes(ctx context.Context, t *testing.T, bootstrapNodePort int64) (Node, []Node, int64) {
+	appBootstrap, bootstrapPeerID, bootstrapTransmitter, bootstrapKb := setupNodeCCIP(t, c.Dest.User, bootstrapNodePort,
+		"bootstrap_ccip", c.Source.Chain, c.Dest.Chain, big.NewInt(0).SetUint64(c.Source.ChainID),
+		big.NewInt(0).SetUint64(c.Dest.ChainID), "", 0)
+	var (
+		oracles []confighelper.OracleIdentityExtra
+		nodes   []Node
+	)
+	err := appBootstrap.Start(ctx)
+	require.NoError(t, err)
+	t.Cleanup(func() {
+		require.NoError(t, appBootstrap.Stop())
+	})
+	bootstrapNode := Node{
+		App:         appBootstrap,
+		Transmitter: bootstrapTransmitter,
+		KeyBundle:   bootstrapKb,
+	}
+	// Set up the minimum 4 oracles all funded with destination ETH
+	for i := int64(0); i < 4; i++ {
+		app, peerID, transmitter, kb := setupNodeCCIP(
+			t,
+			c.Dest.User,
+			int64(freeport.GetOne(t)),
+			fmt.Sprintf("oracle_ccip%d", i),
+			c.Source.Chain,
+			c.Dest.Chain,
+			big.NewInt(0).SetUint64(c.Source.ChainID),
+			big.NewInt(0).SetUint64(c.Dest.ChainID),
+			bootstrapPeerID,
+			bootstrapNodePort,
+		)
+		nodes = append(nodes, Node{
+			App:         app,
+			Transmitter: transmitter,
+			KeyBundle:   kb,
+		})
+		// NOTE(review): decode error is silently discarded, and the local is
+		// named offchainPublicKey although it is assigned to OnchainPublicKey
+		// below — confirm intent.
+		offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x"))
+		oracles = append(oracles, confighelper.OracleIdentityExtra{
+			OracleIdentity: confighelper.OracleIdentity{
+				OnchainPublicKey:  offchainPublicKey,
+				TransmitAccount:   types4.Account(transmitter.String()),
+				OffchainPublicKey: kb.OffchainPublicKey(),
+				PeerID:            peerID,
+			},
+			ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(),
+		})
+		err = app.Start(ctx)
+		require.NoError(t, err)
+		t.Cleanup(func() {
+			require.NoError(t, app.Stop())
+		})
+	}
+
+	c.Oracles = oracles
+	commitOnchainConfig := c.CreateDefaultCommitOnchainConfig(t)
+	commitOffchainConfig := c.CreateDefaultCommitOffchainConfig(t)
+	execOnchainConfig := c.CreateDefaultExecOnchainConfig(t)
+	execOffchainConfig := c.CreateDefaultExecOffchainConfig(t)
+
+	configBlock := c.SetupOnchainConfig(t, commitOnchainConfig, commitOffchainConfig, execOnchainConfig, execOffchainConfig)
+	c.Nodes = nodes
+	c.Bootstrap = bootstrapNode
+	return bootstrapNode, nodes, configBlock
+}
+
+// SetUpNodesAndJobs starts the bootstrap and oracle nodes, adds the bootstrap
+// and commit/exec jobs, and replays the bootstrap node's log poller from the
+// config block so it picks up the OCR config. Returns the job spec params used.
+func (c *CCIPIntegrationTestHarness) SetUpNodesAndJobs(t *testing.T, pricePipeline string, priceGetterConfig string, usdcAttestationAPI string) integrationtesthelpers.CCIPJobSpecParams {
+	// setup Jobs
+	ctx := context.Background()
+	// Starts nodes and configures them in the OCR contracts.
+	bootstrapNode, _, configBlock := c.SetupAndStartNodes(ctx, t, int64(freeport.GetOne(t)))
+
+	jobParams := c.NewCCIPJobSpecParams(pricePipeline, priceGetterConfig, configBlock, usdcAttestationAPI)
+
+	// Add the bootstrap job
+	c.Bootstrap.AddBootstrapJob(t, jobParams.BootstrapJob(c.Dest.CommitStore.Address().Hex()))
+	c.AddAllJobs(t, jobParams)
+
+	// Replay for bootstrap.
+	bc, err := bootstrapNode.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(c.Dest.ChainID, 10))
+	require.NoError(t, err)
+	require.NoError(t, bc.LogPoller().Replay(context.Background(), configBlock))
+	c.Dest.Chain.Commit()
+
+	return jobParams
+}
+
+// NewCCIPJobSpecParams assembles job spec parameters for this harness's
+// contracts, starting the destination reader at configBlock.
+func (c *CCIPIntegrationTestHarness) NewCCIPJobSpecParams(tokenPricesUSDPipeline string, priceGetterConfig string, configBlock int64, usdcAttestationAPI string) integrationtesthelpers.CCIPJobSpecParams {
+	params := integrationtesthelpers.CCIPJobSpecParams{
+		CommitStore:            c.Dest.CommitStore.Address(),
+		OffRamp:                c.Dest.OffRamp.Address(),
+		DestEvmChainId:         c.Dest.ChainID,
+		SourceChainName:        "SimulatedSource",
+		DestChainName:          "SimulatedDest",
+		TokenPricesUSDPipeline: tokenPricesUSDPipeline,
+		PriceGetterConfig:      priceGetterConfig,
+		DestStartBlock:         uint64(configBlock),
+		USDCAttestationAPI:     usdcAttestationAPI,
+	}
+	return params
+}
+
+// DecodeCommitOnChainConfig extracts the commit plugin onchain config from an
+// ABI-encoded OCR2 config log payload.
+func DecodeCommitOnChainConfig(encoded []byte) (ccipdata.CommitOnchainConfig, error) {
+	var onchainConfig ccipdata.CommitOnchainConfig
+	unpacked, err := abihelpers.DecodeOCR2Config(encoded)
+	if err != nil {
+		// Wrap for consistency with DecodeExecOnChainConfig.
+		return onchainConfig, errors.Wrap(err, "failed to unpack log data")
+	}
+	// Decode the nested onchain config struct carried inside the OCR2 config.
+	onchainConfig, err = abihelpers.DecodeAbiStruct[ccipdata.CommitOnchainConfig](unpacked.OnchainConfig)
+	return onchainConfig, err
+}
+
+// DecodeExecOnChainConfig extracts the exec plugin onchain config from an
+// ABI-encoded OCR2 config log payload.
+func DecodeExecOnChainConfig(encoded []byte) (v1_2_0.ExecOnchainConfig, error) {
+	var onchainConfig v1_2_0.ExecOnchainConfig
+	unpacked, err := abihelpers.DecodeOCR2Config(encoded)
+	if err != nil {
+		return onchainConfig, errors.Wrap(err, "failed to unpack log data")
+	}
+	// Decode the nested onchain config struct carried inside the OCR2 config.
+	onchainConfig, err = abihelpers.DecodeAbiStruct[v1_2_0.ExecOnchainConfig](unpacked.OnchainConfig)
+	return onchainConfig, err
+}
+
+// ksa wraps a master keystore but overrides CSA() to return a supplied
+// (typically mocked) CSA keystore.
+type ksa struct {
+	keystore.Master
+	// csa is returned in place of the master keystore's CSA implementation.
+	csa keystore.CSA
+}
+
+// CSA returns the injected CSA keystore instead of the master's.
+func (k *ksa) CSA() keystore.CSA {
+	return k.csa
+}
+
+// NewKsa builds a ksa around a fresh master keystore (fast scrypt params for
+// tests) with the given CSA override.
+func NewKsa(db *sqlx.DB, lggr logger.Logger, csa keystore.CSA) *ksa {
+	return &ksa{
+		Master: keystore.New(db, clutils.FastScryptParams, lggr),
+		csa:    csa,
+	}
+}
+
+// NoopFeedsClient is a feeds-manager client stub whose methods all succeed
+// with empty responses; used with the mocked connections manager in
+// SetupFeedsManager.
+type NoopFeedsClient struct{}
+
+// ApprovedJob reports success without doing anything.
+func (n NoopFeedsClient) ApprovedJob(context.Context, *pb.ApprovedJobRequest) (*pb.ApprovedJobResponse, error) {
+	return &pb.ApprovedJobResponse{}, nil
+}
+
+// Healthcheck reports success without doing anything.
+func (n NoopFeedsClient) Healthcheck(context.Context, *pb.HealthcheckRequest) (*pb.HealthcheckResponse, error) {
+	return &pb.HealthcheckResponse{}, nil
+}
+
+// UpdateNode reports success without doing anything.
+func (n NoopFeedsClient) UpdateNode(context.Context, *pb.UpdateNodeRequest) (*pb.UpdateNodeResponse, error) {
+	return &pb.UpdateNodeResponse{}, nil
+}
+
+// RejectedJob reports success without doing anything.
+func (n NoopFeedsClient) RejectedJob(context.Context, *pb.RejectedJobRequest) (*pb.RejectedJobResponse, error) {
+	return &pb.RejectedJobResponse{}, nil
+}
+
+// CancelledJob reports success without doing anything.
+func (n NoopFeedsClient) CancelledJob(context.Context, *pb.CancelledJobRequest) (*pb.CancelledJobResponse, error) {
+	return &pb.CancelledJobResponse{}, nil
+}
diff --git a/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/config_1_4_0.go b/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/config_1_4_0.go
new file mode 100644
index 00000000000..751ae5c1a92
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/config_1_4_0.go
@@ -0,0 +1,73 @@
+// Package testhelpers_1_4_0 contains configuration helpers intended solely for use within test suites.
+
+package testhelpers_1_4_0
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers"
+)
+
+// PermissionLessExecutionThresholdSeconds mirrors the age of the first
+// simulated block so permissionless execution is allowed in tests.
+var PermissionLessExecutionThresholdSeconds = uint32(testhelpers.FirstBlockAge.Seconds())
+
+// CreateDefaultCommitOnchainConfig ABI-encodes a commit onchain config pointing
+// at the destination price registry.
+func (c *CCIPContracts) CreateDefaultCommitOnchainConfig(t *testing.T) []byte {
+	// Named `encoded` to avoid shadowing the imported `config` package.
+	encoded, err := abihelpers.EncodeAbiStruct(ccipdata.CommitOnchainConfig{
+		PriceRegistry: c.Dest.PriceRegistry.Address(),
+	})
+	require.NoError(t, err)
+	return encoded
+}
+
+// CreateDefaultCommitOffchainConfig encodes a commit offchain config with a
+// 10s fee-update heartbeat and a 5s inflight-cache expiry.
+func (c *CCIPContracts) CreateDefaultCommitOffchainConfig(t *testing.T) []byte {
+	return c.createCommitOffchainConfig(t, 10*time.Second, 5*time.Second)
+}
+
+// createCommitOffchainConfig encodes a commit offchain config with the given
+// fee-update heartbeat and inflight-cache expiry; the remaining numeric fields
+// are fixed to 1 for tests.
+func (c *CCIPContracts) createCommitOffchainConfig(t *testing.T, feeUpdateHeartBeat time.Duration, inflightCacheExpiry time.Duration) []byte {
+	// Named `encoded` so the `config` package used on the same line is not
+	// shadowed (the original local was also called `config`).
+	encoded, err := NewCommitOffchainConfig(
+		*config.MustNewDuration(feeUpdateHeartBeat),
+		1,
+		1,
+		*config.MustNewDuration(feeUpdateHeartBeat),
+		1,
+		*config.MustNewDuration(inflightCacheExpiry),
+	).Encode()
+	require.NoError(t, err)
+	return encoded
+}
+
+// CreateDefaultExecOnchainConfig ABI-encodes an exec onchain config with
+// test-friendly limits and the destination router/price registry addresses.
+func (c *CCIPContracts) CreateDefaultExecOnchainConfig(t *testing.T) []byte {
+	// Named `encoded` to avoid shadowing the imported `config` package.
+	encoded, err := abihelpers.EncodeAbiStruct(v1_2_0.ExecOnchainConfig{
+		PermissionLessExecutionThresholdSeconds: PermissionLessExecutionThresholdSeconds,
+		Router:                                  c.Dest.Router.Address(),
+		PriceRegistry:                           c.Dest.PriceRegistry.Address(),
+		MaxDataBytes:                            1e5,
+		MaxNumberOfTokensPerMsg:                 5,
+		MaxPoolReleaseOrMintGas:                 200_000,
+	})
+	require.NoError(t, err)
+	return encoded
+}
+
+// CreateDefaultExecOffchainConfig encodes an exec offchain config with 1m
+// inflight-cache expiry and 1m root snooze time.
+func (c *CCIPContracts) CreateDefaultExecOffchainConfig(t *testing.T) []byte {
+	return c.createExecOffchainConfig(t, 1*time.Minute, 1*time.Minute)
+}
+
+// createExecOffchainConfig encodes an exec offchain config with the given
+// inflight-cache expiry and root snooze time; batch gas limit and relative
+// boost are fixed test values.
+func (c *CCIPContracts) createExecOffchainConfig(t *testing.T, inflightCacheExpiry time.Duration, rootSnoozeTime time.Duration) []byte {
+	// Named `encoded` so the `config` package used on the same line is not
+	// shadowed (the original local was also called `config`).
+	encoded, err := NewExecOffchainConfig(
+		1,
+		5_000_000,
+		0.07,
+		*config.MustNewDuration(inflightCacheExpiry),
+		*config.MustNewDuration(rootSnoozeTime),
+	).Encode()
+	require.NoError(t, err)
+	return encoded
+}
diff --git a/core/services/ocr2/plugins/ccip/tokendata/bgworker.go b/core/services/ocr2/plugins/ccip/tokendata/bgworker.go
new file mode 100644
index 00000000000..1a74ab2305b
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/tokendata/bgworker.go
@@ -0,0 +1,213 @@
+package tokendata
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/patrickmn/go-cache"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/services"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+)
+
+// msgResult holds the outcome of fetching token data for a single token
+// amount within one message.
+type msgResult struct {
+	TokenAmountIndex int    // index into the message's TokenAmounts slice
+	Err              error  // error returned by the token data reader, if any
+	Data             []byte // fetched token data on success
+}
+
+// Worker processes messages in the background, fetching offchain token data
+// per token via the configured Readers and caching the results.
+type Worker interface {
+	job.ServiceCtx
+	// AddJobsFromMsgs will include the provided msgs for background processing.
+	AddJobsFromMsgs(ctx context.Context, msgs []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta)
+
+	// GetMsgTokenData returns the token data for the provided msg. If the data
+	// is not ready it keeps waiting until it gets ready. Important: Make sure
+	// to pass a proper context with timeout.
+	GetMsgTokenData(ctx context.Context, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) ([][]byte, error)
+
+	// GetReaders returns the per-token-address readers this worker was built with.
+	GetReaders() map[cciptypes.Address]Reader
+}
+
+// BackgroundWorker is the default Worker implementation: a pool of goroutines
+// consuming jobsChan and writing per-message results into an expiring cache
+// keyed by sequence number.
+type BackgroundWorker struct {
+	tokenDataReaders map[cciptypes.Address]Reader // token address -> offchain data reader
+	numWorkers       int                          // number of consumer goroutines started by Start
+	jobsChan         chan cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta
+	resultsCache     *cache.Cache  // seqNum (decimal string) -> []msgResult
+	timeoutDur       time.Duration // per-reader call timeout and GetMsgTokenData wait bound
+
+	services.StateMachine
+	wg               *sync.WaitGroup // tracks worker and producer goroutines for Close
+	backgroundCtx    context.Context //nolint:containedctx
+	backgroundCancel context.CancelFunc
+}
+
+// NewBackgroundWorker builds a BackgroundWorker (not yet started — call Start).
+// tokenDataReaders maps token addresses to their offchain data readers,
+// numWorkers sets the consumer goroutine count, timeoutDur bounds both reader
+// calls and result waits, and expirationDur controls cache retention
+// (0 means 24h).
+func NewBackgroundWorker(
+	tokenDataReaders map[cciptypes.Address]Reader,
+	numWorkers int,
+	timeoutDur time.Duration,
+	expirationDur time.Duration,
+) *BackgroundWorker {
+	// Fall back to a 24h cache expiration when none was provided.
+	if expirationDur == 0 {
+		expirationDur = 24 * time.Hour
+	}
+
+	bgCtx, bgCancel := context.WithCancel(context.Background())
+	worker := &BackgroundWorker{
+		tokenDataReaders: tokenDataReaders,
+		numWorkers:       numWorkers,
+		// Buffered so producers rarely block: room for 100 pending jobs per worker.
+		jobsChan:     make(chan cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, numWorkers*100),
+		resultsCache: cache.New(expirationDur, expirationDur/2),
+		timeoutDur:   timeoutDur,
+
+		wg:               new(sync.WaitGroup),
+		backgroundCtx:    bgCtx,
+		backgroundCancel: bgCancel,
+	}
+	return worker
+}
+
+// Start launches numWorkers consumer goroutines. StateMachine.StartOnce makes
+// repeated calls a no-op error. Each worker's wg.Add is done here, before the
+// goroutine is spawned in run.
+func (w *BackgroundWorker) Start(context.Context) error {
+	return w.StateMachine.StartOnce("Token BackgroundWorker", func() error {
+		for i := 0; i < w.numWorkers; i++ {
+			w.wg.Add(1)
+			w.run()
+		}
+		return nil
+	})
+}
+
+// Close cancels the background context and blocks until all worker and
+// producer goroutines have exited. StateMachine.StopOnce makes repeated calls
+// a no-op error.
+func (w *BackgroundWorker) Close() error {
+	return w.StateMachine.StopOnce("Token BackgroundWorker", func() error {
+		w.backgroundCancel()
+		w.wg.Wait()
+		return nil
+	})
+}
+
+// AddJobsFromMsgs schedules the provided messages for background token-data
+// processing and returns immediately; enqueueing happens on a dedicated
+// goroutine. Messages without token amounts are skipped. The goroutine stops
+// when the worker is closed or the caller's ctx is cancelled.
+func (w *BackgroundWorker) AddJobsFromMsgs(ctx context.Context, msgs []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) {
+	w.wg.Add(1)
+	go func() {
+		defer w.wg.Done()
+		for _, msg := range msgs {
+			if len(msg.TokenAmounts) == 0 {
+				// No token data required for this message.
+				continue
+			}
+			// Putting the channel send inside the select (rather than after a
+			// Done check) guarantees the goroutine cannot block forever on a
+			// full jobsChan once the worker is closed or ctx is cancelled.
+			select {
+			case <-w.backgroundCtx.Done():
+				return
+			case <-ctx.Done():
+				return
+			case w.jobsChan <- msg:
+			}
+		}
+	}()
+}
+
+// GetReaders returns the token address -> reader map this worker was
+// constructed with. The map is not copied; callers must not mutate it.
+func (w *BackgroundWorker) GetReaders() map[cciptypes.Address]Reader {
+	return w.tokenDataReaders
+}
+
+// GetMsgTokenData returns the token data for msg, indexed to match
+// msg.TokenAmounts. Entries for tokens that have no registered reader stay
+// nil. It blocks (bounded by ctx and w.timeoutDur) until the background
+// results are cached, and returns the first per-token error found, if any.
+func (w *BackgroundWorker) GetMsgTokenData(ctx context.Context, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) ([][]byte, error) {
+	res, err := w.getMsgTokenData(ctx, msg.SequenceNumber)
+	if err != nil {
+		return nil, err
+	}
+
+	tokenDatas := make([][]byte, len(msg.TokenAmounts))
+	for _, r := range res {
+		if r.Err != nil {
+			return nil, r.Err
+		}
+		// Guard against cached results that no longer line up with this msg.
+		if r.TokenAmountIndex < 0 || r.TokenAmountIndex >= len(msg.TokenAmounts) {
+			return nil, fmt.Errorf("token data index inconsistency")
+		}
+		tokenDatas[r.TokenAmountIndex] = r.Data
+	}
+
+	return tokenDatas, nil
+}
+
+// run spawns one consumer goroutine that processes jobs from jobsChan until
+// the background context is cancelled. The caller (Start) must have already
+// incremented w.wg for this goroutine.
+func (w *BackgroundWorker) run() {
+	go func() {
+		defer w.wg.Done()
+		for {
+			select {
+			case <-w.backgroundCtx.Done():
+				return
+			case msg := <-w.jobsChan:
+				w.workOnMsg(w.backgroundCtx, msg)
+			}
+		}
+	}()
+}
+
+// workOnMsg fetches token data for every token amount of msg that has a
+// registered reader and stores the per-token results in resultsCache under
+// the message's sequence number. Previously cached successful results are
+// reused as-is; cached results that carry an error are fetched again, which
+// is what makes re-adding a failed message act as a retry.
+func (w *BackgroundWorker) workOnMsg(ctx context.Context, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) {
+	results := make([]msgResult, 0, len(msg.TokenAmounts))
+
+	cachedTokenData := make(map[int]msgResult) // tokenAmount index -> token data
+	if cachedData, exists := w.getFromCache(msg.SequenceNumber); exists {
+		for _, r := range cachedData {
+			cachedTokenData[r.TokenAmountIndex] = r
+		}
+	}
+
+	for i, token := range msg.TokenAmounts {
+		offchainTokenDataProvider, exists := w.tokenDataReaders[token.Token]
+		if !exists {
+			// No token data required
+			continue
+		}
+
+		// if the result exists in the cache and there wasn't any error keep the existing result
+		if cachedResult, exists := cachedTokenData[i]; exists && cachedResult.Err == nil {
+			results = append(results, cachedResult)
+			continue
+		}
+
+		// if there was any error or if the data do not exist in the cache make a call to the provider
+		// each call is individually bounded by w.timeoutDur
+		timeoutCtx, cf := context.WithTimeout(ctx, w.timeoutDur)
+		tknData, err := offchainTokenDataProvider.ReadTokenData(timeoutCtx, msg, i)
+		cf()
+		results = append(results, msgResult{
+			TokenAmountIndex: i,
+			Err:              err,
+			Data:             tknData,
+		})
+	}
+
+	// overwrites any previous entry for this sequence number
+	w.resultsCache.Set(strconv.FormatUint(msg.SequenceNumber, 10), results, cache.DefaultExpiration)
+}
+
+// getMsgTokenData returns the cached results for seqNum. If they are not in
+// the cache yet it polls every 100ms until they appear or until the context
+// (additionally bounded by w.timeoutDur) expires, in which case
+// context.DeadlineExceeded is returned.
+func (w *BackgroundWorker) getMsgTokenData(ctx context.Context, seqNum uint64) ([]msgResult, error) {
+	if msgTokenData, exists := w.getFromCache(seqNum); exists {
+		return msgTokenData, nil
+	}
+
+	ctx, cf := context.WithTimeout(ctx, w.timeoutDur)
+	defer cf()
+
+	// wait until the results are ready or until context timeout is reached
+	tick := time.NewTicker(100 * time.Millisecond)
+	// The original leaked the ticker; Stop releases its resources on return.
+	defer tick.Stop()
+	for {
+		select {
+		case <-ctx.Done():
+			return nil, context.DeadlineExceeded
+		case <-tick.C:
+			if msgTokenData, exists := w.getFromCache(seqNum); exists {
+				return msgTokenData, nil
+			}
+		}
+	}
+}
+
+// getFromCache looks up the results stored under the decimal string form of
+// seqNum; the boolean reports whether an entry existed.
+func (w *BackgroundWorker) getFromCache(seqNum uint64) ([]msgResult, bool) {
+	if raw, found := w.resultsCache.Get(strconv.FormatUint(seqNum, 10)); found {
+		return raw.([]msgResult), true
+	}
+	return nil, false
+}
diff --git a/core/services/ocr2/plugins/ccip/tokendata/bgworker_test.go b/core/services/ocr2/plugins/ccip/tokendata/bgworker_test.go
new file mode 100644
index 00000000000..5d505363ac7
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/tokendata/bgworker_test.go
@@ -0,0 +1,188 @@
+package tokendata_test
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata"
+)
+
+// TestBackgroundWorker exercises the happy path with many tokens/messages:
+// results are computed concurrently, then served from cache on repeat reads.
+// NOTE(review): the wall-clock assertions below assume a reasonably fast,
+// unloaded scheduler and may be flaky on slow CI machines.
+func TestBackgroundWorker(t *testing.T) {
+	ctx := testutils.Context(t)
+
+	const numTokens = 100
+	const numWorkers = 20
+	const numMessages = 40
+	const maxReaderLatencyMS = 200
+	const percentOfTokensWithoutTokenData = 10
+
+	tokens := make([]cciptypes.Address, numTokens)
+	readers := make(map[cciptypes.Address]*tokendata.MockReader, numTokens)
+	tokenDataReaders := make(map[cciptypes.Address]tokendata.Reader, numTokens)
+	tokenData := make(map[cciptypes.Address][]byte)
+	delays := make(map[cciptypes.Address]time.Duration)
+
+	for i := range tokens {
+		tokens[i] = cciptypes.Address(utils.RandomAddress().String())
+		readers[tokens[i]] = tokendata.NewMockReader(t)
+		// ~90% of tokens get a reader and data; the rest simulate tokens
+		// without any token-data requirement.
+		if rand.Intn(100) >= percentOfTokensWithoutTokenData {
+			tokenDataReaders[tokens[i]] = readers[tokens[i]]
+			tokenData[tokens[i]] = []byte(fmt.Sprintf("...token %x data...", tokens[i]))
+		}
+
+		// specify a random latency for the reader implementation
+		readerLatency := rand.Intn(maxReaderLatencyMS)
+		delays[tokens[i]] = time.Duration(readerLatency) * time.Millisecond
+	}
+	w := tokendata.NewBackgroundWorker(tokenDataReaders, numWorkers, 5*time.Second, time.Hour)
+	require.NoError(t, w.Start(ctx))
+
+	msgs := make([]cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, numMessages)
+	for i := range msgs {
+		tk := tokens[i%len(tokens)]
+
+		msgs[i] = cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+			EVM2EVMMessage: cciptypes.EVM2EVMMessage{
+				SequenceNumber: uint64(i + 1),
+				TokenAmounts:   []cciptypes.TokenAmount{{Token: tk}},
+			},
+		}
+
+		// each reader sleeps for its configured delay before returning data
+		reader := readers[tk]
+		reader.On("ReadTokenData", mock.Anything, msgs[i], 0).Run(func(args mock.Arguments) {
+			time.Sleep(delays[tk])
+		}).Return(tokenData[tk], nil).Maybe()
+	}
+
+	w.AddJobsFromMsgs(ctx, msgs)
+	// processing of the messages should have started at this point
+
+	// first pass: results come from the providers, so total time is bounded
+	// below by the max reader delay and above by parallel processing.
+	tStart := time.Now()
+	for _, msg := range msgs {
+		b, err := w.GetMsgTokenData(ctx, msg) // fetched from provider
+		assert.NoError(t, err)
+		assert.Equal(t, tokenData[msg.TokenAmounts[0].Token], b[0])
+	}
+	assert.True(t, time.Since(tStart) < 600*time.Millisecond)
+	assert.True(t, time.Since(tStart) > 200*time.Millisecond)
+
+	// second pass: everything should already be cached and therefore fast.
+	tStart = time.Now()
+	for _, msg := range msgs {
+		b, err := w.GetMsgTokenData(ctx, msg) // fetched from cache
+		assert.NoError(t, err)
+		assert.Equal(t, tokenData[msg.TokenAmounts[0].Token], b[0])
+	}
+	assert.True(t, time.Since(tStart) < 200*time.Millisecond)
+
+	w.AddJobsFromMsgs(ctx, msgs) // same messages are added but they should already be in cache
+	tStart = time.Now()
+	for _, msg := range msgs {
+		b, err := w.GetMsgTokenData(ctx, msg)
+		assert.NoError(t, err)
+		assert.Equal(t, tokenData[msg.TokenAmounts[0].Token], b[0])
+	}
+	assert.True(t, time.Since(tStart) < 200*time.Millisecond)
+
+	require.NoError(t, w.Close())
+}
+
+// TestBackgroundWorker_RetryOnErrors verifies that errored results are not
+// served from the cache forever: re-adding the same messages re-runs the
+// readers that previously failed, while successful results stay cached.
+func TestBackgroundWorker_RetryOnErrors(t *testing.T) {
+	ctx := testutils.Context(t)
+
+	tk1 := cciptypes.Address(utils.RandomAddress().String())
+	tk2 := cciptypes.Address(utils.RandomAddress().String())
+
+	rdr1 := tokendata.NewMockReader(t)
+	rdr2 := tokendata.NewMockReader(t)
+
+	w := tokendata.NewBackgroundWorker(map[cciptypes.Address]tokendata.Reader{
+		tk1: rdr1,
+		tk2: rdr2,
+	}, 10, 5*time.Second, time.Hour)
+	require.NoError(t, w.Start(ctx))
+
+	msgs := []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+		{EVM2EVMMessage: cciptypes.EVM2EVMMessage{
+			SequenceNumber: uint64(1),
+			TokenAmounts:   []cciptypes.TokenAmount{{Token: tk1}},
+		}},
+		{EVM2EVMMessage: cciptypes.EVM2EVMMessage{
+			SequenceNumber: uint64(2),
+			TokenAmounts:   []cciptypes.TokenAmount{{Token: tk2}},
+		}},
+	}
+
+	rdr1.On("ReadTokenData", mock.Anything, msgs[0], 0).
+		Return([]byte("some data"), nil).Once()
+
+	// reader2 returns an error
+	rdr2.On("ReadTokenData", mock.Anything, msgs[1], 0).
+		Return(nil, fmt.Errorf("some err")).Once()
+
+	w.AddJobsFromMsgs(ctx, msgs)
+	// processing of the messages should have started at this point
+
+	tokenData, err := w.GetMsgTokenData(ctx, msgs[0])
+	assert.NoError(t, err)
+	assert.Equal(t, []byte("some data"), tokenData[0])
+
+	_, err = w.GetMsgTokenData(ctx, msgs[1])
+	assert.Error(t, err)
+	// The original used assert.Errorf(t, err, "some error"), which treats the
+	// string as a failure message rather than the expected error text (and the
+	// text didn't even match). ErrorContains actually checks the error.
+	assert.ErrorContains(t, err, "some err")
+
+	// we make the second reader to return data
+	rdr2.On("ReadTokenData", mock.Anything, msgs[1], 0).
+		Return([]byte("some other data"), nil).Once()
+
+	// add the jobs again; previously errored results get re-fetched
+	w.AddJobsFromMsgs(ctx, msgs)
+
+	// since reader1 returned some data before, we expect to get the cached result
+	tokenData, err = w.GetMsgTokenData(ctx, msgs[0])
+	assert.NoError(t, err)
+	assert.Equal(t, []byte("some data"), tokenData[0])
+
+	// wait some time for msg2 to be re-processed and error overwritten
+	time.Sleep(20 * time.Millisecond) // todo: improve the test
+
+	// for reader2 that returned an error before we expect to get data now
+	tokenData, err = w.GetMsgTokenData(ctx, msgs[1])
+	assert.NoError(t, err)
+	assert.Equal(t, []byte("some other data"), tokenData[0])
+
+	require.NoError(t, w.Close())
+}
+
+// TestBackgroundWorker_Timeout checks that asking for token data of a message
+// that was never enqueued errors out once the caller's context expires,
+// instead of blocking forever.
+func TestBackgroundWorker_Timeout(t *testing.T) {
+	ctx := testutils.Context(t)
+
+	tk1 := cciptypes.Address(utils.RandomAddress().String())
+	tk2 := cciptypes.Address(utils.RandomAddress().String())
+
+	rdr1 := tokendata.NewMockReader(t)
+	rdr2 := tokendata.NewMockReader(t)
+
+	w := tokendata.NewBackgroundWorker(
+		map[cciptypes.Address]tokendata.Reader{tk1: rdr1, tk2: rdr2}, 10, 5*time.Second, time.Hour)
+	require.NoError(t, w.Start(ctx))
+
+	// 500ms is shorter than the worker's 5s timeout, so the ctx wins.
+	ctx, cf := context.WithTimeout(ctx, 500*time.Millisecond)
+	defer cf()
+
+	_, err := w.GetMsgTokenData(ctx, cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+		EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 1}},
+	)
+	assert.Error(t, err)
+	require.NoError(t, w.Close())
+}
diff --git a/core/services/ocr2/plugins/ccip/tokendata/http/http_client.go b/core/services/ocr2/plugins/ccip/tokendata/http/http_client.go
new file mode 100644
index 00000000000..79ec21b1b83
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/tokendata/http/http_client.go
@@ -0,0 +1,48 @@
+package http
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata"
+)
+
+// IHttpClient abstracts the HTTP GET used to reach the attestation API, so it
+// can be wrapped (see ObservedIHttpClient) or mocked in tests.
+type IHttpClient interface {
+	// Get issues a GET request to the given url, bounded by timeout, and
+	// returns the response body, status code and headers.
+	Get(ctx context.Context, url string, timeout time.Duration) ([]byte, int, http.Header, error)
+}
+
+// HttpClient is the default IHttpClient implementation, backed by
+// net/http's DefaultClient.
+type HttpClient struct {
+}
+
+// Get issues a GET request (with an "accept: application/json" header) to url
+// using http.DefaultClient, bounded by timeout. Timeouts map to
+// (StatusRequestTimeout, tokendata.ErrTimeout) and 429 responses to
+// tokendata.ErrRateLimit; other transport errors are reported with a
+// synthetic StatusBadRequest since no real status is available.
+func (s *HttpClient) Get(ctx context.Context, url string, timeout time.Duration) ([]byte, int, http.Header, error) {
+	// Use a timeout to guard against attestation API hanging, causing observation timeout and failing to make any progress.
+	timeoutCtx, cancel := context.WithTimeoutCause(ctx, timeout, tokendata.ErrTimeout)
+	defer cancel()
+	req, err := http.NewRequestWithContext(timeoutCtx, http.MethodGet, url, nil)
+	if err != nil {
+		return nil, http.StatusBadRequest, nil, err
+	}
+	req.Header.Add("accept", "application/json")
+	res, err := http.DefaultClient.Do(req)
+	if err != nil {
+		// NOTE(review): this also matches a deadline on the parent ctx, not
+		// just our local timeout — both are reported as ErrTimeout.
+		if errors.Is(err, context.DeadlineExceeded) {
+			return nil, http.StatusRequestTimeout, nil, tokendata.ErrTimeout
+		}
+		// On error, res is nil in most cases, do not read res.StatusCode, return BadRequest
+		return nil, http.StatusBadRequest, nil, err
+	}
+	defer res.Body.Close()
+
+	// Explicitly signal if the API is being rate limited
+	if res.StatusCode == http.StatusTooManyRequests {
+		return nil, res.StatusCode, res.Header, tokendata.ErrRateLimit
+	}
+
+	body, err := io.ReadAll(res.Body)
+	return body, res.StatusCode, res.Header, err
+}
diff --git a/core/services/ocr2/plugins/ccip/tokendata/http/observed_http_client.go b/core/services/ocr2/plugins/ccip/tokendata/http/observed_http_client.go
new file mode 100644
index 00000000000..d8fb9b1c576
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/tokendata/http/observed_http_client.go
@@ -0,0 +1,69 @@
+package http
+
+import (
+ "context"
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+)
+
+var (
+	// Bucket boundaries expressed in nanoseconds (float64 of a time.Duration),
+	// matching the raw duration value passed to Observe below.
+	usdcLatencyBuckets = []float64{
+		float64(10 * time.Millisecond),
+		float64(25 * time.Millisecond),
+		float64(50 * time.Millisecond),
+		float64(75 * time.Millisecond),
+		float64(100 * time.Millisecond),
+		float64(250 * time.Millisecond),
+		float64(500 * time.Millisecond),
+		float64(750 * time.Millisecond),
+		float64(1 * time.Second),
+		float64(2 * time.Second),
+		float64(3 * time.Second),
+		float64(4 * time.Second),
+		float64(5 * time.Second),
+	}
+	// NOTE(review): despite the "_total" suffix this is a latency histogram,
+	// not a counter; renaming would break existing dashboards, so it stays.
+	usdcClientHistogram = promauto.NewHistogramVec(prometheus.HistogramOpts{
+		Name:    "ccip_usdc_client_request_total",
+		Help:    "Latency of calls to the USDC client",
+		Buckets: usdcLatencyBuckets,
+	}, []string{"status", "success"})
+)
+
+// ObservedIHttpClient decorates an IHttpClient, recording request latency to
+// the given histogram labelled by status code and success.
+type ObservedIHttpClient struct {
+	IHttpClient
+	histogram *prometheus.HistogramVec
+}
+
+// NewObservedIHttpClient creates a new ObservedIHttpClient wired to the
+// package-level USDC client histogram.
+func NewObservedIHttpClient(origin IHttpClient) *ObservedIHttpClient {
+	return NewObservedIHttpClientWithMetric(origin, usdcClientHistogram)
+}
+
+// NewObservedIHttpClientWithMetric creates an ObservedIHttpClient recording
+// into the provided histogram (used by tests to avoid shared global metrics).
+func NewObservedIHttpClientWithMetric(origin IHttpClient, histogram *prometheus.HistogramVec) *ObservedIHttpClient {
+	return &ObservedIHttpClient{
+		IHttpClient: origin,
+		histogram:   histogram,
+	}
+}
+
+// Get delegates to the wrapped client's Get and records the call's latency
+// and outcome in the histogram.
+func (o *ObservedIHttpClient) Get(ctx context.Context, url string, timeout time.Duration) ([]byte, int, http.Header, error) {
+	return withObservedHttpClient(o.histogram, func() ([]byte, int, http.Header, error) {
+		return o.IHttpClient.Get(ctx, url, timeout)
+	})
+}
+
+// withObservedHttpClient runs contract, observes its wall-clock latency (in
+// nanoseconds, matching the bucket units) labelled by HTTP status code and by
+// whether an error occurred, and passes the contract's results through
+// unchanged.
+func withObservedHttpClient[T any](histogram *prometheus.HistogramVec, contract func() (T, int, http.Header, error)) (T, int, http.Header, error) {
+	start := time.Now()
+	value, status, headers, err := contract()
+	elapsed := time.Since(start)
+
+	statusLabel := strconv.FormatInt(int64(status), 10)
+	successLabel := strconv.FormatBool(err == nil)
+	histogram.WithLabelValues(statusLabel, successLabel).Observe(float64(elapsed))
+
+	return value, status, headers, err
+}
diff --git a/core/services/ocr2/plugins/ccip/tokendata/observability/usdc_client_test.go b/core/services/ocr2/plugins/ccip/tokendata/observability/usdc_client_test.go
new file mode 100644
index 00000000000..0567b725a8b
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/tokendata/observability/usdc_client_test.go
@@ -0,0 +1,151 @@
+package observability
+
+import (
+ "context"
+ "encoding/json"
+ "math/big"
+ "math/rand"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+ io_prometheus_client "github.com/prometheus/client_model/go"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks"
+ http2 "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata/http"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata/usdc"
+)
+
+// expected describes one histogram series to assert on after a test run.
+type expected struct {
+	status string // HTTP status code label, e.g. "200"
+	result string // success label, "true" or "false"
+	count  int    // expected number of observations in that series
+}
+
+// TestUSDCClientMonitoring checks that the observed HTTP client records one
+// histogram sample per actual outbound request, for both successful and
+// rate-limited attestation servers.
+func TestUSDCClientMonitoring(t *testing.T) {
+	tests := []struct {
+		name     string
+		server   *httptest.Server
+		requests int
+		expected []expected
+	}{
+		{
+			name:     "success",
+			server:   newSuccessServer(t),
+			requests: 5,
+			expected: []expected{
+				{"200", "true", 5},
+				{"429", "false", 0},
+			},
+		},
+		{
+			// 26 ReadTokenData calls but only one recorded 429 — presumably
+			// the reader stops issuing HTTP requests once rate limited
+			// (cool-down); confirm against the usdc reader implementation.
+			name:     "rate_limited",
+			server:   newRateLimitedServer(),
+			requests: 26,
+			expected: []expected{
+				{"200", "true", 0},
+				{"429", "false", 1},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			testMonitoring(t, test.name, test.server, test.requests, test.expected, logger.TestLogger(t))
+		})
+	}
+}
+
+// testMonitoring runs `requests` ReadTokenData calls against the given
+// attestation test server through an observed HTTP client using a dedicated
+// histogram, then asserts the per-series observation counts.
+func testMonitoring(t *testing.T, name string, server *httptest.Server, requests int, expected []expected, log logger.Logger) {
+	server.Start()
+	defer server.Close()
+	attestationURI, err := url.ParseRequestURI(server.URL)
+	require.NoError(t, err)
+
+	// Define test histogram (avoid side effects from other tests if using the real usdcHistogram).
+	histogram := promauto.NewHistogramVec(prometheus.HistogramOpts{
+		Name:    "test_client_histogram_" + name,
+		Help:    "Latency of calls to the USDC mock client",
+		Buckets: []float64{float64(250 * time.Millisecond), float64(1 * time.Second), float64(5 * time.Second)},
+	}, []string{"status", "success"})
+
+	// Mock USDC reader.
+	usdcReader := mocks.NewUSDCReader(t)
+	msgBody := []byte{0xb0, 0xd1}
+	usdcReader.On("GetUSDCMessagePriorToLogIndexInTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(msgBody, nil)
+
+	// Service with monitored http client.
+	usdcTokenAddr := utils.RandomAddress()
+	observedHttpClient := http2.NewObservedIHttpClientWithMetric(&http2.HttpClient{}, histogram)
+	tokenDataReaderDefault := usdc.NewUSDCTokenDataReader(log, usdcReader, attestationURI, 0, usdcTokenAddr, usdc.APIIntervalRateLimitDisabled)
+	tokenDataReader := usdc.NewUSDCTokenDataReaderWithHttpClient(*tokenDataReaderDefault, observedHttpClient, usdcTokenAddr, usdc.APIIntervalRateLimitDisabled)
+	require.NotNil(t, tokenDataReader)
+
+	// Errors are intentionally ignored: only the recorded metrics matter here.
+	for i := 0; i < requests; i++ {
+		_, _ = tokenDataReader.ReadTokenData(context.Background(), cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+			EVM2EVMMessage: cciptypes.EVM2EVMMessage{
+				TokenAmounts: []cciptypes.TokenAmount{
+					{
+						Token:  ccipcalc.EvmAddrToGeneric(usdcTokenAddr),
+						Amount: big.NewInt(rand.Int63()),
+					},
+				},
+			},
+		}, 0)
+	}
+
+	// Check that the metrics are updated as expected.
+	for _, e := range expected {
+		assert.Equal(t, e.count, counterFromHistogramByLabels(t, histogram, e.status, e.result))
+	}
+}
+
+// counterFromHistogramByLabels returns the total sample count observed for
+// the series of histogramVec identified by the given label values, by
+// collecting the series into a protobuf Metric and reading its sample count.
+func counterFromHistogramByLabels(t *testing.T, histogramVec *prometheus.HistogramVec, labels ...string) int {
+	observer, err := histogramVec.GetMetricWithLabelValues(labels...)
+	require.NoError(t, err)
+
+	metricCh := make(chan prometheus.Metric, 1)
+	observer.(prometheus.Histogram).Collect(metricCh)
+	close(metricCh)
+
+	metric := <-metricCh
+	pb := &io_prometheus_client.Metric{}
+	err = metric.Write(pb)
+	require.NoError(t, err)
+
+	return int(pb.GetHistogram().GetSampleCount())
+}
+
+// newSuccessServer returns an unstarted test server whose handler always
+// answers 200 with a JSON body carrying a "complete" attestation.
+func newSuccessServer(t *testing.T) *httptest.Server {
+	handler := func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusOK)
+		payload := struct {
+			Status      string `json:"status"`
+			Attestation string `json:"attestation"`
+		}{
+			Status:      "complete",
+			Attestation: "720502893578a89a8a87982982ef781c18b193",
+		}
+		body, err := json.Marshal(payload)
+		require.NoError(t, err)
+		_, err = w.Write(body)
+		require.NoError(t, err)
+	}
+	return httptest.NewUnstartedServer(http.HandlerFunc(handler))
+}
+
+// newRateLimitedServer returns an unstarted test server that always answers
+// 429 Too Many Requests with an empty body.
+func newRateLimitedServer() *httptest.Server {
+	return httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusTooManyRequests)
+	}))
+}
diff --git a/core/services/ocr2/plugins/ccip/tokendata/reader.go b/core/services/ocr2/plugins/ccip/tokendata/reader.go
new file mode 100644
index 00000000000..16646bc7c5e
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/tokendata/reader.go
@@ -0,0 +1,19 @@
+package tokendata
+
+import (
+ "errors"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+)
+
+var (
+	// ErrNotReady signals that token data has not been produced yet.
+	ErrNotReady = errors.New("token data not ready")
+	// ErrRateLimit signals the upstream API returned 429 / is rate limiting us.
+	ErrRateLimit = errors.New("token data API is being rate limited")
+	// ErrTimeout signals the upstream API call exceeded its deadline.
+	ErrTimeout = errors.New("token data API timed out")
+	// ErrRequestsBlocked signals outbound requests are temporarily disabled
+	// (e.g. during a rate-limit cool-down).
+	ErrRequestsBlocked = errors.New("requests are currently blocked")
+)
+
+// Reader is an interface for fetching offchain token data
+type Reader interface {
+	cciptypes.TokenDataReader
+}
diff --git a/core/services/ocr2/plugins/ccip/tokendata/reader_mock.go b/core/services/ocr2/plugins/ccip/tokendata/reader_mock.go
new file mode 100644
index 00000000000..39166d61590
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/tokendata/reader_mock.go
@@ -0,0 +1,143 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package tokendata
+
+import (
+ context "context"
+
+ ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// MockReader is an autogenerated mock type for the Reader type
+// NOTE(review): generated by mockery — change the Reader interface and
+// regenerate instead of editing this file by hand.
+type MockReader struct {
+	mock.Mock
+}
+
+type MockReader_Expecter struct {
+	mock *mock.Mock
+}
+
+// EXPECT returns the typed expecter used for fluent expectation setup.
+func (_m *MockReader) EXPECT() *MockReader_Expecter {
+	return &MockReader_Expecter{mock: &_m.Mock}
+}
+
+// Close provides a mock function with given fields:
+// NOTE(review): mockery-generated; do not modify by hand.
+func (_m *MockReader) Close() error {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for Close")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func() error); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// MockReader_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
+type MockReader_Close_Call struct {
+	*mock.Call
+}
+
+// Close is a helper method to define mock.On call
+func (_e *MockReader_Expecter) Close() *MockReader_Close_Call {
+	return &MockReader_Close_Call{Call: _e.mock.On("Close")}
+}
+
+func (_c *MockReader_Close_Call) Run(run func()) *MockReader_Close_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run()
+	})
+	return _c
+}
+
+func (_c *MockReader_Close_Call) Return(_a0 error) *MockReader_Close_Call {
+	_c.Call.Return(_a0)
+	return _c
+}
+
+func (_c *MockReader_Close_Call) RunAndReturn(run func() error) *MockReader_Close_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
+// ReadTokenData provides a mock function with given fields: ctx, msg, tokenIndex
+// NOTE(review): mockery-generated; do not modify by hand.
+func (_m *MockReader) ReadTokenData(ctx context.Context, msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta, tokenIndex int) ([]byte, error) {
+	ret := _m.Called(ctx, msg, tokenIndex)
+
+	if len(ret) == 0 {
+		panic("no return value specified for ReadTokenData")
+	}
+
+	var r0 []byte
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta, int) ([]byte, error)); ok {
+		return rf(ctx, msg, tokenIndex)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta, int) []byte); ok {
+		r0 = rf(ctx, msg, tokenIndex)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).([]byte)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta, int) error); ok {
+		r1 = rf(ctx, msg, tokenIndex)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// MockReader_ReadTokenData_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReadTokenData'
+type MockReader_ReadTokenData_Call struct {
+	*mock.Call
+}
+
+// ReadTokenData is a helper method to define mock.On call
+//   - ctx context.Context
+//   - msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta
+//   - tokenIndex int
+func (_e *MockReader_Expecter) ReadTokenData(ctx interface{}, msg interface{}, tokenIndex interface{}) *MockReader_ReadTokenData_Call {
+	return &MockReader_ReadTokenData_Call{Call: _e.mock.On("ReadTokenData", ctx, msg, tokenIndex)}
+}
+
+func (_c *MockReader_ReadTokenData_Call) Run(run func(ctx context.Context, msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta, tokenIndex int)) *MockReader_ReadTokenData_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta), args[2].(int))
+	})
+	return _c
+}
+
+func (_c *MockReader_ReadTokenData_Call) Return(tokenData []byte, err error) *MockReader_ReadTokenData_Call {
+	_c.Call.Return(tokenData, err)
+	return _c
+}
+
+func (_c *MockReader_ReadTokenData_Call) RunAndReturn(run func(context.Context, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta, int) ([]byte, error)) *MockReader_ReadTokenData_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
+// NewMockReader creates a new instance of MockReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+// NOTE(review): mockery-generated; do not modify by hand.
+func NewMockReader(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *MockReader {
+	mock := &MockReader{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc.go b/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc.go
new file mode 100644
index 00000000000..fe3a86d2aff
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc.go
@@ -0,0 +1,339 @@
+package usdc
+
+import (
+ "context"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/pkg/errors"
+ "golang.org/x/time/rate"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata/http"
+)
+
+const (
+ // apiVersion and attestationPath form the attestation endpoint URL:
+ // <attestationApi>/v1/attestations/0x<messageHash>.
+ apiVersion = "v1"
+ attestationPath = "attestations"
+ // defaultAttestationTimeout is used when the caller passes a timeout of 0.
+ defaultAttestationTimeout = 5 * time.Second
+
+ // defaultCoolDownDuration defines the default time to wait after getting rate limited.
+ // this value is only used if the 429 response does not contain the Retry-After header
+ defaultCoolDownDuration = 5 * time.Minute
+
+ // maxCoolDownDuration defines the maximum duration we can wait till firing the next request
+ maxCoolDownDuration = 10 * time.Minute
+
+ // defaultRequestInterval defines the minimum interval between calls to the attestation API.
+ // this is set according to the API's documented 10 requests per second rate limit.
+ defaultRequestInterval = 100 * time.Millisecond
+
+ // APIIntervalRateLimitDisabled is a special value to disable the rate limiting.
+ APIIntervalRateLimitDisabled = -1
+ // APIIntervalRateLimitDefault is a special value to select the default rate limit interval.
+ APIIntervalRateLimitDefault = 0
+)
+
+// attestationStatus is the value of the "status" field in attestation API responses.
+type attestationStatus string
+
+const (
+ attestationStatusSuccess attestationStatus = "complete"
+ attestationStatusPending attestationStatus = "pending_confirmations"
+)
+
+var (
+ // ErrUnknownResponse is returned when the attestation API reports a status
+ // other than "complete" or "pending_confirmations".
+ ErrUnknownResponse = errors.New("unexpected response from attestation API")
+)
+
+// messageAndAttestation has to match the onchain struct `MessageAndAttestation` in the
+// USDC token pool.
+type messageAndAttestation struct {
+ Message []byte
+ Attestation []byte
+}
+
+// AbiString returns the ABI fragment used to encode the struct for onchain consumption.
+func (m messageAndAttestation) AbiString() string {
+ return `
+ [{
+ "components": [
+ {"name": "message", "type": "bytes"},
+ {"name": "attestation", "type": "bytes"}
+ ],
+ "type": "tuple"
+ }]`
+}
+
+// Validate rejects a struct whose message or attestation is empty.
+func (m messageAndAttestation) Validate() error {
+ if len(m.Message) == 0 {
+ return errors.New("message must be non-empty")
+ }
+ if len(m.Attestation) == 0 {
+ return errors.New("attestation must be non-empty")
+ }
+ return nil
+}
+
+// TokenDataReader fetches USDC attestations for CCIP messages from the Circle
+// attestation API, applying a client-side request interval and a cool-down
+// period after the API returns HTTP 429.
+type TokenDataReader struct {
+ lggr logger.Logger
+ usdcReader ccipdata.USDCReader
+ httpClient http.IHttpClient
+ attestationApi *url.URL
+ attestationApiTimeout time.Duration
+ usdcTokenAddress common.Address
+ // rate limits outgoing attestation requests; nil-checked before use.
+ rate *rate.Limiter
+
+ // coolDownUntil defines whether requests are blocked or not.
+ coolDownUntil time.Time
+ coolDownMu *sync.RWMutex
+}
+
+// attestationResponse is the JSON body returned by the attestation API.
+type attestationResponse struct {
+ Status attestationStatus `json:"status"`
+ Attestation string `json:"attestation"`
+ Error string `json:"error"`
+}
+
+var _ tokendata.Reader = &TokenDataReader{}
+
+// NewUSDCTokenDataReader constructs a TokenDataReader that calls usdcAttestationApi
+// through an observed HTTP client. A usdcAttestationApiTimeoutSeconds of 0 selects
+// defaultAttestationTimeout. requestInterval also accepts the special values
+// APIIntervalRateLimitDisabled (no client-side rate limit) and
+// APIIntervalRateLimitDefault (use defaultRequestInterval).
+func NewUSDCTokenDataReader(
+ lggr logger.Logger,
+ usdcReader ccipdata.USDCReader,
+ usdcAttestationApi *url.URL,
+ usdcAttestationApiTimeoutSeconds int,
+ usdcTokenAddress common.Address,
+ requestInterval time.Duration,
+) *TokenDataReader {
+ timeout := time.Duration(usdcAttestationApiTimeoutSeconds) * time.Second
+ if usdcAttestationApiTimeoutSeconds == 0 {
+ timeout = defaultAttestationTimeout
+ }
+
+ if requestInterval == APIIntervalRateLimitDisabled {
+ // rate.Every treats a non-positive interval as "no limit".
+ requestInterval = 0
+ } else if requestInterval == APIIntervalRateLimitDefault {
+ requestInterval = defaultRequestInterval
+ }
+
+ return &TokenDataReader{
+ lggr: lggr,
+ usdcReader: usdcReader,
+ httpClient: http.NewObservedIHttpClient(&http.HttpClient{}),
+ attestationApi: usdcAttestationApi,
+ attestationApiTimeout: timeout,
+ usdcTokenAddress: usdcTokenAddress,
+ coolDownMu: &sync.RWMutex{},
+ rate: rate.NewLimiter(rate.Every(requestInterval), 1),
+ }
+}
+
+// NewUSDCTokenDataReaderWithHttpClient copies an existing reader but substitutes the
+// given HTTP client, token address and request interval. The cool-down mutex is
+// shared with origin, but origin's coolDownUntil timestamp is not copied —
+// NOTE(review): confirm the new reader is intended to start outside any cool-down.
+func NewUSDCTokenDataReaderWithHttpClient(
+ origin TokenDataReader,
+ httpClient http.IHttpClient,
+ usdcTokenAddress common.Address,
+ requestInterval time.Duration,
+) *TokenDataReader {
+ return &TokenDataReader{
+ lggr: origin.lggr,
+ usdcReader: origin.usdcReader,
+ httpClient: httpClient,
+ attestationApi: origin.attestationApi,
+ attestationApiTimeout: origin.attestationApiTimeout,
+ coolDownMu: origin.coolDownMu,
+ usdcTokenAddress: usdcTokenAddress,
+ rate: rate.NewLimiter(rate.Every(requestInterval), 1),
+ }
+}
+
+// ReadTokenData queries the USDC attestation API to construct a message and
+// attestation response. When called back to back, or multiple times
+// concurrently, responses are delayed according to how the request interval is
+// configured. Returns tokendata.ErrRequestsBlocked during a rate-limit
+// cool-down and tokendata.ErrNotReady while the attestation is still pending.
+func (s *TokenDataReader) ReadTokenData(ctx context.Context, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, tokenIndex int) ([]byte, error) {
+ if tokenIndex < 0 || tokenIndex >= len(msg.TokenAmounts) {
+ return nil, fmt.Errorf("token index out of bounds")
+ }
+
+ if s.inCoolDownPeriod() {
+ // rate limiting cool-down period, we prevent new requests from being sent
+ return nil, tokendata.ErrRequestsBlocked
+ }
+
+ if s.rate != nil {
+ // Wait blocks until the attestation API can be called or the
+ // context is Done.
+ if waitErr := s.rate.Wait(ctx); waitErr != nil {
+ return nil, fmt.Errorf("usdc rate limiting error: %w", waitErr)
+ }
+ }
+
+ messageBody, err := s.getUSDCMessageBody(ctx, msg, tokenIndex)
+ if err != nil {
+ return []byte{}, errors.Wrap(err, "failed getting the USDC message body")
+ }
+
+ msgID := hexutil.Encode(msg.MessageID[:])
+ msgBody := hexutil.Encode(messageBody)
+ s.lggr.Infow("Calling attestation API", "messageBodyHash", msgBody, "messageID", msgID)
+
+ // The attestation API expects the hash of the message body
+ attestationResp, err := s.callAttestationApi(ctx, utils.Keccak256Fixed(messageBody))
+ if err != nil {
+ return []byte{}, errors.Wrap(err, "failed calling usdc attestation API ")
+ }
+
+ s.lggr.Infow("Got response from attestation API", "messageID", msgID,
+ "attestationStatus", attestationResp.Status, "attestation", attestationResp.Attestation,
+ "attestationError", attestationResp.Error)
+
+ switch attestationResp.Status {
+ case attestationStatusSuccess:
+ // The USDC pool needs a combination of the message body and the attestation
+ messageAndAttestation, err := encodeMessageAndAttestation(messageBody, attestationResp.Attestation)
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode messageAndAttestation : %w", err)
+ }
+ return messageAndAttestation, nil
+ case attestationStatusPending:
+ return nil, tokendata.ErrNotReady
+ default:
+ s.lggr.Errorw("Unexpected response from attestation API", "attestationResp", attestationResp)
+ return nil, ErrUnknownResponse
+ }
+}
+
+// encodeMessageAndAttestation encodes the message body and attestation into a single byte array
+// that is readable onchain. The attestation string may carry an optional "0x" prefix.
+func encodeMessageAndAttestation(messageBody []byte, attestation string) ([]byte, error) {
+ attestationBytes, err := hex.DecodeString(strings.TrimPrefix(attestation, "0x"))
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode response attestation: %w", err)
+ }
+
+ return abihelpers.EncodeAbiStruct[messageAndAttestation](messageAndAttestation{
+ Message: messageBody,
+ Attestation: attestationBytes,
+ })
+}
+
+// getUSDCMessageBody fetches the raw USDC message for the transfer at tokenIndex
+// via the USDC reader, using the message's transaction hash and log index plus the
+// end offset computed from the position of the token within msg.TokenAmounts.
+func (s *TokenDataReader) getUSDCMessageBody(
+ ctx context.Context,
+ msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta,
+ tokenIndex int,
+) ([]byte, error) {
+ usdcTokenEndOffset, err := s.getUsdcTokenEndOffset(msg, tokenIndex)
+ if err != nil {
+ return nil, fmt.Errorf("get usdc token %d end offset: %w", tokenIndex, err)
+ }
+
+ parsedMsgBody, err := s.usdcReader.GetUSDCMessagePriorToLogIndexInTx(ctx, int64(msg.LogIndex), usdcTokenEndOffset, msg.TxHash)
+ if err != nil {
+ return []byte{}, err
+ }
+
+ s.lggr.Infow("Got USDC message body", "messageBody", hexutil.Encode(parsedMsgBody), "messageID", hexutil.Encode(msg.MessageID[:]))
+ return parsedMsgBody, nil
+}
+
+// getUsdcTokenEndOffset returns how many USDC transfers appear in msg.TokenAmounts
+// AFTER tokenIndex, i.e. the token's offset counting back from the end of the
+// message's USDC transfers. Errors if tokenIndex is out of range or does not
+// point at the configured USDC token address.
+func (s *TokenDataReader) getUsdcTokenEndOffset(msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, tokenIndex int) (int, error) {
+ if tokenIndex >= len(msg.TokenAmounts) || tokenIndex < 0 {
+ return 0, fmt.Errorf("invalid token index %d for msg with %d tokens", tokenIndex, len(msg.TokenAmounts))
+ }
+
+ if msg.TokenAmounts[tokenIndex].Token != ccipcalc.EvmAddrToGeneric(s.usdcTokenAddress) {
+ return 0, fmt.Errorf("the specified token index %d is not a usdc token", tokenIndex)
+ }
+
+ usdcTokenEndOffset := 0
+ for i := tokenIndex + 1; i < len(msg.TokenAmounts); i++ {
+ evmTokenAddr, err := ccipcalc.GenericAddrToEvm(msg.TokenAmounts[i].Token)
+ if err != nil {
+ // Addresses that do not parse as EVM addresses cannot be the USDC token.
+ continue
+ }
+
+ if evmTokenAddr == s.usdcTokenAddress {
+ usdcTokenEndOffset++
+ }
+ }
+
+ return usdcTokenEndOffset, nil
+}
+
+// callAttestationApi calls the USDC attestation API with the given USDC message hash.
+// The attestation service rate limit is 10 requests per second. If you exceed 10 requests
+// per second, the service blocks all API requests for the next 5 minutes and returns an
+// HTTP 429 response.
+//
+// Documentation:
+//
+// https://developers.circle.com/stablecoins/reference/getattestation
+// https://developers.circle.com/stablecoins/docs/transfer-usdc-on-testnet-from-ethereum-to-avalanche
+func (s *TokenDataReader) callAttestationApi(ctx context.Context, usdcMessageHash [32]byte) (attestationResponse, error) {
+ body, _, headers, err := s.httpClient.Get(
+ ctx,
+ fmt.Sprintf("%s/%s/%s/0x%x", s.attestationApi, apiVersion, attestationPath, usdcMessageHash),
+ s.attestationApiTimeout,
+ )
+ switch {
+ case errors.Is(err, tokendata.ErrRateLimit):
+ // Honor the server-provided Retry-After (seconds) when present;
+ // otherwise fall back to the default cool-down.
+ coolDownDuration := defaultCoolDownDuration
+ if retryAfterHeader, exists := headers["Retry-After"]; exists && len(retryAfterHeader) > 0 {
+ if retryAfterSec, errParseInt := strconv.ParseInt(retryAfterHeader[0], 10, 64); errParseInt == nil {
+ coolDownDuration = time.Duration(retryAfterSec) * time.Second
+ }
+ }
+ s.setCoolDownPeriod(coolDownDuration)
+
+ // Explicitly signal if the API is being rate limited
+ return attestationResponse{}, tokendata.ErrRateLimit
+ case err != nil:
+ return attestationResponse{}, fmt.Errorf("request error: %w", err)
+ }
+
+ var response attestationResponse
+ err = json.Unmarshal(body, &response)
+ if err != nil {
+ return attestationResponse{}, err
+ }
+ if response.Error != "" {
+ return attestationResponse{}, fmt.Errorf("attestation API error: %s", response.Error)
+ }
+ // A body with neither an error nor a status is malformed.
+ if response.Status == "" {
+ return attestationResponse{}, fmt.Errorf("invalid attestation response: %s", string(body))
+ }
+ return response, nil
+}
+
+// setCoolDownPeriod blocks new attestation requests for d, capped at maxCoolDownDuration.
+func (s *TokenDataReader) setCoolDownPeriod(d time.Duration) {
+ s.coolDownMu.Lock()
+ if d > maxCoolDownDuration {
+ d = maxCoolDownDuration
+ }
+ s.coolDownUntil = time.Now().Add(d)
+ s.coolDownMu.Unlock()
+}
+
+// inCoolDownPeriod reports whether requests are currently blocked after a 429 response.
+func (s *TokenDataReader) inCoolDownPeriod() bool {
+ s.coolDownMu.RLock()
+ defer s.coolDownMu.RUnlock()
+ return time.Now().Before(s.coolDownUntil)
+}
+
+// Close implements tokendata.Reader; the reader holds no resources to release.
+func (s *TokenDataReader) Close() error {
+ return nil
+}
diff --git a/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_blackbox_test.go b/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_blackbox_test.go
new file mode 100644
index 00000000000..95b309ff74e
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_blackbox_test.go
@@ -0,0 +1,119 @@
+package usdc_test
+
+import (
+ "context"
+ "encoding/hex"
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ ccipdatamocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata/usdc"
+)
+
+// attestationResponse mirrors the JSON body served by the mocked attestation API.
+type attestationResponse struct {
+ Status string `json:"status"`
+ Attestation string `json:"attestation"`
+}
+
+// TestUSDCReader_ReadTokenData drives ReadTokenData end to end against a local
+// HTTP server for each attestation status, asserting the ABI-encoded
+// message-and-attestation result on success and sentinel errors otherwise.
+func TestUSDCReader_ReadTokenData(t *testing.T) {
+ tests := []struct {
+ name string
+ attestationResponse attestationResponse
+ expectedError error
+ }{
+ {
+ name: "status complete",
+ attestationResponse: attestationResponse{
+ Status: "complete",
+ Attestation: "0x9049623e91719ef2aa63c55f357be2529b0e7122ae552c18aff8db58b4633c4d3920ff03d3a6d1ddf11f06bf64d7fd60d45447ac81f527ba628877dc5ca759651b08ffae25a6d3b1411749765244f0a1c131cbfe04430d687a2e12fd9d2e6dc08e118ad95d94ad832332cf3c4f7a4f3da0baa803b7be024b02db81951c0f0714de1b",
+ },
+ expectedError: nil,
+ },
+ {
+ name: "status pending",
+ attestationResponse: attestationResponse{
+ Status: "pending_confirmations",
+ Attestation: "720502893578a89a8a87982982ef781c18b193",
+ },
+ expectedError: tokendata.ErrNotReady,
+ },
+ {
+ name: "status invalid",
+ attestationResponse: attestationResponse{
+ Status: "invalid",
+ Attestation: "720502893578a89a8a87982982ef781c18b193",
+ },
+ expectedError: usdc.ErrUnknownResponse,
+ },
+ }
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ // Message is the bytes itself from MessageSend(bytes message)
+ // i.e. ABI parsed.
+ message := "0x0000000000000001000000020000000000048d71000000000000000000000000eb08f243e5d3fcff26a9e38ae5520a669f4019d000000000000000000000000023a04d5935ed8bc8e3eb78db3541f0abfb001c6e0000000000000000000000006cb3ed9b441eb674b58495c8b3324b59faff5243000000000000000000000000000000005425890298aed601595a70ab815c96711a31bc65000000000000000000000000ab4f961939bfe6a93567cc57c59eed7084ce2131000000000000000000000000000000000000000000000000000000000000271000000000000000000000000035e08285cfed1ef159236728f843286c55fc0861"
+ expectedMessageAndAttestation := "0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000016000000000000000000000000000000000000000000000000000000000000000f80000000000000001000000020000000000048d71000000000000000000000000eb08f243e5d3fcff26a9e38ae5520a669f4019d000000000000000000000000023a04d5935ed8bc8e3eb78db3541f0abfb001c6e0000000000000000000000006cb3ed9b441eb674b58495c8b3324b59faff5243000000000000000000000000000000005425890298aed601595a70ab815c96711a31bc65000000000000000000000000ab4f961939bfe6a93567cc57c59eed7084ce2131000000000000000000000000000000000000000000000000000000000000271000000000000000000000000035e08285cfed1ef159236728f843286c55fc0861000000000000000000000000000000000000000000000000000000000000000000000000000000829049623e91719ef2aa63c55f357be2529b0e7122ae552c18aff8db58b4633c4d3920ff03d3a6d1ddf11f06bf64d7fd60d45447ac81f527ba628877dc5ca759651b08ffae25a6d3b1411749765244f0a1c131cbfe04430d687a2e12fd9d2e6dc08e118ad95d94ad832332cf3c4f7a4f3da0baa803b7be024b02db81951c0f0714de1b000000000000000000000000000000000000000000000000000000000000"
+ lggr := logger.TestLogger(t)
+
+ // The fake attestation server verifies the reader hashed the message
+ // body correctly before serving the canned response.
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ messageHash := utils.Keccak256Fixed(hexutil.MustDecode(message))
+ expectedUrl := "/v1/attestations/0x" + hex.EncodeToString(messageHash[:])
+ require.Equal(t, expectedUrl, r.URL.Path)
+
+ responseBytes, err2 := json.Marshal(test.attestationResponse)
+ require.NoError(t, err2)
+
+ _, err2 = w.Write(responseBytes)
+ require.NoError(t, err2)
+ }))
+
+ defer ts.Close()
+
+ seqNum := uint64(23825)
+ txHash := utils.RandomBytes32()
+ logIndex := int64(4)
+
+ usdcReader := ccipdatamocks.USDCReader{}
+ usdcReader.On("GetUSDCMessagePriorToLogIndexInTx",
+ mock.Anything,
+ logIndex,
+ 0,
+ common.Hash(txHash).String(),
+ ).Return(hexutil.MustDecode(message), nil)
+ attestationURI, err := url.ParseRequestURI(ts.URL)
+ require.NoError(t, err)
+
+ addr := utils.RandomAddress()
+ usdcService := usdc.NewUSDCTokenDataReader(lggr, &usdcReader, attestationURI, 0, addr, usdc.APIIntervalRateLimitDisabled)
+ msgAndAttestation, err := usdcService.ReadTokenData(context.Background(), cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+ EVM2EVMMessage: cciptypes.EVM2EVMMessage{
+ SequenceNumber: seqNum,
+ TokenAmounts: []cciptypes.TokenAmount{{Token: ccipcalc.EvmAddrToGeneric(addr), Amount: nil}},
+ },
+ TxHash: cciptypes.Hash(txHash).String(),
+ LogIndex: uint(logIndex),
+ }, 0)
+ if test.expectedError != nil {
+ require.Error(t, err)
+ require.Equal(t, test.expectedError, err)
+ return
+ }
+ require.NoError(t, err)
+ // Expected attestation for parsed body.
+ require.Equal(t, expectedMessageAndAttestation, hexutil.Encode(msgAndAttestation))
+ })
+ }
+}
diff --git a/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_test.go b/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_test.go
new file mode 100644
index 00000000000..c4221b2dc0f
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_test.go
@@ -0,0 +1,423 @@
+package usdc
+
+import (
+ "context"
+ "encoding/json"
+ "math/big"
+ "math/rand"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata"
+ ccipdatamocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata"
+)
+
+var (
+ // mockMsgTransmitter stands in for the MessageTransmitter contract address in tests.
+ mockMsgTransmitter = utils.RandomAddress()
+)
+
+// TestUSDCReader_callAttestationApi hits the live Circle sandbox API; it is
+// permanently skipped and kept only for manual verification.
+func TestUSDCReader_callAttestationApi(t *testing.T) {
+ t.Skipf("Skipping test because it uses the real USDC attestation API")
+ usdcMessageHash := "912f22a13e9ccb979b621500f6952b2afd6e75be7eadaed93fc2625fe11c52a2"
+ attestationURI, err := url.ParseRequestURI("https://iris-api-sandbox.circle.com")
+ require.NoError(t, err)
+ lggr := logger.TestLogger(t)
+ usdcReader, _ := ccipdata.NewUSDCReader(lggr, "job_123", mockMsgTransmitter, nil, false)
+ usdcService := NewUSDCTokenDataReader(lggr, usdcReader, attestationURI, 0, common.Address{}, APIIntervalRateLimitDisabled)
+
+ attestation, err := usdcService.callAttestationApi(context.Background(), [32]byte(common.FromHex(usdcMessageHash)))
+ require.NoError(t, err)
+
+ require.Equal(t, attestationStatusPending, attestation.Status)
+ require.Equal(t, "PENDING", attestation.Attestation)
+}
+
+// TestUSDCReader_callAttestationApiMock checks the happy path: a mocked endpoint
+// returns a "complete" attestation and callAttestationApi parses it through.
+func TestUSDCReader_callAttestationApiMock(t *testing.T) {
+ response := attestationResponse{
+ Status: attestationStatusSuccess,
+ Attestation: "720502893578a89a8a87982982ef781c18b193",
+ }
+
+ ts := getMockUSDCEndpoint(t, response)
+ defer ts.Close()
+ attestationURI, err := url.ParseRequestURI(ts.URL)
+ require.NoError(t, err)
+
+ lggr := logger.TestLogger(t)
+ lp := mocks.NewLogPoller(t)
+ usdcReader, _ := ccipdata.NewUSDCReader(lggr, "job_123", mockMsgTransmitter, lp, false)
+ usdcService := NewUSDCTokenDataReader(lggr, usdcReader, attestationURI, 0, common.Address{}, APIIntervalRateLimitDisabled)
+ attestation, err := usdcService.callAttestationApi(context.Background(), utils.RandomBytes32())
+ require.NoError(t, err)
+
+ require.Equal(t, response.Status, attestation.Status)
+ require.Equal(t, response.Attestation, attestation.Attestation)
+}
+
+// TestUSDCReader_callAttestationApiMockError table-tests the failure modes of
+// callAttestationApi against local test servers: 5xx responses, client and
+// parent-context timeouts, error bodies, a missing status field, and HTTP 429
+// rate limiting. Every case must produce an error; expectedError of nil means
+// any error is acceptable, otherwise the error must match the sentinel.
+func TestUSDCReader_callAttestationApiMockError(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ getTs func() *httptest.Server
+ parentTimeoutSeconds int
+ customTimeoutSeconds int
+ expectedError error
+ }{
+ {
+ name: "server error",
+ getTs: func() *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusInternalServerError)
+ }))
+ },
+ parentTimeoutSeconds: 60,
+ expectedError: nil,
+ },
+ {
+ name: "default timeout",
+ getTs: func() *httptest.Server {
+ response := attestationResponse{
+ Status: attestationStatusSuccess,
+ Attestation: "720502893578a89a8a87982982ef781c18b193",
+ }
+ responseBytes, _ := json.Marshal(response)
+
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Outlast the client timeout so the request is cancelled.
+ time.Sleep(defaultAttestationTimeout + time.Second)
+ _, err := w.Write(responseBytes)
+ require.NoError(t, err)
+ }))
+ },
+ parentTimeoutSeconds: 60,
+ expectedError: tokendata.ErrTimeout,
+ },
+ {
+ name: "custom timeout",
+ getTs: func() *httptest.Server {
+ response := attestationResponse{
+ Status: attestationStatusSuccess,
+ Attestation: "720502893578a89a8a87982982ef781c18b193",
+ }
+ responseBytes, _ := json.Marshal(response)
+
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ time.Sleep(2*time.Second + time.Second)
+ _, err := w.Write(responseBytes)
+ require.NoError(t, err)
+ }))
+ },
+ parentTimeoutSeconds: 60,
+ customTimeoutSeconds: 2,
+ expectedError: tokendata.ErrTimeout,
+ },
+ {
+ name: "error response",
+ getTs: func() *httptest.Server {
+ response := attestationResponse{
+ Error: "some error",
+ }
+ responseBytes, _ := json.Marshal(response)
+
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ _, err := w.Write(responseBytes)
+ require.NoError(t, err)
+ }))
+ },
+ parentTimeoutSeconds: 60,
+ expectedError: nil,
+ },
+ {
+ name: "invalid status",
+ getTs: func() *httptest.Server {
+ response := attestationResponse{
+ Status: "",
+ Attestation: "720502893578a89a8a87982982ef781c18b193",
+ }
+ responseBytes, _ := json.Marshal(response)
+
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ _, err := w.Write(responseBytes)
+ require.NoError(t, err)
+ }))
+ },
+ parentTimeoutSeconds: 60,
+ expectedError: nil,
+ },
+ {
+ name: "rate limit",
+ getTs: func() *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusTooManyRequests)
+ }))
+ },
+ parentTimeoutSeconds: 60,
+ expectedError: tokendata.ErrRateLimit,
+ },
+ {
+ name: "parent context timeout",
+ getTs: func() *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ time.Sleep(defaultAttestationTimeout + time.Second)
+ }))
+ },
+ parentTimeoutSeconds: 1,
+ expectedError: nil,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ ts := test.getTs()
+ defer ts.Close()
+
+ attestationURI, err := url.ParseRequestURI(ts.URL)
+ require.NoError(t, err)
+
+ lggr := logger.TestLogger(t)
+ lp := mocks.NewLogPoller(t)
+ usdcReader, _ := ccipdata.NewUSDCReader(lggr, "job_123", mockMsgTransmitter, lp, false)
+ usdcService := NewUSDCTokenDataReader(lggr, usdcReader, attestationURI, test.customTimeoutSeconds, common.Address{}, APIIntervalRateLimitDisabled)
+ lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil)
+ require.NoError(t, usdcReader.RegisterFilters())
+
+ parentCtx, cancel := context.WithTimeout(context.Background(), time.Duration(test.parentTimeoutSeconds)*time.Second)
+ defer cancel()
+
+ _, err = usdcService.callAttestationApi(parentCtx, utils.RandomBytes32())
+ require.Error(t, err)
+
+ if test.expectedError != nil {
+ require.True(t, errors.Is(err, test.expectedError))
+ }
+ lp.On("UnregisterFilter", mock.Anything, mock.Anything).Return(nil)
+ require.NoError(t, usdcReader.Close())
+ })
+ }
+}
+
+// getMockUSDCEndpoint serves the given attestation response as JSON for every
+// request; the caller is responsible for closing the returned server.
+func getMockUSDCEndpoint(t *testing.T, response attestationResponse) *httptest.Server {
+ responseBytes, err := json.Marshal(response)
+ require.NoError(t, err)
+
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ _, err := w.Write(responseBytes)
+ require.NoError(t, err)
+ }))
+}
+
+// TestGetUSDCMessageBody verifies that getUSDCMessageBody returns the body
+// supplied by the USDC reader and calls the reader exactly once.
+func TestGetUSDCMessageBody(t *testing.T) {
+ expectedBody := []byte("0x0000000000000001000000020000000000048d71000000000000000000000000eb08f243e5d3fcff26a9e38ae5520a669f4019d000000000000000000000000023a04d5935ed8bc8e3eb78db3541f0abfb001c6e0000000000000000000000006cb3ed9b441eb674b58495c8b3324b59faff5243000000000000000000000000000000005425890298aed601595a70ab815c96711a31bc65000000000000000000000000ab4f961939bfe6a93567cc57c59eed7084ce2131000000000000000000000000000000000000000000000000000000000000271000000000000000000000000035e08285cfed1ef159236728f843286c55fc0861")
+ usdcReader := ccipdatamocks.USDCReader{}
+ usdcReader.On("GetUSDCMessagePriorToLogIndexInTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(expectedBody, nil)
+
+ usdcTokenAddr := utils.RandomAddress()
+ lggr := logger.TestLogger(t)
+ usdcService := NewUSDCTokenDataReader(lggr, &usdcReader, nil, 0, usdcTokenAddr, APIIntervalRateLimitDisabled)
+
+ // Make the first call and assert the underlying function is called
+ body, err := usdcService.getUSDCMessageBody(context.Background(), cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+ EVM2EVMMessage: cciptypes.EVM2EVMMessage{
+ TokenAmounts: []cciptypes.TokenAmount{
+ {
+ Token: ccipcalc.EvmAddrToGeneric(usdcTokenAddr),
+ Amount: big.NewInt(rand.Int63()),
+ },
+ },
+ },
+ }, 0)
+ require.NoError(t, err)
+ require.Equal(t, body, expectedBody)
+
+ usdcReader.AssertNumberOfCalls(t, "GetUSDCMessagePriorToLogIndexInTx", 1)
+}
+
+// TestTokenDataReader_getUsdcTokenEndOffset table-tests the end-offset
+// computation: the offset is the number of USDC tokens appearing AFTER the
+// selected index, and out-of-range or non-USDC indices must error.
+func TestTokenDataReader_getUsdcTokenEndOffset(t *testing.T) {
+ usdcToken := utils.RandomAddress()
+ nonUsdcToken := utils.RandomAddress()
+
+ // Inline comments give the expected offset when that element is selected.
+ multipleTokens := []common.Address{
+ usdcToken, // 2
+ nonUsdcToken,
+ nonUsdcToken,
+ usdcToken, // 1
+ usdcToken, // 0
+ nonUsdcToken,
+ }
+
+ testCases := []struct {
+ name string
+ tokens []common.Address
+ tokenIndex int
+ expOffset int
+ expErr bool
+ }{
+ {name: "one non usdc token", tokens: []common.Address{nonUsdcToken}, tokenIndex: 0, expOffset: 0, expErr: true},
+ {name: "one usdc token", tokens: []common.Address{usdcToken}, tokenIndex: 0, expOffset: 0, expErr: false},
+ {name: "one usdc token wrong index", tokens: []common.Address{usdcToken}, tokenIndex: 1, expOffset: 0, expErr: true},
+ {name: "multiple tokens 1", tokens: multipleTokens, tokenIndex: 0, expOffset: 2},
+ {name: "multiple tokens - non usdc selected", tokens: multipleTokens, tokenIndex: 2, expErr: true},
+ {name: "multiple tokens 2", tokens: multipleTokens, tokenIndex: 3, expOffset: 1},
+ {name: "multiple tokens 3", tokens: multipleTokens, tokenIndex: 4, expOffset: 0},
+ {name: "multiple tokens not found", tokens: multipleTokens, tokenIndex: 5, expErr: true},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ r := &TokenDataReader{usdcTokenAddress: usdcToken}
+ tokenAmounts := make([]cciptypes.TokenAmount, len(tc.tokens))
+ for i := range tokenAmounts {
+ tokenAmounts[i] = cciptypes.TokenAmount{
+ Token: ccipcalc.EvmAddrToGeneric(tc.tokens[i]),
+ Amount: big.NewInt(rand.Int63()),
+ }
+ }
+ msg := cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{EVM2EVMMessage: cciptypes.EVM2EVMMessage{TokenAmounts: tokenAmounts}}
+ offset, err := r.getUsdcTokenEndOffset(msg, tc.tokenIndex)
+ if tc.expErr {
+ assert.Error(t, err)
+ return
+ }
+ assert.NoError(t, err)
+ assert.Equal(t, tc.expOffset, offset)
+ })
+ }
+}
+
+// TestUSDCReader_rateLimiting fires tc.requests concurrent ReadTokenData calls
+// against a mocked endpoint and asserts the total elapsed time matches the
+// configured request interval, and that context timeouts surface as
+// "usdc rate limiting error" failures.
+func TestUSDCReader_rateLimiting(t *testing.T) {
+ testCases := []struct {
+ name string
+ requests uint64
+ rateConfig time.Duration
+ testDuration time.Duration
+ timeout time.Duration
+ err string
+ }{
+ {
+ name: "no rate limit when disabled",
+ requests: 10,
+ rateConfig: APIIntervalRateLimitDisabled,
+ testDuration: 1 * time.Millisecond,
+ },
+ {
+ name: "yes rate limited with default config",
+ requests: 5,
+ rateConfig: APIIntervalRateLimitDefault,
+ testDuration: 4 * defaultRequestInterval,
+ },
+ {
+ name: "yes rate limited with config",
+ requests: 10,
+ rateConfig: 50 * time.Millisecond,
+ testDuration: 9 * 50 * time.Millisecond,
+ },
+ {
+ name: "timeout after first request",
+ requests: 5,
+ rateConfig: 100 * time.Millisecond,
+ testDuration: 1 * time.Millisecond,
+ timeout: 1 * time.Millisecond,
+ err: "usdc rate limiting error:",
+ },
+ {
+ name: "timeout after second request",
+ requests: 5,
+ rateConfig: 100 * time.Millisecond,
+ testDuration: 100 * time.Millisecond,
+ timeout: 150 * time.Millisecond,
+ err: "usdc rate limiting error:",
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+
+ response := attestationResponse{
+ Status: attestationStatusSuccess,
+ Attestation: "720502893578a89a8a87982982ef781c18b193",
+ }
+
+ ts := getMockUSDCEndpoint(t, response)
+ defer ts.Close()
+ attestationURI, err := url.ParseRequestURI(ts.URL)
+ require.NoError(t, err)
+
+ lggr := logger.TestLogger(t)
+ lp := mocks.NewLogPoller(t)
+ usdcReader, _ := ccipdata.NewUSDCReader(lggr, "job_123", mockMsgTransmitter, lp, false)
+ usdcService := NewUSDCTokenDataReader(lggr, usdcReader, attestationURI, 0, utils.RandomAddress(), tc.rateConfig)
+
+ ctx := context.Background()
+ if tc.timeout > 0 {
+ var cf context.CancelFunc
+ ctx, cf = context.WithTimeout(ctx, tc.timeout)
+ defer cf()
+ }
+
+ // All goroutines block on trigger so the requests start simultaneously.
+ trigger := make(chan struct{})
+ errorChan := make(chan error, tc.requests)
+ wg := sync.WaitGroup{}
+ for i := 0; i < int(tc.requests); i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ <-trigger
+ _, err := usdcService.ReadTokenData(ctx, cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{
+ EVM2EVMMessage: cciptypes.EVM2EVMMessage{
+ TokenAmounts: []cciptypes.TokenAmount{{Token: ccipcalc.EvmAddrToGeneric(utils.ZeroAddress), Amount: nil}}, // trigger failure due to wrong address
+ },
+ }, 0)
+
+ errorChan <- err
+ }()
+ }
+
+ // Start the test
+ start := time.Now()
+ close(trigger)
+
+ // Wait for requests to complete
+ wg.Wait()
+ finish := time.Now()
+ close(errorChan)
+
+ // Collect errors
+ errorFound := false
+ for err := range errorChan {
+ if tc.err != "" && strings.Contains(err.Error(), tc.err) {
+ errorFound = true
+ } else if err != nil && !strings.Contains(err.Error(), "get usdc token 0 end offset") {
+ // Ignore that one error, it's expected because of how mocking is used.
+ // Anything else is unexpected.
+ require.Fail(t, "unexpected error", err)
+ }
+ }
+
+ if tc.err != "" {
+ assert.True(t, errorFound)
+ }
+ assert.WithinDuration(t, start.Add(tc.testDuration), finish, 50*time.Millisecond)
+ })
+ }
+}
diff --git a/core/services/ocr2/plugins/ccip/transactions.rlp b/core/services/ocr2/plugins/ccip/transactions.rlp
new file mode 100644
index 0000000000000000000000000000000000000000..96cfc2f48238eef5e38307f425334221e5a48903
GIT binary patch
literal 115794
zcmeI5TZmRw6vxkLnz2U5%QT~oI{E2%ziW6)s$*7Ggpol(B}^BCh#=7l?B!@yLf40)
zD2fl%E|5{FJtV}z%Zq_obbaaz3HfuAMX9I_x^t`-KARt?*-?5#t|mJz>hOJ2L7>#HYj|8wuMaa&ezxPQxa8$UVI(V2gd?mxQc
z$#oAW@~a}2)BRPko+R@9Nus%bkla3fswQ5L?!Rz(=&4`M{`M*V>R`0ntHFMex;^9UQ@4V>Jv2eiJE$!rannipRB1*(bT7E
z>eDp!>6-csO?{@OK1)-dt*Ot^)aPpI^ECDOn!3&(ntEF%_wUH%dRHOWb-%s(GWq@)
zxm=%L$n~XiImt5lf|a>kUscHUyX11RCX+9CD3|Mx7IJ;PTuwG-@&%i6xxTfK>#xe?
zsA=eMd<>Z@8zTl@^uK!ZV^<#25`8|^__#>C=
ze-~ms)%@`?9h}Mccjj`vr;zI!eWueg`Tn`NTwhek^=rj)lHQn!7o@l4a{bOiuHP$`
zlk|a1ydZrf7wgHOid;`L_0}bt`%|g@-(NxLYxmUEKY#e`Gp8rtdiwW++aKDoaK@|s
zw62K7{z>{=RdLS+Qz!bK|9RKh&(ocYcfIocq0=v{-Fo1)3toJ#kmho|wMy%))9mJ!^OOv~?fbeaD>#zB~SGZAnk-;Z65_kUnty@adx;
zy-n}uam(>`yg&d1f+xU#?;B+XeEmnYe>7bN?AO@u=d7IjQ~dg%1%HbD;m_dL3I+s$
zC-x8P5)Kl~Z|TET!I5Cs9Efo4MZlhE?e
zpW^p_vEWa!Kl~ZRYQc~o5Ig~*fo4MZlhE?epY7!MEVSTHu|ND7{93_)AP@xswmi)e
z@F(Hqp+Cj%A8Ns$Vt@EEiq(Q4K_GYnL<7x)@F$_=p+Cj%uWiAfVt@EE__cxoK_ChO
zL<7x)@F$_=p+7sw@mXlWpJIRbGm6!MAweK`0&IDjCE!oO$wPmNKcC5hKgIs=XYgwU
z1A;&l1c(Nj3E@vd%R_&PKflm|KgIs=XB4XiLxMo?1c(Nj3E@vd%R_&PKi}7aKgIs=
zXYgwU1A;&l1c(Nj3E@vd%R_&TAjfB+1%HbD;m;^m3x))N;0dthX_kOL2`3Nz*-4Ji
zLJR&B`@^5XuN4dk0#OiP%hN0Ye-cg}`tu5Md=^^pr`R96iaa~wH73oZCl><@nizg93H2t+}EEl;xq
z{7E=@=+9nqd=^^pr`R9Kkt%hN0Ye-cg}`m;ig&q53S6#K)U
z!LJnz2m(S!ltZVt@EEiq(Q4K_GYnY*w*(4SMu@mXlWpJIRbGm6!M
zAweK`0&IDjCE!oO$wPlmBgbc<1%HbD;m_dL3I+s$CCSS=V51cE2PmZw<){v@0{^ydt6d=^^pr`R9<41TR(KoE$609&4B3HXz6^3b0%
z$?;ie!JlG(_%n*tf+0a5cmiyBnkC>*!pTE_&LYQWp#^`6{o&8x*9ry%fhY*D+tRt};|c-TlgRBRR5YgvwxaMVW3Sd-6u8
z493)y=|*x~)(DkBZ&jIYB*!-*bx`S7=j+)CMLkl#_f^#SdUjG?kJRszYwCPGJ2k6E
z>i20?b-tdJzr9D2%Fv4bR)U6(g>Bo)wwd=NG_{2LS=AGrc5`I*H#;$GRTzKGP%6Jj8)O~
zg(_R-Zm5*8D!MUOWy{=6wK7&kH)pDBnY*Q0#>!}bzZF;L?k^Pg&{U0nz~hvs24vF8
zfDirq0DRs4nt?C)x+UPYJDy+gbxXi)cRat~>z07q?s$H|*DV3J-SPZ_uUi6cyW{x<
zU$+F@cE|GzzHSM)?T+UceBBao+a1p@__`(FwmY6*@O4YTZFfAs;Omxv+wORN!PhMT
zx83plg0EWwZoA|81z)!W+;+$F3%+g%xb2ST7ku3kaN8ZvFZjA8;I=!SU+{HHz-@Ot
zzu@bZfZOhPe!d22B2VYv8!-m9zeQzgjuebLq<`58ko<
f%sW4PJ^b+#k36<=Maz-pBVSzc_B*Fe4&U$(Vy%t%
literal 0
HcmV?d00001
diff --git a/core/services/ocr2/plugins/ccip/transmitter/transmitter.go b/core/services/ocr2/plugins/ccip/transmitter/transmitter.go
new file mode 100644
index 00000000000..3e2962b33a9
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/transmitter/transmitter.go
@@ -0,0 +1,143 @@
+package transmitter
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/pkg/errors"
+
+ commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
+ "github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
+ statuschecker "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/statuschecker"
+)
+
+type roundRobinKeystore interface {
+ GetRoundRobinAddress(ctx context.Context, chainID *big.Int, addresses ...common.Address) (address common.Address, err error)
+}
+
+type txManager interface {
+ CreateTransaction(ctx context.Context, txRequest txmgr.TxRequest) (tx txmgr.Tx, err error)
+ GetTransactionStatus(ctx context.Context, transactionID string) (state commontypes.TransactionStatus, err error)
+}
+
+type Transmitter interface {
+ CreateEthTransaction(ctx context.Context, toAddress common.Address, payload []byte, txMeta *txmgr.TxMeta) error
+ FromAddress() common.Address
+}
+
+type transmitter struct {
+ txm txManager
+ fromAddresses []common.Address
+ gasLimit uint64
+ effectiveTransmitterAddress common.Address
+ strategy types.TxStrategy
+ checker txmgr.TransmitCheckerSpec
+ chainID *big.Int
+ keystore roundRobinKeystore
+ statuschecker statuschecker.CCIPTransactionStatusChecker // Used for CCIP's idempotency key generation
+}
+
+// NewTransmitter creates a new eth transmitter
+func NewTransmitter(
+ txm txManager,
+ fromAddresses []common.Address,
+ gasLimit uint64,
+ effectiveTransmitterAddress common.Address,
+ strategy types.TxStrategy,
+ checker txmgr.TransmitCheckerSpec,
+ chainID *big.Int,
+ keystore roundRobinKeystore,
+) (Transmitter, error) {
+ // Ensure that a keystore is provided.
+ if keystore == nil {
+ return nil, errors.New("nil keystore provided to transmitter")
+ }
+
+ return &transmitter{
+ txm: txm,
+ fromAddresses: fromAddresses,
+ gasLimit: gasLimit,
+ effectiveTransmitterAddress: effectiveTransmitterAddress,
+ strategy: strategy,
+ checker: checker,
+ chainID: chainID,
+ keystore: keystore,
+ }, nil
+}
+
+func NewTransmitterWithStatusChecker(
+ txm txManager,
+ fromAddresses []common.Address,
+ gasLimit uint64,
+ effectiveTransmitterAddress common.Address,
+ strategy types.TxStrategy,
+ checker txmgr.TransmitCheckerSpec,
+ chainID *big.Int,
+ keystore roundRobinKeystore,
+) (Transmitter, error) {
+ t, err := NewTransmitter(txm, fromAddresses, gasLimit, effectiveTransmitterAddress, strategy, checker, chainID, keystore)
+
+ if err != nil {
+ return nil, err
+ }
+
+ transmitter, ok := t.(*transmitter)
+ if !ok {
+ return nil, errors.New("failed to type assert Transmitter to *transmitter")
+ }
+ transmitter.statuschecker = statuschecker.NewTxmStatusChecker(txm.GetTransactionStatus)
+
+ return transmitter, nil
+}
+
+func (t *transmitter) CreateEthTransaction(ctx context.Context, toAddress common.Address, payload []byte, txMeta *txmgr.TxMeta) error {
+ roundRobinFromAddress, err := t.keystore.GetRoundRobinAddress(ctx, t.chainID, t.fromAddresses...)
+ if err != nil {
+ return fmt.Errorf("skipped OCR transmission, error getting round-robin address: %w", err)
+ }
+
+ var idempotencyKey *string
+
+ // Define idempotency key for CCIP Execution Plugin
+ if len(txMeta.MessageIDs) == 1 && t.statuschecker != nil {
+ messageId := txMeta.MessageIDs[0]
+ _, count, err1 := t.statuschecker.CheckMessageStatus(ctx, messageId)
+
+ if err1 != nil {
+			return errors.Wrap(err1, "skipped OCR transmission, error getting message status")
+ }
+ idempotencyKey = func() *string {
+ s := fmt.Sprintf("%s-%d", messageId, count+1)
+ return &s
+ }()
+ }
+
+ _, err = t.txm.CreateTransaction(ctx, txmgr.TxRequest{
+ IdempotencyKey: idempotencyKey,
+ FromAddress: roundRobinFromAddress,
+ ToAddress: toAddress,
+ EncodedPayload: payload,
+ FeeLimit: t.gasLimit,
+ ForwarderAddress: t.forwarderAddress(),
+ Strategy: t.strategy,
+ Checker: t.checker,
+ Meta: txMeta,
+ })
+ return errors.Wrap(err, "skipped OCR transmission")
+}
+
+func (t *transmitter) FromAddress() common.Address {
+ return t.effectiveTransmitterAddress
+}
+
+func (t *transmitter) forwarderAddress() common.Address {
+ for _, a := range t.fromAddresses {
+ if a == t.effectiveTransmitterAddress {
+ return common.Address{}
+ }
+ }
+ return t.effectiveTransmitterAddress
+}
diff --git a/core/services/ocr2/plugins/ccip/transmitter/transmitter_test.go b/core/services/ocr2/plugins/ccip/transmitter/transmitter_test.go
new file mode 100644
index 00000000000..d177f1baa5c
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/transmitter/transmitter_test.go
@@ -0,0 +1,282 @@
+package transmitter
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
+ "github.com/smartcontractkit/chainlink-common/pkg/types"
+ commontxmmocks "github.com/smartcontractkit/chainlink/v2/common/txmgr/types/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
+ txmmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ethkey"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
+
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+var (
+ FixtureChainID = *testutils.FixtureChainID
+ Password = testutils.Password
+)
+
+func newMockTxStrategy(t *testing.T) *commontxmmocks.TxStrategy {
+ return commontxmmocks.NewTxStrategy(t)
+}
+
+func Test_DefaultTransmitter_CreateEthTransaction(t *testing.T) {
+ t.Parallel()
+
+ db := pgtest.NewSqlxDB(t)
+ ethKeyStore := NewKeyStore(t, db).Eth()
+
+ _, fromAddress := MustInsertRandomKey(t, ethKeyStore)
+
+ gasLimit := uint64(1000)
+ chainID := big.NewInt(0)
+ effectiveTransmitterAddress := fromAddress
+ toAddress := testutils.NewAddress()
+ payload := []byte{1, 2, 3}
+ txm := txmmocks.NewMockEvmTxManager(t)
+ strategy := newMockTxStrategy(t)
+
+ transmitter, err := ocrcommon.NewTransmitter(
+ txm,
+ []common.Address{fromAddress},
+ gasLimit,
+ effectiveTransmitterAddress,
+ strategy,
+ txmgr.TransmitCheckerSpec{},
+ chainID,
+ ethKeyStore,
+ )
+ require.NoError(t, err)
+
+ txm.On("CreateTransaction", mock.Anything, txmgr.TxRequest{
+ FromAddress: fromAddress,
+ ToAddress: toAddress,
+ EncodedPayload: payload,
+ FeeLimit: gasLimit,
+ ForwarderAddress: common.Address{},
+ Meta: nil,
+ Strategy: strategy,
+ }).Return(txmgr.Tx{}, nil).Once()
+ require.NoError(t, transmitter.CreateEthTransaction(testutils.Context(t), toAddress, payload, nil))
+}
+
+func Test_DefaultTransmitter_Forwarding_Enabled_CreateEthTransaction(t *testing.T) {
+ t.Parallel()
+
+ db := pgtest.NewSqlxDB(t)
+ ethKeyStore := NewKeyStore(t, db).Eth()
+
+ _, fromAddress := MustInsertRandomKey(t, ethKeyStore)
+ _, fromAddress2 := MustInsertRandomKey(t, ethKeyStore)
+
+ gasLimit := uint64(1000)
+ chainID := big.NewInt(0)
+ effectiveTransmitterAddress := common.Address{}
+ toAddress := testutils.NewAddress()
+ payload := []byte{1, 2, 3}
+ txm := txmmocks.NewMockEvmTxManager(t)
+ strategy := newMockTxStrategy(t)
+
+ transmitter, err := ocrcommon.NewTransmitter(
+ txm,
+ []common.Address{fromAddress, fromAddress2},
+ gasLimit,
+ effectiveTransmitterAddress,
+ strategy,
+ txmgr.TransmitCheckerSpec{},
+ chainID,
+ ethKeyStore,
+ )
+ require.NoError(t, err)
+
+ txm.On("CreateTransaction", mock.Anything, txmgr.TxRequest{
+ FromAddress: fromAddress,
+ ToAddress: toAddress,
+ EncodedPayload: payload,
+ FeeLimit: gasLimit,
+ ForwarderAddress: common.Address{},
+ Meta: nil,
+ Strategy: strategy,
+ }).Return(txmgr.Tx{}, nil).Once()
+ txm.On("CreateTransaction", mock.Anything, txmgr.TxRequest{
+ FromAddress: fromAddress2,
+ ToAddress: toAddress,
+ EncodedPayload: payload,
+ FeeLimit: gasLimit,
+ ForwarderAddress: common.Address{},
+ Meta: nil,
+ Strategy: strategy,
+ }).Return(txmgr.Tx{}, nil).Once()
+ require.NoError(t, transmitter.CreateEthTransaction(testutils.Context(t), toAddress, payload, nil))
+ require.NoError(t, transmitter.CreateEthTransaction(testutils.Context(t), toAddress, payload, nil))
+}
+
+func Test_DefaultTransmitter_Forwarding_Enabled_CreateEthTransaction_Round_Robin_Error(t *testing.T) {
+ t.Parallel()
+
+ db := pgtest.NewSqlxDB(t)
+ ethKeyStore := NewKeyStore(t, db).Eth()
+
+ fromAddress := common.Address{}
+
+ gasLimit := uint64(1000)
+ chainID := big.NewInt(0)
+ effectiveTransmitterAddress := common.Address{}
+ toAddress := testutils.NewAddress()
+ payload := []byte{1, 2, 3}
+ txm := txmmocks.NewMockEvmTxManager(t)
+ strategy := newMockTxStrategy(t)
+
+ transmitter, err := ocrcommon.NewTransmitter(
+ txm,
+ []common.Address{fromAddress},
+ gasLimit,
+ effectiveTransmitterAddress,
+ strategy,
+ txmgr.TransmitCheckerSpec{},
+ chainID,
+ ethKeyStore,
+ )
+ require.NoError(t, err)
+ require.Error(t, transmitter.CreateEthTransaction(testutils.Context(t), toAddress, payload, nil))
+}
+
+func Test_DefaultTransmitter_Forwarding_Enabled_CreateEthTransaction_No_Keystore_Error(t *testing.T) {
+ t.Parallel()
+
+ db := pgtest.NewSqlxDB(t)
+ ethKeyStore := NewKeyStore(t, db).Eth()
+
+ _, fromAddress := MustInsertRandomKey(t, ethKeyStore)
+ _, fromAddress2 := MustInsertRandomKey(t, ethKeyStore)
+
+ gasLimit := uint64(1000)
+ chainID := big.NewInt(0)
+ effectiveTransmitterAddress := common.Address{}
+ txm := txmmocks.NewMockEvmTxManager(t)
+ strategy := newMockTxStrategy(t)
+
+ _, err := ocrcommon.NewTransmitter(
+ txm,
+ []common.Address{fromAddress, fromAddress2},
+ gasLimit,
+ effectiveTransmitterAddress,
+ strategy,
+ txmgr.TransmitCheckerSpec{},
+ chainID,
+ nil,
+ )
+ require.Error(t, err)
+}
+
+func Test_Transmitter_With_StatusChecker_CreateEthTransaction(t *testing.T) {
+ t.Parallel()
+
+ db := pgtest.NewSqlxDB(t)
+ ethKeyStore := NewKeyStore(t, db).Eth()
+
+ _, fromAddress := MustInsertRandomKey(t, ethKeyStore)
+
+ gasLimit := uint64(1000)
+ chainID := big.NewInt(0)
+ effectiveTransmitterAddress := fromAddress
+ txm := txmmocks.NewMockEvmTxManager(t)
+ strategy := newMockTxStrategy(t)
+ toAddress := testutils.NewAddress()
+ payload := []byte{1, 2, 3}
+ idempotencyKey := "1-0"
+ txMeta := &txmgr.TxMeta{MessageIDs: []string{"1"}}
+
+ transmitter, err := NewTransmitterWithStatusChecker(
+ txm,
+ []common.Address{fromAddress},
+ gasLimit,
+ effectiveTransmitterAddress,
+ strategy,
+ txmgr.TransmitCheckerSpec{},
+ chainID,
+ ethKeyStore,
+ )
+ require.NoError(t, err)
+
+ // This case is for when the message ID was not found in the status checker
+ txm.On("GetTransactionStatus", mock.Anything, idempotencyKey).Return(types.Unknown, errors.New("dummy")).Once()
+
+ txm.On("CreateTransaction", mock.Anything, txmgr.TxRequest{
+ IdempotencyKey: &idempotencyKey,
+ FromAddress: fromAddress,
+ ToAddress: toAddress,
+ EncodedPayload: payload,
+ FeeLimit: gasLimit,
+ ForwarderAddress: common.Address{},
+ Meta: txMeta,
+ Strategy: strategy,
+ }).Return(txmgr.Tx{}, nil).Once()
+
+ require.NoError(t, transmitter.CreateEthTransaction(testutils.Context(t), toAddress, payload, txMeta))
+ txm.AssertExpectations(t)
+}
+
+func NewKeyStore(t testing.TB, ds sqlutil.DataSource) keystore.Master {
+ ctx := testutils.Context(t)
+ keystore := keystore.NewInMemory(ds, utils.FastScryptParams, logger.TestLogger(t))
+ require.NoError(t, keystore.Unlock(ctx, Password))
+ return keystore
+}
+
+type RandomKey struct {
+ Nonce int64
+ Disabled bool
+
+ chainIDs []ubig.Big // nil: Fixture, set empty for none
+}
+
+func (r RandomKey) MustInsert(t testing.TB, keystore keystore.Eth) (ethkey.KeyV2, common.Address) {
+ ctx := testutils.Context(t)
+ chainIDs := r.chainIDs
+ if chainIDs == nil {
+ chainIDs = []ubig.Big{*ubig.New(&FixtureChainID)}
+ }
+
+ key := MustGenerateRandomKey(t)
+ keystore.XXXTestingOnlyAdd(ctx, key)
+
+ for _, cid := range chainIDs {
+ require.NoError(t, keystore.Add(ctx, key.Address, cid.ToInt()))
+ require.NoError(t, keystore.Enable(ctx, key.Address, cid.ToInt()))
+ if r.Disabled {
+ require.NoError(t, keystore.Disable(ctx, key.Address, cid.ToInt()))
+ }
+ }
+
+ return key, key.Address
+}
+
+func MustInsertRandomKey(t testing.TB, keystore keystore.Eth, chainIDs ...ubig.Big) (ethkey.KeyV2, common.Address) {
+ r := RandomKey{}
+ if len(chainIDs) > 0 {
+ r.chainIDs = chainIDs
+ }
+ return r.MustInsert(t, keystore)
+}
+
+func MustGenerateRandomKey(t testing.TB) ethkey.KeyV2 {
+ key, err := ethkey.NewV2()
+ require.NoError(t, err)
+ return key
+}
diff --git a/core/services/ocr2/plugins/ccip/vars.go b/core/services/ocr2/plugins/ccip/vars.go
new file mode 100644
index 00000000000..a44f5e41d66
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/vars.go
@@ -0,0 +1,14 @@
+package ccip
+
+import (
+ "github.com/pkg/errors"
+)
+
+const (
+ MaxQueryLength = 0 // empty for both plugins
+	MaxObservationLength = 250_000 // plugin's Observation should make sure to cap to this limit
+ CommitPluginLabel = "commit"
+ ExecPluginLabel = "exec"
+)
+
+var ErrChainIsNotHealthy = errors.New("lane processing is stopped because of healthcheck failure, please see crit logs")
diff --git a/core/services/ocr2/validate/validate.go b/core/services/ocr2/validate/validate.go
index 2993a67114e..8d98a282674 100644
--- a/core/services/ocr2/validate/validate.go
+++ b/core/services/ocr2/validate/validate.go
@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"os/exec"
+ "strings"
"github.com/lib/pq"
"github.com/pelletier/go-toml"
@@ -19,9 +20,11 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/config/env"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
lloconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/llo/config"
mercuryconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury/config"
"github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
+ "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
"github.com/smartcontractkit/chainlink/v2/core/services/relay"
"github.com/smartcontractkit/chainlink/v2/plugins"
)
@@ -115,6 +118,10 @@ func validateSpec(ctx context.Context, tree *toml.Tree, spec job.Job, rc plugins
return nil
case types.Mercury:
return validateOCR2MercurySpec(spec.OCR2OracleSpec.PluginConfig, *spec.OCR2OracleSpec.FeedID)
+ case types.CCIPExecution:
+ return validateOCR2CCIPExecutionSpec(spec.OCR2OracleSpec.PluginConfig)
+ case types.CCIPCommit:
+ return validateOCR2CCIPCommitSpec(spec.OCR2OracleSpec.PluginConfig)
case types.LLO:
return validateOCR2LLOSpec(spec.OCR2OracleSpec.PluginConfig)
case types.GenericPlugin:
@@ -313,11 +320,61 @@ func validateOCR2MercurySpec(jsonConfig job.JSONConfig, feedId [32]byte) error {
var pluginConfig mercuryconfig.PluginConfig
err := json.Unmarshal(jsonConfig.Bytes(), &pluginConfig)
if err != nil {
- return pkgerrors.Wrap(err, "error while unmarshaling plugin config")
+ return pkgerrors.Wrap(err, "error while unmarshalling plugin config")
}
return pkgerrors.Wrap(mercuryconfig.ValidatePluginConfig(pluginConfig, feedId), "Mercury PluginConfig is invalid")
}
+func validateOCR2CCIPExecutionSpec(jsonConfig job.JSONConfig) error {
+ if jsonConfig == nil {
+ return errors.New("pluginConfig is empty")
+ }
+ var cfg config.ExecPluginJobSpecConfig
+ err := json.Unmarshal(jsonConfig.Bytes(), &cfg)
+ if err != nil {
+ return pkgerrors.Wrap(err, "error while unmarshalling plugin config")
+ }
+ if cfg.USDCConfig != (config.USDCConfig{}) {
+ return cfg.USDCConfig.ValidateUSDCConfig()
+ }
+ return nil
+}
+
+func validateOCR2CCIPCommitSpec(jsonConfig job.JSONConfig) error {
+ if jsonConfig == nil {
+ return errors.New("pluginConfig is empty")
+ }
+ var cfg config.CommitPluginJobSpecConfig
+ err := json.Unmarshal(jsonConfig.Bytes(), &cfg)
+ if err != nil {
+ return pkgerrors.Wrap(err, "error while unmarshalling plugin config")
+ }
+
+ // Ensure that either the tokenPricesUSDPipeline or the priceGetterConfig is set, but not both.
+ emptyPipeline := strings.Trim(cfg.TokenPricesUSDPipeline, "\n\t ") == ""
+ emptyPriceGetter := cfg.PriceGetterConfig == nil
+ if emptyPipeline && emptyPriceGetter {
+ return fmt.Errorf("either tokenPricesUSDPipeline or priceGetterConfig must be set")
+ }
+ if !emptyPipeline && !emptyPriceGetter {
+ return fmt.Errorf("only one of tokenPricesUSDPipeline or priceGetterConfig must be set: %s and %v", cfg.TokenPricesUSDPipeline, cfg.PriceGetterConfig)
+ }
+
+ if !emptyPipeline {
+ _, err = pipeline.Parse(cfg.TokenPricesUSDPipeline)
+ if err != nil {
+ return pkgerrors.Wrap(err, "invalid token prices pipeline")
+ }
+ } else {
+ // Validate prices config (like it was done for the pipeline).
+ if emptyPriceGetter {
+ return pkgerrors.New("priceGetterConfig is empty")
+ }
+ }
+
+ return nil
+}
+
func validateOCR2LLOSpec(jsonConfig job.JSONConfig) error {
var pluginConfig lloconfig.PluginConfig
err := json.Unmarshal(jsonConfig.Bytes(), &pluginConfig)
diff --git a/core/services/relay/evm/ccip.go b/core/services/relay/evm/ccip.go
new file mode 100644
index 00000000000..34a732e1454
--- /dev/null
+++ b/core/services/relay/evm/ccip.go
@@ -0,0 +1,205 @@
+package evm
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "time"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+)
+
+var _ cciptypes.CommitStoreReader = (*IncompleteSourceCommitStoreReader)(nil)
+var _ cciptypes.CommitStoreReader = (*IncompleteDestCommitStoreReader)(nil)
+
+// IncompleteSourceCommitStoreReader is an implementation of CommitStoreReader with the only valid methods being
+// GasPriceEstimator, ChangeConfig, and OffchainConfig
+type IncompleteSourceCommitStoreReader struct {
+ estimator gas.EvmFeeEstimator
+ gasPriceEstimator *prices.DAGasPriceEstimator
+ sourceMaxGasPrice *big.Int
+ offchainConfig cciptypes.CommitOffchainConfig
+}
+
+func NewIncompleteSourceCommitStoreReader(estimator gas.EvmFeeEstimator, sourceMaxGasPrice *big.Int) *IncompleteSourceCommitStoreReader {
+ return &IncompleteSourceCommitStoreReader{
+ estimator: estimator,
+ sourceMaxGasPrice: sourceMaxGasPrice,
+ }
+}
+
+func (i *IncompleteSourceCommitStoreReader) ChangeConfig(ctx context.Context, onchainConfig []byte, offchainConfig []byte) (cciptypes.Address, error) {
+ onchainConfigParsed, err := abihelpers.DecodeAbiStruct[ccip.CommitOnchainConfig](onchainConfig)
+ if err != nil {
+ return "", err
+ }
+
+ offchainConfigParsed, err := ccipconfig.DecodeOffchainConfig[ccip.JSONCommitOffchainConfigV1_2_0](offchainConfig)
+ if err != nil {
+ return "", err
+ }
+
+ i.gasPriceEstimator = prices.NewDAGasPriceEstimator(
+ i.estimator,
+ i.sourceMaxGasPrice,
+ int64(offchainConfigParsed.ExecGasPriceDeviationPPB),
+ int64(offchainConfigParsed.DAGasPriceDeviationPPB),
+ )
+ i.offchainConfig = ccip.NewCommitOffchainConfig(
+ offchainConfigParsed.ExecGasPriceDeviationPPB,
+ offchainConfigParsed.GasPriceHeartBeat.Duration(),
+ offchainConfigParsed.TokenPriceDeviationPPB,
+ offchainConfigParsed.TokenPriceHeartBeat.Duration(),
+ offchainConfigParsed.InflightCacheExpiry.Duration(),
+ offchainConfigParsed.PriceReportingDisabled,
+ )
+
+ return cciptypes.Address(onchainConfigParsed.PriceRegistry.String()), nil
+}
+
+func (i *IncompleteSourceCommitStoreReader) DecodeCommitReport(ctx context.Context, report []byte) (cciptypes.CommitStoreReport, error) {
+ return cciptypes.CommitStoreReport{}, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader")
+}
+
+func (i *IncompleteSourceCommitStoreReader) EncodeCommitReport(ctx context.Context, report cciptypes.CommitStoreReport) ([]byte, error) {
+ return []byte{}, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader")
+}
+
+// GasPriceEstimator returns the DAGasPriceEstimator to satisfy the GasPriceEstimatorCommit interface.
+// The returned estimator is nil until ChangeConfig has been called, which constructs it from the
+// decoded offchain config; call this method again after ChangeConfig to get the updated estimator.
+func (i *IncompleteSourceCommitStoreReader) GasPriceEstimator(ctx context.Context) (cciptypes.GasPriceEstimatorCommit, error) {
+ return i.gasPriceEstimator, nil
+}
+
+func (i *IncompleteSourceCommitStoreReader) GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error) {
+ return nil, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader")
+}
+
+func (i *IncompleteSourceCommitStoreReader) GetCommitReportMatchingSeqNum(ctx context.Context, seqNum uint64, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error) {
+ return nil, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader")
+}
+
+func (i *IncompleteSourceCommitStoreReader) GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error) {
+ return cciptypes.CommitStoreStaticConfig{}, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader")
+}
+
+func (i *IncompleteSourceCommitStoreReader) GetExpectedNextSequenceNumber(ctx context.Context) (uint64, error) {
+ return 0, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader")
+}
+
+func (i *IncompleteSourceCommitStoreReader) GetLatestPriceEpochAndRound(ctx context.Context) (uint64, error) {
+ return 0, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader")
+}
+
+func (i *IncompleteSourceCommitStoreReader) IsBlessed(ctx context.Context, root [32]byte) (bool, error) {
+ return false, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader")
+}
+
+func (i *IncompleteSourceCommitStoreReader) IsDestChainHealthy(ctx context.Context) (bool, error) {
+ return false, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader")
+}
+
+func (i *IncompleteSourceCommitStoreReader) IsDown(ctx context.Context) (bool, error) {
+ return false, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader")
+}
+
+func (i *IncompleteSourceCommitStoreReader) OffchainConfig(ctx context.Context) (cciptypes.CommitOffchainConfig, error) {
+ return i.offchainConfig, nil
+}
+
+func (i *IncompleteSourceCommitStoreReader) VerifyExecutionReport(ctx context.Context, report cciptypes.ExecReport) (bool, error) {
+ return false, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader")
+}
+
+func (i *IncompleteSourceCommitStoreReader) Close() error {
+ return fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader")
+}
+
+// IncompleteDestCommitStoreReader is an implementation of CommitStoreReader with all valid methods except
+// GasPriceEstimator, ChangeConfig, and OffchainConfig.
+type IncompleteDestCommitStoreReader struct {
+ cs cciptypes.CommitStoreReader
+}
+
+func NewIncompleteDestCommitStoreReader(lggr logger.Logger, versionFinder ccip.VersionFinder, address cciptypes.Address, ec client.Client, lp logpoller.LogPoller) (*IncompleteDestCommitStoreReader, error) {
+ cs, err := ccip.NewCommitStoreReader(lggr, versionFinder, address, ec, lp)
+ if err != nil {
+ return nil, err
+ }
+
+ return &IncompleteDestCommitStoreReader{
+ cs: cs,
+ }, nil
+}
+
+func (i *IncompleteDestCommitStoreReader) ChangeConfig(ctx context.Context, onchainConfig []byte, offchainConfig []byte) (cciptypes.Address, error) {
+ return "", fmt.Errorf("invalid usage of IncompleteDestCommitStoreReader")
+}
+
+func (i *IncompleteDestCommitStoreReader) DecodeCommitReport(ctx context.Context, report []byte) (cciptypes.CommitStoreReport, error) {
+ return i.cs.DecodeCommitReport(ctx, report)
+}
+
+func (i *IncompleteDestCommitStoreReader) EncodeCommitReport(ctx context.Context, report cciptypes.CommitStoreReport) ([]byte, error) {
+ return i.cs.EncodeCommitReport(ctx, report)
+}
+
+func (i *IncompleteDestCommitStoreReader) GasPriceEstimator(ctx context.Context) (cciptypes.GasPriceEstimatorCommit, error) {
+ return nil, fmt.Errorf("invalid usage of IncompleteDestCommitStoreReader")
+}
+
+func (i *IncompleteDestCommitStoreReader) GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error) {
+ return i.cs.GetAcceptedCommitReportsGteTimestamp(ctx, ts, confirmations)
+}
+
+func (i *IncompleteDestCommitStoreReader) GetCommitReportMatchingSeqNum(ctx context.Context, seqNum uint64, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error) {
+ return i.cs.GetCommitReportMatchingSeqNum(ctx, seqNum, confirmations)
+}
+
+func (i *IncompleteDestCommitStoreReader) GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error) {
+ return i.cs.GetCommitStoreStaticConfig(ctx)
+}
+
+func (i *IncompleteDestCommitStoreReader) GetExpectedNextSequenceNumber(ctx context.Context) (uint64, error) {
+ return i.cs.GetExpectedNextSequenceNumber(ctx)
+}
+
+func (i *IncompleteDestCommitStoreReader) GetLatestPriceEpochAndRound(ctx context.Context) (uint64, error) {
+ return i.cs.GetLatestPriceEpochAndRound(ctx)
+}
+
+func (i *IncompleteDestCommitStoreReader) IsBlessed(ctx context.Context, root [32]byte) (bool, error) {
+ return i.cs.IsBlessed(ctx, root)
+}
+
+func (i *IncompleteDestCommitStoreReader) IsDestChainHealthy(ctx context.Context) (bool, error) {
+ return i.cs.IsDestChainHealthy(ctx)
+}
+
+func (i *IncompleteDestCommitStoreReader) IsDown(ctx context.Context) (bool, error) {
+ return i.cs.IsDown(ctx)
+}
+
+func (i *IncompleteDestCommitStoreReader) OffchainConfig(ctx context.Context) (cciptypes.CommitOffchainConfig, error) {
+ return cciptypes.CommitOffchainConfig{}, fmt.Errorf("invalid usage of IncompleteDestCommitStoreReader")
+}
+
+func (i *IncompleteDestCommitStoreReader) VerifyExecutionReport(ctx context.Context, report cciptypes.ExecReport) (bool, error) {
+ return i.cs.VerifyExecutionReport(ctx, report)
+}
+
+func (i *IncompleteDestCommitStoreReader) Close() error {
+ return i.cs.Close()
+}
diff --git a/core/services/relay/evm/ccip_test.go b/core/services/relay/evm/ccip_test.go
new file mode 100644
index 00000000000..8c0bfe182e1
--- /dev/null
+++ b/core/services/relay/evm/ccip_test.go
@@ -0,0 +1,18 @@
+package evm
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_CCIPSubjectUUID(t *testing.T) {
+ // We want the function to be
+ // (1) an actual function (i.e., deterministic)
+ assert.Equal(t, chainToUUID(big.NewInt(1)), chainToUUID(big.NewInt(1)))
+ // (2) injective (produce different results for different inputs)
+ assert.NotEqual(t, chainToUUID(big.NewInt(1)), chainToUUID(big.NewInt(2)))
+ // (3) stable across runs
+ assert.Equal(t, "c980e777-c95c-577b-83f6-ceb26a1a982d", chainToUUID(big.NewInt(1)).String())
+}
diff --git a/core/services/relay/evm/commit_provider.go b/core/services/relay/evm/commit_provider.go
new file mode 100644
index 00000000000..fe3e327c7f2
--- /dev/null
+++ b/core/services/relay/evm/commit_provider.go
@@ -0,0 +1,309 @@
+package evm
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+
+ "go.uber.org/multierr"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip"
+)
+
+var _ commontypes.CCIPCommitProvider = (*SrcCommitProvider)(nil)
+var _ commontypes.CCIPCommitProvider = (*DstCommitProvider)(nil)
+
+// SrcCommitProvider is the CCIP commit provider for the source chain of a
+// lane. Only source-side readers (e.g. NewOnRampReader) are functional;
+// dest-side methods return errors or Unimplemented* stubs.
+type SrcCommitProvider struct {
+ lggr logger.Logger
+ startBlock uint64
+ client client.Client
+ lp logpoller.LogPoller
+ estimator gas.EvmFeeEstimator
+ maxGasPrice *big.Int
+
+ // these values will be lazily initialized
+ seenOnRampAddress *cciptypes.Address
+ seenSourceChainSelector *uint64
+ seenDestChainSelector *uint64
+}
+
+// NewSrcCommitProvider builds the source-chain commit provider. No OCR
+// plumbing (config watcher / transmitter) is needed on the source side.
+func NewSrcCommitProvider(
+ lggr logger.Logger,
+ startBlock uint64,
+ client client.Client,
+ lp logpoller.LogPoller,
+ srcEstimator gas.EvmFeeEstimator,
+ maxGasPrice *big.Int,
+) commontypes.CCIPCommitProvider {
+ return &SrcCommitProvider{
+ lggr: lggr,
+ startBlock: startBlock,
+ client: client,
+ lp: lp,
+ estimator: srcEstimator,
+ maxGasPrice: maxGasPrice,
+ }
+}
+
+// DstCommitProvider is the CCIP commit provider for the dest chain of a lane.
+// It owns the OCR plumbing (config watcher, contract transmitter) and the
+// dest-side readers (commit store, offRamp, price registry).
+type DstCommitProvider struct {
+ lggr logger.Logger
+ versionFinder ccip.VersionFinder
+ startBlock uint64
+ client client.Client
+ lp logpoller.LogPoller
+ contractTransmitter *contractTransmitter
+ configWatcher *configWatcher
+ gasEstimator gas.EvmFeeEstimator
+ maxGasPrice big.Int
+
+ // these values will be lazily initialized
+ seenCommitStoreAddress *cciptypes.Address
+ seenOffRampAddress *cciptypes.Address
+}
+
+// NewDstCommitProvider builds the dest-chain commit provider.
+// NOTE(review): contractTransmitter is passed by value and the address of that
+// local copy is stored — confirm callers do not expect to share the
+// transmitter instance after construction.
+func NewDstCommitProvider(
+ lggr logger.Logger,
+ versionFinder ccip.VersionFinder,
+ startBlock uint64,
+ client client.Client,
+ lp logpoller.LogPoller,
+ gasEstimator gas.EvmFeeEstimator,
+ maxGasPrice big.Int,
+ contractTransmitter contractTransmitter,
+ configWatcher *configWatcher,
+) commontypes.CCIPCommitProvider {
+ return &DstCommitProvider{
+ lggr: lggr,
+ versionFinder: versionFinder,
+ startBlock: startBlock,
+ client: client,
+ lp: lp,
+ contractTransmitter: &contractTransmitter,
+ configWatcher: configWatcher,
+ gasEstimator: gasEstimator,
+ maxGasPrice: maxGasPrice,
+ }
+}
+
+// Name returns a human-readable identifier for this provider.
+func (P *SrcCommitProvider) Name() string {
+ return "CCIPCommitProvider.SrcRelayerProvider"
+}
+
+// Close is called when the job that created this provider is deleted.
+// At this time, any of the methods on the provider may or may not have been called.
+// If NewOnRampReader has not been called, their corresponding
+// Close methods will be expected to error.
+func (P *SrcCommitProvider) Close() error {
+ versionFinder := ccip.NewEvmVersionFinder()
+
+ unregisterFuncs := make([]func() error, 0, 2)
+ unregisterFuncs = append(unregisterFuncs, func() error {
+ // avoid panic in the case NewOnRampReader wasn't called
+ if P.seenOnRampAddress == nil {
+ return nil
+ }
+ return ccip.CloseOnRampReader(P.lggr, versionFinder, *P.seenSourceChainSelector, *P.seenDestChainSelector, *P.seenOnRampAddress, P.lp, P.client)
+ })
+
+ var multiErr error
+ for _, fn := range unregisterFuncs {
+ if err := fn(); err != nil {
+ multiErr = multierr.Append(multiErr, err)
+ }
+ }
+ return multiErr
+}
+
+// Ready always reports ready; there is no async initialization to wait on.
+func (P *SrcCommitProvider) Ready() error {
+ return nil
+}
+
+// HealthReport returns an empty (healthy) report.
+func (P *SrcCommitProvider) HealthReport() map[string]error {
+ return make(map[string]error)
+}
+
+// OffchainConfigDigester returns an erroring stub; only the dest-side
+// provider carries a real digester.
+func (P *SrcCommitProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester {
+ // TODO CCIP-2494
+ // "OffchainConfigDigester called on SrcCommitProvider. Valid on DstCommitProvider."
+ return UnimplementedOffchainConfigDigester{}
+}
+
+// ContractConfigTracker returns an erroring stub; valid on DstCommitProvider.
+func (P *SrcCommitProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker {
+ // // TODO CCIP-2494
+ // "ContractConfigTracker called on SrcCommitProvider. Valid on DstCommitProvider.")
+ return UnimplementedContractConfigTracker{}
+}
+
+// ContractTransmitter returns an erroring stub; valid on DstCommitProvider.
+func (P *SrcCommitProvider) ContractTransmitter() ocrtypes.ContractTransmitter {
+ // // TODO CCIP-2494
+ // "ContractTransmitter called on SrcCommitProvider. Valid on DstCommitProvider."
+ return UnimplementedContractTransmitter{}
+}
+
+// ChainReader is not supported by this provider; always nil.
+func (P *SrcCommitProvider) ChainReader() commontypes.ContractReader {
+ return nil
+}
+
+// Codec is not supported by this provider; always nil.
+func (P *SrcCommitProvider) Codec() commontypes.Codec {
+ return nil
+}
+
+// Name returns a human-readable identifier for this provider.
+func (P *DstCommitProvider) Name() string {
+ return "CCIPCommitProvider.DstRelayerProvider"
+}
+
+// Close unregisters any readers that were created during the provider's
+// lifetime; readers that were never created are skipped via the nil guards.
+func (P *DstCommitProvider) Close() error {
+ versionFinder := ccip.NewEvmVersionFinder()
+
+ unregisterFuncs := make([]func() error, 0, 2)
+ unregisterFuncs = append(unregisterFuncs, func() error {
+ if P.seenCommitStoreAddress == nil {
+ return nil
+ }
+ return ccip.CloseCommitStoreReader(P.lggr, versionFinder, *P.seenCommitStoreAddress, P.client, P.lp)
+ })
+ unregisterFuncs = append(unregisterFuncs, func() error {
+ // NOTE(review): verify NewOffRampReader records seenOffRampAddress;
+ // otherwise this unregister branch can never fire.
+ if P.seenOffRampAddress == nil {
+ return nil
+ }
+ return ccip.CloseOffRampReader(P.lggr, versionFinder, *P.seenOffRampAddress, P.client, P.lp, nil, big.NewInt(0))
+ })
+
+ // Run every teardown and collect all failures rather than stopping at the
+ // first one.
+ var multiErr error
+ for _, fn := range unregisterFuncs {
+ if err := fn(); err != nil {
+ multiErr = multierr.Append(multiErr, err)
+ }
+ }
+ return multiErr
+}
+
+// Ready always reports ready; there is no async initialization to wait on.
+func (P *DstCommitProvider) Ready() error {
+ return nil
+}
+
+// HealthReport returns an empty (healthy) report.
+func (P *DstCommitProvider) HealthReport() map[string]error {
+ return make(map[string]error)
+}
+
+// OffchainConfigDigester delegates to the config watcher's real digester.
+func (P *DstCommitProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester {
+ return P.configWatcher.OffchainConfigDigester()
+}
+
+// ContractConfigTracker delegates to the config watcher's real tracker.
+func (P *DstCommitProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker {
+ return P.configWatcher.ContractConfigTracker()
+}
+
+// ContractTransmitter returns the transmitter wired in at construction.
+func (P *DstCommitProvider) ContractTransmitter() ocrtypes.ContractTransmitter {
+ return P.contractTransmitter
+}
+
+// ChainReader is not supported by this provider; always nil.
+func (P *DstCommitProvider) ChainReader() commontypes.ContractReader {
+ return nil
+}
+
+// Codec is not supported by this provider; always nil.
+func (P *DstCommitProvider) Codec() commontypes.Codec {
+ return nil
+}
+
+// Start optionally replays the source chain's log poller from startBlock so
+// historical events are available to the plugin; a zero startBlock skips it.
+func (P *SrcCommitProvider) Start(ctx context.Context) error {
+ if P.startBlock != 0 {
+ P.lggr.Infow("start replaying src chain", "fromBlock", P.startBlock)
+ return P.lp.Replay(ctx, int64(P.startBlock))
+ }
+ return nil
+}
+
+// Start optionally replays the dest chain's log poller from startBlock;
+// a zero startBlock skips the replay.
+func (P *DstCommitProvider) Start(ctx context.Context) error {
+ if P.startBlock != 0 {
+ P.lggr.Infow("start replaying dst chain", "fromBlock", P.startBlock)
+ return P.lp.Replay(ctx, int64(P.startBlock))
+ }
+ return nil
+}
+
+// NewPriceGetter is unsupported: a price getter cannot be assembled from a
+// single chain's relayer.
+func (P *SrcCommitProvider) NewPriceGetter(ctx context.Context) (priceGetter cciptypes.PriceGetter, err error) {
+ return nil, fmt.Errorf("can't construct a price getter from one relayer")
+}
+
+// NewPriceGetter is unsupported: a price getter cannot be assembled from a
+// single chain's relayer.
+func (P *DstCommitProvider) NewPriceGetter(ctx context.Context) (priceGetter cciptypes.PriceGetter, err error) {
+ return nil, fmt.Errorf("can't construct a price getter from one relayer")
+}
+
+// NewCommitStoreReader returns the source-side "incomplete" reader, which only
+// carries the gas estimator and max gas price; dest-only methods error.
+func (P *SrcCommitProvider) NewCommitStoreReader(ctx context.Context, commitStoreAddress cciptypes.Address) (commitStoreReader cciptypes.CommitStoreReader, err error) {
+ commitStoreReader = NewIncompleteSourceCommitStoreReader(P.estimator, P.maxGasPrice)
+ return
+}
+
+// NewCommitStoreReader builds the dest-side "incomplete" reader and records
+// the address so Close() can unregister it.
+func (P *DstCommitProvider) NewCommitStoreReader(ctx context.Context, commitStoreAddress cciptypes.Address) (commitStoreReader cciptypes.CommitStoreReader, err error) {
+ P.seenCommitStoreAddress = &commitStoreAddress
+
+ versionFinder := ccip.NewEvmVersionFinder()
+ commitStoreReader, err = NewIncompleteDestCommitStoreReader(P.lggr, versionFinder, commitStoreAddress, P.client, P.lp)
+ return
+}
+
+// NewOnRampReader constructs an onRamp reader for the source chain, recording
+// the address and both selectors so Close() can unregister it safely.
+func (P *SrcCommitProvider) NewOnRampReader(ctx context.Context, onRampAddress cciptypes.Address, sourceChainSelector uint64, destChainSelector uint64) (onRampReader cciptypes.OnRampReader, err error) {
+ P.seenOnRampAddress = &onRampAddress
+ P.seenSourceChainSelector = &sourceChainSelector
+ P.seenDestChainSelector = &destChainSelector
+
+ versionFinder := ccip.NewEvmVersionFinder()
+ onRampReader, err = ccip.NewOnRampReader(P.lggr, versionFinder, sourceChainSelector, destChainSelector, onRampAddress, P.lp, P.client)
+ return
+}
+
+// NewOnRampReader is only valid on SrcCommitProvider; the onRamp contract
+// lives on the source chain, so the dest-side provider rejects this call.
+func (P *DstCommitProvider) NewOnRampReader(ctx context.Context, onRampAddress cciptypes.Address, sourceChainSelector uint64, destChainSelector uint64) (onRampReader cciptypes.OnRampReader, err error) {
+ // Error message previously read "...DstCommitProvider.NewOnRampReader should
+ // be called..." — two sentences run together; fixed for readability.
+ return nil, fmt.Errorf("invalid: NewOnRampReader called for DstCommitProvider. NewOnRampReader should be called on SrcCommitProvider")
+}
+
+// NewOffRampReader is only valid on DstCommitProvider; the offRamp contract
+// lives on the dest chain.
+func (P *SrcCommitProvider) NewOffRampReader(ctx context.Context, offRampAddr cciptypes.Address) (offRampReader cciptypes.OffRampReader, err error) {
+ return nil, fmt.Errorf("invalid: NewOffRampReader called for SrcCommitProvider. NewOffRampReader should be called on DstCommitProvider")
+}
+
+// NewOffRampReader constructs an offRamp reader for the dest chain and records
+// its address so Close() can unregister it.
+func (P *DstCommitProvider) NewOffRampReader(ctx context.Context, offRampAddr cciptypes.Address) (offRampReader cciptypes.OffRampReader, err error) {
+ // Bug fix: the address was never recorded, so the offRamp unregister branch
+ // in Close() (guarded on seenOffRampAddress != nil) could never run and the
+ // reader leaked.
+ P.seenOffRampAddress = &offRampAddr
+
+ offRampReader, err = ccip.NewOffRampReader(P.lggr, P.versionFinder, offRampAddr, P.client, P.lp, P.gasEstimator, &P.maxGasPrice, true)
+ return
+}
+
+// NewPriceRegistryReader is only valid on DstCommitProvider; the price
+// registry is read on the dest chain.
+func (P *SrcCommitProvider) NewPriceRegistryReader(ctx context.Context, addr cciptypes.Address) (priceRegistryReader cciptypes.PriceRegistryReader, err error) {
+ // Bug fix: the message named NewOffRampReader (copy-paste from the method
+ // above); it now names the method that was actually called.
+ return nil, fmt.Errorf("invalid: NewPriceRegistryReader called for SrcCommitProvider. NewPriceRegistryReader should be called on DstCommitProvider")
+}
+
+// NewPriceRegistryReader builds a dest-chain price registry reader labeled for
+// the commit plugin.
+func (P *DstCommitProvider) NewPriceRegistryReader(ctx context.Context, addr cciptypes.Address) (priceRegistryReader cciptypes.PriceRegistryReader, err error) {
+ destPriceRegistry := ccip.NewEvmPriceRegistry(P.lp, P.client, P.lggr, ccip.CommitPluginLabel)
+ priceRegistryReader, err = destPriceRegistry.NewPriceRegistryReader(ctx, addr)
+ return
+}
+
+// SourceNativeToken resolves the wrapped-native token address configured on
+// the source chain's Router contract.
+func (P *SrcCommitProvider) SourceNativeToken(ctx context.Context, sourceRouterAddr cciptypes.Address) (cciptypes.Address, error) {
+ routerAddr, err := ccip.GenericAddrToEvm(sourceRouterAddr)
+ if err != nil {
+  return "", err
+ }
+ routerContract, err := router.NewRouter(routerAddr, P.client)
+ if err != nil {
+  return "", err
+ }
+ wrappedNative, err := routerContract.GetWrappedNative(&bind.CallOpts{Context: ctx})
+ if err != nil {
+  return "", err
+ }
+ return ccip.EvmAddrToGeneric(wrappedNative), nil
+}
+
+// SourceNativeToken is only valid on SrcCommitProvider; the router queried is
+// on the source chain.
+func (P *DstCommitProvider) SourceNativeToken(ctx context.Context, sourceRouterAddr cciptypes.Address) (cciptypes.Address, error) {
+ return "", fmt.Errorf("invalid: SourceNativeToken called for DstCommitProvider. SourceNativeToken should be called on SrcCommitProvider")
+}
diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go
index a0782380b5b..e310464a556 100644
--- a/core/services/relay/evm/evm.go
+++ b/core/services/relay/evm/evm.go
@@ -1,13 +1,24 @@
package evm
import (
+ "bytes"
"context"
+ "crypto/sha256"
"encoding/json"
"errors"
"fmt"
+ "math/big"
"strings"
"sync"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/ccipcommit"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/ccipexec"
+ ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
+ cciptransmitter "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/transmitter"
+
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/google/uuid"
@@ -72,6 +83,57 @@ func init() {
var _ commontypes.Relayer = &Relayer{} //nolint:staticcheck
+// The current PluginProvider interface does not support an error return. This was fine up until CCIP.
+// CCIP is the first product to introduce the idea of incomplete implementations of a provider based on
+// what chain (for CCIP, src or dest) the provider is created for. The Unimplemented* implementations below allow us to return
+// a non nil value, which is hopefully a better developer experience should you find yourself using the right methods
+// but on the *wrong* provider.
+
+// [UnimplementedOffchainConfigDigester] satisfies the OCR OffchainConfigDigester interface
+// while making every call fail loudly; it is returned by provider methods that
+// are valid only on the other (src/dest) side of a CCIP lane.
+type UnimplementedOffchainConfigDigester struct{}
+
+// ConfigDigest always errors; see the type comment.
+func (e UnimplementedOffchainConfigDigester) ConfigDigest(config ocrtypes.ContractConfig) (ocrtypes.ConfigDigest, error) {
+ return ocrtypes.ConfigDigest{}, fmt.Errorf("unimplemented for this relayer")
+}
+
+// ConfigDigestPrefix always errors; see the type comment.
+func (e UnimplementedOffchainConfigDigester) ConfigDigestPrefix() (ocrtypes.ConfigDigestPrefix, error) {
+ return 0, fmt.Errorf("unimplemented for this relayer")
+}
+
+// [UnimplementedContractConfigTracker] satisfies the OCR ContractConfigTracker interface
+// while making every call fail loudly; returned by provider methods that are
+// valid only on the other (src/dest) side of a CCIP lane.
+type UnimplementedContractConfigTracker struct{}
+
+// Notify returns a nil channel (which never delivers).
+func (u UnimplementedContractConfigTracker) Notify() <-chan struct{} {
+ return nil
+}
+
+// LatestConfigDetails always errors; see the type comment.
+func (u UnimplementedContractConfigTracker) LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) {
+ return 0, ocrtypes.ConfigDigest{}, fmt.Errorf("unimplemented for this relayer")
+}
+
+// LatestConfig always errors; see the type comment.
+func (u UnimplementedContractConfigTracker) LatestConfig(ctx context.Context, changedInBlock uint64) (ocrtypes.ContractConfig, error) {
+ return ocrtypes.ContractConfig{}, fmt.Errorf("unimplemented for this relayer")
+}
+
+// LatestBlockHeight always errors; see the type comment.
+func (u UnimplementedContractConfigTracker) LatestBlockHeight(ctx context.Context) (blockHeight uint64, err error) {
+ return 0, fmt.Errorf("unimplemented for this relayer")
+}
+
+// [UnimplementedContractTransmitter] satisfies the OCR ContractTransmitter interface
+// while making every call fail loudly; returned by provider methods that are
+// valid only on the other (src/dest) side of a CCIP lane.
+type UnimplementedContractTransmitter struct{}
+
+// Transmit always errors; see the type comment.
+func (u UnimplementedContractTransmitter) Transmit(context.Context, ocrtypes.ReportContext, ocrtypes.Report, []ocrtypes.AttributedOnchainSignature) error {
+ return fmt.Errorf("unimplemented for this relayer")
+}
+
+// FromAccount always errors; see the type comment.
+func (u UnimplementedContractTransmitter) FromAccount() (ocrtypes.Account, error) {
+ return "", fmt.Errorf("unimplemented for this relayer")
+}
+
+// LatestConfigDigestAndEpoch always errors; see the type comment.
+func (u UnimplementedContractTransmitter) LatestConfigDigestAndEpoch(ctx context.Context) (configDigest ocrtypes.ConfigDigest, epoch uint32, err error) {
+ return ocrtypes.ConfigDigest{}, 0, fmt.Errorf("unimplemented for this relayer")
+}
+
type Relayer struct {
ds sqlutil.DataSource
chain legacyevm.Chain
@@ -618,6 +680,17 @@ func generateTransmitterFrom(ctx context.Context, rargs commontypes.RelayArgs, e
configWatcher.chain.ID(),
ethKeystore,
)
+ case commontypes.CCIPExecution:
+ transmitter, err = cciptransmitter.NewTransmitterWithStatusChecker(
+ configWatcher.chain.TxManager(),
+ fromAddresses,
+ gasLimit,
+ effectiveTransmitterAddress,
+ strategy,
+ checker,
+ configWatcher.chain.ID(),
+ ethKeystore,
+ )
default:
transmitter, err = ocrcommon.NewTransmitter(
configWatcher.chain.TxManager(),
@@ -734,12 +807,158 @@ func (r *Relayer) NewAutomationProvider(rargs commontypes.RelayArgs, pargs commo
return ocr2keeperRelayer.NewOCR2KeeperProvider(rargs, pargs)
}
-func (r *Relayer) NewCCIPCommitProvider(_ commontypes.RelayArgs, _ commontypes.PluginArgs) (commontypes.CCIPCommitProvider, error) {
- return nil, errors.New("ccip.commit is not supported for evm")
+// chainToUUID deterministically derives a name-based UUID from an EVM chain
+// ID. It is used as the transmitter subject ID so each chain maps to a stable,
+// unique subject (see Test_CCIPSubjectUUID for the pinned properties).
+func chainToUUID(chainID *big.Int) uuid.UUID {
+ // See https://www.rfc-editor.org/rfc/rfc4122.html#section-4.1.3 for the list of supported versions.
+ const VersionSHA1 = 5
+ var buf bytes.Buffer
+ // "CCIP:" prefix domain-separates these UUIDs from other uses of the hash.
+ buf.WriteString("CCIP:")
+ buf.Write(chainID.Bytes())
+ // We use SHA-256 instead of SHA-1 because the former has better collision resistance.
+ // The UUID will contain only the first 16 bytes of the hash.
+ // You can't say which algorithms was used just by looking at the UUID bytes.
+ return uuid.NewHash(sha256.New(), uuid.NameSpaceOID, buf.Bytes(), VersionSHA1)
+}
-func (r *Relayer) NewCCIPExecProvider(_ commontypes.RelayArgs, _ commontypes.PluginArgs) (commontypes.CCIPExecProvider, error) {
- return nil, errors.New("ccip.exec is not supported for evm")
+// NewCCIPCommitProvider constructs a provider of type CCIPCommitProvider. Since this is happening in the Relayer,
+// which lives in a separate process from delegate which is requesting a provider, we need to wire in through pargs
+// which *type* (impl) of CCIPCommitProvider should be created. CCIP is currently a special case where the provider has a
+// subset of implementations of the complete interface as certain contracts in a CCIP lane are only deployed on the src
+// chain or on the dst chain. This results in the two implementations of providers: a src and dst implementation.
+func (r *Relayer) NewCCIPCommitProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.CCIPCommitProvider, error) {
+ // TODO https://smartcontract-it.atlassian.net/browse/BCF-2887
+ ctx := context.Background()
+
+ versionFinder := ccip.NewEvmVersionFinder()
+
+ var commitPluginConfig ccipconfig.CommitPluginConfig
+ err := json.Unmarshal(pargs.PluginConfig, &commitPluginConfig)
+ if err != nil {
+ return nil, err
+ }
+ sourceStartBlock := commitPluginConfig.SourceStartBlock
+ destStartBlock := commitPluginConfig.DestStartBlock
+
+ // The src chain implementation of this provider does not need a configWatcher or contractTransmitter;
+ // bail early.
+ if commitPluginConfig.IsSourceProvider {
+ return NewSrcCommitProvider(
+ r.lggr,
+ sourceStartBlock,
+ r.chain.Client(),
+ r.chain.LogPoller(),
+ r.chain.GasEstimator(),
+ r.chain.Config().EVM().GasEstimator().PriceMax().ToInt(),
+ ), nil
+ }
+
+ relayOpts := types.NewRelayOpts(rargs)
+ configWatcher, err := newStandardConfigProvider(ctx, r.lggr, r.chain, relayOpts)
+ if err != nil {
+ return nil, err
+ }
+ // Resolve the on-chain contract's type and version so the matching
+ // report-to-tx-metadata decoder can be selected.
+ address := common.HexToAddress(relayOpts.ContractID)
+ typ, ver, err := ccipconfig.TypeAndVersion(address, r.chain.Client())
+ if err != nil {
+ return nil, err
+ }
+ fn, err := ccipcommit.CommitReportToEthTxMeta(typ, ver)
+ if err != nil {
+ return nil, err
+ }
+ // The subject ID ties the transmitter's state to this chain (see chainToUUID).
+ subjectID := chainToUUID(configWatcher.chain.ID())
+ contractTransmitter, err := newOnChainContractTransmitter(ctx, r.lggr, rargs, r.ks.Eth(), configWatcher, configTransmitterOpts{
+ subjectID: &subjectID,
+ }, OCR2AggregatorTransmissionContractABI, WithReportToEthMetadata(fn), WithRetention(0))
+ if err != nil {
+ return nil, err
+ }
+
+ return NewDstCommitProvider(
+ r.lggr,
+ versionFinder,
+ destStartBlock,
+ r.chain.Client(),
+ r.chain.LogPoller(),
+ r.chain.GasEstimator(),
+ *r.chain.Config().EVM().GasEstimator().PriceMax().ToInt(),
+ *contractTransmitter,
+ configWatcher,
+ ), nil
+}
+
+// NewCCIPExecProvider constructs a provider of type CCIPExecProvider. Since this is happening in the Relayer,
+// which lives in a separate process from delegate which is requesting a provider, we need to wire in through pargs
+// which *type* (impl) of CCIPExecProvider should be created. CCIP is currently a special case where the provider has a
+// subset of implementations of the complete interface as certain contracts in a CCIP lane are only deployed on the src
+// chain or on the dst chain. This results in the two implementations of providers: a src and dst implementation.
+func (r *Relayer) NewCCIPExecProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.CCIPExecProvider, error) {
+ // TODO https://smartcontract-it.atlassian.net/browse/BCF-2887
+ ctx := context.Background()
+
+ versionFinder := ccip.NewEvmVersionFinder()
+
+ var execPluginConfig ccipconfig.ExecPluginConfig
+ err := json.Unmarshal(pargs.PluginConfig, &execPluginConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ usdcConfig := execPluginConfig.USDCConfig
+
+ // The src chain implementation of this provider does not need a configWatcher or contractTransmitter;
+ // bail early.
+ if execPluginConfig.IsSourceProvider {
+ return NewSrcExecProvider(
+ r.lggr,
+ versionFinder,
+ r.chain.Client(),
+ r.chain.GasEstimator(),
+ r.chain.Config().EVM().GasEstimator().PriceMax().ToInt(),
+ r.chain.LogPoller(),
+ execPluginConfig.SourceStartBlock,
+ execPluginConfig.JobID,
+ usdcConfig.AttestationAPI,
+ int(usdcConfig.AttestationAPITimeoutSeconds),
+ usdcConfig.AttestationAPIIntervalMilliseconds,
+ usdcConfig.SourceMessageTransmitterAddress,
+ )
+ }
+
+ relayOpts := types.NewRelayOpts(rargs)
+ configWatcher, err := newStandardConfigProvider(ctx, r.lggr, r.chain, relayOpts)
+ if err != nil {
+ return nil, err
+ }
+ // Resolve the on-chain contract's type and version so the matching
+ // report-to-tx-metadata decoder can be selected.
+ address := common.HexToAddress(relayOpts.ContractID)
+ typ, ver, err := ccipconfig.TypeAndVersion(address, r.chain.Client())
+ if err != nil {
+ return nil, err
+ }
+ fn, err := ccipexec.ExecReportToEthTxMeta(ctx, typ, ver)
+ if err != nil {
+ return nil, err
+ }
+ // The subject ID ties the transmitter's state to this chain (see chainToUUID).
+ subjectID := chainToUUID(configWatcher.chain.ID())
+ contractTransmitter, err := newOnChainContractTransmitter(ctx, r.lggr, rargs, r.ks.Eth(), configWatcher, configTransmitterOpts{
+ subjectID: &subjectID,
+ }, OCR2AggregatorTransmissionContractABI, WithReportToEthMetadata(fn), WithRetention(0))
+ if err != nil {
+ return nil, err
+ }
+
+ return NewDstExecProvider(
+ r.lggr,
+ versionFinder,
+ r.chain.Client(),
+ r.chain.LogPoller(),
+ execPluginConfig.DestStartBlock,
+ contractTransmitter,
+ configWatcher,
+ r.chain.GasEstimator(),
+ *r.chain.Config().EVM().GasEstimator().PriceMax().ToInt(),
+ r.chain.TxManager(),
+ cciptypes.Address(rargs.ContractID),
+ )
+}
var _ commontypes.MedianProvider = (*medianProvider)(nil)
diff --git a/core/services/relay/evm/exec_provider.go b/core/services/relay/evm/exec_provider.go
new file mode 100644
index 00000000000..ae3ce56532a
--- /dev/null
+++ b/core/services/relay/evm/exec_provider.go
@@ -0,0 +1,391 @@
+package evm
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "net/url"
+ "time"
+
+ "go.uber.org/multierr"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/types"
+ commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata/usdc"
+)
+
+// SrcExecProvider is the CCIP exec provider for the source chain of a lane.
+// Only source-side readers (onRamp, price registry, USDC token data) are
+// functional; dest-side methods return errors or Unimplemented* stubs.
+type SrcExecProvider struct {
+ lggr logger.Logger
+ versionFinder ccip.VersionFinder
+ client client.Client
+ lp logpoller.LogPoller
+ startBlock uint64
+ estimator gas.EvmFeeEstimator
+ maxGasPrice *big.Int
+ usdcReader *ccip.USDCReaderImpl
+ usdcAttestationAPI string
+ usdcAttestationAPITimeoutSeconds int
+ usdcAttestationAPIIntervalMilliseconds int
+ usdcSrcMsgTransmitterAddr common.Address
+
+ // these values are nil and are updated for Close()
+ seenOnRampAddress *cciptypes.Address
+ seenSourceChainSelector *uint64
+ seenDestChainSelector *uint64
+}
+
+// NewSrcExecProvider builds the source-chain exec provider. A USDC reader is
+// only constructed when usdcAttestationAPI is non-empty; otherwise
+// usdcReader stays nil.
+func NewSrcExecProvider(
+ lggr logger.Logger,
+ versionFinder ccip.VersionFinder,
+ client client.Client,
+ estimator gas.EvmFeeEstimator,
+ maxGasPrice *big.Int,
+ lp logpoller.LogPoller,
+ startBlock uint64,
+ jobID string,
+ usdcAttestationAPI string,
+ usdcAttestationAPITimeoutSeconds int,
+ usdcAttestationAPIIntervalMilliseconds int,
+ usdcSrcMsgTransmitterAddr common.Address,
+) (commontypes.CCIPExecProvider, error) {
+ var usdcReader *ccip.USDCReaderImpl
+ var err error
+ if usdcAttestationAPI != "" {
+ usdcReader, err = ccip.NewUSDCReader(lggr, jobID, usdcSrcMsgTransmitterAddr, lp, true)
+ if err != nil {
+ return nil, fmt.Errorf("new usdc reader: %w", err)
+ }
+ }
+
+ return &SrcExecProvider{
+ lggr: lggr,
+ versionFinder: versionFinder,
+ client: client,
+ estimator: estimator,
+ maxGasPrice: maxGasPrice,
+ lp: lp,
+ startBlock: startBlock,
+ usdcReader: usdcReader,
+ usdcAttestationAPI: usdcAttestationAPI,
+ usdcAttestationAPITimeoutSeconds: usdcAttestationAPITimeoutSeconds,
+ usdcAttestationAPIIntervalMilliseconds: usdcAttestationAPIIntervalMilliseconds,
+ usdcSrcMsgTransmitterAddr: usdcSrcMsgTransmitterAddr,
+ }, nil
+}
+
+// Name returns a human-readable identifier for this provider.
+func (s *SrcExecProvider) Name() string {
+ return "CCIP.SrcExecProvider"
+}
+
+// Start optionally replays the source chain's log poller from startBlock;
+// a zero startBlock skips the replay.
+func (s *SrcExecProvider) Start(ctx context.Context) error {
+ if s.startBlock != 0 {
+ s.lggr.Infow("start replaying src chain", "fromBlock", s.startBlock)
+ return s.lp.Replay(ctx, int64(s.startBlock))
+ }
+ return nil
+}
+
+// Close is called when the job that created this provider is closed.
+// It unregisters the onRamp and USDC readers if they were created.
+func (s *SrcExecProvider) Close() error {
+ versionFinder := ccip.NewEvmVersionFinder()
+
+ unregisterFuncs := make([]func() error, 0, 2)
+ unregisterFuncs = append(unregisterFuncs, func() error {
+ // avoid panic in the case NewOnRampReader wasn't called
+ // NOTE(review): this also dereferences seenSourceChainSelector and
+ // seenDestChainSelector — verify NewOnRampReader records them alongside
+ // seenOnRampAddress, otherwise this panics on shutdown.
+ if s.seenOnRampAddress == nil {
+ return nil
+ }
+ return ccip.CloseOnRampReader(s.lggr, versionFinder, *s.seenSourceChainSelector, *s.seenDestChainSelector, *s.seenOnRampAddress, s.lp, s.client)
+ })
+ unregisterFuncs = append(unregisterFuncs, func() error {
+ // A USDC reader only exists when an attestation API was configured.
+ if s.usdcAttestationAPI == "" {
+ return nil
+ }
+ return ccip.CloseUSDCReader(s.lggr, s.lggr.Name(), s.usdcSrcMsgTransmitterAddr, s.lp)
+ })
+ // Run every teardown and collect all failures rather than stopping early.
+ var multiErr error
+ for _, fn := range unregisterFuncs {
+ if err := fn(); err != nil {
+ multiErr = multierr.Append(multiErr, err)
+ }
+ }
+ return multiErr
+}
+
+// Ready always reports ready; there is no async initialization to wait on.
+func (s *SrcExecProvider) Ready() error {
+ return nil
+}
+
+// HealthReport returns an empty (healthy) report.
+func (s *SrcExecProvider) HealthReport() map[string]error {
+ return make(map[string]error)
+}
+
+// OffchainConfigDigester returns an erroring stub; only DstExecProvider has a
+// real digester.
+func (s *SrcExecProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester {
+ // TODO CCIP-2494
+ // OffchainConfigDigester called on SrcExecProvider. It should only be called on DstExecProvider
+ return UnimplementedOffchainConfigDigester{}
+}
+
+// ContractConfigTracker returns an erroring stub; valid on DstExecProvider.
+func (s *SrcExecProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker {
+ // TODO CCIP-2494
+ // "ContractConfigTracker called on SrcExecProvider. It should only be called on DstExecProvider
+ return UnimplementedContractConfigTracker{}
+}
+
+// ContractTransmitter returns an erroring stub; valid on DstExecProvider.
+func (s *SrcExecProvider) ContractTransmitter() ocrtypes.ContractTransmitter {
+ // TODO CCIP-2494
+ // "ContractTransmitter called on SrcExecProvider. It should only be called on DstExecProvider
+ return UnimplementedContractTransmitter{}
+}
+
+// ChainReader is not supported by this provider; always nil.
+func (s *SrcExecProvider) ChainReader() commontypes.ContractReader {
+ return nil
+}
+
+// Codec is not supported by this provider; always nil.
+func (s *SrcExecProvider) Codec() commontypes.Codec {
+ return nil
+}
+
+// GetTransactionStatus is only valid on DstExecProvider, which owns the txm.
+func (s *SrcExecProvider) GetTransactionStatus(ctx context.Context, transactionID string) (types.TransactionStatus, error) {
+ return 0, fmt.Errorf("invalid: GetTransactionStatus called on SrcExecProvider. It should only be called on DstExecProvider")
+}
+
+// NewCommitStoreReader returns the source-side "incomplete" reader, which
+// only carries the gas estimator and max gas price.
+func (s *SrcExecProvider) NewCommitStoreReader(ctx context.Context, addr cciptypes.Address) (commitStoreReader cciptypes.CommitStoreReader, err error) {
+ commitStoreReader = NewIncompleteSourceCommitStoreReader(s.estimator, s.maxGasPrice)
+ return
+}
+
+// NewOffRampReader is only valid on DstExecProvider; the offRamp lives on the
+// dest chain.
+func (s *SrcExecProvider) NewOffRampReader(ctx context.Context, addr cciptypes.Address) (cciptypes.OffRampReader, error) {
+ return nil, fmt.Errorf("invalid: NewOffRampReader called on SrcExecProvider. Valid on DstExecProvider")
+}
+
+// NewOnRampReader constructs an onRamp reader for the source chain and records
+// the address and both chain selectors needed by Close().
+func (s *SrcExecProvider) NewOnRampReader(ctx context.Context, onRampAddress cciptypes.Address, sourceChainSelector uint64, destChainSelector uint64) (onRampReader cciptypes.OnRampReader, err error) {
+ s.seenOnRampAddress = &onRampAddress
+ // Bug fix: Close() dereferences both selector pointers whenever
+ // seenOnRampAddress is set; not recording them here (as SrcCommitProvider
+ // does) caused a nil-pointer panic on shutdown.
+ s.seenSourceChainSelector = &sourceChainSelector
+ s.seenDestChainSelector = &destChainSelector
+
+ versionFinder := ccip.NewEvmVersionFinder()
+ onRampReader, err = ccip.NewOnRampReader(s.lggr, versionFinder, sourceChainSelector, destChainSelector, onRampAddress, s.lp, s.client)
+ return
+}
+
+// NewPriceRegistryReader builds a source-chain price registry reader labeled
+// for the exec plugin.
+func (s *SrcExecProvider) NewPriceRegistryReader(ctx context.Context, addr cciptypes.Address) (priceRegistryReader cciptypes.PriceRegistryReader, err error) {
+ srcPriceRegistry := ccip.NewEvmPriceRegistry(s.lp, s.client, s.lggr, ccip.ExecPluginLabel)
+ priceRegistryReader, err = srcPriceRegistry.NewPriceRegistryReader(ctx, addr)
+ return
+}
+
+// NewTokenDataReader builds a USDC token-data reader against the configured
+// attestation API.
+// NOTE(review): s.usdcReader is nil when usdcAttestationAPI was empty at
+// construction (and ParseRequestURI("") errors first) — confirm this method is
+// never reached without a configured attestation API.
+func (s *SrcExecProvider) NewTokenDataReader(ctx context.Context, tokenAddress cciptypes.Address) (tokenDataReader cciptypes.TokenDataReader, err error) {
+ attestationURI, err2 := url.ParseRequestURI(s.usdcAttestationAPI)
+ if err2 != nil {
+ return nil, fmt.Errorf("failed to parse USDC attestation API: %w", err2)
+ }
+ tokenAddr, err2 := ccip.GenericAddrToEvm(tokenAddress)
+ if err2 != nil {
+ return nil, fmt.Errorf("failed to parse token address: %w", err2)
+ }
+ tokenDataReader = usdc.NewUSDCTokenDataReader(
+ s.lggr,
+ s.usdcReader,
+ attestationURI,
+ s.usdcAttestationAPITimeoutSeconds,
+ tokenAddr,
+ time.Duration(s.usdcAttestationAPIIntervalMilliseconds)*time.Millisecond,
+ )
+ return
+}
+
+// NewTokenPoolBatchedReader is only valid on DstExecProvider; token pools are
+// read on the dest chain.
+func (s *SrcExecProvider) NewTokenPoolBatchedReader(ctx context.Context, offRampAddr cciptypes.Address, sourceChainSelector uint64) (cciptypes.TokenPoolBatchedReader, error) {
+ // Typo fix: the message previously said "DstExecProvdier".
+ return nil, fmt.Errorf("invalid: NewTokenPoolBatchedReader called on SrcExecProvider. It should only be called on DstExecProvider")
+}
+
+// SourceNativeToken resolves the wrapped-native token address configured on
+// the source chain's Router contract.
+func (s *SrcExecProvider) SourceNativeToken(ctx context.Context, sourceRouterAddr cciptypes.Address) (cciptypes.Address, error) {
+ sourceRouterAddrHex, err := ccip.GenericAddrToEvm(sourceRouterAddr)
+ if err != nil {
+ return "", err
+ }
+ sourceRouter, err := router.NewRouter(sourceRouterAddrHex, s.client)
+ if err != nil {
+ return "", err
+ }
+ sourceNative, err := sourceRouter.GetWrappedNative(&bind.CallOpts{Context: ctx})
+ if err != nil {
+ return "", err
+ }
+
+ return ccip.EvmAddrToGeneric(sourceNative), nil
+}
+
+// DstExecProvider is the CCIP exec provider for the dest chain of a lane. It
+// owns the OCR plumbing, the tx manager (for transaction status), and the
+// dest-side readers (commit store, offRamp).
+type DstExecProvider struct {
+ lggr logger.Logger
+ versionFinder ccip.VersionFinder
+ client client.Client
+ lp logpoller.LogPoller
+ startBlock uint64
+ contractTransmitter *contractTransmitter
+ configWatcher *configWatcher
+ gasEstimator gas.EvmFeeEstimator
+ maxGasPrice big.Int
+ txm txmgr.TxManager
+ offRampAddress cciptypes.Address
+
+ // these values are nil and are updated for Close()
+ seenCommitStoreAddr *cciptypes.Address
+}
+
+// NewDstExecProvider builds the dest-chain exec provider. offRampAddress is
+// stored up front so Close() can always unregister the offRamp reader.
+func NewDstExecProvider(
+ lggr logger.Logger,
+ versionFinder ccip.VersionFinder,
+ client client.Client,
+ lp logpoller.LogPoller,
+ startBlock uint64,
+ contractTransmitter *contractTransmitter,
+ configWatcher *configWatcher,
+ gasEstimator gas.EvmFeeEstimator,
+ maxGasPrice big.Int,
+ txm txmgr.TxManager,
+ offRampAddress cciptypes.Address,
+) (commontypes.CCIPExecProvider, error) {
+ return &DstExecProvider{
+ lggr: lggr,
+ versionFinder: versionFinder,
+ client: client,
+ lp: lp,
+ startBlock: startBlock,
+ contractTransmitter: contractTransmitter,
+ configWatcher: configWatcher,
+ gasEstimator: gasEstimator,
+ maxGasPrice: maxGasPrice,
+ txm: txm,
+ offRampAddress: offRampAddress,
+ }, nil
+}
+
+// Name returns a human-readable identifier for this provider.
+func (d *DstExecProvider) Name() string {
+ return "CCIP.DestRelayerExecProvider"
+}
+
+// Start optionally replays the dest chain's log poller from startBlock;
+// a zero startBlock skips the replay.
+func (d *DstExecProvider) Start(ctx context.Context) error {
+ if d.startBlock != 0 {
+ d.lggr.Infow("start replaying dst chain", "fromBlock", d.startBlock)
+ return d.lp.Replay(ctx, int64(d.startBlock))
+ }
+ return nil
+}
+
+// Close is called when the job that created this provider is deleted.
+// The commit store reader is only unregistered if NewCommitStoreReader was
+// called; the offRamp reader is always unregistered because its address is
+// recorded at construction time.
+func (d *DstExecProvider) Close() error {
+ versionFinder := ccip.NewEvmVersionFinder()
+
+ unregisterFuncs := make([]func() error, 0, 2)
+ unregisterFuncs = append(unregisterFuncs, func() error {
+ if d.seenCommitStoreAddr == nil {
+ return nil
+ }
+ return ccip.CloseCommitStoreReader(d.lggr, versionFinder, *d.seenCommitStoreAddr, d.client, d.lp)
+ })
+ unregisterFuncs = append(unregisterFuncs, func() error {
+ return ccip.CloseOffRampReader(d.lggr, versionFinder, d.offRampAddress, d.client, d.lp, nil, big.NewInt(0))
+ })
+
+ // Run every teardown and collect all failures rather than stopping early.
+ var multiErr error
+ for _, fn := range unregisterFuncs {
+ if err := fn(); err != nil {
+ multiErr = multierr.Append(multiErr, err)
+ }
+ }
+ return multiErr
+}
+
+func (d *DstExecProvider) Ready() error {
+ return nil
+}
+
+func (d *DstExecProvider) HealthReport() map[string]error {
+ return make(map[string]error)
+}
+
+func (d *DstExecProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester {
+ return d.configWatcher.OffchainConfigDigester()
+}
+
+func (d *DstExecProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker {
+ return d.configWatcher.ContractConfigTracker()
+}
+
+func (d *DstExecProvider) ContractTransmitter() ocrtypes.ContractTransmitter {
+ return d.contractTransmitter
+}
+
+func (d *DstExecProvider) ChainReader() commontypes.ContractReader {
+ return nil
+}
+
+func (d *DstExecProvider) Codec() commontypes.Codec {
+ return nil
+}
+
+func (d *DstExecProvider) GetTransactionStatus(ctx context.Context, transactionID string) (types.TransactionStatus, error) {
+ return d.txm.GetTransactionStatus(ctx, transactionID)
+}
+
+func (d *DstExecProvider) NewCommitStoreReader(ctx context.Context, addr cciptypes.Address) (commitStoreReader cciptypes.CommitStoreReader, err error) {
+ d.seenCommitStoreAddr = &addr
+
+ versionFinder := ccip.NewEvmVersionFinder()
+ commitStoreReader, err = NewIncompleteDestCommitStoreReader(d.lggr, versionFinder, addr, d.client, d.lp)
+ return
+}
+
+func (d *DstExecProvider) NewOffRampReader(ctx context.Context, offRampAddress cciptypes.Address) (offRampReader cciptypes.OffRampReader, err error) {
+ offRampReader, err = ccip.NewOffRampReader(d.lggr, d.versionFinder, offRampAddress, d.client, d.lp, d.gasEstimator, &d.maxGasPrice, true)
+ return
+}
+
+func (d *DstExecProvider) NewOnRampReader(ctx context.Context, addr cciptypes.Address, sourceChainSelector uint64, destChainSelector uint64) (cciptypes.OnRampReader, error) {
+ return nil, fmt.Errorf("invalid: NewOnRampReader called on DstExecProvider. It should only be called on SrcExecProvider")
+}
+
+func (d *DstExecProvider) NewPriceRegistryReader(ctx context.Context, addr cciptypes.Address) (priceRegistryReader cciptypes.PriceRegistryReader, err error) {
+ destPriceRegistry := ccip.NewEvmPriceRegistry(d.lp, d.client, d.lggr, ccip.ExecPluginLabel)
+ priceRegistryReader, err = destPriceRegistry.NewPriceRegistryReader(ctx, addr)
+ return
+}
+
+func (d *DstExecProvider) NewTokenDataReader(ctx context.Context, tokenAddress cciptypes.Address) (cciptypes.TokenDataReader, error) {
+ return nil, fmt.Errorf("invalid: NewTokenDataReader called on DstExecProvider. It should only be called on SrcExecProvider")
+}
+
+func (d *DstExecProvider) NewTokenPoolBatchedReader(ctx context.Context, offRampAddress cciptypes.Address, sourceChainSelector uint64) (tokenPoolBatchedReader cciptypes.TokenPoolBatchedReader, err error) {
+ batchCaller := ccip.NewDynamicLimitedBatchCaller(
+ d.lggr,
+ d.client,
+ uint(ccip.DefaultRpcBatchSizeLimit),
+ uint(ccip.DefaultRpcBatchBackOffMultiplier),
+ uint(ccip.DefaultMaxParallelRpcCalls),
+ )
+
+ tokenPoolBatchedReader, err = ccip.NewEVMTokenPoolBatchedReader(d.lggr, sourceChainSelector, offRampAddress, batchCaller)
+ if err != nil {
+ return nil, fmt.Errorf("new token pool batched reader: %w", err)
+ }
+ return
+}
+
+func (d *DstExecProvider) SourceNativeToken(ctx context.Context, addr cciptypes.Address) (cciptypes.Address, error) {
+ return "", fmt.Errorf("invalid: SourceNativeToken called on DstExecProvider. It should only be called on SrcExecProvider")
+}
diff --git a/core/services/relay/evm/statuschecker/mocks/ccip_transaction_status_checker.go b/core/services/relay/evm/statuschecker/mocks/ccip_transaction_status_checker.go
new file mode 100644
index 00000000000..9bd59ccf4ef
--- /dev/null
+++ b/core/services/relay/evm/statuschecker/mocks/ccip_transaction_status_checker.go
@@ -0,0 +1,104 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+
+ mock "github.com/stretchr/testify/mock"
+
+ types "github.com/smartcontractkit/chainlink-common/pkg/types"
+)
+
+// CCIPTransactionStatusChecker is an autogenerated mock type for the CCIPTransactionStatusChecker type
+type CCIPTransactionStatusChecker struct {
+ mock.Mock
+}
+
+type CCIPTransactionStatusChecker_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *CCIPTransactionStatusChecker) EXPECT() *CCIPTransactionStatusChecker_Expecter {
+ return &CCIPTransactionStatusChecker_Expecter{mock: &_m.Mock}
+}
+
+// CheckMessageStatus provides a mock function with given fields: ctx, msgID
+func (_m *CCIPTransactionStatusChecker) CheckMessageStatus(ctx context.Context, msgID string) ([]types.TransactionStatus, int, error) {
+ ret := _m.Called(ctx, msgID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CheckMessageStatus")
+ }
+
+ var r0 []types.TransactionStatus
+ var r1 int
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) ([]types.TransactionStatus, int, error)); ok {
+ return rf(ctx, msgID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) []types.TransactionStatus); ok {
+ r0 = rf(ctx, msgID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]types.TransactionStatus)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string) int); ok {
+ r1 = rf(ctx, msgID)
+ } else {
+ r1 = ret.Get(1).(int)
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string) error); ok {
+ r2 = rf(ctx, msgID)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// CCIPTransactionStatusChecker_CheckMessageStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckMessageStatus'
+type CCIPTransactionStatusChecker_CheckMessageStatus_Call struct {
+ *mock.Call
+}
+
+// CheckMessageStatus is a helper method to define mock.On call
+// - ctx context.Context
+// - msgID string
+func (_e *CCIPTransactionStatusChecker_Expecter) CheckMessageStatus(ctx interface{}, msgID interface{}) *CCIPTransactionStatusChecker_CheckMessageStatus_Call {
+ return &CCIPTransactionStatusChecker_CheckMessageStatus_Call{Call: _e.mock.On("CheckMessageStatus", ctx, msgID)}
+}
+
+func (_c *CCIPTransactionStatusChecker_CheckMessageStatus_Call) Run(run func(ctx context.Context, msgID string)) *CCIPTransactionStatusChecker_CheckMessageStatus_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *CCIPTransactionStatusChecker_CheckMessageStatus_Call) Return(transactionStatuses []types.TransactionStatus, retryCounter int, err error) *CCIPTransactionStatusChecker_CheckMessageStatus_Call {
+ _c.Call.Return(transactionStatuses, retryCounter, err)
+ return _c
+}
+
+func (_c *CCIPTransactionStatusChecker_CheckMessageStatus_Call) RunAndReturn(run func(context.Context, string) ([]types.TransactionStatus, int, error)) *CCIPTransactionStatusChecker_CheckMessageStatus_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewCCIPTransactionStatusChecker creates a new instance of CCIPTransactionStatusChecker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewCCIPTransactionStatusChecker(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *CCIPTransactionStatusChecker {
+ mock := &CCIPTransactionStatusChecker{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/services/relay/evm/statuschecker/txm_status_checker.go b/core/services/relay/evm/statuschecker/txm_status_checker.go
new file mode 100644
index 00000000000..f22e6d78b9f
--- /dev/null
+++ b/core/services/relay/evm/statuschecker/txm_status_checker.go
@@ -0,0 +1,54 @@
+package statuschecker
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/types"
+)
+
+// CCIPTransactionStatusChecker is an interface that defines the method for checking the status of a transaction.
+// CheckMessageStatus checks the statuses of all transactions associated with a given message ID.
+// It returns the list of transaction statuses, the retry counter (index of the last transaction found),
+// and an error if any occurred during the process.
+
+type CCIPTransactionStatusChecker interface {
+ CheckMessageStatus(ctx context.Context, msgID string) (transactionStatuses []types.TransactionStatus, retryCounter int, err error)
+}
+
+type TxmStatusChecker struct {
+ getTransactionStatus func(ctx context.Context, transactionID string) (types.TransactionStatus, error)
+}
+
+func NewTxmStatusChecker(getTransactionStatus func(ctx context.Context, transactionID string) (types.TransactionStatus, error)) *TxmStatusChecker {
+ return &TxmStatusChecker{getTransactionStatus: getTransactionStatus}
+}
+
+// CheckMessageStatus checks the status of a message by checking the status of all transactions associated with the message ID.
+// It returns a slice of all statuses and the zero-based index of the last transaction found (-1 if none were found).
+// Each transaction ID key follows the format: <msgID>-<counter>. TXM will be queried for each key until a NotFound error is returned.
+// The goal is to find all transactions associated with a message ID and snooze messages if they are fatal in the Execution Plugin.
+func (tsc *TxmStatusChecker) CheckMessageStatus(ctx context.Context, msgID string) ([]types.TransactionStatus, int, error) {
+ var counter int
+ const maxStatuses = 1000 // Cap the number of statuses to avoid infinite loop
+
+ allStatuses := make([]types.TransactionStatus, 0)
+
+ for {
+ transactionID := fmt.Sprintf("%s-%d", msgID, counter)
+ status, err := tsc.getTransactionStatus(ctx, transactionID)
+ if err != nil && status == types.Unknown {
+			// If the status is Unknown and the error is non-nil, the transaction was not found
+ break
+ }
+ allStatuses = append(allStatuses, status)
+ counter++
+
+ // Break the loop if the cap is reached
+ if counter >= maxStatuses {
+ return allStatuses, counter - 1, fmt.Errorf("maximum number of statuses reached, possible infinite loop")
+ }
+ }
+
+ return allStatuses, counter - 1, nil
+}
diff --git a/core/services/relay/evm/statuschecker/txm_status_checker_test.go b/core/services/relay/evm/statuschecker/txm_status_checker_test.go
new file mode 100644
index 00000000000..456d07e7a7d
--- /dev/null
+++ b/core/services/relay/evm/statuschecker/txm_status_checker_test.go
@@ -0,0 +1,103 @@
+package statuschecker
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/types"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+)
+
+func Test_CheckMessageStatus(t *testing.T) {
+ testutils.SkipShort(t, "")
+ ctx := context.Background()
+ mockTxManager := mocks.NewMockEvmTxManager(t)
+ checker := NewTxmStatusChecker(mockTxManager.GetTransactionStatus)
+
+ msgID := "test-message-id"
+
+ // Define test cases
+ testCases := []struct {
+ name string
+ setupMock func()
+ expectedStatus []types.TransactionStatus
+ expectedCounter int
+ expectedError error
+ }{
+ {
+ name: "No transactions found",
+ setupMock: func() {
+ mockTxManager.Mock = mock.Mock{}
+ mockTxManager.On("GetTransactionStatus", ctx, "test-message-id-0").Return(types.Unknown, errors.New("failed to find transaction with IdempotencyKey test-message-id-0"))
+ },
+ expectedStatus: []types.TransactionStatus{},
+ expectedCounter: -1,
+ expectedError: nil,
+ },
+ {
+ name: "Single transaction found",
+ setupMock: func() {
+ mockTxManager.Mock = mock.Mock{}
+ mockTxManager.On("GetTransactionStatus", ctx, "test-message-id-0").Return(types.Finalized, nil)
+ mockTxManager.On("GetTransactionStatus", ctx, "test-message-id-1").Return(types.Unknown, errors.New("failed to find transaction with IdempotencyKey test-message-id-1"))
+ },
+ expectedStatus: []types.TransactionStatus{types.Finalized},
+ expectedCounter: 0,
+ expectedError: nil,
+ },
+ {
+ name: "Multiple transactions found",
+ setupMock: func() {
+ mockTxManager.Mock = mock.Mock{}
+ mockTxManager.On("GetTransactionStatus", ctx, "test-message-id-0").Return(types.Finalized, nil)
+ mockTxManager.On("GetTransactionStatus", ctx, "test-message-id-1").Return(types.Failed, nil)
+ mockTxManager.On("GetTransactionStatus", ctx, "test-message-id-2").Return(types.Unknown, errors.New("failed to find transaction with IdempotencyKey test-message-id-2"))
+ },
+ expectedStatus: []types.TransactionStatus{types.Finalized, types.Failed},
+ expectedCounter: 1,
+ expectedError: nil,
+ },
+ {
+ name: "Unknown status without nil (in progress)",
+ setupMock: func() {
+ mockTxManager.Mock = mock.Mock{}
+ mockTxManager.On("GetTransactionStatus", ctx, "test-message-id-0").Return(types.Unknown, nil)
+ mockTxManager.On("GetTransactionStatus", ctx, "test-message-id-1").Return(types.Unknown, errors.New("failed to find transaction with IdempotencyKey test-message-id-1"))
+ },
+ expectedStatus: []types.TransactionStatus{types.Unknown},
+ expectedCounter: 0,
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ tc.setupMock()
+ statuses, counter, err := checker.CheckMessageStatus(ctx, msgID)
+ assert.Equal(t, tc.expectedStatus, statuses)
+ assert.Equal(t, tc.expectedCounter, counter)
+ assert.Equal(t, tc.expectedError, err)
+ mockTxManager.AssertExpectations(t)
+ })
+ }
+}
+
+func Test_FailForMoreThan1000Retries(t *testing.T) {
+ ctx := context.Background()
+ mockTxManager := mocks.NewMockEvmTxManager(t)
+ checker := NewTxmStatusChecker(mockTxManager.GetTransactionStatus)
+
+ for i := 0; i < 1000; i++ {
+ mockTxManager.On("GetTransactionStatus", ctx, fmt.Sprintf("test-message-id-%d", i)).Return(types.Finalized, nil)
+ }
+
+ msgID := "test-message-id"
+ _, _, err := checker.CheckMessageStatus(ctx, msgID)
+ assert.EqualError(t, err, "maximum number of statuses reached, possible infinite loop")
+}
diff --git a/core/services/synchronization/common.go b/core/services/synchronization/common.go
index 5f469c055d4..bfb9fba6de6 100644
--- a/core/services/synchronization/common.go
+++ b/core/services/synchronization/common.go
@@ -16,6 +16,8 @@ const (
OCR TelemetryType = "ocr"
OCR2Automation TelemetryType = "ocr2-automation"
OCR2Functions TelemetryType = "ocr2-functions"
+ OCR2CCIPCommit TelemetryType = "ocr2-ccip-commit"
+ OCR2CCIPExec TelemetryType = "ocr2-ccip-exec"
OCR2Threshold TelemetryType = "ocr2-threshold"
OCR2S4 TelemetryType = "ocr2-s4"
OCR2Median TelemetryType = "ocr2-median"
diff --git a/core/web/resolver/testdata/config-empty-effective.toml b/core/web/resolver/testdata/config-empty-effective.toml
index 1bad3fd91c6..f1325d824ea 100644
--- a/core/web/resolver/testdata/config-empty-effective.toml
+++ b/core/web/resolver/testdata/config-empty-effective.toml
@@ -6,6 +6,7 @@ ShutdownGracePeriod = '5s'
FeedsManager = true
LogPoller = false
UICSAKeys = false
+CCIP = true
[Database]
DefaultIdleInTxSessionTimeout = '1h0m0s'
diff --git a/core/web/resolver/testdata/config-full.toml b/core/web/resolver/testdata/config-full.toml
index 1672eb1b41d..9421e6198ee 100644
--- a/core/web/resolver/testdata/config-full.toml
+++ b/core/web/resolver/testdata/config-full.toml
@@ -6,6 +6,7 @@ ShutdownGracePeriod = '10s'
FeedsManager = true
LogPoller = true
UICSAKeys = true
+CCIP = true
[Database]
DefaultIdleInTxSessionTimeout = '1m0s'
diff --git a/core/web/resolver/testdata/config-multi-chain-effective.toml b/core/web/resolver/testdata/config-multi-chain-effective.toml
index 0e12af9a7e4..1c4093cbfca 100644
--- a/core/web/resolver/testdata/config-multi-chain-effective.toml
+++ b/core/web/resolver/testdata/config-multi-chain-effective.toml
@@ -6,6 +6,7 @@ ShutdownGracePeriod = '5s'
FeedsManager = true
LogPoller = false
UICSAKeys = false
+CCIP = true
[Database]
DefaultIdleInTxSessionTimeout = '1h0m0s'
diff --git a/docs/CONFIG.md b/docs/CONFIG.md
index 5caab7614e8..47935390ce8 100644
--- a/docs/CONFIG.md
+++ b/docs/CONFIG.md
@@ -51,6 +51,7 @@ ShutdownGracePeriod is the maximum time allowed to shut down gracefully. If exce
FeedsManager = true # Default
LogPoller = false # Default
UICSAKeys = false # Default
+CCIP = true # Default
```
@@ -72,6 +73,12 @@ UICSAKeys = false # Default
```
UICSAKeys enables CSA Keys in the UI.
+### CCIP
+```toml
+CCIP = true # Default
+```
+CCIP enables the CCIP service.
+
## Database
```toml
[Database]
diff --git a/go.mod b/go.mod
index 45e0b62d52e..4b216ddc0d4 100644
--- a/go.mod
+++ b/go.mod
@@ -9,11 +9,12 @@ require (
github.com/NethermindEth/juno v0.3.1
github.com/NethermindEth/starknet.go v0.7.1-0.20240401080518-34a506f3cfdb
github.com/XSAM/otelsql v0.27.0
- github.com/avast/retry-go/v4 v4.5.1
+ github.com/avast/retry-go/v4 v4.6.0
github.com/btcsuite/btcd/btcec/v2 v2.3.2
github.com/cometbft/cometbft v0.37.2
github.com/cosmos/cosmos-sdk v0.47.4
github.com/danielkov/gin-helmet v0.0.0-20171108135313-1387e224435e
+ github.com/deckarep/golang-set/v2 v2.3.0
github.com/dominikbraun/graph v0.23.0
github.com/esote/minmaxheap v1.0.0
github.com/ethereum/go-ethereum v1.13.8
@@ -67,6 +68,7 @@ require (
github.com/prometheus/prometheus v0.48.1
github.com/robfig/cron/v3 v3.0.1
github.com/rogpeppe/go-internal v1.12.0
+ github.com/rs/zerolog v1.30.0
github.com/scylladb/go-reflectx v1.0.1
github.com/shirou/gopsutil/v3 v3.24.3
github.com/shopspring/decimal v1.4.0
@@ -92,6 +94,7 @@ require (
github.com/umbracle/ethgo v0.1.3
github.com/unrolled/secure v1.13.0
github.com/urfave/cli v1.22.14
+ github.com/wk8/go-ordered-map/v2 v2.1.8
go.dedis.ch/fixbuf v1.0.3
go.dedis.ch/kyber/v3 v3.1.0
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.49.0
@@ -112,6 +115,7 @@ require (
google.golang.org/protobuf v1.34.2
gopkg.in/guregu/null.v4 v4.0.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
+ k8s.io/utils v0.0.0-20230711102312-30195339c3c7
)
require (
@@ -174,7 +178,6 @@ require (
github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
github.com/danieljoos/wincred v1.1.2 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
- github.com/deckarep/golang-set/v2 v2.3.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/dfuse-io/logging v0.0.0-20210109005628-b97a57253f70 // indirect
github.com/dgraph-io/badger/v2 v2.2007.4 // indirect
@@ -312,7 +315,6 @@ require (
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 // indirect
github.com/valyala/fastjson v1.4.1 // indirect
- github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
github.com/zondax/hid v0.9.1 // indirect
diff --git a/go.sum b/go.sum
index 4a6b294c122..6b0ec5aa5c8 100644
--- a/go.sum
+++ b/go.sum
@@ -150,8 +150,8 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/avast/retry-go/v4 v4.5.1 h1:AxIx0HGi4VZ3I02jr78j5lZ3M6x1E0Ivxa6b0pUUh7o=
-github.com/avast/retry-go/v4 v4.5.1/go.mod h1:/sipNsvNB3RRuT5iNcb6h73nw3IBmXJ/H3XrCQYSOpc=
+github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA=
+github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE=
github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4=
@@ -264,6 +264,7 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk=
github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis=
@@ -491,6 +492,7 @@ github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0=
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
@@ -1093,6 +1095,7 @@ github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99
github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo=
github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c=
@@ -1911,6 +1914,8 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
+k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc=
+k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI=
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4=
modernc.org/libc v1.41.0 h1:g9YAc6BkKlgORsUWj+JwqoB1wU3o4DE3bM3yvA3k+Gk=
diff --git a/integration-tests/go.mod b/integration-tests/go.mod
index 8f9652099b1..0c0ce337695 100644
--- a/integration-tests/go.mod
+++ b/integration-tests/go.mod
@@ -6,7 +6,7 @@ go 1.22.5
replace github.com/smartcontractkit/chainlink/v2 => ../
require (
- github.com/avast/retry-go/v4 v4.5.1
+ github.com/avast/retry-go/v4 v4.6.0
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df
github.com/chaos-mesh/chaos-mesh/api v0.0.0-20240709130330-9f4feec7553f
github.com/cli/go-gh/v2 v2.0.0
diff --git a/integration-tests/go.sum b/integration-tests/go.sum
index bca92f4a97c..8b7bd1d2a14 100644
--- a/integration-tests/go.sum
+++ b/integration-tests/go.sum
@@ -207,8 +207,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
-github.com/avast/retry-go/v4 v4.5.1 h1:AxIx0HGi4VZ3I02jr78j5lZ3M6x1E0Ivxa6b0pUUh7o=
-github.com/avast/retry-go/v4 v4.5.1/go.mod h1:/sipNsvNB3RRuT5iNcb6h73nw3IBmXJ/H3XrCQYSOpc=
+github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA=
+github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE=
github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod
index f0554f2c725..4395a3ce486 100644
--- a/integration-tests/load/go.mod
+++ b/integration-tests/load/go.mod
@@ -76,7 +76,7 @@ require (
github.com/armon/go-metrics v0.4.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/avast/retry-go v3.0.0+incompatible // indirect
- github.com/avast/retry-go/v4 v4.5.1 // indirect
+ github.com/avast/retry-go/v4 v4.6.0 // indirect
github.com/aws/aws-sdk-go v1.45.25 // indirect
github.com/aws/constructs-go/constructs/v10 v10.1.255 // indirect
github.com/aws/jsii-runtime-go v1.75.0 // indirect
diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum
index f31a11d389d..40152875414 100644
--- a/integration-tests/load/go.sum
+++ b/integration-tests/load/go.sum
@@ -207,8 +207,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
-github.com/avast/retry-go/v4 v4.5.1 h1:AxIx0HGi4VZ3I02jr78j5lZ3M6x1E0Ivxa6b0pUUh7o=
-github.com/avast/retry-go/v4 v4.5.1/go.mod h1:/sipNsvNB3RRuT5iNcb6h73nw3IBmXJ/H3XrCQYSOpc=
+github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA=
+github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE=
github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
diff --git a/testdata/scripts/node/validate/default.txtar b/testdata/scripts/node/validate/default.txtar
index 1063d9c2a5a..ff8b4889c49 100644
--- a/testdata/scripts/node/validate/default.txtar
+++ b/testdata/scripts/node/validate/default.txtar
@@ -18,6 +18,7 @@ ShutdownGracePeriod = '5s'
FeedsManager = true
LogPoller = false
UICSAKeys = false
+CCIP = true
[Database]
DefaultIdleInTxSessionTimeout = '1h0m0s'
diff --git a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar
index 56ce1ea7ba8..f4dd43cb900 100644
--- a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar
+++ b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar
@@ -62,6 +62,7 @@ ShutdownGracePeriod = '5s'
FeedsManager = true
LogPoller = false
UICSAKeys = false
+CCIP = true
[Database]
DefaultIdleInTxSessionTimeout = '1h0m0s'
diff --git a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar
index e534c67a2f3..75a6ae36418 100644
--- a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar
+++ b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar
@@ -62,6 +62,7 @@ ShutdownGracePeriod = '5s'
FeedsManager = true
LogPoller = false
UICSAKeys = false
+CCIP = true
[Database]
DefaultIdleInTxSessionTimeout = '1h0m0s'
diff --git a/testdata/scripts/node/validate/disk-based-logging.txtar b/testdata/scripts/node/validate/disk-based-logging.txtar
index 29bc189e561..97bae5a84b6 100644
--- a/testdata/scripts/node/validate/disk-based-logging.txtar
+++ b/testdata/scripts/node/validate/disk-based-logging.txtar
@@ -62,6 +62,7 @@ ShutdownGracePeriod = '5s'
FeedsManager = true
LogPoller = false
UICSAKeys = false
+CCIP = true
[Database]
DefaultIdleInTxSessionTimeout = '1h0m0s'
diff --git a/testdata/scripts/node/validate/invalid-ocr-p2p.txtar b/testdata/scripts/node/validate/invalid-ocr-p2p.txtar
index 6a09dd06c47..0cdf001eccd 100644
--- a/testdata/scripts/node/validate/invalid-ocr-p2p.txtar
+++ b/testdata/scripts/node/validate/invalid-ocr-p2p.txtar
@@ -47,6 +47,7 @@ ShutdownGracePeriod = '5s'
FeedsManager = true
LogPoller = false
UICSAKeys = false
+CCIP = true
[Database]
DefaultIdleInTxSessionTimeout = '1h0m0s'
diff --git a/testdata/scripts/node/validate/invalid.txtar b/testdata/scripts/node/validate/invalid.txtar
index 60c42c7c399..ab6860ec790 100644
--- a/testdata/scripts/node/validate/invalid.txtar
+++ b/testdata/scripts/node/validate/invalid.txtar
@@ -52,6 +52,7 @@ ShutdownGracePeriod = '5s'
FeedsManager = true
LogPoller = false
UICSAKeys = false
+CCIP = true
[Database]
DefaultIdleInTxSessionTimeout = '1h0m0s'
diff --git a/testdata/scripts/node/validate/valid.txtar b/testdata/scripts/node/validate/valid.txtar
index 719bb8bcc47..603fdaada66 100644
--- a/testdata/scripts/node/validate/valid.txtar
+++ b/testdata/scripts/node/validate/valid.txtar
@@ -59,6 +59,7 @@ ShutdownGracePeriod = '5s'
FeedsManager = true
LogPoller = false
UICSAKeys = false
+CCIP = true
[Database]
DefaultIdleInTxSessionTimeout = '1h0m0s'
diff --git a/testdata/scripts/node/validate/warnings.txtar b/testdata/scripts/node/validate/warnings.txtar
index a652943e26b..dea40ec8da0 100644
--- a/testdata/scripts/node/validate/warnings.txtar
+++ b/testdata/scripts/node/validate/warnings.txtar
@@ -41,6 +41,7 @@ ShutdownGracePeriod = '5s'
FeedsManager = true
LogPoller = false
UICSAKeys = false
+CCIP = true
[Database]
DefaultIdleInTxSessionTimeout = '1h0m0s'
From 1d81278edace2411f0d87b7e111321bd67b6b0a5 Mon Sep 17 00:00:00 2001
From: Bartek Tofel
Date: Tue, 6 Aug 2024 13:56:03 +0200
Subject: [PATCH 12/52] update readme's with information about CL node TOML
config (#14028)
---
integration-tests/README.md | 5 +--
integration-tests/testconfig/README.md | 9 +++--
integration-tests/testconfig/default.toml | 41 +++++++++++++++++++++--
3 files changed, 48 insertions(+), 7 deletions(-)
diff --git a/integration-tests/README.md b/integration-tests/README.md
index fcfefe97a73..180021efeef 100644
--- a/integration-tests/README.md
+++ b/integration-tests/README.md
@@ -27,6 +27,8 @@ version = "your tag"
The `./testconfig/overrides.toml` file **should never be committed** and has been added to the [.gitignore](../.gitignore) file as it can often contain secrets like private keys and RPC URLs.
+For more information on how to configure the tests, see the [testconfig README](./testconfig/README.md).
+
## Build
If you'd like to run the tests on a local build of Chainlink, you can point to your own docker image, or build a fresh one with `make`.
@@ -76,8 +78,7 @@ make test_soak_ocr_reorg_2
Run reorg/automation_reorg_test.go with reorg settings:
-1. Use Simulated Geth network and put GethReorgConfig in overrides.toml
-
+1. Use Simulated Geth network and put GethReorgConfig in overrides.toml
```toml
[Network]
diff --git a/integration-tests/testconfig/README.md b/integration-tests/testconfig/README.md
index 7ff6cedd24c..878b36bc756 100644
--- a/integration-tests/testconfig/README.md
+++ b/integration-tests/testconfig/README.md
@@ -137,7 +137,9 @@ DefaultTransactionQueueDepth = 0
"""
```
Note that you cannot override individual values in BaseConfigTOML. You must provide the entire configuration.
+This corresponds to [Config struct](../../core/services/chainlink/config.go) in Chainlink Node that excludes all chain-specific configuration, which is built based on selected_networks and either Chainlink Node's defaults for each network, or `ChainConfigTOMLByChainID` (if an entry with matching chain id is defined) or `CommonChainConfigTOML` (if no entry with matching chain id is defined).
+If BaseConfigTOML is empty, then the default base config provided by the Chainlink Node is used. If tracing is enabled, a unique id will be generated and shared between all Chainlink nodes in the same test.
To set base config for EVM chains use `NodeConfig.CommonChainConfigTOML`. Example:
```toml
@@ -153,12 +155,12 @@ FeeCapDefault = '200 gwei'
"""
```
-This is the default configuration used for all EVM chains unless ChainConfigTOMLByChainID is specified.
+This is the default configuration used for all EVM chains unless `ChainConfigTOMLByChainID` is specified. Do remember that if either `ChainConfigTOMLByChainID` or `CommonChainConfigTOML` is defined, it will override any defaults that Chainlink Node might have for the given network. Part of the configuration that defines blockchain node URLs is always dynamically generated based on the EVMNetwork configuration.
To set custom per-chain config use `[NodeConfig.ChainConfigTOMLByChainID]`. Example:
```toml
[NodeConfig.ChainConfigTOMLByChainID]
-# applicable for arbitrum-goerli chain
+# applicable only to arbitrum-goerli chain
421613 = """
[GasEstimator]
PriceMax = '400 gwei'
@@ -170,7 +172,8 @@ BumpMin = '100 gwei'
"""
```
-For more examples see `example.toml` in product TOML configs like `testconfig/automation/example.toml`.
+For more examples see `example.toml` in product TOML configs like `testconfig/automation/example.toml`. If either ChainConfigTOMLByChainID or CommonChainConfigTOML is defined, it will override any defaults that Chainlink Node might have for the given network. Part of the configuration that defines blockchain node URLs is always dynamically generated based on the EVMNetwork configuration.
+Currently, all networks are treated as EVM networks. There's no way to provide Solana, Starknet, Cosmos or Aptos configuration yet.
### Setting env vars for Chainlink Node
diff --git a/integration-tests/testconfig/default.toml b/integration-tests/testconfig/default.toml
index e4e216cf4a8..0d0bb14da95 100644
--- a/integration-tests/testconfig/default.toml
+++ b/integration-tests/testconfig/default.toml
@@ -1,37 +1,72 @@
[Logging]
+# set to true to flush logs to selected target regardless of test result; otherwise logs are only flushed if test failed
test_log_collect = false
[Logging.LogStream]
+# supported targets: file, loki, in-memory. if empty no logs will be persisted
log_targets = ["file"]
+# context timeout for starting log producer and also time-frame for requesting logs
log_producer_timeout = "10s"
+# number of retries before log producer gives up and stops listening to logs
log_producer_retry_limit = 10
[ChainlinkImage]
+# postgres version to use
postgres_version = "15.6"
+# chainlink image to use
image = "public.ecr.aws/chainlink/chainlink"
+# chainlink image tag to use
version = "2.12.0"
[Common]
+# chainlink node funding in native token
chainlink_node_funding = 0.5
[Network]
+# slice of networks to use; at least one network must be selected; each selected network must either be already defined in the CTF as a known network, or be defined in
+# TOML test files as a new network
selected_networks = ["simulated"]
[PrivateEthereumNetwork]
+# ethereum version to use; eth1 or eth2 (post-merge)
ethereum_version = "eth1"
+# execution layer to use; geth, besu, nethermind, erigon or reth
execution_layer = "geth"
[PrivateEthereumNetwork.EthereumChainConfig]
+# duration of single slot, lower => faster block production, must be >= 3
seconds_per_slot = 3
+# number of slots in epoch, lower => faster epoch finalisation, must be >= 2
slots_per_epoch = 2
+# extra genesis delay, no need to modify, but it should be after all validators/beacon chain starts
genesis_delay = 15
+# number of validators in the network
validator_count = 4
+# chain id to use
chain_id = 1337
+# slice of addresses that will be funded with native token in genesis
addresses_to_fund = ["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"]
+# map of hard fork epochs for each network; key is fork name, value is hard fork epoch
+# keep in mind that this depends on the specific version of eth2 client you are using
+# this configuration is fault-tolerant and incorrect forks will be ignored
[PrivateEthereumNetwork.EthereumChainConfig.HardForkEpochs]
Deneb = 500
+# General config of the Chainlink node corresponding to core/services/chainlink/config.go (Config struct) that excludes
+# all chain-specific configuration, which is built based on selected_networks and either Chainlink Node's defaults for
+# each network, or ChainConfigTOMLByChainID (if an entry with matching chain id is defined) or CommonChainConfigTOML (if no
+# entry with matching chain id is defined).
+#
+# Please remember that if either ChainConfigTOMLByChainID or CommonChainConfigTOML is defined, it will override any defaults
+# that Chainlink Node might have for the given network. Part of the configuration that defines blockchain node URLs is always
+# dynamically generated based on the EVMNetwork configuration.
+#
+# Last, but not least, currently all selected networks are treated as EVM networks. There's no way to provide Solana, Starknet,
+# Cosmos or Aptos configuration yet.
+#
+# If BaseConfigTOML is empty, then default base config provided by the Chainlink Node is used.
+# Also, if tracing is enabled, a unique id will be generated and shared between all Chainlink nodes in the same test.
[NodeConfig]
BaseConfigTOML = """
[Feature]
@@ -78,12 +113,14 @@ DeltaDial = '500ms'
DeltaReconcile = '5s'
"""
-# override config toml related to EVMNode configs for chainlink nodes; applicable to all EVM node configs in chainlink toml
+# Overrides default config TOML related to EVMNode configs for chainlink nodes; applicable to all EVM node configs in chainlink TOML.
+# Do not use it, if you want the default values to be used. Passing blockchain nodes URLs here will have no effect.
CommonChainConfigTOML = """
"""
[NodeConfig.ChainConfigTOMLByChainID]
-# applicable for simulated chain
+# Chain-specific EVMNode config TOML for chainlink nodes; applicable to all EVM node configs in chainlink TOML. It takes precedence
+# over CommonChainConfigTOML and Chainlink Node's defaults. Passing blockchain nodes URLs here will have no effect.
1337 = """
AutoCreateKey = true
FinalityDepth = 1
From d963b0aaac2117902742cf1d6fc8471e82ae711b Mon Sep 17 00:00:00 2001
From: Matthew Pendrey
Date: Tue, 6 Aug 2024 15:15:24 +0100
Subject: [PATCH 13/52] ks-409 fix the mock trigger to ensure events are sent
(#14047)
---
.changeset/odd-hats-repeat.md | 5 +++++
.../integration_tests/mock_trigger.go | 18 ++++++++++--------
.../integration_tests/streams_test.go | 6 +++---
3 files changed, 18 insertions(+), 11 deletions(-)
create mode 100644 .changeset/odd-hats-repeat.md
diff --git a/.changeset/odd-hats-repeat.md b/.changeset/odd-hats-repeat.md
new file mode 100644
index 00000000000..ce80b45caff
--- /dev/null
+++ b/.changeset/odd-hats-repeat.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+#internal fix the mock trigger to ensure events are sent
diff --git a/core/capabilities/integration_tests/mock_trigger.go b/core/capabilities/integration_tests/mock_trigger.go
index cb673f54ff6..0ed1fe5c8dd 100644
--- a/core/capabilities/integration_tests/mock_trigger.go
+++ b/core/capabilities/integration_tests/mock_trigger.go
@@ -88,18 +88,20 @@ func (s *streamsTrigger) RegisterTrigger(ctx context.Context, request capabiliti
responseCh := make(chan capabilities.CapabilityResponse)
- ctxWithCancel, cancel := context.WithCancel(ctx)
+ ctxWithCancel, cancel := context.WithCancel(context.Background())
s.cancel = cancel
s.wg.Add(1)
go func() {
defer s.wg.Done()
- select {
- case <-s.stopCh:
- return
- case <-ctxWithCancel.Done():
- return
- case resp := <-s.toSend:
- responseCh <- resp
+ for {
+ select {
+ case <-s.stopCh:
+ return
+ case <-ctxWithCancel.Done():
+ return
+ case resp := <-s.toSend:
+ responseCh <- resp
+ }
}
}()
diff --git a/core/capabilities/integration_tests/streams_test.go b/core/capabilities/integration_tests/streams_test.go
index 7be392932f8..8c8f51914c2 100644
--- a/core/capabilities/integration_tests/streams_test.go
+++ b/core/capabilities/integration_tests/streams_test.go
@@ -24,7 +24,7 @@ func Test_AllAtOnceTransmissionSchedule(t *testing.T) {
// in the setupCapabilitiesRegistryContract function, should this order change the don IDs will need updating.
workflowDonInfo := createDonInfo(t, don{id: 1, numNodes: 7, f: 2})
triggerDonInfo := createDonInfo(t, don{id: 2, numNodes: 7, f: 2})
- targetDonInfo := createDonInfo(t, don{id: 3, numNodes: 4, f: 2})
+ targetDonInfo := createDonInfo(t, don{id: 3, numNodes: 4, f: 1})
consumer, feedIDs, triggerSink := setupStreamDonsWithTransmissionSchedule(ctx, t, workflowDonInfo, triggerDonInfo, targetDonInfo, 3,
"2s", "allAtOnce")
@@ -45,8 +45,8 @@ func Test_OneAtATimeTransmissionSchedule(t *testing.T) {
// The don IDs set in the below calls are inferred from the order in which the dons are added to the capabilities registry
// in the setupCapabilitiesRegistryContract function, should this order change the don IDs will need updating.
- workflowDonInfo := createDonInfo(t, don{id: 1, numNodes: 5, f: 1})
- triggerDonInfo := createDonInfo(t, don{id: 2, numNodes: 7, f: 1})
+ workflowDonInfo := createDonInfo(t, don{id: 1, numNodes: 7, f: 2})
+ triggerDonInfo := createDonInfo(t, don{id: 2, numNodes: 7, f: 2})
targetDonInfo := createDonInfo(t, don{id: 3, numNodes: 4, f: 1})
consumer, feedIDs, triggerSink := setupStreamDonsWithTransmissionSchedule(ctx, t, workflowDonInfo, triggerDonInfo, targetDonInfo, 3,
From d1d0f445de2e7f4cca132d805be8194be4e50703 Mon Sep 17 00:00:00 2001
From: Bartek Tofel
Date: Tue, 6 Aug 2024 17:20:41 +0200
Subject: [PATCH 14/52] [TT-1262] dump pg on failure (#14029)
* bump CTF
* bump bump
* go mod
* bump bump
* bump to 1.34.0
* dump Postgres db on failure and upload as artifacts
* test dump in CI
* remove test failing on demand, fix test-summary action input name
* use tagged CTF, save sql dump also if flag is set
---
.../workflows/client-compatibility-tests.yml | 1 +
.github/workflows/integration-tests.yml | 7 ++-
.github/workflows/live-testnet-tests.yml | 2 +-
.github/workflows/live-vrf-tests.yml | 4 +-
.../on-demand-keeper-smoke-tests.yml | 3 +-
.../on-demand-vrfv2-eth2-clients-test.yml | 22 ++++---
.../on-demand-vrfv2plus-eth2-clients-test.yml | 16 ++---
.../run-e2e-tests-reusable-workflow.yml | 63 ++++++++++---------
.gitignore | 1 +
integration-tests/actions/private_network.go | 5 +-
integration-tests/chaos/ocr_chaos_test.go | 4 +-
.../citool/cmd/create_test_config_cmd.go | 9 +--
.../citool/cmd/test_config_cmd_test.go | 5 +-
.../docker/test_env/test_env_builder.go | 43 ++++++++++++-
integration-tests/go.mod | 5 +-
integration-tests/go.sum | 10 +--
integration-tests/load/go.mod | 5 +-
integration-tests/load/go.sum | 10 +--
integration-tests/testsetups/ocr.go | 7 +--
19 files changed, 138 insertions(+), 84 deletions(-)
diff --git a/.github/workflows/client-compatibility-tests.yml b/.github/workflows/client-compatibility-tests.yml
index 91ada8b7ab4..9c1971abb61 100644
--- a/.github/workflows/client-compatibility-tests.yml
+++ b/.github/workflows/client-compatibility-tests.yml
@@ -641,6 +641,7 @@ jobs:
artifacts_name: ${{ env.TEST_LOG_NAME }}
artifacts_location: |
./integration-tests/smoke/logs/
+ ./integration-tests/smoke/db_dumps/
/tmp/gotest.log
publish_check_name: ${{ matrix.evm_node.product }}-${{ matrix.evm_node.eth_implementation }}
token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml
index ec9168133da..950add5596f 100644
--- a/.github/workflows/integration-tests.yml
+++ b/.github/workflows/integration-tests.yml
@@ -359,6 +359,7 @@ jobs:
artifacts_name: ${{ matrix.product.name }}-test-logs
artifacts_location: |
./integration-tests/smoke/logs/
+ ./integration-tests/smoke/db_dumps/
/tmp/gotest.log
publish_check_name: ${{ matrix.product.name }}
token: ${{ secrets.GITHUB_TOKEN }}
@@ -472,6 +473,7 @@ jobs:
artifacts_name: ${{ matrix.product.name }}-test-logs
artifacts_location: |
./integration-tests/smoke/logs/
+ ./integration-tests/smoke/db_dumps/
/tmp/gotest.log
publish_check_name: ${{ matrix.product.name }}
token: ${{ secrets.GITHUB_TOKEN }}
@@ -709,6 +711,7 @@ jobs:
artifacts_name: ${{ matrix.product.name }}${{ matrix.product.tag_suffix }}-test-logs
artifacts_location: |
./integration-tests/smoke/logs/
+ ./integration-tests/smoke/db_dumps/
/tmp/gotest.log
publish_check_name: ${{ matrix.product.name }}
token: ${{ secrets.GITHUB_TOKEN }}
@@ -777,7 +780,7 @@ jobs:
if: always()
uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/show-test-summary@75a9005952a9e905649cfb5a6971fd9429436acd # v2.3.25
with:
- test_directory: ./integration-tests/smoke/
+ test_directories: ./integration-tests/smoke/
### Used to check the required checks box when the matrix completes
eth-smoke-tests:
@@ -978,7 +981,7 @@ jobs:
DEFAULT_GRAFANA_BASE_URL: "http://localhost:8080/primary"
DEFAULT_GRAFANA_DASHBOARD_URL: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs"
DEFAULT_GRAFANA_BEARER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }}
-
+
- name: Upload Coverage Data
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
timeout-minutes: 2
diff --git a/.github/workflows/live-testnet-tests.yml b/.github/workflows/live-testnet-tests.yml
index a7eaa19f7f0..bcf4dfea199 100644
--- a/.github/workflows/live-testnet-tests.yml
+++ b/.github/workflows/live-testnet-tests.yml
@@ -302,7 +302,7 @@ jobs:
if: always()
uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/show-test-summary@75a9005952a9e905649cfb5a6971fd9429436acd # v2.3.25
with:
- test_directory: "./"
+ test_directories: "./"
bsc-testnet-smoke-tests:
environment: integration
diff --git a/.github/workflows/live-vrf-tests.yml b/.github/workflows/live-vrf-tests.yml
index faa4042e66e..28f5867954b 100644
--- a/.github/workflows/live-vrf-tests.yml
+++ b/.github/workflows/live-vrf-tests.yml
@@ -120,7 +120,7 @@ jobs:
needs: [build-chainlink, build-tests]
strategy:
fail-fast: false
- matrix:
+ matrix:
network: ${{fromJson(needs.build-tests.outputs.matrix)}}
name: Smoke Tests on ${{ matrix.network }}
runs-on: ubuntu-latest
@@ -190,4 +190,4 @@ jobs:
if: always()
uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/show-test-summary@75a9005952a9e905649cfb5a6971fd9429436acd # v2.3.25
with:
- test_directory: "./"
\ No newline at end of file
+ test_directories: "./"
diff --git a/.github/workflows/on-demand-keeper-smoke-tests.yml b/.github/workflows/on-demand-keeper-smoke-tests.yml
index 75359c7501f..626daf00579 100644
--- a/.github/workflows/on-demand-keeper-smoke-tests.yml
+++ b/.github/workflows/on-demand-keeper-smoke-tests.yml
@@ -149,6 +149,7 @@ jobs:
artifacts_name: ${{ matrix.product.name }}-test-logs
artifacts_location: |
./integration-tests/smoke/logs/
+ ./integration-tests/smoke/db_dumps/
/tmp/gotest.log
publish_check_name: ${{ matrix.product.name }}
token: ${{ secrets.GITHUB_TOKEN }}
@@ -286,4 +287,4 @@ jobs:
go test -run=NonExistentTest ./smoke/... || echo "ignore expected test failure"
go_mod_path: ./integration-tests/go.mod
cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }}
- cache_restore_only: "false"
\ No newline at end of file
+ cache_restore_only: "false"
diff --git a/.github/workflows/on-demand-vrfv2-eth2-clients-test.yml b/.github/workflows/on-demand-vrfv2-eth2-clients-test.yml
index 5f24fa81c3d..6d92acd9ea8 100644
--- a/.github/workflows/on-demand-vrfv2-eth2-clients-test.yml
+++ b/.github/workflows/on-demand-vrfv2-eth2-clients-test.yml
@@ -5,12 +5,12 @@ on:
base64Config:
description: base64-ed config
required: true
- type: string
+ type: string
test_secrets_override_key:
description: 'Key to run tests with custom test secrets'
required: false
- type: string
-
+ type: string
+
jobs:
vrfv2_smoke_test:
name: VRFV2 Smoke Test with custom EL client client
@@ -24,11 +24,11 @@ jobs:
env:
TEST_LOG_LEVEL: debug
REF_NAME: ${{ github.head_ref || github.ref_name }}
- steps:
+ steps:
- name: Checkout code
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with:
- fetch-depth: 0
+ fetch-depth: 0
- name: Mask base64 config
run: |
BASE64_CONFIG_OVERRIDE=$(jq -r '.inputs.base64Config' $GITHUB_EVENT_PATH)
@@ -37,7 +37,7 @@ jobs:
- name: Parse base64 config
uses: ./.github/actions/setup-parse-base64-config
with:
- base64Config: ${{ env.BASE64_CONFIG_OVERRIDE }}
+ base64Config: ${{ env.BASE64_CONFIG_OVERRIDE }}
- name: Send details to Step Summary
shell: bash
run: |
@@ -48,7 +48,7 @@ jobs:
echo "### Networks on which test was run" >>$GITHUB_STEP_SUMMARY
echo "\`${{ env.NETWORKS }}\`" >>$GITHUB_STEP_SUMMARY
echo "### Execution client used" >>$GITHUB_STEP_SUMMARY
- echo "\`${{ env.ETH2_EL_CLIENT }}\`" >>$GITHUB_STEP_SUMMARY
+ echo "\`${{ env.ETH2_EL_CLIENT }}\`" >>$GITHUB_STEP_SUMMARY
- name: Run Tests
uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@2967f2287bd3f3ddbac7b476e9568993df01796e # v2.3.27
with:
@@ -59,12 +59,14 @@ jobs:
cl_image_tag: ${{ env.CHAINLINK_VERSION }}
aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}
artifacts_name: vrf-test-logs
- artifacts_location: ./integration-tests/smoke/logs/
+ artifacts_location: |
+ ./integration-tests/smoke/logs/
+ ./integration-tests/smoke/db_dumps/
token: ${{ secrets.GITHUB_TOKEN }}
go_mod_path: ./integration-tests/go.mod
should_cleanup: false
QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
- QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
+ QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
QA_KUBECONFIG: ""
DEFAULT_CHAINLINK_IMAGE: ${{ env.CHAINLINK_IMAGE }}
DEFAULT_LOKI_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }}
@@ -72,4 +74,4 @@ jobs:
DEFAULT_LOKI_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }}
DEFAULT_GRAFANA_BASE_URL: "http://localhost:8080/primary"
DEFAULT_GRAFANA_DASHBOARD_URL: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs"
- DEFAULT_GRAFANA_BEARER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }}
\ No newline at end of file
+ DEFAULT_GRAFANA_BEARER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }}
diff --git a/.github/workflows/on-demand-vrfv2plus-eth2-clients-test.yml b/.github/workflows/on-demand-vrfv2plus-eth2-clients-test.yml
index 58ecd39763d..1e58002fc1b 100644
--- a/.github/workflows/on-demand-vrfv2plus-eth2-clients-test.yml
+++ b/.github/workflows/on-demand-vrfv2plus-eth2-clients-test.yml
@@ -5,11 +5,11 @@ on:
base64Config:
description: base64-ed config
required: true
- type: string
+ type: string
test_secrets_override_key:
description: 'Key to run tests with custom test secrets'
required: false
- type: string
+ type: string
jobs:
vrfv2plus_smoke_test:
@@ -24,7 +24,7 @@ jobs:
env:
TEST_LOG_LEVEL: debug
REF_NAME: ${{ github.head_ref || github.ref_name }}
- steps:
+ steps:
- name: Checkout code
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with:
@@ -48,7 +48,7 @@ jobs:
echo "### Networks on which test was run" >>$GITHUB_STEP_SUMMARY
echo "\`${{ env.NETWORKS }}\`" >>$GITHUB_STEP_SUMMARY
echo "### Execution client used" >>$GITHUB_STEP_SUMMARY
- echo "\`${{ env.ETH2_EL_CLIENT }}\`" >>$GITHUB_STEP_SUMMARY
+ echo "\`${{ env.ETH2_EL_CLIENT }}\`" >>$GITHUB_STEP_SUMMARY
- name: Run Tests
uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@2967f2287bd3f3ddbac7b476e9568993df01796e # v2.3.27
with:
@@ -59,12 +59,14 @@ jobs:
cl_image_tag: ${{ env.CHAINLINK_VERSION }}
aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}
artifacts_name: vrfplus-test-logs
- artifacts_location: ./integration-tests/smoke/logs/
+ artifacts_location: |
+ ./integration-tests/smoke/logs/
+ ./integration-tests/smoke/db_dumps/
token: ${{ secrets.GITHUB_TOKEN }}
go_mod_path: ./integration-tests/go.mod
should_cleanup: false
QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
- QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
+ QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
QA_KUBECONFIG: ""
DEFAULT_CHAINLINK_IMAGE: ${{ env.CHAINLINK_IMAGE }}
DEFAULT_LOKI_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }}
@@ -72,4 +74,4 @@ jobs:
DEFAULT_LOKI_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }}
DEFAULT_GRAFANA_BASE_URL: "http://localhost:8080/primary"
DEFAULT_GRAFANA_DASHBOARD_URL: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs"
- DEFAULT_GRAFANA_BEARER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }}
\ No newline at end of file
+ DEFAULT_GRAFANA_BEARER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }}
diff --git a/.github/workflows/run-e2e-tests-reusable-workflow.yml b/.github/workflows/run-e2e-tests-reusable-workflow.yml
index 2d3f31aa3b3..4c177f9a137 100644
--- a/.github/workflows/run-e2e-tests-reusable-workflow.yml
+++ b/.github/workflows/run-e2e-tests-reusable-workflow.yml
@@ -1,4 +1,4 @@
-# This is a reusable workflow that runs E2E tests for Chainlink.
+# This is a reusable workflow that runs E2E tests for Chainlink.
# It is not meant to be run on its own.
name: Run E2E Tests
on:
@@ -7,7 +7,7 @@ on:
chainlink_version:
description: 'Enter Chainlink version to use for the tests. Example: "v2.10.0" or sha'
required: false
- type: string
+ type: string
test_ids:
description: 'Run tests by test ids separated by commas. Example: "run_all_in_ocr_tests_go,run_TestOCRv2Request_in_ocr2_test_go". Check all test IDs in .github/e2e-tests.yml'
required: false
@@ -15,12 +15,12 @@ on:
test_list:
description: 'Base64 encoded list of tests (YML objects) to run. Example in run-automation-ondemand-e2e-tests.yml'
required: false
- type: string
+ type: string
test_workflow:
description: 'Run tests by workflow name. Example: "Run Nightly E2E Tests"'
required: false
type: string
- # TODO: Uncomment once Test Config does not have any secrets. Related ticket https://smartcontract-it.atlassian.net/browse/TT-1392
+ # TODO: Uncomment once Test Config does not have any secrets. Related ticket https://smartcontract-it.atlassian.net/browse/TT-1392
# test_config_override_base64:
# required: false
# description: The base64-encoded test config override
@@ -64,7 +64,7 @@ on:
description: 'Number of days to retain the test log. Default is 3 days'
required: false
type: number
- default: 3
+ default: 3
secrets:
TEST_SECRETS_OVERRIDE_BASE64:
required: false
@@ -89,17 +89,17 @@ on:
GRAFANA_INTERNAL_URL_SHORTENER_TOKEN:
required: true
GH_TOKEN:
- required: true
+ required: true
AWS_REGION:
required: true
AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN:
required: true
AWS_API_GW_HOST_GRAFANA:
- required: true
+ required: true
SLACK_BOT_TOKEN:
required: false
-
-env:
+
+env:
CHAINLINK_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink
QA_CHAINLINK_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink
GITHUB_SHA_PLUGINS: ${{ github.sha }}-plugins
@@ -127,7 +127,7 @@ jobs:
echo "Will run tests with custom test secrets"
fi
- name: Install jq
- run: sudo apt-get install jq
+ run: sudo apt-get install jq
- name: Create matrix for required Chainlink image versions
id: set-required-chainlink-image-versions-matrix
run: |
@@ -328,11 +328,11 @@ jobs:
tag_suffix: ''
check_image_exists: 'true'
AWS_REGION: ${{ secrets.QA_AWS_REGION }}
- AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
+ AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
# Build Chainlink plugins required for the tests
require-chainlink-plugin-versions-in-qa-ecr:
- name: Build Chainlink plugins
+ name: Build Chainlink plugins
needs: [validate-inputs, load-test-configurations]
if: ${{ needs.validate-inputs.outputs.require_chainlink_plugin_versions_in_qa_ecr_matrix != '' }}
runs-on: ubuntu-latest
@@ -357,14 +357,14 @@ jobs:
tag_suffix: '-plugins'
check_image_exists: 'true'
AWS_REGION: ${{ secrets.QA_AWS_REGION }}
- AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
+ AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
# Run Docker tests
run-docker-tests:
name: Run ${{ matrix.tests.id }}
needs: [load-test-configurations, require-chainlink-image-versions-in-qa-ecr, require-chainlink-plugin-versions-in-qa-ecr, get_latest_chainlink_release_version]
- # Run when none of the needed jobs fail or are cancelled (skipped or successful jobs are ok)
- if: ${{ needs.load-test-configurations.outputs.run-docker-tests == 'true' && always() && !failure() && !cancelled() }}
+ # Run when none of the needed jobs fail or are cancelled (skipped or successful jobs are ok)
+ if: ${{ needs.load-test-configurations.outputs.run-docker-tests == 'true' && always() && !failure() && !cancelled() }}
runs-on: ${{ matrix.tests.runs_on }}
strategy:
fail-fast: false
@@ -417,7 +417,7 @@ jobs:
test_command_to_run: ${{ matrix.tests.test_cmd }} 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci -singlepackage -hidepassingtests=false -hidepassinglogs
test_download_vendor_packages_command: cd ./integration-tests && go mod download
test_secrets_override_base64: ${{ secrets.TEST_SECRETS_OVERRIDE_BASE64 }}
- # TODO: Uncomment once Test Config does not have any secrets. Related ticket https://smartcontract-it.atlassian.net/browse/TT-1392
+ # TODO: Uncomment once Test Config does not have any secrets. Related ticket https://smartcontract-it.atlassian.net/browse/TT-1392
# test_config_override_base64: ${{ inputs.test_config_override_base64 }}
test_config_chainlink_version: ${{ matrix.tests.test_inputs.chainlink_version || inputs.chainlink_version || github.sha }}
test_config_chainlink_upgrade_version: ${{ matrix.tests.test_inputs.chainlink_upgrade_version }}
@@ -431,6 +431,7 @@ jobs:
artifacts_name: ${{ matrix.tests.id_sanitized }}-test-logs
artifacts_location: |
./integration-tests/smoke/logs/
+ ./integration-tests/smoke/db_dumps/
/tmp/gotest.log
publish_check_name: ${{ matrix.tests.id_sanitized }}
token: ${{ secrets.GH_TOKEN }}
@@ -462,13 +463,13 @@ jobs:
name: test_log_${{ matrix.tests.id_sanitized }}
path: /tmp/gotest.log
retention-days: ${{ inputs.test_log_upload_retention_days }}
- continue-on-error: true
+ continue-on-error: true
# Run K8s tests using old remote runner
prepare-remote-runner-test-image:
needs: [load-test-configurations, require-chainlink-image-versions-in-qa-ecr, require-chainlink-plugin-versions-in-qa-ecr]
- if: ${{ needs.load-test-configurations.outputs.run-k8s-tests == 'true' && always() && !failure() && !cancelled() }}
+ if: ${{ needs.load-test-configurations.outputs.run-k8s-tests == 'true' && always() && !failure() && !cancelled() }}
name: Prepare remote runner test image
runs-on: ubuntu-latest
environment: integration
@@ -484,7 +485,7 @@ jobs:
ENV_JOB_IMAGE_BASE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink-tests
steps:
- name: Checkout repository
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+ uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- name: Build Test Runner Image
uses: ./.github/actions/build-test-image
if: ${{ inputs.with_existing_remote_runner_version == '' }}
@@ -503,7 +504,7 @@ jobs:
run-k8s-runner-tests:
needs: [load-test-configurations, prepare-remote-runner-test-image, require-chainlink-image-versions-in-qa-ecr, require-chainlink-plugin-versions-in-qa-ecr, get_latest_chainlink_release_version]
- if: ${{ needs.load-test-configurations.outputs.run-k8s-tests == 'true' && always() && !failure() && !cancelled() }}
+ if: ${{ needs.load-test-configurations.outputs.run-k8s-tests == 'true' && always() && !failure() && !cancelled() }}
name: Run ${{ matrix.tests.id }}
runs-on: ${{ matrix.tests.runs_on }}
strategy:
@@ -517,7 +518,7 @@ jobs:
id-token: write
contents: read
env:
- LATEST_CHAINLINK_RELEASE_VERSION: ${{ needs.get_latest_chainlink_release_version.outputs.latest_chainlink_release_version }}
+ LATEST_CHAINLINK_RELEASE_VERSION: ${{ needs.get_latest_chainlink_release_version.outputs.latest_chainlink_release_version }}
steps:
- name: Collect Metrics
if: always()
@@ -558,7 +559,7 @@ jobs:
test_command_to_run: ${{ matrix.tests.test_cmd }} 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci -singlepackage -hidepassingtests=false -hidepassinglogs
test_download_vendor_packages_command: make gomod
test_secrets_override_base64: ${{ secrets.TEST_SECRETS_OVERRIDE_BASE64 }}
- # TODO: Uncomment once Test Config does not have any secrets. Related ticket https://smartcontract-it.atlassian.net/browse/TT-1392
+ # TODO: Uncomment once Test Config does not have any secrets. Related ticket https://smartcontract-it.atlassian.net/browse/TT-1392
# test_config_override_base64: ${{ inputs.test_config_override_base64 }}
test_config_chainlink_version: ${{ matrix.tests.test_inputs.chainlink_version || inputs.chainlink_version || github.sha }}
test_config_chainlink_upgrade_version: ${{ matrix.tests.test_inputs.chainlink_upgrade_version }}
@@ -574,7 +575,7 @@ jobs:
go_mod_path: ./integration-tests/go.mod
QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
- QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }}
+ QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }}
DEFAULT_CHAINLINK_IMAGE: ${{ matrix.tests.test_inputs.chainlink_image || env.CHAINLINK_IMAGE }}
DEFAULT_CHAINLINK_UPGRADE_IMAGE: ${{ matrix.tests.test_inputs.chainlink_upgrade_image }}
DEFAULT_LOKI_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }}
@@ -587,7 +588,7 @@ jobs:
DEFAULT_PYROSCOPE_SERVER_URL: ${{ matrix.tests.pyroscope_env != '' && secrets.QA_PYROSCOPE_INSTANCE || '' }}
DEFAULT_PYROSCOPE_KEY: ${{ matrix.tests.pyroscope_env != '' && secrets.QA_PYROSCOPE_KEY || '' }}
DEFAULT_PYROSCOPE_ENABLED: ${{ matrix.tests.pyroscope_env != '' && 'true' || '' }}
-
+
- name: Upload test log as Github artifact
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: inputs.test_log_upload_on_failure && failure()
@@ -595,7 +596,7 @@ jobs:
name: test_log_${{ matrix.tests.id_sanitized }}
path: /tmp/gotest.log
retention-days: ${{ inputs.test_log_upload_retention_days }}
- continue-on-error: true
+ continue-on-error: true
after_tests:
needs: [run-docker-tests, run-k8s-runner-tests]
@@ -670,12 +671,12 @@ jobs:
# steps:
# - name: Checkout repository
# uses: actions/checkout@v2
-
+
# - name: Set up Go
# uses: actions/setup-go@v2
# with:
# go-version: '1.18'
-
+
# - name: Load Runner Config
# run: echo "$RUNNER_CONFIG" > runner.toml
# env:
@@ -683,7 +684,7 @@ jobs:
# # Runner configuration
# detached_mode = true
# debug = false
-
+
# [[test_runs]]
# namespace = "dev-env"
# rbac_role_name = "dev-role"
@@ -708,7 +709,7 @@ jobs:
# WASP_LOG_LEVEL = "info"
# TEST_LOG_LEVEL = "info"
# MERCURY_TEST_LOG_LEVEL = "info"
-
+
# [[test_runs]]
# namespace = "prod-env"
# rbac_role_name = "prod-role"
@@ -733,7 +734,7 @@ jobs:
# WASP_LOG_LEVEL = "info"
# TEST_LOG_LEVEL = "info"
# MERCURY_TEST_LOG_LEVEL = "info"
-
+
# # Schedule the tests in K8s in remote runner
# - name: Run Kubernetes Tests
- # run: go run ./cmd/main.go run -c runner.toml
\ No newline at end of file
+ # run: go run ./cmd/main.go run -c runner.toml
diff --git a/.gitignore b/.gitignore
index 2b31c9d3a59..10636f88d81 100644
--- a/.gitignore
+++ b/.gitignore
@@ -69,6 +69,7 @@ ztarrepo.tar.gz
**/test-ledger/*
__debug_bin*
.test_summary/
+db_dumps/
.run.id
integration-tests/**/traces/
benchmark_report.csv
diff --git a/integration-tests/actions/private_network.go b/integration-tests/actions/private_network.go
index 70239a60060..f10371d41a6 100644
--- a/integration-tests/actions/private_network.go
+++ b/integration-tests/actions/private_network.go
@@ -4,6 +4,7 @@ import (
"github.com/rs/zerolog"
ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config"
+ ctf_config_types "github.com/smartcontractkit/chainlink-testing-framework/config/types"
ctf_test_env "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env"
)
@@ -12,8 +13,8 @@ func EthereumNetworkConfigFromConfig(l zerolog.Logger, config ctf_config.GlobalT
l.Warn().Msg("No TOML private ethereum network config found, will use old geth")
ethBuilder := ctf_test_env.NewEthereumNetworkBuilder()
network, err = ethBuilder.
- WithEthereumVersion(ctf_config.EthereumVersion_Eth1).
- WithExecutionLayer(ctf_config.ExecutionLayer_Geth).
+ WithEthereumVersion(ctf_config_types.EthereumVersion_Eth1).
+ WithExecutionLayer(ctf_config_types.ExecutionLayer_Geth).
Build()
return
diff --git a/integration-tests/chaos/ocr_chaos_test.go b/integration-tests/chaos/ocr_chaos_test.go
index 54a02cf64f3..200c97a795f 100644
--- a/integration-tests/chaos/ocr_chaos_test.go
+++ b/integration-tests/chaos/ocr_chaos_test.go
@@ -178,9 +178,7 @@ func TestOCRChaos(t *testing.T) {
require.NoError(t, err, "Error tearing down environment")
})
- ms, err := ctfClient.ConnectMockServer(testEnvironment)
- require.NoError(t, err, "Creating mockserver clients shouldn't fail")
-
+ ms := ctfClient.ConnectMockServer(testEnvironment)
linkContract, err := contracts.DeployLinkTokenContract(l, seth)
require.NoError(t, err, "Error deploying link token contract")
diff --git a/integration-tests/citool/cmd/create_test_config_cmd.go b/integration-tests/citool/cmd/create_test_config_cmd.go
index bc1b65bcdcd..c0cd91b05fb 100644
--- a/integration-tests/citool/cmd/create_test_config_cmd.go
+++ b/integration-tests/citool/cmd/create_test_config_cmd.go
@@ -8,6 +8,7 @@ import (
"github.com/spf13/cobra"
ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config"
+ ctf_config_types "github.com/smartcontractkit/chainlink-testing-framework/config/types"
)
var createTestConfigCmd = &cobra.Command{
@@ -148,13 +149,13 @@ var createTestConfigCmd = &cobra.Command{
privateEthereumNetworkCustomDockerImage = &oc.PrivateEthereumNetworkCustomDockerImages
}
if privateEthereumNetworkExecutionLayer != nil || privateEthereumNetworkEthereumVersion != nil || privateEthereumNetworkCustomDockerImage != nil {
- var el ctf_config.ExecutionLayer
+ var el ctf_config_types.ExecutionLayer
if privateEthereumNetworkExecutionLayer != nil {
- el = ctf_config.ExecutionLayer(*privateEthereumNetworkExecutionLayer)
+ el = ctf_config_types.ExecutionLayer(*privateEthereumNetworkExecutionLayer)
}
- var ev ctf_config.EthereumVersion
+ var ev ctf_config_types.EthereumVersion
if privateEthereumNetworkEthereumVersion != nil {
- ev = ctf_config.EthereumVersion(*privateEthereumNetworkEthereumVersion)
+ ev = ctf_config_types.EthereumVersion(*privateEthereumNetworkEthereumVersion)
}
var customImages map[ctf_config.ContainerType]string
if privateEthereumNetworkCustomDockerImage != nil {
diff --git a/integration-tests/citool/cmd/test_config_cmd_test.go b/integration-tests/citool/cmd/test_config_cmd_test.go
index fb1ef5332bd..79185e60822 100644
--- a/integration-tests/citool/cmd/test_config_cmd_test.go
+++ b/integration-tests/citool/cmd/test_config_cmd_test.go
@@ -9,6 +9,7 @@ import (
"github.com/stretchr/testify/assert"
ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config"
+ ctf_config_types "github.com/smartcontractkit/chainlink-testing-framework/config/types"
)
func TestCreateTestConfigCmd(t *testing.T) {
@@ -34,8 +35,8 @@ func TestCreateTestConfigCmd(t *testing.T) {
check: func(t *testing.T, tc *ctf_config.TestConfig) {
assert.NotNil(t, tc.PrivateEthereumNetwork)
assert.NotNil(t, tc.PrivateEthereumNetwork.ExecutionLayer)
- assert.Equal(t, ctf_config.ExecutionLayer("geth"), *tc.PrivateEthereumNetwork.ExecutionLayer)
- assert.Equal(t, ctf_config.EthereumVersion("1.10.0"), *tc.PrivateEthereumNetwork.EthereumVersion)
+ assert.Equal(t, ctf_config_types.ExecutionLayer("geth"), *tc.PrivateEthereumNetwork.ExecutionLayer)
+ assert.Equal(t, ctf_config_types.EthereumVersion("1.10.0"), *tc.PrivateEthereumNetwork.EthereumVersion)
},
},
{
diff --git a/integration-tests/docker/test_env/test_env_builder.go b/integration-tests/docker/test_env/test_env_builder.go
index df399cbb460..fbd4a7e8705 100644
--- a/integration-tests/docker/test_env/test_env_builder.go
+++ b/integration-tests/docker/test_env/test_env_builder.go
@@ -3,9 +3,11 @@ package test_env
import (
"fmt"
"os"
+ "path/filepath"
"slices"
"strings"
"testing"
+ "time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
@@ -255,7 +257,7 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
b.t.Cleanup(func() {
b.l.Info().Msg("Shutting down LogStream")
logPath, err := osutil.GetAbsoluteFolderPath("logs")
- if err != nil {
+ if err == nil {
b.l.Info().Str("Absolute path", logPath).Msg("LogStream logs folder location")
}
@@ -281,7 +283,7 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
LogScanningLoop:
for i := 0; i < b.clNodesCount; i++ {
// if something went wrong during environment setup we might not have all nodes, and we don't want an NPE
- if b == nil || b.te == nil || b.te.ClCluster == nil || b.te.ClCluster.Nodes == nil || b.te.ClCluster.Nodes[i] == nil || len(b.te.ClCluster.Nodes)-1 < i {
+ if b == nil || b.te == nil || b.te.ClCluster == nil || b.te.ClCluster.Nodes == nil || len(b.te.ClCluster.Nodes)-1 < i || b.te.ClCluster.Nodes[i] == nil {
continue
}
// ignore count return, because we are only interested in the error
@@ -308,6 +310,43 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
b.te.LogStream.SaveLogLocationInTestSummary()
}
b.l.Info().Msg("Finished shutting down LogStream")
+
+ if b.t.Failed() || *b.testConfig.GetLoggingConfig().TestLogCollect {
+ b.l.Info().Msg("Dump state of all Postgres DBs used by Chainlink Nodes")
+
+ dbDumpFolder := "db_dumps"
+ dbDumpPath := fmt.Sprintf("%s/%s-%s", dbDumpFolder, b.t.Name(), time.Now().Format("2006-01-02T15-04-05"))
+ if err := os.MkdirAll(dbDumpPath, os.ModePerm); err != nil {
+ b.l.Error().Err(err).Msg("Error creating folder for Postgres DB dump")
+ return
+ }
+
+ absDbDumpPath, err := osutil.GetAbsoluteFolderPath(dbDumpFolder)
+ if err == nil {
+ b.l.Info().Str("Absolute path", absDbDumpPath).Msg("PostgresDB dump folder location")
+ }
+
+ for i := 0; i < b.clNodesCount; i++ {
+ // if something went wrong during environment setup we might not have all nodes, and we don't want an NPE
+ if b == nil || b.te == nil || b.te.ClCluster == nil || b.te.ClCluster.Nodes == nil || len(b.te.ClCluster.Nodes)-1 < i || b.te.ClCluster.Nodes[i] == nil || b.te.ClCluster.Nodes[i].PostgresDb == nil {
+ continue
+ }
+
+ filePath := filepath.Join(dbDumpPath, fmt.Sprintf("postgres_db_dump_%s.sql", b.te.ClCluster.Nodes[i].ContainerName))
+ localDbDumpFile, err := os.Create(filePath)
+ if err != nil {
+ b.l.Error().Err(err).Msg("Error creating localDbDumpFile for Postgres DB dump")
+ _ = localDbDumpFile.Close()
+ continue
+ }
+
+ if err := b.te.ClCluster.Nodes[i].PostgresDb.ExecPgDumpFromContainer(localDbDumpFile); err != nil {
+ b.l.Error().Err(err).Msg("Error dumping Postgres DB")
+ }
+ _ = localDbDumpFile.Close()
+ }
+ b.l.Info().Msg("Finished dumping state of all Postgres DBs used by Chainlink Nodes")
+ }
})
} else {
b.l.Warn().Msg("LogStream won't be cleaned up, because either test instance is not set or cleanup type is set to none")
diff --git a/integration-tests/go.mod b/integration-tests/go.mod
index 0c0ce337695..3168a702b12 100644
--- a/integration-tests/go.mod
+++ b/integration-tests/go.mod
@@ -29,12 +29,12 @@ require (
github.com/slack-go/slack v0.12.2
github.com/smartcontractkit/chainlink-automation v1.0.4
github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c
- github.com/smartcontractkit/chainlink-testing-framework v1.33.0
+ github.com/smartcontractkit/chainlink-testing-framework v1.34.2
github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240405215812-5a72bc9af239
github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000
github.com/smartcontractkit/havoc/k8schaos v0.0.0-20240409145249-e78d20847e37
github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7
- github.com/smartcontractkit/seth v1.0.12
+ github.com/smartcontractkit/seth v1.1.1
github.com/smartcontractkit/wasp v0.4.5
github.com/spf13/cobra v1.8.0
github.com/stretchr/testify v1.9.0
@@ -90,6 +90,7 @@ require (
github.com/armon/go-metrics v0.4.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/avast/retry-go v3.0.0+incompatible // indirect
+ github.com/awalterschulze/gographviz v2.0.3+incompatible // indirect
github.com/aws/aws-sdk-go v1.45.25 // indirect
github.com/aws/constructs-go/constructs/v10 v10.1.255 // indirect
github.com/aws/jsii-runtime-go v1.75.0 // indirect
diff --git a/integration-tests/go.sum b/integration-tests/go.sum
index 8b7bd1d2a14..0de6dc281d1 100644
--- a/integration-tests/go.sum
+++ b/integration-tests/go.sum
@@ -209,6 +209,8 @@ github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHS
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA=
github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE=
+github.com/awalterschulze/gographviz v2.0.3+incompatible h1:9sVEXJBJLwGX7EQVhLm2elIKCm7P2YHFC8v6096G09E=
+github.com/awalterschulze/gographviz v2.0.3+incompatible/go.mod h1:GEV5wmg4YquNw7v1kkyoX9etIk8yVmXj+AkDHuuETHs=
github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
@@ -1498,8 +1500,8 @@ github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e/go.mod h1:hsFhop+SlQHKD+DEFjZrMJmbauT1A/wvtZIeeo4PxFU=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 h1:HyLTySm7BR+oNfZqDTkVJ25wnmcTtxBBD31UkFL+kEM=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799/go.mod h1:UVFRacRkP7O7TQAzFmR52v5mUlxf+G1ovMlCQAB/cHU=
-github.com/smartcontractkit/chainlink-testing-framework v1.33.0 h1:vHQODEdsq5AIbRiyZZ30de6uwJUNFXLYvCr+Odr8TIs=
-github.com/smartcontractkit/chainlink-testing-framework v1.33.0/go.mod h1:GrhHthZ5AmceF82+Ypw6Fov1EvB05JJbb1T0EKyO1x0=
+github.com/smartcontractkit/chainlink-testing-framework v1.34.2 h1:YL3ft7KJB7SAopdmJeyeR4/kv0j4jOdagNihXq8OZ38=
+github.com/smartcontractkit/chainlink-testing-framework v1.34.2/go.mod h1:hRZEDh2+afO8MSZb9qYNscmWb+3mHZf01J5ACZuIdTQ=
github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240405215812-5a72bc9af239 h1:Kk5OVlx/5g9q3Z3lhxytZS4/f8ds1MiNM8yaHgK3Oe8=
github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240405215812-5a72bc9af239/go.mod h1:DC8sQMyTlI/44UCTL8QWFwb0bYNoXCfjwCv2hMivYZU=
github.com/smartcontractkit/go-plugin v0.0.0-20240208201424-b3b91517de16 h1:TFe+FvzxClblt6qRfqEhUfa4kFQx5UobuoFGO2W4mMo=
@@ -1510,8 +1512,8 @@ github.com/smartcontractkit/havoc/k8schaos v0.0.0-20240409145249-e78d20847e37 h1
github.com/smartcontractkit/havoc/k8schaos v0.0.0-20240409145249-e78d20847e37/go.mod h1:/kFr0D7SI/vueXl1N03uzOun4nViGPFRyA5X6eL3jXw=
github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7 h1:e38V5FYE7DA1JfKXeD5Buo/7lczALuVXlJ8YNTAUxcw=
github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7/go.mod h1:fb1ZDVXACvu4frX3APHZaEBp0xi1DIm34DcA0CwTsZM=
-github.com/smartcontractkit/seth v1.0.12 h1:iVdgMx42XWanPPnBaM5StR4c1XsTr/0/B/kKRZL5BsY=
-github.com/smartcontractkit/seth v1.0.12/go.mod h1:thWtbLyW4nRHJGzC5heknQDORoJPErE15sF34LHkorg=
+github.com/smartcontractkit/seth v1.1.1 h1:6hvexjJD7ek8ht/CLlEwQcH21K2E/WEYwbSRdKInZmM=
+github.com/smartcontractkit/seth v1.1.1/go.mod h1:cDfKHi/hJLpO9sRpVbrflrHCOV+MJPAMJHloExJnIXk=
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 h1:yiKnypAqP8l0OX0P3klzZ7SCcBUxy5KqTAKZmQOvSQE=
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:q6f4fe39oZPdsh1i57WznEZgxd8siidMaSFq3wdPmVg=
github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 h1:Dai1bn+Q5cpeGMQwRdjOdVjG8mmFFROVkSKuUgBErRQ=
diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod
index 4395a3ce486..46b3dd293df 100644
--- a/integration-tests/load/go.mod
+++ b/integration-tests/load/go.mod
@@ -17,11 +17,11 @@ require (
github.com/slack-go/slack v0.12.2
github.com/smartcontractkit/chainlink-automation v1.0.4
github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c
- github.com/smartcontractkit/chainlink-testing-framework v1.33.0
+ github.com/smartcontractkit/chainlink-testing-framework v1.34.2
github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20240214231432-4ad5eb95178c
github.com/smartcontractkit/chainlink/v2 v2.9.0-beta0.0.20240216210048-da02459ddad8
github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7
- github.com/smartcontractkit/seth v1.0.12
+ github.com/smartcontractkit/seth v1.1.1
github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1
github.com/smartcontractkit/wasp v0.4.7
github.com/stretchr/testify v1.9.0
@@ -35,6 +35,7 @@ require (
cosmossdk.io/depinject v1.0.0-alpha.3 // indirect
cosmossdk.io/errors v1.0.0 // indirect
cosmossdk.io/math v1.0.1 // indirect
+ github.com/awalterschulze/gographviz v2.0.3+incompatible // indirect
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum
index 40152875414..0434fe8f42a 100644
--- a/integration-tests/load/go.sum
+++ b/integration-tests/load/go.sum
@@ -209,6 +209,8 @@ github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHS
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA=
github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE=
+github.com/awalterschulze/gographviz v2.0.3+incompatible h1:9sVEXJBJLwGX7EQVhLm2elIKCm7P2YHFC8v6096G09E=
+github.com/awalterschulze/gographviz v2.0.3+incompatible/go.mod h1:GEV5wmg4YquNw7v1kkyoX9etIk8yVmXj+AkDHuuETHs=
github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
@@ -1480,8 +1482,8 @@ github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e/go.mod h1:hsFhop+SlQHKD+DEFjZrMJmbauT1A/wvtZIeeo4PxFU=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 h1:HyLTySm7BR+oNfZqDTkVJ25wnmcTtxBBD31UkFL+kEM=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799/go.mod h1:UVFRacRkP7O7TQAzFmR52v5mUlxf+G1ovMlCQAB/cHU=
-github.com/smartcontractkit/chainlink-testing-framework v1.33.0 h1:vHQODEdsq5AIbRiyZZ30de6uwJUNFXLYvCr+Odr8TIs=
-github.com/smartcontractkit/chainlink-testing-framework v1.33.0/go.mod h1:GrhHthZ5AmceF82+Ypw6Fov1EvB05JJbb1T0EKyO1x0=
+github.com/smartcontractkit/chainlink-testing-framework v1.34.2 h1:YL3ft7KJB7SAopdmJeyeR4/kv0j4jOdagNihXq8OZ38=
+github.com/smartcontractkit/chainlink-testing-framework v1.34.2/go.mod h1:hRZEDh2+afO8MSZb9qYNscmWb+3mHZf01J5ACZuIdTQ=
github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240405215812-5a72bc9af239 h1:Kk5OVlx/5g9q3Z3lhxytZS4/f8ds1MiNM8yaHgK3Oe8=
github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240405215812-5a72bc9af239/go.mod h1:DC8sQMyTlI/44UCTL8QWFwb0bYNoXCfjwCv2hMivYZU=
github.com/smartcontractkit/go-plugin v0.0.0-20240208201424-b3b91517de16 h1:TFe+FvzxClblt6qRfqEhUfa4kFQx5UobuoFGO2W4mMo=
@@ -1492,8 +1494,8 @@ github.com/smartcontractkit/havoc/k8schaos v0.0.0-20240409145249-e78d20847e37 h1
github.com/smartcontractkit/havoc/k8schaos v0.0.0-20240409145249-e78d20847e37/go.mod h1:/kFr0D7SI/vueXl1N03uzOun4nViGPFRyA5X6eL3jXw=
github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7 h1:e38V5FYE7DA1JfKXeD5Buo/7lczALuVXlJ8YNTAUxcw=
github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7/go.mod h1:fb1ZDVXACvu4frX3APHZaEBp0xi1DIm34DcA0CwTsZM=
-github.com/smartcontractkit/seth v1.0.12 h1:iVdgMx42XWanPPnBaM5StR4c1XsTr/0/B/kKRZL5BsY=
-github.com/smartcontractkit/seth v1.0.12/go.mod h1:thWtbLyW4nRHJGzC5heknQDORoJPErE15sF34LHkorg=
+github.com/smartcontractkit/seth v1.1.1 h1:6hvexjJD7ek8ht/CLlEwQcH21K2E/WEYwbSRdKInZmM=
+github.com/smartcontractkit/seth v1.1.1/go.mod h1:cDfKHi/hJLpO9sRpVbrflrHCOV+MJPAMJHloExJnIXk=
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 h1:yiKnypAqP8l0OX0P3klzZ7SCcBUxy5KqTAKZmQOvSQE=
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:q6f4fe39oZPdsh1i57WznEZgxd8siidMaSFq3wdPmVg=
github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 h1:Dai1bn+Q5cpeGMQwRdjOdVjG8mmFFROVkSKuUgBErRQ=
diff --git a/integration-tests/testsetups/ocr.go b/integration-tests/testsetups/ocr.go
index 45c334bf69d..b38c39eebe4 100644
--- a/integration-tests/testsetups/ocr.go
+++ b/integration-tests/testsetups/ocr.go
@@ -277,7 +277,7 @@ func (o *OCRSoakTest) Setup(ocrTestConfig tt.OcrTestConfig) {
nodes, err := client.ConnectChainlinkNodes(o.testEnvironment)
require.NoError(o.t, err, "Connecting to chainlink nodes shouldn't fail")
o.bootstrapNode, o.workerNodes = nodes[0], nodes[1:]
- o.mockServer, err = ctf_client.ConnectMockServer(o.testEnvironment)
+ o.mockServer = ctf_client.ConnectMockServer(o.testEnvironment)
require.NoError(o.t, err, "Creating mockserver clients shouldn't fail")
linkContract, err := contracts.DeployLinkTokenContract(o.log, sethClient)
@@ -546,10 +546,7 @@ func (o *OCRSoakTest) LoadState() error {
}
}
- o.mockServer, err = ctf_client.ConnectMockServerURL(testState.MockServerURL)
- if err != nil {
- return err
- }
+ o.mockServer = ctf_client.ConnectMockServerURL(testState.MockServerURL)
return err
}
From e014a137b8a11a39e943cbee1705076bd2e5891a Mon Sep 17 00:00:00 2001
From: Bolek <1416262+bolekk@users.noreply.github.com>
Date: Tue, 6 Aug 2024 08:38:04 -0700
Subject: [PATCH 15/52] [KS-411] Extra validation for FeedIDs in Streams Codec
(#14038)
Make sure the ID extracted from FullReport matches the top-level one.
---
core/capabilities/streams/codec.go | 4 ++++
core/capabilities/streams/codec_test.go | 16 +++++++++++++++-
2 files changed, 19 insertions(+), 1 deletion(-)
diff --git a/core/capabilities/streams/codec.go b/core/capabilities/streams/codec.go
index d2bc451a39f..26011cb7f35 100644
--- a/core/capabilities/streams/codec.go
+++ b/core/capabilities/streams/codec.go
@@ -1,6 +1,7 @@
package streams
import (
+ "encoding/hex"
"fmt"
"github.com/ethereum/go-ethereum/common"
@@ -34,6 +35,9 @@ func (c *codec) Unwrap(wrapped values.Value) ([]datastreams.FeedReport, error) {
if err2 != nil {
return nil, fmt.Errorf("failed to decode: %v", err2)
}
+ if decoded.FeedId != id.Bytes() {
+ return nil, fmt.Errorf("feed ID mismatch: FeedID: %s, FullReport.FeedId: %s", id, hex.EncodeToString(decoded.FeedId[:]))
+ }
dest[i].BenchmarkPrice = decoded.BenchmarkPrice.Bytes()
dest[i].ObservationTimestamp = int64(decoded.ObservationsTimestamp)
}
diff --git a/core/capabilities/streams/codec_test.go b/core/capabilities/streams/codec_test.go
index e3ada731e43..02ec474fec9 100644
--- a/core/capabilities/streams/codec_test.go
+++ b/core/capabilities/streams/codec_test.go
@@ -69,7 +69,7 @@ func TestCodec_WrapUnwrap(t *testing.T) {
_, err = codec.Unwrap(values.NewBool(true))
require.Error(t, err)
- // correct reports byt wrong signatures
+ // correct reports but wrong signatures
unwrapped, err := codec.Unwrap(wrapped)
require.NoError(t, err)
require.Equal(t, 2, len(unwrapped))
@@ -85,6 +85,20 @@ func TestCodec_WrapUnwrap(t *testing.T) {
for _, report := range unwrapped {
require.NoError(t, codec.Validate(report, allowedSigners, 2))
}
+
+ // invalid FeedID
+ wrappedInvalid, err := codec.Wrap([]datastreams.FeedReport{
+ {
+ FeedID: id2Str, // ID #2 doesn't match what's in report #1
+ FullReport: report1,
+ ReportContext: rawCtx,
+ Signatures: [][]byte{signatureK1R1, signatureK2R1},
+ },
+ })
+ require.NoError(t, err)
+ _, err = codec.Unwrap(wrappedInvalid)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "feed ID mismatch")
}
func newFeedID(t *testing.T) ([32]byte, string) {
From 537d2ec1ad846898f820874442c3f69915096bad Mon Sep 17 00:00:00 2001
From: Matthew Pendrey
Date: Tue, 6 Aug 2024 18:08:44 +0100
Subject: [PATCH 16/52] fix data race in syncer/launcher (#14050)
---
.changeset/twelve-balloons-turn.md | 5 +++++
core/capabilities/registry.go | 2 ++
2 files changed, 7 insertions(+)
create mode 100644 .changeset/twelve-balloons-turn.md
diff --git a/.changeset/twelve-balloons-turn.md b/.changeset/twelve-balloons-turn.md
new file mode 100644
index 00000000000..f4f0e2670e9
--- /dev/null
+++ b/.changeset/twelve-balloons-turn.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+#internal fix data race in syncer launcher
diff --git a/core/capabilities/registry.go b/core/capabilities/registry.go
index 8a99450c096..d6891c81ab9 100644
--- a/core/capabilities/registry.go
+++ b/core/capabilities/registry.go
@@ -37,6 +37,8 @@ func (r *Registry) LocalNode(ctx context.Context) (capabilities.Node, error) {
}
func (r *Registry) ConfigForCapability(ctx context.Context, capabilityID string, donID uint32) (capabilities.CapabilityConfiguration, error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
if r.metadataRegistry == nil {
return capabilities.CapabilityConfiguration{}, errors.New("metadataRegistry information not available")
}
From 0a7372cdcd287862069e26c591a5d1ade36d45cf Mon Sep 17 00:00:00 2001
From: Aaron Lu <50029043+aalu1418@users.noreply.github.com>
Date: Tue, 6 Aug 2024 11:43:37 -0600
Subject: [PATCH 17/52] update solana e2e test build deps (#13978)
* bump solana commit
* replace projectserum with backpackapp
* handle tagged versions
* quick solana bump again
* use tagged version
---
.github/workflows/integration-tests.yml | 4 ++--
core/scripts/go.mod | 2 +-
core/scripts/go.sum | 4 ++--
go.mod | 2 +-
go.sum | 4 ++--
integration-tests/go.mod | 2 +-
integration-tests/go.sum | 4 ++--
integration-tests/load/go.mod | 2 +-
integration-tests/load/go.sum | 4 ++--
9 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml
index 950add5596f..96a2a7a39f9 100644
--- a/.github/workflows/integration-tests.yml
+++ b/.github/workflows/integration-tests.yml
@@ -1040,7 +1040,7 @@ jobs:
id: getsha
run: |
cd solanapath
- full_sha=$(git rev-parse ${{steps.getshortsha.outputs.short_sha}})
+ full_sha=$(git rev-parse ${{steps.getshortsha.outputs.short_sha}}^{}) # additional suffix allows handling tagged versions as well
if [ -z "${full_sha}" ]; then
echo "Error: could not get the full sha from the short sha using git, look above for error(s)"
exit 1
@@ -1125,7 +1125,7 @@ jobs:
uses: smartcontractkit/chainlink-solana/.github/actions/build_contract_artifacts@46b1311a5a83f33d08ffa8e1e0ab04f9ad51665d # node20 update on may 10, 2024
with:
ref: ${{ needs.get_solana_sha.outputs.sha }}
- image: projectserum/build
+ image: backpackapp/build
image-version: ${{ needs.get_projectserum_version.outputs.projectserum_version }}
solana-build-test-image:
diff --git a/core/scripts/go.mod b/core/scripts/go.mod
index 0b7f510bcd8..45b5ee59059 100644
--- a/core/scripts/go.mod
+++ b/core/scripts/go.mod
@@ -273,7 +273,7 @@ require (
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 // indirect
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f // indirect
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 // indirect
- github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e // indirect
+ github.com/smartcontractkit/chainlink-solana v1.1.0 // indirect
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 // indirect
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 // indirect
github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 // indirect
diff --git a/core/scripts/go.sum b/core/scripts/go.sum
index 6abc303888f..dff6f3f356a 100644
--- a/core/scripts/go.sum
+++ b/core/scripts/go.sum
@@ -1192,8 +1192,8 @@ github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f/go.mod h1:V/86loaFSH0dqqUEHqyXVbyNqDRSjvcf9BRomWFTljU=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 h1:BCHu4pNP6arrcHLEWx61XjLaonOd2coQNyL0NTUcaMc=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827/go.mod h1:OPX+wC2TWQsyLNpR7daMt2vMpmsNcoBxbZyGTHr6tiA=
-github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e h1:PzwzlHNv1YbJ6ZIdl/pIFRoOuOS4V4WLvjZvFUnZFL4=
-github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e/go.mod h1:hsFhop+SlQHKD+DEFjZrMJmbauT1A/wvtZIeeo4PxFU=
+github.com/smartcontractkit/chainlink-solana v1.1.0 h1:+xBeVqx2x0Sx3CBbF8RLSblczsxJDYTkta8h7i8+23I=
+github.com/smartcontractkit/chainlink-solana v1.1.0/go.mod h1:Ml88TJTwZCj6yHDkAEN/EhxVutzSlk+kDZgfibRIqF0=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 h1:HyLTySm7BR+oNfZqDTkVJ25wnmcTtxBBD31UkFL+kEM=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799/go.mod h1:UVFRacRkP7O7TQAzFmR52v5mUlxf+G1ovMlCQAB/cHU=
github.com/smartcontractkit/go-plugin v0.0.0-20240208201424-b3b91517de16 h1:TFe+FvzxClblt6qRfqEhUfa4kFQx5UobuoFGO2W4mMo=
diff --git a/go.mod b/go.mod
index 4b216ddc0d4..78ec7d29ee1 100644
--- a/go.mod
+++ b/go.mod
@@ -78,7 +78,7 @@ require (
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827
- github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e
+ github.com/smartcontractkit/chainlink-solana v1.1.0
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799
github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1
diff --git a/go.sum b/go.sum
index 6b0ec5aa5c8..f5ef0f91e70 100644
--- a/go.sum
+++ b/go.sum
@@ -1147,8 +1147,8 @@ github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f/go.mod h1:V/86loaFSH0dqqUEHqyXVbyNqDRSjvcf9BRomWFTljU=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 h1:BCHu4pNP6arrcHLEWx61XjLaonOd2coQNyL0NTUcaMc=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827/go.mod h1:OPX+wC2TWQsyLNpR7daMt2vMpmsNcoBxbZyGTHr6tiA=
-github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e h1:PzwzlHNv1YbJ6ZIdl/pIFRoOuOS4V4WLvjZvFUnZFL4=
-github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e/go.mod h1:hsFhop+SlQHKD+DEFjZrMJmbauT1A/wvtZIeeo4PxFU=
+github.com/smartcontractkit/chainlink-solana v1.1.0 h1:+xBeVqx2x0Sx3CBbF8RLSblczsxJDYTkta8h7i8+23I=
+github.com/smartcontractkit/chainlink-solana v1.1.0/go.mod h1:Ml88TJTwZCj6yHDkAEN/EhxVutzSlk+kDZgfibRIqF0=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 h1:HyLTySm7BR+oNfZqDTkVJ25wnmcTtxBBD31UkFL+kEM=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799/go.mod h1:UVFRacRkP7O7TQAzFmR52v5mUlxf+G1ovMlCQAB/cHU=
github.com/smartcontractkit/go-plugin v0.0.0-20240208201424-b3b91517de16 h1:TFe+FvzxClblt6qRfqEhUfa4kFQx5UobuoFGO2W4mMo=
diff --git a/integration-tests/go.mod b/integration-tests/go.mod
index 3168a702b12..a648e46e9f0 100644
--- a/integration-tests/go.mod
+++ b/integration-tests/go.mod
@@ -380,7 +380,7 @@ require (
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 // indirect
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f // indirect
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 // indirect
- github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e // indirect
+ github.com/smartcontractkit/chainlink-solana v1.1.0 // indirect
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 // indirect
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 // indirect
github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 // indirect
diff --git a/integration-tests/go.sum b/integration-tests/go.sum
index 0de6dc281d1..03e4a9082ff 100644
--- a/integration-tests/go.sum
+++ b/integration-tests/go.sum
@@ -1496,8 +1496,8 @@ github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f/go.mod h1:V/86loaFSH0dqqUEHqyXVbyNqDRSjvcf9BRomWFTljU=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 h1:BCHu4pNP6arrcHLEWx61XjLaonOd2coQNyL0NTUcaMc=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827/go.mod h1:OPX+wC2TWQsyLNpR7daMt2vMpmsNcoBxbZyGTHr6tiA=
-github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e h1:PzwzlHNv1YbJ6ZIdl/pIFRoOuOS4V4WLvjZvFUnZFL4=
-github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e/go.mod h1:hsFhop+SlQHKD+DEFjZrMJmbauT1A/wvtZIeeo4PxFU=
+github.com/smartcontractkit/chainlink-solana v1.1.0 h1:+xBeVqx2x0Sx3CBbF8RLSblczsxJDYTkta8h7i8+23I=
+github.com/smartcontractkit/chainlink-solana v1.1.0/go.mod h1:Ml88TJTwZCj6yHDkAEN/EhxVutzSlk+kDZgfibRIqF0=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 h1:HyLTySm7BR+oNfZqDTkVJ25wnmcTtxBBD31UkFL+kEM=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799/go.mod h1:UVFRacRkP7O7TQAzFmR52v5mUlxf+G1ovMlCQAB/cHU=
github.com/smartcontractkit/chainlink-testing-framework v1.34.2 h1:YL3ft7KJB7SAopdmJeyeR4/kv0j4jOdagNihXq8OZ38=
diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod
index 46b3dd293df..1aa754f8cfa 100644
--- a/integration-tests/load/go.mod
+++ b/integration-tests/load/go.mod
@@ -372,7 +372,7 @@ require (
github.com/smartcontractkit/chain-selectors v1.0.10 // indirect
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f // indirect
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 // indirect
- github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e // indirect
+ github.com/smartcontractkit/chainlink-solana v1.1.0 // indirect
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 // indirect
github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240405215812-5a72bc9af239 // indirect
github.com/smartcontractkit/havoc/k8schaos v0.0.0-20240409145249-e78d20847e37 // indirect
diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum
index 0434fe8f42a..698623c50f1 100644
--- a/integration-tests/load/go.sum
+++ b/integration-tests/load/go.sum
@@ -1478,8 +1478,8 @@ github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f/go.mod h1:V/86loaFSH0dqqUEHqyXVbyNqDRSjvcf9BRomWFTljU=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 h1:BCHu4pNP6arrcHLEWx61XjLaonOd2coQNyL0NTUcaMc=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827/go.mod h1:OPX+wC2TWQsyLNpR7daMt2vMpmsNcoBxbZyGTHr6tiA=
-github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e h1:PzwzlHNv1YbJ6ZIdl/pIFRoOuOS4V4WLvjZvFUnZFL4=
-github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240712132946-267a37c5ac6e/go.mod h1:hsFhop+SlQHKD+DEFjZrMJmbauT1A/wvtZIeeo4PxFU=
+github.com/smartcontractkit/chainlink-solana v1.1.0 h1:+xBeVqx2x0Sx3CBbF8RLSblczsxJDYTkta8h7i8+23I=
+github.com/smartcontractkit/chainlink-solana v1.1.0/go.mod h1:Ml88TJTwZCj6yHDkAEN/EhxVutzSlk+kDZgfibRIqF0=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 h1:HyLTySm7BR+oNfZqDTkVJ25wnmcTtxBBD31UkFL+kEM=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799/go.mod h1:UVFRacRkP7O7TQAzFmR52v5mUlxf+G1ovMlCQAB/cHU=
github.com/smartcontractkit/chainlink-testing-framework v1.34.2 h1:YL3ft7KJB7SAopdmJeyeR4/kv0j4jOdagNihXq8OZ38=
From c2c31c05ac3fe19d4df8313af25eb740953b935a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ot=C3=A1vio=20Migliavacca=20Madalosso?=
Date: Tue, 6 Aug 2024 15:02:57 -0300
Subject: [PATCH 18/52] Set PriceMin to match pip-35 definition (#14014)
---
.changeset/tasty-walls-collect.md | 5 +++++
core/chains/evm/config/toml/defaults/Polygon_Amoy.toml | 4 +++-
core/chains/evm/config/toml/defaults/Polygon_Mumbai.toml | 3 ++-
docs/CONFIG.md | 8 ++++----
4 files changed, 14 insertions(+), 6 deletions(-)
create mode 100644 .changeset/tasty-walls-collect.md
diff --git a/.changeset/tasty-walls-collect.md b/.changeset/tasty-walls-collect.md
new file mode 100644
index 00000000000..eefe4441507
--- /dev/null
+++ b/.changeset/tasty-walls-collect.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+#updated Polygon configs to match PIP-35
diff --git a/core/chains/evm/config/toml/defaults/Polygon_Amoy.toml b/core/chains/evm/config/toml/defaults/Polygon_Amoy.toml
index 77438343e29..bca42d9b403 100644
--- a/core/chains/evm/config/toml/defaults/Polygon_Amoy.toml
+++ b/core/chains/evm/config/toml/defaults/Polygon_Amoy.toml
@@ -11,8 +11,10 @@ NoNewFinalizedHeadsThreshold = '12m'
MaxQueued = 5000
[GasEstimator]
-EIP1559DynamicFees = true
+PriceDefault = '25 gwei'
PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
+PriceMin = '25 gwei'
+EIP1559DynamicFees = true
BumpMin = '20 gwei'
BumpThreshold = 5
diff --git a/core/chains/evm/config/toml/defaults/Polygon_Mumbai.toml b/core/chains/evm/config/toml/defaults/Polygon_Mumbai.toml
index ce0f8861de2..b9c993c6b23 100644
--- a/core/chains/evm/config/toml/defaults/Polygon_Mumbai.toml
+++ b/core/chains/evm/config/toml/defaults/Polygon_Mumbai.toml
@@ -11,8 +11,9 @@ RPCDefaultBatchSize = 100
MaxQueued = 5000
[GasEstimator]
-PriceDefault = '1 gwei'
+PriceDefault = '25 gwei'
PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
+PriceMin = '25 gwei'
BumpMin = '20 gwei'
BumpThreshold = 5
diff --git a/docs/CONFIG.md b/docs/CONFIG.md
index 47935390ce8..74afcec7400 100644
--- a/docs/CONFIG.md
+++ b/docs/CONFIG.md
@@ -6322,9 +6322,9 @@ Enabled = true
[GasEstimator]
Mode = 'BlockHistory'
-PriceDefault = '1 gwei'
+PriceDefault = '25 gwei'
PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
-PriceMin = '1 gwei'
+PriceMin = '25 gwei'
LimitDefault = 500000
LimitMax = 500000
LimitMultiplier = '1'
@@ -6415,9 +6415,9 @@ Enabled = true
[GasEstimator]
Mode = 'BlockHistory'
-PriceDefault = '20 gwei'
+PriceDefault = '25 gwei'
PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
-PriceMin = '1 gwei'
+PriceMin = '25 gwei'
LimitDefault = 500000
LimitMax = 500000
LimitMultiplier = '1'
From ce90bc32f562e92af3d22c895446a963109c36e3 Mon Sep 17 00:00:00 2001
From: FelixFan1992
Date: Tue, 6 Aug 2024 14:56:12 -0400
Subject: [PATCH 19/52] auto: adjust cron contract imports (#13927)
* auto: adjust cron contract imports
* update
---
contracts/.changeset/seven-donkeys-live.md | 5 +++++
contracts/src/v0.8/automation/upkeeps/CronUpkeep.sol | 12 ++++++------
.../v0.8/automation/upkeeps/CronUpkeepDelegate.sol | 2 +-
.../v0.8/automation/upkeeps/CronUpkeepFactory.sol | 6 +++---
4 files changed, 15 insertions(+), 10 deletions(-)
create mode 100644 contracts/.changeset/seven-donkeys-live.md
diff --git a/contracts/.changeset/seven-donkeys-live.md b/contracts/.changeset/seven-donkeys-live.md
new file mode 100644
index 00000000000..141588f5b9f
--- /dev/null
+++ b/contracts/.changeset/seven-donkeys-live.md
@@ -0,0 +1,5 @@
+---
+'@chainlink/contracts': patch
+---
+
+improve cron contract imports
diff --git a/contracts/src/v0.8/automation/upkeeps/CronUpkeep.sol b/contracts/src/v0.8/automation/upkeeps/CronUpkeep.sol
index 614b84635ab..b9eda1f4001 100644
--- a/contracts/src/v0.8/automation/upkeeps/CronUpkeep.sol
+++ b/contracts/src/v0.8/automation/upkeeps/CronUpkeep.sol
@@ -18,12 +18,12 @@
pragma solidity 0.8.6;
-import "@openzeppelin/contracts/security/Pausable.sol";
-import "@openzeppelin/contracts/proxy/Proxy.sol";
-import "@openzeppelin/contracts/utils/structs/EnumerableSet.sol";
-import "../../shared/access/ConfirmedOwner.sol";
-import "../KeeperBase.sol";
-import "../interfaces/KeeperCompatibleInterface.sol";
+import {Pausable} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/security/Pausable.sol";
+import {Proxy} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/proxy/Proxy.sol";
+import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol";
+import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol";
+import {KeeperBase as KeeperBase} from "../KeeperBase.sol";
+import {KeeperCompatibleInterface as KeeperCompatibleInterface} from "../interfaces/KeeperCompatibleInterface.sol";
import {Cron as CronInternal, Spec} from "../libraries/internal/Cron.sol";
import {Cron as CronExternal} from "../libraries/external/Cron.sol";
diff --git a/contracts/src/v0.8/automation/upkeeps/CronUpkeepDelegate.sol b/contracts/src/v0.8/automation/upkeeps/CronUpkeepDelegate.sol
index ec2c2a0fd91..ed8d031c86f 100644
--- a/contracts/src/v0.8/automation/upkeeps/CronUpkeepDelegate.sol
+++ b/contracts/src/v0.8/automation/upkeeps/CronUpkeepDelegate.sol
@@ -2,7 +2,7 @@
pragma solidity 0.8.6;
-import "@openzeppelin/contracts/utils/structs/EnumerableSet.sol";
+import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol";
import {Cron, Spec} from "../libraries/internal/Cron.sol";
/**
diff --git a/contracts/src/v0.8/automation/upkeeps/CronUpkeepFactory.sol b/contracts/src/v0.8/automation/upkeeps/CronUpkeepFactory.sol
index cd9ae5d7a92..2b6e97e4d03 100644
--- a/contracts/src/v0.8/automation/upkeeps/CronUpkeepFactory.sol
+++ b/contracts/src/v0.8/automation/upkeeps/CronUpkeepFactory.sol
@@ -2,9 +2,9 @@
pragma solidity 0.8.6;
-import "./CronUpkeep.sol";
-import "./CronUpkeepDelegate.sol";
-import "../../shared/access/ConfirmedOwner.sol";
+import {CronUpkeep} from "./CronUpkeep.sol";
+import {CronUpkeepDelegate} from "./CronUpkeepDelegate.sol";
+import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol";
import {Spec, Cron as CronExternal} from "../libraries/external/Cron.sol";
/**
From 2312827156f24fa4a6e420aec12e5a3aeac81e2b Mon Sep 17 00:00:00 2001
From: amit-momin <108959691+amit-momin@users.noreply.github.com>
Date: Tue, 6 Aug 2024 14:33:16 -0500
Subject: [PATCH 20/52] Add finalizer component to TXM (#13638)
* Added a finalizer component that assesses confirmed transactions for finality
* Moved Finalizer component into EVM code and addressed feedback
* Fixed linting and renumbered sql migration
* Added limit to Finalizer RPC batch calls
* Cleaned up unneeded code
* Renumbered sql migration
* Updated Finalizer to use LatestAndFinalizedBlock method from HeadTracker
* Fixed health check tests and fixed linting
* Fixed lint error
* Fixed lint error
* Added finalized state to replace finalized column
* Updated finalizer batch RPC validation to use blockByNumber and added filter to DB query
* Updated reaper to reap old confirmed transactions
* Fixed migration test
* Fixed lint error
* Changed log level
* Renumbered sql migration
* Updated Finalizer to only process on new finalized heads and improved query performance
* Fixed mocks
* Updated TxStore method name and fixed mocks
* Fixed mock
* Updated TxStore method to exit early
* Removed unused error
---------
Co-authored-by: Silas Lenihan <32529249+silaslenihan@users.noreply.github.com>
---
.changeset/itchy-bugs-clean.md | 5 +
common/txmgr/models.go | 1 +
common/txmgr/reaper.go | 9 +-
common/txmgr/txmgr.go | 20 +-
common/txmgr/types/config.go | 6 -
common/txmgr/types/finalizer.go | 12 +
.../txmgr/types/mocks/reaper_chain_config.go | 77 -----
common/txmgr/types/mocks/tx_store.go | 80 +----
common/txmgr/types/tx_store.go | 3 +-
.../evm/headtracker/simulated_head_tracker.go | 29 ++
core/chains/evm/txmgr/builder.go | 12 +-
core/chains/evm/txmgr/client.go | 4 +
core/chains/evm/txmgr/config.go | 1 -
core/chains/evm/txmgr/evm_tx_store.go | 124 ++++++--
core/chains/evm/txmgr/evm_tx_store_test.go | 83 +++--
core/chains/evm/txmgr/finalizer.go | 294 ++++++++++++++++++
core/chains/evm/txmgr/finalizer_test.go | 240 ++++++++++++++
core/chains/evm/txmgr/mocks/evm_tx_store.go | 190 ++++++-----
core/chains/evm/txmgr/models.go | 3 +-
core/chains/evm/txmgr/reaper_test.go | 64 ++--
core/chains/evm/txmgr/test_helpers.go | 13 +-
core/chains/evm/txmgr/txmgr_test.go | 72 ++++-
core/chains/legacyevm/chain.go | 2 +-
core/chains/legacyevm/evm_txm.go | 5 +-
.../promreporter/prom_reporter_test.go | 3 +-
core/services/vrf/delegate_test.go | 2 +-
core/services/vrf/v2/integration_v2_test.go | 2 +-
core/services/vrf/v2/listener_v2_test.go | 2 +-
core/store/migrate/migrate_test.go | 11 +
.../0248_add_tx_finalized_state.sql | 135 ++++++++
core/web/testdata/body/health.html | 3 +
core/web/testdata/body/health.json | 9 +
core/web/testdata/body/health.txt | 1 +
testdata/scripts/health/multi-chain.txtar | 10 +
34 files changed, 1170 insertions(+), 357 deletions(-)
create mode 100644 .changeset/itchy-bugs-clean.md
create mode 100644 common/txmgr/types/finalizer.go
delete mode 100644 common/txmgr/types/mocks/reaper_chain_config.go
create mode 100644 core/chains/evm/txmgr/finalizer.go
create mode 100644 core/chains/evm/txmgr/finalizer_test.go
create mode 100644 core/store/migrate/migrations/0248_add_tx_finalized_state.sql
diff --git a/.changeset/itchy-bugs-clean.md b/.changeset/itchy-bugs-clean.md
new file mode 100644
index 00000000000..beeed8ace1e
--- /dev/null
+++ b/.changeset/itchy-bugs-clean.md
@@ -0,0 +1,5 @@
+---
+"chainlink": minor
+---
+
+Introduced finalized transaction state. Added a finalizer component to the TXM to mark transactions as finalized. #internal
diff --git a/common/txmgr/models.go b/common/txmgr/models.go
index dd121a2c7c4..ca5e7d4f251 100644
--- a/common/txmgr/models.go
+++ b/common/txmgr/models.go
@@ -11,4 +11,5 @@ const (
TxUnconfirmed = txmgrtypes.TxState("unconfirmed")
TxConfirmed = txmgrtypes.TxState("confirmed")
TxConfirmedMissingReceipt = txmgrtypes.TxState("confirmed_missing_receipt")
+ TxFinalized = txmgrtypes.TxState("finalized")
)
diff --git a/common/txmgr/reaper.go b/common/txmgr/reaper.go
index 932b58f6430..0c797548b16 100644
--- a/common/txmgr/reaper.go
+++ b/common/txmgr/reaper.go
@@ -14,7 +14,6 @@ import (
// Reaper handles periodic database cleanup for Txm
type Reaper[CHAIN_ID types.ID] struct {
store txmgrtypes.TxHistoryReaper[CHAIN_ID]
- config txmgrtypes.ReaperChainConfig
txConfig txmgrtypes.ReaperTransactionsConfig
chainID CHAIN_ID
log logger.Logger
@@ -25,10 +24,9 @@ type Reaper[CHAIN_ID types.ID] struct {
}
// NewReaper instantiates a new reaper object
-func NewReaper[CHAIN_ID types.ID](lggr logger.Logger, store txmgrtypes.TxHistoryReaper[CHAIN_ID], config txmgrtypes.ReaperChainConfig, txConfig txmgrtypes.ReaperTransactionsConfig, chainID CHAIN_ID) *Reaper[CHAIN_ID] {
+func NewReaper[CHAIN_ID types.ID](lggr logger.Logger, store txmgrtypes.TxHistoryReaper[CHAIN_ID], txConfig txmgrtypes.ReaperTransactionsConfig, chainID CHAIN_ID) *Reaper[CHAIN_ID] {
r := &Reaper[CHAIN_ID]{
store,
- config,
txConfig,
chainID,
logger.Named(lggr, "Reaper"),
@@ -103,13 +101,12 @@ func (r *Reaper[CHAIN_ID]) ReapTxes(headNum int64) error {
r.log.Debug("Transactions.ReaperThreshold set to 0; skipping ReapTxes")
return nil
}
- minBlockNumberToKeep := headNum - int64(r.config.FinalityDepth())
mark := time.Now()
timeThreshold := mark.Add(-threshold)
- r.log.Debugw(fmt.Sprintf("reaping old txes created before %s", timeThreshold.Format(time.RFC3339)), "ageThreshold", threshold, "timeThreshold", timeThreshold, "minBlockNumberToKeep", minBlockNumberToKeep)
+ r.log.Debugw(fmt.Sprintf("reaping old txes created before %s", timeThreshold.Format(time.RFC3339)), "ageThreshold", threshold, "timeThreshold", timeThreshold)
- if err := r.store.ReapTxHistory(ctx, minBlockNumberToKeep, timeThreshold, r.chainID); err != nil {
+ if err := r.store.ReapTxHistory(ctx, timeThreshold, r.chainID); err != nil {
return err
}
diff --git a/common/txmgr/txmgr.go b/common/txmgr/txmgr.go
index fc27e930c37..49ac8a89b73 100644
--- a/common/txmgr/txmgr.go
+++ b/common/txmgr/txmgr.go
@@ -108,6 +108,7 @@ type Txm[
broadcaster *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
confirmer *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]
tracker *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]
+ finalizer txmgrtypes.Finalizer[BLOCK_HASH, HEAD]
fwdMgr txmgrtypes.ForwarderManager[ADDR]
txAttemptBuilder txmgrtypes.TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
newErrorClassifier NewErrorClassifier
@@ -143,6 +144,7 @@ func NewTxm[
confirmer *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE],
resender *Resender[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE],
tracker *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE],
+ finalizer txmgrtypes.Finalizer[BLOCK_HASH, HEAD],
newErrorClassifierFunc NewErrorClassifier,
) *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE] {
b := Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]{
@@ -165,13 +167,14 @@ func NewTxm[
resender: resender,
tracker: tracker,
newErrorClassifier: newErrorClassifierFunc,
+ finalizer: finalizer,
}
if txCfg.ResendAfterThreshold() <= 0 {
b.logger.Info("Resender: Disabled")
}
if txCfg.ReaperThreshold() > 0 && txCfg.ReaperInterval() > 0 {
- b.reaper = NewReaper[CHAIN_ID](lggr, b.txStore, cfg, txCfg, chainId)
+ b.reaper = NewReaper[CHAIN_ID](lggr, b.txStore, txCfg, chainId)
} else {
b.logger.Info("TxReaper: Disabled")
}
@@ -199,6 +202,10 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Start(ctx
return fmt.Errorf("Txm: Tracker failed to start: %w", err)
}
+ if err := ms.Start(ctx, b.finalizer); err != nil {
+ return fmt.Errorf("Txm: Finalizer failed to start: %w", err)
+ }
+
b.logger.Info("Txm starting runLoop")
b.wg.Add(1)
go b.runLoop()
@@ -293,6 +300,7 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) HealthRepo
services.CopyHealth(report, b.broadcaster.HealthReport())
services.CopyHealth(report, b.confirmer.HealthReport())
services.CopyHealth(report, b.txAttemptBuilder.HealthReport())
+ services.CopyHealth(report, b.finalizer.HealthReport())
})
if b.txConfig.ForwardersEnabled() {
@@ -415,6 +423,7 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) runLoop()
case head := <-b.chHeads:
b.confirmer.mb.Deliver(head)
b.tracker.mb.Deliver(head.BlockNumber())
+ b.finalizer.DeliverLatestHead(head)
case reset := <-b.reset:
// This check prevents the weird edge-case where you can select
// into this block after chStop has already been closed and the
@@ -446,6 +455,10 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) runLoop()
if err != nil && (!errors.Is(err, services.ErrAlreadyStopped) || !errors.Is(err, services.ErrCannotStopUnstarted)) {
b.logger.Errorw(fmt.Sprintf("Failed to Close Tracker: %v", err), "err", err)
}
+ err = b.finalizer.Close()
+ if err != nil && (!errors.Is(err, services.ErrAlreadyStopped) || !errors.Is(err, services.ErrCannotStopUnstarted)) {
+ b.logger.Errorw(fmt.Sprintf("Failed to Close Finalizer: %v", err), "err", err)
+ }
return
case <-keysChanged:
// This check prevents the weird edge-case where you can select
@@ -644,9 +657,10 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) GetTransac
// Return unconfirmed for ConfirmedMissingReceipt since a receipt is required to determine if it is finalized
return commontypes.Unconfirmed, nil
case TxConfirmed:
- // TODO: Check for finality and return finalized status
- // Return unconfirmed if tx receipt's block is newer than the latest finalized block
+ // Return unconfirmed for confirmed transactions because they are not yet finalized
return commontypes.Unconfirmed, nil
+ case TxFinalized:
+ return commontypes.Finalized, nil
case TxFatalError:
// Use an ErrorClassifier to determine if the transaction is considered Fatal
txErr := b.newErrorClassifier(tx.GetError())
diff --git a/common/txmgr/types/config.go b/common/txmgr/types/config.go
index 4d9af5f0673..8b11a45d11d 100644
--- a/common/txmgr/types/config.go
+++ b/common/txmgr/types/config.go
@@ -5,7 +5,6 @@ import "time"
type TransactionManagerChainConfig interface {
BroadcasterChainConfig
ConfirmerChainConfig
- ReaperChainConfig
}
type TransactionManagerFeeConfig interface {
@@ -74,11 +73,6 @@ type ResenderTransactionsConfig interface {
MaxInFlight() uint32
}
-// ReaperConfig is the config subset used by the reaper
-type ReaperChainConfig interface {
- FinalityDepth() uint32
-}
-
type ReaperTransactionsConfig interface {
ReaperInterval() time.Duration
ReaperThreshold() time.Duration
diff --git a/common/txmgr/types/finalizer.go b/common/txmgr/types/finalizer.go
new file mode 100644
index 00000000000..be3c897d0e2
--- /dev/null
+++ b/common/txmgr/types/finalizer.go
@@ -0,0 +1,12 @@
+package types
+
+import (
+ "github.com/smartcontractkit/chainlink-common/pkg/services"
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+)
+
+type Finalizer[BLOCK_HASH types.Hashable, HEAD types.Head[BLOCK_HASH]] interface {
+ // interfaces for running the underlying estimator
+ services.Service
+ DeliverLatestHead(head HEAD) bool
+}
diff --git a/common/txmgr/types/mocks/reaper_chain_config.go b/common/txmgr/types/mocks/reaper_chain_config.go
deleted file mode 100644
index 0531b071708..00000000000
--- a/common/txmgr/types/mocks/reaper_chain_config.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Code generated by mockery v2.43.2. DO NOT EDIT.
-
-package mocks
-
-import mock "github.com/stretchr/testify/mock"
-
-// ReaperConfig is an autogenerated mock type for the ReaperChainConfig type
-type ReaperConfig struct {
- mock.Mock
-}
-
-type ReaperConfig_Expecter struct {
- mock *mock.Mock
-}
-
-func (_m *ReaperConfig) EXPECT() *ReaperConfig_Expecter {
- return &ReaperConfig_Expecter{mock: &_m.Mock}
-}
-
-// FinalityDepth provides a mock function with given fields:
-func (_m *ReaperConfig) FinalityDepth() uint32 {
- ret := _m.Called()
-
- if len(ret) == 0 {
- panic("no return value specified for FinalityDepth")
- }
-
- var r0 uint32
- if rf, ok := ret.Get(0).(func() uint32); ok {
- r0 = rf()
- } else {
- r0 = ret.Get(0).(uint32)
- }
-
- return r0
-}
-
-// ReaperConfig_FinalityDepth_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FinalityDepth'
-type ReaperConfig_FinalityDepth_Call struct {
- *mock.Call
-}
-
-// FinalityDepth is a helper method to define mock.On call
-func (_e *ReaperConfig_Expecter) FinalityDepth() *ReaperConfig_FinalityDepth_Call {
- return &ReaperConfig_FinalityDepth_Call{Call: _e.mock.On("FinalityDepth")}
-}
-
-func (_c *ReaperConfig_FinalityDepth_Call) Run(run func()) *ReaperConfig_FinalityDepth_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run()
- })
- return _c
-}
-
-func (_c *ReaperConfig_FinalityDepth_Call) Return(_a0 uint32) *ReaperConfig_FinalityDepth_Call {
- _c.Call.Return(_a0)
- return _c
-}
-
-func (_c *ReaperConfig_FinalityDepth_Call) RunAndReturn(run func() uint32) *ReaperConfig_FinalityDepth_Call {
- _c.Call.Return(run)
- return _c
-}
-
-// NewReaperConfig creates a new instance of ReaperConfig. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-// The first argument is typically a *testing.T value.
-func NewReaperConfig(t interface {
- mock.TestingT
- Cleanup(func())
-}) *ReaperConfig {
- mock := &ReaperConfig{}
- mock.Mock.Test(t)
-
- t.Cleanup(func() { mock.AssertExpectations(t) })
-
- return mock
-}
diff --git a/common/txmgr/types/mocks/tx_store.go b/common/txmgr/types/mocks/tx_store.go
index ee166638e34..0b9c7110660 100644
--- a/common/txmgr/types/mocks/tx_store.go
+++ b/common/txmgr/types/mocks/tx_store.go
@@ -1760,65 +1760,6 @@ func (_c *TxStore_HasInProgressTransaction_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_H
return _c
}
-// IsTxFinalized provides a mock function with given fields: ctx, blockHeight, txID, chainID
-func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) IsTxFinalized(ctx context.Context, blockHeight int64, txID int64, chainID CHAIN_ID) (bool, error) {
- ret := _m.Called(ctx, blockHeight, txID, chainID)
-
- if len(ret) == 0 {
- panic("no return value specified for IsTxFinalized")
- }
-
- var r0 bool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, int64, int64, CHAIN_ID) (bool, error)); ok {
- return rf(ctx, blockHeight, txID, chainID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, int64, int64, CHAIN_ID) bool); ok {
- r0 = rf(ctx, blockHeight, txID, chainID)
- } else {
- r0 = ret.Get(0).(bool)
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, int64, int64, CHAIN_ID) error); ok {
- r1 = rf(ctx, blockHeight, txID, chainID)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
-// TxStore_IsTxFinalized_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsTxFinalized'
-type TxStore_IsTxFinalized_Call[ADDR types.Hashable, CHAIN_ID types.ID, TX_HASH types.Hashable, BLOCK_HASH types.Hashable, R txmgrtypes.ChainReceipt[TX_HASH, BLOCK_HASH], SEQ types.Sequence, FEE feetypes.Fee] struct {
- *mock.Call
-}
-
-// IsTxFinalized is a helper method to define mock.On call
-// - ctx context.Context
-// - blockHeight int64
-// - txID int64
-// - chainID CHAIN_ID
-func (_e *TxStore_Expecter[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) IsTxFinalized(ctx interface{}, blockHeight interface{}, txID interface{}, chainID interface{}) *TxStore_IsTxFinalized_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] {
- return &TxStore_IsTxFinalized_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]{Call: _e.mock.On("IsTxFinalized", ctx, blockHeight, txID, chainID)}
-}
-
-func (_c *TxStore_IsTxFinalized_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Run(run func(ctx context.Context, blockHeight int64, txID int64, chainID CHAIN_ID)) *TxStore_IsTxFinalized_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(int64), args[2].(int64), args[3].(CHAIN_ID))
- })
- return _c
-}
-
-func (_c *TxStore_IsTxFinalized_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Return(finalized bool, err error) *TxStore_IsTxFinalized_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] {
- _c.Call.Return(finalized, err)
- return _c
-}
-
-func (_c *TxStore_IsTxFinalized_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) RunAndReturn(run func(context.Context, int64, int64, CHAIN_ID) (bool, error)) *TxStore_IsTxFinalized_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] {
- _c.Call.Return(run)
- return _c
-}
-
// LoadTxAttempts provides a mock function with given fields: ctx, etx
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) LoadTxAttempts(ctx context.Context, etx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error {
ret := _m.Called(ctx, etx)
@@ -2069,17 +2010,17 @@ func (_c *TxStore_PruneUnstartedTxQueue_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH
return _c
}
-// ReapTxHistory provides a mock function with given fields: ctx, minBlockNumberToKeep, timeThreshold, chainID
-func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) ReapTxHistory(ctx context.Context, minBlockNumberToKeep int64, timeThreshold time.Time, chainID CHAIN_ID) error {
- ret := _m.Called(ctx, minBlockNumberToKeep, timeThreshold, chainID)
+// ReapTxHistory provides a mock function with given fields: ctx, timeThreshold, chainID
+func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) ReapTxHistory(ctx context.Context, timeThreshold time.Time, chainID CHAIN_ID) error {
+ ret := _m.Called(ctx, timeThreshold, chainID)
if len(ret) == 0 {
panic("no return value specified for ReapTxHistory")
}
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, int64, time.Time, CHAIN_ID) error); ok {
- r0 = rf(ctx, minBlockNumberToKeep, timeThreshold, chainID)
+ if rf, ok := ret.Get(0).(func(context.Context, time.Time, CHAIN_ID) error); ok {
+ r0 = rf(ctx, timeThreshold, chainID)
} else {
r0 = ret.Error(0)
}
@@ -2094,16 +2035,15 @@ type TxStore_ReapTxHistory_Call[ADDR types.Hashable, CHAIN_ID types.ID, TX_HASH
// ReapTxHistory is a helper method to define mock.On call
// - ctx context.Context
-// - minBlockNumberToKeep int64
// - timeThreshold time.Time
// - chainID CHAIN_ID
-func (_e *TxStore_Expecter[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) ReapTxHistory(ctx interface{}, minBlockNumberToKeep interface{}, timeThreshold interface{}, chainID interface{}) *TxStore_ReapTxHistory_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] {
- return &TxStore_ReapTxHistory_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]{Call: _e.mock.On("ReapTxHistory", ctx, minBlockNumberToKeep, timeThreshold, chainID)}
+func (_e *TxStore_Expecter[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) ReapTxHistory(ctx interface{}, timeThreshold interface{}, chainID interface{}) *TxStore_ReapTxHistory_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] {
+ return &TxStore_ReapTxHistory_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]{Call: _e.mock.On("ReapTxHistory", ctx, timeThreshold, chainID)}
}
-func (_c *TxStore_ReapTxHistory_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Run(run func(ctx context.Context, minBlockNumberToKeep int64, timeThreshold time.Time, chainID CHAIN_ID)) *TxStore_ReapTxHistory_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] {
+func (_c *TxStore_ReapTxHistory_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Run(run func(ctx context.Context, timeThreshold time.Time, chainID CHAIN_ID)) *TxStore_ReapTxHistory_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] {
_c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(int64), args[2].(time.Time), args[3].(CHAIN_ID))
+ run(args[0].(context.Context), args[1].(time.Time), args[2].(CHAIN_ID))
})
return _c
}
@@ -2113,7 +2053,7 @@ func (_c *TxStore_ReapTxHistory_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ
return _c
}
-func (_c *TxStore_ReapTxHistory_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) RunAndReturn(run func(context.Context, int64, time.Time, CHAIN_ID) error) *TxStore_ReapTxHistory_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] {
+func (_c *TxStore_ReapTxHistory_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) RunAndReturn(run func(context.Context, time.Time, CHAIN_ID) error) *TxStore_ReapTxHistory_Call[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] {
_c.Call.Return(run)
return _c
}
diff --git a/common/txmgr/types/tx_store.go b/common/txmgr/types/tx_store.go
index 875339cfbac..63b56dd169a 100644
--- a/common/txmgr/types/tx_store.go
+++ b/common/txmgr/types/tx_store.go
@@ -105,11 +105,10 @@ type TransactionStore[
UpdateTxUnstartedToInProgress(ctx context.Context, etx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt *TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error
UpdateTxFatalError(ctx context.Context, etx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error
UpdateTxForRebroadcast(ctx context.Context, etx Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], etxAttempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error
- IsTxFinalized(ctx context.Context, blockHeight int64, txID int64, chainID CHAIN_ID) (finalized bool, err error)
}
type TxHistoryReaper[CHAIN_ID types.ID] interface {
- ReapTxHistory(ctx context.Context, minBlockNumberToKeep int64, timeThreshold time.Time, chainID CHAIN_ID) error
+ ReapTxHistory(ctx context.Context, timeThreshold time.Time, chainID CHAIN_ID) error
}
type UnstartedTxQueuePruner interface {
diff --git a/core/chains/evm/headtracker/simulated_head_tracker.go b/core/chains/evm/headtracker/simulated_head_tracker.go
index e1e550de992..62bb4968c2f 100644
--- a/core/chains/evm/headtracker/simulated_head_tracker.go
+++ b/core/chains/evm/headtracker/simulated_head_tracker.go
@@ -2,6 +2,7 @@ package headtracker
import (
"context"
+ "errors"
"fmt"
"math/big"
@@ -51,3 +52,31 @@ func (ht *simulatedHeadTracker) LatestAndFinalizedBlock(ctx context.Context) (*e
return latest, finalizedBlock, nil
}
+
+func (ht *simulatedHeadTracker) LatestChain() *evmtypes.Head {
+ return nil
+}
+
+func (ht *simulatedHeadTracker) HealthReport() map[string]error {
+ return nil
+}
+
+func (ht *simulatedHeadTracker) Start(_ context.Context) error {
+ return nil
+}
+
+func (ht *simulatedHeadTracker) Close() error {
+ return nil
+}
+
+func (ht *simulatedHeadTracker) Backfill(_ context.Context, _ *evmtypes.Head) error {
+ return errors.New("unimplemented")
+}
+
+func (ht *simulatedHeadTracker) Name() string {
+ return "SimulatedHeadTracker"
+}
+
+func (ht *simulatedHeadTracker) Ready() error {
+ return nil
+}
diff --git a/core/chains/evm/txmgr/builder.go b/core/chains/evm/txmgr/builder.go
index 8234d55b960..d85d6acdc8c 100644
--- a/core/chains/evm/txmgr/builder.go
+++ b/core/chains/evm/txmgr/builder.go
@@ -13,6 +13,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/chaintype"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/forwarders"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ httypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker/types"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/keystore"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
@@ -32,6 +33,7 @@ func NewTxm(
logPoller logpoller.LogPoller,
keyStore keystore.Eth,
estimator gas.EvmFeeEstimator,
+ headTracker httypes.HeadTracker,
) (txm TxManager,
err error,
) {
@@ -54,11 +56,12 @@ func NewTxm(
evmTracker := NewEvmTracker(txStore, keyStore, chainID, lggr)
stuckTxDetector := NewStuckTxDetector(lggr, client.ConfiguredChainID(), chainConfig.ChainType(), fCfg.PriceMax(), txConfig.AutoPurge(), estimator, txStore, client)
evmConfirmer := NewEvmConfirmer(txStore, txmClient, txmCfg, feeCfg, txConfig, dbConfig, keyStore, txAttemptBuilder, lggr, stuckTxDetector)
+ evmFinalizer := NewEvmFinalizer(lggr, client.ConfiguredChainID(), chainConfig.RPCDefaultBatchSize(), txStore, client, headTracker)
var evmResender *Resender
if txConfig.ResendAfterThreshold() > 0 {
evmResender = NewEvmResender(lggr, txStore, txmClient, evmTracker, keyStore, txmgr.DefaultResenderPollInterval, chainConfig, txConfig)
}
- txm = NewEvmTxm(chainID, txmCfg, txConfig, keyStore, lggr, checker, fwdMgr, txAttemptBuilder, txStore, evmBroadcaster, evmConfirmer, evmResender, evmTracker)
+ txm = NewEvmTxm(chainID, txmCfg, txConfig, keyStore, lggr, checker, fwdMgr, txAttemptBuilder, txStore, evmBroadcaster, evmConfirmer, evmResender, evmTracker, evmFinalizer)
return txm, nil
}
@@ -77,8 +80,9 @@ func NewEvmTxm(
confirmer *Confirmer,
resender *Resender,
tracker *Tracker,
+ finalizer Finalizer,
) *Txm {
- return txmgr.NewTxm(chainId, cfg, txCfg, keyStore, lggr, checkerFactory, fwdMgr, txAttemptBuilder, txStore, broadcaster, confirmer, resender, tracker, client.NewTxError)
+ return txmgr.NewTxm(chainId, cfg, txCfg, keyStore, lggr, checkerFactory, fwdMgr, txAttemptBuilder, txStore, broadcaster, confirmer, resender, tracker, finalizer, client.NewTxError)
}
// NewEvmResender creates a new concrete EvmResender
@@ -96,8 +100,8 @@ func NewEvmResender(
}
// NewEvmReaper instantiates a new EVM-specific reaper object
-func NewEvmReaper(lggr logger.Logger, store txmgrtypes.TxHistoryReaper[*big.Int], config EvmReaperConfig, txConfig txmgrtypes.ReaperTransactionsConfig, chainID *big.Int) *Reaper {
- return txmgr.NewReaper(lggr, store, config, txConfig, chainID)
+func NewEvmReaper(lggr logger.Logger, store txmgrtypes.TxHistoryReaper[*big.Int], txConfig txmgrtypes.ReaperTransactionsConfig, chainID *big.Int) *Reaper {
+ return txmgr.NewReaper(lggr, store, txConfig, chainID)
}
// NewEvmConfirmer instantiates a new EVM confirmer
diff --git a/core/chains/evm/txmgr/client.go b/core/chains/evm/txmgr/client.go
index 661a180af50..e995080a260 100644
--- a/core/chains/evm/txmgr/client.go
+++ b/core/chains/evm/txmgr/client.go
@@ -183,3 +183,7 @@ func (c *evmTxmClient) CallContract(ctx context.Context, a TxAttempt, blockNumbe
}, blockNumber)
return client.ExtractRPCError(errCall)
}
+
+func (c *evmTxmClient) HeadByHash(ctx context.Context, hash common.Hash) (*evmtypes.Head, error) {
+ return c.client.HeadByHash(ctx, hash)
+}
diff --git a/core/chains/evm/txmgr/config.go b/core/chains/evm/txmgr/config.go
index b53f99840b9..af20c9a5901 100644
--- a/core/chains/evm/txmgr/config.go
+++ b/core/chains/evm/txmgr/config.go
@@ -48,7 +48,6 @@ type (
EvmBroadcasterConfig txmgrtypes.BroadcasterChainConfig
EvmConfirmerConfig txmgrtypes.ConfirmerChainConfig
EvmResenderConfig txmgrtypes.ResenderChainConfig
- EvmReaperConfig txmgrtypes.ReaperChainConfig
)
var _ EvmTxmConfig = (*evmTxmConfig)(nil)
diff --git a/core/chains/evm/txmgr/evm_tx_store.go b/core/chains/evm/txmgr/evm_tx_store.go
index e83a83907e4..45de437e443 100644
--- a/core/chains/evm/txmgr/evm_tx_store.go
+++ b/core/chains/evm/txmgr/evm_tx_store.go
@@ -44,6 +44,10 @@ type EvmTxStore interface {
// redeclare TxStore for mockery
txmgrtypes.TxStore[common.Address, *big.Int, common.Hash, common.Hash, *evmtypes.Receipt, evmtypes.Nonce, gas.EvmFee]
TxStoreWebApi
+
+ // methods used solely in EVM components
+ FindConfirmedTxesReceipts(ctx context.Context, finalizedBlockNum int64, chainID *big.Int) (receipts []Receipt, err error)
+ UpdateTxStatesToFinalizedUsingReceiptIds(ctx context.Context, etxIDs []int64, chainId *big.Int) error
}
// TxStoreWebApi encapsulates the methods that are not used by the txmgr and only used by the various web controllers, readers, or evm specific components
@@ -87,7 +91,7 @@ var _ TestEvmTxStore = (*evmTxStore)(nil)
// Directly maps to columns of database table "evm.receipts".
// Do not modify type unless you
// intend to modify the database schema
-type dbReceipt struct {
+type DbReceipt struct {
ID int64
TxHash common.Hash
BlockHash common.Hash
@@ -97,8 +101,8 @@ type dbReceipt struct {
CreatedAt time.Time
}
-func DbReceiptFromEvmReceipt(evmReceipt *evmtypes.Receipt) dbReceipt {
- return dbReceipt{
+func DbReceiptFromEvmReceipt(evmReceipt *evmtypes.Receipt) DbReceipt {
+ return DbReceipt{
TxHash: evmReceipt.TxHash,
BlockHash: evmReceipt.BlockHash,
BlockNumber: evmReceipt.BlockNumber.Int64(),
@@ -107,7 +111,7 @@ func DbReceiptFromEvmReceipt(evmReceipt *evmtypes.Receipt) dbReceipt {
}
}
-func DbReceiptToEvmReceipt(receipt *dbReceipt) *evmtypes.Receipt {
+func DbReceiptToEvmReceipt(receipt *DbReceipt) *evmtypes.Receipt {
return &receipt.Receipt
}
@@ -131,7 +135,7 @@ type dbReceiptPlus struct {
FailOnRevert bool `db:"FailOnRevert"`
}
-func fromDBReceipts(rs []dbReceipt) []*evmtypes.Receipt {
+func fromDBReceipts(rs []DbReceipt) []*evmtypes.Receipt {
receipts := make([]*evmtypes.Receipt, len(rs))
for i := 0; i < len(rs); i++ {
receipts[i] = DbReceiptToEvmReceipt(&rs[i])
@@ -677,7 +681,7 @@ func (o *evmTxStore) loadEthTxesAttemptsReceipts(ctx context.Context, etxs []*Tx
attemptHashes = append(attemptHashes, attempt.Hash.Bytes())
}
}
- var rs []dbReceipt
+ var rs []DbReceipt
if err = o.q.SelectContext(ctx, &rs, `SELECT * FROM evm.receipts WHERE tx_hash = ANY($1)`, pq.Array(attemptHashes)); err != nil {
return pkgerrors.Wrap(err, "loadEthTxesAttemptsReceipts failed to load evm.receipts")
}
@@ -700,7 +704,7 @@ func loadConfirmedAttemptsReceipts(ctx context.Context, q sqlutil.DataSource, at
byHash[attempt.Hash.String()] = &attempts[i]
hashes = append(hashes, attempt.Hash.Bytes())
}
- var rs []dbReceipt
+ var rs []DbReceipt
if err := q.SelectContext(ctx, &rs, `SELECT * FROM evm.receipts WHERE tx_hash = ANY($1)`, pq.Array(hashes)); err != nil {
return pkgerrors.Wrap(err, "loadConfirmedAttemptsReceipts failed to load evm.receipts")
}
@@ -1116,7 +1120,7 @@ func updateEthTxAttemptUnbroadcast(ctx context.Context, orm *evmTxStore, attempt
func updateEthTxUnconfirm(ctx context.Context, orm *evmTxStore, etx Tx) error {
if etx.State != txmgr.TxConfirmed {
- return errors.New("expected eth_tx state to be confirmed")
+ return errors.New("expected tx state to be confirmed")
}
_, err := orm.q.ExecContext(ctx, `UPDATE evm.txes SET state = 'unconfirmed' WHERE id = $1`, etx.ID)
return pkgerrors.Wrap(err, "updateEthTxUnconfirm failed")
@@ -1205,24 +1209,6 @@ AND evm_chain_id = $1`, chainID.String()).Scan(&earliestUnconfirmedTxBlock)
return earliestUnconfirmedTxBlock, err
}
-func (o *evmTxStore) IsTxFinalized(ctx context.Context, blockHeight int64, txID int64, chainID *big.Int) (finalized bool, err error) {
- var cancel context.CancelFunc
- ctx, cancel = o.stopCh.Ctx(ctx)
- defer cancel()
-
- var count int32
- err = o.q.GetContext(ctx, &count, `
- SELECT COUNT(evm.receipts.receipt) FROM evm.txes
- INNER JOIN evm.tx_attempts ON evm.txes.id = evm.tx_attempts.eth_tx_id
- INNER JOIN evm.receipts ON evm.tx_attempts.hash = evm.receipts.tx_hash
- WHERE evm.receipts.block_number <= ($1 - evm.txes.min_confirmations)
- AND evm.txes.id = $2 AND evm.txes.evm_chain_id = $3`, blockHeight, txID, chainID.String())
- if err != nil {
- return false, fmt.Errorf("failed to retrieve transaction reciepts: %w", err)
- }
- return count > 0, nil
-}
-
func (o *evmTxStore) saveAttemptWithNewState(ctx context.Context, attempt TxAttempt, broadcastAt time.Time) error {
var dbAttempt DbEthTxAttempt
dbAttempt.FromTxAttempt(&attempt)
@@ -1872,7 +1858,7 @@ id < (
return
}
-func (o *evmTxStore) ReapTxHistory(ctx context.Context, minBlockNumberToKeep int64, timeThreshold time.Time, chainID *big.Int) error {
+func (o *evmTxStore) ReapTxHistory(ctx context.Context, timeThreshold time.Time, chainID *big.Int) error {
var cancel context.CancelFunc
ctx, cancel = o.stopCh.Ctx(ctx)
defer cancel()
@@ -1885,19 +1871,18 @@ func (o *evmTxStore) ReapTxHistory(ctx context.Context, minBlockNumberToKeep int
res, err := o.q.ExecContext(ctx, `
WITH old_enough_receipts AS (
SELECT tx_hash FROM evm.receipts
- WHERE block_number < $1
ORDER BY block_number ASC, id ASC
- LIMIT $2
+ LIMIT $1
)
DELETE FROM evm.txes
USING old_enough_receipts, evm.tx_attempts
WHERE evm.tx_attempts.eth_tx_id = evm.txes.id
AND evm.tx_attempts.hash = old_enough_receipts.tx_hash
-AND evm.txes.created_at < $3
-AND evm.txes.state = 'confirmed'
-AND evm_chain_id = $4`, minBlockNumberToKeep, limit, timeThreshold, chainID.String())
+AND evm.txes.created_at < $2
+AND evm.txes.state = 'finalized'
+AND evm_chain_id = $3`, limit, timeThreshold, chainID.String())
if err != nil {
- return count, pkgerrors.Wrap(err, "ReapTxes failed to delete old confirmed evm.txes")
+ return count, pkgerrors.Wrap(err, "ReapTxes failed to delete old finalized evm.txes")
}
rowsAffected, err := res.RowsAffected()
if err != nil {
@@ -1906,7 +1891,7 @@ AND evm_chain_id = $4`, minBlockNumberToKeep, limit, timeThreshold, chainID.Stri
return uint(rowsAffected), err
}, batchSize)
if err != nil {
- return pkgerrors.Wrap(err, "TxmReaper#reapEthTxes batch delete of confirmed evm.txes failed")
+ return pkgerrors.Wrap(err, "TxmReaper#reapEthTxes batch delete of finalized evm.txes failed")
}
// Delete old 'fatal_error' evm.txes
err = sqlutil.Batch(func(_, limit uint) (count uint, err error) {
@@ -1927,6 +1912,38 @@ AND evm_chain_id = $2`, timeThreshold, chainID.String())
if err != nil {
return pkgerrors.Wrap(err, "TxmReaper#reapEthTxes batch delete of fatally errored evm.txes failed")
}
+ // Delete old 'confirmed' evm.txes that were never finalized
+ // This query should never result in changes but added just in case transactions slip through the cracks
+ // to avoid them building up in the DB
+ err = sqlutil.Batch(func(_, limit uint) (count uint, err error) {
+ res, err := o.q.ExecContext(ctx, `
+WITH old_enough_receipts AS (
+ SELECT tx_hash FROM evm.receipts
+ ORDER BY block_number ASC, id ASC
+ LIMIT $1
+)
+DELETE FROM evm.txes
+USING old_enough_receipts, evm.tx_attempts
+WHERE evm.tx_attempts.eth_tx_id = evm.txes.id
+AND evm.tx_attempts.hash = old_enough_receipts.tx_hash
+AND evm.txes.created_at < $2
+AND evm.txes.state = 'confirmed'
+AND evm_chain_id = $3`, limit, timeThreshold, chainID.String())
+ if err != nil {
+ return count, pkgerrors.Wrap(err, "ReapTxes failed to delete old confirmed evm.txes")
+ }
+ rowsAffected, err := res.RowsAffected()
+ if err != nil {
+ return count, pkgerrors.Wrap(err, "ReapTxes failed to get rows affected")
+ }
+ if rowsAffected > 0 {
+ o.logger.Errorf("%d confirmed transactions were reaped before being marked as finalized. This should never happen unless the threshold is set too low or the transactions were lost track of", rowsAffected)
+ }
+ return uint(rowsAffected), err
+ }, batchSize)
+ if err != nil {
+ return pkgerrors.Wrap(err, "TxmReaper#reapEthTxes batch delete of confirmed evm.txes failed")
+ }
return nil
}
@@ -2055,3 +2072,42 @@ func (o *evmTxStore) UpdateTxAttemptBroadcastBeforeBlockNum(ctx context.Context,
_, err := o.q.ExecContext(ctx, sql, blockNum, id)
return err
}
+
+// Returns all confirmed transactions with receipt block nums older than or equal to the finalized block number
+func (o *evmTxStore) FindConfirmedTxesReceipts(ctx context.Context, finalizedBlockNum int64, chainID *big.Int) (receipts []Receipt, err error) {
+ var cancel context.CancelFunc
+ ctx, cancel = o.stopCh.Ctx(ctx)
+ defer cancel()
+ err = o.Transact(ctx, true, func(orm *evmTxStore) error {
+ sql := `SELECT evm.receipts.* FROM evm.receipts
+ INNER JOIN evm.tx_attempts ON evm.tx_attempts.hash = evm.receipts.tx_hash
+ INNER JOIN evm.txes ON evm.txes.id = evm.tx_attempts.eth_tx_id
+ WHERE evm.txes.state = 'confirmed' AND evm.receipts.block_number <= $1 AND evm.txes.evm_chain_id = $2`
+ var dbReceipts []DbReceipt
+ err = o.q.SelectContext(ctx, &dbReceipts, sql, finalizedBlockNum, chainID.String())
+ if len(dbReceipts) == 0 {
+ return nil
+ }
+ receipts = dbReceipts
+ return nil
+ })
+ return receipts, err
+}
+
+// Mark transactions corresponding to receipt IDs as finalized
+func (o *evmTxStore) UpdateTxStatesToFinalizedUsingReceiptIds(ctx context.Context, receiptIDs []int64, chainId *big.Int) error {
+ if len(receiptIDs) == 0 {
+ return nil
+ }
+ var cancel context.CancelFunc
+ ctx, cancel = o.stopCh.Ctx(ctx)
+ defer cancel()
+ sql := `
+UPDATE evm.txes SET state = 'finalized' WHERE evm.txes.evm_chain_id = $1 AND evm.txes.id IN (SELECT evm.txes.id FROM evm.txes
+ INNER JOIN evm.tx_attempts ON evm.tx_attempts.eth_tx_id = evm.txes.id
+ INNER JOIN evm.receipts ON evm.receipts.tx_hash = evm.tx_attempts.hash
+ WHERE evm.receipts.id = ANY($2))
+`
+ _, err := o.q.ExecContext(ctx, sql, chainId.String(), pq.Array(receiptIDs))
+ return err
+}
diff --git a/core/chains/evm/txmgr/evm_tx_store_test.go b/core/chains/evm/txmgr/evm_tx_store_test.go
index afb8de4ca52..191a0a5fed2 100644
--- a/core/chains/evm/txmgr/evm_tx_store_test.go
+++ b/core/chains/evm/txmgr/evm_tx_store_test.go
@@ -783,30 +783,6 @@ func TestORM_UpdateTxForRebroadcast(t *testing.T) {
})
}
-func TestORM_IsTxFinalized(t *testing.T) {
- t.Parallel()
-
- db := pgtest.NewSqlxDB(t)
- txStore := cltest.NewTestTxStore(t, db)
- ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
-
- t.Run("confirmed tx not past finality_depth", func(t *testing.T) {
- confirmedAddr := cltest.MustGenerateRandomKey(t).Address
- tx := mustInsertConfirmedEthTxWithReceipt(t, txStore, confirmedAddr, 123, 1)
- finalized, err := txStore.IsTxFinalized(tests.Context(t), 2, tx.ID, ethClient.ConfiguredChainID())
- require.NoError(t, err)
- require.False(t, finalized)
- })
-
- t.Run("confirmed tx past finality_depth", func(t *testing.T) {
- confirmedAddr := cltest.MustGenerateRandomKey(t).Address
- tx := mustInsertConfirmedEthTxWithReceipt(t, txStore, confirmedAddr, 123, 1)
- finalized, err := txStore.IsTxFinalized(tests.Context(t), 10, tx.ID, ethClient.ConfiguredChainID())
- require.NoError(t, err)
- require.True(t, finalized)
- })
-}
-
func TestORM_FindTransactionsConfirmedInBlockRange(t *testing.T) {
t.Parallel()
@@ -1382,7 +1358,7 @@ func TestORM_UpdateTxUnstartedToInProgress(t *testing.T) {
evmTxmCfg := txmgr.NewEvmTxmConfig(ccfg.EVM())
ec := evmtest.NewEthClientMockWithDefaultChain(t)
txMgr := txmgr.NewEvmTxm(ec.ConfiguredChainID(), evmTxmCfg, ccfg.EVM().Transactions(), nil, logger.Test(t), nil, nil,
- nil, txStore, nil, nil, nil, nil)
+ nil, txStore, nil, nil, nil, nil, nil)
err := txMgr.XXXTestAbandon(fromAddress) // mark transaction as abandoned
require.NoError(t, err)
@@ -1871,3 +1847,60 @@ func AssertCountPerSubject(t *testing.T, txStore txmgr.TestEvmTxStore, expected
require.NoError(t, err)
require.Equal(t, int(expected), count)
}
+
+func TestORM_FindTransactionsByState(t *testing.T) {
+ t.Parallel()
+
+ ctx := tests.Context(t)
+ db := pgtest.NewSqlxDB(t)
+ txStore := cltest.NewTestTxStore(t, db)
+ kst := cltest.NewKeyStore(t, db)
+ _, fromAddress := cltest.MustInsertRandomKey(t, kst.Eth())
+ finalizedBlockNum := int64(100)
+
+ mustInsertUnstartedTx(t, txStore, fromAddress)
+ mustInsertInProgressEthTxWithAttempt(t, txStore, 0, fromAddress)
+ mustInsertUnconfirmedEthTxWithAttemptState(t, txStore, 1, fromAddress, txmgrtypes.TxAttemptBroadcast)
+ mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt(t, txStore, 2, finalizedBlockNum, time.Now(), fromAddress)
+ mustInsertConfirmedEthTxWithReceipt(t, txStore, fromAddress, 3, finalizedBlockNum+1)
+ mustInsertConfirmedEthTxWithReceipt(t, txStore, fromAddress, 4, finalizedBlockNum)
+ mustInsertFatalErrorEthTx(t, txStore, fromAddress)
+
+ receipts, err := txStore.FindConfirmedTxesReceipts(ctx, finalizedBlockNum, testutils.FixtureChainID)
+ require.NoError(t, err)
+ require.Len(t, receipts, 1)
+}
+
+func TestORM_UpdateTxesFinalized(t *testing.T) {
+ t.Parallel()
+
+ ctx := tests.Context(t)
+ db := pgtest.NewSqlxDB(t)
+ txStore := cltest.NewTestTxStore(t, db)
+ kst := cltest.NewKeyStore(t, db)
+ broadcast := time.Now()
+ _, fromAddress := cltest.MustInsertRandomKey(t, kst.Eth())
+
+ t.Run("successfully finalizes a confirmed transaction", func(t *testing.T) {
+ nonce := evmtypes.Nonce(0)
+ tx := &txmgr.Tx{
+ Sequence: &nonce,
+ FromAddress: fromAddress,
+ EncodedPayload: []byte{1, 2, 3},
+ State: txmgrcommon.TxConfirmed,
+ BroadcastAt: &broadcast,
+ InitialBroadcastAt: &broadcast,
+ }
+ err := txStore.InsertTx(ctx, tx)
+ require.NoError(t, err)
+ attempt := newBroadcastLegacyEthTxAttempt(t, tx.ID)
+ err = txStore.InsertTxAttempt(ctx, &attempt)
+ require.NoError(t, err)
+ receipt := mustInsertEthReceipt(t, txStore, 100, testutils.NewHash(), attempt.Hash)
+ err = txStore.UpdateTxStatesToFinalizedUsingReceiptIds(ctx, []int64{receipt.ID}, testutils.FixtureChainID)
+ require.NoError(t, err)
+ etx, err := txStore.FindTxWithAttempts(ctx, tx.ID)
+ require.NoError(t, err)
+ require.Equal(t, txmgrcommon.TxFinalized, etx.State)
+ })
+}
diff --git a/core/chains/evm/txmgr/finalizer.go b/core/chains/evm/txmgr/finalizer.go
new file mode 100644
index 00000000000..6d5fb81782c
--- /dev/null
+++ b/core/chains/evm/txmgr/finalizer.go
@@ -0,0 +1,294 @@
+package txmgr
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/rpc"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
+ "github.com/smartcontractkit/chainlink-common/pkg/services"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox"
+
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+)
+
+var _ Finalizer = (*evmFinalizer)(nil)
+
+// processHeadTimeout represents a sanity limit on how long ProcessHead should take to complete
+const processHeadTimeout = 10 * time.Minute
+
+type finalizerTxStore interface {
+ FindConfirmedTxesReceipts(ctx context.Context, finalizedBlockNum int64, chainID *big.Int) ([]Receipt, error)
+ UpdateTxStatesToFinalizedUsingReceiptIds(ctx context.Context, txs []int64, chainId *big.Int) error
+}
+
+type finalizerChainClient interface {
+ BatchCallContext(ctx context.Context, elems []rpc.BatchElem) error
+}
+
+type finalizerHeadTracker interface {
+ LatestAndFinalizedBlock(ctx context.Context) (latest, finalized *evmtypes.Head, err error)
+}
+
+// Finalizer handles processing new finalized blocks and marking transactions as finalized accordingly in the TXM DB
+type evmFinalizer struct {
+ services.StateMachine
+ lggr logger.SugaredLogger
+ chainId *big.Int
+ rpcBatchSize int
+
+ txStore finalizerTxStore
+ client finalizerChainClient
+ headTracker finalizerHeadTracker
+
+ mb *mailbox.Mailbox[*evmtypes.Head]
+ stopCh services.StopChan
+ wg sync.WaitGroup
+
+ lastProcessedFinalizedBlockNum int64
+}
+
+func NewEvmFinalizer(
+ lggr logger.Logger,
+ chainId *big.Int,
+ rpcBatchSize uint32,
+ txStore finalizerTxStore,
+ client finalizerChainClient,
+ headTracker finalizerHeadTracker,
+) *evmFinalizer {
+ lggr = logger.Named(lggr, "Finalizer")
+ return &evmFinalizer{
+ lggr: logger.Sugared(lggr),
+ chainId: chainId,
+ rpcBatchSize: int(rpcBatchSize),
+ txStore: txStore,
+ client: client,
+ headTracker: headTracker,
+ mb: mailbox.NewSingle[*evmtypes.Head](),
+ }
+}
+
+// Start the finalizer
+func (f *evmFinalizer) Start(ctx context.Context) error {
+ return f.StartOnce("Finalizer", func() error {
+ f.lggr.Debugf("started Finalizer with RPC batch size limit: %d", f.rpcBatchSize)
+ f.stopCh = make(chan struct{})
+ f.wg.Add(1)
+ go f.runLoop()
+ return nil
+ })
+}
+
+// Close the finalizer
+func (f *evmFinalizer) Close() error {
+ return f.StopOnce("Finalizer", func() error {
+ f.lggr.Debug("closing Finalizer")
+ close(f.stopCh)
+ f.wg.Wait()
+ return nil
+ })
+}
+
+func (f *evmFinalizer) Name() string {
+ return f.lggr.Name()
+}
+
+func (f *evmFinalizer) HealthReport() map[string]error {
+ return map[string]error{f.Name(): f.Healthy()}
+}
+
+func (f *evmFinalizer) runLoop() {
+ defer f.wg.Done()
+ ctx, cancel := f.stopCh.NewCtx()
+ defer cancel()
+ for {
+ select {
+ case <-f.mb.Notify():
+ for {
+ if ctx.Err() != nil {
+ return
+ }
+ head, exists := f.mb.Retrieve()
+ if !exists {
+ break
+ }
+ if err := f.ProcessHead(ctx, head); err != nil {
+ f.lggr.Errorw("Error processing head", "err", err)
+ f.SvcErrBuffer.Append(err)
+ continue
+ }
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func (f *evmFinalizer) DeliverLatestHead(head *evmtypes.Head) bool {
+ return f.mb.Deliver(head)
+}
+
+func (f *evmFinalizer) ProcessHead(ctx context.Context, head *evmtypes.Head) error {
+ ctx, cancel := context.WithTimeout(ctx, processHeadTimeout)
+ defer cancel()
+ _, latestFinalizedHead, err := f.headTracker.LatestAndFinalizedBlock(ctx)
+ if err != nil {
+ return fmt.Errorf("failed to retrieve latest finalized head: %w", err)
+ }
+ return f.processFinalizedHead(ctx, latestFinalizedHead)
+}
+
+// Determines if any confirmed transactions can be marked as finalized by comparing their receipts against the latest finalized block
+func (f *evmFinalizer) processFinalizedHead(ctx context.Context, latestFinalizedHead *evmtypes.Head) error {
+ // Cannot determine finality without a finalized head for comparison
+ if latestFinalizedHead == nil || !latestFinalizedHead.IsValid() {
+ return fmt.Errorf("invalid latestFinalizedHead")
+ }
+ // Only continue processing if the latestFinalizedHead has not already been processed
+ // Helps avoid unnecessary processing on every head if blocks are finalized in batches
+ if latestFinalizedHead.BlockNumber() == f.lastProcessedFinalizedBlockNum {
+ return nil
+ }
+ if latestFinalizedHead.BlockNumber() < f.lastProcessedFinalizedBlockNum {
+ f.lggr.Errorw("Received finalized block older than one already processed. This should never happen and could be an issue with RPCs.", "lastProcessedFinalizedBlockNum", f.lastProcessedFinalizedBlockNum, "retrievedFinalizedBlockNum", latestFinalizedHead.BlockNumber())
+ return nil
+ }
+
+ earliestBlockNumInChain := latestFinalizedHead.EarliestHeadInChain().BlockNumber()
+ f.lggr.Debugw("processing latest finalized head", "blockNum", latestFinalizedHead.BlockNumber(), "blockHash", latestFinalizedHead.BlockHash(), "earliestBlockNumInChain", earliestBlockNumInChain)
+
+ // Retrieve all confirmed transactions with receipts older than or equal to the finalized block, loaded with attempts and receipts
+ unfinalizedReceipts, err := f.txStore.FindConfirmedTxesReceipts(ctx, latestFinalizedHead.BlockNumber(), f.chainId)
+ if err != nil {
+ return fmt.Errorf("failed to retrieve receipts for confirmed, unfinalized transactions: %w", err)
+ }
+
+ var finalizedReceipts []Receipt
+	// Group by block number the receipts that cannot be validated using the cached heads
+ blockNumToReceiptsMap := make(map[int64][]Receipt)
+ // Find transactions with receipt block nums older than the latest finalized block num and block hashes still in chain
+ for _, receipt := range unfinalizedReceipts {
+ // The tx store query ensures transactions have receipts but leaving this check here for a belts and braces approach
+ if receipt.Receipt.IsZero() || receipt.Receipt.IsUnmined() {
+ f.lggr.AssumptionViolationw("invalid receipt found for confirmed transaction", "receipt", receipt)
+ continue
+ }
+ // The tx store query only returns transactions with receipts older than or equal to the finalized block but leaving this check here for a belts and braces approach
+ if receipt.BlockNumber > latestFinalizedHead.BlockNumber() {
+ continue
+ }
+ // Receipt block num older than earliest head in chain. Validate hash using RPC call later
+ if receipt.BlockNumber < earliestBlockNumInChain {
+ blockNumToReceiptsMap[receipt.BlockNumber] = append(blockNumToReceiptsMap[receipt.BlockNumber], receipt)
+ continue
+ }
+ blockHashInChain := latestFinalizedHead.HashAtHeight(receipt.BlockNumber)
+ // Receipt block hash does not match the block hash in chain. Transaction has been re-org'd out but DB state has not been updated yet
+ if blockHashInChain.String() != receipt.BlockHash.String() {
+ // Log error if a transaction is marked as confirmed with a receipt older than the finalized block
+ // This scenario could potentially point to a re-org'd transaction the Confirmer has lost track of
+ f.lggr.Errorw("found confirmed transaction with re-org'd receipt older than finalized block", "receipt", receipt, "onchainBlockHash", blockHashInChain.String())
+ continue
+ }
+ finalizedReceipts = append(finalizedReceipts, receipt)
+ }
+
+ // Check if block hashes exist for receipts on-chain older than the earliest cached head
+	// Transactions are grouped by their receipt block number to avoid repeat requests on the same block in case transactions were confirmed in the same block
+ validatedReceipts := f.batchCheckReceiptHashesOnchain(ctx, blockNumToReceiptsMap)
+ finalizedReceipts = append(finalizedReceipts, validatedReceipts...)
+
+ receiptIDs := f.buildReceiptIdList(finalizedReceipts)
+
+ err = f.txStore.UpdateTxStatesToFinalizedUsingReceiptIds(ctx, receiptIDs, f.chainId)
+ if err != nil {
+ return fmt.Errorf("failed to update transactions as finalized: %w", err)
+ }
+ // Update lastProcessedFinalizedBlockNum after processing has completed to allow failed processing to retry on subsequent heads
+ // Does not need to be protected with mutex lock because the Finalizer only runs in a single loop
+ f.lastProcessedFinalizedBlockNum = latestFinalizedHead.BlockNumber()
+ return nil
+}
+
+func (f *evmFinalizer) batchCheckReceiptHashesOnchain(ctx context.Context, blockNumToReceiptsMap map[int64][]Receipt) []Receipt {
+ if len(blockNumToReceiptsMap) == 0 {
+ return nil
+ }
+ // Group the RPC batch calls in groups of rpcBatchSize
+ var rpcBatchGroups [][]rpc.BatchElem
+ var rpcBatch []rpc.BatchElem
+ for blockNum := range blockNumToReceiptsMap {
+ elem := rpc.BatchElem{
+ Method: "eth_getBlockByNumber",
+ Args: []any{
+ hexutil.EncodeBig(big.NewInt(blockNum)),
+ false,
+ },
+ Result: new(evmtypes.Head),
+ }
+ rpcBatch = append(rpcBatch, elem)
+ if len(rpcBatch) >= f.rpcBatchSize {
+ rpcBatchGroups = append(rpcBatchGroups, rpcBatch)
+ rpcBatch = []rpc.BatchElem{}
+ }
+ }
+ if len(rpcBatch) > 0 {
+ rpcBatchGroups = append(rpcBatchGroups, rpcBatch)
+ }
+
+ var finalizedReceipts []Receipt
+ for _, rpcBatch := range rpcBatchGroups {
+ err := f.client.BatchCallContext(ctx, rpcBatch)
+ if err != nil {
+ // Continue if batch RPC call failed so other batches can still be considered for finalization
+ f.lggr.Errorw("failed to find blocks due to batch call failure", "error", err)
+ continue
+ }
+ for _, req := range rpcBatch {
+ if req.Error != nil {
+ // Continue if particular RPC call failed so other txs can still be considered for finalization
+ f.lggr.Errorw("failed to find block by number", "blockNum", req.Args[0], "error", req.Error)
+ continue
+ }
+ head, ok := req.Result.(*evmtypes.Head)
+ if !ok || !head.IsValid() {
+ // Continue if particular RPC call yielded a nil block so other txs can still be considered for finalization
+ f.lggr.Errorw("retrieved nil head for block number", "blockNum", req.Args[0])
+ continue
+ }
+ receipts := blockNumToReceiptsMap[head.BlockNumber()]
+ // Check if transaction receipts match the block hash at the given block num
+ // If they do not, the transactions may have been re-org'd out
+ // The expectation is for the Confirmer to pick up on these re-orgs and get the transaction included
+ for _, receipt := range receipts {
+ if receipt.BlockHash.String() == head.BlockHash().String() {
+ finalizedReceipts = append(finalizedReceipts, receipt)
+ } else {
+ // Log error if a transaction is marked as confirmed with a receipt older than the finalized block
+ // This scenario could potentially point to a re-org'd transaction the Confirmer has lost track of
+ f.lggr.Errorw("found confirmed transaction with re-org'd receipt older than finalized block", "receipt", receipt, "onchainBlockHash", head.BlockHash().String())
+ }
+ }
+ }
+ }
+ return finalizedReceipts
+}
+
+// Build list of receipt IDs for the transactions considered finalized
+func (f *evmFinalizer) buildReceiptIdList(finalizedReceipts []Receipt) []int64 {
+ receiptIds := make([]int64, len(finalizedReceipts))
+ for i, receipt := range finalizedReceipts {
+ f.lggr.Debugw("transaction considered finalized",
+ "txHash", receipt.TxHash.String(),
+ "receiptBlockNum", receipt.BlockNumber,
+ "receiptBlockHash", receipt.BlockHash.String(),
+ )
+ receiptIds[i] = receipt.ID
+ }
+ return receiptIds
+}
diff --git a/core/chains/evm/txmgr/finalizer_test.go b/core/chains/evm/txmgr/finalizer_test.go
new file mode 100644
index 00000000000..f83a53bf499
--- /dev/null
+++ b/core/chains/evm/txmgr/finalizer_test.go
@@ -0,0 +1,240 @@
+package txmgr_test
+
+import (
+ "errors"
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/google/uuid"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
+ "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+
+ txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
+)
+
+func TestFinalizer_MarkTxFinalized(t *testing.T) {
+ t.Parallel()
+ ctx := tests.Context(t)
+ db := pgtest.NewSqlxDB(t)
+ txStore := cltest.NewTestTxStore(t, db)
+ ethKeyStore := cltest.NewKeyStore(t, db).Eth()
+ feeLimit := uint64(10_000)
+ ethClient := testutils.NewEthClientMockWithDefaultChain(t)
+ rpcBatchSize := uint32(1)
+ ht := headtracker.NewSimulatedHeadTracker(ethClient, true, 0)
+
+ head := &evmtypes.Head{
+ Hash: utils.NewHash(),
+ Number: 100,
+ Parent: &evmtypes.Head{
+ Hash: utils.NewHash(),
+ Number: 99,
+ IsFinalized: true,
+ },
+ }
+
+ t.Run("returns not finalized for tx with receipt newer than finalized block", func(t *testing.T) {
+ finalizer := txmgr.NewEvmFinalizer(logger.Test(t), testutils.FixtureChainID, rpcBatchSize, txStore, ethClient, ht)
+ servicetest.Run(t, finalizer)
+
+ idempotencyKey := uuid.New().String()
+ _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore)
+ nonce := evmtypes.Nonce(0)
+ broadcast := time.Now()
+ tx := &txmgr.Tx{
+ Sequence: &nonce,
+ IdempotencyKey: &idempotencyKey,
+ FromAddress: fromAddress,
+ EncodedPayload: []byte{1, 2, 3},
+ FeeLimit: feeLimit,
+ State: txmgrcommon.TxConfirmed,
+ BroadcastAt: &broadcast,
+ InitialBroadcastAt: &broadcast,
+ }
+ attemptHash := insertTxAndAttemptWithIdempotencyKey(t, txStore, tx, idempotencyKey)
+ // Insert receipt for unfinalized block num
+ mustInsertEthReceipt(t, txStore, head.Number, head.Hash, attemptHash)
+ ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(head, nil).Once()
+ ethClient.On("LatestFinalizedBlock", mock.Anything).Return(head.Parent, nil).Once()
+ err := finalizer.ProcessHead(ctx, head)
+ require.NoError(t, err)
+ tx, err = txStore.FindTxWithIdempotencyKey(ctx, idempotencyKey, testutils.FixtureChainID)
+ require.NoError(t, err)
+ require.Equal(t, txmgrcommon.TxConfirmed, tx.State)
+ })
+
+ t.Run("returns not finalized for tx with receipt re-org'd out", func(t *testing.T) {
+ finalizer := txmgr.NewEvmFinalizer(logger.Test(t), testutils.FixtureChainID, rpcBatchSize, txStore, ethClient, ht)
+ servicetest.Run(t, finalizer)
+
+ idempotencyKey := uuid.New().String()
+ _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore)
+ nonce := evmtypes.Nonce(0)
+ broadcast := time.Now()
+ tx := &txmgr.Tx{
+ Sequence: &nonce,
+ IdempotencyKey: &idempotencyKey,
+ FromAddress: fromAddress,
+ EncodedPayload: []byte{1, 2, 3},
+ FeeLimit: feeLimit,
+ State: txmgrcommon.TxConfirmed,
+ BroadcastAt: &broadcast,
+ InitialBroadcastAt: &broadcast,
+ }
+ attemptHash := insertTxAndAttemptWithIdempotencyKey(t, txStore, tx, idempotencyKey)
+		// Insert receipt with a mismatched block hash at the finalized block num to simulate a re-org'd receipt
+ mustInsertEthReceipt(t, txStore, head.Parent.Number, utils.NewHash(), attemptHash)
+ ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(head, nil).Once()
+ ethClient.On("LatestFinalizedBlock", mock.Anything).Return(head.Parent, nil).Once()
+ err := finalizer.ProcessHead(ctx, head)
+ require.NoError(t, err)
+ tx, err = txStore.FindTxWithIdempotencyKey(ctx, idempotencyKey, testutils.FixtureChainID)
+ require.NoError(t, err)
+ require.Equal(t, txmgrcommon.TxConfirmed, tx.State)
+ })
+
+ t.Run("returns finalized for tx with receipt in a finalized block", func(t *testing.T) {
+ finalizer := txmgr.NewEvmFinalizer(logger.Test(t), testutils.FixtureChainID, rpcBatchSize, txStore, ethClient, ht)
+ servicetest.Run(t, finalizer)
+
+ idempotencyKey := uuid.New().String()
+ _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore)
+ nonce := evmtypes.Nonce(0)
+ broadcast := time.Now()
+ tx := &txmgr.Tx{
+ Sequence: &nonce,
+ IdempotencyKey: &idempotencyKey,
+ FromAddress: fromAddress,
+ EncodedPayload: []byte{1, 2, 3},
+ FeeLimit: feeLimit,
+ State: txmgrcommon.TxConfirmed,
+ BroadcastAt: &broadcast,
+ InitialBroadcastAt: &broadcast,
+ }
+ attemptHash := insertTxAndAttemptWithIdempotencyKey(t, txStore, tx, idempotencyKey)
+ // Insert receipt for finalized block num
+ mustInsertEthReceipt(t, txStore, head.Parent.Number, head.Parent.Hash, attemptHash)
+ ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(head, nil).Once()
+ ethClient.On("LatestFinalizedBlock", mock.Anything).Return(head.Parent, nil).Once()
+ err := finalizer.ProcessHead(ctx, head)
+ require.NoError(t, err)
+ tx, err = txStore.FindTxWithIdempotencyKey(ctx, idempotencyKey, testutils.FixtureChainID)
+ require.NoError(t, err)
+ require.Equal(t, txmgrcommon.TxFinalized, tx.State)
+ })
+
+ t.Run("returns finalized for tx with receipt older than block history depth", func(t *testing.T) {
+ finalizer := txmgr.NewEvmFinalizer(logger.Test(t), testutils.FixtureChainID, rpcBatchSize, txStore, ethClient, ht)
+ servicetest.Run(t, finalizer)
+
+ idempotencyKey := uuid.New().String()
+ _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore)
+ nonce := evmtypes.Nonce(0)
+ broadcast := time.Now()
+ tx := &txmgr.Tx{
+ Sequence: &nonce,
+ IdempotencyKey: &idempotencyKey,
+ FromAddress: fromAddress,
+ EncodedPayload: []byte{1, 2, 3},
+ FeeLimit: feeLimit,
+ State: txmgrcommon.TxConfirmed,
+ BroadcastAt: &broadcast,
+ InitialBroadcastAt: &broadcast,
+ }
+ attemptHash := insertTxAndAttemptWithIdempotencyKey(t, txStore, tx, idempotencyKey)
+		// Insert receipt for a block num older than the finalized block
+ receiptBlockHash1 := utils.NewHash()
+ mustInsertEthReceipt(t, txStore, head.Parent.Number-2, receiptBlockHash1, attemptHash)
+ idempotencyKey = uuid.New().String()
+ nonce = evmtypes.Nonce(1)
+ tx = &txmgr.Tx{
+ Sequence: &nonce,
+ IdempotencyKey: &idempotencyKey,
+ FromAddress: fromAddress,
+ EncodedPayload: []byte{1, 2, 3},
+ FeeLimit: feeLimit,
+ State: txmgrcommon.TxConfirmed,
+ BroadcastAt: &broadcast,
+ InitialBroadcastAt: &broadcast,
+ }
+ attemptHash = insertTxAndAttemptWithIdempotencyKey(t, txStore, tx, idempotencyKey)
+		// Insert receipt for a block num older than the finalized block
+ receiptBlockHash2 := utils.NewHash()
+ mustInsertEthReceipt(t, txStore, head.Parent.Number-1, receiptBlockHash2, attemptHash)
+ // Separate batch calls will be made for each tx due to RPC batch size set to 1 when finalizer initialized above
+ ethClient.On("BatchCallContext", mock.Anything, mock.IsType([]rpc.BatchElem{})).Run(func(args mock.Arguments) {
+ rpcElements := args.Get(1).([]rpc.BatchElem)
+ require.Equal(t, 1, len(rpcElements))
+
+ require.Equal(t, "eth_getBlockByNumber", rpcElements[0].Method)
+ require.Equal(t, false, rpcElements[0].Args[1])
+
+ reqBlockNum := rpcElements[0].Args[0].(string)
+ req1BlockNum := hexutil.EncodeBig(big.NewInt(head.Parent.Number - 2))
+ req2BlockNum := hexutil.EncodeBig(big.NewInt(head.Parent.Number - 1))
+ var headResult evmtypes.Head
+ if req1BlockNum == reqBlockNum {
+ headResult = evmtypes.Head{Number: head.Parent.Number - 2, Hash: receiptBlockHash1}
+ } else if req2BlockNum == reqBlockNum {
+ headResult = evmtypes.Head{Number: head.Parent.Number - 1, Hash: receiptBlockHash2}
+ } else {
+ require.Fail(t, "unrecognized block hash")
+ }
+ rpcElements[0].Result = &headResult
+ }).Return(nil).Twice()
+ ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(head, nil).Once()
+ ethClient.On("LatestFinalizedBlock", mock.Anything).Return(head.Parent, nil).Once()
+ err := finalizer.ProcessHead(ctx, head)
+ require.NoError(t, err)
+ tx, err = txStore.FindTxWithIdempotencyKey(ctx, idempotencyKey, testutils.FixtureChainID)
+ require.NoError(t, err)
+ require.Equal(t, txmgrcommon.TxFinalized, tx.State)
+ })
+
+ t.Run("returns error if failed to retrieve latest head in headtracker", func(t *testing.T) {
+ finalizer := txmgr.NewEvmFinalizer(logger.Test(t), testutils.FixtureChainID, rpcBatchSize, txStore, ethClient, ht)
+ servicetest.Run(t, finalizer)
+
+ ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(nil, errors.New("failed to get latest head")).Once()
+ err := finalizer.ProcessHead(ctx, head)
+ require.Error(t, err)
+ })
+
+ t.Run("returns error if failed to calculate latest finalized head in headtracker", func(t *testing.T) {
+ finalizer := txmgr.NewEvmFinalizer(logger.Test(t), testutils.FixtureChainID, rpcBatchSize, txStore, ethClient, ht)
+ servicetest.Run(t, finalizer)
+
+ ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(head, nil).Once()
+ ethClient.On("LatestFinalizedBlock", mock.Anything).Return(nil, errors.New("failed to calculate latest finalized head")).Once()
+ err := finalizer.ProcessHead(ctx, head)
+ require.Error(t, err)
+ })
+}
+
+func insertTxAndAttemptWithIdempotencyKey(t *testing.T, txStore txmgr.TestEvmTxStore, tx *txmgr.Tx, idempotencyKey string) common.Hash {
+ ctx := tests.Context(t)
+ err := txStore.InsertTx(ctx, tx)
+ require.NoError(t, err)
+ tx, err = txStore.FindTxWithIdempotencyKey(ctx, idempotencyKey, testutils.FixtureChainID)
+ require.NoError(t, err)
+ attempt := cltest.NewLegacyEthTxAttempt(t, tx.ID)
+ err = txStore.InsertTxAttempt(ctx, &attempt)
+ require.NoError(t, err)
+ return attempt.Hash
+}
diff --git a/core/chains/evm/txmgr/mocks/evm_tx_store.go b/core/chains/evm/txmgr/mocks/evm_tx_store.go
index b28e55ec324..b40c0ca8376 100644
--- a/core/chains/evm/txmgr/mocks/evm_tx_store.go
+++ b/core/chains/evm/txmgr/mocks/evm_tx_store.go
@@ -18,6 +18,8 @@ import (
time "time"
+ txmgr "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
+
types "github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
uuid "github.com/google/uuid"
@@ -444,6 +446,66 @@ func (_c *EvmTxStore_DeleteInProgressAttempt_Call) RunAndReturn(run func(context
return _c
}
+// FindConfirmedTxesReceipts provides a mock function with given fields: ctx, finalizedBlockNum, chainID
+func (_m *EvmTxStore) FindConfirmedTxesReceipts(ctx context.Context, finalizedBlockNum int64, chainID *big.Int) ([]txmgr.DbReceipt, error) {
+ ret := _m.Called(ctx, finalizedBlockNum, chainID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for FindConfirmedTxesReceipts")
+ }
+
+ var r0 []txmgr.DbReceipt
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64, *big.Int) ([]txmgr.DbReceipt, error)); ok {
+ return rf(ctx, finalizedBlockNum, chainID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64, *big.Int) []txmgr.DbReceipt); ok {
+ r0 = rf(ctx, finalizedBlockNum, chainID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]txmgr.DbReceipt)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, int64, *big.Int) error); ok {
+ r1 = rf(ctx, finalizedBlockNum, chainID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// EvmTxStore_FindConfirmedTxesReceipts_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FindConfirmedTxesReceipts'
+type EvmTxStore_FindConfirmedTxesReceipts_Call struct {
+ *mock.Call
+}
+
+// FindConfirmedTxesReceipts is a helper method to define mock.On call
+// - ctx context.Context
+// - finalizedBlockNum int64
+// - chainID *big.Int
+func (_e *EvmTxStore_Expecter) FindConfirmedTxesReceipts(ctx interface{}, finalizedBlockNum interface{}, chainID interface{}) *EvmTxStore_FindConfirmedTxesReceipts_Call {
+ return &EvmTxStore_FindConfirmedTxesReceipts_Call{Call: _e.mock.On("FindConfirmedTxesReceipts", ctx, finalizedBlockNum, chainID)}
+}
+
+func (_c *EvmTxStore_FindConfirmedTxesReceipts_Call) Run(run func(ctx context.Context, finalizedBlockNum int64, chainID *big.Int)) *EvmTxStore_FindConfirmedTxesReceipts_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64), args[2].(*big.Int))
+ })
+ return _c
+}
+
+func (_c *EvmTxStore_FindConfirmedTxesReceipts_Call) Return(receipts []txmgr.DbReceipt, err error) *EvmTxStore_FindConfirmedTxesReceipts_Call {
+ _c.Call.Return(receipts, err)
+ return _c
+}
+
+func (_c *EvmTxStore_FindConfirmedTxesReceipts_Call) RunAndReturn(run func(context.Context, int64, *big.Int) ([]txmgr.DbReceipt, error)) *EvmTxStore_FindConfirmedTxesReceipts_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// FindEarliestUnconfirmedBroadcastTime provides a mock function with given fields: ctx, chainID
func (_m *EvmTxStore) FindEarliestUnconfirmedBroadcastTime(ctx context.Context, chainID *big.Int) (null.Time, error) {
ret := _m.Called(ctx, chainID)
@@ -2058,65 +2120,6 @@ func (_c *EvmTxStore_HasInProgressTransaction_Call) RunAndReturn(run func(contex
return _c
}
-// IsTxFinalized provides a mock function with given fields: ctx, blockHeight, txID, chainID
-func (_m *EvmTxStore) IsTxFinalized(ctx context.Context, blockHeight int64, txID int64, chainID *big.Int) (bool, error) {
- ret := _m.Called(ctx, blockHeight, txID, chainID)
-
- if len(ret) == 0 {
- panic("no return value specified for IsTxFinalized")
- }
-
- var r0 bool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, int64, int64, *big.Int) (bool, error)); ok {
- return rf(ctx, blockHeight, txID, chainID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, int64, int64, *big.Int) bool); ok {
- r0 = rf(ctx, blockHeight, txID, chainID)
- } else {
- r0 = ret.Get(0).(bool)
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, int64, int64, *big.Int) error); ok {
- r1 = rf(ctx, blockHeight, txID, chainID)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
-// EvmTxStore_IsTxFinalized_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsTxFinalized'
-type EvmTxStore_IsTxFinalized_Call struct {
- *mock.Call
-}
-
-// IsTxFinalized is a helper method to define mock.On call
-// - ctx context.Context
-// - blockHeight int64
-// - txID int64
-// - chainID *big.Int
-func (_e *EvmTxStore_Expecter) IsTxFinalized(ctx interface{}, blockHeight interface{}, txID interface{}, chainID interface{}) *EvmTxStore_IsTxFinalized_Call {
- return &EvmTxStore_IsTxFinalized_Call{Call: _e.mock.On("IsTxFinalized", ctx, blockHeight, txID, chainID)}
-}
-
-func (_c *EvmTxStore_IsTxFinalized_Call) Run(run func(ctx context.Context, blockHeight int64, txID int64, chainID *big.Int)) *EvmTxStore_IsTxFinalized_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(int64), args[2].(int64), args[3].(*big.Int))
- })
- return _c
-}
-
-func (_c *EvmTxStore_IsTxFinalized_Call) Return(finalized bool, err error) *EvmTxStore_IsTxFinalized_Call {
- _c.Call.Return(finalized, err)
- return _c
-}
-
-func (_c *EvmTxStore_IsTxFinalized_Call) RunAndReturn(run func(context.Context, int64, int64, *big.Int) (bool, error)) *EvmTxStore_IsTxFinalized_Call {
- _c.Call.Return(run)
- return _c
-}
-
// LoadTxAttempts provides a mock function with given fields: ctx, etx
func (_m *EvmTxStore) LoadTxAttempts(ctx context.Context, etx *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error {
ret := _m.Called(ctx, etx)
@@ -2367,17 +2370,17 @@ func (_c *EvmTxStore_PruneUnstartedTxQueue_Call) RunAndReturn(run func(context.C
return _c
}
-// ReapTxHistory provides a mock function with given fields: ctx, minBlockNumberToKeep, timeThreshold, chainID
-func (_m *EvmTxStore) ReapTxHistory(ctx context.Context, minBlockNumberToKeep int64, timeThreshold time.Time, chainID *big.Int) error {
- ret := _m.Called(ctx, minBlockNumberToKeep, timeThreshold, chainID)
+// ReapTxHistory provides a mock function with given fields: ctx, timeThreshold, chainID
+func (_m *EvmTxStore) ReapTxHistory(ctx context.Context, timeThreshold time.Time, chainID *big.Int) error {
+ ret := _m.Called(ctx, timeThreshold, chainID)
if len(ret) == 0 {
panic("no return value specified for ReapTxHistory")
}
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, int64, time.Time, *big.Int) error); ok {
- r0 = rf(ctx, minBlockNumberToKeep, timeThreshold, chainID)
+ if rf, ok := ret.Get(0).(func(context.Context, time.Time, *big.Int) error); ok {
+ r0 = rf(ctx, timeThreshold, chainID)
} else {
r0 = ret.Error(0)
}
@@ -2392,16 +2395,15 @@ type EvmTxStore_ReapTxHistory_Call struct {
// ReapTxHistory is a helper method to define mock.On call
// - ctx context.Context
-// - minBlockNumberToKeep int64
// - timeThreshold time.Time
// - chainID *big.Int
-func (_e *EvmTxStore_Expecter) ReapTxHistory(ctx interface{}, minBlockNumberToKeep interface{}, timeThreshold interface{}, chainID interface{}) *EvmTxStore_ReapTxHistory_Call {
- return &EvmTxStore_ReapTxHistory_Call{Call: _e.mock.On("ReapTxHistory", ctx, minBlockNumberToKeep, timeThreshold, chainID)}
+func (_e *EvmTxStore_Expecter) ReapTxHistory(ctx interface{}, timeThreshold interface{}, chainID interface{}) *EvmTxStore_ReapTxHistory_Call {
+ return &EvmTxStore_ReapTxHistory_Call{Call: _e.mock.On("ReapTxHistory", ctx, timeThreshold, chainID)}
}
-func (_c *EvmTxStore_ReapTxHistory_Call) Run(run func(ctx context.Context, minBlockNumberToKeep int64, timeThreshold time.Time, chainID *big.Int)) *EvmTxStore_ReapTxHistory_Call {
+func (_c *EvmTxStore_ReapTxHistory_Call) Run(run func(ctx context.Context, timeThreshold time.Time, chainID *big.Int)) *EvmTxStore_ReapTxHistory_Call {
_c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(int64), args[2].(time.Time), args[3].(*big.Int))
+ run(args[0].(context.Context), args[1].(time.Time), args[2].(*big.Int))
})
return _c
}
@@ -2411,7 +2413,7 @@ func (_c *EvmTxStore_ReapTxHistory_Call) Return(_a0 error) *EvmTxStore_ReapTxHis
return _c
}
-func (_c *EvmTxStore_ReapTxHistory_Call) RunAndReturn(run func(context.Context, int64, time.Time, *big.Int) error) *EvmTxStore_ReapTxHistory_Call {
+func (_c *EvmTxStore_ReapTxHistory_Call) RunAndReturn(run func(context.Context, time.Time, *big.Int) error) *EvmTxStore_ReapTxHistory_Call {
_c.Call.Return(run)
return _c
}
@@ -3197,6 +3199,54 @@ func (_c *EvmTxStore_UpdateTxForRebroadcast_Call) RunAndReturn(run func(context.
return _c
}
+// UpdateTxStatesToFinalizedUsingReceiptIds provides a mock function with given fields: ctx, etxIDs, chainId
+func (_m *EvmTxStore) UpdateTxStatesToFinalizedUsingReceiptIds(ctx context.Context, etxIDs []int64, chainId *big.Int) error {
+ ret := _m.Called(ctx, etxIDs, chainId)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateTxStatesToFinalizedUsingReceiptIds")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, []int64, *big.Int) error); ok {
+ r0 = rf(ctx, etxIDs, chainId)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// EvmTxStore_UpdateTxStatesToFinalizedUsingReceiptIds_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateTxStatesToFinalizedUsingReceiptIds'
+type EvmTxStore_UpdateTxStatesToFinalizedUsingReceiptIds_Call struct {
+ *mock.Call
+}
+
+// UpdateTxStatesToFinalizedUsingReceiptIds is a helper method to define mock.On call
+// - ctx context.Context
+// - etxIDs []int64
+// - chainId *big.Int
+func (_e *EvmTxStore_Expecter) UpdateTxStatesToFinalizedUsingReceiptIds(ctx interface{}, etxIDs interface{}, chainId interface{}) *EvmTxStore_UpdateTxStatesToFinalizedUsingReceiptIds_Call {
+ return &EvmTxStore_UpdateTxStatesToFinalizedUsingReceiptIds_Call{Call: _e.mock.On("UpdateTxStatesToFinalizedUsingReceiptIds", ctx, etxIDs, chainId)}
+}
+
+func (_c *EvmTxStore_UpdateTxStatesToFinalizedUsingReceiptIds_Call) Run(run func(ctx context.Context, etxIDs []int64, chainId *big.Int)) *EvmTxStore_UpdateTxStatesToFinalizedUsingReceiptIds_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].([]int64), args[2].(*big.Int))
+ })
+ return _c
+}
+
+func (_c *EvmTxStore_UpdateTxStatesToFinalizedUsingReceiptIds_Call) Return(_a0 error) *EvmTxStore_UpdateTxStatesToFinalizedUsingReceiptIds_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *EvmTxStore_UpdateTxStatesToFinalizedUsingReceiptIds_Call) RunAndReturn(run func(context.Context, []int64, *big.Int) error) *EvmTxStore_UpdateTxStatesToFinalizedUsingReceiptIds_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// UpdateTxUnstartedToInProgress provides a mock function with given fields: ctx, etx, attempt
func (_m *EvmTxStore) UpdateTxUnstartedToInProgress(ctx context.Context, etx *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], attempt *types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error {
ret := _m.Called(ctx, etx, attempt)
diff --git a/core/chains/evm/txmgr/models.go b/core/chains/evm/txmgr/models.go
index f8682ffd500..1ba3d193cba 100644
--- a/core/chains/evm/txmgr/models.go
+++ b/core/chains/evm/txmgr/models.go
@@ -36,12 +36,13 @@ type (
Tx = txmgrtypes.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]
TxMeta = txmgrtypes.TxMeta[common.Address, common.Hash]
TxAttempt = txmgrtypes.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]
- Receipt = dbReceipt // EvmReceipt is the exported DB table model for receipts
+ Receipt = DbReceipt // DbReceipt is the exported DB table model for receipts
ReceiptPlus = txmgrtypes.ReceiptPlus[*evmtypes.Receipt]
StuckTxDetector = txmgrtypes.StuckTxDetector[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]
TxmClient = txmgrtypes.TxmClient[*big.Int, common.Address, common.Hash, common.Hash, *evmtypes.Receipt, evmtypes.Nonce, gas.EvmFee]
TransactionClient = txmgrtypes.TransactionClient[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]
ChainReceipt = txmgrtypes.ChainReceipt[common.Hash, common.Hash]
+ Finalizer = txmgrtypes.Finalizer[common.Hash, *evmtypes.Head]
)
var _ KeyStore = (keystore.Eth)(nil) // check interface in txmgr to avoid circular import
diff --git a/core/chains/evm/txmgr/reaper_test.go b/core/chains/evm/txmgr/reaper_test.go
index b3ce48b702c..cfaccdf04eb 100644
--- a/core/chains/evm/txmgr/reaper_test.go
+++ b/core/chains/evm/txmgr/reaper_test.go
@@ -12,18 +12,17 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/utils"
txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
- txmgrmocks "github.com/smartcontractkit/chainlink/v2/common/txmgr/types/mocks"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
)
-func newReaperWithChainID(t *testing.T, db txmgrtypes.TxHistoryReaper[*big.Int], cfg txmgrtypes.ReaperChainConfig, txConfig txmgrtypes.ReaperTransactionsConfig, cid *big.Int) *txmgr.Reaper {
- return txmgr.NewEvmReaper(logger.Test(t), db, cfg, txConfig, cid)
+func newReaperWithChainID(t *testing.T, db txmgrtypes.TxHistoryReaper[*big.Int], txConfig txmgrtypes.ReaperTransactionsConfig, cid *big.Int) *txmgr.Reaper {
+ return txmgr.NewEvmReaper(logger.Test(t), db, txConfig, cid)
}
-func newReaper(t *testing.T, db txmgrtypes.TxHistoryReaper[*big.Int], cfg txmgrtypes.ReaperChainConfig, txConfig txmgrtypes.ReaperTransactionsConfig) *txmgr.Reaper {
- return newReaperWithChainID(t, db, cfg, txConfig, &cltest.FixtureChainID)
+func newReaper(t *testing.T, db txmgrtypes.TxHistoryReaper[*big.Int], txConfig txmgrtypes.ReaperTransactionsConfig) *txmgr.Reaper {
+ return newReaperWithChainID(t, db, txConfig, &cltest.FixtureChainID)
}
type reaperConfig struct {
@@ -51,12 +50,9 @@ func TestReaper_ReapTxes(t *testing.T) {
oneDayAgo := time.Now().Add(-24 * time.Hour)
t.Run("with nothing in the database, doesn't error", func(t *testing.T) {
- config := txmgrmocks.NewReaperConfig(t)
- config.On("FinalityDepth").Return(uint32(10))
-
tc := &reaperConfig{reaperThreshold: 1 * time.Hour}
- r := newReaper(t, txStore, config, tc)
+ r := newReaper(t, txStore, tc)
err := r.ReapTxes(42)
assert.NoError(t, err)
@@ -66,11 +62,9 @@ func TestReaper_ReapTxes(t *testing.T) {
mustInsertConfirmedEthTxWithReceipt(t, txStore, from, nonce, 5)
t.Run("skips if threshold=0", func(t *testing.T) {
- config := txmgrmocks.NewReaperConfig(t)
-
tc := &reaperConfig{reaperThreshold: 0 * time.Second}
- r := newReaper(t, txStore, config, tc)
+ r := newReaper(t, txStore, tc)
err := r.ReapTxes(42)
assert.NoError(t, err)
@@ -79,12 +73,9 @@ func TestReaper_ReapTxes(t *testing.T) {
})
t.Run("doesn't touch ethtxes with different chain ID", func(t *testing.T) {
- config := txmgrmocks.NewReaperConfig(t)
- config.On("FinalityDepth").Return(uint32(10))
-
tc := &reaperConfig{reaperThreshold: 1 * time.Hour}
- r := newReaperWithChainID(t, txStore, config, tc, big.NewInt(42))
+ r := newReaperWithChainID(t, txStore, tc, big.NewInt(42))
err := r.ReapTxes(42)
assert.NoError(t, err)
@@ -92,41 +83,30 @@ func TestReaper_ReapTxes(t *testing.T) {
cltest.AssertCount(t, db, "evm.txes", 1)
})
- t.Run("deletes confirmed evm.txes that exceed the age threshold with at least EVM.FinalityDepth blocks above their receipt", func(t *testing.T) {
- config := txmgrmocks.NewReaperConfig(t)
- config.On("FinalityDepth").Return(uint32(10))
-
+ t.Run("deletes finalized evm.txes that exceed the age threshold", func(t *testing.T) {
tc := &reaperConfig{reaperThreshold: 1 * time.Hour}
- r := newReaper(t, txStore, config, tc)
+ r := newReaper(t, txStore, tc)
err := r.ReapTxes(42)
assert.NoError(t, err)
// Didn't delete because eth_tx was not old enough
cltest.AssertCount(t, db, "evm.txes", 1)
- pgtest.MustExec(t, db, `UPDATE evm.txes SET created_at=$1`, oneDayAgo)
-
- err = r.ReapTxes(12)
- assert.NoError(t, err)
- // Didn't delete because eth_tx although old enough, was still within EVM.FinalityDepth of the current head
- cltest.AssertCount(t, db, "evm.txes", 1)
+ pgtest.MustExec(t, db, `UPDATE evm.txes SET created_at=$1, state='finalized'`, oneDayAgo)
err = r.ReapTxes(42)
assert.NoError(t, err)
- // Now it deleted because the eth_tx was past EVM.FinalityDepth
+ // Now it deleted because the eth_tx was past the age threshold
cltest.AssertCount(t, db, "evm.txes", 0)
})
mustInsertFatalErrorEthTx(t, txStore, from)
t.Run("deletes errored evm.txes that exceed the age threshold", func(t *testing.T) {
- config := txmgrmocks.NewReaperConfig(t)
- config.On("FinalityDepth").Return(uint32(10))
-
tc := &reaperConfig{reaperThreshold: 1 * time.Hour}
- r := newReaper(t, txStore, config, tc)
+ r := newReaper(t, txStore, tc)
err := r.ReapTxes(42)
assert.NoError(t, err)
@@ -140,4 +120,24 @@ func TestReaper_ReapTxes(t *testing.T) {
// Deleted because it is old enough now
cltest.AssertCount(t, db, "evm.txes", 0)
})
+
+ mustInsertConfirmedEthTxWithReceipt(t, txStore, from, 0, 42)
+
+ t.Run("deletes confirmed evm.txes that exceed the age threshold", func(t *testing.T) {
+ tc := &reaperConfig{reaperThreshold: 1 * time.Hour}
+
+ r := newReaper(t, txStore, tc)
+
+ err := r.ReapTxes(42)
+ assert.NoError(t, err)
+ // Didn't delete because eth_tx was not old enough
+ cltest.AssertCount(t, db, "evm.txes", 1)
+
+ pgtest.MustExec(t, db, `UPDATE evm.txes SET created_at=$1`, oneDayAgo)
+
+ err = r.ReapTxes(42)
+ assert.NoError(t, err)
+ // Now it deleted because the eth_tx was past the age threshold
+ cltest.AssertCount(t, db, "evm.txes", 0)
+ })
}
diff --git a/core/chains/evm/txmgr/test_helpers.go b/core/chains/evm/txmgr/test_helpers.go
index 3b3584a988b..8d208744329 100644
--- a/core/chains/evm/txmgr/test_helpers.go
+++ b/core/chains/evm/txmgr/test_helpers.go
@@ -53,6 +53,7 @@ type TestEvmConfig struct {
Threshold uint32
MinAttempts uint32
DetectionApiUrl *url.URL
+ RpcDefaultBatchSize uint32
}
func (e *TestEvmConfig) Transactions() evmconfig.Transactions {
@@ -65,6 +66,8 @@ func (e *TestEvmConfig) FinalityDepth() uint32 { return 42 }
func (e *TestEvmConfig) ChainType() chaintype.ChainType { return "" }
+func (e *TestEvmConfig) RPCDefaultBatchSize() uint32 { return e.RpcDefaultBatchSize }
+
type TestGasEstimatorConfig struct {
bumpThreshold uint64
}
@@ -141,10 +144,9 @@ type autoPurgeConfig struct {
func (a *autoPurgeConfig) Enabled() bool { return false }
type MockConfig struct {
- EvmConfig *TestEvmConfig
- RpcDefaultBatchSize uint32
- finalityDepth uint32
- finalityTagEnabled bool
+ EvmConfig *TestEvmConfig
+ finalityDepth uint32
+ finalityTagEnabled bool
}
func (c *MockConfig) EVM() evmconfig.EVM {
@@ -156,11 +158,10 @@ func (c *MockConfig) ChainType() chaintype.ChainType { return "" }
func (c *MockConfig) FinalityDepth() uint32 { return c.finalityDepth }
func (c *MockConfig) SetFinalityDepth(fd uint32) { c.finalityDepth = fd }
func (c *MockConfig) FinalityTagEnabled() bool { return c.finalityTagEnabled }
-func (c *MockConfig) RPCDefaultBatchSize() uint32 { return c.RpcDefaultBatchSize }
func MakeTestConfigs(t *testing.T) (*MockConfig, *TestDatabaseConfig, *TestEvmConfig) {
db := &TestDatabaseConfig{defaultQueryTimeout: utils.DefaultQueryTimeout}
- ec := &TestEvmConfig{BumpThreshold: 42, MaxInFlight: uint32(42), MaxQueued: uint64(0), ReaperInterval: time.Duration(0), ReaperThreshold: time.Duration(0)}
+ ec := &TestEvmConfig{BumpThreshold: 42, MaxInFlight: uint32(42), MaxQueued: uint64(0), ReaperInterval: time.Duration(0), ReaperThreshold: time.Duration(0), RpcDefaultBatchSize: uint32(250)}
config := &MockConfig{EvmConfig: ec}
return config, db, ec
}
diff --git a/core/chains/evm/txmgr/txmgr_test.go b/core/chains/evm/txmgr/txmgr_test.go
index 40df5616c99..5f932db8720 100644
--- a/core/chains/evm/txmgr/txmgr_test.go
+++ b/core/chains/evm/txmgr/txmgr_test.go
@@ -85,7 +85,8 @@ func makeTestEvmTxm(
lggr,
lp,
keyStore,
- estimator)
+ estimator,
+ ht)
}
func TestTxm_SendNativeToken_DoesNotSendToZero(t *testing.T) {
@@ -489,14 +490,20 @@ func TestTxm_Lifecycle(t *testing.T) {
config, dbConfig, evmConfig := txmgr.MakeTestConfigs(t)
config.SetFinalityDepth(uint32(42))
- config.RpcDefaultBatchSize = uint32(4)
+ evmConfig.RpcDefaultBatchSize = uint32(4)
evmConfig.ResendAfterThreshold = 1 * time.Hour
evmConfig.ReaperThreshold = 1 * time.Hour
evmConfig.ReaperInterval = 1 * time.Hour
kst.On("EnabledAddressesForChain", mock.Anything, &cltest.FixtureChainID).Return([]common.Address{}, nil)
+ head := cltest.Head(42)
+ finalizedHead := cltest.Head(0)
+
+ ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(head, nil).Once()
+ ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(finalizedHead, nil).Once()
+
keyChangeCh := make(chan struct{})
unsub := cltest.NewAwaiter()
kst.On("SubscribeToKeyChanges", mock.Anything).Return(keyChangeCh, unsub.ItHappened)
@@ -505,7 +512,6 @@ func TestTxm_Lifecycle(t *testing.T) {
txm, err := makeTestEvmTxm(t, db, ethClient, estimator, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), kst)
require.NoError(t, err)
- head := cltest.Head(42)
// It should not hang or panic
txm.OnNewLongestChain(tests.Context(t), head)
@@ -607,8 +613,20 @@ func TestTxm_GetTransactionStatus(t *testing.T) {
gcfg := configtest.NewTestGeneralConfig(t)
cfg := evmtest.NewChainScopedConfig(t, gcfg)
+ head := &evmtypes.Head{
+ Hash: utils.NewHash(),
+ Number: 100,
+ Parent: &evmtypes.Head{
+ Hash: utils.NewHash(),
+ Number: 99,
+ IsFinalized: true,
+ },
+ }
+
ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil).Maybe()
+ ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(head, nil).Once()
+ ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(head.Parent, nil).Once()
feeEstimator := gasmocks.NewEvmFeeEstimator(t)
feeEstimator.On("Start", mock.Anything).Return(nil).Once()
feeEstimator.On("Close", mock.Anything).Return(nil).Once()
@@ -617,15 +635,6 @@ func TestTxm_GetTransactionStatus(t *testing.T) {
require.NoError(t, err)
servicetest.Run(t, txm)
- head := &evmtypes.Head{
- Hash: utils.NewHash(),
- Number: 100,
- Parent: &evmtypes.Head{
- Hash: utils.NewHash(),
- Number: 99,
- IsFinalized: true,
- },
- }
txm.OnNewLongestChain(ctx, head)
t.Run("returns error if transaction not found", func(t *testing.T) {
@@ -715,13 +724,42 @@ func TestTxm_GetTransactionStatus(t *testing.T) {
attempt := cltest.NewLegacyEthTxAttempt(t, tx.ID)
err = txStore.InsertTxAttempt(ctx, &attempt)
require.NoError(t, err)
- // Insert receipt for finalized block num
- mustInsertEthReceipt(t, txStore, head.Parent.Number, head.ParentHash, attempt.Hash)
+ // Insert receipt for unfinalized block num
+ mustInsertEthReceipt(t, txStore, head.Number, head.Hash, attempt.Hash)
state, err := txm.GetTransactionStatus(ctx, idempotencyKey)
require.NoError(t, err)
require.Equal(t, commontypes.Unconfirmed, state)
})
+ t.Run("returns finalized for finalized state", func(t *testing.T) {
+ idempotencyKey := uuid.New().String()
+ _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore)
+ nonce := evmtypes.Nonce(0)
+ broadcast := time.Now()
+ tx := &txmgr.Tx{
+ Sequence: &nonce,
+ IdempotencyKey: &idempotencyKey,
+ FromAddress: fromAddress,
+ EncodedPayload: []byte{1, 2, 3},
+ FeeLimit: feeLimit,
+ State: txmgrcommon.TxFinalized,
+ BroadcastAt: &broadcast,
+ InitialBroadcastAt: &broadcast,
+ }
+ err := txStore.InsertTx(ctx, tx)
+ require.NoError(t, err)
+ tx, err = txStore.FindTxWithIdempotencyKey(ctx, idempotencyKey, testutils.FixtureChainID)
+ require.NoError(t, err)
+ attempt := cltest.NewLegacyEthTxAttempt(t, tx.ID)
+ err = txStore.InsertTxAttempt(ctx, &attempt)
+ require.NoError(t, err)
+ // Insert receipt for finalized block num
+ mustInsertEthReceipt(t, txStore, head.Parent.Number, head.Parent.Hash, attempt.Hash)
+ state, err := txm.GetTransactionStatus(ctx, idempotencyKey)
+ require.NoError(t, err)
+ require.Equal(t, commontypes.Finalized, state)
+ })
+
t.Run("returns unconfirmed for confirmed missing receipt state", func(t *testing.T) {
idempotencyKey := uuid.New().String()
_, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore)
@@ -1018,6 +1056,12 @@ func mustCreateUnstartedTxFromEvmTxRequest(t testing.TB, txStore txmgr.EvmTxStor
return tx
}
+func mustInsertUnstartedTx(t testing.TB, txStore txmgr.TestEvmTxStore, fromAddress common.Address) {
+ etx := cltest.NewEthTx(fromAddress)
+ ctx := tests.Context(t)
+ require.NoError(t, txStore.InsertTx(ctx, &etx))
+}
+
func txRequestWithStrategy(strategy txmgrtypes.TxStrategy) func(*txmgr.TxRequest) {
return func(tx *txmgr.TxRequest) {
tx.Strategy = strategy
diff --git a/core/chains/legacyevm/chain.go b/core/chains/legacyevm/chain.go
index 129c0318820..68ff8d4e111 100644
--- a/core/chains/legacyevm/chain.go
+++ b/core/chains/legacyevm/chain.go
@@ -247,7 +247,7 @@ func newChain(ctx context.Context, cfg *evmconfig.ChainScoped, nodes []*toml.Nod
}
// note: gas estimator is started as a part of the txm
- txm, gasEstimator, err := newEvmTxm(opts.DS, cfg.EVM(), opts.AppConfig.EVMRPCEnabled(), opts.AppConfig.Database(), opts.AppConfig.Database().Listener(), client, l, logPoller, opts)
+ txm, gasEstimator, err := newEvmTxm(opts.DS, cfg.EVM(), opts.AppConfig.EVMRPCEnabled(), opts.AppConfig.Database(), opts.AppConfig.Database().Listener(), client, l, logPoller, opts, headTracker)
if err != nil {
return nil, fmt.Errorf("failed to instantiate EvmTxm for chain with ID %s: %w", chainID.String(), err)
}
diff --git a/core/chains/legacyevm/evm_txm.go b/core/chains/legacyevm/evm_txm.go
index cecfd4ffafe..ab116749665 100644
--- a/core/chains/legacyevm/evm_txm.go
+++ b/core/chains/legacyevm/evm_txm.go
@@ -7,6 +7,7 @@ import (
evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
evmconfig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
+ httypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker/types"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -22,6 +23,7 @@ func newEvmTxm(
lggr logger.Logger,
logPoller logpoller.LogPoller,
opts ChainRelayExtenderConfig,
+ headTracker httypes.HeadTracker,
) (txm txmgr.TxManager,
estimator gas.EvmFeeEstimator,
err error,
@@ -63,7 +65,8 @@ func newEvmTxm(
lggr,
logPoller,
opts.KeyStore,
- estimator)
+ estimator,
+ headTracker)
} else {
txm = opts.GenTxManager(chainID)
}
diff --git a/core/services/promreporter/prom_reporter_test.go b/core/services/promreporter/prom_reporter_test.go
index b61fa25bdc4..a0a4a247c21 100644
--- a/core/services/promreporter/prom_reporter_test.go
+++ b/core/services/promreporter/prom_reporter_test.go
@@ -62,7 +62,8 @@ func newLegacyChainContainer(t *testing.T, db *sqlx.DB) legacyevm.LegacyChainCon
lggr,
lp,
keyStore,
- estimator)
+ estimator,
+ ht)
require.NoError(t, err)
cfg := configtest.NewGeneralConfig(t, nil)
diff --git a/core/services/vrf/delegate_test.go b/core/services/vrf/delegate_test.go
index 889b19d0e04..9718dc376a7 100644
--- a/core/services/vrf/delegate_test.go
+++ b/core/services/vrf/delegate_test.go
@@ -83,7 +83,7 @@ func buildVrfUni(t *testing.T, db *sqlx.DB, cfg chainlink.GeneralConfig) vrfUniv
btORM := bridges.NewORM(db)
ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr)
_, dbConfig, evmConfig := txmgr.MakeTestConfigs(t)
- txm, err := txmgr.NewTxm(db, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), nil, dbConfig, dbConfig.Listener(), ec, logger.TestLogger(t), nil, ks.Eth(), nil)
+ txm, err := txmgr.NewTxm(db, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), nil, dbConfig, dbConfig.Listener(), ec, logger.TestLogger(t), nil, ks.Eth(), nil, nil)
orm := headtracker.NewORM(*testutils.FixtureChainID, db)
require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), cltest.Head(51)))
jrm := job.NewORM(db, prm, btORM, ks, lggr)
diff --git a/core/services/vrf/v2/integration_v2_test.go b/core/services/vrf/v2/integration_v2_test.go
index e9ae908565a..178b555667b 100644
--- a/core/services/vrf/v2/integration_v2_test.go
+++ b/core/services/vrf/v2/integration_v2_test.go
@@ -142,7 +142,7 @@ func makeTestTxm(t *testing.T, txStore txmgr.TestEvmTxStore, keyStore keystore.M
_, _, evmConfig := txmgr.MakeTestConfigs(t)
txmConfig := txmgr.NewEvmTxmConfig(evmConfig)
txm := txmgr.NewEvmTxm(ec.ConfiguredChainID(), txmConfig, evmConfig.Transactions(), keyStore.Eth(), logger.TestLogger(t), nil, nil,
- nil, txStore, nil, nil, nil, nil)
+ nil, txStore, nil, nil, nil, nil, nil)
return txm
}
diff --git a/core/services/vrf/v2/listener_v2_test.go b/core/services/vrf/v2/listener_v2_test.go
index ac59f1fdb69..b7a8710c4f8 100644
--- a/core/services/vrf/v2/listener_v2_test.go
+++ b/core/services/vrf/v2/listener_v2_test.go
@@ -40,7 +40,7 @@ func makeTestTxm(t *testing.T, txStore txmgr.TestEvmTxStore, keyStore keystore.M
ec := evmtest.NewEthClientMockWithDefaultChain(t)
txmConfig := txmgr.NewEvmTxmConfig(evmConfig)
txm := txmgr.NewEvmTxm(ec.ConfiguredChainID(), txmConfig, evmConfig.Transactions(), keyStore.Eth(), logger.TestLogger(t), nil, nil,
- nil, txStore, nil, nil, nil, nil)
+ nil, txStore, nil, nil, nil, nil, nil)
return txm
}
diff --git a/core/store/migrate/migrate_test.go b/core/store/migrate/migrate_test.go
index 9a8bf96573e..f4e91f0a2d2 100644
--- a/core/store/migrate/migrate_test.go
+++ b/core/store/migrate/migrate_test.go
@@ -618,3 +618,14 @@ func BenchmarkBackfillingRecordsWithMigration202(b *testing.B) {
require.NoError(b, err)
}
}
+
+func TestRollback_247_TxStateEnumUpdate(t *testing.T) {
+ ctx := testutils.Context(t)
+ _, db := heavyweight.FullTestDBV2(t, nil)
+ p, err := migrate.NewProvider(ctx, db.DB)
+ require.NoError(t, err)
+ _, err = p.DownTo(ctx, 54)
+ require.NoError(t, err)
+ _, err = p.UpTo(ctx, 247)
+ require.NoError(t, err)
+}
diff --git a/core/store/migrate/migrations/0248_add_tx_finalized_state.sql b/core/store/migrate/migrations/0248_add_tx_finalized_state.sql
new file mode 100644
index 00000000000..dcfe8eec734
--- /dev/null
+++ b/core/store/migrate/migrations/0248_add_tx_finalized_state.sql
@@ -0,0 +1,135 @@
+-- +goose Up
+-- Creating new column and enum instead of just adding new value to the existing enum so the migration changes match the rollback logic
+-- Otherwise, migration will complain about mismatching column order
+
+-- +goose StatementBegin
+-- Rename the existing enum (which lacks the finalized state) to mark it as old
+ALTER TYPE evm.txes_state RENAME TO txes_state_old;
+
+-- Create new enum that includes the finalized state
+CREATE TYPE evm.txes_state AS ENUM (
+ 'unstarted',
+ 'in_progress',
+ 'fatal_error',
+ 'unconfirmed',
+ 'confirmed_missing_receipt',
+ 'confirmed',
+ 'finalized'
+);
+
+-- Add a new state column with the new enum type to the txes table
+ALTER TABLE evm.txes ADD COLUMN state_new evm.txes_state;
+
+-- Copy data from the old column to the new
+UPDATE evm.txes SET state_new = state::text::evm.txes_state;
+
+-- Drop constraints referring to old enum type on the old state column
+ALTER TABLE evm.txes ALTER COLUMN state DROP DEFAULT;
+ALTER TABLE evm.txes DROP CONSTRAINT chk_eth_txes_fsm;
+DROP INDEX IF EXISTS idx_eth_txes_state_from_address_evm_chain_id;
+DROP INDEX IF EXISTS idx_eth_txes_min_unconfirmed_nonce_for_key_evm_chain_id;
+DROP INDEX IF EXISTS idx_only_one_in_progress_tx_per_account_id_per_evm_chain_id;
+DROP INDEX IF EXISTS idx_eth_txes_unstarted_subject_id_evm_chain_id;
+
+-- Drop the old state column
+ALTER TABLE evm.txes DROP state;
+
+-- Drop the old enum type
+DROP TYPE evm.txes_state_old;
+
+-- Rename the new column name state to replace the old column
+ALTER TABLE evm.txes RENAME state_new TO state;
+
+-- Reset the state column's default
+ALTER TABLE evm.txes ALTER COLUMN state SET DEFAULT 'unstarted'::evm.txes_state, ALTER COLUMN state SET NOT NULL;
+
+-- Recreate constraint with finalized state
+ALTER TABLE evm.txes ADD CONSTRAINT chk_eth_txes_fsm CHECK (
+ state = 'unstarted'::evm.txes_state AND nonce IS NULL AND error IS NULL AND broadcast_at IS NULL AND initial_broadcast_at IS NULL
+ OR
+ state = 'in_progress'::evm.txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NULL AND initial_broadcast_at IS NULL
+ OR
+ state = 'fatal_error'::evm.txes_state AND error IS NOT NULL
+ OR
+ state = 'unconfirmed'::evm.txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL
+ OR
+ state = 'confirmed'::evm.txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL
+ OR
+ state = 'confirmed_missing_receipt'::evm.txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL
+ OR
+ state = 'finalized'::evm.txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL
+) NOT VALID;
+
+-- Recreate index to include finalized state
+CREATE INDEX idx_eth_txes_state_from_address_evm_chain_id ON evm.txes(evm_chain_id, from_address, state) WHERE state <> 'confirmed'::evm.txes_state AND state <> 'finalized'::evm.txes_state;
+CREATE INDEX idx_eth_txes_min_unconfirmed_nonce_for_key_evm_chain_id ON evm.txes(evm_chain_id, from_address, nonce) WHERE state = 'unconfirmed'::evm.txes_state;
+CREATE UNIQUE INDEX idx_only_one_in_progress_tx_per_account_id_per_evm_chain_id ON evm.txes(evm_chain_id, from_address) WHERE state = 'in_progress'::evm.txes_state;
+CREATE INDEX idx_eth_txes_unstarted_subject_id_evm_chain_id ON evm.txes(evm_chain_id, subject, id) WHERE subject IS NOT NULL AND state = 'unstarted'::evm.txes_state;
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+
+-- Rename the existing enum with finalized state to mark it as old
+ALTER TYPE evm.txes_state RENAME TO txes_state_old;
+
+-- Create new enum without finalized state
+CREATE TYPE evm.txes_state AS ENUM (
+ 'unstarted',
+ 'in_progress',
+ 'fatal_error',
+ 'unconfirmed',
+ 'confirmed_missing_receipt',
+ 'confirmed'
+);
+
+-- Add a new state column with the new enum type to the txes table
+ALTER TABLE evm.txes ADD COLUMN state_new evm.txes_state;
+
+-- Update all transactions with finalized state to confirmed in the old state column
+UPDATE evm.txes SET state = 'confirmed'::evm.txes_state_old WHERE state = 'finalized'::evm.txes_state_old;
+
+-- Copy data from the old column to the new
+UPDATE evm.txes SET state_new = state::text::evm.txes_state;
+
+-- Drop constraints referring to old enum type on the old state column
+ALTER TABLE evm.txes ALTER COLUMN state DROP DEFAULT;
+ALTER TABLE evm.txes DROP CONSTRAINT chk_eth_txes_fsm;
+DROP INDEX IF EXISTS idx_eth_txes_state_from_address_evm_chain_id;
+DROP INDEX IF EXISTS idx_eth_txes_min_unconfirmed_nonce_for_key_evm_chain_id;
+DROP INDEX IF EXISTS idx_only_one_in_progress_tx_per_account_id_per_evm_chain_id;
+DROP INDEX IF EXISTS idx_eth_txes_unstarted_subject_id_evm_chain_id;
+
+-- Drop the old state column
+ALTER TABLE evm.txes DROP state;
+
+-- Drop the old enum type
+DROP TYPE evm.txes_state_old;
+
+-- Rename the new column name state to replace the old column
+ALTER TABLE evm.txes RENAME state_new TO state;
+
+-- Reset the state column's default
+ALTER TABLE evm.txes ALTER COLUMN state SET DEFAULT 'unstarted'::evm.txes_state, ALTER COLUMN state SET NOT NULL;
+
+-- Recreate constraint without finalized state
+ALTER TABLE evm.txes ADD CONSTRAINT chk_eth_txes_fsm CHECK (
+ state = 'unstarted'::evm.txes_state AND nonce IS NULL AND error IS NULL AND broadcast_at IS NULL AND initial_broadcast_at IS NULL
+ OR
+ state = 'in_progress'::evm.txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NULL AND initial_broadcast_at IS NULL
+ OR
+ state = 'fatal_error'::evm.txes_state AND error IS NOT NULL
+ OR
+ state = 'unconfirmed'::evm.txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL
+ OR
+ state = 'confirmed'::evm.txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL
+ OR
+ state = 'confirmed_missing_receipt'::evm.txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL
+) NOT VALID;
+
+-- Recreate index with new enum type
+CREATE INDEX idx_eth_txes_state_from_address_evm_chain_id ON evm.txes(evm_chain_id, from_address, state) WHERE state <> 'confirmed'::evm.txes_state;
+CREATE INDEX idx_eth_txes_min_unconfirmed_nonce_for_key_evm_chain_id ON evm.txes(evm_chain_id, from_address, nonce) WHERE state = 'unconfirmed'::evm.txes_state;
+CREATE UNIQUE INDEX idx_only_one_in_progress_tx_per_account_id_per_evm_chain_id ON evm.txes(evm_chain_id, from_address) WHERE state = 'in_progress'::evm.txes_state;
+CREATE INDEX idx_eth_txes_unstarted_subject_id_evm_chain_id ON evm.txes(evm_chain_id, subject, id) WHERE subject IS NOT NULL AND state = 'unstarted'::evm.txes_state;
+-- +goose StatementEnd
diff --git a/core/web/testdata/body/health.html b/core/web/testdata/body/health.html
index 2a1b2227530..90d301bc8b8 100644
--- a/core/web/testdata/body/health.html
+++ b/core/web/testdata/body/health.html
@@ -63,6 +63,9 @@
Confirmer
+
+ Finalizer
+
WrappedEvmEstimator
diff --git a/core/web/testdata/body/health.json b/core/web/testdata/body/health.json
index 10415c0abdc..839428a5103 100644
--- a/core/web/testdata/body/health.json
+++ b/core/web/testdata/body/health.json
@@ -90,6 +90,15 @@
"output": ""
}
},
+ {
+ "type": "checks",
+ "id": "EVM.0.Txm.Finalizer",
+ "attributes": {
+ "name": "EVM.0.Txm.Finalizer",
+ "status": "passing",
+ "output": ""
+ }
+ },
{
"type": "checks",
"id": "EVM.0.Txm.WrappedEvmEstimator",
diff --git a/core/web/testdata/body/health.txt b/core/web/testdata/body/health.txt
index 09c8cff6c2d..3709b4e15f0 100644
--- a/core/web/testdata/body/health.txt
+++ b/core/web/testdata/body/health.txt
@@ -9,6 +9,7 @@ ok EVM.0.Txm
ok EVM.0.Txm.BlockHistoryEstimator
ok EVM.0.Txm.Broadcaster
ok EVM.0.Txm.Confirmer
+ok EVM.0.Txm.Finalizer
ok EVM.0.Txm.WrappedEvmEstimator
ok JobSpawner
ok Mailbox.Monitor
diff --git a/testdata/scripts/health/multi-chain.txtar b/testdata/scripts/health/multi-chain.txtar
index 7e01493b30b..76937329cb8 100644
--- a/testdata/scripts/health/multi-chain.txtar
+++ b/testdata/scripts/health/multi-chain.txtar
@@ -82,6 +82,7 @@ ok EVM.1.Txm
ok EVM.1.Txm.BlockHistoryEstimator
ok EVM.1.Txm.Broadcaster
ok EVM.1.Txm.Confirmer
+ok EVM.1.Txm.Finalizer
ok EVM.1.Txm.WrappedEvmEstimator
ok JobSpawner
ok Mailbox.Monitor
@@ -219,6 +220,15 @@ ok TelemetryManager
"output": ""
}
},
+ {
+ "type": "checks",
+ "id": "EVM.1.Txm.Finalizer",
+ "attributes": {
+ "name": "EVM.1.Txm.Finalizer",
+ "status": "passing",
+ "output": ""
+ }
+ },
{
"type": "checks",
"id": "EVM.1.Txm.WrappedEvmEstimator",
From 69f7bd68199b91c4ef4be65b5701e4d45a250350 Mon Sep 17 00:00:00 2001
From: Jordan Krage
Date: Tue, 6 Aug 2024 23:55:49 +0200
Subject: [PATCH 21/52] use services.Config.NewService/Engine (#13851)
* use services.Config.NewService/Engine
* feedback
---
common/headtracker/head_broadcaster.go | 70 +++----
common/headtracker/head_listener.go | 73 ++++---
common/headtracker/head_tracker.go | 105 ++++------
core/bridges/cache.go | 76 ++-----
core/chains/evm/headtracker/head_listener.go | 28 ---
.../evm/headtracker/head_listener_test.go | 188 ++++++++---------
core/chains/evm/headtracker/head_tracker.go | 4 +-
core/chains/evm/monitor/balance.go | 76 +++----
core/recovery/recover.go | 42 ++--
core/services/chainlink/application.go | 13 +-
.../fluxmonitorv2/deviation_checker.go | 4 +-
core/services/fluxmonitorv2/flux_monitor.go | 95 ++++-----
.../fluxmonitorv2/flux_monitor_test.go | 16 +-
core/services/fluxmonitorv2/helpers_test.go | 12 +-
core/services/fluxmonitorv2/poll_manager.go | 10 +-
core/services/nurse.go | 192 ++++++++----------
core/services/nurse_test.go | 3 +-
.../relay/evm/functions/logpoller_wrapper.go | 116 +++++------
core/services/synchronization/helpers_test.go | 6 +-
.../telemetry_ingress_batch_client.go | 133 ++++++------
.../telemetry_ingress_batch_worker.go | 64 ++----
.../telemetry_ingress_batch_worker_test.go | 4 -
.../telemetry_ingress_client.go | 90 +++-----
core/services/telemetry/manager.go | 92 ++++-----
core/services/telemetry/manager_test.go | 2 +-
25 files changed, 608 insertions(+), 906 deletions(-)
delete mode 100644 core/chains/evm/headtracker/head_listener.go
diff --git a/common/headtracker/head_broadcaster.go b/common/headtracker/head_broadcaster.go
index 7edcccfccbd..c81c61141f2 100644
--- a/common/headtracker/head_broadcaster.go
+++ b/common/headtracker/head_broadcaster.go
@@ -42,13 +42,12 @@ type HeadBroadcaster[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable] interf
}
type headBroadcaster[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable] struct {
- services.StateMachine
- logger logger.Logger
+ services.Service
+ eng *services.Engine
+
callbacks callbackSet[H, BLOCK_HASH]
mailbox *mailbox.Mailbox[H]
mutex sync.Mutex
- chClose services.StopChan
- wgDone sync.WaitGroup
latest H
lastCallbackID int
}
@@ -60,41 +59,29 @@ func NewHeadBroadcaster[
](
lggr logger.Logger,
) HeadBroadcaster[H, BLOCK_HASH] {
- return &headBroadcaster[H, BLOCK_HASH]{
- logger: logger.Named(lggr, "HeadBroadcaster"),
+ hb := &headBroadcaster[H, BLOCK_HASH]{
callbacks: make(callbackSet[H, BLOCK_HASH]),
mailbox: mailbox.NewSingle[H](),
- chClose: make(chan struct{}),
}
+ hb.Service, hb.eng = services.Config{
+ Name: "HeadBroadcaster",
+ Start: hb.start,
+ Close: hb.close,
+ }.NewServiceEngine(lggr)
+ return hb
}
-func (hb *headBroadcaster[H, BLOCK_HASH]) Start(context.Context) error {
- return hb.StartOnce("HeadBroadcaster", func() error {
- hb.wgDone.Add(1)
- go hb.run()
- return nil
- })
-}
-
-func (hb *headBroadcaster[H, BLOCK_HASH]) Close() error {
- return hb.StopOnce("HeadBroadcaster", func() error {
- hb.mutex.Lock()
- // clear all callbacks
- hb.callbacks = make(callbackSet[H, BLOCK_HASH])
- hb.mutex.Unlock()
-
- close(hb.chClose)
- hb.wgDone.Wait()
- return nil
- })
+func (hb *headBroadcaster[H, BLOCK_HASH]) start(context.Context) error {
+ hb.eng.Go(hb.run)
+ return nil
}
-func (hb *headBroadcaster[H, BLOCK_HASH]) Name() string {
- return hb.logger.Name()
-}
-
-func (hb *headBroadcaster[H, BLOCK_HASH]) HealthReport() map[string]error {
- return map[string]error{hb.Name(): hb.Healthy()}
+func (hb *headBroadcaster[H, BLOCK_HASH]) close() error {
+ hb.mutex.Lock()
+ // clear all callbacks
+ hb.callbacks = make(callbackSet[H, BLOCK_HASH])
+ hb.mutex.Unlock()
+ return nil
}
func (hb *headBroadcaster[H, BLOCK_HASH]) BroadcastNewLongestChain(head H) {
@@ -121,15 +108,13 @@ func (hb *headBroadcaster[H, BLOCK_HASH]) Subscribe(callback HeadTrackable[H, BL
return
}
-func (hb *headBroadcaster[H, BLOCK_HASH]) run() {
- defer hb.wgDone.Done()
-
+func (hb *headBroadcaster[H, BLOCK_HASH]) run(ctx context.Context) {
for {
select {
- case <-hb.chClose:
+ case <-ctx.Done():
return
case <-hb.mailbox.Notify():
- hb.executeCallbacks()
+ hb.executeCallbacks(ctx)
}
}
}
@@ -137,10 +122,10 @@ func (hb *headBroadcaster[H, BLOCK_HASH]) run() {
// DEV: the head relayer makes no promises about head delivery! Subscribing
// Jobs should expect to the relayer to skip heads if there is a large number of listeners
// and all callbacks cannot be completed in the allotted time.
-func (hb *headBroadcaster[H, BLOCK_HASH]) executeCallbacks() {
+func (hb *headBroadcaster[H, BLOCK_HASH]) executeCallbacks(ctx context.Context) {
head, exists := hb.mailbox.Retrieve()
if !exists {
- hb.logger.Info("No head to retrieve. It might have been skipped")
+ hb.eng.Info("No head to retrieve. It might have been skipped")
return
}
@@ -149,7 +134,7 @@ func (hb *headBroadcaster[H, BLOCK_HASH]) executeCallbacks() {
hb.latest = head
hb.mutex.Unlock()
- hb.logger.Debugw("Initiating callbacks",
+ hb.eng.Debugw("Initiating callbacks",
"headNum", head.BlockNumber(),
"numCallbacks", len(callbacks),
)
@@ -157,9 +142,6 @@ func (hb *headBroadcaster[H, BLOCK_HASH]) executeCallbacks() {
wg := sync.WaitGroup{}
wg.Add(len(callbacks))
- ctx, cancel := hb.chClose.NewCtx()
- defer cancel()
-
for _, callback := range callbacks {
go func(trackable HeadTrackable[H, BLOCK_HASH]) {
defer wg.Done()
@@ -168,7 +150,7 @@ func (hb *headBroadcaster[H, BLOCK_HASH]) executeCallbacks() {
defer cancel()
trackable.OnNewLongestChain(cctx, head)
elapsed := time.Since(start)
- hb.logger.Debugw(fmt.Sprintf("Finished callback in %s", elapsed),
+ hb.eng.Debugw(fmt.Sprintf("Finished callback in %s", elapsed),
"callbackType", reflect.TypeOf(trackable), "blockNumber", head.BlockNumber(), "time", elapsed)
}(callback)
}
diff --git a/common/headtracker/head_listener.go b/common/headtracker/head_listener.go
index 25715b35280..d240caab3c3 100644
--- a/common/headtracker/head_listener.go
+++ b/common/headtracker/head_listener.go
@@ -29,14 +29,15 @@ var (
}, []string{"ChainID"})
)
-// headHandler is a callback that handles incoming heads
-type headHandler[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable] func(ctx context.Context, header H) error
+// HeadHandler is a callback that handles incoming heads
+type HeadHandler[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable] func(ctx context.Context, header H) error
// HeadListener is a chain agnostic interface that manages connection of Client that receives heads from the blockchain node
type HeadListener[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable] interface {
- // ListenForNewHeads kicks off the listen loop (not thread safe)
- // done() must be executed upon leaving ListenForNewHeads()
- ListenForNewHeads(onSubscribe func(), handleNewHead headHandler[H, BLOCK_HASH], done func())
+ services.Service
+
+ // ListenForNewHeads runs the listen loop (not thread safe)
+ ListenForNewHeads(ctx context.Context)
// ReceivingHeads returns true if the listener is receiving heads (thread safe)
ReceivingHeads() bool
@@ -54,10 +55,13 @@ type headListener[
ID types.ID,
BLOCK_HASH types.Hashable,
] struct {
+ services.Service
+ eng *services.Engine
+
config htrktypes.Config
client htrktypes.Client[HTH, S, ID, BLOCK_HASH]
- logger logger.Logger
- chStop services.StopChan
+ onSubscription func(context.Context)
+ handleNewHead HeadHandler[HTH, BLOCK_HASH]
chHeaders chan HTH
headSubscription types.Subscription
connected atomic.Bool
@@ -74,38 +78,43 @@ func NewHeadListener[
lggr logger.Logger,
client CLIENT,
config htrktypes.Config,
- chStop chan struct{},
+ onSubscription func(context.Context),
+ handleNewHead HeadHandler[HTH, BLOCK_HASH],
) HeadListener[HTH, BLOCK_HASH] {
- return &headListener[HTH, S, ID, BLOCK_HASH]{
- config: config,
- client: client,
- logger: logger.Named(lggr, "HeadListener"),
- chStop: chStop,
+ hl := &headListener[HTH, S, ID, BLOCK_HASH]{
+ config: config,
+ client: client,
+ onSubscription: onSubscription,
+ handleNewHead: handleNewHead,
}
+ hl.Service, hl.eng = services.Config{
+ Name: "HeadListener",
+ Start: hl.start,
+ }.NewServiceEngine(lggr)
+ return hl
}
-func (hl *headListener[HTH, S, ID, BLOCK_HASH]) Name() string {
- return hl.logger.Name()
+func (hl *headListener[HTH, S, ID, BLOCK_HASH]) start(context.Context) error {
+ hl.eng.Go(hl.ListenForNewHeads)
+ return nil
}
-func (hl *headListener[HTH, S, ID, BLOCK_HASH]) ListenForNewHeads(onSubscription func(), handleNewHead headHandler[HTH, BLOCK_HASH], done func()) {
- defer done()
+func (hl *headListener[HTH, S, ID, BLOCK_HASH]) ListenForNewHeads(ctx context.Context) {
defer hl.unsubscribe()
- ctx, cancel := hl.chStop.NewCtx()
- defer cancel()
-
for {
if !hl.subscribe(ctx) {
break
}
- onSubscription()
- err := hl.receiveHeaders(ctx, handleNewHead)
+ if hl.onSubscription != nil {
+ hl.onSubscription(ctx)
+ }
+ err := hl.receiveHeaders(ctx, hl.handleNewHead)
if ctx.Err() != nil {
break
} else if err != nil {
- hl.logger.Errorw("Error in new head subscription, unsubscribed", "err", err)
+ hl.eng.Errorw("Error in new head subscription, unsubscribed", "err", err)
continue
}
break
@@ -131,7 +140,7 @@ func (hl *headListener[HTH, S, ID, BLOCK_HASH]) HealthReport() map[string]error
return map[string]error{hl.Name(): err}
}
-func (hl *headListener[HTH, S, ID, BLOCK_HASH]) receiveHeaders(ctx context.Context, handleNewHead headHandler[HTH, BLOCK_HASH]) error {
+func (hl *headListener[HTH, S, ID, BLOCK_HASH]) receiveHeaders(ctx context.Context, handleNewHead HeadHandler[HTH, BLOCK_HASH]) error {
var noHeadsAlarmC <-chan time.Time
var noHeadsAlarmT *time.Ticker
noHeadsAlarmDuration := hl.config.BlockEmissionIdleWarningThreshold()
@@ -142,7 +151,7 @@ func (hl *headListener[HTH, S, ID, BLOCK_HASH]) receiveHeaders(ctx context.Conte
for {
select {
- case <-hl.chStop:
+ case <-ctx.Done():
return nil
case blockHeader, open := <-hl.chHeaders:
@@ -158,13 +167,13 @@ func (hl *headListener[HTH, S, ID, BLOCK_HASH]) receiveHeaders(ctx context.Conte
return errors.New("head listener: chHeaders prematurely closed")
}
if !blockHeader.IsValid() {
- hl.logger.Error("got nil block header")
+ hl.eng.Error("got nil block header")
continue
}
// Compare the chain ID of the block header to the chain ID of the client
if !blockHeader.HasChainID() || blockHeader.ChainID().String() != chainId.String() {
- hl.logger.Panicf("head listener for %s received block header for %s", chainId, blockHeader.ChainID())
+ hl.eng.Panicf("head listener for %s received block header for %s", chainId, blockHeader.ChainID())
}
promNumHeadsReceived.WithLabelValues(chainId.String()).Inc()
@@ -184,7 +193,7 @@ func (hl *headListener[HTH, S, ID, BLOCK_HASH]) receiveHeaders(ctx context.Conte
case <-noHeadsAlarmC:
// We haven't received a head on the channel for a long time, log a warning
- hl.logger.Warnf("have not received a head for %v", noHeadsAlarmDuration)
+ hl.eng.Warnf("have not received a head for %v", noHeadsAlarmDuration)
hl.receivingHeads.Store(false)
}
}
@@ -198,19 +207,19 @@ func (hl *headListener[HTH, S, ID, BLOCK_HASH]) subscribe(ctx context.Context) b
for {
hl.unsubscribe()
- hl.logger.Debugf("Subscribing to new heads on chain %s", chainId.String())
+ hl.eng.Debugf("Subscribing to new heads on chain %s", chainId.String())
select {
- case <-hl.chStop:
+ case <-ctx.Done():
return false
case <-time.After(subscribeRetryBackoff.Duration()):
err := hl.subscribeToHead(ctx)
if err != nil {
promEthConnectionErrors.WithLabelValues(chainId.String()).Inc()
- hl.logger.Warnw("Failed to subscribe to heads on chain", "chainID", chainId.String(), "err", err)
+ hl.eng.Warnw("Failed to subscribe to heads on chain", "chainID", chainId.String(), "err", err)
} else {
- hl.logger.Debugf("Subscribed to heads on chain %s", chainId.String())
+ hl.eng.Debugf("Subscribed to heads on chain %s", chainId.String())
return true
}
}
diff --git a/common/headtracker/head_tracker.go b/common/headtracker/head_tracker.go
index 851458591b8..8546d856b67 100644
--- a/common/headtracker/head_tracker.go
+++ b/common/headtracker/head_tracker.go
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"math/big"
- "sync"
"time"
"github.com/prometheus/client_golang/prometheus"
@@ -51,7 +50,9 @@ type headTracker[
ID types.ID,
BLOCK_HASH types.Hashable,
] struct {
- services.StateMachine
+ services.Service
+ eng *services.Engine
+
log logger.SugaredLogger
headBroadcaster HeadBroadcaster[HTH, BLOCK_HASH]
headSaver HeadSaver[HTH, BLOCK_HASH]
@@ -64,8 +65,6 @@ type headTracker[
backfillMB *mailbox.Mailbox[HTH]
broadcastMB *mailbox.Mailbox[HTH]
headListener HeadListener[HTH, BLOCK_HASH]
- chStop services.StopChan
- wgDone sync.WaitGroup
getNilHead func() HTH
}
@@ -85,52 +84,52 @@ func NewHeadTracker[
mailMon *mailbox.Monitor,
getNilHead func() HTH,
) HeadTracker[HTH, BLOCK_HASH] {
- chStop := make(chan struct{})
- lggr = logger.Named(lggr, "HeadTracker")
- return &headTracker[HTH, S, ID, BLOCK_HASH]{
+ ht := &headTracker[HTH, S, ID, BLOCK_HASH]{
headBroadcaster: headBroadcaster,
client: client,
chainID: client.ConfiguredChainID(),
config: config,
htConfig: htConfig,
- log: logger.Sugared(lggr),
backfillMB: mailbox.NewSingle[HTH](),
broadcastMB: mailbox.New[HTH](HeadsBufferSize),
- chStop: chStop,
- headListener: NewHeadListener[HTH, S, ID, BLOCK_HASH](lggr, client, config, chStop),
headSaver: headSaver,
mailMon: mailMon,
getNilHead: getNilHead,
}
+ ht.Service, ht.eng = services.Config{
+ Name: "HeadTracker",
+ NewSubServices: func(lggr logger.Logger) []services.Service {
+ ht.headListener = NewHeadListener[HTH, S, ID, BLOCK_HASH](lggr, client, config,
+ // NOTE: Always try to start the head tracker off with whatever the
+ // latest head is, without waiting for the subscription to send us one.
+ //
+ // In some cases the subscription will send us the most recent head
+ // anyway when we connect (but we should not rely on this because it is
+ // not specced). If it happens this is fine, and the head will be
+ // ignored as a duplicate.
+ func(ctx context.Context) {
+ err := ht.handleInitialHead(ctx)
+ if err != nil {
+ ht.log.Errorw("Error handling initial head", "err", err.Error())
+ }
+ }, ht.handleNewHead)
+ return []services.Service{ht.headListener}
+ },
+ Start: ht.start,
+ Close: ht.close,
+ }.NewServiceEngine(lggr)
+ ht.log = logger.Sugared(ht.eng)
+ return ht
}
// Start starts HeadTracker service.
-func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) Start(ctx context.Context) error {
- return ht.StartOnce("HeadTracker", func() error {
- ht.log.Debugw("Starting HeadTracker", "chainID", ht.chainID)
- // NOTE: Always try to start the head tracker off with whatever the
- // latest head is, without waiting for the subscription to send us one.
- //
- // In some cases the subscription will send us the most recent head
- // anyway when we connect (but we should not rely on this because it is
- // not specced). If it happens this is fine, and the head will be
- // ignored as a duplicate.
- onSubscribe := func() {
- err := ht.handleInitialHead(ctx)
- if err != nil {
- ht.log.Errorw("Error handling initial head", "err", err.Error())
- }
- }
+func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) start(context.Context) error {
+ ht.eng.Go(ht.backfillLoop)
+ ht.eng.Go(ht.broadcastLoop)
- ht.wgDone.Add(3)
- go ht.headListener.ListenForNewHeads(onSubscribe, ht.handleNewHead, ht.wgDone.Done)
- go ht.backfillLoop()
- go ht.broadcastLoop()
+ ht.mailMon.Monitor(ht.broadcastMB, "HeadTracker", "Broadcast", ht.chainID.String())
- ht.mailMon.Monitor(ht.broadcastMB, "HeadTracker", "Broadcast", ht.chainID.String())
-
- return nil
- })
+ return nil
}
func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) handleInitialHead(ctx context.Context) error {
@@ -176,23 +175,8 @@ func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) handleInitialHead(ctx context.Con
return nil
}
-// Close stops HeadTracker service.
-func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) Close() error {
- return ht.StopOnce("HeadTracker", func() error {
- close(ht.chStop)
- ht.wgDone.Wait()
- return ht.broadcastMB.Close()
- })
-}
-
-func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) Name() string {
- return ht.log.Name()
-}
-
-func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) HealthReport() map[string]error {
- report := map[string]error{ht.Name(): ht.Healthy()}
- services.CopyHealth(report, ht.headListener.HealthReport())
- return report
+func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) close() error {
+ return ht.broadcastMB.Close()
}
func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) Backfill(ctx context.Context, headWithChain HTH) (err error) {
@@ -265,15 +249,13 @@ func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) handleNewHead(ctx context.Context
promOldHead.WithLabelValues(ht.chainID.String()).Inc()
err := fmt.Errorf("got very old block with number %d (highest seen was %d)", head.BlockNumber(), prevHead.BlockNumber())
ht.log.Critical("Got very old block. Either a very deep re-org occurred, one of the RPC nodes has gotten far out of sync, or the chain went backwards in block numbers. This node may not function correctly without manual intervention.", "err", err)
- ht.SvcErrBuffer.Append(err)
+ ht.eng.EmitHealthErr(err)
}
}
return nil
}
-func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) broadcastLoop() {
- defer ht.wgDone.Done()
-
+func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) broadcastLoop(ctx context.Context) {
samplingInterval := ht.htConfig.SamplingInterval()
if samplingInterval > 0 {
ht.log.Debugf("Head sampling is enabled - sampling interval is set to: %v", samplingInterval)
@@ -281,7 +263,7 @@ func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) broadcastLoop() {
defer debounceHead.Stop()
for {
select {
- case <-ht.chStop:
+ case <-ctx.Done():
return
case <-debounceHead.C:
item := ht.broadcastMB.RetrieveLatestAndClear()
@@ -295,7 +277,7 @@ func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) broadcastLoop() {
ht.log.Info("Head sampling is disabled - callback will be called on every head")
for {
select {
- case <-ht.chStop:
+ case <-ctx.Done():
return
case <-ht.broadcastMB.Notify():
for {
@@ -310,15 +292,10 @@ func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) broadcastLoop() {
}
}
-func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) backfillLoop() {
- defer ht.wgDone.Done()
-
- ctx, cancel := ht.chStop.NewCtx()
- defer cancel()
-
+func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) backfillLoop(ctx context.Context) {
for {
select {
- case <-ht.chStop:
+ case <-ctx.Done():
return
case <-ht.backfillMB.Notify():
for {
diff --git a/core/bridges/cache.go b/core/bridges/cache.go
index 4b5a6552447..e97874a35e5 100644
--- a/core/bridges/cache.go
+++ b/core/bridges/cache.go
@@ -10,11 +10,9 @@ import (
"golang.org/x/exp/maps"
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink-common/pkg/services"
"github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
-
- "github.com/smartcontractkit/chainlink/v2/core/logger"
- "github.com/smartcontractkit/chainlink/v2/core/utils"
)
const (
@@ -25,13 +23,11 @@ const (
type Cache struct {
// dependencies and configurations
ORM
- lggr logger.Logger
interval time.Duration
// service state
- services.StateMachine
- wg sync.WaitGroup
- chStop services.StopChan
+ services.Service
+ eng *services.Engine
// data state
bridgeTypesCache sync.Map
@@ -43,17 +39,20 @@ var _ ORM = (*Cache)(nil)
var _ services.Service = (*Cache)(nil)
func NewCache(base ORM, lggr logger.Logger, upsertInterval time.Duration) *Cache {
- return &Cache{
+ c := &Cache{
ORM: base,
- lggr: lggr.Named(CacheServiceName),
interval: upsertInterval,
- chStop: make(chan struct{}),
bridgeLastValueCache: make(map[string]BridgeResponse),
}
+ c.Service, c.eng = services.Config{
+ Name: CacheServiceName,
+ Start: c.start,
+ }.NewServiceEngine(lggr)
+ return c
}
func (c *Cache) WithDataSource(ds sqlutil.DataSource) ORM {
- return NewCache(NewORM(ds), c.lggr, c.interval)
+ return NewCache(NewORM(ds), c.eng, c.interval)
}
func (c *Cache) FindBridge(ctx context.Context, name BridgeName) (BridgeType, error) {
@@ -190,51 +189,17 @@ func (c *Cache) UpsertBridgeResponse(ctx context.Context, dotId string, specId i
return nil
}
-func (c *Cache) Start(_ context.Context) error {
- return c.StartOnce(CacheServiceName, func() error {
- c.wg.Add(1)
-
- go c.run()
-
- return nil
- })
-}
-
-func (c *Cache) Close() error {
- return c.StopOnce(CacheServiceName, func() error {
- close(c.chStop)
- c.wg.Wait()
-
- return nil
- })
-}
-
-func (c *Cache) HealthReport() map[string]error {
- return map[string]error{c.Name(): c.Healthy()}
-}
-
-func (c *Cache) Name() string {
- return c.lggr.Name()
-}
-
-func (c *Cache) run() {
- defer c.wg.Done()
-
- for {
- timer := time.NewTimer(utils.WithJitter(c.interval))
+func (c *Cache) start(_ context.Context) error {
+ ticker := services.TickerConfig{
+ Initial: c.interval,
+ JitterPct: services.DefaultJitter,
+ }.NewTicker(c.interval)
+ c.eng.GoTick(ticker, c.doBulkUpsert)
- select {
- case <-timer.C:
- c.doBulkUpsert()
- case <-c.chStop:
- timer.Stop()
-
- return
- }
- }
+ return nil
}
-func (c *Cache) doBulkUpsert() {
+func (c *Cache) doBulkUpsert(ctx context.Context) {
c.mu.RLock()
values := maps.Values(c.bridgeLastValueCache)
c.mu.RUnlock()
@@ -243,11 +208,8 @@ func (c *Cache) doBulkUpsert() {
return
}
- ctx, cancel := c.chStop.NewCtx()
- defer cancel()
-
if err := c.ORM.BulkUpsertBridgeResponse(ctx, values); err != nil {
- c.lggr.Warnf("bulk upsert of bridge responses failed: %s", err.Error())
+ c.eng.Warnf("bulk upsert of bridge responses failed: %s", err.Error())
}
}
diff --git a/core/chains/evm/headtracker/head_listener.go b/core/chains/evm/headtracker/head_listener.go
deleted file mode 100644
index 04535a34868..00000000000
--- a/core/chains/evm/headtracker/head_listener.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package headtracker
-
-import (
- "math/big"
-
- "github.com/ethereum/go-ethereum"
- "github.com/ethereum/go-ethereum/common"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink/v2/common/headtracker"
-
- htrktypes "github.com/smartcontractkit/chainlink/v2/common/headtracker/types"
- evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
- evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
-)
-
-type headListener = headtracker.HeadListener[*evmtypes.Head, common.Hash]
-
-func NewHeadListener(
- lggr logger.Logger,
- ethClient evmclient.Client,
- config htrktypes.Config, chStop chan struct{},
-) headListener {
- return headtracker.NewHeadListener[
- *evmtypes.Head,
- ethereum.Subscription, *big.Int, common.Hash,
- ](lggr, ethClient, config, chStop)
-}
diff --git a/core/chains/evm/headtracker/head_listener_test.go b/core/chains/evm/headtracker/head_listener_test.go
index 29b090bbffe..2e459af2a2b 100644
--- a/core/chains/evm/headtracker/head_listener_test.go
+++ b/core/chains/evm/headtracker/head_listener_test.go
@@ -16,9 +16,9 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+ "github.com/smartcontractkit/chainlink/v2/common/headtracker"
commonmocks "github.com/smartcontractkit/chainlink/v2/common/types/mocks"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml"
- "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/testutils"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
)
@@ -40,17 +40,10 @@ func Test_HeadListener_HappyPath(t *testing.T) {
evmcfg := testutils.NewTestChainScopedConfig(t, func(c *toml.EVMConfig) {
c.NoNewHeadsThreshold = &commonconfig.Duration{}
})
- chStop := make(chan struct{})
- hl := headtracker.NewHeadListener(lggr, ethClient, evmcfg.EVM(), chStop)
var headCount atomic.Int32
- handler := func(context.Context, *evmtypes.Head) error {
- headCount.Add(1)
- return nil
- }
-
- subscribeAwaiter := testutils.NewAwaiter()
unsubscribeAwaiter := testutils.NewAwaiter()
+ subscribeAwaiter := testutils.NewAwaiter()
var chHeads chan<- *evmtypes.Head
var chErr = make(chan error)
var chSubErr <-chan error = chErr
@@ -66,23 +59,23 @@ func Test_HeadListener_HappyPath(t *testing.T) {
close(chErr)
})
- doneAwaiter := testutils.NewAwaiter()
- done := func() {
- doneAwaiter.ItHappened()
- }
- go hl.ListenForNewHeads(func() {}, handler, done)
-
- subscribeAwaiter.AwaitOrFail(t, tests.WaitTimeout(t))
- require.Eventually(t, hl.Connected, tests.WaitTimeout(t), tests.TestInterval)
+ func() {
+ hl := headtracker.NewHeadListener(lggr, ethClient, evmcfg.EVM(), nil, func(context.Context, *evmtypes.Head) error {
+ headCount.Add(1)
+ return nil
+ })
+ require.NoError(t, hl.Start(tests.Context(t)))
+ defer func() { assert.NoError(t, hl.Close()) }()
- chHeads <- testutils.Head(0)
- chHeads <- testutils.Head(1)
- chHeads <- testutils.Head(2)
+ subscribeAwaiter.AwaitOrFail(t, tests.WaitTimeout(t))
+ require.Eventually(t, hl.Connected, tests.WaitTimeout(t), tests.TestInterval)
- require.True(t, hl.ReceivingHeads())
+ chHeads <- testutils.Head(0)
+ chHeads <- testutils.Head(1)
+ chHeads <- testutils.Head(2)
- close(chStop)
- doneAwaiter.AwaitOrFail(t)
+ require.True(t, hl.ReceivingHeads())
+ }()
unsubscribeAwaiter.AwaitOrFail(t)
require.Equal(t, int32(3), headCount.Load())
@@ -101,14 +94,8 @@ func Test_HeadListener_NotReceivingHeads(t *testing.T) {
evmcfg := testutils.NewTestChainScopedConfig(t, func(c *toml.EVMConfig) {
c.NoNewHeadsThreshold = commonconfig.MustNewDuration(time.Second)
})
- chStop := make(chan struct{})
- hl := headtracker.NewHeadListener(lggr, ethClient, evmcfg.EVM(), chStop)
firstHeadAwaiter := testutils.NewAwaiter()
- handler := func(context.Context, *evmtypes.Head) error {
- firstHeadAwaiter.ItHappened()
- return nil
- }
subscribeAwaiter := testutils.NewAwaiter()
var chHeads chan<- *evmtypes.Head
@@ -125,25 +112,25 @@ func Test_HeadListener_NotReceivingHeads(t *testing.T) {
close(chErr)
})
- doneAwaiter := testutils.NewAwaiter()
- done := func() {
- doneAwaiter.ItHappened()
- }
- go hl.ListenForNewHeads(func() {}, handler, done)
-
- subscribeAwaiter.AwaitOrFail(t, tests.WaitTimeout(t))
+ func() {
+ hl := headtracker.NewHeadListener(lggr, ethClient, evmcfg.EVM(), nil, func(context.Context, *evmtypes.Head) error {
+ firstHeadAwaiter.ItHappened()
+ return nil
+ })
+ require.NoError(t, hl.Start(tests.Context(t)))
+ defer func() { assert.NoError(t, hl.Close()) }()
- chHeads <- testutils.Head(0)
- firstHeadAwaiter.AwaitOrFail(t)
+ subscribeAwaiter.AwaitOrFail(t, tests.WaitTimeout(t))
- require.True(t, hl.ReceivingHeads())
+ chHeads <- testutils.Head(0)
+ firstHeadAwaiter.AwaitOrFail(t)
- time.Sleep(time.Second * 2)
+ require.True(t, hl.ReceivingHeads())
- require.False(t, hl.ReceivingHeads())
+ time.Sleep(time.Second * 2)
- close(chStop)
- doneAwaiter.AwaitOrFail(t)
+ require.False(t, hl.ReceivingHeads())
+ }()
}
func Test_HeadListener_SubscriptionErr(t *testing.T) {
@@ -161,19 +148,11 @@ func Test_HeadListener_SubscriptionErr(t *testing.T) {
for _, test := range cases {
test := test
t.Run(test.name, func(t *testing.T) {
- l := logger.Test(t)
+ lggr := logger.Test(t)
ethClient := testutils.NewEthClientMockWithDefaultChain(t)
evmcfg := testutils.NewTestChainScopedConfig(t, nil)
- chStop := make(chan struct{})
- hl := headtracker.NewHeadListener(l, ethClient, evmcfg.EVM(), chStop)
hnhCalled := make(chan *evmtypes.Head)
- hnh := func(_ context.Context, header *evmtypes.Head) error {
- hnhCalled <- header
- return nil
- }
- doneAwaiter := testutils.NewAwaiter()
- done := doneAwaiter.ItHappened
chSubErrTest := make(chan error)
var chSubErr <-chan error = chSubErrTest
@@ -189,63 +168,66 @@ func Test_HeadListener_SubscriptionErr(t *testing.T) {
headsCh = args.Get(1).(chan<- *evmtypes.Head)
subscribeAwaiter.ItHappened()
})
- go func() {
- hl.ListenForNewHeads(func() {}, hnh, done)
- }()
-
- // Put a head on the channel to ensure we test all code paths
- subscribeAwaiter.AwaitOrFail(t, tests.WaitTimeout(t))
- head := testutils.Head(0)
- headsCh <- head
-
- h := <-hnhCalled
- assert.Equal(t, head, h)
-
- // Expect a call to unsubscribe on error
- sub.On("Unsubscribe").Once().Run(func(_ mock.Arguments) {
- close(headsCh)
- // geth guarantees that Unsubscribe closes the errors channel
- if !test.closeErr {
+ func() {
+ hl := headtracker.NewHeadListener(lggr, ethClient, evmcfg.EVM(), nil, func(_ context.Context, header *evmtypes.Head) error {
+ hnhCalled <- header
+ return nil
+ })
+ require.NoError(t, hl.Start(tests.Context(t)))
+ defer func() { assert.NoError(t, hl.Close()) }()
+
+ // Put a head on the channel to ensure we test all code paths
+ subscribeAwaiter.AwaitOrFail(t, tests.WaitTimeout(t))
+ head := testutils.Head(0)
+ headsCh <- head
+
+ h := <-hnhCalled
+ assert.Equal(t, head, h)
+
+ // Expect a call to unsubscribe on error
+ sub.On("Unsubscribe").Once().Run(func(_ mock.Arguments) {
+ close(headsCh)
+ // geth guarantees that Unsubscribe closes the errors channel
+ if !test.closeErr {
+ close(chSubErrTest)
+ }
+ })
+ // Expect a resubscribe
+ chSubErrTest2 := make(chan error)
+ var chSubErr2 <-chan error = chSubErrTest2
+ sub2 := commonmocks.NewSubscription(t)
+ sub2.On("Err").Return(chSubErr2)
+ subscribeAwaiter2 := testutils.NewAwaiter()
+
+ var headsCh2 chan<- *evmtypes.Head
+ ethClient.On("SubscribeNewHead", mock.Anything, mock.AnythingOfType("chan<- *types.Head")).Return(sub2, nil).Once().Run(func(args mock.Arguments) {
+ headsCh2 = args.Get(1).(chan<- *evmtypes.Head)
+ subscribeAwaiter2.ItHappened()
+ })
+
+ // Sending test error
+ if test.closeErr {
close(chSubErrTest)
+ } else {
+ chSubErrTest <- test.err
}
- })
- // Expect a resubscribe
- chSubErrTest2 := make(chan error)
- var chSubErr2 <-chan error = chSubErrTest2
- sub2 := commonmocks.NewSubscription(t)
- sub2.On("Err").Return(chSubErr2)
- subscribeAwaiter2 := testutils.NewAwaiter()
-
- var headsCh2 chan<- *evmtypes.Head
- ethClient.On("SubscribeNewHead", mock.Anything, mock.AnythingOfType("chan<- *types.Head")).Return(sub2, nil).Once().Run(func(args mock.Arguments) {
- headsCh2 = args.Get(1).(chan<- *evmtypes.Head)
- subscribeAwaiter2.ItHappened()
- })
-
- // Sending test error
- if test.closeErr {
- close(chSubErrTest)
- } else {
- chSubErrTest <- test.err
- }
- // Wait for it to resubscribe
- subscribeAwaiter2.AwaitOrFail(t, tests.WaitTimeout(t))
+ // Wait for it to resubscribe
+ subscribeAwaiter2.AwaitOrFail(t, tests.WaitTimeout(t))
- head2 := testutils.Head(1)
- headsCh2 <- head2
+ head2 := testutils.Head(1)
+ headsCh2 <- head2
- h2 := <-hnhCalled
- assert.Equal(t, head2, h2)
+ h2 := <-hnhCalled
+ assert.Equal(t, head2, h2)
- // Second call to unsubscribe on close
- sub2.On("Unsubscribe").Once().Run(func(_ mock.Arguments) {
- close(headsCh2)
- // geth guarantees that Unsubscribe closes the errors channel
- close(chSubErrTest2)
- })
- close(chStop)
- doneAwaiter.AwaitOrFail(t)
+ // Second call to unsubscribe on close
+ sub2.On("Unsubscribe").Once().Run(func(_ mock.Arguments) {
+ close(headsCh2)
+ // geth guarantees that Unsubscribe closes the errors channel
+ close(chSubErrTest2)
+ })
+ }()
})
}
}
diff --git a/core/chains/evm/headtracker/head_tracker.go b/core/chains/evm/headtracker/head_tracker.go
index d6c2cdc64e7..f7607189f7e 100644
--- a/core/chains/evm/headtracker/head_tracker.go
+++ b/core/chains/evm/headtracker/head_tracker.go
@@ -2,10 +2,8 @@ package headtracker
import (
"context"
- "math/big"
"github.com/ethereum/go-ethereum"
- "github.com/ethereum/go-ethereum/common"
"go.uber.org/zap/zapcore"
"github.com/smartcontractkit/chainlink-common/pkg/logger"
@@ -27,7 +25,7 @@ func NewHeadTracker(
headSaver httypes.HeadSaver,
mailMon *mailbox.Monitor,
) httypes.HeadTracker {
- return headtracker.NewHeadTracker[*evmtypes.Head, ethereum.Subscription, *big.Int, common.Hash](
+ return headtracker.NewHeadTracker[*evmtypes.Head, ethereum.Subscription](
lggr,
ethClient,
config,
diff --git a/core/chains/evm/monitor/balance.go b/core/chains/evm/monitor/balance.go
index b8194a38af9..3e28d5c436a 100644
--- a/core/chains/evm/monitor/balance.go
+++ b/core/chains/evm/monitor/balance.go
@@ -33,14 +33,15 @@ type (
}
balanceMonitor struct {
- services.StateMachine
- logger logger.Logger
+ services.Service
+ eng *services.Engine
+
ethClient evmclient.Client
chainID *big.Int
chainIDStr string
ethKeyStore keystore.Eth
ethBalances map[gethCommon.Address]*assets.Eth
- ethBalancesMtx *sync.RWMutex
+ ethBalancesMtx sync.RWMutex
sleeperTask *utils.SleeperTask
}
@@ -53,59 +54,42 @@ var _ BalanceMonitor = (*balanceMonitor)(nil)
func NewBalanceMonitor(ethClient evmclient.Client, ethKeyStore keystore.Eth, lggr logger.Logger) *balanceMonitor {
chainId := ethClient.ConfiguredChainID()
bm := &balanceMonitor{
- services.StateMachine{},
- logger.Named(lggr, "BalanceMonitor"),
- ethClient,
- chainId,
- chainId.String(),
- ethKeyStore,
- make(map[gethCommon.Address]*assets.Eth),
- new(sync.RWMutex),
- nil,
+ ethClient: ethClient,
+ chainID: chainId,
+ chainIDStr: chainId.String(),
+ ethKeyStore: ethKeyStore,
+ ethBalances: make(map[gethCommon.Address]*assets.Eth),
}
+ bm.Service, bm.eng = services.Config{
+ Name: "BalanceMonitor",
+ Start: bm.start,
+ Close: bm.close,
+ }.NewServiceEngine(lggr)
bm.sleeperTask = utils.NewSleeperTask(&worker{bm: bm})
return bm
}
-func (bm *balanceMonitor) Start(ctx context.Context) error {
- return bm.StartOnce("BalanceMonitor", func() error {
- // Always query latest balance on start
- (&worker{bm}).WorkCtx(ctx)
- return nil
- })
-}
-
-// Close shuts down the BalanceMonitor, should not be used after this
-func (bm *balanceMonitor) Close() error {
- return bm.StopOnce("BalanceMonitor", func() error {
- return bm.sleeperTask.Stop()
- })
-}
-
-func (bm *balanceMonitor) Ready() error {
+func (bm *balanceMonitor) start(ctx context.Context) error {
+ // Always query latest balance on start
+ (&worker{bm}).WorkCtx(ctx)
return nil
}
-func (bm *balanceMonitor) Name() string {
- return bm.logger.Name()
-}
-
-func (bm *balanceMonitor) HealthReport() map[string]error {
- return map[string]error{bm.Name(): bm.Healthy()}
+// Close shuts down the BalanceMonitor, should not be used after this
+func (bm *balanceMonitor) close() error {
+ return bm.sleeperTask.Stop()
}
// OnNewLongestChain checks the balance for each key
-func (bm *balanceMonitor) OnNewLongestChain(_ context.Context, head *evmtypes.Head) {
- ok := bm.IfStarted(func() {
- bm.checkBalance(head)
- })
+func (bm *balanceMonitor) OnNewLongestChain(_ context.Context, _ *evmtypes.Head) {
+ ok := bm.sleeperTask.IfStarted(bm.checkBalances)
if !ok {
- bm.logger.Debugw("BalanceMonitor: ignoring OnNewLongestChain call, balance monitor is not started", "state", bm.State())
+ bm.eng.Debugw("BalanceMonitor: ignoring OnNewLongestChain call, balance monitor is not started", "state", bm.sleeperTask.State())
}
}
-func (bm *balanceMonitor) checkBalance(head *evmtypes.Head) {
- bm.logger.Debugw("BalanceMonitor: signalling balance worker")
+func (bm *balanceMonitor) checkBalances() {
+ bm.eng.Debugw("BalanceMonitor: signalling balance worker")
bm.sleeperTask.WakeUp()
}
@@ -117,7 +101,7 @@ func (bm *balanceMonitor) updateBalance(ethBal assets.Eth, address gethCommon.Ad
bm.ethBalances[address] = ðBal
bm.ethBalancesMtx.Unlock()
- lgr := logger.Named(bm.logger, "BalanceLog")
+ lgr := logger.Named(bm.eng, "BalanceLog")
lgr = logger.With(lgr,
"address", address.Hex(),
"ethBalance", ethBal.String(),
@@ -151,7 +135,7 @@ func (bm *balanceMonitor) promUpdateEthBalance(balance *assets.Eth, from gethCom
balanceFloat, err := ApproximateFloat64(balance)
if err != nil {
- bm.logger.Error(fmt.Errorf("updatePrometheusEthBalance: %v", err))
+ bm.eng.Error(fmt.Errorf("updatePrometheusEthBalance: %v", err))
return
}
@@ -174,7 +158,7 @@ func (w *worker) Work() {
func (w *worker) WorkCtx(ctx context.Context) {
enabledAddresses, err := w.bm.ethKeyStore.EnabledAddressesForChain(ctx, w.bm.chainID)
if err != nil {
- w.bm.logger.Error("BalanceMonitor: error getting keys", err)
+ w.bm.eng.Error("BalanceMonitor: error getting keys", err)
}
var wg sync.WaitGroup
@@ -198,12 +182,12 @@ func (w *worker) checkAccountBalance(ctx context.Context, address gethCommon.Add
bal, err := w.bm.ethClient.BalanceAt(ctx, address, nil)
if err != nil {
- w.bm.logger.Errorw(fmt.Sprintf("BalanceMonitor: error getting balance for key %s", address.Hex()),
+ w.bm.eng.Errorw(fmt.Sprintf("BalanceMonitor: error getting balance for key %s", address.Hex()),
"err", err,
"address", address,
)
} else if bal == nil {
- w.bm.logger.Errorw(fmt.Sprintf("BalanceMonitor: error getting balance for key %s: invariant violation, bal may not be nil", address.Hex()),
+ w.bm.eng.Errorw(fmt.Sprintf("BalanceMonitor: error getting balance for key %s: invariant violation, bal may not be nil", address.Hex()),
"err", err,
"address", address,
)
diff --git a/core/recovery/recover.go b/core/recovery/recover.go
index 8e485abc556..61315defa9a 100644
--- a/core/recovery/recover.go
+++ b/core/recovery/recover.go
@@ -3,38 +3,38 @@ package recovery
import (
"github.com/getsentry/sentry-go"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
+
+ corelogger "github.com/smartcontractkit/chainlink/v2/core/logger"
)
func ReportPanics(fn func()) {
- defer func() {
- if err := recover(); err != nil {
- sentry.CurrentHub().Recover(err)
- sentry.Flush(logger.SentryFlushDeadline)
+ HandleFn(fn, func(err any) {
+ sentry.CurrentHub().Recover(err)
+ sentry.Flush(corelogger.SentryFlushDeadline)
- panic(err)
- }
- }()
- fn()
+ panic(err)
+ })
}
func WrapRecover(lggr logger.Logger, fn func()) {
- defer func() {
- if err := recover(); err != nil {
- lggr.Recover(err)
+ WrapRecoverHandle(lggr, fn, nil)
+}
+
+func WrapRecoverHandle(lggr logger.Logger, fn func(), onPanic func(recovered any)) {
+ HandleFn(fn, func(recovered any) {
+ logger.Sugared(lggr).Criticalw("Recovered goroutine panic", "panic", recovered)
+
+ if onPanic != nil {
+ onPanic(recovered)
}
- }()
- fn()
+ })
}
-func WrapRecoverHandle(lggr logger.Logger, fn func(), onPanic func(interface{})) {
+func HandleFn(fn func(), onPanic func(recovered any)) {
defer func() {
- if err := recover(); err != nil {
- lggr.Recover(err)
-
- if onPanic != nil {
- onPanic(err)
- }
+ if recovered := recover(); recovered != nil {
+ onPanic(recovered)
}
}()
fn()
diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go
index 138ca25ed3b..c23ec08a692 100644
--- a/core/services/chainlink/application.go
+++ b/core/services/chainlink/application.go
@@ -148,7 +148,6 @@ type ChainlinkApplication struct {
shutdownOnce sync.Once
srvcs []services.ServiceCtx
HealthChecker services.Checker
- Nurse *services.Nurse
logger logger.SugaredLogger
AuditLogger audit.AuditLogger
closeLogger func() error
@@ -277,14 +276,9 @@ func NewApplication(opts ApplicationOpts) (Application, error) {
}
ap := cfg.AutoPprof()
- var nurse *services.Nurse
if ap.Enabled() {
globalLogger.Info("Nurse service (automatic pprof profiling) is enabled")
- nurse = services.NewNurse(ap, globalLogger)
- err := nurse.Start()
- if err != nil {
- return nil, err
- }
+ srvcs = append(srvcs, services.NewNurse(ap, globalLogger))
} else {
globalLogger.Info("Nurse service (automatic pprof profiling) is disabled")
}
@@ -588,7 +582,6 @@ func NewApplication(opts ApplicationOpts) (Application, error) {
SessionReaper: sessionReaper,
ExternalInitiatorManager: externalInitiatorManager,
HealthChecker: healthChecker,
- Nurse: nurse,
logger: globalLogger,
AuditLogger: auditLogger,
closeLogger: opts.CloseLogger,
@@ -708,10 +701,6 @@ func (app *ChainlinkApplication) stop() (err error) {
err = multierr.Append(err, app.FeedsService.Close())
}
- if app.Nurse != nil {
- err = multierr.Append(err, app.Nurse.Close())
- }
-
if app.profiler != nil {
err = multierr.Append(err, app.profiler.Stop())
}
diff --git a/core/services/fluxmonitorv2/deviation_checker.go b/core/services/fluxmonitorv2/deviation_checker.go
index 51e85de371e..9dc399b09f9 100644
--- a/core/services/fluxmonitorv2/deviation_checker.go
+++ b/core/services/fluxmonitorv2/deviation_checker.go
@@ -3,7 +3,7 @@ package fluxmonitorv2
import (
"github.com/shopspring/decimal"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
)
// DeviationThresholds carries parameters used by the threshold-trigger logic
@@ -26,7 +26,7 @@ func NewDeviationChecker(rel, abs float64, lggr logger.Logger) *DeviationChecker
Rel: rel,
Abs: abs,
},
- lggr: lggr.Named("DeviationChecker").With("threshold", rel, "absoluteThreshold", abs),
+ lggr: logger.Sugared(lggr).Named("DeviationChecker").With("threshold", rel, "absoluteThreshold", abs),
}
}
diff --git a/core/services/fluxmonitorv2/flux_monitor.go b/core/services/fluxmonitorv2/flux_monitor.go
index 9175feb1a68..b8154ab6797 100644
--- a/core/services/fluxmonitorv2/flux_monitor.go
+++ b/core/services/fluxmonitorv2/flux_monitor.go
@@ -13,6 +13,7 @@ import (
"github.com/pkg/errors"
"github.com/shopspring/decimal"
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink-common/pkg/services"
"github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
@@ -22,7 +23,6 @@ import (
evmutils "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/flags_wrapper"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/flux_aggregator_wrapper"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/recovery"
"github.com/smartcontractkit/chainlink/v2/core/services/fluxmonitorv2/promfm"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
@@ -56,7 +56,10 @@ const DefaultHibernationPollPeriod = 24 * time.Hour
// FluxMonitor polls external price adapters via HTTP to check for price swings.
type FluxMonitor struct {
- services.StateMachine
+ services.Service
+ eng *services.Engine
+ logger logger.SugaredLogger
+
contractAddress common.Address
oracleAddress common.Address
jobSpec job.Job
@@ -77,13 +80,8 @@ type FluxMonitor struct {
logBroadcaster log.Broadcaster
chainID *big.Int
- logger logger.SugaredLogger
-
backlog *utils.BoundedPriorityQueue[log.Broadcast]
chProcessLogs chan struct{}
-
- chStop services.StopChan
- waitOnStop chan struct{}
}
// NewFluxMonitor returns a new instance of PollingDeviationChecker.
@@ -105,7 +103,7 @@ func NewFluxMonitor(
flags Flags,
fluxAggregator flux_aggregator_wrapper.FluxAggregatorInterface,
logBroadcaster log.Broadcaster,
- fmLogger logger.Logger,
+ lggr logger.Logger,
chainID *big.Int,
) (*FluxMonitor, error) {
fm := &FluxMonitor{
@@ -126,7 +124,6 @@ func NewFluxMonitor(
flags: flags,
logBroadcaster: logBroadcaster,
fluxAggregator: fluxAggregator,
- logger: logger.Sugared(fmLogger),
chainID: chainID,
backlog: utils.NewBoundedPriorityQueue[log.Broadcast](map[uint]int{
// We want reconnecting nodes to be able to submit to a round
@@ -136,9 +133,13 @@ func NewFluxMonitor(
PriorityFlagChangedLog: 2,
}),
chProcessLogs: make(chan struct{}, 1),
- chStop: make(services.StopChan),
- waitOnStop: make(chan struct{}),
}
+ fm.Service, fm.eng = services.Config{
+ Name: "FluxMonitor",
+ Start: fm.start,
+ Close: fm.close,
+ }.NewServiceEngine(lggr)
+ fm.logger = logger.Sugared(fm.eng)
return fm, nil
}
@@ -220,7 +221,7 @@ func NewFromJobSpec(
return nil, err
}
- fmLogger := lggr.With(
+ fmLogger := logger.With(lggr,
"jobID", jobSpec.ID,
"contract", fmSpec.ContractAddress.Hex(),
)
@@ -279,14 +280,9 @@ const (
// Start implements the job.Service interface. It begins the CSP consumer in a
// single goroutine to poll the price adapters and listen to NewRound events.
-func (fm *FluxMonitor) Start(context.Context) error {
- return fm.StartOnce("FluxMonitor", func() error {
- fm.logger.Debug("Starting Flux Monitor for job")
-
- go fm.consume()
-
- return nil
- })
+func (fm *FluxMonitor) start(context.Context) error {
+ fm.eng.Go(fm.consume)
+ return nil
}
func (fm *FluxMonitor) IsHibernating() bool {
@@ -304,16 +300,12 @@ func (fm *FluxMonitor) IsHibernating() bool {
return !isFlagLowered
}
-// Close implements the job.Service interface. It stops this instance from
+// close stops this instance from
// polling, cleaning up resources.
-func (fm *FluxMonitor) Close() error {
- return fm.StopOnce("FluxMonitor", func() error {
- fm.pollManager.Stop()
- close(fm.chStop)
- <-fm.waitOnStop
+func (fm *FluxMonitor) close() error {
+ fm.pollManager.Stop()
- return nil
- })
+ return nil
}
// JobID implements the listener.Listener interface.
@@ -354,10 +346,8 @@ func (fm *FluxMonitor) HandleLog(ctx context.Context, broadcast log.Broadcast) {
}
}
-func (fm *FluxMonitor) consume() {
- defer close(fm.waitOnStop)
-
- if err := fm.SetOracleAddress(); err != nil {
+func (fm *FluxMonitor) consume(ctx context.Context) {
+ if err := fm.SetOracleAddress(ctx); err != nil {
fm.logger.Warnw(
"unable to set oracle address, this flux monitor job may not work correctly",
"err", err,
@@ -398,46 +388,46 @@ func (fm *FluxMonitor) consume() {
for {
select {
- case <-fm.chStop:
+ case <-ctx.Done():
return
case <-fm.chProcessLogs:
- recovery.WrapRecover(fm.logger, fm.processLogs)
+ recovery.WrapRecover(fm.logger, func() { fm.processLogs(ctx) })
case at := <-fm.pollManager.PollTickerTicks():
tickLogger.Debugf("Poll ticker fired on %v", formatTime(at))
recovery.WrapRecover(fm.logger, func() {
- fm.pollIfEligible(PollRequestTypePoll, fm.deviationChecker, nil)
+ fm.pollIfEligible(ctx, PollRequestTypePoll, fm.deviationChecker, nil)
})
case at := <-fm.pollManager.IdleTimerTicks():
tickLogger.Debugf("Idle timer fired on %v", formatTime(at))
recovery.WrapRecover(fm.logger, func() {
- fm.pollIfEligible(PollRequestTypeIdle, NewZeroDeviationChecker(fm.logger), nil)
+ fm.pollIfEligible(ctx, PollRequestTypeIdle, NewZeroDeviationChecker(fm.logger), nil)
})
case at := <-fm.pollManager.RoundTimerTicks():
tickLogger.Debugf("Round timer fired on %v", formatTime(at))
recovery.WrapRecover(fm.logger, func() {
- fm.pollIfEligible(PollRequestTypeRound, fm.deviationChecker, nil)
+ fm.pollIfEligible(ctx, PollRequestTypeRound, fm.deviationChecker, nil)
})
case at := <-fm.pollManager.HibernationTimerTicks():
tickLogger.Debugf("Hibernation timer fired on %v", formatTime(at))
recovery.WrapRecover(fm.logger, func() {
- fm.pollIfEligible(PollRequestTypeHibernation, NewZeroDeviationChecker(fm.logger), nil)
+ fm.pollIfEligible(ctx, PollRequestTypeHibernation, NewZeroDeviationChecker(fm.logger), nil)
})
case at := <-fm.pollManager.RetryTickerTicks():
tickLogger.Debugf("Retry ticker fired on %v", formatTime(at))
recovery.WrapRecover(fm.logger, func() {
- fm.pollIfEligible(PollRequestTypeRetry, NewZeroDeviationChecker(fm.logger), nil)
+ fm.pollIfEligible(ctx, PollRequestTypeRetry, NewZeroDeviationChecker(fm.logger), nil)
})
case at := <-fm.pollManager.DrumbeatTicks():
tickLogger.Debugf("Drumbeat ticker fired on %v", formatTime(at))
recovery.WrapRecover(fm.logger, func() {
- fm.pollIfEligible(PollRequestTypeDrumbeat, NewZeroDeviationChecker(fm.logger), nil)
+ fm.pollIfEligible(ctx, PollRequestTypeDrumbeat, NewZeroDeviationChecker(fm.logger), nil)
})
case request := <-fm.pollManager.Poll():
@@ -446,7 +436,7 @@ func (fm *FluxMonitor) consume() {
break
default:
recovery.WrapRecover(fm.logger, func() {
- fm.pollIfEligible(request.Type, fm.deviationChecker, nil)
+ fm.pollIfEligible(ctx, request.Type, fm.deviationChecker, nil)
})
}
}
@@ -460,11 +450,7 @@ func formatTime(at time.Time) string {
// SetOracleAddress sets the oracle address which matches the node's keys.
// If none match, it uses the first available key
-func (fm *FluxMonitor) SetOracleAddress() error {
- // fm on deprecation path, using dangling context
- ctx, cancel := fm.chStop.NewCtx()
- defer cancel()
-
+func (fm *FluxMonitor) SetOracleAddress(ctx context.Context) error {
oracleAddrs, err := fm.fluxAggregator.GetOracles(nil)
if err != nil {
fm.logger.Error("failed to get list of oracles from FluxAggregator contract")
@@ -502,10 +488,7 @@ func (fm *FluxMonitor) SetOracleAddress() error {
return errors.New("No keys found")
}
-func (fm *FluxMonitor) processLogs() {
- ctx, cancel := fm.chStop.NewCtx()
- defer cancel()
-
+func (fm *FluxMonitor) processLogs(ctx context.Context) {
for ctx.Err() == nil && !fm.backlog.Empty() {
broadcast := fm.backlog.Take()
fm.processBroadcast(ctx, broadcast)
@@ -529,7 +512,7 @@ func (fm *FluxMonitor) processBroadcast(ctx context.Context, broadcast log.Broad
decodedLog := broadcast.DecodedLog()
switch log := decodedLog.(type) {
case *flux_aggregator_wrapper.FluxAggregatorNewRound:
- fm.respondToNewRoundLog(*log, broadcast)
+ fm.respondToNewRoundLog(ctx, *log, broadcast)
case *flux_aggregator_wrapper.FluxAggregatorAnswerUpdated:
fm.respondToAnswerUpdatedLog(*log)
fm.markLogAsConsumed(ctx, broadcast, decodedLog, started)
@@ -540,7 +523,7 @@ func (fm *FluxMonitor) processBroadcast(ctx context.Context, broadcast log.Broad
// Only reactivate if it is hibernating
if fm.pollManager.isHibernating.Load() {
fm.pollManager.Awaken(fm.initialRoundState())
- fm.pollIfEligible(PollRequestTypeAwaken, NewZeroDeviationChecker(fm.logger), broadcast)
+ fm.pollIfEligible(ctx, PollRequestTypeAwaken, NewZeroDeviationChecker(fm.logger), broadcast)
}
default:
fm.logger.Errorf("unknown log %v of type %T", log, log)
@@ -589,10 +572,8 @@ func (fm *FluxMonitor) respondToAnswerUpdatedLog(log flux_aggregator_wrapper.Flu
// The NewRound log tells us that an oracle has initiated a new round. This tells us that we
// need to poll and submit an answer to the contract regardless of the deviation.
-func (fm *FluxMonitor) respondToNewRoundLog(log flux_aggregator_wrapper.FluxAggregatorNewRound, lb log.Broadcast) {
+func (fm *FluxMonitor) respondToNewRoundLog(ctx context.Context, log flux_aggregator_wrapper.FluxAggregatorNewRound, lb log.Broadcast) {
started := time.Now()
- ctx, cancel := fm.chStop.NewCtx()
- defer cancel()
newRoundLogger := fm.logger.With(
"round", log.RoundId,
@@ -819,10 +800,8 @@ func (fm *FluxMonitor) checkEligibilityAndAggregatorFunding(roundState flux_aggr
return nil
}
-func (fm *FluxMonitor) pollIfEligible(pollReq PollRequestType, deviationChecker *DeviationChecker, broadcast log.Broadcast) {
+func (fm *FluxMonitor) pollIfEligible(ctx context.Context, pollReq PollRequestType, deviationChecker *DeviationChecker, broadcast log.Broadcast) {
started := time.Now()
- ctx, cancel := fm.chStop.NewCtx()
- defer cancel()
l := fm.logger.With(
"threshold", deviationChecker.Thresholds.Rel,
diff --git a/core/services/fluxmonitorv2/flux_monitor_test.go b/core/services/fluxmonitorv2/flux_monitor_test.go
index b3a5bcee6b9..1d1ed676e48 100644
--- a/core/services/fluxmonitorv2/flux_monitor_test.go
+++ b/core/services/fluxmonitorv2/flux_monitor_test.go
@@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/google/uuid"
+ "github.com/jmoiron/sqlx"
"github.com/onsi/gomega"
"github.com/pkg/errors"
"github.com/shopspring/decimal"
@@ -18,11 +19,10 @@ import (
"github.com/stretchr/testify/require"
"gopkg.in/guregu/null.v4"
- "github.com/jmoiron/sqlx"
-
"github.com/smartcontractkit/chainlink-common/pkg/assets"
"github.com/smartcontractkit/chainlink-common/pkg/services/servicetest"
"github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/log"
logmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/log/mocks"
@@ -491,7 +491,7 @@ func TestFluxMonitor_PollIfEligible(t *testing.T) {
oracles := []common.Address{nodeAddr, testutils.NewAddress()}
tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil)
- require.NoError(t, fm.SetOracleAddress())
+ require.NoError(t, fm.SetOracleAddress(tests.Context(t)))
fm.ExportedPollIfEligible(thresholds.rel, thresholds.abs)
})
}
@@ -526,7 +526,7 @@ func TestFluxMonitor_PollIfEligible_Creates_JobErr(t *testing.T) {
Once()
tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil)
- require.NoError(t, fm.SetOracleAddress())
+ require.NoError(t, fm.SetOracleAddress(tests.Context(t)))
fm.ExportedPollIfEligible(1, 1)
}
@@ -1171,7 +1171,7 @@ func TestFluxMonitor_RoundTimeoutCausesPoll_timesOutAtZero(t *testing.T) {
tm.fluxAggregator.On("Address").Return(common.Address{})
tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil)
- require.NoError(t, fm.SetOracleAddress())
+ require.NoError(t, fm.SetOracleAddress(tests.Context(t)))
fm.ExportedRoundState(t)
servicetest.Run(t, fm)
@@ -1506,7 +1506,7 @@ func TestFluxMonitor_DoesNotDoubleSubmit(t *testing.T) {
Return(nil)
tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil)
- require.NoError(t, fm.SetOracleAddress())
+ require.NoError(t, fm.SetOracleAddress(tests.Context(t)))
tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(flux_aggregator_wrapper.LatestRoundData{
Answer: big.NewInt(10),
@@ -1635,7 +1635,7 @@ func TestFluxMonitor_DoesNotDoubleSubmit(t *testing.T) {
Once()
tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil)
- require.NoError(t, fm.SetOracleAddress())
+ require.NoError(t, fm.SetOracleAddress(tests.Context(t)))
fm.ExportedPollIfEligible(0, 0)
// Now fire off the NewRound log and ensure it does not respond this time
@@ -1732,7 +1732,7 @@ func TestFluxMonitor_DoesNotDoubleSubmit(t *testing.T) {
Once()
tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil)
- require.NoError(t, fm.SetOracleAddress())
+ require.NoError(t, fm.SetOracleAddress(tests.Context(t)))
fm.ExportedPollIfEligible(0, 0)
// Now fire off the NewRound log and ensure it does not respond this time
diff --git a/core/services/fluxmonitorv2/helpers_test.go b/core/services/fluxmonitorv2/helpers_test.go
index d321ddc35c3..80db82351c7 100644
--- a/core/services/fluxmonitorv2/helpers_test.go
+++ b/core/services/fluxmonitorv2/helpers_test.go
@@ -19,11 +19,15 @@ func (fm *FluxMonitor) Format(f fmt.State, verb rune) {
}
func (fm *FluxMonitor) ExportedPollIfEligible(threshold, absoluteThreshold float64) {
- fm.pollIfEligible(PollRequestTypePoll, NewDeviationChecker(threshold, absoluteThreshold, fm.logger), nil)
+ ctx, cancel := fm.eng.NewCtx()
+ defer cancel()
+ fm.pollIfEligible(ctx, PollRequestTypePoll, NewDeviationChecker(threshold, absoluteThreshold, fm.logger), nil)
}
func (fm *FluxMonitor) ExportedProcessLogs() {
- fm.processLogs()
+ ctx, cancel := fm.eng.NewCtx()
+ defer cancel()
+ fm.processLogs(ctx)
}
func (fm *FluxMonitor) ExportedBacklog() *utils.BoundedPriorityQueue[log.Broadcast] {
@@ -36,7 +40,9 @@ func (fm *FluxMonitor) ExportedRoundState(t *testing.T) {
}
func (fm *FluxMonitor) ExportedRespondToNewRoundLog(log *flux_aggregator_wrapper.FluxAggregatorNewRound, broadcast log.Broadcast) {
- fm.respondToNewRoundLog(*log, broadcast)
+ ctx, cancel := fm.eng.NewCtx()
+ defer cancel()
+ fm.respondToNewRoundLog(ctx, *log, broadcast)
}
func (fm *FluxMonitor) ExportedRespondToFlagsRaisedLog() {
diff --git a/core/services/fluxmonitorv2/poll_manager.go b/core/services/fluxmonitorv2/poll_manager.go
index 78b99aec4d5..aca6c75a311 100644
--- a/core/services/fluxmonitorv2/poll_manager.go
+++ b/core/services/fluxmonitorv2/poll_manager.go
@@ -5,8 +5,8 @@ import (
"sync/atomic"
"time"
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/flux_aggregator_wrapper"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
@@ -64,7 +64,7 @@ type PollManager struct {
}
// NewPollManager initializes a new PollManager
-func NewPollManager(cfg PollManagerConfig, logger logger.Logger) (*PollManager, error) {
+func NewPollManager(cfg PollManagerConfig, lggr logger.Logger) (*PollManager, error) {
minBackoffDuration := cfg.MinRetryBackoffDuration
if cfg.IdleTimerPeriod < minBackoffDuration {
minBackoffDuration = cfg.IdleTimerPeriod
@@ -82,7 +82,7 @@ func NewPollManager(cfg PollManagerConfig, logger logger.Logger) (*PollManager,
p := &PollManager{
cfg: cfg,
- logger: logger.Named("PollManager"),
+ logger: logger.Named(lggr, "PollManager"),
hibernationTimer: utils.NewResettableTimer(),
pollTicker: utils.NewPausableTicker(cfg.PollTickerInterval),
@@ -277,7 +277,7 @@ func (pm *PollManager) startIdleTimer(roundStartedAtUTC uint64) {
deadline := startedAt.Add(pm.cfg.IdleTimerPeriod)
deadlineDuration := time.Until(deadline)
- log := pm.logger.With(
+ log := logger.With(pm.logger,
"pollFrequency", pm.cfg.PollTickerInterval,
"idleDuration", pm.cfg.IdleTimerPeriod,
"startedAt", roundStartedAtUTC,
@@ -300,7 +300,7 @@ func (pm *PollManager) startIdleTimer(roundStartedAtUTC uint64) {
// startRoundTimer starts the round timer
func (pm *PollManager) startRoundTimer(roundTimesOutAt uint64) {
- log := pm.logger.With(
+ log := logger.With(pm.logger,
"pollFrequency", pm.cfg.PollTickerInterval,
"idleDuration", pm.cfg.IdleTimerPeriod,
"timesOutAt", roundTimesOutAt,
diff --git a/core/services/nurse.go b/core/services/nurse.go
index a9069b5181d..7f3cad13e71 100644
--- a/core/services/nurse.go
+++ b/core/services/nurse.go
@@ -3,6 +3,7 @@ package services
import (
"bytes"
"compress/gzip"
+ "context"
"fmt"
"io/fs"
"os"
@@ -19,22 +20,21 @@ import (
commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
"github.com/smartcontractkit/chainlink-common/pkg/services"
+ "github.com/smartcontractkit/chainlink-common/pkg/timeutil"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
type Nurse struct {
- services.StateMachine
+ services.Service
+ eng *services.Engine
cfg Config
- log logger.Logger
checks map[string]CheckFunc
checksMu sync.RWMutex
chGather chan gatherRequest
- chStop chan struct{}
- wgDone sync.WaitGroup
}
type Config interface {
@@ -66,85 +66,63 @@ const (
)
func NewNurse(cfg Config, log logger.Logger) *Nurse {
- return &Nurse{
+ n := &Nurse{
cfg: cfg,
- log: log.Named("Nurse"),
checks: make(map[string]CheckFunc),
chGather: make(chan gatherRequest, 1),
- chStop: make(chan struct{}),
}
+ n.Service, n.eng = services.Config{
+ Name: "Nurse",
+ Start: n.start,
+ }.NewServiceEngine(log)
+
+ return n
}
-func (n *Nurse) Start() error {
- return n.StartOnce("Nurse", func() error {
- // This must be set *once*, and it must occur as early as possible
- if n.cfg.MemProfileRate() != runtime.MemProfileRate {
- runtime.MemProfileRate = n.cfg.BlockProfileRate()
- }
+func (n *Nurse) start(_ context.Context) error {
+ // This must be set *once*, and it must occur as early as possible
+ if n.cfg.MemProfileRate() != runtime.MemProfileRate {
+ runtime.MemProfileRate = n.cfg.MemProfileRate()
+ }
- n.log.Debugf("Starting nurse with config %+v", n.cfg)
- runtime.SetCPUProfileRate(n.cfg.CPUProfileRate())
- runtime.SetBlockProfileRate(n.cfg.BlockProfileRate())
- runtime.SetMutexProfileFraction(n.cfg.MutexProfileFraction())
+ n.eng.Debugf("Starting nurse with config %+v", n.cfg)
+ runtime.SetCPUProfileRate(n.cfg.CPUProfileRate())
+ runtime.SetBlockProfileRate(n.cfg.BlockProfileRate())
+ runtime.SetMutexProfileFraction(n.cfg.MutexProfileFraction())
- err := utils.EnsureDirAndMaxPerms(n.cfg.ProfileRoot(), 0744)
- if err != nil {
- return err
- }
+ err := utils.EnsureDirAndMaxPerms(n.cfg.ProfileRoot(), 0744)
+ if err != nil {
+ return err
+ }
- n.AddCheck("mem", n.checkMem)
- n.AddCheck("goroutines", n.checkGoroutines)
-
- n.wgDone.Add(1)
- // Checker
- go func() {
- defer n.wgDone.Done()
- for {
- select {
- case <-n.chStop:
- return
- case <-time.After(n.cfg.PollInterval().Duration()):
- }
-
- func() {
- n.checksMu.RLock()
- defer n.checksMu.RUnlock()
- for reason, checkFunc := range n.checks {
- if unwell, meta := checkFunc(); unwell {
- n.GatherVitals(reason, meta)
- break
- }
- }
- }()
- }
- }()
-
- n.wgDone.Add(1)
- // Responder
- go func() {
- defer n.wgDone.Done()
- for {
- select {
- case <-n.chStop:
- return
- case req := <-n.chGather:
- n.gatherVitals(req.reason, req.meta)
- }
- }
- }()
+ n.AddCheck("mem", n.checkMem)
+ n.AddCheck("goroutines", n.checkGoroutines)
- return nil
+ // Checker
+ n.eng.GoTick(timeutil.NewTicker(n.cfg.PollInterval().Duration), func(ctx context.Context) {
+ n.checksMu.RLock()
+ defer n.checksMu.RUnlock()
+ for reason, checkFunc := range n.checks {
+ if unwell, meta := checkFunc(); unwell {
+ n.GatherVitals(ctx, reason, meta)
+ break
+ }
+ }
})
-}
-func (n *Nurse) Close() error {
- return n.StopOnce("Nurse", func() error {
- n.log.Debug("Nurse closing...")
- defer n.log.Debug("Nurse closed")
- close(n.chStop)
- n.wgDone.Wait()
- return nil
+ // Responder
+ n.eng.Go(func(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case req := <-n.chGather:
+ n.gatherVitals(req.reason, req.meta)
+ }
+ }
})
+
+ return nil
}
func (n *Nurse) AddCheck(reason string, checkFunc CheckFunc) {
@@ -153,9 +131,9 @@ func (n *Nurse) AddCheck(reason string, checkFunc CheckFunc) {
n.checks[reason] = checkFunc
}
-func (n *Nurse) GatherVitals(reason string, meta Meta) {
+func (n *Nurse) GatherVitals(ctx context.Context, reason string, meta Meta) {
select {
- case <-n.chStop:
+ case <-ctx.Done():
case n.chGather <- gatherRequest{reason, meta}:
default:
}
@@ -189,14 +167,14 @@ func (n *Nurse) checkGoroutines() (bool, Meta) {
func (n *Nurse) gatherVitals(reason string, meta Meta) {
loggerFields := (logger.Fields{"reason": reason}).Merge(logger.Fields(meta))
- n.log.Debugw("Nurse is gathering vitals", loggerFields.Slice()...)
+ n.eng.Debugw("Nurse is gathering vitals", loggerFields.Slice()...)
size, err := n.totalProfileBytes()
if err != nil {
- n.log.Errorw("could not fetch total profile bytes", loggerFields.With("err", err).Slice()...)
+ n.eng.Errorw("could not fetch total profile bytes", loggerFields.With("err", err).Slice()...)
return
} else if size >= uint64(n.cfg.MaxProfileSize()) {
- n.log.Warnw("cannot write pprof profile, total profile size exceeds configured PPROF_MAX_PROFILE_SIZE",
+ n.eng.Warnw("cannot write pprof profile, total profile size exceeds configured PPROF_MAX_PROFILE_SIZE",
loggerFields.With("total", size, "max", n.cfg.MaxProfileSize()).Slice()...,
)
return
@@ -206,7 +184,7 @@ func (n *Nurse) gatherVitals(reason string, meta Meta) {
err = n.appendLog(now, reason, meta)
if err != nil {
- n.log.Warnw("cannot write pprof profile", loggerFields.With("err", err).Slice()...)
+ n.eng.Warnw("cannot write pprof profile", loggerFields.With("err", err).Slice()...)
return
}
var wg sync.WaitGroup
@@ -227,7 +205,7 @@ func (n *Nurse) gatherVitals(reason string, meta Meta) {
wg.Add(1)
go n.gather("heap", now, &wg)
} else {
- n.log.Info("skipping heap collection because runtime.MemProfileRate = 0")
+ n.eng.Info("skipping heap collection because runtime.MemProfileRate = 0")
}
wg.Add(1)
@@ -236,15 +214,13 @@ func (n *Nurse) gatherVitals(reason string, meta Meta) {
go n.gather("threadcreate", now, &wg)
ch := make(chan struct{})
- n.wgDone.Add(1)
- go func() {
- defer n.wgDone.Done()
+ n.eng.Go(func(ctx context.Context) {
defer close(ch)
wg.Wait()
- }()
+ })
select {
- case <-n.chStop:
+ case <-n.eng.StopChan:
case <-ch:
}
}
@@ -252,7 +228,7 @@ func (n *Nurse) gatherVitals(reason string, meta Meta) {
func (n *Nurse) appendLog(now time.Time, reason string, meta Meta) error {
filename := filepath.Join(n.cfg.ProfileRoot(), "nurse.log")
- n.log.Debugf("creating nurse log %s", filename)
+ n.eng.Debugf("creating nurse log %s", filename)
file, err := os.Create(filename)
if err != nil {
@@ -288,34 +264,34 @@ func (n *Nurse) appendLog(now time.Time, reason string, meta Meta) error {
func (n *Nurse) gatherCPU(now time.Time, wg *sync.WaitGroup) {
defer wg.Done()
- n.log.Debugf("gather cpu %d ...", now.UnixMicro())
- defer n.log.Debugf("gather cpu %d done", now.UnixMicro())
+ n.eng.Debugf("gather cpu %d ...", now.UnixMicro())
+ defer n.eng.Debugf("gather cpu %d done", now.UnixMicro())
wc, err := n.createFile(now, cpuProfName, false)
if err != nil {
- n.log.Errorw("could not write cpu profile", "err", err)
+ n.eng.Errorw("could not write cpu profile", "err", err)
return
}
defer wc.Close()
err = pprof.StartCPUProfile(wc)
if err != nil {
- n.log.Errorw("could not start cpu profile", "err", err)
+ n.eng.Errorw("could not start cpu profile", "err", err)
return
}
select {
- case <-n.chStop:
- n.log.Debug("gather cpu received stop")
+ case <-n.eng.StopChan:
+ n.eng.Debug("gather cpu received stop")
case <-time.After(n.cfg.GatherDuration().Duration()):
- n.log.Debugf("gather cpu duration elapsed %s. stoping profiling.", n.cfg.GatherDuration().Duration().String())
+ n.eng.Debugf("gather cpu duration elapsed %s. stopping profiling.", n.cfg.GatherDuration().Duration().String())
}
pprof.StopCPUProfile()
err = wc.Close()
if err != nil {
- n.log.Errorw("could not close cpu profile", "err", err)
+ n.eng.Errorw("could not close cpu profile", "err", err)
return
}
}
@@ -323,23 +299,23 @@ func (n *Nurse) gatherCPU(now time.Time, wg *sync.WaitGroup) {
func (n *Nurse) gatherTrace(now time.Time, wg *sync.WaitGroup) {
defer wg.Done()
- n.log.Debugf("gather trace %d ...", now.UnixMicro())
- defer n.log.Debugf("gather trace %d done", now.UnixMicro())
+ n.eng.Debugf("gather trace %d ...", now.UnixMicro())
+ defer n.eng.Debugf("gather trace %d done", now.UnixMicro())
wc, err := n.createFile(now, traceProfName, true)
if err != nil {
- n.log.Errorw("could not write trace profile", "err", err)
+ n.eng.Errorw("could not write trace profile", "err", err)
return
}
defer wc.Close()
err = trace.Start(wc)
if err != nil {
- n.log.Errorw("could not start trace profile", "err", err)
+ n.eng.Errorw("could not start trace profile", "err", err)
return
}
select {
- case <-n.chStop:
+ case <-n.eng.StopChan:
case <-time.After(n.cfg.GatherTraceDuration().Duration()):
}
@@ -347,7 +323,7 @@ func (n *Nurse) gatherTrace(now time.Time, wg *sync.WaitGroup) {
err = wc.Close()
if err != nil {
- n.log.Errorw("could not close trace profile", "err", err)
+ n.eng.Errorw("could not close trace profile", "err", err)
return
}
}
@@ -355,18 +331,18 @@ func (n *Nurse) gatherTrace(now time.Time, wg *sync.WaitGroup) {
func (n *Nurse) gather(typ string, now time.Time, wg *sync.WaitGroup) {
defer wg.Done()
- n.log.Debugf("gather %s %d ...", typ, now.UnixMicro())
- n.log.Debugf("gather %s %d done", typ, now.UnixMicro())
+ n.eng.Debugf("gather %s %d ...", typ, now.UnixMicro())
+ n.eng.Debugf("gather %s %d done", typ, now.UnixMicro())
p := pprof.Lookup(typ)
if p == nil {
- n.log.Errorf("Invariant violation: pprof type '%v' does not exist", typ)
+ n.eng.Errorf("Invariant violation: pprof type '%v' does not exist", typ)
return
}
p0, err := collectProfile(p)
if err != nil {
- n.log.Errorw(fmt.Sprintf("could not collect %v profile", typ), "err", err)
+ n.eng.Errorw(fmt.Sprintf("could not collect %v profile", typ), "err", err)
return
}
@@ -374,14 +350,14 @@ func (n *Nurse) gather(typ string, now time.Time, wg *sync.WaitGroup) {
defer t.Stop()
select {
- case <-n.chStop:
+ case <-n.eng.StopChan:
return
case <-t.C:
}
p1, err := collectProfile(p)
if err != nil {
- n.log.Errorw(fmt.Sprintf("could not collect %v profile", typ), "err", err)
+ n.eng.Errorw(fmt.Sprintf("could not collect %v profile", typ), "err", err)
return
}
ts := p1.TimeNanos
@@ -391,7 +367,7 @@ func (n *Nurse) gather(typ string, now time.Time, wg *sync.WaitGroup) {
p1, err = profile.Merge([]*profile.Profile{p0, p1})
if err != nil {
- n.log.Errorw(fmt.Sprintf("could not compute delta for %v profile", typ), "err", err)
+ n.eng.Errorw(fmt.Sprintf("could not compute delta for %v profile", typ), "err", err)
return
}
@@ -400,19 +376,19 @@ func (n *Nurse) gather(typ string, now time.Time, wg *sync.WaitGroup) {
wc, err := n.createFile(now, typ, false)
if err != nil {
- n.log.Errorw(fmt.Sprintf("could not write %v profile", typ), "err", err)
+ n.eng.Errorw(fmt.Sprintf("could not write %v profile", typ), "err", err)
return
}
defer wc.Close()
err = p1.Write(wc)
if err != nil {
- n.log.Errorw(fmt.Sprintf("could not write %v profile", typ), "err", err)
+ n.eng.Errorw(fmt.Sprintf("could not write %v profile", typ), "err", err)
return
}
err = wc.Close()
if err != nil {
- n.log.Errorw(fmt.Sprintf("could not close file for %v profile", typ), "err", err)
+ n.eng.Errorw(fmt.Sprintf("could not close file for %v profile", typ), "err", err)
return
}
}
@@ -437,7 +413,7 @@ func (n *Nurse) createFile(now time.Time, typ string, shouldGzip bool) (*utils.D
filename += ".gz"
}
fullpath := filepath.Join(n.cfg.ProfileRoot(), filename)
- n.log.Debugf("creating file %s", fullpath)
+ n.eng.Debugf("creating file %s", fullpath)
file, err := os.Create(fullpath)
if err != nil {
diff --git a/core/services/nurse_test.go b/core/services/nurse_test.go
index 4597eeb456b..ed6f6872dc9 100644
--- a/core/services/nurse_test.go
+++ b/core/services/nurse_test.go
@@ -10,6 +10,7 @@ import (
"github.com/stretchr/testify/require"
commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/utils"
@@ -102,7 +103,7 @@ func TestNurse(t *testing.T) {
nrse := NewNurse(newMockConfig(t), l)
nrse.AddCheck("test", func() (bool, Meta) { return true, Meta{} })
- require.NoError(t, nrse.Start())
+ require.NoError(t, nrse.Start(tests.Context(t)))
defer func() { require.NoError(t, nrse.Close()) }()
require.NoError(t, nrse.appendLog(time.Now(), "test", Meta{}))
diff --git a/core/services/relay/evm/functions/logpoller_wrapper.go b/core/services/relay/evm/functions/logpoller_wrapper.go
index 559b1ec33f5..b0d04b11871 100644
--- a/core/services/relay/evm/functions/logpoller_wrapper.go
+++ b/core/services/relay/evm/functions/logpoller_wrapper.go
@@ -22,7 +22,8 @@ import (
)
type logPollerWrapper struct {
- services.StateMachine
+ services.Service
+ eng *services.Engine
routerContract *functions_router.FunctionsRouter
pluginConfig config.PluginConfig
@@ -38,9 +39,6 @@ type logPollerWrapper struct {
detectedRequests detectedEvents
detectedResponses detectedEvents
mu sync.Mutex
- closeWait sync.WaitGroup
- stopCh services.StopChan
- lggr logger.Logger
}
type detectedEvent struct {
@@ -94,7 +92,7 @@ func NewLogPollerWrapper(routerContractAddress common.Address, pluginConfig conf
return nil, errors.Errorf("invalid config: number of required confirmation blocks >= pastBlocksToPoll")
}
- return &logPollerWrapper{
+ w := &logPollerWrapper{
routerContract: routerContract,
pluginConfig: pluginConfig,
requestBlockOffset: requestBlockOffset,
@@ -106,40 +104,25 @@ func NewLogPollerWrapper(routerContractAddress common.Address, pluginConfig conf
logPoller: logPoller,
client: client,
subscribers: make(map[string]evmRelayTypes.RouteUpdateSubscriber),
- stopCh: make(services.StopChan),
- lggr: lggr.Named("LogPollerWrapper"),
- }, nil
-}
-
-func (l *logPollerWrapper) Start(context.Context) error {
- return l.StartOnce("LogPollerWrapper", func() error {
- l.lggr.Infow("starting LogPollerWrapper", "routerContract", l.routerContract.Address().Hex(), "contractVersion", l.pluginConfig.ContractVersion)
- l.mu.Lock()
- defer l.mu.Unlock()
- if l.pluginConfig.ContractVersion != 1 {
- return errors.New("only contract version 1 is supported")
- }
- l.closeWait.Add(1)
- go l.checkForRouteUpdates()
- return nil
- })
-}
-
-func (l *logPollerWrapper) Close() error {
- return l.StopOnce("LogPollerWrapper", func() (err error) {
- l.lggr.Info("closing LogPollerWrapper")
- close(l.stopCh)
- l.closeWait.Wait()
- return nil
- })
+ }
+ w.Service, w.eng = services.Config{
+ Name: "LogPollerWrapper",
+ Start: w.start,
+ }.NewServiceEngine(lggr)
+ return w, nil
}
-func (l *logPollerWrapper) HealthReport() map[string]error {
- return map[string]error{l.Name(): l.Ready()}
+func (l *logPollerWrapper) start(context.Context) error {
+ l.eng.Infow("starting LogPollerWrapper", "routerContract", l.routerContract.Address().Hex(), "contractVersion", l.pluginConfig.ContractVersion)
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ if l.pluginConfig.ContractVersion != 1 {
+ return errors.New("only contract version 1 is supported")
+ }
+ l.eng.Go(l.checkForRouteUpdates)
+ return nil
}
-func (l *logPollerWrapper) Name() string { return l.lggr.Name() }
-
// methods of LogPollerWrapper
func (l *logPollerWrapper) LatestEvents(ctx context.Context) ([]evmRelayTypes.OracleRequest, []evmRelayTypes.OracleResponse, error) {
l.mu.Lock()
@@ -166,7 +149,7 @@ func (l *logPollerWrapper) LatestEvents(ctx context.Context) ([]evmRelayTypes.Or
resultsReq := []evmRelayTypes.OracleRequest{}
resultsResp := []evmRelayTypes.OracleResponse{}
if len(coordinators) == 0 {
- l.lggr.Debug("LatestEvents: no non-zero coordinators to check")
+ l.eng.Debug("LatestEvents: no non-zero coordinators to check")
return resultsReq, resultsResp, errors.New("no non-zero coordinators to check")
}
@@ -174,32 +157,32 @@ func (l *logPollerWrapper) LatestEvents(ctx context.Context) ([]evmRelayTypes.Or
requestEndBlock := latestBlockNum - l.requestBlockOffset
requestLogs, err := l.logPoller.Logs(ctx, startBlockNum, requestEndBlock, functions_coordinator.FunctionsCoordinatorOracleRequest{}.Topic(), coordinator)
if err != nil {
- l.lggr.Errorw("LatestEvents: fetching request logs from LogPoller failed", "startBlock", startBlockNum, "endBlock", requestEndBlock)
+ l.eng.Errorw("LatestEvents: fetching request logs from LogPoller failed", "startBlock", startBlockNum, "endBlock", requestEndBlock)
return nil, nil, err
}
- l.lggr.Debugw("LatestEvents: fetched request logs", "nRequestLogs", len(requestLogs), "latestBlock", latest, "startBlock", startBlockNum, "endBlock", requestEndBlock)
+ l.eng.Debugw("LatestEvents: fetched request logs", "nRequestLogs", len(requestLogs), "latestBlock", latest, "startBlock", startBlockNum, "endBlock", requestEndBlock)
requestLogs = l.filterPreviouslyDetectedEvents(requestLogs, &l.detectedRequests, "requests")
responseEndBlock := latestBlockNum - l.responseBlockOffset
responseLogs, err := l.logPoller.Logs(ctx, startBlockNum, responseEndBlock, functions_coordinator.FunctionsCoordinatorOracleResponse{}.Topic(), coordinator)
if err != nil {
- l.lggr.Errorw("LatestEvents: fetching response logs from LogPoller failed", "startBlock", startBlockNum, "endBlock", responseEndBlock)
+ l.eng.Errorw("LatestEvents: fetching response logs from LogPoller failed", "startBlock", startBlockNum, "endBlock", responseEndBlock)
return nil, nil, err
}
- l.lggr.Debugw("LatestEvents: fetched request logs", "nResponseLogs", len(responseLogs), "latestBlock", latest, "startBlock", startBlockNum, "endBlock", responseEndBlock)
+ l.eng.Debugw("LatestEvents: fetched request logs", "nResponseLogs", len(responseLogs), "latestBlock", latest, "startBlock", startBlockNum, "endBlock", responseEndBlock)
responseLogs = l.filterPreviouslyDetectedEvents(responseLogs, &l.detectedResponses, "responses")
parsingContract, err := functions_coordinator.NewFunctionsCoordinator(coordinator, l.client)
if err != nil {
- l.lggr.Error("LatestEvents: creating a contract instance for parsing failed")
+ l.eng.Error("LatestEvents: creating a contract instance for parsing failed")
return nil, nil, err
}
- l.lggr.Debugw("LatestEvents: parsing logs", "nRequestLogs", len(requestLogs), "nResponseLogs", len(responseLogs), "coordinatorAddress", coordinator.Hex())
+ l.eng.Debugw("LatestEvents: parsing logs", "nRequestLogs", len(requestLogs), "nResponseLogs", len(responseLogs), "coordinatorAddress", coordinator.Hex())
for _, log := range requestLogs {
gethLog := log.ToGethLog()
oracleRequest, err := parsingContract.ParseOracleRequest(gethLog)
if err != nil {
- l.lggr.Errorw("LatestEvents: failed to parse a request log, skipping", "err", err)
+ l.eng.Errorw("LatestEvents: failed to parse a request log, skipping", "err", err)
continue
}
@@ -212,7 +195,7 @@ func (l *logPollerWrapper) LatestEvents(ctx context.Context) ([]evmRelayTypes.Or
bytes32Type, errType7 := abi.NewType("bytes32", "bytes32", nil)
if errType1 != nil || errType2 != nil || errType3 != nil || errType4 != nil || errType5 != nil || errType6 != nil || errType7 != nil {
- l.lggr.Errorw("LatestEvents: failed to initialize types", "errType1", errType1,
+ l.eng.Errorw("LatestEvents: failed to initialize types", "errType1", errType1,
"errType2", errType2, "errType3", errType3, "errType4", errType4, "errType5", errType5, "errType6", errType6, "errType7", errType7,
)
continue
@@ -244,7 +227,7 @@ func (l *logPollerWrapper) LatestEvents(ctx context.Context) ([]evmRelayTypes.Or
oracleRequest.Commitment.TimeoutTimestamp,
)
if err != nil {
- l.lggr.Errorw("LatestEvents: failed to pack commitment bytes, skipping", "err", err)
+ l.eng.Errorw("LatestEvents: failed to pack commitment bytes, skipping", "err", err)
}
resultsReq = append(resultsReq, evmRelayTypes.OracleRequest{
@@ -266,7 +249,7 @@ func (l *logPollerWrapper) LatestEvents(ctx context.Context) ([]evmRelayTypes.Or
gethLog := log.ToGethLog()
oracleResponse, err := parsingContract.ParseOracleResponse(gethLog)
if err != nil {
- l.lggr.Errorw("LatestEvents: failed to parse a response log, skipping")
+ l.eng.Errorw("LatestEvents: failed to parse a response log, skipping")
continue
}
resultsResp = append(resultsResp, evmRelayTypes.OracleResponse{
@@ -275,13 +258,13 @@ func (l *logPollerWrapper) LatestEvents(ctx context.Context) ([]evmRelayTypes.Or
}
}
- l.lggr.Debugw("LatestEvents: done", "nRequestLogs", len(resultsReq), "nResponseLogs", len(resultsResp), "startBlock", startBlockNum, "endBlock", latestBlockNum)
+ l.eng.Debugw("LatestEvents: done", "nRequestLogs", len(resultsReq), "nResponseLogs", len(resultsResp), "startBlock", startBlockNum, "endBlock", latestBlockNum)
return resultsReq, resultsResp, nil
}
func (l *logPollerWrapper) filterPreviouslyDetectedEvents(logs []logpoller.Log, detectedEvents *detectedEvents, filterType string) []logpoller.Log {
if len(logs) > maxLogsToProcess {
- l.lggr.Errorw("filterPreviouslyDetectedEvents: too many logs to process, only processing latest maxLogsToProcess logs", "filterType", filterType, "nLogs", len(logs), "maxLogsToProcess", maxLogsToProcess)
+ l.eng.Errorw("filterPreviouslyDetectedEvents: too many logs to process, only processing latest maxLogsToProcess logs", "filterType", filterType, "nLogs", len(logs), "maxLogsToProcess", maxLogsToProcess)
logs = logs[len(logs)-maxLogsToProcess:]
}
l.mu.Lock()
@@ -290,7 +273,7 @@ func (l *logPollerWrapper) filterPreviouslyDetectedEvents(logs []logpoller.Log,
for _, log := range logs {
var requestId [32]byte
if len(log.Topics) < 2 || len(log.Topics[1]) != 32 {
- l.lggr.Errorw("filterPreviouslyDetectedEvents: invalid log, skipping", "filterType", filterType, "log", log)
+ l.eng.Errorw("filterPreviouslyDetectedEvents: invalid log, skipping", "filterType", filterType, "log", log)
continue
}
copy(requestId[:], log.Topics[1]) // requestId is the second topic (1st topic is the event signature)
@@ -310,7 +293,7 @@ func (l *logPollerWrapper) filterPreviouslyDetectedEvents(logs []logpoller.Log,
expiredRequests++
}
detectedEvents.detectedEventsOrdered = detectedEvents.detectedEventsOrdered[expiredRequests:]
- l.lggr.Debugw("filterPreviouslyDetectedEvents: done", "filterType", filterType, "nLogs", len(logs), "nFilteredLogs", len(filteredLogs), "nExpiredRequests", expiredRequests, "previouslyDetectedCacheSize", len(detectedEvents.detectedEventsOrdered))
+ l.eng.Debugw("filterPreviouslyDetectedEvents: done", "filterType", filterType, "nLogs", len(logs), "nFilteredLogs", len(filteredLogs), "nExpiredRequests", expiredRequests, "previouslyDetectedCacheSize", len(detectedEvents.detectedEventsOrdered))
return filteredLogs
}
@@ -319,7 +302,7 @@ func (l *logPollerWrapper) SubscribeToUpdates(ctx context.Context, subscriberNam
if l.pluginConfig.ContractVersion == 0 {
// in V0, immediately set contract address to Oracle contract and never update again
if err := subscriber.UpdateRoutes(ctx, l.routerContract.Address(), l.routerContract.Address()); err != nil {
- l.lggr.Errorw("LogPollerWrapper: Failed to update routes", "subscriberName", subscriberName, "err", err)
+ l.eng.Errorw("LogPollerWrapper: Failed to update routes", "subscriberName", subscriberName, "err", err)
}
} else if l.pluginConfig.ContractVersion == 1 {
l.mu.Lock()
@@ -328,37 +311,36 @@ func (l *logPollerWrapper) SubscribeToUpdates(ctx context.Context, subscriberNam
}
}
-func (l *logPollerWrapper) checkForRouteUpdates() {
- defer l.closeWait.Done()
+func (l *logPollerWrapper) checkForRouteUpdates(ctx context.Context) {
freqSec := l.pluginConfig.ContractUpdateCheckFrequencySec
if freqSec == 0 {
- l.lggr.Errorw("LogPollerWrapper: ContractUpdateCheckFrequencySec is zero - route update checks disabled")
+ l.eng.Errorw("LogPollerWrapper: ContractUpdateCheckFrequencySec is zero - route update checks disabled")
return
}
- updateOnce := func() {
+ updateOnce := func(ctx context.Context) {
// NOTE: timeout == frequency here, could be changed to a separate config value
timeout := time.Duration(l.pluginConfig.ContractUpdateCheckFrequencySec) * time.Second
- ctx, cancel := l.stopCh.CtxCancel(context.WithTimeout(context.Background(), timeout))
+ ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
active, proposed, err := l.getCurrentCoordinators(ctx)
if err != nil {
- l.lggr.Errorw("LogPollerWrapper: error calling getCurrentCoordinators", "err", err)
+ l.eng.Errorw("LogPollerWrapper: error calling getCurrentCoordinators", "err", err)
return
}
l.handleRouteUpdate(ctx, active, proposed)
}
- updateOnce() // update once right away
+ updateOnce(ctx) // update once right away
ticker := time.NewTicker(time.Duration(freqSec) * time.Second)
defer ticker.Stop()
for {
select {
- case <-l.stopCh:
+ case <-ctx.Done():
return
case <-ticker.C:
- updateOnce()
+ updateOnce(ctx)
}
}
}
@@ -394,22 +376,22 @@ func (l *logPollerWrapper) handleRouteUpdate(ctx context.Context, activeCoordina
defer l.mu.Unlock()
if activeCoordinator == (common.Address{}) {
- l.lggr.Error("LogPollerWrapper: cannot update activeCoordinator to zero address")
+ l.eng.Error("LogPollerWrapper: cannot update activeCoordinator to zero address")
return
}
if activeCoordinator == l.activeCoordinator && proposedCoordinator == l.proposedCoordinator {
- l.lggr.Debug("LogPollerWrapper: no changes to routes")
+ l.eng.Debug("LogPollerWrapper: no changes to routes")
return
}
errActive := l.registerFilters(ctx, activeCoordinator)
errProposed := l.registerFilters(ctx, proposedCoordinator)
if errActive != nil || errProposed != nil {
- l.lggr.Errorw("LogPollerWrapper: Failed to register filters", "errorActive", errActive, "errorProposed", errProposed)
+ l.eng.Errorw("LogPollerWrapper: Failed to register filters", "errorActive", errActive, "errorProposed", errProposed)
return
}
- l.lggr.Debugw("LogPollerWrapper: new routes", "activeCoordinator", activeCoordinator.Hex(), "proposedCoordinator", proposedCoordinator.Hex())
+ l.eng.Debugw("LogPollerWrapper: new routes", "activeCoordinator", activeCoordinator.Hex(), "proposedCoordinator", proposedCoordinator.Hex())
l.activeCoordinator = activeCoordinator
l.proposedCoordinator = proposedCoordinator
@@ -417,7 +399,7 @@ func (l *logPollerWrapper) handleRouteUpdate(ctx context.Context, activeCoordina
for _, subscriber := range l.subscribers {
err := subscriber.UpdateRoutes(ctx, activeCoordinator, proposedCoordinator)
if err != nil {
- l.lggr.Errorw("LogPollerWrapper: Failed to update routes", "err", err)
+ l.eng.Errorw("LogPollerWrapper: Failed to update routes", "err", err)
}
}
@@ -430,9 +412,9 @@ func (l *logPollerWrapper) handleRouteUpdate(ctx context.Context, activeCoordina
continue
}
if err := l.logPoller.UnregisterFilter(ctx, filter.Name); err != nil {
- l.lggr.Errorw("LogPollerWrapper: Failed to unregister filter", "filterName", filter.Name, "err", err)
+ l.eng.Errorw("LogPollerWrapper: Failed to unregister filter", "filterName", filter.Name, "err", err)
}
- l.lggr.Debugw("LogPollerWrapper: Successfully unregistered filter", "filterName", filter.Name)
+ l.eng.Debugw("LogPollerWrapper: Successfully unregistered filter", "filterName", filter.Name)
}
}
diff --git a/core/services/synchronization/helpers_test.go b/core/services/synchronization/helpers_test.go
index 7bb2dde7633..aea9bf77f49 100644
--- a/core/services/synchronization/helpers_test.go
+++ b/core/services/synchronization/helpers_test.go
@@ -12,15 +12,15 @@ import (
// NewTestTelemetryIngressClient calls NewTelemetryIngressClient and injects telemClient.
func NewTestTelemetryIngressClient(t *testing.T, url *url.URL, serverPubKeyHex string, ks keystore.CSA, logging bool, telemClient telemPb.TelemClient) TelemetryService {
- tc := NewTelemetryIngressClient(url, serverPubKeyHex, ks, logging, logger.TestLogger(t), 100, "test", "test")
+ tc := NewTelemetryIngressClient(url, serverPubKeyHex, ks, logging, logger.TestLogger(t), 100)
tc.(*telemetryIngressClient).telemClient = telemClient
return tc
}
// NewTestTelemetryIngressBatchClient calls NewTelemetryIngressBatchClient and injects telemClient.
func NewTestTelemetryIngressBatchClient(t *testing.T, url *url.URL, serverPubKeyHex string, ks keystore.CSA, logging bool, telemClient telemPb.TelemClient, sendInterval time.Duration, uniconn bool) TelemetryService {
- tc := NewTelemetryIngressBatchClient(url, serverPubKeyHex, ks, logging, logger.TestLogger(t), 100, 50, sendInterval, time.Second, uniconn, "test", "test")
- tc.(*telemetryIngressBatchClient).close = func() error { return nil }
+ tc := NewTelemetryIngressBatchClient(url, serverPubKeyHex, ks, logging, logger.TestLogger(t), 100, 50, sendInterval, time.Second, uniconn)
+ tc.(*telemetryIngressBatchClient).closeFn = func() error { return nil }
tc.(*telemetryIngressBatchClient).telemClient = telemClient
return tc
}
diff --git a/core/services/synchronization/telemetry_ingress_batch_client.go b/core/services/synchronization/telemetry_ingress_batch_client.go
index cade98cf606..26ce1e3066a 100644
--- a/core/services/synchronization/telemetry_ingress_batch_client.go
+++ b/core/services/synchronization/telemetry_ingress_batch_client.go
@@ -12,8 +12,9 @@ import (
"github.com/smartcontractkit/wsrpc"
"github.com/smartcontractkit/wsrpc/examples/simple/keys"
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink-common/pkg/services"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink-common/pkg/timeutil"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore"
telemPb "github.com/smartcontractkit/chainlink/v2/core/services/synchronization/telem"
)
@@ -37,21 +38,18 @@ func (NoopTelemetryIngressBatchClient) Name() string { return
func (NoopTelemetryIngressBatchClient) Ready() error { return nil }
type telemetryIngressBatchClient struct {
- services.StateMachine
+ services.Service
+ eng *services.Engine
+
url *url.URL
ks keystore.CSA
serverPubKeyHex string
connected atomic.Bool
telemClient telemPb.TelemClient
- close func() error
-
- globalLogger logger.Logger
- logging bool
- lggr logger.Logger
+ closeFn func() error
- wgDone sync.WaitGroup
- chDone services.StopChan
+ logging bool
telemBufferSize uint
telemMaxBatchSize uint
@@ -66,8 +64,8 @@ type telemetryIngressBatchClient struct {
// NewTelemetryIngressBatchClient returns a client backed by wsrpc that
// can send telemetry to the telemetry ingress server
-func NewTelemetryIngressBatchClient(url *url.URL, serverPubKeyHex string, ks keystore.CSA, logging bool, lggr logger.Logger, telemBufferSize uint, telemMaxBatchSize uint, telemSendInterval time.Duration, telemSendTimeout time.Duration, useUniconn bool, network string, chainID string) TelemetryService {
- return &telemetryIngressBatchClient{
+func NewTelemetryIngressBatchClient(url *url.URL, serverPubKeyHex string, ks keystore.CSA, logging bool, lggr logger.Logger, telemBufferSize uint, telemMaxBatchSize uint, telemSendInterval time.Duration, telemSendTimeout time.Duration, useUniconn bool) TelemetryService {
+ c := &telemetryIngressBatchClient{
telemBufferSize: telemBufferSize,
telemMaxBatchSize: telemMaxBatchSize,
telemSendInterval: telemSendInterval,
@@ -75,13 +73,17 @@ func NewTelemetryIngressBatchClient(url *url.URL, serverPubKeyHex string, ks key
url: url,
ks: ks,
serverPubKeyHex: serverPubKeyHex,
- globalLogger: lggr,
logging: logging,
- lggr: lggr.Named("TelemetryIngressBatchClient").Named(network).Named(chainID),
- chDone: make(services.StopChan),
workers: make(map[string]*telemetryIngressBatchWorker),
useUniConn: useUniconn,
}
+ c.Service, c.eng = services.Config{
+ Name: "TelemetryIngressBatchClient",
+ Start: c.start,
+ Close: c.close,
+ }.NewServiceEngine(lggr)
+
+ return c
}
// Start connects the wsrpc client to the telemetry ingress server
@@ -90,71 +92,53 @@ func NewTelemetryIngressBatchClient(url *url.URL, serverPubKeyHex string, ks key
// an error and wsrpc will continue to retry the connection. Eventually when the ingress
// server does come back up, wsrpc will establish the connection without any interaction
// on behalf of the node operator.
-func (tc *telemetryIngressBatchClient) Start(ctx context.Context) error {
- return tc.StartOnce("TelemetryIngressBatchClient", func() error {
- clientPrivKey, err := tc.getCSAPrivateKey()
- if err != nil {
- return err
- }
+func (tc *telemetryIngressBatchClient) start(ctx context.Context) error {
+ clientPrivKey, err := tc.getCSAPrivateKey()
+ if err != nil {
+ return err
+ }
- serverPubKey := keys.FromHex(tc.serverPubKeyHex)
-
- // Initialize a new wsrpc client caller
- // This is used to call RPC methods on the server
- if tc.telemClient == nil { // only preset for tests
- if tc.useUniConn {
- tc.wgDone.Add(1)
- go func() {
- defer tc.wgDone.Done()
- ctx2, cancel := tc.chDone.NewCtx()
- defer cancel()
- conn, err := wsrpc.DialUniWithContext(ctx2, tc.lggr, tc.url.String(), clientPrivKey, serverPubKey)
- if err != nil {
- if ctx2.Err() != nil {
- tc.lggr.Warnw("gave up connecting to telemetry endpoint", "err", err)
- } else {
- tc.lggr.Criticalw("telemetry endpoint dial errored unexpectedly", "err", err, "server pubkey", tc.serverPubKeyHex)
- tc.SvcErrBuffer.Append(err)
- }
- return
- }
- tc.telemClient = telemPb.NewTelemClient(conn)
- tc.close = conn.Close
- tc.connected.Store(true)
- }()
- } else {
- // Spawns a goroutine that will eventually connect
- conn, err := wsrpc.DialWithContext(ctx, tc.url.String(), wsrpc.WithTransportCreds(clientPrivKey, serverPubKey), wsrpc.WithLogger(tc.lggr))
+ serverPubKey := keys.FromHex(tc.serverPubKeyHex)
+
+ // Initialize a new wsrpc client caller
+ // This is used to call RPC methods on the server
+ if tc.telemClient == nil { // only preset for tests
+ if tc.useUniConn {
+ tc.eng.Go(func(ctx context.Context) {
+ conn, err := wsrpc.DialUniWithContext(ctx, tc.eng, tc.url.String(), clientPrivKey, serverPubKey)
if err != nil {
- return fmt.Errorf("could not start TelemIngressBatchClient, Dial returned error: %v", err)
+ if ctx.Err() != nil {
+ tc.eng.Warnw("gave up connecting to telemetry endpoint", "err", err)
+ } else {
+ tc.eng.Criticalw("telemetry endpoint dial errored unexpectedly", "err", err, "server pubkey", tc.serverPubKeyHex)
+ tc.eng.EmitHealthErr(err)
+ }
+ return
}
tc.telemClient = telemPb.NewTelemClient(conn)
- tc.close = func() error { conn.Close(); return nil }
+ tc.closeFn = conn.Close
+ tc.connected.Store(true)
+ })
+ } else {
+ // Spawns a goroutine that will eventually connect
+ conn, err := wsrpc.DialWithContext(ctx, tc.url.String(), wsrpc.WithTransportCreds(clientPrivKey, serverPubKey), wsrpc.WithLogger(tc.eng))
+ if err != nil {
+ return fmt.Errorf("could not start TelemIngressBatchClient, Dial returned error: %v", err)
}
+ tc.telemClient = telemPb.NewTelemClient(conn)
+ tc.closeFn = func() error { conn.Close(); return nil }
}
+ }
- return nil
- })
+ return nil
}
// Close disconnects the wsrpc client from the ingress server and waits for all workers to exit
-func (tc *telemetryIngressBatchClient) Close() error {
- return tc.StopOnce("TelemetryIngressBatchClient", func() error {
- close(tc.chDone)
- tc.wgDone.Wait()
- if (tc.useUniConn && tc.connected.Load()) || !tc.useUniConn {
- return tc.close()
- }
- return nil
- })
-}
-
-func (tc *telemetryIngressBatchClient) Name() string {
- return tc.lggr.Name()
-}
-
-func (tc *telemetryIngressBatchClient) HealthReport() map[string]error {
- return map[string]error{tc.Name(): tc.Healthy()}
+func (tc *telemetryIngressBatchClient) close() error {
+ if (tc.useUniConn && tc.connected.Load()) || !tc.useUniConn {
+ return tc.closeFn()
+ }
+ return nil
}
// getCSAPrivateKey gets the client's CSA private key
@@ -175,7 +159,7 @@ func (tc *telemetryIngressBatchClient) getCSAPrivateKey() (privkey []byte, err e
// and a warning is logged.
func (tc *telemetryIngressBatchClient) Send(ctx context.Context, telemData []byte, contractID string, telemType TelemetryType) {
if tc.useUniConn && !tc.connected.Load() {
- tc.lggr.Warnw("not connected to telemetry endpoint", "endpoint", tc.url.String())
+ tc.eng.Warnw("not connected to telemetry endpoint", "endpoint", tc.url.String())
return
}
payload := TelemPayload{
@@ -206,18 +190,17 @@ func (tc *telemetryIngressBatchClient) findOrCreateWorker(payload TelemPayload)
if !found {
worker = NewTelemetryIngressBatchWorker(
tc.telemMaxBatchSize,
- tc.telemSendInterval,
tc.telemSendTimeout,
tc.telemClient,
- &tc.wgDone,
- tc.chDone,
make(chan TelemPayload, tc.telemBufferSize),
payload.ContractID,
payload.TelemType,
- tc.globalLogger,
+ tc.eng,
tc.logging,
)
- worker.Start()
+ tc.eng.GoTick(timeutil.NewTicker(func() time.Duration {
+ return tc.telemSendInterval
+ }), worker.Send)
tc.workers[workerKey] = worker
}
diff --git a/core/services/synchronization/telemetry_ingress_batch_worker.go b/core/services/synchronization/telemetry_ingress_batch_worker.go
index e7ea6595811..7eca26f02c9 100644
--- a/core/services/synchronization/telemetry_ingress_batch_worker.go
+++ b/core/services/synchronization/telemetry_ingress_batch_worker.go
@@ -2,13 +2,12 @@ package synchronization
import (
"context"
- "sync"
"sync/atomic"
"time"
"github.com/smartcontractkit/chainlink-common/pkg/services"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
telemPb "github.com/smartcontractkit/chainlink/v2/core/services/synchronization/telem"
)
@@ -18,11 +17,8 @@ type telemetryIngressBatchWorker struct {
services.Service
telemMaxBatchSize uint
- telemSendInterval time.Duration
telemSendTimeout time.Duration
telemClient telemPb.TelemClient
- wgDone *sync.WaitGroup
- chDone services.StopChan
chTelemetry chan TelemPayload
contractID string
telemType TelemetryType
@@ -35,65 +31,45 @@ type telemetryIngressBatchWorker struct {
// telemetry to the ingress server via WSRPC
func NewTelemetryIngressBatchWorker(
telemMaxBatchSize uint,
- telemSendInterval time.Duration,
telemSendTimeout time.Duration,
telemClient telemPb.TelemClient,
- wgDone *sync.WaitGroup,
- chDone chan struct{},
chTelemetry chan TelemPayload,
contractID string,
telemType TelemetryType,
- globalLogger logger.Logger,
+ lggr logger.Logger,
logging bool,
) *telemetryIngressBatchWorker {
return &telemetryIngressBatchWorker{
- telemSendInterval: telemSendInterval,
telemSendTimeout: telemSendTimeout,
telemMaxBatchSize: telemMaxBatchSize,
telemClient: telemClient,
- wgDone: wgDone,
- chDone: chDone,
chTelemetry: chTelemetry,
contractID: contractID,
telemType: telemType,
logging: logging,
- lggr: globalLogger.Named("TelemetryIngressBatchWorker"),
+ lggr: logger.Named(lggr, "TelemetryIngressBatchWorker"),
}
}
-// Start sends batched telemetry to the ingress server on an interval
-func (tw *telemetryIngressBatchWorker) Start() {
- tw.wgDone.Add(1)
- sendTicker := time.NewTicker(tw.telemSendInterval)
-
- go func() {
- defer tw.wgDone.Done()
-
- for {
- select {
- case <-sendTicker.C:
- if len(tw.chTelemetry) == 0 {
- continue
- }
+// Send sends batched telemetry to the ingress server on an interval
+func (tw *telemetryIngressBatchWorker) Send(ctx context.Context) {
+ if len(tw.chTelemetry) == 0 {
+ return
+ }
- // Send batched telemetry to the ingress server, log any errors
- telemBatchReq := tw.BuildTelemBatchReq()
- ctx, cancel := tw.chDone.CtxCancel(context.WithTimeout(context.Background(), tw.telemSendTimeout))
- _, err := tw.telemClient.TelemBatch(ctx, telemBatchReq)
- cancel()
+ // Send batched telemetry to the ingress server, log any errors
+ telemBatchReq := tw.BuildTelemBatchReq()
+ ctx, cancel := context.WithTimeout(ctx, tw.telemSendTimeout)
+ _, err := tw.telemClient.TelemBatch(ctx, telemBatchReq)
+ cancel()
- if err != nil {
- tw.lggr.Warnf("Could not send telemetry: %v", err)
- continue
- }
- if tw.logging {
- tw.lggr.Debugw("Successfully sent telemetry to ingress server", "contractID", telemBatchReq.ContractId, "telemType", telemBatchReq.TelemetryType, "telemetry", telemBatchReq.Telemetry)
- }
- case <-tw.chDone:
- return
- }
- }
- }()
+ if err != nil {
+ tw.lggr.Warnf("Could not send telemetry: %v", err)
+ return
+ }
+ if tw.logging {
+ tw.lggr.Debugw("Successfully sent telemetry to ingress server", "contractID", telemBatchReq.ContractId, "telemType", telemBatchReq.TelemetryType, "telemetry", telemBatchReq.Telemetry)
+ }
}
// logBufferFullWithExpBackoff logs messages at
diff --git a/core/services/synchronization/telemetry_ingress_batch_worker_test.go b/core/services/synchronization/telemetry_ingress_batch_worker_test.go
index 109022c7135..bf44ee9195a 100644
--- a/core/services/synchronization/telemetry_ingress_batch_worker_test.go
+++ b/core/services/synchronization/telemetry_ingress_batch_worker_test.go
@@ -1,7 +1,6 @@
package synchronization_test
import (
- "sync"
"testing"
"time"
@@ -22,11 +21,8 @@ func TestTelemetryIngressWorker_BuildTelemBatchReq(t *testing.T) {
chTelemetry := make(chan synchronization.TelemPayload, 10)
worker := synchronization.NewTelemetryIngressBatchWorker(
uint(maxTelemBatchSize),
- time.Millisecond*1,
time.Second,
mocks.NewTelemClient(t),
- &sync.WaitGroup{},
- make(chan struct{}),
chTelemetry,
"0xa",
synchronization.OCR,
diff --git a/core/services/synchronization/telemetry_ingress_client.go b/core/services/synchronization/telemetry_ingress_client.go
index dc4ced31d09..1ed55bb5468 100644
--- a/core/services/synchronization/telemetry_ingress_client.go
+++ b/core/services/synchronization/telemetry_ingress_client.go
@@ -4,15 +4,14 @@ import (
"context"
"errors"
"net/url"
- "sync"
"sync/atomic"
"time"
"github.com/smartcontractkit/wsrpc"
"github.com/smartcontractkit/wsrpc/examples/simple/keys"
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink-common/pkg/services"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore"
telemPb "github.com/smartcontractkit/chainlink/v2/core/services/synchronization/telem"
)
@@ -35,82 +34,59 @@ func (NoopTelemetryIngressClient) Name() string { return "Noop
func (NoopTelemetryIngressClient) Ready() error { return nil }
type telemetryIngressClient struct {
- services.StateMachine
+ services.Service
+ eng *services.Engine
+
url *url.URL
ks keystore.CSA
serverPubKeyHex string
telemClient telemPb.TelemClient
logging bool
- lggr logger.Logger
- wgDone sync.WaitGroup
- chDone services.StopChan
dropMessageCount atomic.Uint32
chTelemetry chan TelemPayload
}
// NewTelemetryIngressClient returns a client backed by wsrpc that
// can send telemetry to the telemetry ingress server
-func NewTelemetryIngressClient(url *url.URL, serverPubKeyHex string, ks keystore.CSA, logging bool, lggr logger.Logger, telemBufferSize uint, network string, chainID string) TelemetryService {
- return &telemetryIngressClient{
+func NewTelemetryIngressClient(url *url.URL, serverPubKeyHex string, ks keystore.CSA, logging bool, lggr logger.Logger, telemBufferSize uint) TelemetryService {
+ c := &telemetryIngressClient{
url: url,
ks: ks,
serverPubKeyHex: serverPubKeyHex,
logging: logging,
- lggr: lggr.Named("TelemetryIngressClient").Named(network).Named(chainID),
chTelemetry: make(chan TelemPayload, telemBufferSize),
- chDone: make(services.StopChan),
}
+ c.Service, c.eng = services.Config{
+ Name: "TelemetryIngressClient",
+ Start: c.start,
+ }.NewServiceEngine(lggr)
+ return c
}
// Start connects the wsrpc client to the telemetry ingress server
-func (tc *telemetryIngressClient) Start(context.Context) error {
- return tc.StartOnce("TelemetryIngressClient", func() error {
- privkey, err := tc.getCSAPrivateKey()
- if err != nil {
- return err
- }
-
- tc.connect(privkey)
-
- return nil
- })
-}
-
-// Close disconnects the wsrpc client from the ingress server
-func (tc *telemetryIngressClient) Close() error {
- return tc.StopOnce("TelemetryIngressClient", func() error {
- close(tc.chDone)
- tc.wgDone.Wait()
- return nil
- })
-}
+func (tc *telemetryIngressClient) start(context.Context) error {
+ privkey, err := tc.getCSAPrivateKey()
+ if err != nil {
+ return err
+ }
-func (tc *telemetryIngressClient) Name() string {
- return tc.lggr.Name()
-}
+ tc.connect(privkey)
-func (tc *telemetryIngressClient) HealthReport() map[string]error {
- return map[string]error{tc.Name(): tc.Healthy()}
+ return nil
}
func (tc *telemetryIngressClient) connect(clientPrivKey []byte) {
- tc.wgDone.Add(1)
-
- go func() {
- defer tc.wgDone.Done()
- ctx, cancel := tc.chDone.NewCtx()
- defer cancel()
-
+ tc.eng.Go(func(ctx context.Context) {
serverPubKey := keys.FromHex(tc.serverPubKeyHex)
- conn, err := wsrpc.DialWithContext(ctx, tc.url.String(), wsrpc.WithTransportCreds(clientPrivKey, serverPubKey), wsrpc.WithLogger(tc.lggr))
+ conn, err := wsrpc.DialWithContext(ctx, tc.url.String(), wsrpc.WithTransportCreds(clientPrivKey, serverPubKey), wsrpc.WithLogger(tc.eng))
if err != nil {
if ctx.Err() != nil {
- tc.lggr.Warnw("gave up connecting to telemetry endpoint", "err", err)
+ tc.eng.Warnw("gave up connecting to telemetry endpoint", "err", err)
} else {
- tc.lggr.Criticalw("telemetry endpoint dial errored unexpectedly", "err", err)
- tc.SvcErrBuffer.Append(err)
+ tc.eng.Criticalw("telemetry endpoint dial errored unexpectedly", "err", err)
+ tc.eng.EmitHealthErr(err)
}
return
}
@@ -126,16 +102,12 @@ func (tc *telemetryIngressClient) connect(clientPrivKey []byte) {
tc.handleTelemetry()
// Wait for close
- <-tc.chDone
- }()
+ <-ctx.Done()
+ })
}
func (tc *telemetryIngressClient) handleTelemetry() {
- tc.wgDone.Add(1)
- go func() {
- defer tc.wgDone.Done()
- ctx, cancel := tc.chDone.NewCtx()
- defer cancel()
+ tc.eng.Go(func(ctx context.Context) {
for {
select {
case p := <-tc.chTelemetry:
@@ -148,17 +120,17 @@ func (tc *telemetryIngressClient) handleTelemetry() {
}
_, err := tc.telemClient.Telem(ctx, telemReq)
if err != nil {
- tc.lggr.Errorf("Could not send telemetry: %v", err)
+ tc.eng.Errorf("Could not send telemetry: %v", err)
continue
}
if tc.logging {
- tc.lggr.Debugw("successfully sent telemetry to ingress server", "contractID", p.ContractID, "telemetry", p.Telemetry)
+ tc.eng.Debugw("successfully sent telemetry to ingress server", "contractID", p.ContractID, "telemetry", p.Telemetry)
}
- case <-tc.chDone:
+ case <-ctx.Done():
return
}
}
- }()
+ })
}
// logBufferFullWithExpBackoff logs messages at
@@ -176,7 +148,7 @@ func (tc *telemetryIngressClient) handleTelemetry() {
func (tc *telemetryIngressClient) logBufferFullWithExpBackoff(payload TelemPayload) {
count := tc.dropMessageCount.Add(1)
if count > 0 && (count%100 == 0 || count&(count-1) == 0) {
- tc.lggr.Warnw("telemetry ingress client buffer full, dropping message", "telemetry", payload.Telemetry, "droppedCount", count)
+ tc.eng.Warnw("telemetry ingress client buffer full, dropping message", "telemetry", payload.Telemetry, "droppedCount", count)
}
}
diff --git a/core/services/telemetry/manager.go b/core/services/telemetry/manager.go
index a65759a5c62..73a94b4b127 100644
--- a/core/services/telemetry/manager.go
+++ b/core/services/telemetry/manager.go
@@ -1,29 +1,29 @@
package telemetry
import (
- "context"
"net/url"
"strings"
"time"
"github.com/pkg/errors"
- "go.uber.org/multierr"
-
"github.com/smartcontractkit/libocr/commontypes"
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
+ common "github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink-common/pkg/services"
"github.com/smartcontractkit/chainlink/v2/core/config"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore"
"github.com/smartcontractkit/chainlink/v2/core/services/synchronization"
)
type Manager struct {
- services.StateMachine
- bufferSize uint
- endpoints []*telemetryEndpoint
- ks keystore.CSA
- lggr logger.Logger
+ services.Service
+ eng *services.Engine
+
+ bufferSize uint
+ endpoints []*telemetryEndpoint
+ ks keystore.CSA
+
logging bool
maxBatchSize uint
sendInterval time.Duration
@@ -45,9 +45,7 @@ type telemetryEndpoint struct {
func NewManager(cfg config.TelemetryIngress, csaKeyStore keystore.CSA, lggr logger.Logger) *Manager {
m := &Manager{
bufferSize: cfg.BufferSize(),
- endpoints: nil,
ks: csaKeyStore,
- lggr: lggr.Named("TelemetryManager"),
logging: cfg.Logging(),
maxBatchSize: cfg.MaxBatchSize(),
sendInterval: cfg.SendInterval(),
@@ -55,44 +53,21 @@ func NewManager(cfg config.TelemetryIngress, csaKeyStore keystore.CSA, lggr logg
uniConn: cfg.UniConn(),
useBatchSend: cfg.UseBatchSend(),
}
- for _, e := range cfg.Endpoints() {
- if err := m.addEndpoint(e); err != nil {
- m.lggr.Error(err)
- }
- }
- return m
-}
-
-func (m *Manager) Start(ctx context.Context) error {
- return m.StartOnce("TelemetryManager", func() error {
- var err error
- for _, e := range m.endpoints {
- err = multierr.Append(err, e.client.Start(ctx))
- }
- return err
- })
-}
-func (m *Manager) Close() error {
- return m.StopOnce("TelemetryManager", func() error {
- var err error
- for _, e := range m.endpoints {
- err = multierr.Append(err, e.client.Close())
- }
- return err
- })
-}
-
-func (m *Manager) Name() string {
- return m.lggr.Name()
-}
+ m.Service, m.eng = services.Config{
+ Name: "TelemetryManager",
+ NewSubServices: func(lggr common.Logger) (subs []services.Service) {
+ for _, e := range cfg.Endpoints() {
+ if sub, err := m.newEndpoint(e, lggr, cfg); err != nil {
+ lggr.Error(err)
+ } else {
+ subs = append(subs, sub)
+ }
+ }
+ return
+ },
+ }.NewServiceEngine(lggr)
-func (m *Manager) HealthReport() map[string]error {
- hr := map[string]error{m.Name(): m.Healthy()}
-
- for _, e := range m.endpoints {
- services.CopyHealth(hr, e.client.HealthReport())
- }
- return hr
+ return m
}
// GenMonitoringEndpoint creates a new monitoring endpoints based on the existing available endpoints defined in the core config TOML, if no endpoint for the network and chainID exists, a NOOP agent will be used and the telemetry will not be sent
@@ -100,7 +75,7 @@ func (m *Manager) GenMonitoringEndpoint(network string, chainID string, contract
e, found := m.getEndpoint(network, chainID)
if !found {
- m.lggr.Warnf("no telemetry endpoint found for network %q chainID %q, telemetry %q for contactID %q will NOT be sent", network, chainID, telemType, contractID)
+ m.eng.Warnf("no telemetry endpoint found for network %q chainID %q, telemetry %q for contactID %q will NOT be sent", network, chainID, telemType, contractID)
return &NoopAgent{}
}
@@ -111,32 +86,33 @@ func (m *Manager) GenMonitoringEndpoint(network string, chainID string, contract
return NewIngressAgent(e.client, network, chainID, contractID, telemType)
}
-func (m *Manager) addEndpoint(e config.TelemetryIngressEndpoint) error {
+func (m *Manager) newEndpoint(e config.TelemetryIngressEndpoint, lggr logger.Logger, cfg config.TelemetryIngress) (services.Service, error) {
if e.Network() == "" {
- return errors.New("cannot add telemetry endpoint, network cannot be empty")
+ return nil, errors.New("cannot add telemetry endpoint, network cannot be empty")
}
if e.ChainID() == "" {
- return errors.New("cannot add telemetry endpoint, chainID cannot be empty")
+ return nil, errors.New("cannot add telemetry endpoint, chainID cannot be empty")
}
if e.URL() == nil {
- return errors.New("cannot add telemetry endpoint, URL cannot be empty")
+ return nil, errors.New("cannot add telemetry endpoint, URL cannot be empty")
}
if e.ServerPubKey() == "" {
- return errors.New("cannot add telemetry endpoint, ServerPubKey cannot be empty")
+ return nil, errors.New("cannot add telemetry endpoint, ServerPubKey cannot be empty")
}
if _, found := m.getEndpoint(e.Network(), e.ChainID()); found {
- return errors.Errorf("cannot add telemetry endpoint for network %q and chainID %q, endpoint already exists", e.Network(), e.ChainID())
+ return nil, errors.Errorf("cannot add telemetry endpoint for network %q and chainID %q, endpoint already exists", e.Network(), e.ChainID())
}
+ lggr = logger.Sugared(lggr).Named(e.Network()).Named(e.ChainID())
var tClient synchronization.TelemetryService
if m.useBatchSend {
- tClient = synchronization.NewTelemetryIngressBatchClient(e.URL(), e.ServerPubKey(), m.ks, m.logging, m.lggr, m.bufferSize, m.maxBatchSize, m.sendInterval, m.sendTimeout, m.uniConn, e.Network(), e.ChainID())
+ tClient = synchronization.NewTelemetryIngressBatchClient(e.URL(), e.ServerPubKey(), m.ks, cfg.Logging(), lggr, cfg.BufferSize(), cfg.MaxBatchSize(), cfg.SendInterval(), cfg.SendTimeout(), cfg.UniConn())
} else {
- tClient = synchronization.NewTelemetryIngressClient(e.URL(), e.ServerPubKey(), m.ks, m.logging, m.lggr, m.bufferSize, e.Network(), e.ChainID())
+ tClient = synchronization.NewTelemetryIngressClient(e.URL(), e.ServerPubKey(), m.ks, cfg.Logging(), lggr, cfg.BufferSize())
}
te := telemetryEndpoint{
@@ -148,7 +124,7 @@ func (m *Manager) addEndpoint(e config.TelemetryIngressEndpoint) error {
}
m.endpoints = append(m.endpoints, &te)
- return nil
+ return te.client, nil
}
func (m *Manager) getEndpoint(network string, chainID string) (*telemetryEndpoint, bool) {
diff --git a/core/services/telemetry/manager_test.go b/core/services/telemetry/manager_test.go
index 4e55cb75752..fef065b572c 100644
--- a/core/services/telemetry/manager_test.go
+++ b/core/services/telemetry/manager_test.go
@@ -156,7 +156,7 @@ func TestNewManager(t *testing.T) {
require.Equal(t, uint(123), m.bufferSize)
require.Equal(t, ks, m.ks)
- require.Equal(t, "TelemetryManager", m.lggr.Name())
+ require.Equal(t, "TelemetryManager", m.Name())
require.Equal(t, true, m.logging)
require.Equal(t, uint(51), m.maxBatchSize)
require.Equal(t, time.Millisecond*512, m.sendInterval)
From 4843d84c260c0300eecfad413cca60af481280bf Mon Sep 17 00:00:00 2001
From: Lukasz <120112546+lukaszcl@users.noreply.github.com>
Date: Wed, 7 Aug 2024 15:39:35 +0200
Subject: [PATCH 22/52] Update e2e tests definition for CI and automation
workflow (#13908)
* Update e2e tests definition for CI
* Update test
---
.github/e2e-tests.yml | 32 ++++++++++++++++---
.../run-automation-ondemand-e2e-tests.yml | 10 +++++-
2 files changed, 37 insertions(+), 5 deletions(-)
diff --git a/.github/e2e-tests.yml b/.github/e2e-tests.yml
index 0d92d1900dc..b2c9f12fcaf 100644
--- a/.github/e2e-tests.yml
+++ b/.github/e2e-tests.yml
@@ -47,6 +47,8 @@ runner-test-matrix:
test_env_type: k8s-remote-runner
runs_on: ubuntu-latest
test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestOCRv1Soak$ -test.parallel=1 -timeout 30m -count=1 -json
+ test_config_override_required: true
+ test_secrets_required: true
test_inputs:
test_suite: soak
@@ -543,15 +545,37 @@ runner-test-matrix:
chainlink_upgrade_version: develop
pyroscope_env: ci-smoke-automation-upgrade-tests
- - id: integration-tests/reorg/automation_reorg_test.go
+ - id: integration-tests/reorg/automation_reorg_test.go^TestAutomationReorg/registry_2_0
path: integration-tests/reorg/automation_reorg_test.go
runs_on: ubuntu-latest
- test_env_type: k8s-remote-runner
+ test_env_type: docker
+ test_inputs:
+ test_suite: reorg
+ workflows:
+ - Run Automation On Demand Tests (TEST WORKFLOW)
+ test_cmd: cd integration-tests/reorg && DETACH_RUNNER=false go test -v -test.run ^TestAutomationReorg/registry_2_0 -test.parallel=1 -timeout 30m -count=1 -json
+ pyroscope_env: ci-automation-on-demand-reorg
+
+ - id: integration-tests/reorg/automation_reorg_test.go^TestAutomationReorg/registry_2_1
+ path: integration-tests/reorg/automation_reorg_test.go
+ runs_on: ubuntu-latest
+ test_env_type: docker
+ test_inputs:
+ test_suite: reorg
+ workflows:
+ - Run Automation On Demand Tests (TEST WORKFLOW)
+ test_cmd: cd integration-tests/reorg && DETACH_RUNNER=false go test -v -test.run ^TestAutomationReorg/registry_2_1 -test.parallel=2 -timeout 30m -count=1 -json
+ pyroscope_env: ci-automation-on-demand-reorg
+
+ - id: integration-tests/reorg/automation_reorg_test.go^TestAutomationReorg/registry_2_2
+ path: integration-tests/reorg/automation_reorg_test.go
+ runs_on: ubuntu-latest
+ test_env_type: docker
test_inputs:
test_suite: reorg
workflows:
- Run Automation On Demand Tests (TEST WORKFLOW)
- test_cmd: cd integration-tests/reorg && DETACH_RUNNER=false go test -v -test.run ^TestAutomationReorg$ -test.parallel=7 -timeout 60m -count=1 -json
+ test_cmd: cd integration-tests/reorg && DETACH_RUNNER=false go test -v -test.run ^TestAutomationReorg/registry_2_2 -test.parallel=2 -timeout 30m -count=1 -json
pyroscope_env: ci-automation-on-demand-reorg
- id: integration-tests/chaos/automation_chaos_test.go
@@ -560,7 +584,7 @@ runner-test-matrix:
runs_on: ubuntu-latest
workflows:
- Run Automation On Demand Tests (TEST WORKFLOW)
- test_cmd: cd integration-tests/chaos && DETACH_RUNNER=false go test -v -test.run ^TestAutomationChaos$ -test.parallel=15 -timeout 60m -count=1 -json
+ test_cmd: cd integration-tests/chaos && DETACH_RUNNER=false go test -v -test.run ^TestAutomationChaos$ -test.parallel=20 -timeout 60m -count=1 -json
pyroscope_env: ci-automation-on-demand-chaos
test_inputs:
test_suite: chaos
diff --git a/.github/workflows/run-automation-ondemand-e2e-tests.yml b/.github/workflows/run-automation-ondemand-e2e-tests.yml
index 7bf4691ecc5..8dac3c56994 100644
--- a/.github/workflows/run-automation-ondemand-e2e-tests.yml
+++ b/.github/workflows/run-automation-ondemand-e2e-tests.yml
@@ -116,10 +116,18 @@ jobs:
# Run reorg tests if enabled
if [[ "${{ github.event.inputs.enableReorg }}" == 'true' ]]; then
cat >> test_list.yaml <
Date: Wed, 7 Aug 2024 07:42:01 -0600
Subject: [PATCH 23/52] bump solana commit (#14062)
---
core/scripts/go.mod | 2 +-
core/scripts/go.sum | 4 ++--
go.mod | 2 +-
go.sum | 4 ++--
integration-tests/go.mod | 2 +-
integration-tests/go.sum | 4 ++--
integration-tests/load/go.mod | 2 +-
integration-tests/load/go.sum | 4 ++--
8 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/core/scripts/go.mod b/core/scripts/go.mod
index 45b5ee59059..94504897ab0 100644
--- a/core/scripts/go.mod
+++ b/core/scripts/go.mod
@@ -273,7 +273,7 @@ require (
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 // indirect
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f // indirect
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 // indirect
- github.com/smartcontractkit/chainlink-solana v1.1.0 // indirect
+ github.com/smartcontractkit/chainlink-solana v1.1.1-0.20240806154405-8e5684f98564 // indirect
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 // indirect
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 // indirect
github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 // indirect
diff --git a/core/scripts/go.sum b/core/scripts/go.sum
index dff6f3f356a..f770498cff8 100644
--- a/core/scripts/go.sum
+++ b/core/scripts/go.sum
@@ -1192,8 +1192,8 @@ github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f/go.mod h1:V/86loaFSH0dqqUEHqyXVbyNqDRSjvcf9BRomWFTljU=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 h1:BCHu4pNP6arrcHLEWx61XjLaonOd2coQNyL0NTUcaMc=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827/go.mod h1:OPX+wC2TWQsyLNpR7daMt2vMpmsNcoBxbZyGTHr6tiA=
-github.com/smartcontractkit/chainlink-solana v1.1.0 h1:+xBeVqx2x0Sx3CBbF8RLSblczsxJDYTkta8h7i8+23I=
-github.com/smartcontractkit/chainlink-solana v1.1.0/go.mod h1:Ml88TJTwZCj6yHDkAEN/EhxVutzSlk+kDZgfibRIqF0=
+github.com/smartcontractkit/chainlink-solana v1.1.1-0.20240806154405-8e5684f98564 h1:8ZzsGNhqYxmQ/QMO1fuXO7u9Vpl9YUvPJK+td/ZaBJA=
+github.com/smartcontractkit/chainlink-solana v1.1.1-0.20240806154405-8e5684f98564/go.mod h1:Ml88TJTwZCj6yHDkAEN/EhxVutzSlk+kDZgfibRIqF0=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 h1:HyLTySm7BR+oNfZqDTkVJ25wnmcTtxBBD31UkFL+kEM=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799/go.mod h1:UVFRacRkP7O7TQAzFmR52v5mUlxf+G1ovMlCQAB/cHU=
github.com/smartcontractkit/go-plugin v0.0.0-20240208201424-b3b91517de16 h1:TFe+FvzxClblt6qRfqEhUfa4kFQx5UobuoFGO2W4mMo=
diff --git a/go.mod b/go.mod
index 78ec7d29ee1..2179ffc2d21 100644
--- a/go.mod
+++ b/go.mod
@@ -78,7 +78,7 @@ require (
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827
- github.com/smartcontractkit/chainlink-solana v1.1.0
+ github.com/smartcontractkit/chainlink-solana v1.1.1-0.20240806154405-8e5684f98564
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799
github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1
diff --git a/go.sum b/go.sum
index f5ef0f91e70..b953f315e92 100644
--- a/go.sum
+++ b/go.sum
@@ -1147,8 +1147,8 @@ github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f/go.mod h1:V/86loaFSH0dqqUEHqyXVbyNqDRSjvcf9BRomWFTljU=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 h1:BCHu4pNP6arrcHLEWx61XjLaonOd2coQNyL0NTUcaMc=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827/go.mod h1:OPX+wC2TWQsyLNpR7daMt2vMpmsNcoBxbZyGTHr6tiA=
-github.com/smartcontractkit/chainlink-solana v1.1.0 h1:+xBeVqx2x0Sx3CBbF8RLSblczsxJDYTkta8h7i8+23I=
-github.com/smartcontractkit/chainlink-solana v1.1.0/go.mod h1:Ml88TJTwZCj6yHDkAEN/EhxVutzSlk+kDZgfibRIqF0=
+github.com/smartcontractkit/chainlink-solana v1.1.1-0.20240806154405-8e5684f98564 h1:8ZzsGNhqYxmQ/QMO1fuXO7u9Vpl9YUvPJK+td/ZaBJA=
+github.com/smartcontractkit/chainlink-solana v1.1.1-0.20240806154405-8e5684f98564/go.mod h1:Ml88TJTwZCj6yHDkAEN/EhxVutzSlk+kDZgfibRIqF0=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 h1:HyLTySm7BR+oNfZqDTkVJ25wnmcTtxBBD31UkFL+kEM=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799/go.mod h1:UVFRacRkP7O7TQAzFmR52v5mUlxf+G1ovMlCQAB/cHU=
github.com/smartcontractkit/go-plugin v0.0.0-20240208201424-b3b91517de16 h1:TFe+FvzxClblt6qRfqEhUfa4kFQx5UobuoFGO2W4mMo=
diff --git a/integration-tests/go.mod b/integration-tests/go.mod
index a648e46e9f0..ff60a8f78b3 100644
--- a/integration-tests/go.mod
+++ b/integration-tests/go.mod
@@ -380,7 +380,7 @@ require (
github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 // indirect
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f // indirect
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 // indirect
- github.com/smartcontractkit/chainlink-solana v1.1.0 // indirect
+ github.com/smartcontractkit/chainlink-solana v1.1.1-0.20240806154405-8e5684f98564 // indirect
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 // indirect
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 // indirect
github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 // indirect
diff --git a/integration-tests/go.sum b/integration-tests/go.sum
index 03e4a9082ff..5d15dfd92f6 100644
--- a/integration-tests/go.sum
+++ b/integration-tests/go.sum
@@ -1496,8 +1496,8 @@ github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f/go.mod h1:V/86loaFSH0dqqUEHqyXVbyNqDRSjvcf9BRomWFTljU=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 h1:BCHu4pNP6arrcHLEWx61XjLaonOd2coQNyL0NTUcaMc=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827/go.mod h1:OPX+wC2TWQsyLNpR7daMt2vMpmsNcoBxbZyGTHr6tiA=
-github.com/smartcontractkit/chainlink-solana v1.1.0 h1:+xBeVqx2x0Sx3CBbF8RLSblczsxJDYTkta8h7i8+23I=
-github.com/smartcontractkit/chainlink-solana v1.1.0/go.mod h1:Ml88TJTwZCj6yHDkAEN/EhxVutzSlk+kDZgfibRIqF0=
+github.com/smartcontractkit/chainlink-solana v1.1.1-0.20240806154405-8e5684f98564 h1:8ZzsGNhqYxmQ/QMO1fuXO7u9Vpl9YUvPJK+td/ZaBJA=
+github.com/smartcontractkit/chainlink-solana v1.1.1-0.20240806154405-8e5684f98564/go.mod h1:Ml88TJTwZCj6yHDkAEN/EhxVutzSlk+kDZgfibRIqF0=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 h1:HyLTySm7BR+oNfZqDTkVJ25wnmcTtxBBD31UkFL+kEM=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799/go.mod h1:UVFRacRkP7O7TQAzFmR52v5mUlxf+G1ovMlCQAB/cHU=
github.com/smartcontractkit/chainlink-testing-framework v1.34.2 h1:YL3ft7KJB7SAopdmJeyeR4/kv0j4jOdagNihXq8OZ38=
diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod
index 1aa754f8cfa..c464231c745 100644
--- a/integration-tests/load/go.mod
+++ b/integration-tests/load/go.mod
@@ -372,7 +372,7 @@ require (
github.com/smartcontractkit/chain-selectors v1.0.10 // indirect
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f // indirect
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 // indirect
- github.com/smartcontractkit/chainlink-solana v1.1.0 // indirect
+ github.com/smartcontractkit/chainlink-solana v1.1.1-0.20240806154405-8e5684f98564 // indirect
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 // indirect
github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240405215812-5a72bc9af239 // indirect
github.com/smartcontractkit/havoc/k8schaos v0.0.0-20240409145249-e78d20847e37 // indirect
diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum
index 698623c50f1..d1d6f3a4d52 100644
--- a/integration-tests/load/go.sum
+++ b/integration-tests/load/go.sum
@@ -1478,8 +1478,8 @@ github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761
github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f/go.mod h1:V/86loaFSH0dqqUEHqyXVbyNqDRSjvcf9BRomWFTljU=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 h1:BCHu4pNP6arrcHLEWx61XjLaonOd2coQNyL0NTUcaMc=
github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827/go.mod h1:OPX+wC2TWQsyLNpR7daMt2vMpmsNcoBxbZyGTHr6tiA=
-github.com/smartcontractkit/chainlink-solana v1.1.0 h1:+xBeVqx2x0Sx3CBbF8RLSblczsxJDYTkta8h7i8+23I=
-github.com/smartcontractkit/chainlink-solana v1.1.0/go.mod h1:Ml88TJTwZCj6yHDkAEN/EhxVutzSlk+kDZgfibRIqF0=
+github.com/smartcontractkit/chainlink-solana v1.1.1-0.20240806154405-8e5684f98564 h1:8ZzsGNhqYxmQ/QMO1fuXO7u9Vpl9YUvPJK+td/ZaBJA=
+github.com/smartcontractkit/chainlink-solana v1.1.1-0.20240806154405-8e5684f98564/go.mod h1:Ml88TJTwZCj6yHDkAEN/EhxVutzSlk+kDZgfibRIqF0=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799 h1:HyLTySm7BR+oNfZqDTkVJ25wnmcTtxBBD31UkFL+kEM=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240709043547-03612098f799/go.mod h1:UVFRacRkP7O7TQAzFmR52v5mUlxf+G1ovMlCQAB/cHU=
github.com/smartcontractkit/chainlink-testing-framework v1.34.2 h1:YL3ft7KJB7SAopdmJeyeR4/kv0j4jOdagNihXq8OZ38=
From 5a1dd1f74007f365bc52e323e5d6a3a638e6d7ed Mon Sep 17 00:00:00 2001
From: Akshay Aggarwal
Date: Wed, 7 Aug 2024 15:49:50 +0100
Subject: [PATCH 24/52] Update log trigger default values (#14051)
---
.../ocr2keeper/evmregistry/v21/logprovider/factory.go | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go
index 7ec65ff4740..25cc5e939ba 100644
--- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go
+++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go
@@ -74,7 +74,7 @@ func (o *LogTriggersOptions) Defaults(finalityDepth int64) {
func (o *LogTriggersOptions) defaultBlockRate() uint32 {
switch o.chainID.Int64() {
- case 42161, 421613, 421614: // Arbitrum
+ case 42161, 421613, 421614: // Arbitrum, Arb Goerli, Arb Sepolia
return 2
default:
return 1
@@ -83,10 +83,10 @@ func (o *LogTriggersOptions) defaultBlockRate() uint32 {
func (o *LogTriggersOptions) defaultLogLimit() uint32 {
switch o.chainID.Int64() {
- case 1, 4, 5, 42, 11155111: // Eth
+ case 1, 4, 5, 42, 11155111: // Eth, Rinkeby, Goerli, Kovan, Sepolia
return 20
- case 10, 420, 56, 97, 137, 80001, 43113, 43114, 8453, 84531: // Optimism, BSC, Polygon, Avax, Base
- return 5
+ case 10, 420, 11155420, 56, 97, 137, 80001, 80002, 43114, 43113, 8453, 84531, 84532: // Optimism, OP Goerli, OP Sepolia, BSC, BSC Test, Polygon, Mumbai, Amoy, Avax, Avax Fuji, Base, Base Goerli, Base Sepolia
+ return 4
default:
return 1
}
From e500c1a471c4e9bb66a89ff27a763a83824f767c Mon Sep 17 00:00:00 2001
From: frank zhu
Date: Wed, 7 Aug 2024 10:02:24 -0500
Subject: [PATCH 25/52] chore: update dependabot config gomod (#14063)
---
.github/dependabot.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 19e008c8ce4..cea4f07b90d 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -4,7 +4,7 @@ updates:
directory: "/"
schedule:
interval: monthly
- open-pull-requests-limit: 10
+ open-pull-requests-limit: 0
ignore:
# Old versions are pinned for libocr.
- dependency-name: github.com/libp2p/go-libp2p-core
From 215277f9e041d18dc5686c697e6959d5edaaf346 Mon Sep 17 00:00:00 2001
From: FelixFan1992
Date: Wed, 7 Aug 2024 11:21:31 -0400
Subject: [PATCH 26/52] auto-10161: replicate v2_3 to v2_3_zksync (#14035)
* auto-10161: replicate v2_3 to v2_3_zksync
* update
* small fixes
* add an zksync automation forwarder
* fix linter
* update
* update
* lint
---
contracts/.changeset/loud-lobsters-guess.md | 5 +
contracts/.solhintignore | 1 +
.../native_solc_compile_all_automation | 2 +-
.../automation/ZKSyncAutomationForwarder.sol | 92 ++
.../v0.8/automation/test/{v2_3 => }/WETH9.sol | 0
.../v0.8/automation/test/v2_3/BaseTest.t.sol | 4 +-
.../ZKSyncAutomationRegistry2_3.sol | 391 ++++++
.../ZKSyncAutomationRegistryBase2_3.sol | 1216 +++++++++++++++++
.../ZKSyncAutomationRegistryLogicA2_3.sol | 283 ++++
.../ZKSyncAutomationRegistryLogicB2_3.sol | 449 ++++++
.../ZKSyncAutomationRegistryLogicC2_3.sol | 638 +++++++++
.../automation/AutomationRegistry2_3.test.ts | 1 -
contracts/test/v0.8/automation/helpers.ts | 4 +-
13 files changed, 3080 insertions(+), 6 deletions(-)
create mode 100644 contracts/.changeset/loud-lobsters-guess.md
create mode 100644 contracts/src/v0.8/automation/ZKSyncAutomationForwarder.sol
rename contracts/src/v0.8/automation/test/{v2_3 => }/WETH9.sol (100%)
create mode 100644 contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistry2_3.sol
create mode 100644 contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryBase2_3.sol
create mode 100644 contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryLogicA2_3.sol
create mode 100644 contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryLogicB2_3.sol
create mode 100644 contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryLogicC2_3.sol
diff --git a/contracts/.changeset/loud-lobsters-guess.md b/contracts/.changeset/loud-lobsters-guess.md
new file mode 100644
index 00000000000..e470267e4e4
--- /dev/null
+++ b/contracts/.changeset/loud-lobsters-guess.md
@@ -0,0 +1,5 @@
+---
+'@chainlink/contracts': patch
+---
+
+auto: create a replication from v2_3 to v2_3_zksync
diff --git a/contracts/.solhintignore b/contracts/.solhintignore
index bad1935442b..55d195c3059 100644
--- a/contracts/.solhintignore
+++ b/contracts/.solhintignore
@@ -18,6 +18,7 @@
./src/v0.8/automation/libraries/internal/Cron.sol
./src/v0.8/automation/AutomationForwarder.sol
./src/v0.8/automation/AutomationForwarderLogic.sol
+./src/v0.8/automation/ZKSyncAutomationForwarder.sol
./src/v0.8/automation/interfaces/v2_2/IAutomationRegistryMaster.sol
./src/v0.8/automation/interfaces/v2_3/IAutomationRegistryMaster2_3.sol
diff --git a/contracts/scripts/native_solc_compile_all_automation b/contracts/scripts/native_solc_compile_all_automation
index f144e4f7dc8..29326a15c05 100755
--- a/contracts/scripts/native_solc_compile_all_automation
+++ b/contracts/scripts/native_solc_compile_all_automation
@@ -108,4 +108,4 @@ compileContract automation/v2_3/AutomationUtils2_3.sol
compileContract automation/interfaces/v2_3/IAutomationRegistryMaster2_3.sol
compileContract automation/testhelpers/MockETHUSDAggregator.sol
-compileContract automation/test/v2_3/WETH9.sol
+compileContract automation/test/WETH9.sol
diff --git a/contracts/src/v0.8/automation/ZKSyncAutomationForwarder.sol b/contracts/src/v0.8/automation/ZKSyncAutomationForwarder.sol
new file mode 100644
index 00000000000..cfbff1365e1
--- /dev/null
+++ b/contracts/src/v0.8/automation/ZKSyncAutomationForwarder.sol
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity ^0.8.16;
+
+import {IAutomationRegistryConsumer} from "./interfaces/IAutomationRegistryConsumer.sol";
+
+uint256 constant PERFORM_GAS_CUSHION = 5_000;
+
+/**
+ * @title AutomationForwarder is a relayer that sits between the registry and the customer's target contract
+ * @dev The purpose of the forwarder is to give customers a consistent address to authorize against,
+ * which stays consistent between migrations. The Forwarder also exposes the registry address, so that users who
+ * want to programmatically interact with the registry (ie top up funds) can do so.
+ */
+contract ZKSyncAutomationForwarder {
+ /// @notice the user's target contract address
+ address private immutable i_target;
+
+ /// @notice the shared logic address
+ address private immutable i_logic;
+
+ IAutomationRegistryConsumer private s_registry;
+
+ constructor(address target, address registry, address logic) {
+ s_registry = IAutomationRegistryConsumer(registry);
+ i_target = target;
+ i_logic = logic;
+ }
+
+ /**
+ * @notice forward is called by the registry and forwards the call to the target
+ * @param gasAmount is the amount of gas to use in the call
+ * @param data is the 4 bytes function selector + arbitrary function data
+ * @return success indicating whether the target call succeeded or failed
+ */
+ function forward(uint256 gasAmount, bytes memory data) external returns (bool success, uint256 gasUsed) {
+ if (msg.sender != address(s_registry)) revert();
+ address target = i_target;
+ gasUsed = gasleft();
+ assembly {
+ let g := gas()
+ // Compute g -= PERFORM_GAS_CUSHION and check for underflow
+ if lt(g, PERFORM_GAS_CUSHION) {
+ revert(0, 0)
+ }
+ g := sub(g, PERFORM_GAS_CUSHION)
+ // if g - g//64 <= gasAmount, revert
+ // (we subtract g//64 because of EIP-150)
+ if iszero(gt(sub(g, div(g, 64)), gasAmount)) {
+ revert(0, 0)
+ }
+ // solidity calls check that a contract actually exists at the destination, so we do the same
+ if iszero(extcodesize(target)) {
+ revert(0, 0)
+ }
+ // call with exact gas
+ success := call(gasAmount, target, 0, add(data, 0x20), mload(data), 0, 0)
+ }
+ gasUsed = gasUsed - gasleft();
+ return (success, gasUsed);
+ }
+
+ function getTarget() external view returns (address) {
+ return i_target;
+ }
+
+ fallback() external {
+ // copy to memory for assembly access
+ address logic = i_logic;
+ // copied directly from OZ's Proxy contract
+ assembly {
+ // Copy msg.data. We take full control of memory in this inline assembly
+ // block because it will not return to Solidity code. We overwrite the
+ // Solidity scratch pad at memory position 0.
+ calldatacopy(0, 0, calldatasize())
+
+ // out and outsize are 0 because we don't know the size yet.
+ let result := delegatecall(gas(), logic, 0, calldatasize(), 0, 0)
+
+ // Copy the returned data.
+ returndatacopy(0, 0, returndatasize())
+
+ switch result
+ // delegatecall returns 0 on error.
+ case 0 {
+ revert(0, returndatasize())
+ }
+ default {
+ return(0, returndatasize())
+ }
+ }
+ }
+}
diff --git a/contracts/src/v0.8/automation/test/v2_3/WETH9.sol b/contracts/src/v0.8/automation/test/WETH9.sol
similarity index 100%
rename from contracts/src/v0.8/automation/test/v2_3/WETH9.sol
rename to contracts/src/v0.8/automation/test/WETH9.sol
diff --git a/contracts/src/v0.8/automation/test/v2_3/BaseTest.t.sol b/contracts/src/v0.8/automation/test/v2_3/BaseTest.t.sol
index 9016f52c55d..9e46e7bb40d 100644
--- a/contracts/src/v0.8/automation/test/v2_3/BaseTest.t.sol
+++ b/contracts/src/v0.8/automation/test/v2_3/BaseTest.t.sol
@@ -20,14 +20,14 @@ import {ChainModuleBase} from "../../chains/ChainModuleBase.sol";
import {IERC20Metadata as IERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/IERC20Metadata.sol";
import {MockUpkeep} from "../../mocks/MockUpkeep.sol";
import {IWrappedNative} from "../../interfaces/v2_3/IWrappedNative.sol";
-import {WETH9} from "./WETH9.sol";
+import {WETH9} from "../WETH9.sol";
/**
* @title BaseTest provides basic test setup procedures and dependencies for use by other
* unit tests
*/
contract BaseTest is Test {
- // test state (not exposed to derrived tests)
+ // test state (not exposed to derived tests)
uint256 private nonce;
// constants
diff --git a/contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistry2_3.sol b/contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistry2_3.sol
new file mode 100644
index 00000000000..027fe59aca7
--- /dev/null
+++ b/contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistry2_3.sol
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol";
+import {Address} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol";
+import {ZKSyncAutomationRegistryBase2_3} from "./ZKSyncAutomationRegistryBase2_3.sol";
+import {ZKSyncAutomationRegistryLogicA2_3} from "./ZKSyncAutomationRegistryLogicA2_3.sol";
+import {ZKSyncAutomationRegistryLogicC2_3} from "./ZKSyncAutomationRegistryLogicC2_3.sol";
+import {Chainable} from "../Chainable.sol";
+import {OCR2Abstract} from "../../shared/ocr2/OCR2Abstract.sol";
+import {IERC20Metadata as IERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/IERC20Metadata.sol";
+
+/**
+ * @notice Registry for adding work for Chainlink nodes to perform on client
+ * contracts. Clients must support the AutomationCompatibleInterface interface.
+ */
+contract ZKSyncAutomationRegistry2_3 is ZKSyncAutomationRegistryBase2_3, OCR2Abstract, Chainable {
+ using Address for address;
+ using EnumerableSet for EnumerableSet.UintSet;
+ using EnumerableSet for EnumerableSet.AddressSet;
+
+ /**
+ * @notice versions:
+ * AutomationRegistry 2.3.0: supports native and ERC20 billing
+ * changes flat fee to USD-denominated
+ * adds support for custom billing overrides
+ * AutomationRegistry 2.2.0: moves chain-specific integration code into a separate module
+ * KeeperRegistry 2.1.0: introduces support for log triggers
+ * removes the need for "wrapped perform data"
+ * KeeperRegistry 2.0.2: pass revert bytes as performData when target contract reverts
+ * fixes issue with arbitrum block number
+ * does an early return in case of stale report instead of revert
+ * KeeperRegistry 2.0.1: implements workaround for buggy migrate function in 1.X
+ * KeeperRegistry 2.0.0: implement OCR interface
+ * KeeperRegistry 1.3.0: split contract into Proxy and Logic
+ * account for Arbitrum and Optimism L1 gas fee
+ * allow users to configure upkeeps
+ * KeeperRegistry 1.2.0: allow funding within performUpkeep
+ * allow configurable registry maxPerformGas
+ * add function to let admin change upkeep gas limit
+ * add minUpkeepSpend requirement
+ * upgrade to solidity v0.8
+ * KeeperRegistry 1.1.0: added flatFeeMicroLink
+ * KeeperRegistry 1.0.0: initial release
+ */
+ string public constant override typeAndVersion = "AutomationRegistry 2.3.0";
+
+ /**
+ * @param logicA the address of the first logic contract
+ * @dev we cast the contract to logicC in order to call logicC functions (via fallback)
+ */
+ constructor(
+ ZKSyncAutomationRegistryLogicA2_3 logicA
+ )
+ ZKSyncAutomationRegistryBase2_3(
+ ZKSyncAutomationRegistryLogicC2_3(address(logicA)).getLinkAddress(),
+ ZKSyncAutomationRegistryLogicC2_3(address(logicA)).getLinkUSDFeedAddress(),
+ ZKSyncAutomationRegistryLogicC2_3(address(logicA)).getNativeUSDFeedAddress(),
+ ZKSyncAutomationRegistryLogicC2_3(address(logicA)).getFastGasFeedAddress(),
+ ZKSyncAutomationRegistryLogicC2_3(address(logicA)).getAutomationForwarderLogic(),
+ ZKSyncAutomationRegistryLogicC2_3(address(logicA)).getAllowedReadOnlyAddress(),
+ ZKSyncAutomationRegistryLogicC2_3(address(logicA)).getPayoutMode(),
+ ZKSyncAutomationRegistryLogicC2_3(address(logicA)).getWrappedNativeTokenAddress()
+ )
+ Chainable(address(logicA))
+ {}
+
+ /**
+ * @notice holds the variables used in the transmit function, necessary to avoid stack too deep errors
+ */
+ struct TransmitVars {
+ uint16 numUpkeepsPassedChecks;
+ uint96 totalReimbursement;
+ uint96 totalPremium;
+ uint256 totalCalldataWeight;
+ }
+
+ // ================================================================
+ // | HOT PATH ACTIONS |
+ // ================================================================
+
+ /**
+ * @inheritdoc OCR2Abstract
+ */
+ function transmit(
+ bytes32[3] calldata reportContext,
+ bytes calldata rawReport,
+ bytes32[] calldata rs,
+ bytes32[] calldata ss,
+ bytes32 rawVs
+ ) external override {
+ uint256 gasOverhead = gasleft();
+ // use this msg.data length check to ensure no extra data is included in the call
+ // 4 is first 4 bytes of the keccak-256 hash of the function signature. ss.length == rs.length so use one of them
+ // 4 + (32 * 3) + (rawReport.length + 32 + 32) + (32 * rs.length + 32 + 32) + (32 * ss.length + 32 + 32) + 32
+ uint256 requiredLength = 324 + rawReport.length + 64 * rs.length;
+ if (msg.data.length != requiredLength) revert InvalidDataLength();
+ HotVars memory hotVars = s_hotVars;
+
+ if (hotVars.paused) revert RegistryPaused();
+ if (!s_transmitters[msg.sender].active) revert OnlyActiveTransmitters();
+
+ // Verify signatures
+ if (s_latestConfigDigest != reportContext[0]) revert ConfigDigestMismatch();
+ if (rs.length != hotVars.f + 1 || rs.length != ss.length) revert IncorrectNumberOfSignatures();
+ _verifyReportSignature(reportContext, rawReport, rs, ss, rawVs);
+
+ Report memory report = _decodeReport(rawReport);
+
+ uint40 epochAndRound = uint40(uint256(reportContext[1]));
+ uint32 epoch = uint32(epochAndRound >> 8);
+
+ _handleReport(hotVars, report, gasOverhead);
+
+ if (epoch > hotVars.latestEpoch) {
+ s_hotVars.latestEpoch = epoch;
+ }
+ }
+
+ /**
+ * @notice handles the report by performing the upkeeps and updating the state
+ * @param hotVars the hot variables of the registry
+ * @param report the report to be handled (already verified and decoded)
+ * @param gasOverhead the running tally of gas overhead to be split across the upkeeps
+ * @dev had to split this function from transmit() to avoid stack too deep errors
+ * @dev all other internal / private functions are generally defined in the Base contract
+ * we leave this here because it is essentially a continuation of the transmit() function,
+ */
+ function _handleReport(HotVars memory hotVars, Report memory report, uint256 gasOverhead) private {
+ UpkeepTransmitInfo[] memory upkeepTransmitInfo = new UpkeepTransmitInfo[](report.upkeepIds.length);
+ TransmitVars memory transmitVars = TransmitVars({
+ numUpkeepsPassedChecks: 0,
+ totalCalldataWeight: 0,
+ totalReimbursement: 0,
+ totalPremium: 0
+ });
+
+ uint256 blocknumber = hotVars.chainModule.blockNumber();
+ uint256 l1Fee = hotVars.chainModule.getCurrentL1Fee();
+
+ for (uint256 i = 0; i < report.upkeepIds.length; i++) {
+ upkeepTransmitInfo[i].upkeep = s_upkeep[report.upkeepIds[i]];
+ upkeepTransmitInfo[i].triggerType = _getTriggerType(report.upkeepIds[i]);
+
+ (upkeepTransmitInfo[i].earlyChecksPassed, upkeepTransmitInfo[i].dedupID) = _prePerformChecks(
+ report.upkeepIds[i],
+ blocknumber,
+ report.triggers[i],
+ upkeepTransmitInfo[i],
+ hotVars
+ );
+
+ if (upkeepTransmitInfo[i].earlyChecksPassed) {
+ transmitVars.numUpkeepsPassedChecks += 1;
+ } else {
+ continue;
+ }
+
+ // Actually perform the target upkeep
+ (upkeepTransmitInfo[i].performSuccess, upkeepTransmitInfo[i].gasUsed) = _performUpkeep(
+ upkeepTransmitInfo[i].upkeep.forwarder,
+ report.gasLimits[i],
+ report.performDatas[i]
+ );
+
+ // To split L1 fee across the upkeeps, assign a weight to this upkeep based on the length
+ // of the perform data and calldata overhead
+ upkeepTransmitInfo[i].calldataWeight =
+ report.performDatas[i].length +
+ TRANSMIT_CALLDATA_FIXED_BYTES_OVERHEAD +
+ (TRANSMIT_CALLDATA_PER_SIGNER_BYTES_OVERHEAD * (hotVars.f + 1));
+ transmitVars.totalCalldataWeight += upkeepTransmitInfo[i].calldataWeight;
+
+ // Deduct the gasUsed by upkeep from the overhead tally - upkeeps pay for their own gas individually
+ gasOverhead -= upkeepTransmitInfo[i].gasUsed;
+
+ // Store last perform block number / deduping key for upkeep
+ _updateTriggerMarker(report.upkeepIds[i], blocknumber, upkeepTransmitInfo[i]);
+ }
+ // No upkeeps to be performed in this report
+ if (transmitVars.numUpkeepsPassedChecks == 0) {
+ return;
+ }
+
+ // This is the overall gas overhead that will be split across performed upkeeps
+ // Take upper bound of 16 gas per callData bytes
+ gasOverhead = (gasOverhead - gasleft()) + (16 * msg.data.length) + ACCOUNTING_FIXED_GAS_OVERHEAD;
+ gasOverhead = gasOverhead / transmitVars.numUpkeepsPassedChecks + ACCOUNTING_PER_UPKEEP_GAS_OVERHEAD;
+
+ {
+ BillingTokenPaymentParams memory billingTokenParams;
+ uint256 nativeUSD = _getNativeUSD(hotVars);
+ for (uint256 i = 0; i < report.upkeepIds.length; i++) {
+ if (upkeepTransmitInfo[i].earlyChecksPassed) {
+ if (i == 0 || upkeepTransmitInfo[i].upkeep.billingToken != upkeepTransmitInfo[i - 1].upkeep.billingToken) {
+ billingTokenParams = _getBillingTokenPaymentParams(hotVars, upkeepTransmitInfo[i].upkeep.billingToken);
+ }
+ PaymentReceipt memory receipt = _handlePayment(
+ hotVars,
+ PaymentParams({
+ gasLimit: upkeepTransmitInfo[i].gasUsed,
+ gasOverhead: gasOverhead,
+ l1CostWei: (l1Fee * upkeepTransmitInfo[i].calldataWeight) / transmitVars.totalCalldataWeight,
+ fastGasWei: report.fastGasWei,
+ linkUSD: report.linkUSD,
+ nativeUSD: nativeUSD,
+ billingToken: upkeepTransmitInfo[i].upkeep.billingToken,
+ billingTokenParams: billingTokenParams,
+ isTransaction: true
+ }),
+ report.upkeepIds[i],
+ upkeepTransmitInfo[i].upkeep
+ );
+ transmitVars.totalPremium += receipt.premiumInJuels;
+ transmitVars.totalReimbursement += receipt.gasReimbursementInJuels;
+
+ emit UpkeepPerformed(
+ report.upkeepIds[i],
+ upkeepTransmitInfo[i].performSuccess,
+ receipt.gasChargeInBillingToken + receipt.premiumInBillingToken,
+ upkeepTransmitInfo[i].gasUsed,
+ gasOverhead,
+ report.triggers[i]
+ );
+ }
+ }
+ }
+ // record payments to NOPs, all in LINK
+ s_transmitters[msg.sender].balance += transmitVars.totalReimbursement;
+ s_hotVars.totalPremium += transmitVars.totalPremium;
+ s_reserveAmounts[IERC20(address(i_link))] += transmitVars.totalReimbursement + transmitVars.totalPremium;
+ }
+
+ // ================================================================
+ // | OCR2ABSTRACT |
+ // ================================================================
+
+ /**
+ * @inheritdoc OCR2Abstract
+ * @dev prefer the type-safe setConfigTypeSafe whenever possible; the OnchainConfig layout can differ across registry versions
+ * @dev implemented only to satisfy the OCR2Abstract interface, even though it costs space on the root contract
+ */
+ function setConfig(
+ address[] memory signers,
+ address[] memory transmitters,
+ uint8 f,
+ bytes memory onchainConfigBytes,
+ uint64 offchainConfigVersion,
+ bytes memory offchainConfig
+ ) external override {
+ // unpack the three ABI-encoded components of the raw config payload,
+ // then forward everything to the type-safe entry point
+ (OnchainConfig memory decodedConfig, IERC20[] memory tokens, BillingConfig[] memory tokenConfigs) = abi.decode(
+ onchainConfigBytes,
+ (OnchainConfig, IERC20[], BillingConfig[])
+ );
+ setConfigTypeSafe(signers, transmitters, f, decodedConfig, offchainConfigVersion, offchainConfig, tokens, tokenConfigs);
+ }
+
+ /**
+ * @notice sets the configuration for the registry
+ * @param signers the list of permitted signers
+ * @param transmitters the list of permitted transmitters
+ * @param f the maximum tolerance for faulty nodes
+ * @param onchainConfig configuration values that are used on-chain
+ * @param offchainConfigVersion the version of the offchainConfig
+ * @param offchainConfig configuration values that are used off-chain
+ * @param billingTokens the list of valid billing tokens
+ * @param billingConfigs the configurations for each billing token
+ * @dev billingTokens and billingConfigs must be index-aligned; reverts with ParameterLengthError otherwise
+ * @dev only callable by the owner; emits ConfigSet on success
+ */
+ function setConfigTypeSafe(
+ address[] memory signers,
+ address[] memory transmitters,
+ uint8 f,
+ OnchainConfig memory onchainConfig,
+ uint64 offchainConfigVersion,
+ bytes memory offchainConfig,
+ IERC20[] memory billingTokens,
+ BillingConfig[] memory billingConfigs
+ ) public onlyOwner {
+ // validate the oracle set: bounded size, f > 0, 1:1 signer/transmitter pairing, and n > 3f
+ if (signers.length > MAX_NUM_ORACLES) revert TooManyOracles();
+ if (f == 0) revert IncorrectNumberOfFaultyOracles();
+ if (signers.length != transmitters.length || signers.length <= 3 * f) revert IncorrectNumberOfSigners();
+ if (billingTokens.length != billingConfigs.length) revert ParameterLengthError();
+ // set billing config for tokens
+ _setBillingConfig(billingTokens, billingConfigs);
+
+ _updateTransmitters(signers, transmitters);
+
+ // rebuild the hot-path config; paused / reentrancyGuard / totalPremium carry over,
+ // while latestEpoch resets because the DON restarts on a new config
+ s_hotVars = HotVars({
+ f: f,
+ stalenessSeconds: onchainConfig.stalenessSeconds,
+ gasCeilingMultiplier: onchainConfig.gasCeilingMultiplier,
+ paused: s_hotVars.paused,
+ reentrancyGuard: s_hotVars.reentrancyGuard,
+ totalPremium: s_hotVars.totalPremium,
+ latestEpoch: 0, // DON restarts epoch
+ reorgProtectionEnabled: onchainConfig.reorgProtectionEnabled,
+ chainModule: onchainConfig.chainModule
+ });
+
+ uint32 previousConfigBlockNumber = s_storage.latestConfigBlockNumber;
+ uint32 newLatestConfigBlockNumber = uint32(onchainConfig.chainModule.blockNumber());
+ uint32 newConfigCount = s_storage.configCount + 1;
+
+ // rebuild cold storage; the upkeep-creation nonce carries over, the config counters advance
+ s_storage = Storage({
+ checkGasLimit: onchainConfig.checkGasLimit,
+ maxPerformGas: onchainConfig.maxPerformGas,
+ transcoder: onchainConfig.transcoder,
+ maxCheckDataSize: onchainConfig.maxCheckDataSize,
+ maxPerformDataSize: onchainConfig.maxPerformDataSize,
+ maxRevertDataSize: onchainConfig.maxRevertDataSize,
+ upkeepPrivilegeManager: onchainConfig.upkeepPrivilegeManager,
+ financeAdmin: onchainConfig.financeAdmin,
+ nonce: s_storage.nonce,
+ configCount: newConfigCount,
+ latestConfigBlockNumber: newLatestConfigBlockNumber
+ });
+ s_fallbackGasPrice = onchainConfig.fallbackGasPrice;
+ s_fallbackLinkPrice = onchainConfig.fallbackLinkPrice;
+ s_fallbackNativePrice = onchainConfig.fallbackNativePrice;
+
+ bytes memory onchainConfigBytes = abi.encode(onchainConfig);
+
+ // recompute the config digest over the full config payload; the incremented
+ // configCount is folded in to prevent replay of older configs
+ s_latestConfigDigest = _configDigestFromConfigData(
+ block.chainid,
+ address(this),
+ s_storage.configCount,
+ signers,
+ transmitters,
+ f,
+ onchainConfigBytes,
+ offchainConfigVersion,
+ offchainConfig
+ );
+
+ // replace the registrar set: clear existing entries back-to-front (iterating from the
+ // end avoids index-shift issues as entries are removed), then add the new ones
+ for (uint256 idx = s_registrars.length(); idx > 0; idx--) {
+ s_registrars.remove(s_registrars.at(idx - 1));
+ }
+
+ for (uint256 idx = 0; idx < onchainConfig.registrars.length; idx++) {
+ s_registrars.add(onchainConfig.registrars[idx]);
+ }
+
+ emit ConfigSet(
+ previousConfigBlockNumber,
+ s_latestConfigDigest,
+ s_storage.configCount,
+ signers,
+ transmitters,
+ f,
+ onchainConfigBytes,
+ offchainConfigVersion,
+ offchainConfig
+ );
+ }
+
+ /**
+ * @inheritdoc OCR2Abstract
+ * @dev implemented to conform to the OCR2Abstract interface even though it costs space on the root contract
+ */
+ function latestConfigDetails()
+ external
+ view
+ override
+ returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest)
+ {
+ // populate the named return values directly instead of an explicit return tuple
+ configCount = s_storage.configCount;
+ blockNumber = s_storage.latestConfigBlockNumber;
+ configDigest = s_latestConfigDigest;
+ }
+
+ /**
+ * @inheritdoc OCR2Abstract
+ * @dev implemented to conform to the OCR2Abstract interface even though it costs space on the root contract
+ */
+ function latestConfigDigestAndEpoch()
+ external
+ view
+ override
+ returns (bool scanLogs, bytes32 configDigest, uint32 epoch)
+ {
+ // this registry always reports its digest/epoch directly; scanLogs is always false
+ scanLogs = false;
+ configDigest = s_latestConfigDigest;
+ epoch = s_hotVars.latestEpoch;
+ }
+}
diff --git a/contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryBase2_3.sol b/contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryBase2_3.sol
new file mode 100644
index 00000000000..524ecacc826
--- /dev/null
+++ b/contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryBase2_3.sol
@@ -0,0 +1,1216 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol";
+import {Address} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol";
+import {StreamsLookupCompatibleInterface} from "../interfaces/StreamsLookupCompatibleInterface.sol";
+import {ILogAutomation, Log} from "../interfaces/ILogAutomation.sol";
+import {IAutomationForwarder} from "../interfaces/IAutomationForwarder.sol";
+import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol";
+import {AggregatorV3Interface} from "../../shared/interfaces/AggregatorV3Interface.sol";
+import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol";
+import {KeeperCompatibleInterface} from "../interfaces/KeeperCompatibleInterface.sol";
+import {IChainModule} from "../interfaces/IChainModule.sol";
+import {IERC20Metadata as IERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/IERC20Metadata.sol";
+import {SafeCast} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/SafeCast.sol";
+import {IWrappedNative} from "../interfaces/v2_3/IWrappedNative.sol";
+
+/**
+ * @notice Base Keeper Registry contract, contains shared logic between
+ * AutomationRegistry and AutomationRegistryLogic
+ * @dev all errors, events, and internal functions should live here
+ */
+// solhint-disable-next-line max-states-count
+abstract contract ZKSyncAutomationRegistryBase2_3 is ConfirmedOwner {
+ using Address for address;
+ using EnumerableSet for EnumerableSet.UintSet;
+ using EnumerableSet for EnumerableSet.AddressSet;
+
+ address internal constant ZERO_ADDRESS = address(0);
+ address internal constant IGNORE_ADDRESS = 0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF;
+ bytes4 internal constant CHECK_SELECTOR = KeeperCompatibleInterface.checkUpkeep.selector;
+ bytes4 internal constant PERFORM_SELECTOR = KeeperCompatibleInterface.performUpkeep.selector;
+ bytes4 internal constant CHECK_CALLBACK_SELECTOR = StreamsLookupCompatibleInterface.checkCallback.selector;
+ bytes4 internal constant CHECK_LOG_SELECTOR = ILogAutomation.checkLog.selector;
+ uint256 internal constant PERFORM_GAS_MIN = 2_300;
+ uint256 internal constant CANCELLATION_DELAY = 50;
+ uint256 internal constant PERFORM_GAS_CUSHION = 5_000;
+ uint256 internal constant PPB_BASE = 1_000_000_000;
+ uint32 internal constant UINT32_MAX = type(uint32).max;
+ // The first byte of the mask can be 0, because we only ever have 31 oracles
+ uint256 internal constant ORACLE_MASK = 0x0001010101010101010101010101010101010101010101010101010101010101;
+ uint8 internal constant UPKEEP_VERSION_BASE = 4;
+
+ // Next block of constants are only used in maxPayment estimation during checkUpkeep simulation
+ // These values are calibrated using hardhat tests which simulate various cases and verify that
+ // the variables result in accurate estimation
+ uint256 internal constant REGISTRY_CONDITIONAL_OVERHEAD = 98_200; // Fixed gas overhead for conditional upkeeps
+ uint256 internal constant REGISTRY_LOG_OVERHEAD = 122_500; // Fixed gas overhead for log upkeeps
+ uint256 internal constant REGISTRY_PER_SIGNER_GAS_OVERHEAD = 5_600; // Value scales with f
+ uint256 internal constant REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD = 24; // Per perform data byte overhead
+
+ // The overhead (in bytes) in addition to perform data for upkeep sent in calldata
+ // This includes overhead for all struct encoding as well as report signatures
+ // There is a fixed component and a per signer component. This is calculated exactly by doing abi encoding
+ uint256 internal constant TRANSMIT_CALLDATA_FIXED_BYTES_OVERHEAD = 932;
+ uint256 internal constant TRANSMIT_CALLDATA_PER_SIGNER_BYTES_OVERHEAD = 64;
+
+ // Next block of constants are used in actual payment calculation. We calculate the exact gas used within the
+ // tx itself, but since payment processing itself takes gas, and it needs the overhead as input, we use fixed constants
+ // to account for gas used in payment processing. These values are calibrated using hardhat tests which simulate various cases and verify that
+ // the variables result in accurate estimation
+ uint256 internal constant ACCOUNTING_FIXED_GAS_OVERHEAD = 51_200; // Fixed overhead per tx
+ uint256 internal constant ACCOUNTING_PER_UPKEEP_GAS_OVERHEAD = 14_200; // Overhead per upkeep performed in batch
+
+ LinkTokenInterface internal immutable i_link;
+ AggregatorV3Interface internal immutable i_linkUSDFeed;
+ AggregatorV3Interface internal immutable i_nativeUSDFeed;
+ AggregatorV3Interface internal immutable i_fastGasFeed;
+ address internal immutable i_automationForwarderLogic;
+ address internal immutable i_allowedReadOnlyAddress;
+ IWrappedNative internal immutable i_wrappedNativeToken;
+
+ /**
+ * @dev - The storage is gas optimised for one and only one function - transmit. All the storage accessed in transmit
+ * is stored compactly. Rest of the storage layout is not of much concern as transmit is the only hot path
+ */
+
+ // Upkeep storage
+ EnumerableSet.UintSet internal s_upkeepIDs;
+ mapping(uint256 => Upkeep) internal s_upkeep; // accessed during transmit
+ mapping(uint256 => address) internal s_upkeepAdmin;
+ mapping(uint256 => address) internal s_proposedAdmin;
+ mapping(uint256 => bytes) internal s_checkData;
+ mapping(bytes32 => bool) internal s_dedupKeys;
+ // Registry config and state
+ EnumerableSet.AddressSet internal s_registrars;
+ mapping(address => Transmitter) internal s_transmitters;
+ mapping(address => Signer) internal s_signers;
+ address[] internal s_signersList; // s_signersList contains the signing address of each oracle
+ address[] internal s_transmittersList; // s_transmittersList contains the transmission address of each oracle
+ EnumerableSet.AddressSet internal s_deactivatedTransmitters;
+ mapping(address => address) internal s_transmitterPayees; // s_payees contains the mapping from transmitter to payee.
+ mapping(address => address) internal s_proposedPayee; // proposed payee for a transmitter
+ bytes32 internal s_latestConfigDigest; // Read on transmit path in case of signature verification
+ HotVars internal s_hotVars; // Mixture of config and state, used in transmit
+ Storage internal s_storage; // Mixture of config and state, not used in transmit
+ uint256 internal s_fallbackGasPrice;
+ uint256 internal s_fallbackLinkPrice;
+ uint256 internal s_fallbackNativePrice;
+ mapping(address => MigrationPermission) internal s_peerRegistryMigrationPermission; // Permissions for migration to and fro
+ mapping(uint256 => bytes) internal s_upkeepTriggerConfig; // upkeep triggers
+ mapping(uint256 => bytes) internal s_upkeepOffchainConfig; // general config set by users for each upkeep
+ mapping(uint256 => bytes) internal s_upkeepPrivilegeConfig; // general config set by an administrative role for an upkeep
+ mapping(address => bytes) internal s_adminPrivilegeConfig; // general config set by an administrative role for an admin
+ // billing
+ mapping(IERC20 billingToken => uint256 reserveAmount) internal s_reserveAmounts; // unspent user deposits + unwithdrawn NOP payments
+ mapping(IERC20 billingToken => BillingConfig billingConfig) internal s_billingConfigs; // billing configurations for different tokens
+ mapping(uint256 upkeepID => BillingOverrides billingOverrides) internal s_billingOverrides; // billing overrides for specific upkeeps
+ IERC20[] internal s_billingTokens; // list of billing tokens
+ PayoutMode internal s_payoutMode;
+
+ error ArrayHasNoEntries();
+ error CannotCancel();
+ error CheckDataExceedsLimit();
+ error ConfigDigestMismatch();
+ error DuplicateEntry();
+ error DuplicateSigners();
+ error GasLimitCanOnlyIncrease();
+ error GasLimitOutsideRange();
+ error IncorrectNumberOfFaultyOracles();
+ error IncorrectNumberOfSignatures();
+ error IncorrectNumberOfSigners();
+ error IndexOutOfRange();
+ error InsufficientBalance(uint256 available, uint256 requested);
+ error InsufficientLinkLiquidity();
+ error InvalidDataLength();
+ error InvalidFeed();
+ error InvalidTrigger();
+ error InvalidPayee();
+ error InvalidRecipient();
+ error InvalidReport();
+ error InvalidSigner();
+ error InvalidToken();
+ error InvalidTransmitter();
+ error InvalidTriggerType();
+ error MigrationNotPermitted();
+ error MustSettleOffchain();
+ error MustSettleOnchain();
+ error NotAContract();
+ error OnlyActiveSigners();
+ error OnlyActiveTransmitters();
+ error OnlyCallableByAdmin();
+ error OnlyCallableByLINKToken();
+ error OnlyCallableByOwnerOrAdmin();
+ error OnlyCallableByOwnerOrRegistrar();
+ error OnlyCallableByPayee();
+ error OnlyCallableByProposedAdmin();
+ error OnlyCallableByProposedPayee();
+ error OnlyCallableByUpkeepPrivilegeManager();
+ error OnlyFinanceAdmin();
+ error OnlyPausedUpkeep();
+ error OnlySimulatedBackend();
+ error OnlyUnpausedUpkeep();
+ error ParameterLengthError();
+ error ReentrantCall();
+ error RegistryPaused();
+ error RepeatedSigner();
+ error RepeatedTransmitter();
+ error TargetCheckReverted(bytes reason);
+ error TooManyOracles();
+ error TranscoderNotSet();
+ error TransferFailed();
+ error UpkeepAlreadyExists();
+ error UpkeepCancelled();
+ error UpkeepNotCanceled();
+ error UpkeepNotNeeded();
+ error ValueNotChanged();
+ error ZeroAddressNotAllowed();
+
+ enum MigrationPermission {
+ NONE,
+ OUTGOING,
+ INCOMING,
+ BIDIRECTIONAL
+ }
+
+ enum Trigger {
+ CONDITION,
+ LOG
+ }
+
+ enum UpkeepFailureReason {
+ NONE,
+ UPKEEP_CANCELLED,
+ UPKEEP_PAUSED,
+ TARGET_CHECK_REVERTED,
+ UPKEEP_NOT_NEEDED,
+ PERFORM_DATA_EXCEEDS_LIMIT,
+ INSUFFICIENT_BALANCE,
+ CALLBACK_REVERTED,
+ REVERT_DATA_EXCEEDS_LIMIT,
+ REGISTRY_PAUSED
+ }
+
+ enum PayoutMode {
+ ON_CHAIN,
+ OFF_CHAIN
+ }
+
+ /**
+ * @notice OnchainConfig of the registry
+ * @dev used only in setConfig()
+ * @member checkGasLimit gas limit when checking for upkeep
+ * @member stalenessSeconds number of seconds that is allowed for feed data to
+ * be stale before switching to the fallback pricing
+ * @member gasCeilingMultiplier multiplier to apply to the fast gas feed price
+ * when calculating the payment ceiling for keepers
+ * @member maxPerformGas max performGas allowed for an upkeep on this registry
+ * @member maxCheckDataSize max length of checkData bytes
+ * @member maxPerformDataSize max length of performData bytes
+ * @member maxRevertDataSize max length of revertData bytes
+ * @member fallbackGasPrice gas price used if the gas price feed is stale
+ * @member fallbackLinkPrice LINK price used if the LINK price feed is stale
+ * @member transcoder address of the transcoder contract
+ * @member registrars addresses of the registrar contracts
+ * @member upkeepPrivilegeManager address which can set privilege for upkeeps
+ * @member reorgProtectionEnabled if this registry enables re-org protection checks
+ * @member chainModule the chain specific module
+ */
+ struct OnchainConfig {
+ uint32 checkGasLimit;
+ uint32 maxPerformGas;
+ uint32 maxCheckDataSize;
+ address transcoder;
+ // 1 word full
+ bool reorgProtectionEnabled;
+ uint24 stalenessSeconds;
+ uint32 maxPerformDataSize;
+ uint32 maxRevertDataSize;
+ address upkeepPrivilegeManager;
+ // 2 words full
+ uint16 gasCeilingMultiplier;
+ address financeAdmin;
+ // 3 words
+ uint256 fallbackGasPrice;
+ uint256 fallbackLinkPrice;
+ uint256 fallbackNativePrice;
+ address[] registrars;
+ IChainModule chainModule;
+ }
+
+ /**
+ * @notice relevant state of an upkeep which is used in transmit function
+ * @member paused if this upkeep has been paused
+ * @member overridesEnabled if this upkeep has overrides enabled
+ * @member performGas the gas limit of upkeep execution
+ * @member maxValidBlocknumber until which block this upkeep is valid
+ * @member forwarder the forwarder contract to use for this upkeep
+ * @member amountSpent the amount this upkeep has spent, in the upkeep's billing token
+ * @member balance the balance of this upkeep
+ * @member lastPerformedBlockNumber the last block number when this upkeep was performed
+ */
+ struct Upkeep {
+ bool paused;
+ bool overridesEnabled;
+ uint32 performGas;
+ uint32 maxValidBlocknumber;
+ IAutomationForwarder forwarder;
+ // 2 bytes left in 1st EVM word - read in transmit path
+ uint128 amountSpent;
+ uint96 balance;
+ uint32 lastPerformedBlockNumber;
+ // 0 bytes left in 2nd EVM word - written in transmit path
+ IERC20 billingToken;
+ // 12 bytes left in 3rd EVM word - read in transmit path
+ }
+
+ /// @dev Config + State storage struct which is on hot transmit path
+ struct HotVars {
+ uint96 totalPremium; // ─────────╮ total historical payment to oracles for premium
+ uint32 latestEpoch; // │ latest epoch for which a report was transmitted
+ uint24 stalenessSeconds; // │ Staleness tolerance for feeds
+ uint16 gasCeilingMultiplier; // │ multiplier on top of fast gas feed for upper bound
+ uint8 f; // │ maximum number of faulty oracles
+ bool paused; // │ pause switch for all upkeeps in the registry
+ bool reentrancyGuard; // | guard against reentrancy
+ bool reorgProtectionEnabled; // ─╯ if this registry should enable the re-org protection mechanism
+ IChainModule chainModule; // the interface of chain specific module
+ }
+
+ /// @dev Config + State storage struct which is not on hot transmit path
+ struct Storage {
+ address transcoder; // Address of transcoder contract used in migrations
+ uint32 checkGasLimit; // Gas limit allowed in checkUpkeep
+ uint32 maxPerformGas; // Max gas an upkeep can use on this registry
+ uint32 nonce; // Nonce for each upkeep created
+ // 1 EVM word full
+ address upkeepPrivilegeManager; // address which can set privilege for upkeeps
+ uint32 configCount; // incremented each time a new config is posted, The count is incorporated into the config digest to prevent replay attacks.
+ uint32 latestConfigBlockNumber; // makes it easier for offchain systems to extract config from logs
+ uint32 maxCheckDataSize; // max length of checkData bytes
+ // 2 EVM word full
+ address financeAdmin; // address which can withdraw funds from the contract
+ uint32 maxPerformDataSize; // max length of performData bytes
+ uint32 maxRevertDataSize; // max length of revertData bytes
+ // 4 bytes left in 3rd EVM word
+ }
+
+ /// @dev Report transmitted by OCR to transmit function
+ struct Report {
+ uint256 fastGasWei;
+ uint256 linkUSD;
+ uint256[] upkeepIds;
+ uint256[] gasLimits;
+ bytes[] triggers;
+ bytes[] performDatas;
+ }
+
+ /**
+ * @dev This struct is used to maintain run time information about an upkeep in transmit function
+ * @member upkeep the upkeep struct
+ * @member earlyChecksPassed whether the upkeep passed early checks before perform
+ * @member performSuccess whether the perform was successful
+ * @member triggerType the type of trigger
+ * @member gasUsed gasUsed by this upkeep in perform
+ * @member calldataWeight weight assigned to this upkeep for its contribution to calldata. It is used to split L1 fee
+ * @member dedupID unique ID used to dedup an upkeep/trigger combo
+ */
+ struct UpkeepTransmitInfo {
+ Upkeep upkeep;
+ bool earlyChecksPassed;
+ bool performSuccess;
+ Trigger triggerType;
+ uint256 gasUsed;
+ uint256 calldataWeight;
+ bytes32 dedupID;
+ }
+
+ /**
+ * @notice holds information about a transmitter / node in the DON
+ * @member active can this transmitter submit reports
+ * @member index of oracle in s_signersList/s_transmittersList
+ * @member balance a node's balance in LINK
+ * @member lastCollected the total balance at which the node last withdrew
+ * @dev uint96 is safe for balance / last collected because transmitters are only ever paid in LINK
+ */
+ struct Transmitter {
+ bool active;
+ uint8 index;
+ uint96 balance;
+ uint96 lastCollected;
+ }
+
+ struct TransmitterPayeeInfo {
+ address transmitterAddress;
+ address payeeAddress;
+ }
+
+ struct Signer {
+ bool active;
+ // Index of oracle in s_signersList/s_transmittersList
+ uint8 index;
+ }
+
+ /**
+ * @notice the trigger structure conditional trigger type
+ */
+ struct ConditionalTrigger {
+ uint32 blockNum;
+ bytes32 blockHash;
+ }
+
+ /**
+ * @notice the trigger structure of log upkeeps
+ * @dev NOTE that blockNum / blockHash describe the block used for the callback,
+ * not necessarily the block number that the log was emitted in!!!!
+ */
+ struct LogTrigger {
+ bytes32 logBlockHash;
+ bytes32 txHash;
+ uint32 logIndex;
+ uint32 blockNum;
+ bytes32 blockHash;
+ }
+
+ /**
+ * @notice the billing config of a token
+ * @dev this is a storage struct
+ */
+ // solhint-disable-next-line gas-struct-packing
+ struct BillingConfig {
+ uint32 gasFeePPB;
+ uint24 flatFeeMilliCents; // min fee is $0.00001, max fee is $167
+ AggregatorV3Interface priceFeed;
+ uint8 decimals;
+ // 1st word, read in calculating BillingTokenPaymentParams
+ uint256 fallbackPrice;
+ // 2nd word only read if stale
+ uint96 minSpend;
+ // 3rd word only read during cancellation
+ }
+
+ /**
+ * @notice override-able billing params of a billing token
+ */
+ struct BillingOverrides {
+ uint32 gasFeePPB;
+ uint24 flatFeeMilliCents;
+ }
+
+ /**
+ * @notice pricing params for a billing token
+ * @dev this is a memory-only struct, so struct packing is less important
+ */
+ struct BillingTokenPaymentParams {
+ uint8 decimals;
+ uint32 gasFeePPB;
+ uint24 flatFeeMilliCents;
+ uint256 priceUSD;
+ }
+
+ /**
+ * @notice struct containing price & payment information used in calculating payment amount
+ * @member gasLimit the amount of gas used
+ * @member gasOverhead the amount of gas overhead
+ * @member l1CostWei the amount to be charged for L1 fee in wei
+ * @member fastGasWei the fast gas price
+ * @member linkUSD the exchange ratio between LINK and USD
+ * @member nativeUSD the exchange ratio between the chain's native token and USD
+ * @member billingToken the billing token
+ * @member billingTokenParams the payment params specific to a particular payment token
+ * @member isTransaction is this an eth_call or a transaction
+ */
+ struct PaymentParams {
+ uint256 gasLimit;
+ uint256 gasOverhead;
+ uint256 l1CostWei;
+ uint256 fastGasWei;
+ uint256 linkUSD;
+ uint256 nativeUSD;
+ IERC20 billingToken;
+ BillingTokenPaymentParams billingTokenParams;
+ bool isTransaction;
+ }
+
+ /**
+ * @notice struct containing receipt information about a payment or cost estimation
+ * @member gasChargeInBillingToken the amount to charge a user for gas spent using the billing token's native decimals
+ * @member premiumInBillingToken the premium charged to the user, shared between all nodes, using the billing token's native decimals
+ * @member gasReimbursementInJuels the amount to reimburse a node for gas spent
+ * @member premiumInJuels the premium paid to NOPs, shared between all nodes
+ */
+ // solhint-disable-next-line gas-struct-packing
+ struct PaymentReceipt {
+ uint96 gasChargeInBillingToken;
+ uint96 premiumInBillingToken;
+ // one word ends
+ uint96 gasReimbursementInJuels;
+ uint96 premiumInJuels;
+ // second word ends
+ IERC20 billingToken;
+ uint96 linkUSD;
+ // third word ends
+ uint96 nativeUSD;
+ uint96 billingUSD;
+ // fourth word ends
+ }
+
+ event AdminPrivilegeConfigSet(address indexed admin, bytes privilegeConfig);
+ event BillingConfigOverridden(uint256 indexed id, BillingOverrides overrides);
+ event BillingConfigOverrideRemoved(uint256 indexed id);
+ event BillingConfigSet(IERC20 indexed token, BillingConfig config);
+ event CancelledUpkeepReport(uint256 indexed id, bytes trigger);
+ event ChainSpecificModuleUpdated(address newModule);
+ event DedupKeyAdded(bytes32 indexed dedupKey);
+ event FeesWithdrawn(address indexed assetAddress, address indexed recipient, uint256 amount);
+ event FundsAdded(uint256 indexed id, address indexed from, uint96 amount);
+ event FundsWithdrawn(uint256 indexed id, uint256 amount, address to);
+ event InsufficientFundsUpkeepReport(uint256 indexed id, bytes trigger);
+ event NOPsSettledOffchain(address[] payees, uint256[] payments);
+ event Paused(address account);
+ event PayeesUpdated(address[] transmitters, address[] payees);
+ event PayeeshipTransferRequested(address indexed transmitter, address indexed from, address indexed to);
+ event PayeeshipTransferred(address indexed transmitter, address indexed from, address indexed to);
+ event PaymentWithdrawn(address indexed transmitter, uint256 indexed amount, address indexed to, address payee);
+ event ReorgedUpkeepReport(uint256 indexed id, bytes trigger);
+ event StaleUpkeepReport(uint256 indexed id, bytes trigger);
+ event UpkeepAdminTransferred(uint256 indexed id, address indexed from, address indexed to);
+ event UpkeepAdminTransferRequested(uint256 indexed id, address indexed from, address indexed to);
+ event UpkeepCanceled(uint256 indexed id, uint64 indexed atBlockHeight);
+ event UpkeepCheckDataSet(uint256 indexed id, bytes newCheckData);
+ event UpkeepGasLimitSet(uint256 indexed id, uint96 gasLimit);
+ event UpkeepMigrated(uint256 indexed id, uint256 remainingBalance, address destination);
+ event UpkeepOffchainConfigSet(uint256 indexed id, bytes offchainConfig);
+ event UpkeepPaused(uint256 indexed id);
+ event UpkeepPerformed(
+ uint256 indexed id,
+ bool indexed success,
+ uint96 totalPayment,
+ uint256 gasUsed,
+ uint256 gasOverhead,
+ bytes trigger
+ );
+ event UpkeepCharged(uint256 indexed id, PaymentReceipt receipt);
+ event UpkeepPrivilegeConfigSet(uint256 indexed id, bytes privilegeConfig);
+ event UpkeepReceived(uint256 indexed id, uint256 startingBalance, address importedFrom);
+ event UpkeepRegistered(uint256 indexed id, uint32 performGas, address admin);
+ event UpkeepTriggerConfigSet(uint256 indexed id, bytes triggerConfig);
+ event UpkeepUnpaused(uint256 indexed id);
+ event Unpaused(address account);
+
+ /**
+ * @param link address of the LINK Token
+ * @param linkUSDFeed address of the LINK/USD price feed
+ * @param nativeUSDFeed address of the Native/USD price feed
+ * @param fastGasFeed address of the Fast Gas price feed
+ * @param automationForwarderLogic the address of automation forwarder logic
+ * @param allowedReadOnlyAddress the address of the allowed read only address
+ * @param payoutMode the payout mode
+ * @param wrappedNativeTokenAddress the address of the wrapped native token
+ * @dev reverts with InvalidFeed when the LINK/USD and Native/USD feeds report different decimals
+ */
+ constructor(
+ address link,
+ address linkUSDFeed,
+ address nativeUSDFeed,
+ address fastGasFeed,
+ address automationForwarderLogic,
+ address allowedReadOnlyAddress,
+ PayoutMode payoutMode,
+ address wrappedNativeTokenAddress
+ ) ConfirmedOwner(msg.sender) {
+ i_link = LinkTokenInterface(link);
+ i_linkUSDFeed = AggregatorV3Interface(linkUSDFeed);
+ i_nativeUSDFeed = AggregatorV3Interface(nativeUSDFeed);
+ i_fastGasFeed = AggregatorV3Interface(fastGasFeed);
+ i_automationForwarderLogic = automationForwarderLogic;
+ i_allowedReadOnlyAddress = allowedReadOnlyAddress;
+ s_payoutMode = payoutMode;
+ i_wrappedNativeToken = IWrappedNative(wrappedNativeTokenAddress);
+ // the two USD feeds must share decimals — presumably downstream price math compares
+ // them at a common scale; TODO(review) confirm against the payment-calculation path
+ if (i_linkUSDFeed.decimals() != i_nativeUSDFeed.decimals()) {
+ revert InvalidFeed();
+ }
+ }
+
+ // ================================================================
+ // | INTERNAL FUNCTIONS ONLY |
+ // ================================================================
+
+ /**
+ * @dev creates a new upkeep with the given fields
+ * @param id the id of the upkeep
+ * @param upkeep the upkeep to create
+ * @param admin address to cancel upkeep and withdraw remaining funds
+ * @param checkData data which is passed to user's checkUpkeep
+ * @param triggerConfig the trigger config for this upkeep
+ * @param offchainConfig the off-chain config of this upkeep
+ * @dev reverts if the registry is paused, checkData exceeds the size limit, performGas is
+ * out of range, the id is already taken, or the billing token is not configured
+ */
+ function _createUpkeep(
+ uint256 id,
+ Upkeep memory upkeep,
+ address admin,
+ bytes memory checkData,
+ bytes memory triggerConfig,
+ bytes memory offchainConfig
+ ) internal {
+ if (s_hotVars.paused) revert RegistryPaused();
+ if (checkData.length > s_storage.maxCheckDataSize) revert CheckDataExceedsLimit();
+ if (upkeep.performGas < PERFORM_GAS_MIN || upkeep.performGas > s_storage.maxPerformGas)
+ revert GasLimitOutsideRange();
+ // a non-zero forwarder marks the id as already in use
+ if (address(s_upkeep[id].forwarder) != address(0)) revert UpkeepAlreadyExists();
+ // a billing token is only considered valid when it has a configured price feed
+ if (address(s_billingConfigs[upkeep.billingToken].priceFeed) == address(0)) revert InvalidToken();
+ s_upkeep[id] = upkeep;
+ s_upkeepAdmin[id] = admin;
+ s_checkData[id] = checkData;
+ // count the upkeep's starting balance toward the billing token's reserve (unspent user deposits)
+ s_reserveAmounts[upkeep.billingToken] = s_reserveAmounts[upkeep.billingToken] + upkeep.balance;
+ s_upkeepTriggerConfig[id] = triggerConfig;
+ s_upkeepOffchainConfig[id] = offchainConfig;
+ s_upkeepIDs.add(id);
+ }
+
+ /**
+ * @dev derives a pseudo-random ID for a new upkeep with the trigger type embedded in it
+ * @dev layout of the resulting 32-byte ID:
+ * bytes 0-3 entropy
+ * bytes 4-14 zeroed
+ * byte 15 the trigger type
+ * bytes 16-31 entropy
+ * @dev entropy comes from hashing the previous block hash, this contract's address, and the
+ * registry nonce, which keeps the same entropy level as an eth address, so IDs stay unique;
+ * the identifying byte sits in the middle where abbreviated displays (0x1234...ABCD) hide it
+ */
+ function _createID(Trigger triggerType) internal view returns (uint256) {
+ IChainModule chainModule = s_hotVars.chainModule;
+ bytes memory entropy = abi.encodePacked(
+ keccak256(abi.encode(chainModule.blockHash((chainModule.blockNumber() - 1)), address(this), s_storage.nonce))
+ );
+ // zero out bytes 4 through 14, then stamp the trigger type into byte 15
+ for (uint256 i = 4; i < 15; i++) {
+ entropy[i] = bytes1(0);
+ }
+ entropy[15] = bytes1(uint8(triggerType));
+ return uint256(bytes32(entropy));
+ }
+
+  /**
+   * @dev retrieves feed data for fast gas/native and link/native prices. if the feed
+   * data is stale it uses the configured fallback price. Once a price is picked
+   * for gas it takes the min of gas price in the transaction or the fast gas
+   * price in order to reduce costs for the upkeep clients.
+   * @param hotVars the hot path variables (supplies the staleness window)
+   * @return gasWei the fast gas feed answer, or the fallback gas price
+   * @return linkUSD the LINK/USD feed answer, or the fallback LINK price
+   * @return nativeUSD the native/USD price from _getNativeUSD (same fallback rules)
+   */
+  function _getFeedData(
+    HotVars memory hotVars
+  ) internal view returns (uint256 gasWei, uint256 linkUSD, uint256 nativeUSD) {
+    uint32 stalenessSeconds = hotVars.stalenessSeconds;
+    // a staleness window of 0 disables the staleness check entirely
+    bool staleFallback = stalenessSeconds > 0;
+    uint256 timestamp;
+    int256 feedValue;
+    (, feedValue, , timestamp, ) = i_fastGasFeed.latestRoundData();
+    // reject non-positive answers, future-dated rounds, and (optionally) rounds older than the window;
+    // the future-dated check also short-circuits before block.timestamp - timestamp could underflow
+    if (
+      feedValue <= 0 || block.timestamp < timestamp || (staleFallback && stalenessSeconds < block.timestamp - timestamp)
+    ) {
+      gasWei = s_fallbackGasPrice;
+    } else {
+      gasWei = uint256(feedValue);
+    }
+    (, feedValue, , timestamp, ) = i_linkUSDFeed.latestRoundData();
+    // same validity rules, applied to the LINK/USD feed
+    if (
+      feedValue <= 0 || block.timestamp < timestamp || (staleFallback && stalenessSeconds < block.timestamp - timestamp)
+    ) {
+      linkUSD = s_fallbackLinkPrice;
+    } else {
+      linkUSD = uint256(feedValue);
+    }
+    return (gasWei, linkUSD, _getNativeUSD(hotVars));
+  }
+
+  /**
+   * @dev this price has its own getter for use in the transmit() hot path
+   * in the future, all price data should be included in the report instead of
+   * getting read during execution
+   * @return the native/USD feed answer, or the fallback native price when the feed is unusable
+   */
+  function _getNativeUSD(HotVars memory hotVars) internal view returns (uint256) {
+    (, int256 answer, , uint256 updatedAt, ) = i_nativeUSDFeed.latestRoundData();
+    // a non-positive answer or a round dated in the future cannot be trusted
+    if (answer <= 0 || block.timestamp < updatedAt) {
+      return s_fallbackNativePrice;
+    }
+    // when a staleness window is configured, a reading older than the window is rejected as well
+    if (hotVars.stalenessSeconds > 0 && block.timestamp - updatedAt > hotVars.stalenessSeconds) {
+      return s_fallbackNativePrice;
+    }
+    return uint256(answer);
+  }
+
+  /**
+   * @dev gets the price and billing params for a specific billing token
+   * @param hotVars the hot path variables (supplies the staleness window)
+   * @param billingToken the token to read billing configuration for
+   * @return paymentParams the token's fee settings plus a USD price (feed answer or fallback)
+   */
+  function _getBillingTokenPaymentParams(
+    HotVars memory hotVars,
+    IERC20 billingToken
+  ) internal view returns (BillingTokenPaymentParams memory paymentParams) {
+    // storage pointer: only the fields read below are loaded
+    BillingConfig storage config = s_billingConfigs[billingToken];
+    paymentParams.flatFeeMilliCents = config.flatFeeMilliCents;
+    paymentParams.gasFeePPB = config.gasFeePPB;
+    paymentParams.decimals = config.decimals;
+    (, int256 feedValue, , uint256 timestamp, ) = config.priceFeed.latestRoundData();
+    // same feed-validity rules as _getFeedData: non-positive, future-dated, or stale answers
+    // fall back to the configured fallback price
+    if (
+      feedValue <= 0 ||
+      block.timestamp < timestamp ||
+      (hotVars.stalenessSeconds > 0 && hotVars.stalenessSeconds < block.timestamp - timestamp)
+    ) {
+      paymentParams.priceUSD = config.fallbackPrice;
+    } else {
+      paymentParams.priceUSD = uint256(feedValue);
+    }
+    return paymentParams;
+  }
+
+  /**
+   * @param hotVars the hot path variables
+   * @param paymentParams the pricing data and gas usage data
+   * @return receipt the receipt of payment with pricing breakdown
+   * @dev use of PaymentParams struct is necessary to avoid stack too deep errors
+   * @dev calculates LINK paid for gas spent plus a configured premium percentage
+   * @dev 1 USD = 1e18 attoUSD
+   * @dev 1 USD = 1e26 hexaicosaUSD (had to borrow this prefix from geometry because there is no metric prefix for 1e-26)
+   * @dev 1 millicent = 1e-5 USD = 1e13 attoUSD
+   */
+  function _calculatePaymentAmount(
+    HotVars memory hotVars,
+    PaymentParams memory paymentParams
+  ) internal view returns (PaymentReceipt memory receipt) {
+    uint256 decimals = paymentParams.billingTokenParams.decimals;
+    // start from the capped gas price: fastGasWei * gasCeilingMultiplier
+    uint256 gasWei = paymentParams.fastGasWei * hotVars.gasCeilingMultiplier;
+    // in case it's actual execution use actual gas price, capped by fastGasWei * gasCeilingMultiplier
+    if (paymentParams.isTransaction && tx.gasprice < gasWei) {
+      gasWei = tx.gasprice;
+    }
+
+    // scaling factor is based on decimals of billing token, and applies to premium and gasCharge;
+    // at most one of the two factors exceeds 1 (both are 1 when the token has 18 decimals)
+    uint256 numeratorScalingFactor = decimals > 18 ? 10 ** (decimals - 18) : 1;
+    uint256 denominatorScalingFactor = decimals < 18 ? 10 ** (18 - decimals) : 1;
+
+    // gas calculation
+    uint256 gasPaymentHexaicosaUSD = (gasWei *
+      (paymentParams.gasLimit + paymentParams.gasOverhead) +
+      paymentParams.l1CostWei) * paymentParams.nativeUSD; // gasPaymentHexaicosaUSD has an extra 8 zeros because of decimals on nativeUSD feed
+    // gasChargeInBillingToken is scaled by the billing token's decimals. Round up to ensure a minimum billing token is charged for gas
+    // (adding divisor - 1 before dividing is ceiling division)
+    receipt.gasChargeInBillingToken = SafeCast.toUint96(
+      ((gasPaymentHexaicosaUSD * numeratorScalingFactor) +
+        (paymentParams.billingTokenParams.priceUSD * denominatorScalingFactor - 1)) /
+        (paymentParams.billingTokenParams.priceUSD * denominatorScalingFactor)
+    );
+    // 18 decimals: 26 decimals / 8 decimals
+    receipt.gasReimbursementInJuels = SafeCast.toUint96(gasPaymentHexaicosaUSD / paymentParams.linkUSD);
+
+    // premium calculation
+    uint256 flatFeeHexaicosaUSD = uint256(paymentParams.billingTokenParams.flatFeeMilliCents) * 1e21; // 1e13 for milliCents to attoUSD and 1e8 for attoUSD to hexaicosaUSD
+    // gasFeePPB applied to (execution gas + L1 cost) in USD terms, plus the flat fee; / 1e9 unscales the PPB
+    uint256 premiumHexaicosaUSD = ((((gasWei * paymentParams.gasLimit) + paymentParams.l1CostWei) *
+      paymentParams.billingTokenParams.gasFeePPB *
+      paymentParams.nativeUSD) / 1e9) + flatFeeHexaicosaUSD;
+    // premium is scaled by the billing token's decimals. Round up to ensure at least minimum charge
+    receipt.premiumInBillingToken = SafeCast.toUint96(
+      ((premiumHexaicosaUSD * numeratorScalingFactor) +
+        (paymentParams.billingTokenParams.priceUSD * denominatorScalingFactor - 1)) /
+        (paymentParams.billingTokenParams.priceUSD * denominatorScalingFactor)
+    );
+    receipt.premiumInJuels = SafeCast.toUint96(premiumHexaicosaUSD / paymentParams.linkUSD);
+
+    // record the prices the charge was computed with, for offchain accounting
+    receipt.billingToken = paymentParams.billingToken;
+    receipt.linkUSD = SafeCast.toUint96(paymentParams.linkUSD);
+    receipt.nativeUSD = SafeCast.toUint96(paymentParams.nativeUSD);
+    receipt.billingUSD = SafeCast.toUint96(paymentParams.billingTokenParams.priceUSD);
+
+    return receipt;
+  }
+
+  /**
+   * @dev calculates the max payment for an upkeep. Called during checkUpkeep simulation and assumes
+   * maximum gas overhead, L1 fee
+   * @return the worst-case charge in the upkeep's billing token (gas charge + premium)
+   */
+  function _getMaxPayment(
+    uint256 upkeepId,
+    HotVars memory hotVars,
+    Trigger triggerType,
+    uint32 performGas,
+    uint256 fastGasWei,
+    uint256 linkUSD,
+    uint256 nativeUSD,
+    IERC20 billingToken
+  ) internal view returns (uint96) {
+    uint256 maxL1Fee;
+    uint256 maxGasOverhead;
+
+    {
+      // base registry overhead depends on the trigger type
+      if (triggerType == Trigger.CONDITION) {
+        maxGasOverhead = REGISTRY_CONDITIONAL_OVERHEAD;
+      } else if (triggerType == Trigger.LOG) {
+        maxGasOverhead = REGISTRY_LOG_OVERHEAD;
+      } else {
+        revert InvalidTriggerType();
+      }
+      // worst-case transmit calldata: max perform data plus fixed and per-signer byte overheads
+      uint256 maxCalldataSize = s_storage.maxPerformDataSize +
+        TRANSMIT_CALLDATA_FIXED_BYTES_OVERHEAD +
+        (TRANSMIT_CALLDATA_PER_SIGNER_BYTES_OVERHEAD * (hotVars.f + 1));
+      // NOTE(review): reads chainModule from storage here but from hotVars two statements below —
+      // confirm hotVars always mirrors s_hotVars at every call site
+      (uint256 chainModuleFixedOverhead, uint256 chainModulePerByteOverhead) = s_hotVars.chainModule.getGasOverhead();
+      maxGasOverhead +=
+        (REGISTRY_PER_SIGNER_GAS_OVERHEAD * (hotVars.f + 1)) +
+        ((REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD + chainModulePerByteOverhead) * maxCalldataSize) +
+        chainModuleFixedOverhead;
+      maxL1Fee = hotVars.gasCeilingMultiplier * hotVars.chainModule.getMaxL1Fee(maxCalldataSize);
+    }
+
+    BillingTokenPaymentParams memory paymentParams = _getBillingTokenPaymentParams(hotVars, billingToken);
+    if (s_upkeep[upkeepId].overridesEnabled) {
+      BillingOverrides memory billingOverrides = s_billingOverrides[upkeepId];
+      // use the overridden configs
+      paymentParams.gasFeePPB = billingOverrides.gasFeePPB;
+      paymentParams.flatFeeMilliCents = billingOverrides.flatFeeMilliCents;
+    }
+
+    // isTransaction is false: this is a simulation, so tx.gasprice is never consulted
+    PaymentReceipt memory receipt = _calculatePaymentAmount(
+      hotVars,
+      PaymentParams({
+        gasLimit: performGas,
+        gasOverhead: maxGasOverhead,
+        l1CostWei: maxL1Fee,
+        fastGasWei: fastGasWei,
+        linkUSD: linkUSD,
+        nativeUSD: nativeUSD,
+        billingToken: billingToken,
+        billingTokenParams: paymentParams,
+        isTransaction: false
+      })
+    );
+
+    return receipt.gasChargeInBillingToken + receipt.premiumInBillingToken;
+  }
+
+  /**
+   * @dev move a transmitter's balance from total pool to withdrawable balance
+   * @param transmitterAddress the transmitter to settle
+   * @param totalPremium the current cumulative premium counter
+   * @param payeeCount the number of transmitters sharing the pool
+   * @return the transmitter's balance after settlement
+   */
+  function _updateTransmitterBalanceFromPool(
+    address transmitterAddress,
+    uint96 totalPremium,
+    uint96 payeeCount
+  ) internal returns (uint96) {
+    Transmitter memory transmitter = s_transmitters[transmitterAddress];
+
+    // inactive transmitters keep their stored balance but collect nothing new
+    if (transmitter.active) {
+      uint96 uncollected = totalPremium - transmitter.lastCollected;
+      uint96 due = uncollected / payeeCount;
+      transmitter.balance += due;
+      // advance lastCollected only by the amount actually credited; the integer-division
+      // remainder stays in the pool for a later settlement
+      transmitter.lastCollected += due * payeeCount;
+      s_transmitters[transmitterAddress] = transmitter;
+    }
+
+    return transmitter.balance;
+  }
+
+  /**
+   * @dev extracts the trigger type encoded in an upkeepID
+   * @dev IDs minted by this registry carry zeros in bytes 4..14 and the trigger type in byte 15;
+   * an ID with any non-zero byte in that range predates this scheme and is treated as conditional
+   */
+  function _getTriggerType(uint256 upkeepId) internal pure returns (Trigger) {
+    bytes32 id = bytes32(upkeepId);
+    for (uint256 i = 4; i < 15; i++) {
+      if (id[i] != bytes1(0)) {
+        // legacy IDs created before this standard and migrated to this registry
+        return Trigger.CONDITION;
+      }
+    }
+    return Trigger(uint8(id[15]));
+  }
+
+  /**
+   * @dev builds the calldata used to simulate the user's check function for the given trigger type
+   * @dev reverts for any trigger type other than CONDITION or LOG
+   */
+  function _checkPayload(
+    uint256 upkeepId,
+    Trigger triggerType,
+    bytes memory triggerData
+  ) internal view returns (bytes memory) {
+    if (triggerType == Trigger.LOG) {
+      // log upkeeps receive the triggering log alongside their stored check data
+      Log memory log = abi.decode(triggerData, (Log));
+      return abi.encodeWithSelector(CHECK_LOG_SELECTOR, log, s_checkData[upkeepId]);
+    }
+    if (triggerType == Trigger.CONDITION) {
+      return abi.encodeWithSelector(CHECK_SELECTOR, s_checkData[upkeepId]);
+    }
+    revert InvalidTriggerType();
+  }
+
+  /**
+   * @dev _decodeReport decodes a serialized report into a Report struct
+   * @dev reverts with InvalidReport when the per-upkeep arrays do not all share the same length
+   */
+  function _decodeReport(bytes calldata rawReport) internal pure returns (Report memory) {
+    Report memory report = abi.decode(rawReport, (Report));
+    uint256 n = report.upkeepIds.length;
+    // every per-upkeep array must carry exactly one entry per upkeep ID
+    bool consistent = report.gasLimits.length == n &&
+      report.triggers.length == n &&
+      report.performDatas.length == n;
+    if (!consistent) {
+      revert InvalidReport();
+    }
+    return report;
+  }
+
+  /**
+   * @dev Does some early sanity checks before actually performing an upkeep
+   * @param upkeepId the upkeep under consideration
+   * @param blocknumber the block number the report is validated against
+   * @param rawTrigger the abi-encoded trigger attached to this report entry
+   * @param transmitInfo cached upkeep and trigger-type info for this report entry
+   * @param hotVars the hot path variables
+   * @return bool whether the upkeep should be performed
+   * @return bytes32 dedupID for preventing duplicate performances of this trigger
+   */
+  function _prePerformChecks(
+    uint256 upkeepId,
+    uint256 blocknumber,
+    bytes memory rawTrigger,
+    UpkeepTransmitInfo memory transmitInfo,
+    HotVars memory hotVars
+  ) internal returns (bool, bytes32) {
+    bytes32 dedupID;
+    if (transmitInfo.triggerType == Trigger.CONDITION) {
+      // conditional triggers have no dedupID; only the validity flag matters
+      if (!_validateConditionalTrigger(upkeepId, blocknumber, rawTrigger, transmitInfo, hotVars))
+        return (false, dedupID);
+    } else if (transmitInfo.triggerType == Trigger.LOG) {
+      bool valid;
+      (valid, dedupID) = _validateLogTrigger(upkeepId, blocknumber, rawTrigger, hotVars);
+      if (!valid) return (false, dedupID);
+    } else {
+      revert InvalidTriggerType();
+    }
+    if (transmitInfo.upkeep.maxValidBlocknumber <= blocknumber) {
+      // Can happen when an upkeep got cancelled after report was generated.
+      // However we have a CANCELLATION_DELAY of 50 blocks so shouldn't happen in practice
+      emit CancelledUpkeepReport(upkeepId, rawTrigger);
+      return (false, dedupID);
+    }
+    return (true, dedupID);
+  }
+
+  /**
+   * @dev Does some early sanity checks before actually performing an upkeep
+   * @return bool whether the conditional trigger is still valid; false also emits a
+   * StaleUpkeepReport or ReorgedUpkeepReport event explaining the rejection
+   */
+  function _validateConditionalTrigger(
+    uint256 upkeepId,
+    uint256 blocknumber,
+    bytes memory rawTrigger,
+    UpkeepTransmitInfo memory transmitInfo,
+    HotVars memory hotVars
+  ) internal returns (bool) {
+    ConditionalTrigger memory trigger = abi.decode(rawTrigger, (ConditionalTrigger));
+    if (trigger.blockNum < transmitInfo.upkeep.lastPerformedBlockNumber) {
+      // Can happen when another report performed this upkeep after this report was generated
+      emit StaleUpkeepReport(upkeepId, rawTrigger);
+      return false;
+    }
+    if (
+      (hotVars.reorgProtectionEnabled &&
+        (trigger.blockHash != bytes32("") && hotVars.chainModule.blockHash(trigger.blockNum) != trigger.blockHash)) ||
+      trigger.blockNum >= blocknumber
+    ) {
+      // There are two cases of reorged report
+      // 1. trigger block number is in future: this is an edge case during extreme deep reorgs of chain
+      // which is always protected against
+      // 2. blockHash at trigger block number was same as trigger time. This is an optional check which is
+      // applied if DON sends non empty trigger.blockHash. Note: It only works for last 256 blocks on chain
+      // when it is sent
+      emit ReorgedUpkeepReport(upkeepId, rawTrigger);
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * @dev validates a log trigger and derives its dedup key
+   * @return bool whether the log trigger is still valid
+   * @return bytes32 the dedupID for this log occurrence (returned even when invalid)
+   */
+  function _validateLogTrigger(
+    uint256 upkeepId,
+    uint256 blocknumber,
+    bytes memory rawTrigger,
+    HotVars memory hotVars
+  ) internal returns (bool, bytes32) {
+    LogTrigger memory trigger = abi.decode(rawTrigger, (LogTrigger));
+    // the dedup key pins the exact log occurrence: block hash + tx hash + log index, per upkeep
+    bytes32 dedupID = keccak256(abi.encodePacked(upkeepId, trigger.logBlockHash, trigger.txHash, trigger.logIndex));
+    if (
+      (hotVars.reorgProtectionEnabled &&
+        (trigger.blockHash != bytes32("") && hotVars.chainModule.blockHash(trigger.blockNum) != trigger.blockHash)) ||
+      trigger.blockNum >= blocknumber
+    ) {
+      // Reorg protection is same as conditional trigger upkeeps
+      emit ReorgedUpkeepReport(upkeepId, rawTrigger);
+      return (false, dedupID);
+    }
+    if (s_dedupKeys[dedupID]) {
+      // this exact log occurrence has already been performed
+      emit StaleUpkeepReport(upkeepId, rawTrigger);
+      return (false, dedupID);
+    }
+    return (true, dedupID);
+  }
+
+  /**
+   * @dev Verify signatures attached to report
+   * @dev reverts if any signature recovers to an inactive/unknown signer, or if any signer
+   * appears more than once
+   */
+  function _verifyReportSignature(
+    bytes32[3] calldata reportContext,
+    bytes calldata report,
+    bytes32[] calldata rs,
+    bytes32[] calldata ss,
+    bytes32 rawVs
+  ) internal view {
+    bytes32 h = keccak256(abi.encode(keccak256(report), reportContext));
+    // i-th byte counts number of sigs made by i-th signer
+    uint256 signedCount = 0;
+
+    Signer memory signer;
+    address signerAddress;
+    for (uint256 i = 0; i < rs.length; i++) {
+      // ecrecover returns address(0) for a malformed signature, which fails the active check below
+      signerAddress = ecrecover(h, uint8(rawVs[i]) + 27, rs[i], ss[i]);
+      signer = s_signers[signerAddress];
+      if (!signer.active) revert OnlyActiveSigners();
+      unchecked {
+        // bump the byte belonging to this signer's index
+        signedCount += 1 << (8 * signer.index);
+      }
+    }
+
+    // a duplicate signer pushes its byte past 1, setting bits outside ORACLE_MASK
+    // (presumably ORACLE_MASK keeps the low bit of each signer byte — confirm against its definition)
+    if (signedCount & ORACLE_MASK != signedCount) revert DuplicateSigners();
+  }
+
+  /**
+   * @dev records that a perform happened so duplicate / out-of-order performances can be rejected
+   * @dev conditional upkeeps store the block number of this perform; log upkeeps store the dedupID
+   */
+  function _updateTriggerMarker(
+    uint256 upkeepID,
+    uint256 blocknumber,
+    UpkeepTransmitInfo memory upkeepTransmitInfo
+  ) internal {
+    Trigger triggerType = upkeepTransmitInfo.triggerType;
+    if (triggerType == Trigger.CONDITION) {
+      s_upkeep[upkeepID].lastPerformedBlockNumber = uint32(blocknumber);
+    } else if (triggerType == Trigger.LOG) {
+      bytes32 dedupID = upkeepTransmitInfo.dedupID;
+      s_dedupKeys[dedupID] = true;
+      emit DedupKeyAdded(dedupID);
+    }
+  }
+
+  /**
+   * @dev calls the Upkeep target with the performData param passed in by the
+   * transmitter and the exact gas required by the Upkeep
+   * @return success whether the forwarded call succeeded
+   * @return gasUsed the gas consumed by the forwarded call, as reported by the forwarder
+   */
+  function _performUpkeep(
+    IAutomationForwarder forwarder,
+    uint256 performGas,
+    bytes memory performData
+  ) internal nonReentrant returns (bool success, uint256 gasUsed) {
+    // wrap performData behind PERFORM_SELECTOR so the forwarder can invoke the target's perform function
+    performData = abi.encodeWithSelector(PERFORM_SELECTOR, performData);
+    return forwarder.forward(performGas, performData);
+  }
+
+  /**
+   * @dev handles the payment processing after an upkeep has been performed.
+   * Deducts an upkeep's balance and increases the amount spent.
+   * @param hotVars the hot path variables
+   * @param paymentParams the pricing data and gas usage data
+   * @param upkeepId the upkeep being charged
+   * @param upkeep a memory copy of the upkeep (source of the balance and override flag)
+   * @return the payment receipt, clamped if the upkeep could not cover the full amount
+   */
+  function _handlePayment(
+    HotVars memory hotVars,
+    PaymentParams memory paymentParams,
+    uint256 upkeepId,
+    Upkeep memory upkeep
+  ) internal returns (PaymentReceipt memory) {
+    if (upkeep.overridesEnabled) {
+      BillingOverrides memory billingOverrides = s_billingOverrides[upkeepId];
+      // use the overridden configs
+      paymentParams.billingTokenParams.gasFeePPB = billingOverrides.gasFeePPB;
+      paymentParams.billingTokenParams.flatFeeMilliCents = billingOverrides.flatFeeMilliCents;
+    }
+
+    PaymentReceipt memory receipt = _calculatePaymentAmount(hotVars, paymentParams);
+
+    // balance is in the token's native decimals
+    uint96 balance = upkeep.balance;
+    // payment is in the token's native decimals
+    uint96 payment = receipt.gasChargeInBillingToken + receipt.premiumInBillingToken;
+
+    // scaling factors to adjust decimals between billing token and LINK
+    // (scalingFactor1 upscales sub-18-decimal tokens, scalingFactor2 downscales past 18; both are 1 at 18)
+    uint256 decimals = paymentParams.billingTokenParams.decimals;
+    uint256 scalingFactor1 = decimals < 18 ? 10 ** (18 - decimals) : 1;
+    uint256 scalingFactor2 = decimals > 18 ? 10 ** (decimals - 18) : 1;
+
+    // this shouldn't happen, but in rare edge cases, we charge the full balance in case the user
+    // can't cover the amount owed
+    if (balance < receipt.gasChargeInBillingToken) {
+      // if the user can't cover the gas fee, then direct all of the payment to the transmitter and distribute no premium to the DON
+      payment = balance;
+      receipt.gasReimbursementInJuels = SafeCast.toUint96(
+        (balance * paymentParams.billingTokenParams.priceUSD * scalingFactor1) /
+          (paymentParams.linkUSD * scalingFactor2)
+      );
+      receipt.premiumInJuels = 0;
+      receipt.premiumInBillingToken = 0;
+      receipt.gasChargeInBillingToken = balance;
+    } else if (balance < payment) {
+      // if the user can cover the gas fee, but not the premium, then reduce the premium:
+      // the whole remaining balance converted to juels, minus what gas already claimed
+      payment = balance;
+      receipt.premiumInJuels = SafeCast.toUint96(
+        ((balance * paymentParams.billingTokenParams.priceUSD * scalingFactor1) /
+          (paymentParams.linkUSD * scalingFactor2)) - receipt.gasReimbursementInJuels
+      );
+      // round up
+      receipt.premiumInBillingToken = SafeCast.toUint96(
+        ((receipt.premiumInJuels * paymentParams.linkUSD * scalingFactor2) +
+          (paymentParams.billingTokenParams.priceUSD * scalingFactor1 - 1)) /
+          (paymentParams.billingTokenParams.priceUSD * scalingFactor1)
+      );
+    }
+
+    // settle: reduce the upkeep's balance and its share of the reserve, and record the spend
+    s_upkeep[upkeepId].balance -= payment;
+    s_upkeep[upkeepId].amountSpent += payment;
+    s_reserveAmounts[paymentParams.billingToken] -= payment;
+
+    emit UpkeepCharged(upkeepId, receipt);
+    return receipt;
+  }
+
+  /**
+   * @dev ensures the upkeep is not cancelled and the caller is the upkeep admin
+   * @dev the admin check runs first, so a non-admin gets OnlyCallableByAdmin even for cancelled upkeeps
+   */
+  function _requireAdminAndNotCancelled(uint256 upkeepId) internal view {
+    if (msg.sender != s_upkeepAdmin[upkeepId]) revert OnlyCallableByAdmin();
+    // UINT32_MAX marks an active (never-cancelled) upkeep
+    if (s_upkeep[upkeepId].maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled();
+  }
+
+  /**
+   * @dev replicates Open Zeppelin's ReentrancyGuard but optimized to fit our storage
+   */
+  modifier nonReentrant() {
+    // a nested (reentrant) call sees the flag already set and reverts
+    if (s_hotVars.reentrancyGuard) revert ReentrantCall();
+    s_hotVars.reentrancyGuard = true;
+    _;
+    // clear the flag once the guarded function body has completed
+    s_hotVars.reentrancyGuard = false;
+  }
+
+  /**
+   * @notice only allows a pre-configured address to initiate offchain read
+   * @dev compares tx.origin (not msg.sender) so the restriction holds regardless of which
+   * contract the call is routed through
+   */
+  function _preventExecution() internal view {
+    // solhint-disable-next-line avoid-tx-origin
+    if (tx.origin != i_allowedReadOnlyAddress) {
+      revert OnlySimulatedBackend();
+    }
+  }
+
+  /**
+   * @notice only allows finance admin to call the function
+   * @dev reverts with OnlyFinanceAdmin for any other caller
+   */
+  function _onlyFinanceAdminAllowed() internal view {
+    if (msg.sender != s_storage.financeAdmin) {
+      revert OnlyFinanceAdmin();
+    }
+  }
+
+  /**
+   * @notice only allows privilege manager to call the function
+   * @dev reverts with OnlyCallableByUpkeepPrivilegeManager for any other caller
+   */
+  function _onlyPrivilegeManagerAllowed() internal view {
+    if (msg.sender != s_storage.upkeepPrivilegeManager) {
+      revert OnlyCallableByUpkeepPrivilegeManager();
+    }
+  }
+
+  /**
+   * @notice sets billing configuration for a token
+   * @param billingTokens the addresses of tokens
+   * @param billingConfigs the configs for tokens
+   * @dev wipes all previous billing configs, then registers each token exactly once; reverts if a
+   * token repeats, is the zero address, has a zero/wrong-decimals price feed, has decimals that
+   * disagree with the token contract, or is LINK while payout mode is OFF_CHAIN
+   */
+  function _setBillingConfig(IERC20[] memory billingTokens, BillingConfig[] memory billingConfigs) internal {
+    // Clear existing data
+    for (uint256 i = 0; i < s_billingTokens.length; i++) {
+      delete s_billingConfigs[s_billingTokens[i]];
+    }
+    delete s_billingTokens;
+
+    PayoutMode mode = s_payoutMode;
+    for (uint256 i = 0; i < billingTokens.length; i++) {
+      IERC20 token = billingTokens[i];
+      BillingConfig memory config = billingConfigs[i];
+
+      // most ERC20 tokens are 18 decimals, priceFeed must be 8 decimals
+      if (config.decimals != token.decimals() || config.priceFeed.decimals() != 8) {
+        revert InvalidToken();
+      }
+
+      // if LINK is a billing option, payout mode must be ON_CHAIN
+      if (address(token) == address(i_link) && mode == PayoutMode.OFF_CHAIN) {
+        revert InvalidToken();
+      }
+      if (address(token) == ZERO_ADDRESS || address(config.priceFeed) == ZERO_ADDRESS) {
+        revert ZeroAddressNotAllowed();
+      }
+
+      // if this is a new token, add it to tokens list. Otherwise revert
+      if (address(s_billingConfigs[token].priceFeed) != ZERO_ADDRESS) {
+        revert DuplicateEntry();
+      }
+      s_billingTokens.push(token);
+
+      // store the config for the newly-added token (duplicates were rejected above)
+      s_billingConfigs[token] = config;
+
+      emit BillingConfigSet(token, config);
+    }
+  }
+
+  /**
+   * @notice updates the signers and transmitters lists
+   * @dev settles every current transmitter's share of the premium pool first, then swaps the old
+   * signer/transmitter sets for the new ones
+   * @dev NOTE(review): assumes signers and transmitters have equal length — indexing
+   * transmitters[i] would panic otherwise; confirm callers validate
+   */
+  function _updateTransmitters(address[] memory signers, address[] memory transmitters) internal {
+    uint96 transmittersListLength = uint96(s_transmittersList.length);
+    uint96 totalPremium = s_hotVars.totalPremium;
+
+    // move all pooled payments out of the pool to each transmitter's balance
+    for (uint256 i = 0; i < s_transmittersList.length; i++) {
+      _updateTransmitterBalanceFromPool(s_transmittersList[i], totalPremium, transmittersListLength);
+    }
+
+    // remove any old signer/transmitter addresses
+    address transmitterAddress;
+    PayoutMode mode = s_payoutMode;
+    for (uint256 i = 0; i < s_transmittersList.length; i++) {
+      transmitterAddress = s_transmittersList[i];
+      delete s_signers[s_signersList[i]];
+      // Do not delete the whole transmitter struct as it has balance information stored
+      s_transmitters[transmitterAddress].active = false;
+      // in off-chain payout mode, remember deactivated transmitters that are still owed funds
+      if (mode == PayoutMode.OFF_CHAIN && s_transmitters[transmitterAddress].balance > 0) {
+        s_deactivatedTransmitters.add(transmitterAddress);
+      }
+    }
+    delete s_signersList;
+    delete s_transmittersList;
+
+    // add new signer/transmitter addresses
+    Transmitter memory transmitter;
+    for (uint256 i = 0; i < signers.length; i++) {
+      if (s_signers[signers[i]].active) revert RepeatedSigner();
+      if (signers[i] == ZERO_ADDRESS) revert InvalidSigner();
+      // NOTE(review): index is stored as uint8 — uint8(i) truncates silently past 255 entries;
+      // confirm the set size is bounded upstream
+      s_signers[signers[i]] = Signer({active: true, index: uint8(i)});
+
+      transmitterAddress = transmitters[i];
+      if (transmitterAddress == ZERO_ADDRESS) revert InvalidTransmitter();
+      transmitter = s_transmitters[transmitterAddress];
+      if (transmitter.active) revert RepeatedTransmitter();
+      transmitter.active = true;
+      transmitter.index = uint8(i);
+      // new transmitters start afresh from current totalPremium
+      // some spare change of premium from previous pool will be forfeited
+      transmitter.lastCollected = s_hotVars.totalPremium;
+      s_transmitters[transmitterAddress] = transmitter;
+      if (mode == PayoutMode.OFF_CHAIN) {
+        s_deactivatedTransmitters.remove(transmitterAddress);
+      }
+    }
+
+    s_signersList = signers;
+    s_transmittersList = transmitters;
+  }
+
+  /**
+   * @notice returns the size of the LINK liquidity pool
+   * @dev LINK max supply < 2^96, so casting to int256 is safe
+   * @dev can be negative if the contract's LINK balance falls below the reserved amount
+   */
+  function _linkAvailableForPayment() internal view returns (int256) {
+    return int256(i_link.balanceOf(address(this))) - int256(s_reserveAmounts[IERC20(address(i_link))]);
+  }
+}
diff --git a/contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryLogicA2_3.sol b/contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryLogicA2_3.sol
new file mode 100644
index 00000000000..64d697c70f9
--- /dev/null
+++ b/contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryLogicA2_3.sol
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol";
+import {Address} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol";
+import {ZKSyncAutomationRegistryBase2_3} from "./ZKSyncAutomationRegistryBase2_3.sol";
+import {ZKSyncAutomationRegistryLogicC2_3} from "./ZKSyncAutomationRegistryLogicC2_3.sol";
+import {ZKSyncAutomationRegistryLogicB2_3} from "./ZKSyncAutomationRegistryLogicB2_3.sol";
+import {Chainable} from "../Chainable.sol";
+import {ZKSyncAutomationForwarder} from "../ZKSyncAutomationForwarder.sol";
+import {IAutomationForwarder} from "../interfaces/IAutomationForwarder.sol";
+import {UpkeepTranscoderInterfaceV2} from "../interfaces/UpkeepTranscoderInterfaceV2.sol";
+import {MigratableKeeperRegistryInterfaceV2} from "../interfaces/MigratableKeeperRegistryInterfaceV2.sol";
+import {IERC20Metadata as IERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/IERC20Metadata.sol";
+import {SafeERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol";
+import {IERC677Receiver} from "../../shared/interfaces/IERC677Receiver.sol";
+
+/**
+ * @notice Logic contract, works in tandem with AutomationRegistry as a proxy
+ */
+contract ZKSyncAutomationRegistryLogicA2_3 is ZKSyncAutomationRegistryBase2_3, Chainable, IERC677Receiver {
+ using Address for address;
+ using EnumerableSet for EnumerableSet.UintSet;
+ using EnumerableSet for EnumerableSet.AddressSet;
+ using SafeERC20 for IERC20;
+
+  /**
+   * @param logicB the address of the second logic contract
+   * @dev we cast the contract to logicC in order to call logicC functions (via fallback)
+   * @dev each getter call falls through logicB to logicC, so this contract is constructed
+   * with the same immutable settings as the rest of the chain
+   */
+  constructor(
+    ZKSyncAutomationRegistryLogicB2_3 logicB
+  )
+    ZKSyncAutomationRegistryBase2_3(
+      ZKSyncAutomationRegistryLogicC2_3(address(logicB)).getLinkAddress(),
+      ZKSyncAutomationRegistryLogicC2_3(address(logicB)).getLinkUSDFeedAddress(),
+      ZKSyncAutomationRegistryLogicC2_3(address(logicB)).getNativeUSDFeedAddress(),
+      ZKSyncAutomationRegistryLogicC2_3(address(logicB)).getFastGasFeedAddress(),
+      ZKSyncAutomationRegistryLogicC2_3(address(logicB)).getAutomationForwarderLogic(),
+      ZKSyncAutomationRegistryLogicC2_3(address(logicB)).getAllowedReadOnlyAddress(),
+      ZKSyncAutomationRegistryLogicC2_3(address(logicB)).getPayoutMode(),
+      ZKSyncAutomationRegistryLogicC2_3(address(logicB)).getWrappedNativeTokenAddress()
+    )
+    Chainable(address(logicB))
+  {}
+
+  /**
+   * @notice uses LINK's transferAndCall to LINK and add funding to an upkeep
+   * @dev safe to cast uint256 to uint96 as total LINK supply is under UINT96MAX
+   * @param sender the account which transferred the funds
+   * @param amount number of LINK transfer
+   * @param data abi-encoded upkeep ID (must be exactly 32 bytes)
+   */
+  function onTokenTransfer(address sender, uint256 amount, bytes calldata data) external override {
+    // only the LINK token itself may invoke this callback
+    if (msg.sender != address(i_link)) revert OnlyCallableByLINKToken();
+    if (data.length != 32) revert InvalidDataLength();
+    uint256 id = abi.decode(data, (uint256));
+    if (s_upkeep[id].maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled();
+    // funds sent via LINK can only top up upkeeps billed in LINK
+    if (address(s_upkeep[id].billingToken) != address(i_link)) revert InvalidToken();
+    s_upkeep[id].balance = s_upkeep[id].balance + uint96(amount);
+    // mirror the top-up in the LINK reserve so the funds stay earmarked for this upkeep
+    s_reserveAmounts[IERC20(address(i_link))] = s_reserveAmounts[IERC20(address(i_link))] + amount;
+    emit FundsAdded(id, sender, uint96(amount));
+  }
+
+ // ================================================================
+ // | UPKEEP MANAGEMENT |
+ // ================================================================
+
+  /**
+   * @notice adds a new upkeep
+   * @param target address to perform upkeep on
+   * @param gasLimit amount of gas to provide the target contract when
+   * performing upkeep
+   * @param admin address to cancel upkeep and withdraw remaining funds
+   * @param triggerType the trigger for the upkeep
+   * @param billingToken the billing token for the upkeep
+   * @param checkData data passed to the contract when checking for upkeep
+   * @param triggerConfig the config for the trigger
+   * @param offchainConfig arbitrary offchain config for the upkeep
+   * @return id the freshly-minted id of the new upkeep
+   */
+  function registerUpkeep(
+    address target,
+    uint32 gasLimit,
+    address admin,
+    Trigger triggerType,
+    IERC20 billingToken,
+    bytes calldata checkData,
+    bytes memory triggerConfig,
+    bytes memory offchainConfig
+  ) public returns (uint256 id) {
+    if (msg.sender != owner() && !s_registrars.contains(msg.sender)) revert OnlyCallableByOwnerOrRegistrar();
+    if (!target.isContract()) revert NotAContract();
+    id = _createID(triggerType);
+    // each upkeep gets its own forwarder contract pointed at the target
+    IAutomationForwarder forwarder = IAutomationForwarder(
+      address(new ZKSyncAutomationForwarder(target, address(this), i_automationForwarderLogic))
+    );
+    _createUpkeep(
+      id,
+      Upkeep({
+        overridesEnabled: false,
+        performGas: gasLimit,
+        balance: 0,
+        maxValidBlocknumber: UINT32_MAX,
+        lastPerformedBlockNumber: 0,
+        amountSpent: 0,
+        paused: false,
+        forwarder: forwarder,
+        billingToken: billingToken
+      }),
+      admin,
+      checkData,
+      triggerConfig,
+      offchainConfig
+    );
+    // bump the nonce so the next _createID produces a different id
+    s_storage.nonce++;
+    emit UpkeepRegistered(id, gasLimit, admin);
+    emit UpkeepCheckDataSet(id, checkData);
+    emit UpkeepTriggerConfigSet(id, triggerConfig);
+    emit UpkeepOffchainConfigSet(id, offchainConfig);
+    return (id);
+  }
+
+  /**
+   * @notice cancels an upkeep
+   * @param id the upkeepID to cancel
+   * @dev if a user cancels an upkeep, their funds are locked for CANCELLATION_DELAY blocks to
+   * allow any pending performUpkeep txs time to get confirmed
+   * @dev an owner cancellation takes effect at the current block (no delay is added)
+   */
+  function cancelUpkeep(uint256 id) external {
+    Upkeep memory upkeep = s_upkeep[id];
+    bool isOwner = msg.sender == owner();
+    uint96 minSpend = s_billingConfigs[upkeep.billingToken].minSpend;
+
+    uint256 height = s_hotVars.chainModule.blockNumber();
+    // NOTE(review): maxValidBlocknumber == 0 is treated as not-cancellable — presumably marks a
+    // nonexistent upkeep, since registered upkeeps start at UINT32_MAX; confirm
+    if (upkeep.maxValidBlocknumber == 0) revert CannotCancel();
+    if (upkeep.maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled();
+    if (!isOwner && msg.sender != s_upkeepAdmin[id]) revert OnlyCallableByOwnerOrAdmin();
+
+    if (!isOwner) {
+      height = height + CANCELLATION_DELAY;
+    }
+    s_upkeep[id].maxValidBlocknumber = uint32(height);
+    s_upkeepIDs.remove(id);
+
+    // charge the cancellation fee if the minSpend is not met
+    uint96 cancellationFee = 0;
+    // cancellationFee is min(max(minSpend - amountSpent, 0), amountLeft)
+    if (upkeep.amountSpent < minSpend) {
+      cancellationFee = minSpend - uint96(upkeep.amountSpent);
+      if (cancellationFee > upkeep.balance) {
+        cancellationFee = upkeep.balance;
+      }
+    }
+    s_upkeep[id].balance = upkeep.balance - cancellationFee;
+    // the fee is no longer owed back to the upkeep, so release it from the reserve
+    s_reserveAmounts[upkeep.billingToken] = s_reserveAmounts[upkeep.billingToken] - cancellationFee;
+
+    emit UpkeepCanceled(id, uint64(height));
+  }
+
+  /**
+   * @notice migrates upkeeps from one registry to another.
+   * @param ids the upkeepIDs to migrate
+   * @param destination the destination registry address
+   * @dev a transcoder must be set in order to enable migration
+   * @dev migration permissions must be set on *both* sending and receiving registries
+   * @dev only an upkeep admin can migrate their upkeeps
+   * @dev this function is most gas-efficient if upkeepIDs are sorted by billing token
+   * @dev s_billingOverrides and s_upkeepPrivilegeConfig are not migrated in this function
+   */
+  function migrateUpkeeps(uint256[] calldata ids, address destination) external {
+    if (
+      s_peerRegistryMigrationPermission[destination] != MigrationPermission.OUTGOING &&
+      s_peerRegistryMigrationPermission[destination] != MigrationPermission.BIDIRECTIONAL
+    ) revert MigrationNotPermitted();
+    if (s_storage.transcoder == ZERO_ADDRESS) revert TranscoderNotSet();
+    if (ids.length == 0) revert ArrayHasNoEntries();
+
+    // billingToken/balanceToTransfer hold a running per-token sum, flushed whenever the token changes
+    IERC20 billingToken;
+    uint256 balanceToTransfer;
+    uint256 id;
+    Upkeep memory upkeep;
+    address[] memory admins = new address[](ids.length);
+    Upkeep[] memory upkeeps = new Upkeep[](ids.length);
+    bytes[] memory checkDatas = new bytes[](ids.length);
+    bytes[] memory triggerConfigs = new bytes[](ids.length);
+    bytes[] memory offchainConfigs = new bytes[](ids.length);
+
+    for (uint256 idx = 0; idx < ids.length; idx++) {
+      id = ids[idx];
+      upkeep = s_upkeep[id];
+
+      if (idx == 0) {
+        billingToken = upkeep.billingToken;
+        balanceToTransfer = upkeep.balance;
+      }
+
+      // if we encounter a new billing token, send the sum from the last billing token to the destination registry
+      if (upkeep.billingToken != billingToken) {
+        s_reserveAmounts[billingToken] = s_reserveAmounts[billingToken] - balanceToTransfer;
+        billingToken.safeTransfer(destination, balanceToTransfer);
+        billingToken = upkeep.billingToken;
+        balanceToTransfer = upkeep.balance;
+      } else if (idx != 0) {
+        balanceToTransfer += upkeep.balance;
+      }
+
+      // only a live upkeep whose admin is the caller can be migrated
+      _requireAdminAndNotCancelled(id);
+      // repoint the upkeep's forwarder at the destination registry
+      upkeep.forwarder.updateRegistry(destination);
+
+      upkeeps[idx] = upkeep;
+      admins[idx] = s_upkeepAdmin[id];
+      checkDatas[idx] = s_checkData[id];
+      triggerConfigs[idx] = s_upkeepTriggerConfig[id];
+      offchainConfigs[idx] = s_upkeepOffchainConfig[id];
+      delete s_upkeep[id];
+      delete s_checkData[id];
+      delete s_upkeepTriggerConfig[id];
+      delete s_upkeepOffchainConfig[id];
+      // nullify existing proposed admin change if an upkeep is being migrated
+      delete s_proposedAdmin[id];
+      delete s_upkeepAdmin[id];
+      s_upkeepIDs.remove(id);
+      emit UpkeepMigrated(id, upkeep.balance, destination);
+    }
+    // always transfer the rolling sum in the end
+    s_reserveAmounts[billingToken] = s_reserveAmounts[billingToken] - balanceToTransfer;
+    billingToken.safeTransfer(destination, balanceToTransfer);
+
+    // the targets array is sent empty: every migrated upkeep carries a live forwarder already
+    bytes memory encodedUpkeeps = abi.encode(
+      ids,
+      upkeeps,
+      new address[](ids.length),
+      admins,
+      checkDatas,
+      triggerConfigs,
+      offchainConfigs
+    );
+    MigratableKeeperRegistryInterfaceV2(destination).receiveUpkeeps(
+      UpkeepTranscoderInterfaceV2(s_storage.transcoder).transcodeUpkeeps(
+        UPKEEP_VERSION_BASE,
+        MigratableKeeperRegistryInterfaceV2(destination).upkeepVersion(),
+        encodedUpkeeps
+      )
+    );
+  }
+
+  /**
+   * @notice received upkeeps migrated from another registry
+   * @param encodedUpkeeps the raw upkeep data to import
+   * @dev this function is never called directly, it is only called by another registry's migrate function
+   * @dev s_billingOverrides and s_upkeepPrivilegeConfig are not handled in this function
+   */
+  function receiveUpkeeps(bytes calldata encodedUpkeeps) external {
+    // only peers granted INCOMING or BIDIRECTIONAL permission may push upkeeps in
+    if (
+      s_peerRegistryMigrationPermission[msg.sender] != MigrationPermission.INCOMING &&
+      s_peerRegistryMigrationPermission[msg.sender] != MigrationPermission.BIDIRECTIONAL
+    ) revert MigrationNotPermitted();
+    (
+      uint256[] memory ids,
+      Upkeep[] memory upkeeps,
+      address[] memory targets,
+      address[] memory upkeepAdmins,
+      bytes[] memory checkDatas,
+      bytes[] memory triggerConfigs,
+      bytes[] memory offchainConfigs
+    ) = abi.decode(encodedUpkeeps, (uint256[], Upkeep[], address[], address[], bytes[], bytes[], bytes[]));
+    for (uint256 idx = 0; idx < ids.length; idx++) {
+      // an upkeep arriving without a forwarder gets a fresh one deployed against its target
+      if (address(upkeeps[idx].forwarder) == ZERO_ADDRESS) {
+        upkeeps[idx].forwarder = IAutomationForwarder(
+          address(new ZKSyncAutomationForwarder(targets[idx], address(this), i_automationForwarderLogic))
+        );
+      }
+      _createUpkeep(
+        ids[idx],
+        upkeeps[idx],
+        upkeepAdmins[idx],
+        checkDatas[idx],
+        triggerConfigs[idx],
+        offchainConfigs[idx]
+      );
+      emit UpkeepReceived(ids[idx], upkeeps[idx].balance, msg.sender);
+    }
+  }
+}
diff --git a/contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryLogicB2_3.sol b/contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryLogicB2_3.sol
new file mode 100644
index 00000000000..55af99fde87
--- /dev/null
+++ b/contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryLogicB2_3.sol
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import {ZKSyncAutomationRegistryBase2_3} from "./ZKSyncAutomationRegistryBase2_3.sol";
+import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol";
+import {Address} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol";
+import {ZKSyncAutomationRegistryLogicC2_3} from "./ZKSyncAutomationRegistryLogicC2_3.sol";
+import {Chainable} from "../Chainable.sol";
+import {IERC20Metadata as IERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/IERC20Metadata.sol";
+import {SafeERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol";
+import {SafeCast} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/SafeCast.sol";
+
+contract ZKSyncAutomationRegistryLogicB2_3 is ZKSyncAutomationRegistryBase2_3, Chainable {
+ using Address for address;
+ using EnumerableSet for EnumerableSet.UintSet;
+ using EnumerableSet for EnumerableSet.AddressSet;
+ using SafeERC20 for IERC20;
+
+  /**
+   * @param logicC the address of the third logic contract
+   * @dev all base-contract configuration is read back from logicC's getters so this
+   * contract is constructed with the same immutable settings as logicC
+   * @dev NOTE(review): Chainable is initialized with logicC, presumably to forward
+   * unmatched calls on to it — confirm in Chainable.sol
+   */
+  constructor(
+    ZKSyncAutomationRegistryLogicC2_3 logicC
+  )
+    ZKSyncAutomationRegistryBase2_3(
+      logicC.getLinkAddress(),
+      logicC.getLinkUSDFeedAddress(),
+      logicC.getNativeUSDFeedAddress(),
+      logicC.getFastGasFeedAddress(),
+      logicC.getAutomationForwarderLogic(),
+      logicC.getAllowedReadOnlyAddress(),
+      logicC.getPayoutMode(),
+      logicC.getWrappedNativeTokenAddress()
+    )
+    Chainable(address(logicC))
+  {}
+
+ // ================================================================
+ // | PIPELINE FUNCTIONS |
+ // ================================================================
+
+  /**
+   * @notice called by the automation DON to check if work is needed
+   * @param id the upkeep ID to check for work needed
+   * @param triggerData extra contextual data about the trigger (not used in all code paths)
+   * @return upkeepNeeded true iff the target reported that work is needed
+   * @return performData data to pass to perform (or the target's revert data when the check reverted)
+   * @return upkeepFailureReason why the check failed, if it did
+   * @return gasUsed gas consumed by the target's check call (0 on the early-exit paths)
+   * @return gasLimit the upkeep's configured performGas
+   * @return fastGasWei current fast gas price (0 on the early-exit failure paths)
+   * @return linkUSD current LINK/USD price (0 on the early-exit failure paths)
+   * @dev this is one of the core functions called in the hot path
+   * @dev there is a 2nd checkUpkeep function (below) that is being maintained for backwards compatibility
+   * @dev there is an inconsistency on what gets returned during failure modes
+   * ex sometimes we include price data, sometimes we omit it depending on the failure
+   */
+  function checkUpkeep(
+    uint256 id,
+    bytes memory triggerData
+  )
+    public
+    returns (
+      bool upkeepNeeded,
+      bytes memory performData,
+      UpkeepFailureReason upkeepFailureReason,
+      uint256 gasUsed,
+      uint256 gasLimit,
+      uint256 fastGasWei,
+      uint256 linkUSD
+    )
+  {
+    // NOTE(review): presumably restricts this function to off-chain simulation — confirm in the base contract
+    _preventExecution();
+
+    Trigger triggerType = _getTriggerType(id);
+    HotVars memory hotVars = s_hotVars;
+    Upkeep memory upkeep = s_upkeep[id];
+
+    // eligibility pre-checks: bail out before paying for the feed read and the target call
+    {
+      uint256 nativeUSD;
+      uint96 maxPayment;
+      if (hotVars.paused) return (false, bytes(""), UpkeepFailureReason.REGISTRY_PAUSED, 0, upkeep.performGas, 0, 0);
+      if (upkeep.maxValidBlocknumber != UINT32_MAX)
+        return (false, bytes(""), UpkeepFailureReason.UPKEEP_CANCELLED, 0, upkeep.performGas, 0, 0);
+      if (upkeep.paused) return (false, bytes(""), UpkeepFailureReason.UPKEEP_PAUSED, 0, upkeep.performGas, 0, 0);
+      (fastGasWei, linkUSD, nativeUSD) = _getFeedData(hotVars);
+      maxPayment = _getMaxPayment(
+        id,
+        hotVars,
+        triggerType,
+        upkeep.performGas,
+        fastGasWei,
+        linkUSD,
+        nativeUSD,
+        upkeep.billingToken
+      );
+      // the upkeep must be able to cover the worst-case payment before we bother checking it
+      if (upkeep.balance < maxPayment) {
+        return (false, bytes(""), UpkeepFailureReason.INSUFFICIENT_BALANCE, 0, upkeep.performGas, 0, 0);
+      }
+    }
+
+    bytes memory callData = _checkPayload(id, triggerType, triggerData);
+
+    // measure the gas consumed by the target's check call
+    gasUsed = gasleft();
+    // solhint-disable-next-line avoid-low-level-calls
+    (bool success, bytes memory result) = upkeep.forwarder.getTarget().call{gas: s_storage.checkGasLimit}(callData);
+    gasUsed = gasUsed - gasleft();
+
+    if (!success) {
+      // User's target check reverted. We capture the revert data here and pass it within performData
+      if (result.length > s_storage.maxRevertDataSize) {
+        return (
+          false,
+          bytes(""),
+          UpkeepFailureReason.REVERT_DATA_EXCEEDS_LIMIT,
+          gasUsed,
+          upkeep.performGas,
+          fastGasWei,
+          linkUSD
+        );
+      }
+      return (
+        upkeepNeeded,
+        result,
+        UpkeepFailureReason.TARGET_CHECK_REVERTED,
+        gasUsed,
+        upkeep.performGas,
+        fastGasWei,
+        linkUSD
+      );
+    }
+
+    (upkeepNeeded, performData) = abi.decode(result, (bool, bytes));
+    if (!upkeepNeeded)
+      return (false, bytes(""), UpkeepFailureReason.UPKEEP_NOT_NEEDED, gasUsed, upkeep.performGas, fastGasWei, linkUSD);
+
+    if (performData.length > s_storage.maxPerformDataSize)
+      return (
+        false,
+        bytes(""),
+        UpkeepFailureReason.PERFORM_DATA_EXCEEDS_LIMIT,
+        gasUsed,
+        upkeep.performGas,
+        fastGasWei,
+        linkUSD
+      );
+
+    return (upkeepNeeded, performData, upkeepFailureReason, gasUsed, upkeep.performGas, fastGasWei, linkUSD);
+  }
+
+  /**
+   * @notice see other checkUpkeep function for description
+   * @dev simply delegates to checkUpkeep(id, triggerData) with empty trigger data
+   * @dev this function may be deprecated in a future version of chainlink automation
+   */
+  function checkUpkeep(
+    uint256 id
+  )
+    external
+    returns (
+      bool upkeepNeeded,
+      bytes memory performData,
+      UpkeepFailureReason upkeepFailureReason,
+      uint256 gasUsed,
+      uint256 gasLimit,
+      uint256 fastGasWei,
+      uint256 linkUSD
+    )
+  {
+    return checkUpkeep(id, bytes(""));
+  }
+
+  /**
+   * @dev checkCallback is used specifically for automation data streams lookups (see StreamsLookupCompatibleInterface.sol)
+   * @param id the upkeepID to execute a callback for
+   * @param values the values returned from the data streams lookup
+   * @param extraData the user-provided extra context data
+   * @dev encodes the arguments with CHECK_CALLBACK_SELECTOR and delegates to executeCallback
+   */
+  function checkCallback(
+    uint256 id,
+    bytes[] memory values,
+    bytes calldata extraData
+  )
+    external
+    returns (bool upkeepNeeded, bytes memory performData, UpkeepFailureReason upkeepFailureReason, uint256 gasUsed)
+  {
+    bytes memory payload = abi.encodeWithSelector(CHECK_CALLBACK_SELECTOR, values, extraData);
+    return executeCallback(id, payload);
+  }
+
+  /**
+   * @notice this is a generic callback executor that forwards a call to a user's contract with the configured
+   * gas limit
+   * @param id the upkeepID to execute a callback for
+   * @param payload the data (including function selector) to call on the upkeep target contract
+   * @return upkeepNeeded whether the callback reported that work is needed
+   * @return performData data to pass to perform
+   * @return upkeepFailureReason why the callback failed, if it did
+   * @return gasUsed gas consumed by the target call
+   */
+  function executeCallback(
+    uint256 id,
+    bytes memory payload
+  )
+    public
+    returns (bool upkeepNeeded, bytes memory performData, UpkeepFailureReason upkeepFailureReason, uint256 gasUsed)
+  {
+    // NOTE(review): presumably restricts this function to off-chain simulation — confirm in the base contract
+    _preventExecution();
+
+    Upkeep memory upkeep = s_upkeep[id];
+    // measure the gas consumed by the target call
+    gasUsed = gasleft();
+    // solhint-disable-next-line avoid-low-level-calls
+    (bool success, bytes memory result) = upkeep.forwarder.getTarget().call{gas: s_storage.checkGasLimit}(payload);
+    gasUsed = gasUsed - gasleft();
+    if (!success) {
+      return (false, bytes(""), UpkeepFailureReason.CALLBACK_REVERTED, gasUsed);
+    }
+    (upkeepNeeded, performData) = abi.decode(result, (bool, bytes));
+    if (!upkeepNeeded) {
+      return (false, bytes(""), UpkeepFailureReason.UPKEEP_NOT_NEEDED, gasUsed);
+    }
+    if (performData.length > s_storage.maxPerformDataSize) {
+      return (false, bytes(""), UpkeepFailureReason.PERFORM_DATA_EXCEEDS_LIMIT, gasUsed);
+    }
+    return (upkeepNeeded, performData, upkeepFailureReason, gasUsed);
+  }
+
+  /**
+   * @notice simulates the upkeep with the perform data returned from checkUpkeep
+   * @param id identifier of the upkeep to execute the data with.
+   * @param performData calldata parameter to be passed to the target upkeep.
+   * @return success whether the call reverted or not
+   * @return gasUsed the amount of gas the target contract consumed
+   * @dev reverts with RegistryPaused while the registry is paused
+   */
+  function simulatePerformUpkeep(
+    uint256 id,
+    bytes calldata performData
+  ) external returns (bool success, uint256 gasUsed) {
+    _preventExecution();
+
+    if (s_hotVars.paused) revert RegistryPaused();
+    Upkeep memory upkeep = s_upkeep[id];
+    (success, gasUsed) = _performUpkeep(upkeep.forwarder, upkeep.performGas, performData);
+    return (success, gasUsed);
+  }
+
+ // ================================================================
+ // | UPKEEP MANAGEMENT |
+ // ================================================================
+
+  /**
+   * @notice adds fund to an upkeep
+   * @param id the upkeepID
+   * @param amount the amount of funds to add, in the upkeep's billing token
+   * @dev when native value is attached, the upkeep's billing token must be the wrapped
+   * native token, and `amount` is overwritten with msg.value
+   */
+  function addFunds(uint256 id, uint96 amount) external payable {
+    Upkeep memory upkeep = s_upkeep[id];
+    if (upkeep.maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled();
+
+    if (msg.value != 0) {
+      if (upkeep.billingToken != IERC20(i_wrappedNativeToken)) {
+        revert InvalidToken();
+      }
+      // native payments ignore the amount argument and use msg.value instead
+      amount = SafeCast.toUint96(msg.value);
+    }
+
+    // balances are updated before the token pull; safeTransferFrom reverts on failure, undoing it
+    s_upkeep[id].balance = upkeep.balance + amount;
+    s_reserveAmounts[upkeep.billingToken] = s_reserveAmounts[upkeep.billingToken] + amount;
+
+    if (msg.value == 0) {
+      // ERC20 payment
+      upkeep.billingToken.safeTransferFrom(msg.sender, address(this), amount);
+    } else {
+      // native payment
+      i_wrappedNativeToken.deposit{value: amount}();
+    }
+
+    emit FundsAdded(id, msg.sender, amount);
+  }
+
+  /**
+   * @notice overrides the billing config for an upkeep
+   * @param id the upkeepID
+   * @param billingOverrides the override-able billing config
+   * @dev only callable by the privilege manager (enforced by _onlyPrivilegeManagerAllowed)
+   */
+  function setBillingOverrides(uint256 id, BillingOverrides calldata billingOverrides) external {
+    _onlyPrivilegeManagerAllowed();
+    if (s_upkeep[id].maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled();
+
+    s_upkeep[id].overridesEnabled = true;
+    s_billingOverrides[id] = billingOverrides;
+    emit BillingConfigOverridden(id, billingOverrides);
+  }
+
+  /**
+   * @notice remove the overridden billing config for an upkeep
+   * @param id the upkeepID
+   * @dev only callable by the privilege manager (enforced by _onlyPrivilegeManagerAllowed)
+   */
+  function removeBillingOverrides(uint256 id) external {
+    _onlyPrivilegeManagerAllowed();
+
+    s_upkeep[id].overridesEnabled = false;
+    delete s_billingOverrides[id];
+    emit BillingConfigOverrideRemoved(id);
+  }
+
+  /**
+   * @notice transfers the address of an admin for an upkeep
+   * @param id the upkeepID whose admin is being transferred
+   * @param proposed the proposed new admin
+   * @dev two-step transfer: the current admin proposes here, the new admin completes with acceptUpkeepAdmin
+   */
+  function transferUpkeepAdmin(uint256 id, address proposed) external {
+    _requireAdminAndNotCancelled(id);
+    if (proposed == msg.sender) revert ValueNotChanged();
+
+    // re-proposing the same address is a silent no-op
+    if (s_proposedAdmin[id] != proposed) {
+      s_proposedAdmin[id] = proposed;
+      emit UpkeepAdminTransferRequested(id, msg.sender, proposed);
+    }
+  }
+
+  /**
+   * @notice accepts the transfer of an upkeep admin
+   * @param id the upkeepID whose admin transfer is being accepted
+   * @dev only callable by the address proposed via transferUpkeepAdmin
+   */
+  function acceptUpkeepAdmin(uint256 id) external {
+    Upkeep memory upkeep = s_upkeep[id];
+    if (upkeep.maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled();
+    if (s_proposedAdmin[id] != msg.sender) revert OnlyCallableByProposedAdmin();
+    address past = s_upkeepAdmin[id];
+    s_upkeepAdmin[id] = msg.sender;
+    s_proposedAdmin[id] = ZERO_ADDRESS;
+
+    emit UpkeepAdminTransferred(id, past, msg.sender);
+  }
+
+  /**
+   * @notice pauses an upkeep - an upkeep will be neither checked nor performed while paused
+   * @param id the upkeepID to pause
+   * @dev also removes the id from s_upkeepIDs, the set of active upkeeps
+   */
+  function pauseUpkeep(uint256 id) external {
+    _requireAdminAndNotCancelled(id);
+    Upkeep memory upkeep = s_upkeep[id];
+    if (upkeep.paused) revert OnlyUnpausedUpkeep();
+    s_upkeep[id].paused = true;
+    s_upkeepIDs.remove(id);
+    emit UpkeepPaused(id);
+  }
+
+  /**
+   * @notice unpauses an upkeep
+   * @param id the upkeepID to unpause
+   * @dev re-adds the id to s_upkeepIDs, the set of active upkeeps
+   */
+  function unpauseUpkeep(uint256 id) external {
+    _requireAdminAndNotCancelled(id);
+    Upkeep memory upkeep = s_upkeep[id];
+    if (!upkeep.paused) revert OnlyPausedUpkeep();
+    s_upkeep[id].paused = false;
+    s_upkeepIDs.add(id);
+    emit UpkeepUnpaused(id);
+  }
+
+  /**
+   * @notice updates the checkData for an upkeep
+   * @param id the upkeepID to update
+   * @param newCheckData the new checkData (must not exceed maxCheckDataSize)
+   */
+  function setUpkeepCheckData(uint256 id, bytes calldata newCheckData) external {
+    _requireAdminAndNotCancelled(id);
+    if (newCheckData.length > s_storage.maxCheckDataSize) revert CheckDataExceedsLimit();
+    s_checkData[id] = newCheckData;
+    emit UpkeepCheckDataSet(id, newCheckData);
+  }
+
+  /**
+   * @notice updates the gas limit for an upkeep
+   * @param id the upkeepID to update
+   * @param gasLimit the new performGas (must lie in [PERFORM_GAS_MIN, maxPerformGas])
+   */
+  function setUpkeepGasLimit(uint256 id, uint32 gasLimit) external {
+    if (gasLimit < PERFORM_GAS_MIN || gasLimit > s_storage.maxPerformGas) revert GasLimitOutsideRange();
+    _requireAdminAndNotCancelled(id);
+    s_upkeep[id].performGas = gasLimit;
+
+    emit UpkeepGasLimitSet(id, gasLimit);
+  }
+
+  /**
+   * @notice updates the offchain config for an upkeep
+   * @param id the upkeepID to update
+   * @param config the new offchain config
+   */
+  function setUpkeepOffchainConfig(uint256 id, bytes calldata config) external {
+    _requireAdminAndNotCancelled(id);
+    s_upkeepOffchainConfig[id] = config;
+    emit UpkeepOffchainConfigSet(id, config);
+  }
+
+  /**
+   * @notice sets the upkeep trigger config
+   * @param id the upkeepID to change the trigger for
+   * @param triggerConfig the new trigger config
+   */
+  function setUpkeepTriggerConfig(uint256 id, bytes calldata triggerConfig) external {
+    _requireAdminAndNotCancelled(id);
+    s_upkeepTriggerConfig[id] = triggerConfig;
+    emit UpkeepTriggerConfigSet(id, triggerConfig);
+  }
+
+  /**
+   * @notice withdraws an upkeep's funds from an upkeep
+   * @param id the upkeepID to withdraw from
+   * @param to the recipient of the withdrawn funds
+   * @dev note that an upkeep must be cancelled first!!
+   * @dev only callable by the upkeep's admin; the entire balance is paid out in the billing token
+   */
+  function withdrawFunds(uint256 id, address to) external nonReentrant {
+    if (to == ZERO_ADDRESS) revert InvalidRecipient();
+    Upkeep memory upkeep = s_upkeep[id];
+    if (s_upkeepAdmin[id] != msg.sender) revert OnlyCallableByAdmin();
+    // funds unlock only once the current block is past the upkeep's maxValidBlocknumber
+    if (upkeep.maxValidBlocknumber > s_hotVars.chainModule.blockNumber()) revert UpkeepNotCanceled();
+    uint96 amountToWithdraw = s_upkeep[id].balance;
+    s_reserveAmounts[upkeep.billingToken] = s_reserveAmounts[upkeep.billingToken] - amountToWithdraw;
+    s_upkeep[id].balance = 0;
+    upkeep.billingToken.safeTransfer(to, amountToWithdraw);
+    emit FundsWithdrawn(id, amountToWithdraw, to);
+  }
+
+ // ================================================================
+ // | FINANCE ACTIONS |
+ // ================================================================
+
+  /**
+   * @notice withdraws excess LINK from the liquidity pool
+   * @param to the address to send the fees to
+   * @param amount the amount to withdraw
+   * @dev only callable by the finance admin; only LINK in excess of the amount
+   * reported available by _linkAvailableForPayment can be withdrawn
+   */
+  function withdrawLink(address to, uint256 amount) external {
+    _onlyFinanceAdminAllowed();
+    if (to == ZERO_ADDRESS) revert InvalidRecipient();
+
+    int256 available = _linkAvailableForPayment();
+    // a negative value means nothing is withdrawable
+    if (available < 0) {
+      revert InsufficientBalance(0, amount);
+    } else if (amount > uint256(available)) {
+      revert InsufficientBalance(uint256(available), amount);
+    }
+
+    bool transferStatus = i_link.transfer(to, amount);
+    if (!transferStatus) {
+      revert TransferFailed();
+    }
+    emit FeesWithdrawn(address(i_link), to, amount);
+  }
+
+  /**
+   * @notice withdraws non-LINK fees earned by the contract
+   * @param asset the asset to withdraw
+   * @param to the address to send the fees to
+   * @param amount the amount to withdraw
+   * @dev in ON_CHAIN mode, we prevent withdrawing non-LINK fees unless there is sufficient LINK liquidity
+   * to cover all outstanding debts on the registry
+   */
+  function withdrawERC20Fees(IERC20 asset, address to, uint256 amount) external {
+    _onlyFinanceAdminAllowed();
+    if (to == ZERO_ADDRESS) revert InvalidRecipient();
+    if (address(asset) == address(i_link)) revert InvalidToken();
+    if (_linkAvailableForPayment() < 0 && s_payoutMode == PayoutMode.ON_CHAIN) revert InsufficientLinkLiquidity();
+    // only the portion of the balance not reserved (s_reserveAmounts) is withdrawable
+    uint256 available = asset.balanceOf(address(this)) - s_reserveAmounts[asset];
+    if (amount > available) revert InsufficientBalance(available, amount);
+
+    asset.safeTransfer(to, amount);
+    emit FeesWithdrawn(address(asset), to, amount);
+  }
+}
diff --git a/contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryLogicC2_3.sol b/contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryLogicC2_3.sol
new file mode 100644
index 00000000000..61d0eecfbaf
--- /dev/null
+++ b/contracts/src/v0.8/automation/v2_3_zksync/ZKSyncAutomationRegistryLogicC2_3.sol
@@ -0,0 +1,638 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import {ZKSyncAutomationRegistryBase2_3} from "./ZKSyncAutomationRegistryBase2_3.sol";
+import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol";
+import {Address} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol";
+import {IAutomationForwarder} from "../interfaces/IAutomationForwarder.sol";
+import {IChainModule} from "../interfaces/IChainModule.sol";
+import {IERC20Metadata as IERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/IERC20Metadata.sol";
+import {IAutomationV21PlusCommon} from "../interfaces/IAutomationV21PlusCommon.sol";
+
+contract ZKSyncAutomationRegistryLogicC2_3 is ZKSyncAutomationRegistryBase2_3 {
+ using Address for address;
+ using EnumerableSet for EnumerableSet.UintSet;
+ using EnumerableSet for EnumerableSet.AddressSet;
+
+  /**
+   * @dev see AutomationRegistry master contract for constructor description
+   * @dev forwards all arguments unchanged to ZKSyncAutomationRegistryBase2_3
+   */
+  constructor(
+    address link,
+    address linkUSDFeed,
+    address nativeUSDFeed,
+    address fastGasFeed,
+    address automationForwarderLogic,
+    address allowedReadOnlyAddress,
+    PayoutMode payoutMode,
+    address wrappedNativeTokenAddress
+  )
+    ZKSyncAutomationRegistryBase2_3(
+      link,
+      linkUSDFeed,
+      nativeUSDFeed,
+      fastGasFeed,
+      automationForwarderLogic,
+      allowedReadOnlyAddress,
+      payoutMode,
+      wrappedNativeTokenAddress
+    )
+  {}
+
+ // ================================================================
+ // | NODE ACTIONS |
+ // ================================================================
+
+  /**
+   * @notice transfers the address of payee for a transmitter
+   * @param transmitter the transmitter whose payeeship is being transferred
+   * @param proposed the proposed new payee
+   * @dev two-step transfer: the current payee proposes here, the new payee completes with acceptPayeeship
+   */
+  function transferPayeeship(address transmitter, address proposed) external {
+    if (s_transmitterPayees[transmitter] != msg.sender) revert OnlyCallableByPayee();
+    if (proposed == msg.sender) revert ValueNotChanged();
+
+    // re-proposing the same address is a silent no-op
+    if (s_proposedPayee[transmitter] != proposed) {
+      s_proposedPayee[transmitter] = proposed;
+      emit PayeeshipTransferRequested(transmitter, msg.sender, proposed);
+    }
+  }
+
+  /**
+   * @notice accepts the transfer of the payee
+   * @param transmitter the transmitter whose payeeship is being accepted
+   * @dev only callable by the address proposed via transferPayeeship
+   */
+  function acceptPayeeship(address transmitter) external {
+    if (s_proposedPayee[transmitter] != msg.sender) revert OnlyCallableByProposedPayee();
+    address past = s_transmitterPayees[transmitter];
+    s_transmitterPayees[transmitter] = msg.sender;
+    s_proposedPayee[transmitter] = ZERO_ADDRESS;
+
+    emit PayeeshipTransferred(transmitter, past, msg.sender);
+  }
+
+  /**
+   * @notice this is for NOPs to withdraw LINK received as payment for work performed
+   * @param from the transmitter whose payment is being withdrawn
+   * @param to the recipient of the LINK
+   * @dev only callable by the transmitter's payee; reverts while in OFF_CHAIN payout mode
+   */
+  function withdrawPayment(address from, address to) external {
+    if (to == ZERO_ADDRESS) revert InvalidRecipient();
+    if (s_payoutMode == PayoutMode.OFF_CHAIN) revert MustSettleOffchain();
+    if (s_transmitterPayees[from] != msg.sender) revert OnlyCallableByPayee();
+    // fold the transmitter's share of the premium pool into its balance before paying out
+    uint96 balance = _updateTransmitterBalanceFromPool(from, s_hotVars.totalPremium, uint96(s_transmittersList.length));
+    s_transmitters[from].balance = 0;
+    s_reserveAmounts[IERC20(address(i_link))] = s_reserveAmounts[IERC20(address(i_link))] - balance;
+    bool transferStatus = i_link.transfer(to, balance);
+    if (!transferStatus) {
+      revert TransferFailed();
+    }
+    emit PaymentWithdrawn(from, balance, to, msg.sender);
+  }
+
+ // ================================================================
+ // | OWNER / MANAGER ACTIONS |
+ // ================================================================
+
+  /**
+   * @notice sets the privilege config for an upkeep
+   * @param upkeepId the upkeep to set privilege config for
+   * @param newPrivilegeConfig the new privilege config bytes
+   * @dev only callable by the privilege manager (enforced by _onlyPrivilegeManagerAllowed)
+   */
+  function setUpkeepPrivilegeConfig(uint256 upkeepId, bytes calldata newPrivilegeConfig) external {
+    _onlyPrivilegeManagerAllowed();
+    s_upkeepPrivilegeConfig[upkeepId] = newPrivilegeConfig;
+    emit UpkeepPrivilegeConfigSet(upkeepId, newPrivilegeConfig);
+  }
+
+  /**
+   * @notice this is used by the owner to set the initial payees for newly added transmitters. The owner is not allowed to change payees for existing transmitters.
+   * @param payees one payee per transmitter, aligned with s_transmittersList by index
+   * @dev the IGNORE_ADDRESS is a "helper" that makes it easier to construct a list of payees when you only care about setting the payee for a small number of transmitters.
+   */
+  function setPayees(address[] calldata payees) external onlyOwner {
+    if (s_transmittersList.length != payees.length) revert ParameterLengthError();
+    for (uint256 i = 0; i < s_transmittersList.length; i++) {
+      address transmitter = s_transmittersList[i];
+      address oldPayee = s_transmitterPayees[transmitter];
+      address newPayee = payees[i];
+
+      // an existing payee may only be re-set to the same value; IGNORE_ADDRESS skips the entry
+      if (
+        (newPayee == ZERO_ADDRESS) || (oldPayee != ZERO_ADDRESS && oldPayee != newPayee && newPayee != IGNORE_ADDRESS)
+      ) {
+        revert InvalidPayee();
+      }
+
+      if (newPayee != IGNORE_ADDRESS) {
+        s_transmitterPayees[transmitter] = newPayee;
+      }
+    }
+    emit PayeesUpdated(s_transmittersList, payees);
+  }
+
+  /**
+   * @notice sets the migration permission for a peer registry
+   * @param peer the peer registry address
+   * @param permission the MigrationPermission to grant
+   * @dev this must be done before upkeeps can be migrated to/from another registry
+   */
+  function setPeerRegistryMigrationPermission(address peer, MigrationPermission permission) external onlyOwner {
+    s_peerRegistryMigrationPermission[peer] = permission;
+  }
+
+  /**
+   * @notice pauses the entire registry
+   */
+  function pause() external onlyOwner {
+    s_hotVars.paused = true;
+    emit Paused(msg.sender);
+  }
+
+  /**
+   * @notice unpauses the entire registry
+   */
+  function unpause() external onlyOwner {
+    s_hotVars.paused = false;
+    emit Unpaused(msg.sender);
+  }
+
+  /**
+   * @notice sets a generic bytes field used to indicate the privilege that this admin address had
+   * @param admin the address to set privilege for
+   * @param newPrivilegeConfig the privileges that this admin has
+   * @dev only callable by the privilege manager (enforced by _onlyPrivilegeManagerAllowed)
+   */
+  function setAdminPrivilegeConfig(address admin, bytes calldata newPrivilegeConfig) external {
+    _onlyPrivilegeManagerAllowed();
+    s_adminPrivilegeConfig[admin] = newPrivilegeConfig;
+    emit AdminPrivilegeConfigSet(admin, newPrivilegeConfig);
+  }
+
+  /**
+   * @notice settles NOPs' LINK payment offchain
+   * @dev only callable by the finance admin while in OFF_CHAIN payout mode; zeroes every
+   * transmitter balance and the LINK reserve, and emits the amounts owed so that they can
+   * be paid out off-chain
+   */
+  function settleNOPsOffchain() external {
+    _onlyFinanceAdminAllowed();
+    if (s_payoutMode == PayoutMode.ON_CHAIN) revert MustSettleOnchain();
+
+    uint96 totalPremium = s_hotVars.totalPremium;
+    uint256 activeTransmittersLength = s_transmittersList.length;
+    uint256 deactivatedTransmittersLength = s_deactivatedTransmitters.length();
+    uint256 length = activeTransmittersLength + deactivatedTransmittersLength;
+    uint256[] memory payments = new uint256[](length);
+    address[] memory payees = new address[](length);
+
+    // settle active transmitters first, folding their share of the premium pool into their balance
+    for (uint256 i = 0; i < activeTransmittersLength; i++) {
+      address transmitterAddr = s_transmittersList[i];
+      uint96 balance = _updateTransmitterBalanceFromPool(
+        transmitterAddr,
+        totalPremium,
+        uint96(activeTransmittersLength)
+      );
+
+      payments[i] = balance;
+      payees[i] = s_transmitterPayees[transmitterAddr];
+      s_transmitters[transmitterAddr].balance = 0;
+    }
+
+    // then settle transmitters that were deactivated but still carry a balance
+    for (uint256 i = 0; i < deactivatedTransmittersLength; i++) {
+      address deactivatedAddr = s_deactivatedTransmitters.at(i);
+      Transmitter memory transmitter = s_transmitters[deactivatedAddr];
+
+      payees[i + activeTransmittersLength] = s_transmitterPayees[deactivatedAddr];
+      payments[i + activeTransmittersLength] = transmitter.balance;
+      s_transmitters[deactivatedAddr].balance = 0;
+    }
+
+    // reserve amount of LINK is reset to 0 since no user deposits of LINK are expected in offchain mode
+    s_reserveAmounts[IERC20(address(i_link))] = 0;
+
+    // drain the deactivated set from the back so removal does not invalidate remaining indices
+    for (uint256 idx = s_deactivatedTransmitters.length(); idx > 0; idx--) {
+      s_deactivatedTransmitters.remove(s_deactivatedTransmitters.at(idx - 1));
+    }
+
+    emit NOPsSettledOffchain(payees, payments);
+  }
+
+  /**
+   * @notice disables offchain payment for NOPs
+   * @dev switches the registry's payout mode back to ON_CHAIN; only callable by the owner
+   */
+  function disableOffchainPayments() external onlyOwner {
+    s_payoutMode = PayoutMode.ON_CHAIN;
+  }
+
+ // ================================================================
+ // | GETTERS |
+ // ================================================================
+
+  /// @notice accessor for the REGISTRY_CONDITIONAL_OVERHEAD gas constant
+  function getConditionalGasOverhead() external pure returns (uint256) {
+    return REGISTRY_CONDITIONAL_OVERHEAD;
+  }
+
+  /// @notice accessor for the REGISTRY_LOG_OVERHEAD gas constant
+  function getLogGasOverhead() external pure returns (uint256) {
+    return REGISTRY_LOG_OVERHEAD;
+  }
+
+  /// @notice accessor for the REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD gas constant
+  function getPerPerformByteGasOverhead() external pure returns (uint256) {
+    return REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD;
+  }
+
+  /// @notice accessor for the REGISTRY_PER_SIGNER_GAS_OVERHEAD gas constant
+  function getPerSignerGasOverhead() external pure returns (uint256) {
+    return REGISTRY_PER_SIGNER_GAS_OVERHEAD;
+  }
+
+  /// @notice accessor for the TRANSMIT_CALLDATA_FIXED_BYTES_OVERHEAD constant
+  function getTransmitCalldataFixedBytesOverhead() external pure returns (uint256) {
+    return TRANSMIT_CALLDATA_FIXED_BYTES_OVERHEAD;
+  }
+
+  /// @notice accessor for the TRANSMIT_CALLDATA_PER_SIGNER_BYTES_OVERHEAD constant
+  function getTransmitCalldataPerSignerBytesOverhead() external pure returns (uint256) {
+    return TRANSMIT_CALLDATA_PER_SIGNER_BYTES_OVERHEAD;
+  }
+
+  /// @notice accessor for the CANCELLATION_DELAY constant
+  function getCancellationDelay() external pure returns (uint256) {
+    return CANCELLATION_DELAY;
+  }
+
+  /// @notice the address of the LINK token (immutable i_link)
+  function getLinkAddress() external view returns (address) {
+    return address(i_link);
+  }
+
+  /// @notice the address of the LINK/USD feed (immutable i_linkUSDFeed)
+  function getLinkUSDFeedAddress() external view returns (address) {
+    return address(i_linkUSDFeed);
+  }
+
+  /// @notice the address of the native/USD feed (immutable i_nativeUSDFeed)
+  function getNativeUSDFeedAddress() external view returns (address) {
+    return address(i_nativeUSDFeed);
+  }
+
+  /// @notice the address of the fast-gas feed (immutable i_fastGasFeed)
+  function getFastGasFeedAddress() external view returns (address) {
+    return address(i_fastGasFeed);
+  }
+
+  /// @notice the address of the forwarder logic contract (immutable i_automationForwarderLogic)
+  function getAutomationForwarderLogic() external view returns (address) {
+    return i_automationForwarderLogic;
+  }
+
+  /// @notice accessor for the i_allowedReadOnlyAddress immutable
+  function getAllowedReadOnlyAddress() external view returns (address) {
+    return i_allowedReadOnlyAddress;
+  }
+
+  /// @notice the address of the wrapped native token (immutable i_wrappedNativeToken)
+  function getWrappedNativeTokenAddress() external view returns (address) {
+    return address(i_wrappedNativeToken);
+  }
+
+  /// @notice the billing token configured for an upkeep
+  function getBillingToken(uint256 upkeepID) external view returns (IERC20) {
+    return s_upkeep[upkeepID].billingToken;
+  }
+
+  /// @notice the list of billing tokens known to the registry
+  function getBillingTokens() external view returns (IERC20[] memory) {
+    return s_billingTokens;
+  }
+
+  /// @notice true iff the token has a billing config with a non-zero price feed
+  function supportsBillingToken(IERC20 token) external view returns (bool) {
+    return address(s_billingConfigs[token].priceFeed) != address(0);
+  }
+
+  /// @notice the billing config for a given billing token
+  function getBillingTokenConfig(IERC20 token) external view returns (BillingConfig memory) {
+    return s_billingConfigs[token];
+  }
+
+  /// @notice whether billing overrides are enabled for an upkeep
+  function getBillingOverridesEnabled(uint256 upkeepID) external view returns (bool) {
+    return s_upkeep[upkeepID].overridesEnabled;
+  }
+
+  /// @notice the registry's current payout mode (ON_CHAIN or OFF_CHAIN)
+  function getPayoutMode() external view returns (PayoutMode) {
+    return s_payoutMode;
+  }
+
+  /// @notice the upkeep data version used when encoding/decoding upkeeps for migration
+  function upkeepVersion() public pure returns (uint8) {
+    return UPKEEP_VERSION_BASE;
+  }
+
+  /**
+   * @notice gets the number of upkeeps on the registry
+   * @dev counts the ids currently tracked in s_upkeepIDs (paused upkeeps are removed from this set)
+   */
+  function getNumUpkeeps() external view returns (uint256) {
+    return s_upkeepIDs.length();
+  }
+
+  /**
+   * @notice read all of the details about an upkeep
+   * @param id the upkeep to read
+   * @dev this function may be deprecated in a future version of automation in favor of individual
+   * getters for each field
+   */
+  function getUpkeep(uint256 id) external view returns (IAutomationV21PlusCommon.UpkeepInfoLegacy memory upkeepInfo) {
+    Upkeep memory reg = s_upkeep[id];
+    // an upkeep with no forwarder reports a zero target
+    address target = address(reg.forwarder) == address(0) ? address(0) : reg.forwarder.getTarget();
+    upkeepInfo = IAutomationV21PlusCommon.UpkeepInfoLegacy({
+      target: target,
+      performGas: reg.performGas,
+      checkData: s_checkData[id],
+      balance: reg.balance,
+      admin: s_upkeepAdmin[id],
+      maxValidBlocknumber: reg.maxValidBlocknumber,
+      lastPerformedBlockNumber: reg.lastPerformedBlockNumber,
+      amountSpent: uint96(reg.amountSpent), // force casting to uint96 for backwards compatibility. Not an issue if it overflows.
+      paused: reg.paused,
+      offchainConfig: s_upkeepOffchainConfig[id]
+    });
+    return upkeepInfo;
+  }
+
+  /**
+   * @notice retrieve active upkeep IDs. Active upkeep is defined as an upkeep which is not paused and not canceled.
+   * @param startIndex starting index in list
+   * @param maxCount max count to retrieve (0 = unlimited)
+   * @dev the order of IDs in the list is **not guaranteed**, therefore, if making successive calls, one
+   * should consider keeping the blockheight constant to ensure a holistic picture of the contract state
+   */
+  function getActiveUpkeepIDs(uint256 startIndex, uint256 maxCount) external view returns (uint256[] memory) {
+    uint256 numUpkeeps = s_upkeepIDs.length();
+    if (startIndex >= numUpkeeps) revert IndexOutOfRange();
+    // clamp the window to the end of the list; maxCount == 0 means "everything from startIndex"
+    uint256 endIndex = startIndex + maxCount;
+    endIndex = endIndex > numUpkeeps || maxCount == 0 ? numUpkeeps : endIndex;
+    uint256[] memory ids = new uint256[](endIndex - startIndex);
+    for (uint256 idx = 0; idx < ids.length; idx++) {
+      ids[idx] = s_upkeepIDs.at(idx + startIndex);
+    }
+    return ids;
+  }
+
+  /**
+   * @notice returns the upkeep's trigger type
+   * @dev pure: the trigger type is derived from the upkeep id alone (see _getTriggerType)
+   */
+  function getTriggerType(uint256 upkeepId) external pure returns (Trigger) {
+    return _getTriggerType(upkeepId);
+  }
+
+  /**
+   * @notice returns the trigger config for an upkeep
+   */
+  function getUpkeepTriggerConfig(uint256 upkeepId) public view returns (bytes memory) {
+    return s_upkeepTriggerConfig[upkeepId];
+  }
+
+  /**
+   * @notice read the current info about any transmitter address
+   * @param query the transmitter to look up
+   * @return active whether the transmitter is currently active
+   * @return index the transmitter's index
+   * @return balance the transmitter's balance including its unclaimed share of the premium pool
+   * @return lastCollected the premium checkpoint at which this transmitter last collected
+   * @return payee the address entitled to withdraw this transmitter's payments
+   */
+  function getTransmitterInfo(
+    address query
+  ) external view returns (bool active, uint8 index, uint96 balance, uint96 lastCollected, address payee) {
+    Transmitter memory transmitter = s_transmitters[query];
+
+    // active transmitters are also owed an equal share of the premium accrued since they last collected
+    uint96 pooledShare = 0;
+    if (transmitter.active) {
+      uint96 totalDifference = s_hotVars.totalPremium - transmitter.lastCollected;
+      pooledShare = totalDifference / uint96(s_transmittersList.length);
+    }
+
+    return (
+      transmitter.active,
+      transmitter.index,
+      (transmitter.balance + pooledShare),
+      transmitter.lastCollected,
+      s_transmitterPayees[query]
+    );
+  }
+
+  /**
+   * @notice read the current info about any signer address
+   * @param query the signer to look up
+   */
+  function getSignerInfo(address query) external view returns (bool active, uint8 index) {
+    Signer memory signer = s_signers[query];
+    return (signer.active, signer.index);
+  }
+
+  /**
+   * @notice read the current on-chain config of the registry
+   * @return the registry's OnchainConfig, assembled from s_storage, s_hotVars and the fallback prices
+   * @dev this function will change between versions, it should never be used where
+   * backwards compatibility matters!
+   */
+  function getConfig() external view returns (OnchainConfig memory) {
+    return
+      OnchainConfig({
+        checkGasLimit: s_storage.checkGasLimit,
+        stalenessSeconds: s_hotVars.stalenessSeconds,
+        gasCeilingMultiplier: s_hotVars.gasCeilingMultiplier,
+        maxPerformGas: s_storage.maxPerformGas,
+        maxCheckDataSize: s_storage.maxCheckDataSize,
+        maxPerformDataSize: s_storage.maxPerformDataSize,
+        maxRevertDataSize: s_storage.maxRevertDataSize,
+        fallbackGasPrice: s_fallbackGasPrice,
+        fallbackLinkPrice: s_fallbackLinkPrice,
+        fallbackNativePrice: s_fallbackNativePrice,
+        transcoder: s_storage.transcoder,
+        registrars: s_registrars.values(),
+        upkeepPrivilegeManager: s_storage.upkeepPrivilegeManager,
+        chainModule: s_hotVars.chainModule,
+        reorgProtectionEnabled: s_hotVars.reorgProtectionEnabled,
+        financeAdmin: s_storage.financeAdmin
+      });
+  }
+
+  /**
+   * @notice read the current state of the registry
+   * @dev this function is deprecated
+   * @dev fields marked deprecated below are always returned as zero
+   */
+  function getState()
+    external
+    view
+    returns (
+      IAutomationV21PlusCommon.StateLegacy memory state,
+      IAutomationV21PlusCommon.OnchainConfigLegacy memory config,
+      address[] memory signers,
+      address[] memory transmitters,
+      uint8 f
+    )
+  {
+    state = IAutomationV21PlusCommon.StateLegacy({
+      nonce: s_storage.nonce,
+      ownerLinkBalance: 0, // deprecated
+      expectedLinkBalance: 0, // deprecated
+      totalPremium: s_hotVars.totalPremium,
+      numUpkeeps: s_upkeepIDs.length(),
+      configCount: s_storage.configCount,
+      latestConfigBlockNumber: s_storage.latestConfigBlockNumber,
+      latestConfigDigest: s_latestConfigDigest,
+      latestEpoch: s_hotVars.latestEpoch,
+      paused: s_hotVars.paused
+    });
+
+    config = IAutomationV21PlusCommon.OnchainConfigLegacy({
+      paymentPremiumPPB: 0, // deprecated
+      flatFeeMicroLink: 0, // deprecated
+      checkGasLimit: s_storage.checkGasLimit,
+      stalenessSeconds: s_hotVars.stalenessSeconds,
+      gasCeilingMultiplier: s_hotVars.gasCeilingMultiplier,
+      minUpkeepSpend: 0, // deprecated
+      maxPerformGas: s_storage.maxPerformGas,
+      maxCheckDataSize: s_storage.maxCheckDataSize,
+      maxPerformDataSize: s_storage.maxPerformDataSize,
+      maxRevertDataSize: s_storage.maxRevertDataSize,
+      fallbackGasPrice: s_fallbackGasPrice,
+      fallbackLinkPrice: s_fallbackLinkPrice,
+      transcoder: s_storage.transcoder,
+      registrars: s_registrars.values(),
+      upkeepPrivilegeManager: s_storage.upkeepPrivilegeManager
+    });
+
+    return (state, config, s_signersList, s_transmittersList, s_hotVars.f);
+  }
+
+  /**
+   * @notice read the Storage data
+   * @return the raw s_storage struct
+   * @dev this function signature will change with each version of automation
+   * this should not be treated as a stable function
+   */
+  function getStorage() external view returns (Storage memory) {
+    return s_storage;
+  }
+
+  /**
+   * @notice read the HotVars data
+   * @return the raw s_hotVars struct
+   * @dev this function signature will change with each version of automation
+   * this should not be treated as a stable function
+   */
+  function getHotVars() external view returns (HotVars memory) {
+    return s_hotVars;
+  }
+
+  /**
+   * @notice get the chain module
+   * @return chainModule the chain-specific module stored in hot vars
+   */
+  function getChainModule() external view returns (IChainModule chainModule) {
+    return s_hotVars.chainModule;
+  }
+
+  /**
+   * @notice if this registry has reorg protection enabled
+   */
+  function getReorgProtectionEnabled() external view returns (bool reorgProtectionEnabled) {
+    return s_hotVars.reorgProtectionEnabled;
+  }
+
+ /**
+ * @notice calculates the minimum balance required for an upkeep to remain eligible
+ * @param id the upkeep id to calculate minimum balance for
+ */
+ function getBalance(uint256 id) external view returns (uint96 balance) {
+ return s_upkeep[id].balance;
+ }
+
+ /**
+ * @notice calculates the minimum balance required for an upkeep to remain eligible
+ * @param id the upkeep id to calculate minimum balance for
+ */
+ function getMinBalance(uint256 id) external view returns (uint96) {
+ return getMinBalanceForUpkeep(id);
+ }
+
+ /**
+ * @notice calculates the minimum balance required for an upkeep to remain eligible
+ * @param id the upkeep id to calculate minimum balance for
+ * @dev this will be deprecated in a future version in favor of getMinBalance
+ */
+ function getMinBalanceForUpkeep(uint256 id) public view returns (uint96 minBalance) {
+ Upkeep memory upkeep = s_upkeep[id];
+ return getMaxPaymentForGas(id, _getTriggerType(id), upkeep.performGas, upkeep.billingToken);
+ }
+
+ /**
+ * @notice calculates the maximum payment for a given gas limit
+ * @param gasLimit the gas to calculate payment for
+ */
+ function getMaxPaymentForGas(
+ uint256 id,
+ Trigger triggerType,
+ uint32 gasLimit,
+ IERC20 billingToken
+ ) public view returns (uint96 maxPayment) {
+ HotVars memory hotVars = s_hotVars;
+ (uint256 fastGasWei, uint256 linkUSD, uint256 nativeUSD) = _getFeedData(hotVars);
+ return _getMaxPayment(id, hotVars, triggerType, gasLimit, fastGasWei, linkUSD, nativeUSD, billingToken);
+ }
+
+ /**
+ * @notice retrieves the migration permission for a peer registry
+ */
+ function getPeerRegistryMigrationPermission(address peer) external view returns (MigrationPermission) {
+ return s_peerRegistryMigrationPermission[peer];
+ }
+
+ /**
+ * @notice returns the upkeep privilege config
+ */
+ function getUpkeepPrivilegeConfig(uint256 upkeepId) external view returns (bytes memory) {
+ return s_upkeepPrivilegeConfig[upkeepId];
+ }
+
+ /**
+ * @notice returns the admin's privilege config
+ */
+ function getAdminPrivilegeConfig(address admin) external view returns (bytes memory) {
+ return s_adminPrivilegeConfig[admin];
+ }
+
+ /**
+ * @notice returns the upkeep's forwarder contract
+ */
+ function getForwarder(uint256 upkeepID) external view returns (IAutomationForwarder) {
+ return s_upkeep[upkeepID].forwarder;
+ }
+
+ /**
+ * @notice returns if the dedupKey exists or not
+ */
+ function hasDedupKey(bytes32 dedupKey) external view returns (bool) {
+ return s_dedupKeys[dedupKey];
+ }
+
+ /**
+ * @notice returns the fallback native price
+ */
+ function getFallbackNativePrice() external view returns (uint256) {
+ return s_fallbackNativePrice;
+ }
+
+ /**
+ * @notice returns the amount of a particular token that is reserved as
+ * user deposits / NOP payments
+ */
+ function getReserveAmount(IERC20 billingToken) external view returns (uint256) {
+ return s_reserveAmounts[billingToken];
+ }
+
+ /**
+ * @notice returns the amount of a particular token that is withdraw-able by finance admin
+ */
+ function getAvailableERC20ForPayment(IERC20 billingToken) external view returns (uint256) {
+ return billingToken.balanceOf(address(this)) - s_reserveAmounts[IERC20(address(billingToken))];
+ }
+
+ /**
+ * @notice returns the size of the LINK liquidity pool
+ */
+ function linkAvailableForPayment() public view returns (int256) {
+ return _linkAvailableForPayment();
+ }
+
+ /**
+ * @notice returns the BillingOverrides config for a given upkeep
+ */
+ function getBillingOverrides(uint256 upkeepID) external view returns (BillingOverrides memory) {
+ return s_billingOverrides[upkeepID];
+ }
+
+ /**
+ * @notice returns the BillingConfig for a given billing token, this includes decimals and price feed etc
+ */
+ function getBillingConfig(IERC20 billingToken) external view returns (BillingConfig memory) {
+ return s_billingConfigs[billingToken];
+ }
+
+ /**
+ * @notice returns all active transmitters with their associated payees
+ */
+ function getTransmittersWithPayees() external view returns (TransmitterPayeeInfo[] memory) {
+ uint256 transmitterCount = s_transmittersList.length;
+ TransmitterPayeeInfo[] memory transmitters = new TransmitterPayeeInfo[](transmitterCount);
+
+ for (uint256 i = 0; i < transmitterCount; i++) {
+ address transmitterAddress = s_transmittersList[i];
+ address payeeAddress = s_transmitterPayees[transmitterAddress];
+
+ transmitters[i] = TransmitterPayeeInfo(transmitterAddress, payeeAddress);
+ }
+
+ return transmitters;
+ }
+}
diff --git a/contracts/test/v0.8/automation/AutomationRegistry2_3.test.ts b/contracts/test/v0.8/automation/AutomationRegistry2_3.test.ts
index 9a572269695..f993271fbbc 100644
--- a/contracts/test/v0.8/automation/AutomationRegistry2_3.test.ts
+++ b/contracts/test/v0.8/automation/AutomationRegistry2_3.test.ts
@@ -25,7 +25,6 @@ import { ChainModuleBase__factory as ChainModuleBaseFactory } from '../../../typ
import { ArbitrumModule__factory as ArbitrumModuleFactory } from '../../../typechain/factories/ArbitrumModule__factory'
import { OptimismModule__factory as OptimismModuleFactory } from '../../../typechain/factories/OptimismModule__factory'
import { ILogAutomation__factory as ILogAutomationactory } from '../../../typechain/factories/ILogAutomation__factory'
-import { IAutomationForwarder__factory as IAutomationForwarderFactory } from '../../../typechain/factories/IAutomationForwarder__factory'
import { MockArbSys__factory as MockArbSysFactory } from '../../../typechain/factories/MockArbSys__factory'
import { AutomationCompatibleUtils } from '../../../typechain/AutomationCompatibleUtils'
import { MockArbGasInfo } from '../../../typechain/MockArbGasInfo'
diff --git a/contracts/test/v0.8/automation/helpers.ts b/contracts/test/v0.8/automation/helpers.ts
index 5a95fb482cd..b2cdfb4efd9 100644
--- a/contracts/test/v0.8/automation/helpers.ts
+++ b/contracts/test/v0.8/automation/helpers.ts
@@ -170,10 +170,10 @@ export const deployRegistry23 = async (
link: Parameters[0],
linkUSD: Parameters[1],
nativeUSD: Parameters[2],
- fastgas: Parameters[2],
+ fastgas: Parameters[3],
allowedReadOnlyAddress: Parameters<
AutomationRegistryLogicC2_3Factory['deploy']
- >[3],
+ >[5],
payoutMode: Parameters[6],
wrappedNativeTokenAddress: Parameters<
AutomationRegistryLogicC2_3Factory['deploy']
From 477c8ce4b5b0a33a1645c15027bce3d23cff8c44 Mon Sep 17 00:00:00 2001
From: Jordan Krage
Date: Wed, 7 Aug 2024 17:38:04 +0200
Subject: [PATCH 27/52] enable gomods (#14042)
---
GNUmakefile | 18 +++++-------------
1 file changed, 5 insertions(+), 13 deletions(-)
diff --git a/GNUmakefile b/GNUmakefile
index 3cba1738d5d..3b781a665d2 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -27,12 +27,8 @@ gomod: ## Ensure chainlink's go dependencies are installed.
go mod download
.PHONY: gomodtidy
-gomodtidy: ## Run go mod tidy on all modules.
- go mod tidy
- cd ./core/scripts && go mod tidy
- cd ./integration-tests && go mod tidy
- cd ./integration-tests/load && go mod tidy
- cd ./dashboard-lib && go mod tidy
+gomodtidy: gomods ## Run go mod tidy on all modules.
+ gomods tidy
.PHONY: docs
docs: ## Install and run pkgsite to view Go docs
@@ -89,12 +85,8 @@ abigen: ## Build & install abigen.
./tools/bin/build_abigen
.PHONY: generate
-generate: abigen codecgen mockery protoc ## Execute all go:generate commands.
- go generate -x ./...
- cd ./core/scripts && go generate -x ./...
- cd ./integration-tests && go generate -x ./...
- cd ./integration-tests/load && go generate -x ./...
- cd ./dashboard-lib && go generate -x ./...
+generate: abigen codecgen mockery protoc gomods ## Execute all go:generate commands.
+ gomods -w go generate -x ./...
mockery
.PHONY: rm-mocked
@@ -136,7 +128,7 @@ presubmit: ## Format go files and imports.
.PHONY: gomods
gomods: ## Install gomods
- go install github.com/jmank88/gomods@v0.1.1
+ go install github.com/jmank88/gomods@v0.1.3
.PHONY: mockery
mockery: $(mockery) ## Install mockery.
From 499a67705ac7ea525685c4a064ff4aa52b08fa44 Mon Sep 17 00:00:00 2001
From: Ryan Hall
Date: Wed, 7 Aug 2024 12:51:02 -0400
Subject: [PATCH 28/52] add OZ 5.0.2 contracts (#14065)
---
contracts/.changeset/mean-zoos-fly.md | 5 +
.../v5.0.2/contracts/access/AccessControl.sol | 209 +++
.../contracts/access/IAccessControl.sol | 98 ++
.../v5.0.2/contracts/interfaces/IERC165.sol | 6 +
.../v5.0.2/contracts/interfaces/IERC20.sol | 6 +
.../v5.0.2/contracts/interfaces/IERC5267.sol | 28 +
.../contracts/interfaces/draft-IERC6093.sol | 161 +++
.../v5.0.2/contracts/token/ERC20/ERC20.sol | 316 +++++
.../v5.0.2/contracts/token/ERC20/IERC20.sol | 79 ++
.../token/ERC20/extensions/ERC20Burnable.sol | 39 +
.../token/ERC20/extensions/IERC20Metadata.sol | 26 +
.../token/ERC20/extensions/IERC20Permit.sol | 90 ++
.../contracts/token/ERC20/utils/SafeERC20.sol | 118 ++
.../v5.0.2/contracts/utils/Address.sol | 159 +++
.../v5.0.2/contracts/utils/Context.sol | 28 +
.../v5.0.2/contracts/utils/Pausable.sol | 119 ++
.../v5.0.2/contracts/utils/ShortStrings.sol | 123 ++
.../v5.0.2/contracts/utils/StorageSlot.sol | 135 ++
.../v5.0.2/contracts/utils/Strings.sol | 94 ++
.../contracts/utils/cryptography/ECDSA.sol | 174 +++
.../contracts/utils/cryptography/EIP712.sol | 160 +++
.../utils/cryptography/MessageHashUtils.sol | 86 ++
.../contracts/utils/introspection/ERC165.sol | 27 +
.../utils/introspection/ERC165Checker.sol | 124 ++
.../contracts/utils/introspection/IERC165.sol | 25 +
.../v5.0.2/contracts/utils/math/Math.sol | 415 ++++++
.../v5.0.2/contracts/utils/math/SafeCast.sol | 1153 +++++++++++++++++
.../contracts/utils/math/SignedMath.sol | 43 +
.../contracts/utils/structs/EnumerableMap.sol | 533 ++++++++
.../contracts/utils/structs/EnumerableSet.sol | 378 ++++++
30 files changed, 4957 insertions(+)
create mode 100644 contracts/.changeset/mean-zoos-fly.md
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/access/AccessControl.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/access/IAccessControl.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/IERC165.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/IERC20.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/IERC5267.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/draft-IERC6093.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/ERC20.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/IERC20.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/extensions/ERC20Burnable.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/extensions/IERC20Metadata.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/extensions/IERC20Permit.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/utils/SafeERC20.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Address.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Context.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Pausable.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/ShortStrings.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/StorageSlot.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Strings.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/cryptography/ECDSA.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/cryptography/EIP712.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/cryptography/MessageHashUtils.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/ERC165.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/ERC165Checker.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/IERC165.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/math/Math.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/math/SafeCast.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/math/SignedMath.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/structs/EnumerableMap.sol
create mode 100644 contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/structs/EnumerableSet.sol
diff --git a/contracts/.changeset/mean-zoos-fly.md b/contracts/.changeset/mean-zoos-fly.md
new file mode 100644
index 00000000000..72eb98198d0
--- /dev/null
+++ b/contracts/.changeset/mean-zoos-fly.md
@@ -0,0 +1,5 @@
+---
+'@chainlink/contracts': patch
+---
+
+add OZ v0.5 contracts
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/access/AccessControl.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/access/AccessControl.sol
new file mode 100644
index 00000000000..3e3341e9cfd
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/access/AccessControl.sol
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (access/AccessControl.sol)
+
+pragma solidity ^0.8.20;
+
+import {IAccessControl} from "./IAccessControl.sol";
+import {Context} from "../utils/Context.sol";
+import {ERC165} from "../utils/introspection/ERC165.sol";
+
+/**
+ * @dev Contract module that allows children to implement role-based access
+ * control mechanisms. This is a lightweight version that doesn't allow enumerating role
+ * members except through off-chain means by accessing the contract event logs. Some
+ * applications may benefit from on-chain enumerability, for those cases see
+ * {AccessControlEnumerable}.
+ *
+ * Roles are referred to by their `bytes32` identifier. These should be exposed
+ * in the external API and be unique. The best way to achieve this is by
+ * using `public constant` hash digests:
+ *
+ * ```solidity
+ * bytes32 public constant MY_ROLE = keccak256("MY_ROLE");
+ * ```
+ *
+ * Roles can be used to represent a set of permissions. To restrict access to a
+ * function call, use {hasRole}:
+ *
+ * ```solidity
+ * function foo() public {
+ * require(hasRole(MY_ROLE, msg.sender));
+ * ...
+ * }
+ * ```
+ *
+ * Roles can be granted and revoked dynamically via the {grantRole} and
+ * {revokeRole} functions. Each role has an associated admin role, and only
+ * accounts that have a role's admin role can call {grantRole} and {revokeRole}.
+ *
+ * By default, the admin role for all roles is `DEFAULT_ADMIN_ROLE`, which means
+ * that only accounts with this role will be able to grant or revoke other
+ * roles. More complex role relationships can be created by using
+ * {_setRoleAdmin}.
+ *
+ * WARNING: The `DEFAULT_ADMIN_ROLE` is also its own admin: it has permission to
+ * grant and revoke this role. Extra precautions should be taken to secure
+ * accounts that have been granted it. We recommend using {AccessControlDefaultAdminRules}
+ * to enforce additional security measures for this role.
+ */
+abstract contract AccessControl is Context, IAccessControl, ERC165 {
+ struct RoleData {
+ mapping(address account => bool) hasRole;
+ bytes32 adminRole;
+ }
+
+ mapping(bytes32 role => RoleData) private _roles;
+
+ bytes32 public constant DEFAULT_ADMIN_ROLE = 0x00;
+
+ /**
+ * @dev Modifier that checks that an account has a specific role. Reverts
+ * with an {AccessControlUnauthorizedAccount} error including the required role.
+ */
+ modifier onlyRole(bytes32 role) {
+ _checkRole(role);
+ _;
+ }
+
+ /**
+ * @dev See {IERC165-supportsInterface}.
+ */
+ function supportsInterface(bytes4 interfaceId) public view virtual override returns (bool) {
+ return interfaceId == type(IAccessControl).interfaceId || super.supportsInterface(interfaceId);
+ }
+
+ /**
+ * @dev Returns `true` if `account` has been granted `role`.
+ */
+ function hasRole(bytes32 role, address account) public view virtual returns (bool) {
+ return _roles[role].hasRole[account];
+ }
+
+ /**
+ * @dev Reverts with an {AccessControlUnauthorizedAccount} error if `_msgSender()`
+ * is missing `role`. Overriding this function changes the behavior of the {onlyRole} modifier.
+ */
+ function _checkRole(bytes32 role) internal view virtual {
+ _checkRole(role, _msgSender());
+ }
+
+ /**
+ * @dev Reverts with an {AccessControlUnauthorizedAccount} error if `account`
+ * is missing `role`.
+ */
+ function _checkRole(bytes32 role, address account) internal view virtual {
+ if (!hasRole(role, account)) {
+ revert AccessControlUnauthorizedAccount(account, role);
+ }
+ }
+
+ /**
+ * @dev Returns the admin role that controls `role`. See {grantRole} and
+ * {revokeRole}.
+ *
+ * To change a role's admin, use {_setRoleAdmin}.
+ */
+ function getRoleAdmin(bytes32 role) public view virtual returns (bytes32) {
+ return _roles[role].adminRole;
+ }
+
+ /**
+ * @dev Grants `role` to `account`.
+ *
+ * If `account` had not been already granted `role`, emits a {RoleGranted}
+ * event.
+ *
+ * Requirements:
+ *
+ * - the caller must have ``role``'s admin role.
+ *
+ * May emit a {RoleGranted} event.
+ */
+ function grantRole(bytes32 role, address account) public virtual onlyRole(getRoleAdmin(role)) {
+ _grantRole(role, account);
+ }
+
+ /**
+ * @dev Revokes `role` from `account`.
+ *
+ * If `account` had been granted `role`, emits a {RoleRevoked} event.
+ *
+ * Requirements:
+ *
+ * - the caller must have ``role``'s admin role.
+ *
+ * May emit a {RoleRevoked} event.
+ */
+ function revokeRole(bytes32 role, address account) public virtual onlyRole(getRoleAdmin(role)) {
+ _revokeRole(role, account);
+ }
+
+ /**
+ * @dev Revokes `role` from the calling account.
+ *
+ * Roles are often managed via {grantRole} and {revokeRole}: this function's
+ * purpose is to provide a mechanism for accounts to lose their privileges
+ * if they are compromised (such as when a trusted device is misplaced).
+ *
+ * If the calling account had been revoked `role`, emits a {RoleRevoked}
+ * event.
+ *
+ * Requirements:
+ *
+ * - the caller must be `callerConfirmation`.
+ *
+ * May emit a {RoleRevoked} event.
+ */
+ function renounceRole(bytes32 role, address callerConfirmation) public virtual {
+ if (callerConfirmation != _msgSender()) {
+ revert AccessControlBadConfirmation();
+ }
+
+ _revokeRole(role, callerConfirmation);
+ }
+
+ /**
+ * @dev Sets `adminRole` as ``role``'s admin role.
+ *
+ * Emits a {RoleAdminChanged} event.
+ */
+ function _setRoleAdmin(bytes32 role, bytes32 adminRole) internal virtual {
+ bytes32 previousAdminRole = getRoleAdmin(role);
+ _roles[role].adminRole = adminRole;
+ emit RoleAdminChanged(role, previousAdminRole, adminRole);
+ }
+
+ /**
+ * @dev Attempts to grant `role` to `account` and returns a boolean indicating if `role` was granted.
+ *
+ * Internal function without access restriction.
+ *
+ * May emit a {RoleGranted} event.
+ */
+ function _grantRole(bytes32 role, address account) internal virtual returns (bool) {
+ if (!hasRole(role, account)) {
+ _roles[role].hasRole[account] = true;
+ emit RoleGranted(role, account, _msgSender());
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ /**
+ * @dev Attempts to revoke `role` to `account` and returns a boolean indicating if `role` was revoked.
+ *
+ * Internal function without access restriction.
+ *
+ * May emit a {RoleRevoked} event.
+ */
+ function _revokeRole(bytes32 role, address account) internal virtual returns (bool) {
+ if (hasRole(role, account)) {
+ _roles[role].hasRole[account] = false;
+ emit RoleRevoked(role, account, _msgSender());
+ return true;
+ } else {
+ return false;
+ }
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/access/IAccessControl.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/access/IAccessControl.sol
new file mode 100644
index 00000000000..2ac89ca7356
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/access/IAccessControl.sol
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (access/IAccessControl.sol)
+
+pragma solidity ^0.8.20;
+
+/**
+ * @dev External interface of AccessControl declared to support ERC165 detection.
+ */
+interface IAccessControl {
+ /**
+ * @dev The `account` is missing a role.
+ */
+ error AccessControlUnauthorizedAccount(address account, bytes32 neededRole);
+
+ /**
+ * @dev The caller of a function is not the expected one.
+ *
+ * NOTE: Don't confuse with {AccessControlUnauthorizedAccount}.
+ */
+ error AccessControlBadConfirmation();
+
+ /**
+ * @dev Emitted when `newAdminRole` is set as ``role``'s admin role, replacing `previousAdminRole`
+ *
+ * `DEFAULT_ADMIN_ROLE` is the starting admin for all roles, despite
+ * {RoleAdminChanged} not being emitted signaling this.
+ */
+ event RoleAdminChanged(bytes32 indexed role, bytes32 indexed previousAdminRole, bytes32 indexed newAdminRole);
+
+ /**
+ * @dev Emitted when `account` is granted `role`.
+ *
+ * `sender` is the account that originated the contract call, an admin role
+ * bearer except when using {AccessControl-_setupRole}.
+ */
+ event RoleGranted(bytes32 indexed role, address indexed account, address indexed sender);
+
+ /**
+ * @dev Emitted when `account` is revoked `role`.
+ *
+ * `sender` is the account that originated the contract call:
+ * - if using `revokeRole`, it is the admin role bearer
+ * - if using `renounceRole`, it is the role bearer (i.e. `account`)
+ */
+ event RoleRevoked(bytes32 indexed role, address indexed account, address indexed sender);
+
+ /**
+ * @dev Returns `true` if `account` has been granted `role`.
+ */
+ function hasRole(bytes32 role, address account) external view returns (bool);
+
+ /**
+ * @dev Returns the admin role that controls `role`. See {grantRole} and
+ * {revokeRole}.
+ *
+ * To change a role's admin, use {AccessControl-_setRoleAdmin}.
+ */
+ function getRoleAdmin(bytes32 role) external view returns (bytes32);
+
+ /**
+ * @dev Grants `role` to `account`.
+ *
+ * If `account` had not been already granted `role`, emits a {RoleGranted}
+ * event.
+ *
+ * Requirements:
+ *
+ * - the caller must have ``role``'s admin role.
+ */
+ function grantRole(bytes32 role, address account) external;
+
+ /**
+ * @dev Revokes `role` from `account`.
+ *
+ * If `account` had been granted `role`, emits a {RoleRevoked} event.
+ *
+ * Requirements:
+ *
+ * - the caller must have ``role``'s admin role.
+ */
+ function revokeRole(bytes32 role, address account) external;
+
+ /**
+ * @dev Revokes `role` from the calling account.
+ *
+ * Roles are often managed via {grantRole} and {revokeRole}: this function's
+ * purpose is to provide a mechanism for accounts to lose their privileges
+ * if they are compromised (such as when a trusted device is misplaced).
+ *
+ * If the calling account had been granted `role`, emits a {RoleRevoked}
+ * event.
+ *
+ * Requirements:
+ *
+ * - the caller must be `callerConfirmation`.
+ */
+ function renounceRole(bytes32 role, address callerConfirmation) external;
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/IERC165.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/IERC165.sol
new file mode 100644
index 00000000000..944dd0d5912
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/IERC165.sol
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (interfaces/IERC165.sol)
+
+pragma solidity ^0.8.20;
+
+import {IERC165} from "../utils/introspection/IERC165.sol";
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/IERC20.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/IERC20.sol
new file mode 100644
index 00000000000..21d5a413275
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/IERC20.sol
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (interfaces/IERC20.sol)
+
+pragma solidity ^0.8.20;
+
+import {IERC20} from "../token/ERC20/IERC20.sol";
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/IERC5267.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/IERC5267.sol
new file mode 100644
index 00000000000..47a9fd58855
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/IERC5267.sol
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (interfaces/IERC5267.sol)
+
+pragma solidity ^0.8.20;
+
+interface IERC5267 {
+ /**
+ * @dev MAY be emitted to signal that the domain could have changed.
+ */
+ event EIP712DomainChanged();
+
+ /**
+ * @dev returns the fields and values that describe the domain separator used by this contract for EIP-712
+ * signature.
+ */
+ function eip712Domain()
+ external
+ view
+ returns (
+ bytes1 fields,
+ string memory name,
+ string memory version,
+ uint256 chainId,
+ address verifyingContract,
+ bytes32 salt,
+ uint256[] memory extensions
+ );
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/draft-IERC6093.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/draft-IERC6093.sol
new file mode 100644
index 00000000000..f6990e607c9
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/interfaces/draft-IERC6093.sol
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (interfaces/draft-IERC6093.sol)
+pragma solidity ^0.8.20;
+
+/**
+ * @dev Standard ERC20 Errors
+ * Interface of the https://eips.ethereum.org/EIPS/eip-6093[ERC-6093] custom errors for ERC20 tokens.
+ */
+interface IERC20Errors {
+ /**
+ * @dev Indicates an error related to the current `balance` of a `sender`. Used in transfers.
+ * @param sender Address whose tokens are being transferred.
+ * @param balance Current balance for the interacting account.
+ * @param needed Minimum amount required to perform a transfer.
+ */
+ error ERC20InsufficientBalance(address sender, uint256 balance, uint256 needed);
+
+ /**
+ * @dev Indicates a failure with the token `sender`. Used in transfers.
+ * @param sender Address whose tokens are being transferred.
+ */
+ error ERC20InvalidSender(address sender);
+
+ /**
+ * @dev Indicates a failure with the token `receiver`. Used in transfers.
+ * @param receiver Address to which tokens are being transferred.
+ */
+ error ERC20InvalidReceiver(address receiver);
+
+ /**
+ * @dev Indicates a failure with the `spender`’s `allowance`. Used in transfers.
+ * @param spender Address that may be allowed to operate on tokens without being their owner.
+ * @param allowance Amount of tokens a `spender` is allowed to operate with.
+ * @param needed Minimum amount required to perform a transfer.
+ */
+ error ERC20InsufficientAllowance(address spender, uint256 allowance, uint256 needed);
+
+ /**
+ * @dev Indicates a failure with the `approver` of a token to be approved. Used in approvals.
+ * @param approver Address initiating an approval operation.
+ */
+ error ERC20InvalidApprover(address approver);
+
+ /**
+ * @dev Indicates a failure with the `spender` to be approved. Used in approvals.
+ * @param spender Address that may be allowed to operate on tokens without being their owner.
+ */
+ error ERC20InvalidSpender(address spender);
+}
+
+/**
+ * @dev Standard ERC721 Errors
+ * Interface of the https://eips.ethereum.org/EIPS/eip-6093[ERC-6093] custom errors for ERC721 tokens.
+ */
+interface IERC721Errors {
+ /**
+ * @dev Indicates that an address can't be an owner. For example, `address(0)` is a forbidden owner in EIP-20.
+ * Used in balance queries.
+ * @param owner Address of the current owner of a token.
+ */
+ error ERC721InvalidOwner(address owner);
+
+ /**
+ * @dev Indicates a `tokenId` whose `owner` is the zero address.
+ * @param tokenId Identifier number of a token.
+ */
+ error ERC721NonexistentToken(uint256 tokenId);
+
+ /**
+ * @dev Indicates an error related to the ownership over a particular token. Used in transfers.
+ * @param sender Address whose tokens are being transferred.
+ * @param tokenId Identifier number of a token.
+ * @param owner Address of the current owner of a token.
+ */
+ error ERC721IncorrectOwner(address sender, uint256 tokenId, address owner);
+
+ /**
+ * @dev Indicates a failure with the token `sender`. Used in transfers.
+ * @param sender Address whose tokens are being transferred.
+ */
+ error ERC721InvalidSender(address sender);
+
+ /**
+ * @dev Indicates a failure with the token `receiver`. Used in transfers.
+ * @param receiver Address to which tokens are being transferred.
+ */
+ error ERC721InvalidReceiver(address receiver);
+
+ /**
+ * @dev Indicates a failure with the `operator`’s approval. Used in transfers.
+ * @param operator Address that may be allowed to operate on tokens without being their owner.
+ * @param tokenId Identifier number of a token.
+ */
+ error ERC721InsufficientApproval(address operator, uint256 tokenId);
+
+ /**
+ * @dev Indicates a failure with the `approver` of a token to be approved. Used in approvals.
+ * @param approver Address initiating an approval operation.
+ */
+ error ERC721InvalidApprover(address approver);
+
+ /**
+ * @dev Indicates a failure with the `operator` to be approved. Used in approvals.
+ * @param operator Address that may be allowed to operate on tokens without being their owner.
+ */
+ error ERC721InvalidOperator(address operator);
+}
+
+/**
+ * @dev Standard ERC1155 Errors
+ * Interface of the https://eips.ethereum.org/EIPS/eip-6093[ERC-6093] custom errors for ERC1155 tokens.
+ */
+interface IERC1155Errors {
+ /**
+ * @dev Indicates an error related to the current `balance` of a `sender`. Used in transfers.
+ * @param sender Address whose tokens are being transferred.
+ * @param balance Current balance for the interacting account.
+ * @param needed Minimum amount required to perform a transfer.
+ * @param tokenId Identifier number of a token.
+ */
+ error ERC1155InsufficientBalance(address sender, uint256 balance, uint256 needed, uint256 tokenId);
+
+ /**
+ * @dev Indicates a failure with the token `sender`. Used in transfers.
+ * @param sender Address whose tokens are being transferred.
+ */
+ error ERC1155InvalidSender(address sender);
+
+ /**
+ * @dev Indicates a failure with the token `receiver`. Used in transfers.
+ * @param receiver Address to which tokens are being transferred.
+ */
+ error ERC1155InvalidReceiver(address receiver);
+
+ /**
+ * @dev Indicates a failure with the `operator`’s approval. Used in transfers.
+ * @param operator Address that may be allowed to operate on tokens without being their owner.
+ * @param owner Address of the current owner of a token.
+ */
+ error ERC1155MissingApprovalForAll(address operator, address owner);
+
+ /**
+ * @dev Indicates a failure with the `approver` of a token to be approved. Used in approvals.
+ * @param approver Address initiating an approval operation.
+ */
+ error ERC1155InvalidApprover(address approver);
+
+ /**
+ * @dev Indicates a failure with the `operator` to be approved. Used in approvals.
+ * @param operator Address that may be allowed to operate on tokens without being their owner.
+ */
+ error ERC1155InvalidOperator(address operator);
+
+ /**
+ * @dev Indicates an array length mismatch between ids and values in a safeBatchTransferFrom operation.
+ * Used in batch transfers.
+ * @param idsLength Length of the array of token identifiers
+ * @param valuesLength Length of the array of token amounts
+ */
+ error ERC1155InvalidArrayLength(uint256 idsLength, uint256 valuesLength);
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/ERC20.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/ERC20.sol
new file mode 100644
index 00000000000..1fde5279d00
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/ERC20.sol
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (token/ERC20/ERC20.sol)
+
+pragma solidity ^0.8.20;
+
+import {IERC20} from "./IERC20.sol";
+import {IERC20Metadata} from "./extensions/IERC20Metadata.sol";
+import {Context} from "../../utils/Context.sol";
+import {IERC20Errors} from "../../interfaces/draft-IERC6093.sol";
+
+/**
+ * @dev Implementation of the {IERC20} interface.
+ *
+ * This implementation is agnostic to the way tokens are created. This means
+ * that a supply mechanism has to be added in a derived contract using {_mint}.
+ *
+ * TIP: For a detailed writeup see our guide
+ * https://forum.openzeppelin.com/t/how-to-implement-erc20-supply-mechanisms/226[How
+ * to implement supply mechanisms].
+ *
+ * The default value of {decimals} is 18. To change this, you should override
+ * this function so it returns a different value.
+ *
+ * We have followed general OpenZeppelin Contracts guidelines: functions revert
+ * instead returning `false` on failure. This behavior is nonetheless
+ * conventional and does not conflict with the expectations of ERC20
+ * applications.
+ *
+ * Additionally, an {Approval} event is emitted on calls to {transferFrom}.
+ * This allows applications to reconstruct the allowance for all accounts just
+ * by listening to said events. Other implementations of the EIP may not emit
+ * these events, as it isn't required by the specification.
+ */
+abstract contract ERC20 is Context, IERC20, IERC20Metadata, IERC20Errors {
+ mapping(address account => uint256) private _balances;
+
+ mapping(address account => mapping(address spender => uint256)) private _allowances;
+
+ uint256 private _totalSupply;
+
+ string private _name;
+ string private _symbol;
+
+ /**
+ * @dev Sets the values for {name} and {symbol}.
+ *
+ * All two of these values are immutable: they can only be set once during
+ * construction.
+ */
+ constructor(string memory name_, string memory symbol_) {
+ _name = name_;
+ _symbol = symbol_;
+ }
+
+ /**
+ * @dev Returns the name of the token.
+ */
+ function name() public view virtual returns (string memory) {
+ return _name;
+ }
+
+ /**
+ * @dev Returns the symbol of the token, usually a shorter version of the
+ * name.
+ */
+ function symbol() public view virtual returns (string memory) {
+ return _symbol;
+ }
+
+ /**
+ * @dev Returns the number of decimals used to get its user representation.
+ * For example, if `decimals` equals `2`, a balance of `505` tokens should
+ * be displayed to a user as `5.05` (`505 / 10 ** 2`).
+ *
+ * Tokens usually opt for a value of 18, imitating the relationship between
+ * Ether and Wei. This is the default value returned by this function, unless
+ * it's overridden.
+ *
+ * NOTE: This information is only used for _display_ purposes: it in
+ * no way affects any of the arithmetic of the contract, including
+ * {IERC20-balanceOf} and {IERC20-transfer}.
+ */
+ function decimals() public view virtual returns (uint8) {
+ return 18;
+ }
+
+ /**
+ * @dev See {IERC20-totalSupply}.
+ */
+ function totalSupply() public view virtual returns (uint256) {
+ return _totalSupply;
+ }
+
+ /**
+ * @dev See {IERC20-balanceOf}.
+ */
+ function balanceOf(address account) public view virtual returns (uint256) {
+ return _balances[account];
+ }
+
+ /**
+ * @dev See {IERC20-transfer}.
+ *
+ * Requirements:
+ *
+ * - `to` cannot be the zero address.
+ * - the caller must have a balance of at least `value`.
+ */
+ function transfer(address to, uint256 value) public virtual returns (bool) {
+ address owner = _msgSender();
+ _transfer(owner, to, value);
+ return true;
+ }
+
+ /**
+ * @dev See {IERC20-allowance}.
+ */
+ function allowance(address owner, address spender) public view virtual returns (uint256) {
+ return _allowances[owner][spender];
+ }
+
+ /**
+ * @dev See {IERC20-approve}.
+ *
+ * NOTE: If `value` is the maximum `uint256`, the allowance is not updated on
+ * `transferFrom`. This is semantically equivalent to an infinite approval.
+ *
+ * Requirements:
+ *
+ * - `spender` cannot be the zero address.
+ */
+ function approve(address spender, uint256 value) public virtual returns (bool) {
+ address owner = _msgSender();
+ _approve(owner, spender, value);
+ return true;
+ }
+
+ /**
+ * @dev See {IERC20-transferFrom}.
+ *
+ * Emits an {Approval} event indicating the updated allowance. This is not
+ * required by the EIP. See the note at the beginning of {ERC20}.
+ *
+ * NOTE: Does not update the allowance if the current allowance
+ * is the maximum `uint256`.
+ *
+ * Requirements:
+ *
+ * - `from` and `to` cannot be the zero address.
+ * - `from` must have a balance of at least `value`.
+ * - the caller must have allowance for ``from``'s tokens of at least
+ * `value`.
+ */
+ function transferFrom(address from, address to, uint256 value) public virtual returns (bool) {
+ address spender = _msgSender();
+ _spendAllowance(from, spender, value);
+ _transfer(from, to, value);
+ return true;
+ }
+
+ /**
+ * @dev Moves a `value` amount of tokens from `from` to `to`.
+ *
+ * This internal function is equivalent to {transfer}, and can be used to
+ * e.g. implement automatic token fees, slashing mechanisms, etc.
+ *
+ * Emits a {Transfer} event.
+ *
+ * NOTE: This function is not virtual, {_update} should be overridden instead.
+ */
+ function _transfer(address from, address to, uint256 value) internal {
+ if (from == address(0)) {
+ revert ERC20InvalidSender(address(0));
+ }
+ if (to == address(0)) {
+ revert ERC20InvalidReceiver(address(0));
+ }
+ _update(from, to, value);
+ }
+
+ /**
+ * @dev Transfers a `value` amount of tokens from `from` to `to`, or alternatively mints (or burns) if `from`
+ * (or `to`) is the zero address. All customizations to transfers, mints, and burns should be done by overriding
+ * this function.
+ *
+ * Emits a {Transfer} event.
+ */
+ function _update(address from, address to, uint256 value) internal virtual {
+ if (from == address(0)) {
+ // Overflow check required: The rest of the code assumes that totalSupply never overflows
+ _totalSupply += value;
+ } else {
+ uint256 fromBalance = _balances[from];
+ if (fromBalance < value) {
+ revert ERC20InsufficientBalance(from, fromBalance, value);
+ }
+ unchecked {
+ // Overflow not possible: value <= fromBalance <= totalSupply.
+ _balances[from] = fromBalance - value;
+ }
+ }
+
+ if (to == address(0)) {
+ unchecked {
+ // Overflow not possible: value <= totalSupply or value <= fromBalance <= totalSupply.
+ _totalSupply -= value;
+ }
+ } else {
+ unchecked {
+ // Overflow not possible: balance + value is at most totalSupply, which we know fits into a uint256.
+ _balances[to] += value;
+ }
+ }
+
+ emit Transfer(from, to, value);
+ }
+
+ /**
+ * @dev Creates a `value` amount of tokens and assigns them to `account`, by transferring it from address(0).
+ * Relies on the `_update` mechanism
+ *
+ * Emits a {Transfer} event with `from` set to the zero address.
+ *
+ * NOTE: This function is not virtual, {_update} should be overridden instead.
+ */
+ function _mint(address account, uint256 value) internal {
+ if (account == address(0)) {
+ revert ERC20InvalidReceiver(address(0));
+ }
+ _update(address(0), account, value);
+ }
+
+ /**
+ * @dev Destroys a `value` amount of tokens from `account`, lowering the total supply.
+ * Relies on the `_update` mechanism.
+ *
+ * Emits a {Transfer} event with `to` set to the zero address.
+ *
+ * NOTE: This function is not virtual, {_update} should be overridden instead
+ */
+ function _burn(address account, uint256 value) internal {
+ if (account == address(0)) {
+ revert ERC20InvalidSender(address(0));
+ }
+ _update(account, address(0), value);
+ }
+
+ /**
+ * @dev Sets `value` as the allowance of `spender` over the `owner` s tokens.
+ *
+ * This internal function is equivalent to `approve`, and can be used to
+ * e.g. set automatic allowances for certain subsystems, etc.
+ *
+ * Emits an {Approval} event.
+ *
+ * Requirements:
+ *
+ * - `owner` cannot be the zero address.
+ * - `spender` cannot be the zero address.
+ *
+ * Overrides to this logic should be done to the variant with an additional `bool emitEvent` argument.
+ */
+ function _approve(address owner, address spender, uint256 value) internal {
+ _approve(owner, spender, value, true);
+ }
+
+ /**
+ * @dev Variant of {_approve} with an optional flag to enable or disable the {Approval} event.
+ *
+ * By default (when calling {_approve}) the flag is set to true. On the other hand, approval changes made by
+ * `_spendAllowance` during the `transferFrom` operation set the flag to false. This saves gas by not emitting any
+ * `Approval` event during `transferFrom` operations.
+ *
+ * Anyone who wishes to continue emitting `Approval` events on the`transferFrom` operation can force the flag to
+ * true using the following override:
+ * ```
+ * function _approve(address owner, address spender, uint256 value, bool) internal virtual override {
+ * super._approve(owner, spender, value, true);
+ * }
+ * ```
+ *
+ * Requirements are the same as {_approve}.
+ */
+ function _approve(address owner, address spender, uint256 value, bool emitEvent) internal virtual {
+ if (owner == address(0)) {
+ revert ERC20InvalidApprover(address(0));
+ }
+ if (spender == address(0)) {
+ revert ERC20InvalidSpender(address(0));
+ }
+ _allowances[owner][spender] = value;
+ if (emitEvent) {
+ emit Approval(owner, spender, value);
+ }
+ }
+
+ /**
+ * @dev Updates `owner` s allowance for `spender` based on spent `value`.
+ *
+ * Does not update the allowance value in case of infinite allowance.
+ * Revert if not enough allowance is available.
+ *
+ * Does not emit an {Approval} event.
+ */
+ function _spendAllowance(address owner, address spender, uint256 value) internal virtual {
+ uint256 currentAllowance = allowance(owner, spender);
+ if (currentAllowance != type(uint256).max) {
+ if (currentAllowance < value) {
+ revert ERC20InsufficientAllowance(spender, currentAllowance, value);
+ }
+ unchecked {
+ _approve(owner, spender, currentAllowance - value, false);
+ }
+ }
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/IERC20.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/IERC20.sol
new file mode 100644
index 00000000000..db01cf4c751
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/IERC20.sol
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (token/ERC20/IERC20.sol)
+
+pragma solidity ^0.8.20;
+
+/**
+ * @dev Interface of the ERC20 standard as defined in the EIP.
+ */
+interface IERC20 {
+ /**
+ * @dev Emitted when `value` tokens are moved from one account (`from`) to
+ * another (`to`).
+ *
+ * Note that `value` may be zero.
+ */
+ event Transfer(address indexed from, address indexed to, uint256 value);
+
+ /**
+ * @dev Emitted when the allowance of a `spender` for an `owner` is set by
+ * a call to {approve}. `value` is the new allowance.
+ */
+ event Approval(address indexed owner, address indexed spender, uint256 value);
+
+ /**
+ * @dev Returns the value of tokens in existence.
+ */
+ function totalSupply() external view returns (uint256);
+
+ /**
+ * @dev Returns the value of tokens owned by `account`.
+ */
+ function balanceOf(address account) external view returns (uint256);
+
+ /**
+ * @dev Moves a `value` amount of tokens from the caller's account to `to`.
+ *
+ * Returns a boolean value indicating whether the operation succeeded.
+ *
+ * Emits a {Transfer} event.
+ */
+ function transfer(address to, uint256 value) external returns (bool);
+
+ /**
+ * @dev Returns the remaining number of tokens that `spender` will be
+ * allowed to spend on behalf of `owner` through {transferFrom}. This is
+ * zero by default.
+ *
+ * This value changes when {approve} or {transferFrom} are called.
+ */
+ function allowance(address owner, address spender) external view returns (uint256);
+
+ /**
+ * @dev Sets a `value` amount of tokens as the allowance of `spender` over the
+ * caller's tokens.
+ *
+ * Returns a boolean value indicating whether the operation succeeded.
+ *
+ * IMPORTANT: Beware that changing an allowance with this method brings the risk
+ * that someone may use both the old and the new allowance by unfortunate
+ * transaction ordering. One possible solution to mitigate this race
+ * condition is to first reduce the spender's allowance to 0 and set the
+ * desired value afterwards:
+ * https://github.com/ethereum/EIPs/issues/20#issuecomment-263524729
+ *
+ * Emits an {Approval} event.
+ */
+ function approve(address spender, uint256 value) external returns (bool);
+
+ /**
+ * @dev Moves a `value` amount of tokens from `from` to `to` using the
+ * allowance mechanism. `value` is then deducted from the caller's
+ * allowance.
+ *
+ * Returns a boolean value indicating whether the operation succeeded.
+ *
+ * Emits a {Transfer} event.
+ */
+ function transferFrom(address from, address to, uint256 value) external returns (bool);
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/extensions/ERC20Burnable.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/extensions/ERC20Burnable.sol
new file mode 100644
index 00000000000..4d482d8ec83
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/extensions/ERC20Burnable.sol
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (token/ERC20/extensions/ERC20Burnable.sol)
+
+pragma solidity ^0.8.20;
+
+import {ERC20} from "../ERC20.sol";
+import {Context} from "../../../utils/Context.sol";
+
+/**
+ * @dev Extension of {ERC20} that allows token holders to destroy both their own
+ * tokens and those that they have an allowance for, in a way that can be
+ * recognized off-chain (via event analysis).
+ */
+abstract contract ERC20Burnable is Context, ERC20 {
+ /**
+ * @dev Destroys a `value` amount of tokens from the caller.
+ *
+ * See {ERC20-_burn}.
+ */
+ function burn(uint256 value) public virtual {
+ _burn(_msgSender(), value);
+ }
+
+ /**
+ * @dev Destroys a `value` amount of tokens from `account`, deducting from
+ * the caller's allowance.
+ *
+ * See {ERC20-_burn} and {ERC20-allowance}.
+ *
+ * Requirements:
+ *
+ * - the caller must have allowance for ``accounts``'s tokens of at least
+ * `value`.
+ */
+ function burnFrom(address account, uint256 value) public virtual {
+ _spendAllowance(account, _msgSender(), value);
+ _burn(account, value);
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/extensions/IERC20Metadata.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/extensions/IERC20Metadata.sol
new file mode 100644
index 00000000000..1a38cba3e06
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/extensions/IERC20Metadata.sol
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (token/ERC20/extensions/IERC20Metadata.sol)
+
+pragma solidity ^0.8.20;
+
+import {IERC20} from "../IERC20.sol";
+
+/**
+ * @dev Interface for the optional metadata functions from the ERC20 standard.
+ */
+interface IERC20Metadata is IERC20 {
+ /**
+ * @dev Returns the name of the token.
+ */
+ function name() external view returns (string memory);
+
+ /**
+ * @dev Returns the symbol of the token.
+ */
+ function symbol() external view returns (string memory);
+
+ /**
+ * @dev Returns the decimals places of the token.
+ */
+ function decimals() external view returns (uint8);
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/extensions/IERC20Permit.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/extensions/IERC20Permit.sol
new file mode 100644
index 00000000000..5af48101ab8
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/extensions/IERC20Permit.sol
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (token/ERC20/extensions/IERC20Permit.sol)
+
+pragma solidity ^0.8.20;
+
+/**
+ * @dev Interface of the ERC20 Permit extension allowing approvals to be made via signatures, as defined in
+ * https://eips.ethereum.org/EIPS/eip-2612[EIP-2612].
+ *
+ * Adds the {permit} method, which can be used to change an account's ERC20 allowance (see {IERC20-allowance}) by
+ * presenting a message signed by the account. By not relying on {IERC20-approve}, the token holder account doesn't
+ * need to send a transaction, and thus is not required to hold Ether at all.
+ *
+ * ==== Security Considerations
+ *
+ * There are two important considerations concerning the use of `permit`. The first is that a valid permit signature
+ * expresses an allowance, and it should not be assumed to convey additional meaning. In particular, it should not be
+ * considered as an intention to spend the allowance in any specific way. The second is that because permits have
+ * built-in replay protection and can be submitted by anyone, they can be frontrun. A protocol that uses permits should
+ * take this into consideration and allow a `permit` call to fail. Combining these two aspects, a pattern that may be
+ * generally recommended is:
+ *
+ * ```solidity
+ * function doThingWithPermit(..., uint256 value, uint256 deadline, uint8 v, bytes32 r, bytes32 s) public {
+ * try token.permit(msg.sender, address(this), value, deadline, v, r, s) {} catch {}
+ * doThing(..., value);
+ * }
+ *
+ * function doThing(..., uint256 value) public {
+ * token.safeTransferFrom(msg.sender, address(this), value);
+ * ...
+ * }
+ * ```
+ *
+ * Observe that: 1) `msg.sender` is used as the owner, leaving no ambiguity as to the signer intent, and 2) the use of
+ * `try/catch` allows the permit to fail and makes the code tolerant to frontrunning. (See also
+ * {SafeERC20-safeTransferFrom}).
+ *
+ * Additionally, note that smart contract wallets (such as Argent or Safe) are not able to produce permit signatures, so
+ * contracts should have entry points that don't rely on permit.
+ */
+interface IERC20Permit {
+ /**
+ * @dev Sets `value` as the allowance of `spender` over ``owner``'s tokens,
+ * given ``owner``'s signed approval.
+ *
+ * IMPORTANT: The same issues {IERC20-approve} has related to transaction
+ * ordering also apply here.
+ *
+ * Emits an {Approval} event.
+ *
+ * Requirements:
+ *
+ * - `spender` cannot be the zero address.
+ * - `deadline` must be a timestamp in the future.
+ * - `v`, `r` and `s` must be a valid `secp256k1` signature from `owner`
+ * over the EIP712-formatted function arguments.
+ * - the signature must use ``owner``'s current nonce (see {nonces}).
+ *
+ * For more information on the signature format, see the
+ * https://eips.ethereum.org/EIPS/eip-2612#specification[relevant EIP
+ * section].
+ *
+ * CAUTION: See Security Considerations above.
+ */
+ function permit(
+ address owner,
+ address spender,
+ uint256 value,
+ uint256 deadline,
+ uint8 v,
+ bytes32 r,
+ bytes32 s
+ ) external;
+
+ /**
+ * @dev Returns the current nonce for `owner`. This value must be
+ * included whenever a signature is generated for {permit}.
+ *
+ * Every successful call to {permit} increases ``owner``'s nonce by one. This
+ * prevents a signature from being used multiple times.
+ */
+ function nonces(address owner) external view returns (uint256);
+
+ /**
+ * @dev Returns the domain separator used in the encoding of the signature for {permit}, as defined by {EIP712}.
+ */
+ // solhint-disable-next-line func-name-mixedcase
+ function DOMAIN_SEPARATOR() external view returns (bytes32);
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/utils/SafeERC20.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/utils/SafeERC20.sol
new file mode 100644
index 00000000000..bb65709b46b
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/token/ERC20/utils/SafeERC20.sol
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (token/ERC20/utils/SafeERC20.sol)
+
+pragma solidity ^0.8.20;
+
+import {IERC20} from "../IERC20.sol";
+import {IERC20Permit} from "../extensions/IERC20Permit.sol";
+import {Address} from "../../../utils/Address.sol";
+
+/**
+ * @title SafeERC20
+ * @dev Wrappers around ERC20 operations that throw on failure (when the token
+ * contract returns false). Tokens that return no value (and instead revert or
+ * throw on failure) are also supported, non-reverting calls are assumed to be
+ * successful.
+ * To use this library you can add a `using SafeERC20 for IERC20;` statement to your contract,
+ * which allows you to call the safe operations as `token.safeTransfer(...)`, etc.
+ */
+library SafeERC20 {
+ using Address for address;
+
+ /**
+ * @dev An operation with an ERC20 token failed.
+ */
+ error SafeERC20FailedOperation(address token);
+
+ /**
+ * @dev Indicates a failed `decreaseAllowance` request.
+ */
+ error SafeERC20FailedDecreaseAllowance(address spender, uint256 currentAllowance, uint256 requestedDecrease);
+
+ /**
+ * @dev Transfer `value` amount of `token` from the calling contract to `to`. If `token` returns no value,
+ * non-reverting calls are assumed to be successful.
+ */
+ function safeTransfer(IERC20 token, address to, uint256 value) internal {
+ _callOptionalReturn(token, abi.encodeCall(token.transfer, (to, value)));
+ }
+
+ /**
+ * @dev Transfer `value` amount of `token` from `from` to `to`, spending the approval given by `from` to the
+ * calling contract. If `token` returns no value, non-reverting calls are assumed to be successful.
+ */
+ function safeTransferFrom(IERC20 token, address from, address to, uint256 value) internal {
+ _callOptionalReturn(token, abi.encodeCall(token.transferFrom, (from, to, value)));
+ }
+
+ /**
+ * @dev Increase the calling contract's allowance toward `spender` by `value`. If `token` returns no value,
+ * non-reverting calls are assumed to be successful.
+ */
+ function safeIncreaseAllowance(IERC20 token, address spender, uint256 value) internal {
+ uint256 oldAllowance = token.allowance(address(this), spender);
+ forceApprove(token, spender, oldAllowance + value);
+ }
+
+ /**
+ * @dev Decrease the calling contract's allowance toward `spender` by `requestedDecrease`. If `token` returns no
+ * value, non-reverting calls are assumed to be successful.
+ */
+ function safeDecreaseAllowance(IERC20 token, address spender, uint256 requestedDecrease) internal {
+ unchecked {
+ uint256 currentAllowance = token.allowance(address(this), spender);
+ if (currentAllowance < requestedDecrease) {
+ revert SafeERC20FailedDecreaseAllowance(spender, currentAllowance, requestedDecrease);
+ }
+ forceApprove(token, spender, currentAllowance - requestedDecrease);
+ }
+ }
+
+ /**
+ * @dev Set the calling contract's allowance toward `spender` to `value`. If `token` returns no value,
+ * non-reverting calls are assumed to be successful. Meant to be used with tokens that require the approval
+ * to be set to zero before setting it to a non-zero value, such as USDT.
+ */
+ function forceApprove(IERC20 token, address spender, uint256 value) internal {
+ bytes memory approvalCall = abi.encodeCall(token.approve, (spender, value));
+
+ if (!_callOptionalReturnBool(token, approvalCall)) {
+ _callOptionalReturn(token, abi.encodeCall(token.approve, (spender, 0)));
+ _callOptionalReturn(token, approvalCall);
+ }
+ }
+
+ /**
+ * @dev Imitates a Solidity high-level call (i.e. a regular function call to a contract), relaxing the requirement
+ * on the return value: the return value is optional (but if data is returned, it must not be false).
+ * @param token The token targeted by the call.
+ * @param data The call data (encoded using abi.encode or one of its variants).
+ */
+ function _callOptionalReturn(IERC20 token, bytes memory data) private {
+ // We need to perform a low level call here, to bypass Solidity's return data size checking mechanism, since
+ // we're implementing it ourselves. We use {Address-functionCall} to perform this call, which verifies that
+ // the target address contains contract code and also asserts for success in the low-level call.
+
+ bytes memory returndata = address(token).functionCall(data);
+ if (returndata.length != 0 && !abi.decode(returndata, (bool))) {
+ revert SafeERC20FailedOperation(address(token));
+ }
+ }
+
+ /**
+ * @dev Imitates a Solidity high-level call (i.e. a regular function call to a contract), relaxing the requirement
+ * on the return value: the return value is optional (but if data is returned, it must not be false).
+ * @param token The token targeted by the call.
+ * @param data The call data (encoded using abi.encode or one of its variants).
+ *
+ * This is a variant of {_callOptionalReturn} that silents catches all reverts and returns a bool instead.
+ */
+ function _callOptionalReturnBool(IERC20 token, bytes memory data) private returns (bool) {
+ // We need to perform a low level call here, to bypass Solidity's return data size checking mechanism, since
+ // we're implementing it ourselves. We cannot use {Address-functionCall} here since this should return false
+ // and not revert is the subcall reverts.
+
+ (bool success, bytes memory returndata) = address(token).call(data);
+ return success && (returndata.length == 0 || abi.decode(returndata, (bool))) && address(token).code.length > 0;
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Address.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Address.sol
new file mode 100644
index 00000000000..b7e3059529a
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Address.sol
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/Address.sol)
+
+pragma solidity ^0.8.20;
+
+/**
+ * @dev Collection of functions related to the address type
+ */
+library Address {
+ /**
+ * @dev The ETH balance of the account is not enough to perform the operation.
+ */
+ error AddressInsufficientBalance(address account);
+
+ /**
+ * @dev There's no code at `target` (it is not a contract).
+ */
+ error AddressEmptyCode(address target);
+
+ /**
+ * @dev A call to an address target failed. The target may have reverted.
+ */
+ error FailedInnerCall();
+
+ /**
+ * @dev Replacement for Solidity's `transfer`: sends `amount` wei to
+ * `recipient`, forwarding all available gas and reverting on errors.
+ *
+ * https://eips.ethereum.org/EIPS/eip-1884[EIP1884] increases the gas cost
+ * of certain opcodes, possibly making contracts go over the 2300 gas limit
+ * imposed by `transfer`, making them unable to receive funds via
+ * `transfer`. {sendValue} removes this limitation.
+ *
+ * https://consensys.net/diligence/blog/2019/09/stop-using-soliditys-transfer-now/[Learn more].
+ *
+ * IMPORTANT: because control is transferred to `recipient`, care must be
+ * taken to not create reentrancy vulnerabilities. Consider using
+ * {ReentrancyGuard} or the
+ * https://solidity.readthedocs.io/en/v0.8.20/security-considerations.html#use-the-checks-effects-interactions-pattern[checks-effects-interactions pattern].
+ */
+ function sendValue(address payable recipient, uint256 amount) internal {
+ if (address(this).balance < amount) {
+ revert AddressInsufficientBalance(address(this));
+ }
+
+ (bool success, ) = recipient.call{value: amount}("");
+ if (!success) {
+ revert FailedInnerCall();
+ }
+ }
+
+ /**
+ * @dev Performs a Solidity function call using a low level `call`. A
+ * plain `call` is an unsafe replacement for a function call: use this
+ * function instead.
+ *
+ * If `target` reverts with a revert reason or custom error, it is bubbled
+ * up by this function (like regular Solidity function calls). However, if
+ * the call reverted with no returned reason, this function reverts with a
+ * {FailedInnerCall} error.
+ *
+ * Returns the raw returned data. To convert to the expected return value,
+ * use https://solidity.readthedocs.io/en/latest/units-and-global-variables.html?highlight=abi.decode#abi-encoding-and-decoding-functions[`abi.decode`].
+ *
+ * Requirements:
+ *
+ * - `target` must be a contract.
+ * - calling `target` with `data` must not revert.
+ */
+ function functionCall(address target, bytes memory data) internal returns (bytes memory) {
+ return functionCallWithValue(target, data, 0);
+ }
+
+ /**
+ * @dev Same as {xref-Address-functionCall-address-bytes-}[`functionCall`],
+ * but also transferring `value` wei to `target`.
+ *
+ * Requirements:
+ *
+ * - the calling contract must have an ETH balance of at least `value`.
+ * - the called Solidity function must be `payable`.
+ */
+ function functionCallWithValue(address target, bytes memory data, uint256 value) internal returns (bytes memory) {
+ if (address(this).balance < value) {
+ revert AddressInsufficientBalance(address(this));
+ }
+ (bool success, bytes memory returndata) = target.call{value: value}(data);
+ return verifyCallResultFromTarget(target, success, returndata);
+ }
+
+ /**
+ * @dev Same as {xref-Address-functionCall-address-bytes-}[`functionCall`],
+ * but performing a static call.
+ */
+ function functionStaticCall(address target, bytes memory data) internal view returns (bytes memory) {
+ (bool success, bytes memory returndata) = target.staticcall(data);
+ return verifyCallResultFromTarget(target, success, returndata);
+ }
+
+ /**
+ * @dev Same as {xref-Address-functionCall-address-bytes-}[`functionCall`],
+ * but performing a delegate call.
+ */
+ function functionDelegateCall(address target, bytes memory data) internal returns (bytes memory) {
+ (bool success, bytes memory returndata) = target.delegatecall(data);
+ return verifyCallResultFromTarget(target, success, returndata);
+ }
+
+ /**
+ * @dev Tool to verify that a low level call to smart-contract was successful, and reverts if the target
+ * was not a contract or bubbling up the revert reason (falling back to {FailedInnerCall}) in case of an
+ * unsuccessful call.
+ */
+ function verifyCallResultFromTarget(
+ address target,
+ bool success,
+ bytes memory returndata
+ ) internal view returns (bytes memory) {
+ if (!success) {
+ _revert(returndata);
+ } else {
+ // only check if target is a contract if the call was successful and the return data is empty
+ // otherwise we already know that it was a contract
+ if (returndata.length == 0 && target.code.length == 0) {
+ revert AddressEmptyCode(target);
+ }
+ return returndata;
+ }
+ }
+
+ /**
+ * @dev Tool to verify that a low level call was successful, and reverts if it wasn't, either by bubbling the
+ * revert reason or with a default {FailedInnerCall} error.
+ */
+ function verifyCallResult(bool success, bytes memory returndata) internal pure returns (bytes memory) {
+ if (!success) {
+ _revert(returndata);
+ } else {
+ return returndata;
+ }
+ }
+
+ /**
+ * @dev Reverts with returndata if present. Otherwise reverts with {FailedInnerCall}.
+ */
+ function _revert(bytes memory returndata) private pure {
+ // Look for revert reason and bubble it up if present
+ if (returndata.length > 0) {
+ // The easiest way to bubble the revert reason is using memory via assembly
+ /// @solidity memory-safe-assembly
+ assembly {
+ let returndata_size := mload(returndata)
+ revert(add(32, returndata), returndata_size)
+ }
+ } else {
+ revert FailedInnerCall();
+ }
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Context.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Context.sol
new file mode 100644
index 00000000000..4e535fe03c2
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Context.sol
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.1) (utils/Context.sol)
+
+pragma solidity ^0.8.20;
+
+/**
+ * @dev Provides information about the current execution context, including the
+ * sender of the transaction and its data. While these are generally available
+ * via msg.sender and msg.data, they should not be accessed in such a direct
+ * manner, since when dealing with meta-transactions the account sending and
+ * paying for execution may not be the actual sender (as far as an application
+ * is concerned).
+ *
+ * This contract is only required for intermediate, library-like contracts.
+ */
+abstract contract Context {
+ function _msgSender() internal view virtual returns (address) {
+ return msg.sender;
+ }
+
+ function _msgData() internal view virtual returns (bytes calldata) {
+ return msg.data;
+ }
+
+ function _contextSuffixLength() internal view virtual returns (uint256) {
+ return 0;
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Pausable.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Pausable.sol
new file mode 100644
index 00000000000..312f1cb90fe
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Pausable.sol
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/Pausable.sol)
+
+pragma solidity ^0.8.20;
+
+import {Context} from "../utils/Context.sol";
+
+/**
+ * @dev Contract module which allows children to implement an emergency stop
+ * mechanism that can be triggered by an authorized account.
+ *
+ * This module is used through inheritance. It will make available the
+ * modifiers `whenNotPaused` and `whenPaused`, which can be applied to
+ * the functions of your contract. Note that they will not be pausable by
+ * simply including this module, only once the modifiers are put in place.
+ */
+abstract contract Pausable is Context {
+ bool private _paused;
+
+ /**
+ * @dev Emitted when the pause is triggered by `account`.
+ */
+ event Paused(address account);
+
+ /**
+ * @dev Emitted when the pause is lifted by `account`.
+ */
+ event Unpaused(address account);
+
+ /**
+ * @dev The operation failed because the contract is paused.
+ */
+ error EnforcedPause();
+
+ /**
+ * @dev The operation failed because the contract is not paused.
+ */
+ error ExpectedPause();
+
+ /**
+ * @dev Initializes the contract in unpaused state.
+ */
+ constructor() {
+ _paused = false;
+ }
+
+ /**
+ * @dev Modifier to make a function callable only when the contract is not paused.
+ *
+ * Requirements:
+ *
+ * - The contract must not be paused.
+ */
+ modifier whenNotPaused() {
+ _requireNotPaused();
+ _;
+ }
+
+ /**
+ * @dev Modifier to make a function callable only when the contract is paused.
+ *
+ * Requirements:
+ *
+ * - The contract must be paused.
+ */
+ modifier whenPaused() {
+ _requirePaused();
+ _;
+ }
+
+ /**
+ * @dev Returns true if the contract is paused, and false otherwise.
+ */
+ function paused() public view virtual returns (bool) {
+ return _paused;
+ }
+
+ /**
+ * @dev Throws if the contract is paused.
+ */
+ function _requireNotPaused() internal view virtual {
+ if (paused()) {
+ revert EnforcedPause();
+ }
+ }
+
+ /**
+ * @dev Throws if the contract is not paused.
+ */
+ function _requirePaused() internal view virtual {
+ if (!paused()) {
+ revert ExpectedPause();
+ }
+ }
+
+ /**
+ * @dev Triggers stopped state.
+ *
+ * Requirements:
+ *
+ * - The contract must not be paused.
+ */
+ function _pause() internal virtual whenNotPaused {
+ _paused = true;
+ emit Paused(_msgSender());
+ }
+
+ /**
+ * @dev Returns to normal state.
+ *
+ * Requirements:
+ *
+ * - The contract must be paused.
+ */
+ function _unpause() internal virtual whenPaused {
+ _paused = false;
+ emit Unpaused(_msgSender());
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/ShortStrings.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/ShortStrings.sol
new file mode 100644
index 00000000000..fdfe774d635
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/ShortStrings.sol
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/ShortStrings.sol)
+
+pragma solidity ^0.8.20;
+
+import {StorageSlot} from "./StorageSlot.sol";
+
+// | string | 0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA |
+// | length | 0x BB |
+type ShortString is bytes32;
+
+/**
+ * @dev This library provides functions to convert short memory strings
+ * into a `ShortString` type that can be used as an immutable variable.
+ *
+ * Strings of arbitrary length can be optimized using this library if
+ * they are short enough (up to 31 bytes) by packing them with their
+ * length (1 byte) in a single EVM word (32 bytes). Additionally, a
+ * fallback mechanism can be used for every other case.
+ *
+ * Usage example:
+ *
+ * ```solidity
+ * contract Named {
+ * using ShortStrings for *;
+ *
+ * ShortString private immutable _name;
+ * string private _nameFallback;
+ *
+ * constructor(string memory contractName) {
+ * _name = contractName.toShortStringWithFallback(_nameFallback);
+ * }
+ *
+ * function name() external view returns (string memory) {
+ * return _name.toStringWithFallback(_nameFallback);
+ * }
+ * }
+ * ```
+ */
+library ShortStrings {
+ // Used as an identifier for strings longer than 31 bytes.
+ bytes32 private constant FALLBACK_SENTINEL = 0x00000000000000000000000000000000000000000000000000000000000000FF;
+
+ error StringTooLong(string str);
+ error InvalidShortString();
+
+ /**
+ * @dev Encode a string of at most 31 chars into a `ShortString`.
+ *
+ * This will trigger a `StringTooLong` error if the input string is too long.
+ */
+ function toShortString(string memory str) internal pure returns (ShortString) {
+ bytes memory bstr = bytes(str);
+ if (bstr.length > 31) {
+ revert StringTooLong(str);
+ }
+ return ShortString.wrap(bytes32(uint256(bytes32(bstr)) | bstr.length));
+ }
+
+ /**
+ * @dev Decode a `ShortString` back to a "normal" string.
+ */
+ function toString(ShortString sstr) internal pure returns (string memory) {
+ uint256 len = byteLength(sstr);
+ // using `new string(len)` would work locally but is not memory safe.
+ string memory str = new string(32);
+ /// @solidity memory-safe-assembly
+ assembly {
+ mstore(str, len)
+ mstore(add(str, 0x20), sstr)
+ }
+ return str;
+ }
+
+ /**
+ * @dev Return the length of a `ShortString`.
+ */
+ function byteLength(ShortString sstr) internal pure returns (uint256) {
+ uint256 result = uint256(ShortString.unwrap(sstr)) & 0xFF;
+ if (result > 31) {
+ revert InvalidShortString();
+ }
+ return result;
+ }
+
+ /**
+ * @dev Encode a string into a `ShortString`, or write it to storage if it is too long.
+ */
+ function toShortStringWithFallback(string memory value, string storage store) internal returns (ShortString) {
+ if (bytes(value).length < 32) {
+ return toShortString(value);
+ } else {
+ StorageSlot.getStringSlot(store).value = value;
+ return ShortString.wrap(FALLBACK_SENTINEL);
+ }
+ }
+
+ /**
+ * @dev Decode a string that was encoded to `ShortString` or written to storage using {toShortStringWithFallback}.
+ */
+ function toStringWithFallback(ShortString value, string storage store) internal pure returns (string memory) {
+ if (ShortString.unwrap(value) != FALLBACK_SENTINEL) {
+ return toString(value);
+ } else {
+ return store;
+ }
+ }
+
+ /**
+ * @dev Return the length of a string that was encoded to `ShortString` or written to storage using
+ * {toShortStringWithFallback}.
+ *
+ * WARNING: This will return the "byte length" of the string. This may not reflect the actual length in terms of
+ * actual characters as the UTF-8 encoding of a single character can span over multiple bytes.
+ */
+ function byteLengthWithFallback(ShortString value, string storage store) internal view returns (uint256) {
+ if (ShortString.unwrap(value) != FALLBACK_SENTINEL) {
+ return byteLength(value);
+ } else {
+ return bytes(store).length;
+ }
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/StorageSlot.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/StorageSlot.sol
new file mode 100644
index 00000000000..08418327a59
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/StorageSlot.sol
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/StorageSlot.sol)
+// This file was procedurally generated from scripts/generate/templates/StorageSlot.js.
+
+pragma solidity ^0.8.20;
+
+/**
+ * @dev Library for reading and writing primitive types to specific storage slots.
+ *
+ * Storage slots are often used to avoid storage conflict when dealing with upgradeable contracts.
+ * This library helps with reading and writing to such slots without the need for inline assembly.
+ *
+ * The functions in this library return Slot structs that contain a `value` member that can be used to read or write.
+ *
+ * Example usage to set ERC1967 implementation slot:
+ * ```solidity
+ * contract ERC1967 {
+ * bytes32 internal constant _IMPLEMENTATION_SLOT = 0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc;
+ *
+ * function _getImplementation() internal view returns (address) {
+ * return StorageSlot.getAddressSlot(_IMPLEMENTATION_SLOT).value;
+ * }
+ *
+ * function _setImplementation(address newImplementation) internal {
+ * require(newImplementation.code.length > 0);
+ * StorageSlot.getAddressSlot(_IMPLEMENTATION_SLOT).value = newImplementation;
+ * }
+ * }
+ * ```
+ */
+library StorageSlot {
+ struct AddressSlot {
+ address value;
+ }
+
+ struct BooleanSlot {
+ bool value;
+ }
+
+ struct Bytes32Slot {
+ bytes32 value;
+ }
+
+ struct Uint256Slot {
+ uint256 value;
+ }
+
+ struct StringSlot {
+ string value;
+ }
+
+ struct BytesSlot {
+ bytes value;
+ }
+
+ /**
+ * @dev Returns an `AddressSlot` with member `value` located at `slot`.
+ */
+ function getAddressSlot(bytes32 slot) internal pure returns (AddressSlot storage r) {
+ /// @solidity memory-safe-assembly
+ assembly {
+ r.slot := slot
+ }
+ }
+
+ /**
+ * @dev Returns a `BooleanSlot` with member `value` located at `slot`.
+ */
+ function getBooleanSlot(bytes32 slot) internal pure returns (BooleanSlot storage r) {
+ /// @solidity memory-safe-assembly
+ assembly {
+ r.slot := slot
+ }
+ }
+
+ /**
+ * @dev Returns a `Bytes32Slot` with member `value` located at `slot`.
+ */
+ function getBytes32Slot(bytes32 slot) internal pure returns (Bytes32Slot storage r) {
+ /// @solidity memory-safe-assembly
+ assembly {
+ r.slot := slot
+ }
+ }
+
+ /**
+ * @dev Returns a `Uint256Slot` with member `value` located at `slot`.
+ */
+ function getUint256Slot(bytes32 slot) internal pure returns (Uint256Slot storage r) {
+ /// @solidity memory-safe-assembly
+ assembly {
+ r.slot := slot
+ }
+ }
+
+ /**
+ * @dev Returns a `StringSlot` with member `value` located at `slot`.
+ */
+ function getStringSlot(bytes32 slot) internal pure returns (StringSlot storage r) {
+ /// @solidity memory-safe-assembly
+ assembly {
+ r.slot := slot
+ }
+ }
+
+ /**
+ * @dev Returns a `StringSlot` representation of the string storage pointer `store`.
+ */
+ function getStringSlot(string storage store) internal pure returns (StringSlot storage r) {
+ /// @solidity memory-safe-assembly
+ assembly {
+ r.slot := store.slot
+ }
+ }
+
+ /**
+ * @dev Returns a `BytesSlot` with member `value` located at `slot`.
+ */
+ function getBytesSlot(bytes32 slot) internal pure returns (BytesSlot storage r) {
+ /// @solidity memory-safe-assembly
+ assembly {
+ r.slot := slot
+ }
+ }
+
+ /**
+ * @dev Returns a `BytesSlot` representation of the bytes storage pointer `store`.
+ */
+ function getBytesSlot(bytes storage store) internal pure returns (BytesSlot storage r) {
+ /// @solidity memory-safe-assembly
+ assembly {
+ r.slot := store.slot
+ }
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Strings.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Strings.sol
new file mode 100644
index 00000000000..b2c0a40fb2a
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/Strings.sol
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/Strings.sol)
+
+pragma solidity ^0.8.20;
+
+import {Math} from "./math/Math.sol";
+import {SignedMath} from "./math/SignedMath.sol";
+
+/**
+ * @dev String operations.
+ */
+library Strings {
+ bytes16 private constant HEX_DIGITS = "0123456789abcdef";
+ uint8 private constant ADDRESS_LENGTH = 20;
+
+ /**
+ * @dev The `value` string doesn't fit in the specified `length`.
+ */
+ error StringsInsufficientHexLength(uint256 value, uint256 length);
+
+ /**
+ * @dev Converts a `uint256` to its ASCII `string` decimal representation.
+ */
+ function toString(uint256 value) internal pure returns (string memory) {
+ unchecked {
+ uint256 length = Math.log10(value) + 1;
+ string memory buffer = new string(length);
+ uint256 ptr;
+ /// @solidity memory-safe-assembly
+ assembly {
+ ptr := add(buffer, add(32, length))
+ }
+ while (true) {
+ ptr--;
+ /// @solidity memory-safe-assembly
+ assembly {
+ mstore8(ptr, byte(mod(value, 10), HEX_DIGITS))
+ }
+ value /= 10;
+ if (value == 0) break;
+ }
+ return buffer;
+ }
+ }
+
+ /**
+ * @dev Converts a `int256` to its ASCII `string` decimal representation.
+ */
+ function toStringSigned(int256 value) internal pure returns (string memory) {
+ return string.concat(value < 0 ? "-" : "", toString(SignedMath.abs(value)));
+ }
+
+ /**
+ * @dev Converts a `uint256` to its ASCII `string` hexadecimal representation.
+ */
+ function toHexString(uint256 value) internal pure returns (string memory) {
+ unchecked {
+ return toHexString(value, Math.log256(value) + 1);
+ }
+ }
+
+ /**
+ * @dev Converts a `uint256` to its ASCII `string` hexadecimal representation with fixed length.
+ */
+ function toHexString(uint256 value, uint256 length) internal pure returns (string memory) {
+ uint256 localValue = value;
+ bytes memory buffer = new bytes(2 * length + 2);
+ buffer[0] = "0";
+ buffer[1] = "x";
+ for (uint256 i = 2 * length + 1; i > 1; --i) {
+ buffer[i] = HEX_DIGITS[localValue & 0xf];
+ localValue >>= 4;
+ }
+ if (localValue != 0) {
+ revert StringsInsufficientHexLength(value, length);
+ }
+ return string(buffer);
+ }
+
+ /**
+ * @dev Converts an `address` with fixed length of 20 bytes to its not checksummed ASCII `string` hexadecimal
+ * representation.
+ */
+ function toHexString(address addr) internal pure returns (string memory) {
+ return toHexString(uint256(uint160(addr)), ADDRESS_LENGTH);
+ }
+
+ /**
+ * @dev Returns true if the two strings are equal.
+ */
+ function equal(string memory a, string memory b) internal pure returns (bool) {
+ return bytes(a).length == bytes(b).length && keccak256(bytes(a)) == keccak256(bytes(b));
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/cryptography/ECDSA.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/cryptography/ECDSA.sol
new file mode 100644
index 00000000000..04b3e5e0646
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/cryptography/ECDSA.sol
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/cryptography/ECDSA.sol)
+
+pragma solidity ^0.8.20;
+
+/**
+ * @dev Elliptic Curve Digital Signature Algorithm (ECDSA) operations.
+ *
+ * These functions can be used to verify that a message was signed by the holder
+ * of the private keys of a given address.
+ */
+library ECDSA {
+ enum RecoverError {
+ NoError,
+ InvalidSignature,
+ InvalidSignatureLength,
+ InvalidSignatureS
+ }
+
+ /**
+ * @dev The signature derives the `address(0)`.
+ */
+ error ECDSAInvalidSignature();
+
+ /**
+ * @dev The signature has an invalid length.
+ */
+ error ECDSAInvalidSignatureLength(uint256 length);
+
+ /**
+ * @dev The signature has an S value that is in the upper half order.
+ */
+ error ECDSAInvalidSignatureS(bytes32 s);
+
+ /**
+ * @dev Returns the address that signed a hashed message (`hash`) with `signature` or an error. This will not
+ * return address(0) without also returning an error description. Errors are documented using an enum (error type)
+ * and a bytes32 providing additional information about the error.
+ *
+ * If no error is returned, then the address can be used for verification purposes.
+ *
+ * The `ecrecover` EVM precompile allows for malleable (non-unique) signatures:
+ * this function rejects them by requiring the `s` value to be in the lower
+ * half order, and the `v` value to be either 27 or 28.
+ *
+ * IMPORTANT: `hash` _must_ be the result of a hash operation for the
+ * verification to be secure: it is possible to craft signatures that
+ * recover to arbitrary addresses for non-hashed data. A safe way to ensure
+ * this is by receiving a hash of the original message (which may otherwise
+ * be too long), and then calling {MessageHashUtils-toEthSignedMessageHash} on it.
+ *
+ * Documentation for signature generation:
+ * - with https://web3js.readthedocs.io/en/v1.3.4/web3-eth-accounts.html#sign[Web3.js]
+ * - with https://docs.ethers.io/v5/api/signer/#Signer-signMessage[ethers]
+ */
+ function tryRecover(bytes32 hash, bytes memory signature) internal pure returns (address, RecoverError, bytes32) {
+ if (signature.length == 65) {
+ bytes32 r;
+ bytes32 s;
+ uint8 v;
+ // ecrecover takes the signature parameters, and the only way to get them
+ // currently is to use assembly.
+ /// @solidity memory-safe-assembly
+ assembly {
+ r := mload(add(signature, 0x20))
+ s := mload(add(signature, 0x40))
+ v := byte(0, mload(add(signature, 0x60)))
+ }
+ return tryRecover(hash, v, r, s);
+ } else {
+ return (address(0), RecoverError.InvalidSignatureLength, bytes32(signature.length));
+ }
+ }
+
+ /**
+ * @dev Returns the address that signed a hashed message (`hash`) with
+ * `signature`. This address can then be used for verification purposes.
+ *
+ * The `ecrecover` EVM precompile allows for malleable (non-unique) signatures:
+ * this function rejects them by requiring the `s` value to be in the lower
+ * half order, and the `v` value to be either 27 or 28.
+ *
+ * IMPORTANT: `hash` _must_ be the result of a hash operation for the
+ * verification to be secure: it is possible to craft signatures that
+ * recover to arbitrary addresses for non-hashed data. A safe way to ensure
+ * this is by receiving a hash of the original message (which may otherwise
+ * be too long), and then calling {MessageHashUtils-toEthSignedMessageHash} on it.
+ */
+ function recover(bytes32 hash, bytes memory signature) internal pure returns (address) {
+ (address recovered, RecoverError error, bytes32 errorArg) = tryRecover(hash, signature);
+ _throwError(error, errorArg);
+ return recovered;
+ }
+
+ /**
+ * @dev Overload of {ECDSA-tryRecover} that receives the `r` and `vs` short-signature fields separately.
+ *
+ * See https://eips.ethereum.org/EIPS/eip-2098[EIP-2098 short signatures]
+ */
+ function tryRecover(bytes32 hash, bytes32 r, bytes32 vs) internal pure returns (address, RecoverError, bytes32) {
+ unchecked {
+ bytes32 s = vs & bytes32(0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff);
+ // We do not check for an overflow here since the shift operation results in 0 or 1.
+ uint8 v = uint8((uint256(vs) >> 255) + 27);
+ return tryRecover(hash, v, r, s);
+ }
+ }
+
+ /**
+ * @dev Overload of {ECDSA-recover} that receives the `r` and `vs` short-signature fields separately.
+ */
+ function recover(bytes32 hash, bytes32 r, bytes32 vs) internal pure returns (address) {
+ (address recovered, RecoverError error, bytes32 errorArg) = tryRecover(hash, r, vs);
+ _throwError(error, errorArg);
+ return recovered;
+ }
+
+ /**
+ * @dev Overload of {ECDSA-tryRecover} that receives the `v`,
+ * `r` and `s` signature fields separately.
+ */
+ function tryRecover(
+ bytes32 hash,
+ uint8 v,
+ bytes32 r,
+ bytes32 s
+ ) internal pure returns (address, RecoverError, bytes32) {
+ // EIP-2 still allows signature malleability for ecrecover(). Remove this possibility and make the signature
+ // unique. Appendix F in the Ethereum Yellow paper (https://ethereum.github.io/yellowpaper/paper.pdf), defines
+ // the valid range for s in (301): 0 < s < secp256k1n ÷ 2 + 1, and for v in (302): v ∈ {27, 28}. Most
+ // signatures from current libraries generate a unique signature with an s-value in the lower half order.
+ //
+ // If your library generates malleable signatures, such as s-values in the upper range, calculate a new s-value
+ // with 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141 - s1 and flip v from 27 to 28 or
+ // vice versa. If your library also generates signatures with 0/1 for v instead of 27/28, add 27 to v to accept
+ // these malleable signatures as well.
+ if (uint256(s) > 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0) {
+ return (address(0), RecoverError.InvalidSignatureS, s);
+ }
+
+ // If the signature is valid (and not malleable), return the signer address
+ address signer = ecrecover(hash, v, r, s);
+ if (signer == address(0)) {
+ return (address(0), RecoverError.InvalidSignature, bytes32(0));
+ }
+
+ return (signer, RecoverError.NoError, bytes32(0));
+ }
+
+ /**
+ * @dev Overload of {ECDSA-recover} that receives the `v`,
+ * `r` and `s` signature fields separately.
+ */
+ function recover(bytes32 hash, uint8 v, bytes32 r, bytes32 s) internal pure returns (address) {
+ (address recovered, RecoverError error, bytes32 errorArg) = tryRecover(hash, v, r, s);
+ _throwError(error, errorArg);
+ return recovered;
+ }
+
+ /**
+ * @dev Optionally reverts with the corresponding custom error according to the `error` argument provided.
+ */
+ function _throwError(RecoverError error, bytes32 errorArg) private pure {
+ if (error == RecoverError.NoError) {
+ return; // no error: do nothing
+ } else if (error == RecoverError.InvalidSignature) {
+ revert ECDSAInvalidSignature();
+ } else if (error == RecoverError.InvalidSignatureLength) {
+ revert ECDSAInvalidSignatureLength(uint256(errorArg));
+ } else if (error == RecoverError.InvalidSignatureS) {
+ revert ECDSAInvalidSignatureS(errorArg);
+ }
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/cryptography/EIP712.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/cryptography/EIP712.sol
new file mode 100644
index 00000000000..8e548cdd8f0
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/cryptography/EIP712.sol
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/cryptography/EIP712.sol)
+
+pragma solidity ^0.8.20;
+
+import {MessageHashUtils} from "./MessageHashUtils.sol";
+import {ShortStrings, ShortString} from "../ShortStrings.sol";
+import {IERC5267} from "../../interfaces/IERC5267.sol";
+
+/**
+ * @dev https://eips.ethereum.org/EIPS/eip-712[EIP 712] is a standard for hashing and signing of typed structured data.
+ *
+ * The encoding scheme specified in the EIP requires a domain separator and a hash of the typed structured data, whose
+ * encoding is very generic and therefore its implementation in Solidity is not feasible, thus this contract
+ * does not implement the encoding itself. Protocols need to implement the type-specific encoding they need in order to
+ * produce the hash of their typed data using a combination of `abi.encode` and `keccak256`.
+ *
+ * This contract implements the EIP 712 domain separator ({_domainSeparatorV4}) that is used as part of the encoding
+ * scheme, and the final step of the encoding to obtain the message digest that is then signed via ECDSA
+ * ({_hashTypedDataV4}).
+ *
+ * The implementation of the domain separator was designed to be as efficient as possible while still properly updating
+ * the chain id to protect against replay attacks on an eventual fork of the chain.
+ *
+ * NOTE: This contract implements the version of the encoding known as "v4", as implemented by the JSON RPC method
+ * https://docs.metamask.io/guide/signing-data.html[`eth_signTypedDataV4` in MetaMask].
+ *
+ * NOTE: In the upgradeable version of this contract, the cached values will correspond to the address, and the domain
+ * separator of the implementation contract. This will cause the {_domainSeparatorV4} function to always rebuild the
+ * separator from the immutable values, which is cheaper than accessing a cached version in cold storage.
+ *
+ * @custom:oz-upgrades-unsafe-allow state-variable-immutable
+ */
+abstract contract EIP712 is IERC5267 {
+ using ShortStrings for *;
+
+ bytes32 private constant TYPE_HASH =
+ keccak256("EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)");
+
+ // Cache the domain separator as an immutable value, but also store the chain id that it corresponds to, in order to
+ // invalidate the cached domain separator if the chain id changes.
+ bytes32 private immutable _cachedDomainSeparator;
+ uint256 private immutable _cachedChainId;
+ address private immutable _cachedThis;
+
+ bytes32 private immutable _hashedName;
+ bytes32 private immutable _hashedVersion;
+
+ ShortString private immutable _name;
+ ShortString private immutable _version;
+ string private _nameFallback;
+ string private _versionFallback;
+
+ /**
+ * @dev Initializes the domain separator and parameter caches.
+ *
+ * The meaning of `name` and `version` is specified in
+ * https://eips.ethereum.org/EIPS/eip-712#definition-of-domainseparator[EIP 712]:
+ *
+ * - `name`: the user readable name of the signing domain, i.e. the name of the DApp or the protocol.
+ * - `version`: the current major version of the signing domain.
+ *
+ * NOTE: These parameters cannot be changed except through a xref:learn::upgrading-smart-contracts.adoc[smart
+ * contract upgrade].
+ */
+ constructor(string memory name, string memory version) {
+ _name = name.toShortStringWithFallback(_nameFallback);
+ _version = version.toShortStringWithFallback(_versionFallback);
+ _hashedName = keccak256(bytes(name));
+ _hashedVersion = keccak256(bytes(version));
+
+ _cachedChainId = block.chainid;
+ _cachedDomainSeparator = _buildDomainSeparator();
+ _cachedThis = address(this);
+ }
+
+ /**
+ * @dev Returns the domain separator for the current chain.
+ */
+ function _domainSeparatorV4() internal view returns (bytes32) {
+ if (address(this) == _cachedThis && block.chainid == _cachedChainId) {
+ return _cachedDomainSeparator;
+ } else {
+ return _buildDomainSeparator();
+ }
+ }
+
+ function _buildDomainSeparator() private view returns (bytes32) {
+ return keccak256(abi.encode(TYPE_HASH, _hashedName, _hashedVersion, block.chainid, address(this)));
+ }
+
+ /**
+ * @dev Given an already https://eips.ethereum.org/EIPS/eip-712#definition-of-hashstruct[hashed struct], this
+ * function returns the hash of the fully encoded EIP712 message for this domain.
+ *
+ * This hash can be used together with {ECDSA-recover} to obtain the signer of a message. For example:
+ *
+ * ```solidity
+ * bytes32 digest = _hashTypedDataV4(keccak256(abi.encode(
+ * keccak256("Mail(address to,string contents)"),
+ * mailTo,
+ * keccak256(bytes(mailContents))
+ * )));
+ * address signer = ECDSA.recover(digest, signature);
+ * ```
+ */
+ function _hashTypedDataV4(bytes32 structHash) internal view virtual returns (bytes32) {
+ return MessageHashUtils.toTypedDataHash(_domainSeparatorV4(), structHash);
+ }
+
+ /**
+ * @dev See {IERC-5267}.
+ */
+ function eip712Domain()
+ public
+ view
+ virtual
+ returns (
+ bytes1 fields,
+ string memory name,
+ string memory version,
+ uint256 chainId,
+ address verifyingContract,
+ bytes32 salt,
+ uint256[] memory extensions
+ )
+ {
+ return (
+ hex"0f", // 01111
+ _EIP712Name(),
+ _EIP712Version(),
+ block.chainid,
+ address(this),
+ bytes32(0),
+ new uint256[](0)
+ );
+ }
+
+ /**
+ * @dev The name parameter for the EIP712 domain.
+ *
+ * NOTE: By default this function reads _name which is an immutable value.
+ * It only reads from storage if necessary (in case the value is too large to fit in a ShortString).
+ */
+ // solhint-disable-next-line func-name-mixedcase
+ function _EIP712Name() internal view returns (string memory) {
+ return _name.toStringWithFallback(_nameFallback);
+ }
+
+ /**
+ * @dev The version parameter for the EIP712 domain.
+ *
+ * NOTE: By default this function reads _version which is an immutable value.
+ * It only reads from storage if necessary (in case the value is too large to fit in a ShortString).
+ */
+ // solhint-disable-next-line func-name-mixedcase
+ function _EIP712Version() internal view returns (string memory) {
+ return _version.toStringWithFallback(_versionFallback);
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/cryptography/MessageHashUtils.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/cryptography/MessageHashUtils.sol
new file mode 100644
index 00000000000..8836693e79b
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/cryptography/MessageHashUtils.sol
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/cryptography/MessageHashUtils.sol)
+
+pragma solidity ^0.8.20;
+
+import {Strings} from "../Strings.sol";
+
+/**
+ * @dev Signature message hash utilities for producing digests to be consumed by {ECDSA} recovery or signing.
+ *
+ * The library provides methods for generating a hash of a message that conforms to the
+ * https://eips.ethereum.org/EIPS/eip-191[EIP 191] and https://eips.ethereum.org/EIPS/eip-712[EIP 712]
+ * specifications.
+ */
+library MessageHashUtils {
+ /**
+ * @dev Returns the keccak256 digest of an EIP-191 signed data with version
+ * `0x45` (`personal_sign` messages).
+ *
+ * The digest is calculated by prefixing a bytes32 `messageHash` with
+ * `"\x19Ethereum Signed Message:\n32"` and hashing the result. It corresponds with the
+ * hash signed when using the https://eth.wiki/json-rpc/API#eth_sign[`eth_sign`] JSON-RPC method.
+ *
+ * NOTE: The `messageHash` parameter is intended to be the result of hashing a raw message with
+ * keccak256, although any bytes32 value can be safely used because the final digest will
+ * be re-hashed.
+ *
+ * See {ECDSA-recover}.
+ */
+ function toEthSignedMessageHash(bytes32 messageHash) internal pure returns (bytes32 digest) {
+ /// @solidity memory-safe-assembly
+ assembly {
+ mstore(0x00, "\x19Ethereum Signed Message:\n32") // 32 is the bytes-length of messageHash
+ mstore(0x1c, messageHash) // 0x1c (28) is the length of the prefix
+ digest := keccak256(0x00, 0x3c) // 0x3c is the length of the prefix (0x1c) + messageHash (0x20)
+ }
+ }
+
+ /**
+ * @dev Returns the keccak256 digest of an EIP-191 signed data with version
+ * `0x45` (`personal_sign` messages).
+ *
+ * The digest is calculated by prefixing an arbitrary `message` with
+ * `"\x19Ethereum Signed Message:\n" + len(message)` and hashing the result. It corresponds with the
+ * hash signed when using the https://eth.wiki/json-rpc/API#eth_sign[`eth_sign`] JSON-RPC method.
+ *
+ * See {ECDSA-recover}.
+ */
+ function toEthSignedMessageHash(bytes memory message) internal pure returns (bytes32) {
+ return
+ keccak256(bytes.concat("\x19Ethereum Signed Message:\n", bytes(Strings.toString(message.length)), message));
+ }
+
+ /**
+ * @dev Returns the keccak256 digest of an EIP-191 signed data with version
+ * `0x00` (data with intended validator).
+ *
+ * The digest is calculated by prefixing an arbitrary `data` with `"\x19\x00"` and the intended
+ * `validator` address. Then hashing the result.
+ *
+ * See {ECDSA-recover}.
+ */
+ function toDataWithIntendedValidatorHash(address validator, bytes memory data) internal pure returns (bytes32) {
+ return keccak256(abi.encodePacked(hex"19_00", validator, data));
+ }
+
+ /**
+ * @dev Returns the keccak256 digest of an EIP-712 typed data (EIP-191 version `0x01`).
+ *
+ * The digest is calculated from a `domainSeparator` and a `structHash`, by prefixing them with
+ * `\x19\x01` and hashing the result. It corresponds to the hash signed by the
+ * https://eips.ethereum.org/EIPS/eip-712[`eth_signTypedData`] JSON-RPC method as part of EIP-712.
+ *
+ * See {ECDSA-recover}.
+ */
+ function toTypedDataHash(bytes32 domainSeparator, bytes32 structHash) internal pure returns (bytes32 digest) {
+ /// @solidity memory-safe-assembly
+ assembly {
+ let ptr := mload(0x40)
+ mstore(ptr, hex"19_01")
+ mstore(add(ptr, 0x02), domainSeparator)
+ mstore(add(ptr, 0x22), structHash)
+ digest := keccak256(ptr, 0x42)
+ }
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/ERC165.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/ERC165.sol
new file mode 100644
index 00000000000..1e77b60d739
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/ERC165.sol
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/introspection/ERC165.sol)
+
+pragma solidity ^0.8.20;
+
+import {IERC165} from "./IERC165.sol";
+
+/**
+ * @dev Implementation of the {IERC165} interface.
+ *
+ * Contracts that want to implement ERC165 should inherit from this contract and override {supportsInterface} to check
+ * for the additional interface id that will be supported. For example:
+ *
+ * ```solidity
+ * function supportsInterface(bytes4 interfaceId) public view virtual override returns (bool) {
+ * return interfaceId == type(MyInterface).interfaceId || super.supportsInterface(interfaceId);
+ * }
+ * ```
+ */
+abstract contract ERC165 is IERC165 {
+ /**
+ * @dev See {IERC165-supportsInterface}.
+ */
+ function supportsInterface(bytes4 interfaceId) public view virtual returns (bool) {
+ return interfaceId == type(IERC165).interfaceId;
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/ERC165Checker.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/ERC165Checker.sol
new file mode 100644
index 00000000000..7b52241446d
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/ERC165Checker.sol
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/introspection/ERC165Checker.sol)
+
+pragma solidity ^0.8.20;
+
+import {IERC165} from "./IERC165.sol";
+
+/**
+ * @dev Library used to query support of an interface declared via {IERC165}.
+ *
+ * Note that these functions return the actual result of the query: they do not
+ * `revert` if an interface is not supported. It is up to the caller to decide
+ * what to do in these cases.
+ */
+library ERC165Checker {
+ // As per the EIP-165 spec, no interface should ever match 0xffffffff
+ bytes4 private constant INTERFACE_ID_INVALID = 0xffffffff;
+
+ /**
+ * @dev Returns true if `account` supports the {IERC165} interface.
+ */
+ function supportsERC165(address account) internal view returns (bool) {
+ // Any contract that implements ERC165 must explicitly indicate support of
+ // InterfaceId_ERC165 and explicitly indicate non-support of InterfaceId_Invalid
+ return
+ supportsERC165InterfaceUnchecked(account, type(IERC165).interfaceId) &&
+ !supportsERC165InterfaceUnchecked(account, INTERFACE_ID_INVALID);
+ }
+
+ /**
+ * @dev Returns true if `account` supports the interface defined by
+ * `interfaceId`. Support for {IERC165} itself is queried automatically.
+ *
+ * See {IERC165-supportsInterface}.
+ */
+ function supportsInterface(address account, bytes4 interfaceId) internal view returns (bool) {
+ // query support of both ERC165 as per the spec and support of _interfaceId
+ return supportsERC165(account) && supportsERC165InterfaceUnchecked(account, interfaceId);
+ }
+
+ /**
+ * @dev Returns a boolean array where each value corresponds to the
+ * interfaces passed in and whether they're supported or not. This allows
+ * you to batch check interfaces for a contract where your expectation
+ * is that some interfaces may not be supported.
+ *
+ * See {IERC165-supportsInterface}.
+ */
+ function getSupportedInterfaces(
+ address account,
+ bytes4[] memory interfaceIds
+ ) internal view returns (bool[] memory) {
+ // an array of booleans corresponding to interfaceIds and whether they're supported or not
+ bool[] memory interfaceIdsSupported = new bool[](interfaceIds.length);
+
+ // query support of ERC165 itself
+ if (supportsERC165(account)) {
+ // query support of each interface in interfaceIds
+ for (uint256 i = 0; i < interfaceIds.length; i++) {
+ interfaceIdsSupported[i] = supportsERC165InterfaceUnchecked(account, interfaceIds[i]);
+ }
+ }
+
+ return interfaceIdsSupported;
+ }
+
+ /**
+ * @dev Returns true if `account` supports all the interfaces defined in
+ * `interfaceIds`. Support for {IERC165} itself is queried automatically.
+ *
+ * Batch-querying can lead to gas savings by skipping repeated checks for
+ * {IERC165} support.
+ *
+ * See {IERC165-supportsInterface}.
+ */
+ function supportsAllInterfaces(address account, bytes4[] memory interfaceIds) internal view returns (bool) {
+ // query support of ERC165 itself
+ if (!supportsERC165(account)) {
+ return false;
+ }
+
+ // query support of each interface in interfaceIds
+ for (uint256 i = 0; i < interfaceIds.length; i++) {
+ if (!supportsERC165InterfaceUnchecked(account, interfaceIds[i])) {
+ return false;
+ }
+ }
+
+ // all interfaces supported
+ return true;
+ }
+
+ /**
+ * @notice Query if a contract implements an interface, does not check ERC165 support
+ * @param account The address of the contract to query for support of an interface
+ * @param interfaceId The interface identifier, as specified in ERC-165
+ * @return true if the contract at account indicates support of the interface with
+ * identifier interfaceId, false otherwise
+ * @dev Assumes that account contains a contract that supports ERC165, otherwise
+ * the behavior of this method is undefined. This precondition can be checked
+ * with {supportsERC165}.
+ *
+ * Some precompiled contracts will falsely indicate support for a given interface, so caution
+ * should be exercised when using this function.
+ *
+ * Interface identification is specified in ERC-165.
+ */
+ function supportsERC165InterfaceUnchecked(address account, bytes4 interfaceId) internal view returns (bool) {
+ // prepare call
+ bytes memory encodedParams = abi.encodeCall(IERC165.supportsInterface, (interfaceId));
+
+ // perform static call
+ bool success;
+ uint256 returnSize;
+ uint256 returnValue;
+ assembly {
+ success := staticcall(30000, account, add(encodedParams, 0x20), mload(encodedParams), 0x00, 0x20)
+ returnSize := returndatasize()
+ returnValue := mload(0x00)
+ }
+
+ return success && returnSize >= 0x20 && returnValue > 0;
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/IERC165.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/IERC165.sol
new file mode 100644
index 00000000000..c09f31fe128
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/IERC165.sol
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/introspection/IERC165.sol)
+
+pragma solidity ^0.8.20;
+
+/**
+ * @dev Interface of the ERC165 standard, as defined in the
+ * https://eips.ethereum.org/EIPS/eip-165[EIP].
+ *
+ * Implementers can declare support of contract interfaces, which can then be
+ * queried by others ({ERC165Checker}).
+ *
+ * For an implementation, see {ERC165}.
+ */
+interface IERC165 {
+ /**
+ * @dev Returns true if this contract implements the interface defined by
+ * `interfaceId`. See the corresponding
+ * https://eips.ethereum.org/EIPS/eip-165#how-interfaces-are-identified[EIP section]
+ * to learn more about how these ids are created.
+ *
+ * This function call must use less than 30 000 gas.
+ */
+ function supportsInterface(bytes4 interfaceId) external view returns (bool);
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/math/Math.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/math/Math.sol
new file mode 100644
index 00000000000..9681524529b
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/math/Math.sol
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/math/Math.sol)
+
+pragma solidity ^0.8.20;
+
+/**
+ * @dev Standard math utilities missing in the Solidity language.
+ */
+library Math {
+ /**
+ * @dev Muldiv operation overflow.
+ */
+ error MathOverflowedMulDiv();
+
+ enum Rounding {
+ Floor, // Toward negative infinity
+ Ceil, // Toward positive infinity
+ Trunc, // Toward zero
+ Expand // Away from zero
+ }
+
+ /**
+ * @dev Returns the addition of two unsigned integers, with an overflow flag.
+ */
+ function tryAdd(uint256 a, uint256 b) internal pure returns (bool, uint256) {
+ unchecked {
+ uint256 c = a + b;
+ if (c < a) return (false, 0);
+ return (true, c);
+ }
+ }
+
+ /**
+ * @dev Returns the subtraction of two unsigned integers, with an overflow flag.
+ */
+ function trySub(uint256 a, uint256 b) internal pure returns (bool, uint256) {
+ unchecked {
+ if (b > a) return (false, 0);
+ return (true, a - b);
+ }
+ }
+
+ /**
+ * @dev Returns the multiplication of two unsigned integers, with an overflow flag.
+ */
+ function tryMul(uint256 a, uint256 b) internal pure returns (bool, uint256) {
+ unchecked {
+ // Gas optimization: this is cheaper than requiring 'a' not being zero, but the
+ // benefit is lost if 'b' is also tested.
+ // See: https://github.com/OpenZeppelin/openzeppelin-contracts/pull/522
+ if (a == 0) return (true, 0);
+ uint256 c = a * b;
+ if (c / a != b) return (false, 0);
+ return (true, c);
+ }
+ }
+
+ /**
+ * @dev Returns the division of two unsigned integers, with a division by zero flag.
+ */
+ function tryDiv(uint256 a, uint256 b) internal pure returns (bool, uint256) {
+ unchecked {
+ if (b == 0) return (false, 0);
+ return (true, a / b);
+ }
+ }
+
+ /**
+ * @dev Returns the remainder of dividing two unsigned integers, with a division by zero flag.
+ */
+ function tryMod(uint256 a, uint256 b) internal pure returns (bool, uint256) {
+ unchecked {
+ if (b == 0) return (false, 0);
+ return (true, a % b);
+ }
+ }
+
+ /**
+ * @dev Returns the largest of two numbers.
+ */
+ function max(uint256 a, uint256 b) internal pure returns (uint256) {
+ return a > b ? a : b;
+ }
+
+ /**
+ * @dev Returns the smallest of two numbers.
+ */
+ function min(uint256 a, uint256 b) internal pure returns (uint256) {
+ return a < b ? a : b;
+ }
+
+ /**
+ * @dev Returns the average of two numbers. The result is rounded towards
+ * zero.
+ */
+ function average(uint256 a, uint256 b) internal pure returns (uint256) {
+ // (a + b) / 2 can overflow.
+ return (a & b) + (a ^ b) / 2;
+ }
+
+ /**
+ * @dev Returns the ceiling of the division of two numbers.
+ *
+ * This differs from standard division with `/` in that it rounds towards infinity instead
+ * of rounding towards zero.
+ */
+ function ceilDiv(uint256 a, uint256 b) internal pure returns (uint256) {
+ if (b == 0) {
+ // Guarantee the same behavior as in a regular Solidity division.
+ return a / b;
+ }
+
+ // (a + b - 1) / b can overflow on addition, so we distribute.
+ return a == 0 ? 0 : (a - 1) / b + 1;
+ }
+
+ /**
+ * @notice Calculates floor(x * y / denominator) with full precision. Throws if result overflows a uint256 or
+ * denominator == 0.
+ * @dev Original credit to Remco Bloemen under MIT license (https://xn--2-umb.com/21/muldiv) with further edits by
+ * Uniswap Labs also under MIT license.
+ */
+ function mulDiv(uint256 x, uint256 y, uint256 denominator) internal pure returns (uint256 result) {
+ unchecked {
+            // 512-bit multiply [prod1 prod0] = x * y. Compute the product mod 2^256 and mod 2^256 - 1, then
+            // use the Chinese Remainder Theorem to reconstruct the 512-bit result. The result is stored in two
+            // 256-bit variables such that product = prod1 * 2^256 + prod0.
+ uint256 prod0 = x * y; // Least significant 256 bits of the product
+ uint256 prod1; // Most significant 256 bits of the product
+ assembly {
+ let mm := mulmod(x, y, not(0))
+ prod1 := sub(sub(mm, prod0), lt(mm, prod0))
+ }
+
+ // Handle non-overflow cases, 256 by 256 division.
+ if (prod1 == 0) {
+ // Solidity will revert if denominator == 0, unlike the div opcode on its own.
+ // The surrounding unchecked block does not change this fact.
+ // See https://docs.soliditylang.org/en/latest/control-structures.html#checked-or-unchecked-arithmetic.
+ return prod0 / denominator;
+ }
+
+ // Make sure the result is less than 2^256. Also prevents denominator == 0.
+ if (denominator <= prod1) {
+ revert MathOverflowedMulDiv();
+ }
+
+ ///////////////////////////////////////////////
+ // 512 by 256 division.
+ ///////////////////////////////////////////////
+
+ // Make division exact by subtracting the remainder from [prod1 prod0].
+ uint256 remainder;
+ assembly {
+ // Compute remainder using mulmod.
+ remainder := mulmod(x, y, denominator)
+
+ // Subtract 256 bit number from 512 bit number.
+ prod1 := sub(prod1, gt(remainder, prod0))
+ prod0 := sub(prod0, remainder)
+ }
+
+ // Factor powers of two out of denominator and compute largest power of two divisor of denominator.
+ // Always >= 1. See https://cs.stackexchange.com/q/138556/92363.
+
+ uint256 twos = denominator & (0 - denominator);
+ assembly {
+ // Divide denominator by twos.
+ denominator := div(denominator, twos)
+
+ // Divide [prod1 prod0] by twos.
+ prod0 := div(prod0, twos)
+
+ // Flip twos such that it is 2^256 / twos. If twos is zero, then it becomes one.
+ twos := add(div(sub(0, twos), twos), 1)
+ }
+
+ // Shift in bits from prod1 into prod0.
+ prod0 |= prod1 * twos;
+
+ // Invert denominator mod 2^256. Now that denominator is an odd number, it has an inverse modulo 2^256 such
+ // that denominator * inv = 1 mod 2^256. Compute the inverse by starting with a seed that is correct for
+ // four bits. That is, denominator * inv = 1 mod 2^4.
+ uint256 inverse = (3 * denominator) ^ 2;
+
+ // Use the Newton-Raphson iteration to improve the precision. Thanks to Hensel's lifting lemma, this also
+ // works in modular arithmetic, doubling the correct bits in each step.
+ inverse *= 2 - denominator * inverse; // inverse mod 2^8
+ inverse *= 2 - denominator * inverse; // inverse mod 2^16
+ inverse *= 2 - denominator * inverse; // inverse mod 2^32
+ inverse *= 2 - denominator * inverse; // inverse mod 2^64
+ inverse *= 2 - denominator * inverse; // inverse mod 2^128
+ inverse *= 2 - denominator * inverse; // inverse mod 2^256
+
+ // Because the division is now exact we can divide by multiplying with the modular inverse of denominator.
+ // This will give us the correct result modulo 2^256. Since the preconditions guarantee that the outcome is
+ // less than 2^256, this is the final result. We don't need to compute the high bits of the result and prod1
+ // is no longer required.
+ result = prod0 * inverse;
+ return result;
+ }
+ }
+
+ /**
+ * @notice Calculates x * y / denominator with full precision, following the selected rounding direction.
+ */
+ function mulDiv(uint256 x, uint256 y, uint256 denominator, Rounding rounding) internal pure returns (uint256) {
+ uint256 result = mulDiv(x, y, denominator);
+ if (unsignedRoundsUp(rounding) && mulmod(x, y, denominator) > 0) {
+ result += 1;
+ }
+ return result;
+ }
+
+ /**
+ * @dev Returns the square root of a number. If the number is not a perfect square, the value is rounded
+ * towards zero.
+ *
+ * Inspired by Henry S. Warren, Jr.'s "Hacker's Delight" (Chapter 11).
+ */
+ function sqrt(uint256 a) internal pure returns (uint256) {
+ if (a == 0) {
+ return 0;
+ }
+
+ // For our first guess, we get the biggest power of 2 which is smaller than the square root of the target.
+ //
+ // We know that the "msb" (most significant bit) of our target number `a` is a power of 2 such that we have
+ // `msb(a) <= a < 2*msb(a)`. This value can be written `msb(a)=2**k` with `k=log2(a)`.
+ //
+ // This can be rewritten `2**log2(a) <= a < 2**(log2(a) + 1)`
+ // → `sqrt(2**k) <= sqrt(a) < sqrt(2**(k+1))`
+ // → `2**(k/2) <= sqrt(a) < 2**((k+1)/2) <= 2**(k/2 + 1)`
+ //
+ // Consequently, `2**(log2(a) / 2)` is a good first approximation of `sqrt(a)` with at least 1 correct bit.
+ uint256 result = 1 << (log2(a) >> 1);
+
+ // At this point `result` is an estimation with one bit of precision. We know the true value is a uint128,
+ // since it is the square root of a uint256. Newton's method converges quadratically (precision doubles at
+        // every iteration). We thus need at most 7 iterations to turn our partial result with one bit of precision
+ // into the expected uint128 result.
+ unchecked {
+ result = (result + a / result) >> 1;
+ result = (result + a / result) >> 1;
+ result = (result + a / result) >> 1;
+ result = (result + a / result) >> 1;
+ result = (result + a / result) >> 1;
+ result = (result + a / result) >> 1;
+ result = (result + a / result) >> 1;
+ return min(result, a / result);
+ }
+ }
+
+ /**
+ * @notice Calculates sqrt(a), following the selected rounding direction.
+ */
+ function sqrt(uint256 a, Rounding rounding) internal pure returns (uint256) {
+ unchecked {
+ uint256 result = sqrt(a);
+ return result + (unsignedRoundsUp(rounding) && result * result < a ? 1 : 0);
+ }
+ }
+
+ /**
+ * @dev Return the log in base 2 of a positive value rounded towards zero.
+ * Returns 0 if given 0.
+ */
+ function log2(uint256 value) internal pure returns (uint256) {
+ uint256 result = 0;
+ unchecked {
+ if (value >> 128 > 0) {
+ value >>= 128;
+ result += 128;
+ }
+ if (value >> 64 > 0) {
+ value >>= 64;
+ result += 64;
+ }
+ if (value >> 32 > 0) {
+ value >>= 32;
+ result += 32;
+ }
+ if (value >> 16 > 0) {
+ value >>= 16;
+ result += 16;
+ }
+ if (value >> 8 > 0) {
+ value >>= 8;
+ result += 8;
+ }
+ if (value >> 4 > 0) {
+ value >>= 4;
+ result += 4;
+ }
+ if (value >> 2 > 0) {
+ value >>= 2;
+ result += 2;
+ }
+ if (value >> 1 > 0) {
+ result += 1;
+ }
+ }
+ return result;
+ }
+
+ /**
+ * @dev Return the log in base 2, following the selected rounding direction, of a positive value.
+ * Returns 0 if given 0.
+ */
+ function log2(uint256 value, Rounding rounding) internal pure returns (uint256) {
+ unchecked {
+ uint256 result = log2(value);
+ return result + (unsignedRoundsUp(rounding) && 1 << result < value ? 1 : 0);
+ }
+ }
+
+ /**
+ * @dev Return the log in base 10 of a positive value rounded towards zero.
+ * Returns 0 if given 0.
+ */
+ function log10(uint256 value) internal pure returns (uint256) {
+ uint256 result = 0;
+ unchecked {
+ if (value >= 10 ** 64) {
+ value /= 10 ** 64;
+ result += 64;
+ }
+ if (value >= 10 ** 32) {
+ value /= 10 ** 32;
+ result += 32;
+ }
+ if (value >= 10 ** 16) {
+ value /= 10 ** 16;
+ result += 16;
+ }
+ if (value >= 10 ** 8) {
+ value /= 10 ** 8;
+ result += 8;
+ }
+ if (value >= 10 ** 4) {
+ value /= 10 ** 4;
+ result += 4;
+ }
+ if (value >= 10 ** 2) {
+ value /= 10 ** 2;
+ result += 2;
+ }
+ if (value >= 10 ** 1) {
+ result += 1;
+ }
+ }
+ return result;
+ }
+
+ /**
+ * @dev Return the log in base 10, following the selected rounding direction, of a positive value.
+ * Returns 0 if given 0.
+ */
+ function log10(uint256 value, Rounding rounding) internal pure returns (uint256) {
+ unchecked {
+ uint256 result = log10(value);
+ return result + (unsignedRoundsUp(rounding) && 10 ** result < value ? 1 : 0);
+ }
+ }
+
+ /**
+ * @dev Return the log in base 256 of a positive value rounded towards zero.
+ * Returns 0 if given 0.
+ *
+ * Adding one to the result gives the number of pairs of hex symbols needed to represent `value` as a hex string.
+ */
+ function log256(uint256 value) internal pure returns (uint256) {
+ uint256 result = 0;
+ unchecked {
+ if (value >> 128 > 0) {
+ value >>= 128;
+ result += 16;
+ }
+ if (value >> 64 > 0) {
+ value >>= 64;
+ result += 8;
+ }
+ if (value >> 32 > 0) {
+ value >>= 32;
+ result += 4;
+ }
+ if (value >> 16 > 0) {
+ value >>= 16;
+ result += 2;
+ }
+ if (value >> 8 > 0) {
+ result += 1;
+ }
+ }
+ return result;
+ }
+
+ /**
+ * @dev Return the log in base 256, following the selected rounding direction, of a positive value.
+ * Returns 0 if given 0.
+ */
+ function log256(uint256 value, Rounding rounding) internal pure returns (uint256) {
+ unchecked {
+ uint256 result = log256(value);
+ return result + (unsignedRoundsUp(rounding) && 1 << (result << 3) < value ? 1 : 0);
+ }
+ }
+
+ /**
+ * @dev Returns whether a provided rounding mode is considered rounding up for unsigned integers.
+ */
+ function unsignedRoundsUp(Rounding rounding) internal pure returns (bool) {
+ return uint8(rounding) % 2 == 1;
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/math/SafeCast.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/math/SafeCast.sol
new file mode 100644
index 00000000000..0ed458b43c2
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/math/SafeCast.sol
@@ -0,0 +1,1153 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/math/SafeCast.sol)
+// This file was procedurally generated from scripts/generate/templates/SafeCast.js.
+
+pragma solidity ^0.8.20;
+
+/**
+ * @dev Wrappers over Solidity's uintXX/intXX casting operators with added overflow
+ * checks.
+ *
+ * Downcasting from uint256/int256 in Solidity does not revert on overflow. This can
+ * easily result in undesired exploitation or bugs, since developers usually
+ * assume that overflows raise errors. `SafeCast` restores this intuition by
+ * reverting the transaction when such an operation overflows.
+ *
+ * Using this library instead of the unchecked operations eliminates an entire
+ * class of bugs, so it's recommended to use it always.
+ */
+library SafeCast {
+ /**
+     * @dev Value doesn't fit in a uint of `bits` size.
+ */
+ error SafeCastOverflowedUintDowncast(uint8 bits, uint256 value);
+
+ /**
+     * @dev An int value doesn't fit in a uint of `bits` size.
+ */
+ error SafeCastOverflowedIntToUint(int256 value);
+
+ /**
+ * @dev Value doesn't fit in an int of `bits` size.
+ */
+ error SafeCastOverflowedIntDowncast(uint8 bits, int256 value);
+
+ /**
+     * @dev A uint value doesn't fit in an int of `bits` size.
+ */
+ error SafeCastOverflowedUintToInt(uint256 value);
+
+ /**
+ * @dev Returns the downcasted uint248 from uint256, reverting on
+ * overflow (when the input is greater than largest uint248).
+ *
+ * Counterpart to Solidity's `uint248` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 248 bits
+ */
+ function toUint248(uint256 value) internal pure returns (uint248) {
+ if (value > type(uint248).max) {
+ revert SafeCastOverflowedUintDowncast(248, value);
+ }
+ return uint248(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint240 from uint256, reverting on
+ * overflow (when the input is greater than largest uint240).
+ *
+ * Counterpart to Solidity's `uint240` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 240 bits
+ */
+ function toUint240(uint256 value) internal pure returns (uint240) {
+ if (value > type(uint240).max) {
+ revert SafeCastOverflowedUintDowncast(240, value);
+ }
+ return uint240(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint232 from uint256, reverting on
+ * overflow (when the input is greater than largest uint232).
+ *
+ * Counterpart to Solidity's `uint232` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 232 bits
+ */
+ function toUint232(uint256 value) internal pure returns (uint232) {
+ if (value > type(uint232).max) {
+ revert SafeCastOverflowedUintDowncast(232, value);
+ }
+ return uint232(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint224 from uint256, reverting on
+ * overflow (when the input is greater than largest uint224).
+ *
+ * Counterpart to Solidity's `uint224` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 224 bits
+ */
+ function toUint224(uint256 value) internal pure returns (uint224) {
+ if (value > type(uint224).max) {
+ revert SafeCastOverflowedUintDowncast(224, value);
+ }
+ return uint224(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint216 from uint256, reverting on
+ * overflow (when the input is greater than largest uint216).
+ *
+ * Counterpart to Solidity's `uint216` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 216 bits
+ */
+ function toUint216(uint256 value) internal pure returns (uint216) {
+ if (value > type(uint216).max) {
+ revert SafeCastOverflowedUintDowncast(216, value);
+ }
+ return uint216(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint208 from uint256, reverting on
+ * overflow (when the input is greater than largest uint208).
+ *
+ * Counterpart to Solidity's `uint208` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 208 bits
+ */
+ function toUint208(uint256 value) internal pure returns (uint208) {
+ if (value > type(uint208).max) {
+ revert SafeCastOverflowedUintDowncast(208, value);
+ }
+ return uint208(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint200 from uint256, reverting on
+ * overflow (when the input is greater than largest uint200).
+ *
+ * Counterpart to Solidity's `uint200` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 200 bits
+ */
+ function toUint200(uint256 value) internal pure returns (uint200) {
+ if (value > type(uint200).max) {
+ revert SafeCastOverflowedUintDowncast(200, value);
+ }
+ return uint200(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint192 from uint256, reverting on
+ * overflow (when the input is greater than largest uint192).
+ *
+ * Counterpart to Solidity's `uint192` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 192 bits
+ */
+ function toUint192(uint256 value) internal pure returns (uint192) {
+ if (value > type(uint192).max) {
+ revert SafeCastOverflowedUintDowncast(192, value);
+ }
+ return uint192(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint184 from uint256, reverting on
+ * overflow (when the input is greater than largest uint184).
+ *
+ * Counterpart to Solidity's `uint184` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 184 bits
+ */
+ function toUint184(uint256 value) internal pure returns (uint184) {
+ if (value > type(uint184).max) {
+ revert SafeCastOverflowedUintDowncast(184, value);
+ }
+ return uint184(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint176 from uint256, reverting on
+ * overflow (when the input is greater than largest uint176).
+ *
+ * Counterpart to Solidity's `uint176` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 176 bits
+ */
+ function toUint176(uint256 value) internal pure returns (uint176) {
+ if (value > type(uint176).max) {
+ revert SafeCastOverflowedUintDowncast(176, value);
+ }
+ return uint176(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint168 from uint256, reverting on
+ * overflow (when the input is greater than largest uint168).
+ *
+ * Counterpart to Solidity's `uint168` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 168 bits
+ */
+ function toUint168(uint256 value) internal pure returns (uint168) {
+ if (value > type(uint168).max) {
+ revert SafeCastOverflowedUintDowncast(168, value);
+ }
+ return uint168(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint160 from uint256, reverting on
+ * overflow (when the input is greater than largest uint160).
+ *
+ * Counterpart to Solidity's `uint160` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 160 bits
+ */
+ function toUint160(uint256 value) internal pure returns (uint160) {
+ if (value > type(uint160).max) {
+ revert SafeCastOverflowedUintDowncast(160, value);
+ }
+ return uint160(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint152 from uint256, reverting on
+ * overflow (when the input is greater than largest uint152).
+ *
+ * Counterpart to Solidity's `uint152` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 152 bits
+ */
+ function toUint152(uint256 value) internal pure returns (uint152) {
+ if (value > type(uint152).max) {
+ revert SafeCastOverflowedUintDowncast(152, value);
+ }
+ return uint152(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint144 from uint256, reverting on
+ * overflow (when the input is greater than largest uint144).
+ *
+ * Counterpart to Solidity's `uint144` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 144 bits
+ */
+ function toUint144(uint256 value) internal pure returns (uint144) {
+ if (value > type(uint144).max) {
+ revert SafeCastOverflowedUintDowncast(144, value);
+ }
+ return uint144(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint136 from uint256, reverting on
+ * overflow (when the input is greater than largest uint136).
+ *
+ * Counterpart to Solidity's `uint136` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 136 bits
+ */
+ function toUint136(uint256 value) internal pure returns (uint136) {
+ if (value > type(uint136).max) {
+ revert SafeCastOverflowedUintDowncast(136, value);
+ }
+ return uint136(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint128 from uint256, reverting on
+ * overflow (when the input is greater than largest uint128).
+ *
+ * Counterpart to Solidity's `uint128` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 128 bits
+ */
+ function toUint128(uint256 value) internal pure returns (uint128) {
+ if (value > type(uint128).max) {
+ revert SafeCastOverflowedUintDowncast(128, value);
+ }
+ return uint128(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint120 from uint256, reverting on
+ * overflow (when the input is greater than largest uint120).
+ *
+ * Counterpart to Solidity's `uint120` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 120 bits
+ */
+ function toUint120(uint256 value) internal pure returns (uint120) {
+ if (value > type(uint120).max) {
+ revert SafeCastOverflowedUintDowncast(120, value);
+ }
+ return uint120(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint112 from uint256, reverting on
+ * overflow (when the input is greater than largest uint112).
+ *
+ * Counterpart to Solidity's `uint112` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 112 bits
+ */
+ function toUint112(uint256 value) internal pure returns (uint112) {
+ if (value > type(uint112).max) {
+ revert SafeCastOverflowedUintDowncast(112, value);
+ }
+ return uint112(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint104 from uint256, reverting on
+ * overflow (when the input is greater than largest uint104).
+ *
+ * Counterpart to Solidity's `uint104` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 104 bits
+ */
+ function toUint104(uint256 value) internal pure returns (uint104) {
+ if (value > type(uint104).max) {
+ revert SafeCastOverflowedUintDowncast(104, value);
+ }
+ return uint104(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint96 from uint256, reverting on
+ * overflow (when the input is greater than largest uint96).
+ *
+ * Counterpart to Solidity's `uint96` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 96 bits
+ */
+ function toUint96(uint256 value) internal pure returns (uint96) {
+ if (value > type(uint96).max) {
+ revert SafeCastOverflowedUintDowncast(96, value);
+ }
+ return uint96(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint88 from uint256, reverting on
+ * overflow (when the input is greater than largest uint88).
+ *
+ * Counterpart to Solidity's `uint88` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 88 bits
+ */
+ function toUint88(uint256 value) internal pure returns (uint88) {
+ if (value > type(uint88).max) {
+ revert SafeCastOverflowedUintDowncast(88, value);
+ }
+ return uint88(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint80 from uint256, reverting on
+ * overflow (when the input is greater than largest uint80).
+ *
+ * Counterpart to Solidity's `uint80` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 80 bits
+ */
+ function toUint80(uint256 value) internal pure returns (uint80) {
+ if (value > type(uint80).max) {
+ revert SafeCastOverflowedUintDowncast(80, value);
+ }
+ return uint80(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint72 from uint256, reverting on
+ * overflow (when the input is greater than largest uint72).
+ *
+ * Counterpart to Solidity's `uint72` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 72 bits
+ */
+ function toUint72(uint256 value) internal pure returns (uint72) {
+ if (value > type(uint72).max) {
+ revert SafeCastOverflowedUintDowncast(72, value);
+ }
+ return uint72(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint64 from uint256, reverting on
+ * overflow (when the input is greater than largest uint64).
+ *
+ * Counterpart to Solidity's `uint64` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 64 bits
+ */
+ function toUint64(uint256 value) internal pure returns (uint64) {
+ if (value > type(uint64).max) {
+ revert SafeCastOverflowedUintDowncast(64, value);
+ }
+ return uint64(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint56 from uint256, reverting on
+ * overflow (when the input is greater than largest uint56).
+ *
+ * Counterpart to Solidity's `uint56` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 56 bits
+ */
+ function toUint56(uint256 value) internal pure returns (uint56) {
+ if (value > type(uint56).max) {
+ revert SafeCastOverflowedUintDowncast(56, value);
+ }
+ return uint56(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint48 from uint256, reverting on
+ * overflow (when the input is greater than largest uint48).
+ *
+ * Counterpart to Solidity's `uint48` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 48 bits
+ */
+ function toUint48(uint256 value) internal pure returns (uint48) {
+ if (value > type(uint48).max) {
+ revert SafeCastOverflowedUintDowncast(48, value);
+ }
+ return uint48(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint40 from uint256, reverting on
+ * overflow (when the input is greater than largest uint40).
+ *
+ * Counterpart to Solidity's `uint40` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 40 bits
+ */
+ function toUint40(uint256 value) internal pure returns (uint40) {
+ if (value > type(uint40).max) {
+ revert SafeCastOverflowedUintDowncast(40, value);
+ }
+ return uint40(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint32 from uint256, reverting on
+ * overflow (when the input is greater than largest uint32).
+ *
+ * Counterpart to Solidity's `uint32` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 32 bits
+ */
+ function toUint32(uint256 value) internal pure returns (uint32) {
+ if (value > type(uint32).max) {
+ revert SafeCastOverflowedUintDowncast(32, value);
+ }
+ return uint32(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint24 from uint256, reverting on
+ * overflow (when the input is greater than largest uint24).
+ *
+ * Counterpart to Solidity's `uint24` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 24 bits
+ */
+ function toUint24(uint256 value) internal pure returns (uint24) {
+ if (value > type(uint24).max) {
+ revert SafeCastOverflowedUintDowncast(24, value);
+ }
+ return uint24(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint16 from uint256, reverting on
+ * overflow (when the input is greater than largest uint16).
+ *
+ * Counterpart to Solidity's `uint16` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 16 bits
+ */
+ function toUint16(uint256 value) internal pure returns (uint16) {
+ if (value > type(uint16).max) {
+ revert SafeCastOverflowedUintDowncast(16, value);
+ }
+ return uint16(value);
+ }
+
+ /**
+ * @dev Returns the downcasted uint8 from uint256, reverting on
+ * overflow (when the input is greater than largest uint8).
+ *
+ * Counterpart to Solidity's `uint8` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 8 bits
+ */
+ function toUint8(uint256 value) internal pure returns (uint8) {
+ if (value > type(uint8).max) {
+ revert SafeCastOverflowedUintDowncast(8, value);
+ }
+ return uint8(value);
+ }
+
+ /**
+ * @dev Converts a signed int256 into an unsigned uint256.
+ *
+ * Requirements:
+ *
+ * - input must be greater than or equal to 0.
+ */
+ function toUint256(int256 value) internal pure returns (uint256) {
+ if (value < 0) {
+ revert SafeCastOverflowedIntToUint(value);
+ }
+ return uint256(value);
+ }
+
+ /**
+ * @dev Returns the downcasted int248 from int256, reverting on
+ * overflow (when the input is less than smallest int248 or
+ * greater than largest int248).
+ *
+ * Counterpart to Solidity's `int248` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 248 bits
+ */
+ function toInt248(int256 value) internal pure returns (int248 downcasted) {
+ downcasted = int248(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(248, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int240 from int256, reverting on
+ * overflow (when the input is less than smallest int240 or
+ * greater than largest int240).
+ *
+ * Counterpart to Solidity's `int240` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 240 bits
+ */
+ function toInt240(int256 value) internal pure returns (int240 downcasted) {
+ downcasted = int240(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(240, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int232 from int256, reverting on
+ * overflow (when the input is less than smallest int232 or
+ * greater than largest int232).
+ *
+ * Counterpart to Solidity's `int232` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 232 bits
+ */
+ function toInt232(int256 value) internal pure returns (int232 downcasted) {
+ downcasted = int232(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(232, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int224 from int256, reverting on
+ * overflow (when the input is less than smallest int224 or
+ * greater than largest int224).
+ *
+ * Counterpart to Solidity's `int224` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 224 bits
+ */
+ function toInt224(int256 value) internal pure returns (int224 downcasted) {
+ downcasted = int224(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(224, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int216 from int256, reverting on
+ * overflow (when the input is less than smallest int216 or
+ * greater than largest int216).
+ *
+ * Counterpart to Solidity's `int216` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 216 bits
+ */
+ function toInt216(int256 value) internal pure returns (int216 downcasted) {
+ downcasted = int216(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(216, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int208 from int256, reverting on
+ * overflow (when the input is less than smallest int208 or
+ * greater than largest int208).
+ *
+ * Counterpart to Solidity's `int208` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 208 bits
+ */
+ function toInt208(int256 value) internal pure returns (int208 downcasted) {
+ downcasted = int208(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(208, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int200 from int256, reverting on
+ * overflow (when the input is less than smallest int200 or
+ * greater than largest int200).
+ *
+ * Counterpart to Solidity's `int200` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 200 bits
+ */
+ function toInt200(int256 value) internal pure returns (int200 downcasted) {
+ downcasted = int200(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(200, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int192 from int256, reverting on
+ * overflow (when the input is less than smallest int192 or
+ * greater than largest int192).
+ *
+ * Counterpart to Solidity's `int192` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 192 bits
+ */
+ function toInt192(int256 value) internal pure returns (int192 downcasted) {
+ downcasted = int192(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(192, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int184 from int256, reverting on
+ * overflow (when the input is less than smallest int184 or
+ * greater than largest int184).
+ *
+ * Counterpart to Solidity's `int184` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 184 bits
+ */
+ function toInt184(int256 value) internal pure returns (int184 downcasted) {
+ downcasted = int184(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(184, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int176 from int256, reverting on
+ * overflow (when the input is less than smallest int176 or
+ * greater than largest int176).
+ *
+ * Counterpart to Solidity's `int176` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 176 bits
+ */
+ function toInt176(int256 value) internal pure returns (int176 downcasted) {
+ downcasted = int176(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(176, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int168 from int256, reverting on
+ * overflow (when the input is less than smallest int168 or
+ * greater than largest int168).
+ *
+ * Counterpart to Solidity's `int168` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 168 bits
+ */
+ function toInt168(int256 value) internal pure returns (int168 downcasted) {
+ downcasted = int168(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(168, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int160 from int256, reverting on
+ * overflow (when the input is less than smallest int160 or
+ * greater than largest int160).
+ *
+ * Counterpart to Solidity's `int160` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 160 bits
+ */
+ function toInt160(int256 value) internal pure returns (int160 downcasted) {
+ downcasted = int160(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(160, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int152 from int256, reverting on
+ * overflow (when the input is less than smallest int152 or
+ * greater than largest int152).
+ *
+ * Counterpart to Solidity's `int152` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 152 bits
+ */
+ function toInt152(int256 value) internal pure returns (int152 downcasted) {
+ downcasted = int152(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(152, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int144 from int256, reverting on
+ * overflow (when the input is less than smallest int144 or
+ * greater than largest int144).
+ *
+ * Counterpart to Solidity's `int144` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 144 bits
+ */
+ function toInt144(int256 value) internal pure returns (int144 downcasted) {
+ downcasted = int144(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(144, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int136 from int256, reverting on
+ * overflow (when the input is less than smallest int136 or
+ * greater than largest int136).
+ *
+ * Counterpart to Solidity's `int136` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 136 bits
+ */
+ function toInt136(int256 value) internal pure returns (int136 downcasted) {
+ downcasted = int136(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(136, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int128 from int256, reverting on
+ * overflow (when the input is less than smallest int128 or
+ * greater than largest int128).
+ *
+ * Counterpart to Solidity's `int128` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 128 bits
+ */
+ function toInt128(int256 value) internal pure returns (int128 downcasted) {
+ downcasted = int128(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(128, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int120 from int256, reverting on
+ * overflow (when the input is less than smallest int120 or
+ * greater than largest int120).
+ *
+ * Counterpart to Solidity's `int120` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 120 bits
+ */
+ function toInt120(int256 value) internal pure returns (int120 downcasted) {
+ downcasted = int120(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(120, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int112 from int256, reverting on
+ * overflow (when the input is less than smallest int112 or
+ * greater than largest int112).
+ *
+ * Counterpart to Solidity's `int112` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 112 bits
+ */
+ function toInt112(int256 value) internal pure returns (int112 downcasted) {
+ downcasted = int112(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(112, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int104 from int256, reverting on
+ * overflow (when the input is less than smallest int104 or
+ * greater than largest int104).
+ *
+ * Counterpart to Solidity's `int104` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 104 bits
+ */
+ function toInt104(int256 value) internal pure returns (int104 downcasted) {
+ downcasted = int104(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(104, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int96 from int256, reverting on
+ * overflow (when the input is less than smallest int96 or
+ * greater than largest int96).
+ *
+ * Counterpart to Solidity's `int96` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 96 bits
+ */
+ function toInt96(int256 value) internal pure returns (int96 downcasted) {
+ downcasted = int96(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(96, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int88 from int256, reverting on
+ * overflow (when the input is less than smallest int88 or
+ * greater than largest int88).
+ *
+ * Counterpart to Solidity's `int88` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 88 bits
+ */
+ function toInt88(int256 value) internal pure returns (int88 downcasted) {
+ downcasted = int88(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(88, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int80 from int256, reverting on
+ * overflow (when the input is less than smallest int80 or
+ * greater than largest int80).
+ *
+ * Counterpart to Solidity's `int80` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 80 bits
+ */
+ function toInt80(int256 value) internal pure returns (int80 downcasted) {
+ downcasted = int80(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(80, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int72 from int256, reverting on
+ * overflow (when the input is less than smallest int72 or
+ * greater than largest int72).
+ *
+ * Counterpart to Solidity's `int72` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 72 bits
+ */
+ function toInt72(int256 value) internal pure returns (int72 downcasted) {
+ downcasted = int72(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(72, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int64 from int256, reverting on
+ * overflow (when the input is less than smallest int64 or
+ * greater than largest int64).
+ *
+ * Counterpart to Solidity's `int64` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 64 bits
+ */
+ function toInt64(int256 value) internal pure returns (int64 downcasted) {
+ downcasted = int64(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(64, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int56 from int256, reverting on
+ * overflow (when the input is less than smallest int56 or
+ * greater than largest int56).
+ *
+ * Counterpart to Solidity's `int56` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 56 bits
+ */
+ function toInt56(int256 value) internal pure returns (int56 downcasted) {
+ downcasted = int56(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(56, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int48 from int256, reverting on
+ * overflow (when the input is less than smallest int48 or
+ * greater than largest int48).
+ *
+ * Counterpart to Solidity's `int48` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 48 bits
+ */
+ function toInt48(int256 value) internal pure returns (int48 downcasted) {
+ downcasted = int48(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(48, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int40 from int256, reverting on
+ * overflow (when the input is less than smallest int40 or
+ * greater than largest int40).
+ *
+ * Counterpart to Solidity's `int40` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 40 bits
+ */
+ function toInt40(int256 value) internal pure returns (int40 downcasted) {
+ downcasted = int40(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(40, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int32 from int256, reverting on
+ * overflow (when the input is less than smallest int32 or
+ * greater than largest int32).
+ *
+ * Counterpart to Solidity's `int32` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 32 bits
+ */
+ function toInt32(int256 value) internal pure returns (int32 downcasted) {
+ downcasted = int32(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(32, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int24 from int256, reverting on
+ * overflow (when the input is less than smallest int24 or
+ * greater than largest int24).
+ *
+ * Counterpart to Solidity's `int24` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 24 bits
+ */
+ function toInt24(int256 value) internal pure returns (int24 downcasted) {
+ downcasted = int24(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(24, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int16 from int256, reverting on
+ * overflow (when the input is less than smallest int16 or
+ * greater than largest int16).
+ *
+ * Counterpart to Solidity's `int16` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 16 bits
+ */
+ function toInt16(int256 value) internal pure returns (int16 downcasted) {
+ downcasted = int16(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(16, value);
+ }
+ }
+
+ /**
+ * @dev Returns the downcasted int8 from int256, reverting on
+ * overflow (when the input is less than smallest int8 or
+ * greater than largest int8).
+ *
+ * Counterpart to Solidity's `int8` operator.
+ *
+ * Requirements:
+ *
+ * - input must fit into 8 bits
+ */
+ function toInt8(int256 value) internal pure returns (int8 downcasted) {
+ downcasted = int8(value);
+ if (downcasted != value) {
+ revert SafeCastOverflowedIntDowncast(8, value);
+ }
+ }
+
+ /**
+ * @dev Converts an unsigned uint256 into a signed int256.
+ *
+ * Requirements:
+ *
+ * - input must be less than or equal to maxInt256.
+ */
+ function toInt256(uint256 value) internal pure returns (int256) {
+ // Note: Unsafe cast below is okay because `type(int256).max` is guaranteed to be positive
+ if (value > uint256(type(int256).max)) {
+ revert SafeCastOverflowedUintToInt(value);
+ }
+ return int256(value);
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/math/SignedMath.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/math/SignedMath.sol
new file mode 100644
index 00000000000..66a61516292
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/math/SignedMath.sol
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/math/SignedMath.sol)
+
+pragma solidity ^0.8.20;
+
+/**
+ * @dev Standard signed math utilities missing in the Solidity language.
+ */
+library SignedMath {
+ /**
+ * @dev Returns the largest of two signed numbers.
+ */
+ function max(int256 a, int256 b) internal pure returns (int256) {
+ return a > b ? a : b;
+ }
+
+ /**
+ * @dev Returns the smallest of two signed numbers.
+ */
+ function min(int256 a, int256 b) internal pure returns (int256) {
+ return a < b ? a : b;
+ }
+
+ /**
+ * @dev Returns the average of two signed numbers without overflow.
+ * The result is rounded towards zero.
+ */
+ function average(int256 a, int256 b) internal pure returns (int256) {
+ // Formula from the book "Hacker's Delight"
+ int256 x = (a & b) + ((a ^ b) >> 1);
+ return x + (int256(uint256(x) >> 255) & (a ^ b));
+ }
+
+ /**
+ * @dev Returns the absolute unsigned value of a signed value.
+ */
+ function abs(int256 n) internal pure returns (uint256) {
+ unchecked {
+ // must be unchecked in order to support `n = type(int256).min`
+ return uint256(n >= 0 ? n : -n);
+ }
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/structs/EnumerableMap.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/structs/EnumerableMap.sol
new file mode 100644
index 00000000000..929ae7c536e
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/structs/EnumerableMap.sol
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/structs/EnumerableMap.sol)
+// This file was procedurally generated from scripts/generate/templates/EnumerableMap.js.
+
+pragma solidity ^0.8.20;
+
+import {EnumerableSet} from "./EnumerableSet.sol";
+
+/**
+ * @dev Library for managing an enumerable variant of Solidity's
+ * https://solidity.readthedocs.io/en/latest/types.html#mapping-types[`mapping`]
+ * type.
+ *
+ * Maps have the following properties:
+ *
+ * - Entries are added, removed, and checked for existence in constant time
+ * (O(1)).
+ * - Entries are enumerated in O(n). No guarantees are made on the ordering.
+ *
+ * ```solidity
+ * contract Example {
+ * // Add the library methods
+ * using EnumerableMap for EnumerableMap.UintToAddressMap;
+ *
+ * // Declare a set state variable
+ * EnumerableMap.UintToAddressMap private myMap;
+ * }
+ * ```
+ *
+ * The following map types are supported:
+ *
+ * - `uint256 -> address` (`UintToAddressMap`) since v3.0.0
+ * - `address -> uint256` (`AddressToUintMap`) since v4.6.0
+ * - `bytes32 -> bytes32` (`Bytes32ToBytes32Map`) since v4.6.0
+ * - `uint256 -> uint256` (`UintToUintMap`) since v4.7.0
+ * - `bytes32 -> uint256` (`Bytes32ToUintMap`) since v4.7.0
+ *
+ * [WARNING]
+ * ====
+ * Trying to delete such a structure from storage will likely result in data corruption, rendering the structure
+ * unusable.
+ * See https://github.com/ethereum/solidity/pull/11843[ethereum/solidity#11843] for more info.
+ *
+ * In order to clean an EnumerableMap, you can either remove all elements one by one or create a fresh instance using an
+ * array of EnumerableMap.
+ * ====
+ */
+library EnumerableMap {
+ using EnumerableSet for EnumerableSet.Bytes32Set;
+
+ // To implement this library for multiple types with as little code repetition as possible, we write it in
+ // terms of a generic Map type with bytes32 keys and values. The Map implementation uses private functions,
+ // and user-facing implementations such as `UintToAddressMap` are just wrappers around the underlying Map.
+ // This means that we can only create new EnumerableMaps for types that fit in bytes32.
+
+ /**
+ * @dev Query for a nonexistent map key.
+ */
+ error EnumerableMapNonexistentKey(bytes32 key);
+
+ struct Bytes32ToBytes32Map {
+ // Storage of keys
+ EnumerableSet.Bytes32Set _keys;
+ mapping(bytes32 key => bytes32) _values;
+ }
+
+ /**
+ * @dev Adds a key-value pair to a map, or updates the value for an existing
+ * key. O(1).
+ *
+ * Returns true if the key was added to the map, that is if it was not
+ * already present.
+ */
+ function set(Bytes32ToBytes32Map storage map, bytes32 key, bytes32 value) internal returns (bool) {
+ map._values[key] = value;
+ return map._keys.add(key);
+ }
+
+ /**
+ * @dev Removes a key-value pair from a map. O(1).
+ *
+ * Returns true if the key was removed from the map, that is if it was present.
+ */
+ function remove(Bytes32ToBytes32Map storage map, bytes32 key) internal returns (bool) {
+ delete map._values[key];
+ return map._keys.remove(key);
+ }
+
+ /**
+ * @dev Returns true if the key is in the map. O(1).
+ */
+ function contains(Bytes32ToBytes32Map storage map, bytes32 key) internal view returns (bool) {
+ return map._keys.contains(key);
+ }
+
+ /**
+ * @dev Returns the number of key-value pairs in the map. O(1).
+ */
+ function length(Bytes32ToBytes32Map storage map) internal view returns (uint256) {
+ return map._keys.length();
+ }
+
+ /**
+ * @dev Returns the key-value pair stored at position `index` in the map. O(1).
+ *
+ * Note that there are no guarantees on the ordering of entries inside the
+ * array, and it may change when more entries are added or removed.
+ *
+ * Requirements:
+ *
+ * - `index` must be strictly less than {length}.
+ */
+ function at(Bytes32ToBytes32Map storage map, uint256 index) internal view returns (bytes32, bytes32) {
+ bytes32 key = map._keys.at(index);
+ return (key, map._values[key]);
+ }
+
+ /**
+ * @dev Tries to return the value associated with `key`. O(1).
+ * Does not revert if `key` is not in the map.
+ */
+ function tryGet(Bytes32ToBytes32Map storage map, bytes32 key) internal view returns (bool, bytes32) {
+ bytes32 value = map._values[key];
+ if (value == bytes32(0)) {
+ return (contains(map, key), bytes32(0));
+ } else {
+ return (true, value);
+ }
+ }
+
+ /**
+ * @dev Returns the value associated with `key`. O(1).
+ *
+ * Requirements:
+ *
+ * - `key` must be in the map.
+ */
+ function get(Bytes32ToBytes32Map storage map, bytes32 key) internal view returns (bytes32) {
+ bytes32 value = map._values[key];
+ if (value == 0 && !contains(map, key)) {
+ revert EnumerableMapNonexistentKey(key);
+ }
+ return value;
+ }
+
+ /**
+ * @dev Returns an array containing all the keys
+ *
+ * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed
+ * to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that
+ * this function has an unbounded cost, and using it as part of a state-changing function may render the function
+ * uncallable if the map grows to a point where copying to memory consumes too much gas to fit in a block.
+ */
+ function keys(Bytes32ToBytes32Map storage map) internal view returns (bytes32[] memory) {
+ return map._keys.values();
+ }
+
+ // UintToUintMap
+
+ struct UintToUintMap {
+ Bytes32ToBytes32Map _inner;
+ }
+
+ /**
+ * @dev Adds a key-value pair to a map, or updates the value for an existing
+ * key. O(1).
+ *
+ * Returns true if the key was added to the map, that is if it was not
+ * already present.
+ */
+ function set(UintToUintMap storage map, uint256 key, uint256 value) internal returns (bool) {
+ return set(map._inner, bytes32(key), bytes32(value));
+ }
+
+ /**
+ * @dev Removes a value from a map. O(1).
+ *
+ * Returns true if the key was removed from the map, that is if it was present.
+ */
+ function remove(UintToUintMap storage map, uint256 key) internal returns (bool) {
+ return remove(map._inner, bytes32(key));
+ }
+
+ /**
+ * @dev Returns true if the key is in the map. O(1).
+ */
+ function contains(UintToUintMap storage map, uint256 key) internal view returns (bool) {
+ return contains(map._inner, bytes32(key));
+ }
+
+ /**
+ * @dev Returns the number of elements in the map. O(1).
+ */
+ function length(UintToUintMap storage map) internal view returns (uint256) {
+ return length(map._inner);
+ }
+
+ /**
+ * @dev Returns the element stored at position `index` in the map. O(1).
+ * Note that there are no guarantees on the ordering of values inside the
+ * array, and it may change when more values are added or removed.
+ *
+ * Requirements:
+ *
+ * - `index` must be strictly less than {length}.
+ */
+ function at(UintToUintMap storage map, uint256 index) internal view returns (uint256, uint256) {
+ (bytes32 key, bytes32 value) = at(map._inner, index);
+ return (uint256(key), uint256(value));
+ }
+
+ /**
+ * @dev Tries to return the value associated with `key`. O(1).
+ * Does not revert if `key` is not in the map.
+ */
+ function tryGet(UintToUintMap storage map, uint256 key) internal view returns (bool, uint256) {
+ (bool success, bytes32 value) = tryGet(map._inner, bytes32(key));
+ return (success, uint256(value));
+ }
+
+ /**
+ * @dev Returns the value associated with `key`. O(1).
+ *
+ * Requirements:
+ *
+ * - `key` must be in the map.
+ */
+ function get(UintToUintMap storage map, uint256 key) internal view returns (uint256) {
+ return uint256(get(map._inner, bytes32(key)));
+ }
+
+ /**
+ * @dev Returns an array containing all the keys
+ *
+ * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed
+ * to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that
+ * this function has an unbounded cost, and using it as part of a state-changing function may render the function
+ * uncallable if the map grows to a point where copying to memory consumes too much gas to fit in a block.
+ */
+ function keys(UintToUintMap storage map) internal view returns (uint256[] memory) {
+ bytes32[] memory store = keys(map._inner);
+ uint256[] memory result;
+
+ /// @solidity memory-safe-assembly
+ assembly {
+ result := store
+ }
+
+ return result;
+ }
+
+ // UintToAddressMap
+
+ struct UintToAddressMap {
+ Bytes32ToBytes32Map _inner;
+ }
+
+ /**
+ * @dev Adds a key-value pair to a map, or updates the value for an existing
+ * key. O(1).
+ *
+ * Returns true if the key was added to the map, that is if it was not
+ * already present.
+ */
+ function set(UintToAddressMap storage map, uint256 key, address value) internal returns (bool) {
+ return set(map._inner, bytes32(key), bytes32(uint256(uint160(value))));
+ }
+
+ /**
+ * @dev Removes a value from a map. O(1).
+ *
+ * Returns true if the key was removed from the map, that is if it was present.
+ */
+ function remove(UintToAddressMap storage map, uint256 key) internal returns (bool) {
+ return remove(map._inner, bytes32(key));
+ }
+
+ /**
+ * @dev Returns true if the key is in the map. O(1).
+ */
+ function contains(UintToAddressMap storage map, uint256 key) internal view returns (bool) {
+ return contains(map._inner, bytes32(key));
+ }
+
+ /**
+ * @dev Returns the number of elements in the map. O(1).
+ */
+ function length(UintToAddressMap storage map) internal view returns (uint256) {
+ return length(map._inner);
+ }
+
+ /**
+ * @dev Returns the element stored at position `index` in the map. O(1).
+ * Note that there are no guarantees on the ordering of values inside the
+ * array, and it may change when more values are added or removed.
+ *
+ * Requirements:
+ *
+ * - `index` must be strictly less than {length}.
+ */
+ function at(UintToAddressMap storage map, uint256 index) internal view returns (uint256, address) {
+ (bytes32 key, bytes32 value) = at(map._inner, index);
+ return (uint256(key), address(uint160(uint256(value))));
+ }
+
+ /**
+ * @dev Tries to return the value associated with `key`. O(1).
+ * Does not revert if `key` is not in the map.
+ */
+ function tryGet(UintToAddressMap storage map, uint256 key) internal view returns (bool, address) {
+ (bool success, bytes32 value) = tryGet(map._inner, bytes32(key));
+ return (success, address(uint160(uint256(value))));
+ }
+
+ /**
+ * @dev Returns the value associated with `key`. O(1).
+ *
+ * Requirements:
+ *
+ * - `key` must be in the map.
+ */
+ function get(UintToAddressMap storage map, uint256 key) internal view returns (address) {
+ return address(uint160(uint256(get(map._inner, bytes32(key)))));
+ }
+
+ /**
+ * @dev Returns an array containing all the keys
+ *
+ * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed
+ * to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that
+ * this function has an unbounded cost, and using it as part of a state-changing function may render the function
+ * uncallable if the map grows to a point where copying to memory consumes too much gas to fit in a block.
+ */
+ function keys(UintToAddressMap storage map) internal view returns (uint256[] memory) {
+ bytes32[] memory store = keys(map._inner);
+ uint256[] memory result;
+
+ /// @solidity memory-safe-assembly
+ assembly {
+ result := store
+ }
+
+ return result;
+ }
+
+ // AddressToUintMap
+
+ struct AddressToUintMap {
+ Bytes32ToBytes32Map _inner;
+ }
+
+ /**
+ * @dev Adds a key-value pair to a map, or updates the value for an existing
+ * key. O(1).
+ *
+ * Returns true if the key was added to the map, that is if it was not
+ * already present.
+ */
+ function set(AddressToUintMap storage map, address key, uint256 value) internal returns (bool) {
+ return set(map._inner, bytes32(uint256(uint160(key))), bytes32(value));
+ }
+
+ /**
+ * @dev Removes a value from a map. O(1).
+ *
+ * Returns true if the key was removed from the map, that is if it was present.
+ */
+ function remove(AddressToUintMap storage map, address key) internal returns (bool) {
+ return remove(map._inner, bytes32(uint256(uint160(key))));
+ }
+
+ /**
+ * @dev Returns true if the key is in the map. O(1).
+ */
+ function contains(AddressToUintMap storage map, address key) internal view returns (bool) {
+ return contains(map._inner, bytes32(uint256(uint160(key))));
+ }
+
+ /**
+ * @dev Returns the number of elements in the map. O(1).
+ */
+ function length(AddressToUintMap storage map) internal view returns (uint256) {
+ return length(map._inner);
+ }
+
+ /**
+ * @dev Returns the element stored at position `index` in the map. O(1).
+ * Note that there are no guarantees on the ordering of values inside the
+ * array, and it may change when more values are added or removed.
+ *
+ * Requirements:
+ *
+ * - `index` must be strictly less than {length}.
+ */
+ function at(AddressToUintMap storage map, uint256 index) internal view returns (address, uint256) {
+ (bytes32 key, bytes32 value) = at(map._inner, index);
+ return (address(uint160(uint256(key))), uint256(value));
+ }
+
+ /**
+     * @dev Tries to return the value associated with `key`. O(1).
+ * Does not revert if `key` is not in the map.
+ */
+ function tryGet(AddressToUintMap storage map, address key) internal view returns (bool, uint256) {
+ (bool success, bytes32 value) = tryGet(map._inner, bytes32(uint256(uint160(key))));
+ return (success, uint256(value));
+ }
+
+ /**
+ * @dev Returns the value associated with `key`. O(1).
+ *
+ * Requirements:
+ *
+ * - `key` must be in the map.
+ */
+ function get(AddressToUintMap storage map, address key) internal view returns (uint256) {
+ return uint256(get(map._inner, bytes32(uint256(uint160(key)))));
+ }
+
+ /**
+     * @dev Return an array containing all the keys
+ *
+ * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed
+ * to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that
+ * this function has an unbounded cost, and using it as part of a state-changing function may render the function
+ * uncallable if the map grows to a point where copying to memory consumes too much gas to fit in a block.
+ */
+ function keys(AddressToUintMap storage map) internal view returns (address[] memory) {
+ bytes32[] memory store = keys(map._inner);
+ address[] memory result;
+
+ /// @solidity memory-safe-assembly
+ assembly {
+ result := store
+ }
+
+ return result;
+ }
+
+ // Bytes32ToUintMap
+
+ struct Bytes32ToUintMap {
+ Bytes32ToBytes32Map _inner;
+ }
+
+ /**
+ * @dev Adds a key-value pair to a map, or updates the value for an existing
+ * key. O(1).
+ *
+ * Returns true if the key was added to the map, that is if it was not
+ * already present.
+ */
+ function set(Bytes32ToUintMap storage map, bytes32 key, uint256 value) internal returns (bool) {
+ return set(map._inner, key, bytes32(value));
+ }
+
+ /**
+ * @dev Removes a value from a map. O(1).
+ *
+ * Returns true if the key was removed from the map, that is if it was present.
+ */
+ function remove(Bytes32ToUintMap storage map, bytes32 key) internal returns (bool) {
+ return remove(map._inner, key);
+ }
+
+ /**
+ * @dev Returns true if the key is in the map. O(1).
+ */
+ function contains(Bytes32ToUintMap storage map, bytes32 key) internal view returns (bool) {
+ return contains(map._inner, key);
+ }
+
+ /**
+ * @dev Returns the number of elements in the map. O(1).
+ */
+ function length(Bytes32ToUintMap storage map) internal view returns (uint256) {
+ return length(map._inner);
+ }
+
+ /**
+ * @dev Returns the element stored at position `index` in the map. O(1).
+ * Note that there are no guarantees on the ordering of values inside the
+ * array, and it may change when more values are added or removed.
+ *
+ * Requirements:
+ *
+ * - `index` must be strictly less than {length}.
+ */
+ function at(Bytes32ToUintMap storage map, uint256 index) internal view returns (bytes32, uint256) {
+ (bytes32 key, bytes32 value) = at(map._inner, index);
+ return (key, uint256(value));
+ }
+
+ /**
+     * @dev Tries to return the value associated with `key`. O(1).
+ * Does not revert if `key` is not in the map.
+ */
+ function tryGet(Bytes32ToUintMap storage map, bytes32 key) internal view returns (bool, uint256) {
+ (bool success, bytes32 value) = tryGet(map._inner, key);
+ return (success, uint256(value));
+ }
+
+ /**
+ * @dev Returns the value associated with `key`. O(1).
+ *
+ * Requirements:
+ *
+ * - `key` must be in the map.
+ */
+ function get(Bytes32ToUintMap storage map, bytes32 key) internal view returns (uint256) {
+ return uint256(get(map._inner, key));
+ }
+
+ /**
+     * @dev Return an array containing all the keys
+ *
+ * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed
+ * to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that
+ * this function has an unbounded cost, and using it as part of a state-changing function may render the function
+ * uncallable if the map grows to a point where copying to memory consumes too much gas to fit in a block.
+ */
+ function keys(Bytes32ToUintMap storage map) internal view returns (bytes32[] memory) {
+ bytes32[] memory store = keys(map._inner);
+ bytes32[] memory result;
+
+ /// @solidity memory-safe-assembly
+ assembly {
+ result := store
+ }
+
+ return result;
+ }
+}
diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/structs/EnumerableSet.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/structs/EnumerableSet.sol
new file mode 100644
index 00000000000..4c7fc5e1d76
--- /dev/null
+++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v5.0.2/contracts/utils/structs/EnumerableSet.sol
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: MIT
+// OpenZeppelin Contracts (last updated v5.0.0) (utils/structs/EnumerableSet.sol)
+// This file was procedurally generated from scripts/generate/templates/EnumerableSet.js.
+
+pragma solidity ^0.8.20;
+
+/**
+ * @dev Library for managing
+ * https://en.wikipedia.org/wiki/Set_(abstract_data_type)[sets] of primitive
+ * types.
+ *
+ * Sets have the following properties:
+ *
+ * - Elements are added, removed, and checked for existence in constant time
+ * (O(1)).
+ * - Elements are enumerated in O(n). No guarantees are made on the ordering.
+ *
+ * ```solidity
+ * contract Example {
+ * // Add the library methods
+ * using EnumerableSet for EnumerableSet.AddressSet;
+ *
+ * // Declare a set state variable
+ * EnumerableSet.AddressSet private mySet;
+ * }
+ * ```
+ *
+ * As of v3.3.0, sets of type `bytes32` (`Bytes32Set`), `address` (`AddressSet`)
+ * and `uint256` (`UintSet`) are supported.
+ *
+ * [WARNING]
+ * ====
+ * Trying to delete such a structure from storage will likely result in data corruption, rendering the structure
+ * unusable.
+ * See https://github.com/ethereum/solidity/pull/11843[ethereum/solidity#11843] for more info.
+ *
+ * In order to clean an EnumerableSet, you can either remove all elements one by one or create a fresh instance using an
+ * array of EnumerableSet.
+ * ====
+ */
+library EnumerableSet {
+ // To implement this library for multiple types with as little code
+ // repetition as possible, we write it in terms of a generic Set type with
+ // bytes32 values.
+ // The Set implementation uses private functions, and user-facing
+ // implementations (such as AddressSet) are just wrappers around the
+ // underlying Set.
+ // This means that we can only create new EnumerableSets for types that fit
+ // in bytes32.
+
+ struct Set {
+ // Storage of set values
+ bytes32[] _values;
+ // Position is the index of the value in the `values` array plus 1.
+ // Position 0 is used to mean a value is not in the set.
+ mapping(bytes32 value => uint256) _positions;
+ }
+
+ /**
+ * @dev Add a value to a set. O(1).
+ *
+ * Returns true if the value was added to the set, that is if it was not
+ * already present.
+ */
+ function _add(Set storage set, bytes32 value) private returns (bool) {
+ if (!_contains(set, value)) {
+ set._values.push(value);
+ // The value is stored at length-1, but we add 1 to all indexes
+ // and use 0 as a sentinel value
+ set._positions[value] = set._values.length;
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ /**
+ * @dev Removes a value from a set. O(1).
+ *
+ * Returns true if the value was removed from the set, that is if it was
+ * present.
+ */
+ function _remove(Set storage set, bytes32 value) private returns (bool) {
+ // We cache the value's position to prevent multiple reads from the same storage slot
+ uint256 position = set._positions[value];
+
+ if (position != 0) {
+ // Equivalent to contains(set, value)
+ // To delete an element from the _values array in O(1), we swap the element to delete with the last one in
+ // the array, and then remove the last element (sometimes called as 'swap and pop').
+ // This modifies the order of the array, as noted in {at}.
+
+ uint256 valueIndex = position - 1;
+ uint256 lastIndex = set._values.length - 1;
+
+ if (valueIndex != lastIndex) {
+ bytes32 lastValue = set._values[lastIndex];
+
+ // Move the lastValue to the index where the value to delete is
+ set._values[valueIndex] = lastValue;
+ // Update the tracked position of the lastValue (that was just moved)
+ set._positions[lastValue] = position;
+ }
+
+ // Delete the slot where the moved value was stored
+ set._values.pop();
+
+ // Delete the tracked position for the deleted slot
+ delete set._positions[value];
+
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ /**
+ * @dev Returns true if the value is in the set. O(1).
+ */
+ function _contains(Set storage set, bytes32 value) private view returns (bool) {
+ return set._positions[value] != 0;
+ }
+
+ /**
+ * @dev Returns the number of values on the set. O(1).
+ */
+ function _length(Set storage set) private view returns (uint256) {
+ return set._values.length;
+ }
+
+ /**
+ * @dev Returns the value stored at position `index` in the set. O(1).
+ *
+ * Note that there are no guarantees on the ordering of values inside the
+ * array, and it may change when more values are added or removed.
+ *
+ * Requirements:
+ *
+ * - `index` must be strictly less than {length}.
+ */
+ function _at(Set storage set, uint256 index) private view returns (bytes32) {
+ return set._values[index];
+ }
+
+ /**
+ * @dev Return the entire set in an array
+ *
+ * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed
+ * to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that
+ * this function has an unbounded cost, and using it as part of a state-changing function may render the function
+ * uncallable if the set grows to a point where copying to memory consumes too much gas to fit in a block.
+ */
+ function _values(Set storage set) private view returns (bytes32[] memory) {
+ return set._values;
+ }
+
+ // Bytes32Set
+
+ struct Bytes32Set {
+ Set _inner;
+ }
+
+ /**
+ * @dev Add a value to a set. O(1).
+ *
+ * Returns true if the value was added to the set, that is if it was not
+ * already present.
+ */
+ function add(Bytes32Set storage set, bytes32 value) internal returns (bool) {
+ return _add(set._inner, value);
+ }
+
+ /**
+ * @dev Removes a value from a set. O(1).
+ *
+ * Returns true if the value was removed from the set, that is if it was
+ * present.
+ */
+ function remove(Bytes32Set storage set, bytes32 value) internal returns (bool) {
+ return _remove(set._inner, value);
+ }
+
+ /**
+ * @dev Returns true if the value is in the set. O(1).
+ */
+ function contains(Bytes32Set storage set, bytes32 value) internal view returns (bool) {
+ return _contains(set._inner, value);
+ }
+
+ /**
+ * @dev Returns the number of values in the set. O(1).
+ */
+ function length(Bytes32Set storage set) internal view returns (uint256) {
+ return _length(set._inner);
+ }
+
+ /**
+ * @dev Returns the value stored at position `index` in the set. O(1).
+ *
+ * Note that there are no guarantees on the ordering of values inside the
+ * array, and it may change when more values are added or removed.
+ *
+ * Requirements:
+ *
+ * - `index` must be strictly less than {length}.
+ */
+ function at(Bytes32Set storage set, uint256 index) internal view returns (bytes32) {
+ return _at(set._inner, index);
+ }
+
+ /**
+ * @dev Return the entire set in an array
+ *
+ * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed
+ * to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that
+ * this function has an unbounded cost, and using it as part of a state-changing function may render the function
+ * uncallable if the set grows to a point where copying to memory consumes too much gas to fit in a block.
+ */
+ function values(Bytes32Set storage set) internal view returns (bytes32[] memory) {
+ bytes32[] memory store = _values(set._inner);
+ bytes32[] memory result;
+
+ /// @solidity memory-safe-assembly
+ assembly {
+ result := store
+ }
+
+ return result;
+ }
+
+ // AddressSet
+
+ struct AddressSet {
+ Set _inner;
+ }
+
+ /**
+ * @dev Add a value to a set. O(1).
+ *
+ * Returns true if the value was added to the set, that is if it was not
+ * already present.
+ */
+ function add(AddressSet storage set, address value) internal returns (bool) {
+ return _add(set._inner, bytes32(uint256(uint160(value))));
+ }
+
+ /**
+ * @dev Removes a value from a set. O(1).
+ *
+ * Returns true if the value was removed from the set, that is if it was
+ * present.
+ */
+ function remove(AddressSet storage set, address value) internal returns (bool) {
+ return _remove(set._inner, bytes32(uint256(uint160(value))));
+ }
+
+ /**
+ * @dev Returns true if the value is in the set. O(1).
+ */
+ function contains(AddressSet storage set, address value) internal view returns (bool) {
+ return _contains(set._inner, bytes32(uint256(uint160(value))));
+ }
+
+ /**
+ * @dev Returns the number of values in the set. O(1).
+ */
+ function length(AddressSet storage set) internal view returns (uint256) {
+ return _length(set._inner);
+ }
+
+ /**
+ * @dev Returns the value stored at position `index` in the set. O(1).
+ *
+ * Note that there are no guarantees on the ordering of values inside the
+ * array, and it may change when more values are added or removed.
+ *
+ * Requirements:
+ *
+ * - `index` must be strictly less than {length}.
+ */
+ function at(AddressSet storage set, uint256 index) internal view returns (address) {
+ return address(uint160(uint256(_at(set._inner, index))));
+ }
+
+ /**
+ * @dev Return the entire set in an array
+ *
+ * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed
+ * to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that
+ * this function has an unbounded cost, and using it as part of a state-changing function may render the function
+ * uncallable if the set grows to a point where copying to memory consumes too much gas to fit in a block.
+ */
+ function values(AddressSet storage set) internal view returns (address[] memory) {
+ bytes32[] memory store = _values(set._inner);
+ address[] memory result;
+
+ /// @solidity memory-safe-assembly
+ assembly {
+ result := store
+ }
+
+ return result;
+ }
+
+ // UintSet
+
+ struct UintSet {
+ Set _inner;
+ }
+
+ /**
+ * @dev Add a value to a set. O(1).
+ *
+ * Returns true if the value was added to the set, that is if it was not
+ * already present.
+ */
+ function add(UintSet storage set, uint256 value) internal returns (bool) {
+ return _add(set._inner, bytes32(value));
+ }
+
+ /**
+ * @dev Removes a value from a set. O(1).
+ *
+ * Returns true if the value was removed from the set, that is if it was
+ * present.
+ */
+ function remove(UintSet storage set, uint256 value) internal returns (bool) {
+ return _remove(set._inner, bytes32(value));
+ }
+
+ /**
+ * @dev Returns true if the value is in the set. O(1).
+ */
+ function contains(UintSet storage set, uint256 value) internal view returns (bool) {
+ return _contains(set._inner, bytes32(value));
+ }
+
+ /**
+ * @dev Returns the number of values in the set. O(1).
+ */
+ function length(UintSet storage set) internal view returns (uint256) {
+ return _length(set._inner);
+ }
+
+ /**
+ * @dev Returns the value stored at position `index` in the set. O(1).
+ *
+ * Note that there are no guarantees on the ordering of values inside the
+ * array, and it may change when more values are added or removed.
+ *
+ * Requirements:
+ *
+ * - `index` must be strictly less than {length}.
+ */
+ function at(UintSet storage set, uint256 index) internal view returns (uint256) {
+ return uint256(_at(set._inner, index));
+ }
+
+ /**
+ * @dev Return the entire set in an array
+ *
+ * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed
+ * to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that
+ * this function has an unbounded cost, and using it as part of a state-changing function may render the function
+ * uncallable if the set grows to a point where copying to memory consumes too much gas to fit in a block.
+ */
+ function values(UintSet storage set) internal view returns (uint256[] memory) {
+ bytes32[] memory store = _values(set._inner);
+ uint256[] memory result;
+
+ /// @solidity memory-safe-assembly
+ assembly {
+ result := store
+ }
+
+ return result;
+ }
+}
From 6ab3eb5b67739ff88d3c4cf8ea125fd8273bc2b1 Mon Sep 17 00:00:00 2001
From: "Abdelrahman Soliman (Boda)"
<2677789+asoliman92@users.noreply.github.com>
Date: Wed, 7 Aug 2024 22:32:07 +0400
Subject: [PATCH 29/52] [CCIP Merge] Capabilities [CCIP-2943] (#14068)
* [CCIP Merge] Add ccip capabilities directory [CCIP-2943] (#14044)
* Add ccip capabilities directory
* [CCIP Merge] Capabilities fix [CCIP-2943] (#14048)
* Fix compilation for launcher, diff
Make application.go ready for adding more fixes
* Fix launcher tests
* ks-409 fix the mock trigger to ensure events are sent (#14047)
* Add ccip to job orm
* Add capabilities directory under BUSL license
* Prep to instantiate separate registrysyncer for CCIP
* Move registrySyncer creation into ccip delegate
* [chore] Change registrysyncer config to bytes
* Fix launcher diff tests after changing structs in syncer
* Fix linting
* MAke simulated backend client work with chains other than 1337
* core/capabilities/ccip: use OCR offchain config (#1264)
We want to define and use the appropriate OCR offchain config for each
plugin.
Requires https://github.com/smartcontractkit/chainlink-ccip/pull/36/
* Cleaning up
* Add capabilities types to mockery
---------
Co-authored-by: Cedric Cordenier
Co-authored-by: Matthew Pendrey
Co-authored-by: Makram
* make modgraph
* Add changeset
* Fix test with new TxMgr constructor
---------
Co-authored-by: Cedric Cordenier
Co-authored-by: Matthew Pendrey
Co-authored-by: Makram
---
.changeset/eight-radios-hear.md | 5 +
.mockery.yaml | 4 +
LICENSE | 2 +-
.../ccip/ccip_integration_tests/.gitignore | 1 +
.../ccipreader/ccipreader_test.go | 411 +++++++
.../chainreader/Makefile | 12 +
.../chainreader/chainreader_test.go | 273 +++++
.../chainreader/mycontract.go | 519 ++++++++
.../chainreader/mycontract.sol | 31 +
.../ccip/ccip_integration_tests/helpers.go | 938 +++++++++++++++
.../ccip_integration_tests/home_chain_test.go | 103 ++
.../integrationhelpers/integration_helpers.go | 304 +++++
.../ccip_integration_tests/ocr3_node_test.go | 281 +++++
.../ccip_integration_tests/ocr_node_helper.go | 316 +++++
.../ccip_integration_tests/ping_pong_test.go | 95 ++
core/capabilities/ccip/ccipevm/commitcodec.go | 138 +++
.../ccip/ccipevm/commitcodec_test.go | 135 +++
.../capabilities/ccip/ccipevm/executecodec.go | 181 +++
.../ccip/ccipevm/executecodec_test.go | 174 +++
core/capabilities/ccip/ccipevm/helpers.go | 33 +
.../capabilities/ccip/ccipevm/helpers_test.go | 41 +
core/capabilities/ccip/ccipevm/msghasher.go | 127 ++
.../ccip/ccipevm/msghasher_test.go | 189 +++
core/capabilities/ccip/common/common.go | 23 +
core/capabilities/ccip/common/common_test.go | 51 +
.../ccip/configs/evm/chain_writer.go | 75 ++
.../ccip/configs/evm/contract_reader.go | 219 ++++
core/capabilities/ccip/delegate.go | 321 +++++
core/capabilities/ccip/delegate_test.go | 1 +
core/capabilities/ccip/launcher/README.md | 69 ++
core/capabilities/ccip/launcher/bluegreen.go | 178 +++
.../ccip/launcher/bluegreen_test.go | 1043 +++++++++++++++++
.../launcher/ccip_capability_launcher.png | Bin 0 -> 253433 bytes
.../launcher/ccip_config_state_machine.png | Bin 0 -> 96958 bytes
core/capabilities/ccip/launcher/diff.go | 141 +++
core/capabilities/ccip/launcher/diff_test.go | 352 ++++++
.../ccip/launcher/integration_test.go | 120 ++
core/capabilities/ccip/launcher/launcher.go | 432 +++++++
.../ccip/launcher/launcher_test.go | 472 ++++++++
.../ccip/launcher/test_helpers.go | 56 +
.../ccip/ocrimpls/config_digester.go | 23 +
.../ccip/ocrimpls/config_tracker.go | 77 ++
.../ccip/ocrimpls/contract_transmitter.go | 188 +++
.../ocrimpls/contract_transmitter_test.go | 691 +++++++++++
core/capabilities/ccip/ocrimpls/keyring.go | 61 +
.../ccip/oraclecreator/inprocess.go | 371 ++++++
.../ccip/oraclecreator/inprocess_test.go | 239 ++++
.../ccip/types/mocks/ccip_oracle.go | 122 ++
.../ccip/types/mocks/home_chain_reader.go | 129 ++
.../ccip/types/mocks/oracle_creator.go | 152 +++
core/capabilities/ccip/types/types.go | 46 +
core/capabilities/ccip/validate/validate.go | 94 ++
.../ccip/validate/validate_test.go | 58 +
core/capabilities/launcher.go | 56 +-
core/capabilities/launcher_test.go | 29 +-
core/capabilities/registry.go | 10 +-
.../evm/client/simulated_backend_client.go | 13 +-
core/scripts/go.mod | 5 +-
core/scripts/go.sum | 10 +-
core/services/chainlink/application.go | 87 +-
core/services/job/models.go | 50 +
core/services/job/orm.go | 64 +-
core/services/pipeline/common.go | 1 +
.../services/registrysyncer/local_registry.go | 19 +-
core/services/registrysyncer/syncer.go | 57 +-
core/services/registrysyncer/syncer_test.go | 44 +-
core/services/synchronization/common.go | 4 +
core/services/workflows/engine_test.go | 26 +-
core/web/presenters/job.go | 23 +
core/web/presenters/job_test.go | 98 +-
go.md | 4 +
go.mod | 5 +-
go.sum | 10 +-
integration-tests/go.mod | 5 +-
integration-tests/go.sum | 10 +-
integration-tests/load/go.mod | 5 +-
integration-tests/load/go.sum | 10 +-
77 files changed, 10565 insertions(+), 197 deletions(-)
create mode 100644 .changeset/eight-radios-hear.md
create mode 100644 core/capabilities/ccip/ccip_integration_tests/.gitignore
create mode 100644 core/capabilities/ccip/ccip_integration_tests/ccipreader/ccipreader_test.go
create mode 100644 core/capabilities/ccip/ccip_integration_tests/chainreader/Makefile
create mode 100644 core/capabilities/ccip/ccip_integration_tests/chainreader/chainreader_test.go
create mode 100644 core/capabilities/ccip/ccip_integration_tests/chainreader/mycontract.go
create mode 100644 core/capabilities/ccip/ccip_integration_tests/chainreader/mycontract.sol
create mode 100644 core/capabilities/ccip/ccip_integration_tests/helpers.go
create mode 100644 core/capabilities/ccip/ccip_integration_tests/home_chain_test.go
create mode 100644 core/capabilities/ccip/ccip_integration_tests/integrationhelpers/integration_helpers.go
create mode 100644 core/capabilities/ccip/ccip_integration_tests/ocr3_node_test.go
create mode 100644 core/capabilities/ccip/ccip_integration_tests/ocr_node_helper.go
create mode 100644 core/capabilities/ccip/ccip_integration_tests/ping_pong_test.go
create mode 100644 core/capabilities/ccip/ccipevm/commitcodec.go
create mode 100644 core/capabilities/ccip/ccipevm/commitcodec_test.go
create mode 100644 core/capabilities/ccip/ccipevm/executecodec.go
create mode 100644 core/capabilities/ccip/ccipevm/executecodec_test.go
create mode 100644 core/capabilities/ccip/ccipevm/helpers.go
create mode 100644 core/capabilities/ccip/ccipevm/helpers_test.go
create mode 100644 core/capabilities/ccip/ccipevm/msghasher.go
create mode 100644 core/capabilities/ccip/ccipevm/msghasher_test.go
create mode 100644 core/capabilities/ccip/common/common.go
create mode 100644 core/capabilities/ccip/common/common_test.go
create mode 100644 core/capabilities/ccip/configs/evm/chain_writer.go
create mode 100644 core/capabilities/ccip/configs/evm/contract_reader.go
create mode 100644 core/capabilities/ccip/delegate.go
create mode 100644 core/capabilities/ccip/delegate_test.go
create mode 100644 core/capabilities/ccip/launcher/README.md
create mode 100644 core/capabilities/ccip/launcher/bluegreen.go
create mode 100644 core/capabilities/ccip/launcher/bluegreen_test.go
create mode 100644 core/capabilities/ccip/launcher/ccip_capability_launcher.png
create mode 100644 core/capabilities/ccip/launcher/ccip_config_state_machine.png
create mode 100644 core/capabilities/ccip/launcher/diff.go
create mode 100644 core/capabilities/ccip/launcher/diff_test.go
create mode 100644 core/capabilities/ccip/launcher/integration_test.go
create mode 100644 core/capabilities/ccip/launcher/launcher.go
create mode 100644 core/capabilities/ccip/launcher/launcher_test.go
create mode 100644 core/capabilities/ccip/launcher/test_helpers.go
create mode 100644 core/capabilities/ccip/ocrimpls/config_digester.go
create mode 100644 core/capabilities/ccip/ocrimpls/config_tracker.go
create mode 100644 core/capabilities/ccip/ocrimpls/contract_transmitter.go
create mode 100644 core/capabilities/ccip/ocrimpls/contract_transmitter_test.go
create mode 100644 core/capabilities/ccip/ocrimpls/keyring.go
create mode 100644 core/capabilities/ccip/oraclecreator/inprocess.go
create mode 100644 core/capabilities/ccip/oraclecreator/inprocess_test.go
create mode 100644 core/capabilities/ccip/types/mocks/ccip_oracle.go
create mode 100644 core/capabilities/ccip/types/mocks/home_chain_reader.go
create mode 100644 core/capabilities/ccip/types/mocks/oracle_creator.go
create mode 100644 core/capabilities/ccip/types/types.go
create mode 100644 core/capabilities/ccip/validate/validate.go
create mode 100644 core/capabilities/ccip/validate/validate_test.go
diff --git a/.changeset/eight-radios-hear.md b/.changeset/eight-radios-hear.md
new file mode 100644
index 00000000000..b422f378326
--- /dev/null
+++ b/.changeset/eight-radios-hear.md
@@ -0,0 +1,5 @@
+---
+"chainlink": minor
+---
+
+#added merging core/capabilities/ccip from https://github.com/smartcontractkit/ccip
diff --git a/.mockery.yaml b/.mockery.yaml
index 8fab61a5b9d..abb3105b136 100644
--- a/.mockery.yaml
+++ b/.mockery.yaml
@@ -43,6 +43,10 @@ packages:
github.com/smartcontractkit/chainlink/v2/core/bridges:
interfaces:
ORM:
+ github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types:
+ interfaces:
+ CCIPOracle:
+ OracleCreator:
github.com/smartcontractkit/chainlink/v2/core/capabilities/remote/types:
interfaces:
Dispatcher:
diff --git a/LICENSE b/LICENSE
index 4a10bfc38b0..3af9faa6c6f 100644
--- a/LICENSE
+++ b/LICENSE
@@ -24,7 +24,7 @@ THE SOFTWARE.
*All content residing under (1) “/contracts/src/v0.8/ccip”; (2)
-“/core/gethwrappers/ccip”; (3) “/core/services/ocr2/plugins/ccip” are licensed
+“/core/gethwrappers/ccip”; (3) “/core/services/ocr2/plugins/ccip”; (4) "/core/capabilities/ccip" are licensed
under “Business Source License 1.1” with a Change Date of May 23, 2027 and
Change License to “MIT License”
diff --git a/core/capabilities/ccip/ccip_integration_tests/.gitignore b/core/capabilities/ccip/ccip_integration_tests/.gitignore
new file mode 100644
index 00000000000..567609b1234
--- /dev/null
+++ b/core/capabilities/ccip/ccip_integration_tests/.gitignore
@@ -0,0 +1 @@
+build/
diff --git a/core/capabilities/ccip/ccip_integration_tests/ccipreader/ccipreader_test.go b/core/capabilities/ccip/ccip_integration_tests/ccipreader/ccipreader_test.go
new file mode 100644
index 00000000000..66c47f4741f
--- /dev/null
+++ b/core/capabilities/ccip/ccip_integration_tests/ccipreader/ccipreader_test.go
@@ -0,0 +1,411 @@
+package ccipreader
+
+import (
+ "context"
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zapcore"
+ "golang.org/x/exp/maps"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/types"
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccipocr3"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/ccip_reader_tester"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types"
+
+ "github.com/smartcontractkit/chainlink-ccip/pkg/consts"
+ "github.com/smartcontractkit/chainlink-ccip/pkg/contractreader"
+ ccipreaderpkg "github.com/smartcontractkit/chainlink-ccip/pkg/reader"
+ "github.com/smartcontractkit/chainlink-ccip/plugintypes"
+)
+
+// Chain selectors used throughout these tests: three source chains (S1-S3)
+// and a single destination chain (D).
+const (
+	chainS1 = cciptypes.ChainSelector(1)
+	chainS2 = cciptypes.ChainSelector(2)
+	chainS3 = cciptypes.ChainSelector(3)
+	chainD  = cciptypes.ChainSelector(4)
+)
+
+// TestCCIPReader_CommitReportsGTETimestamp emits numReports CommitReportAccepted
+// events from the tester contract, then verifies that the reader returns only
+// the reports with a block timestamp at or after the requested cutoff, and that
+// merkle roots and token/gas price updates are decoded into the generic
+// cciptypes representation.
+func TestCCIPReader_CommitReportsGTETimestamp(t *testing.T) {
+	ctx := testutils.Context(t)
+
+	cfg := evmtypes.ChainReaderConfig{
+		Contracts: map[string]evmtypes.ChainContractReader{
+			consts.ContractNameOffRamp: {
+				ContractPollingFilter: evmtypes.ContractPollingFilter{
+					GenericEventNames: []string{consts.EventNameCommitReportAccepted},
+				},
+				ContractABI: ccip_reader_tester.CCIPReaderTesterABI,
+				Configs: map[string]*evmtypes.ChainReaderDefinition{
+					consts.EventNameCommitReportAccepted: {
+						ChainSpecificName: consts.EventNameCommitReportAccepted,
+						ReadType:          evmtypes.Event,
+					},
+				},
+			},
+		},
+	}
+
+	s := testSetup(ctx, t, chainD, chainD, nil, cfg)
+
+	tokenA := common.HexToAddress("123")
+	const numReports = 5
+
+	// Emit one report per block; each report carries a distinct merkle root
+	// {i+1} so the first returned report can be identified below.
+	for i := uint8(0); i < numReports; i++ {
+		_, err := s.contract.EmitCommitReportAccepted(s.auth, ccip_reader_tester.EVM2EVMMultiOffRampCommitReport{
+			PriceUpdates: ccip_reader_tester.InternalPriceUpdates{
+				TokenPriceUpdates: []ccip_reader_tester.InternalTokenPriceUpdate{
+					{
+						SourceToken: tokenA,
+						UsdPerToken: big.NewInt(1000),
+					},
+				},
+				GasPriceUpdates: []ccip_reader_tester.InternalGasPriceUpdate{
+					{
+						DestChainSelector: uint64(chainD),
+						UsdPerUnitGas:     big.NewInt(90),
+					},
+				},
+			},
+			MerkleRoots: []ccip_reader_tester.EVM2EVMMultiOffRampMerkleRoot{
+				{
+					SourceChainSelector: uint64(chainS1),
+					Interval: ccip_reader_tester.EVM2EVMMultiOffRampInterval{
+						Min: 10,
+						Max: 20,
+					},
+					MerkleRoot: [32]byte{i + 1},
+				},
+			},
+		})
+		assert.NoError(t, err)
+		s.sb.Commit()
+	}
+
+	var reports []plugintypes.CommitPluginReportWithMeta
+	var err error
+	// Poll until the log poller has indexed the expected reports.
+	require.Eventually(t, func() bool {
+		reports, err = s.reader.CommitReportsGTETimestamp(
+			ctx,
+			chainD,
+			time.Unix(30, 0), // Skips first report, simulated backend report timestamps are [20, 30, 40, ...]
+			10,
+		)
+		require.NoError(t, err)
+		return len(reports) == numReports-1
+	}, testutils.WaitTimeout(t), 50*time.Millisecond)
+
+	// First returned report is the second emitted one, hence merkle root {2}.
+	assert.Len(t, reports[0].Report.MerkleRoots, 1)
+	assert.Equal(t, chainS1, reports[0].Report.MerkleRoots[0].ChainSel)
+	assert.Equal(t, cciptypes.SeqNum(10), reports[0].Report.MerkleRoots[0].SeqNumsRange.Start())
+	assert.Equal(t, cciptypes.SeqNum(20), reports[0].Report.MerkleRoots[0].SeqNumsRange.End())
+	assert.Equal(t, "0x0200000000000000000000000000000000000000000000000000000000000000",
+		reports[0].Report.MerkleRoots[0].MerkleRoot.String())
+
+	assert.Equal(t, tokenA.String(), string(reports[0].Report.PriceUpdates.TokenPriceUpdates[0].TokenID))
+	assert.Equal(t, uint64(1000), reports[0].Report.PriceUpdates.TokenPriceUpdates[0].Price.Uint64())
+
+	assert.Equal(t, chainD, reports[0].Report.PriceUpdates.GasPriceUpdates[0].ChainSel)
+	assert.Equal(t, uint64(90), reports[0].Report.PriceUpdates.GasPriceUpdates[0].GasPrice.Uint64())
+}
+
+// TestCCIPReader_ExecutedMessageRanges emits two ExecutionStateChanged events
+// (sequence numbers 14 and 15) from source chain S1 and verifies the reader
+// reports each executed message as its own single-element sequence-number range.
+func TestCCIPReader_ExecutedMessageRanges(t *testing.T) {
+	ctx := testutils.Context(t)
+	cfg := evmtypes.ChainReaderConfig{
+		Contracts: map[string]evmtypes.ChainContractReader{
+			consts.ContractNameOffRamp: {
+				ContractPollingFilter: evmtypes.ContractPollingFilter{
+					GenericEventNames: []string{consts.EventNameExecutionStateChanged},
+				},
+				ContractABI: ccip_reader_tester.CCIPReaderTesterABI,
+				Configs: map[string]*evmtypes.ChainReaderDefinition{
+					consts.EventNameExecutionStateChanged: {
+						ChainSpecificName: consts.EventNameExecutionStateChanged,
+						ReadType:          evmtypes.Event,
+					},
+				},
+			},
+		},
+	}
+
+	s := testSetup(ctx, t, chainD, chainD, nil, cfg)
+
+	_, err := s.contract.EmitExecutionStateChanged(
+		s.auth,
+		uint64(chainS1),
+		14,
+		cciptypes.Bytes32{1, 0, 0, 1},
+		1,
+		[]byte{1, 2, 3, 4},
+	)
+	assert.NoError(t, err)
+	s.sb.Commit()
+
+	_, err = s.contract.EmitExecutionStateChanged(
+		s.auth,
+		uint64(chainS1),
+		15,
+		cciptypes.Bytes32{1, 0, 0, 2},
+		1,
+		[]byte{1, 2, 3, 4, 5},
+	)
+	assert.NoError(t, err)
+	s.sb.Commit()
+
+	// Need to replay as sometimes the logs are not picked up by the log poller (?)
+	// Maybe another situation where chain reader doesn't register filters as expected.
+	require.NoError(t, s.lp.Replay(ctx, 1))
+
+	var executedRanges []cciptypes.SeqNumRange
+	require.Eventually(t, func() bool {
+		executedRanges, err = s.reader.ExecutedMessageRanges(
+			ctx,
+			chainS1,
+			chainD,
+			cciptypes.NewSeqNumRange(14, 15),
+		)
+		require.NoError(t, err)
+		return len(executedRanges) == 2
+	}, testutils.WaitTimeout(t), 50*time.Millisecond)
+
+	// Each executed message is returned as a degenerate [n, n] range.
+	assert.Equal(t, cciptypes.SeqNum(14), executedRanges[0].Start())
+	assert.Equal(t, cciptypes.SeqNum(14), executedRanges[0].End())
+
+	assert.Equal(t, cciptypes.SeqNum(15), executedRanges[1].Start())
+	assert.Equal(t, cciptypes.SeqNum(15), executedRanges[1].End())
+}
+
+// TestCCIPReader_MsgsBetweenSeqNums emits two CCIPSendRequested events with
+// sequence numbers 10 and 15 on source chain S1 and verifies the reader
+// returns both when queried with the enclosing range [5, 20], with their
+// source/dest selectors mapped correctly.
+func TestCCIPReader_MsgsBetweenSeqNums(t *testing.T) {
+	ctx := testutils.Context(t)
+
+	cfg := evmtypes.ChainReaderConfig{
+		Contracts: map[string]evmtypes.ChainContractReader{
+			consts.ContractNameOnRamp: {
+				ContractPollingFilter: evmtypes.ContractPollingFilter{
+					GenericEventNames: []string{consts.EventNameCCIPSendRequested},
+				},
+				ContractABI: ccip_reader_tester.CCIPReaderTesterABI,
+				Configs: map[string]*evmtypes.ChainReaderDefinition{
+					consts.EventNameCCIPSendRequested: {
+						ChainSpecificName: consts.EventNameCCIPSendRequested,
+						ReadType:          evmtypes.Event,
+					},
+				},
+			},
+		},
+	}
+
+	// Note: the reader chain here is the source chain S1, not the dest chain.
+	s := testSetup(ctx, t, chainS1, chainD, nil, cfg)
+
+	_, err := s.contract.EmitCCIPSendRequested(s.auth, uint64(chainD), ccip_reader_tester.InternalEVM2AnyRampMessage{
+		Header: ccip_reader_tester.InternalRampMessageHeader{
+			MessageId:           [32]byte{1, 0, 0, 0, 0},
+			SourceChainSelector: uint64(chainS1),
+			DestChainSelector:   uint64(chainD),
+			SequenceNumber:      10,
+		},
+		Sender:         utils.RandomAddress(),
+		Data:           make([]byte, 0),
+		Receiver:       utils.RandomAddress().Bytes(),
+		ExtraArgs:      make([]byte, 0),
+		FeeToken:       utils.RandomAddress(),
+		FeeTokenAmount: big.NewInt(0),
+		TokenAmounts:   make([]ccip_reader_tester.InternalRampTokenAmount, 0),
+	})
+	assert.NoError(t, err)
+
+	_, err = s.contract.EmitCCIPSendRequested(s.auth, uint64(chainD), ccip_reader_tester.InternalEVM2AnyRampMessage{
+		Header: ccip_reader_tester.InternalRampMessageHeader{
+			MessageId:           [32]byte{1, 0, 0, 0, 1},
+			SourceChainSelector: uint64(chainS1),
+			DestChainSelector:   uint64(chainD),
+			SequenceNumber:      15,
+		},
+		Sender:         utils.RandomAddress(),
+		Data:           make([]byte, 0),
+		Receiver:       utils.RandomAddress().Bytes(),
+		ExtraArgs:      make([]byte, 0),
+		FeeToken:       utils.RandomAddress(),
+		FeeTokenAmount: big.NewInt(0),
+		TokenAmounts:   make([]ccip_reader_tester.InternalRampTokenAmount, 0),
+	})
+	assert.NoError(t, err)
+
+	s.sb.Commit()
+
+	var msgs []cciptypes.Message
+	require.Eventually(t, func() bool {
+		msgs, err = s.reader.MsgsBetweenSeqNums(
+			ctx,
+			chainS1,
+			cciptypes.NewSeqNumRange(5, 20),
+		)
+		require.NoError(t, err)
+		return len(msgs) == 2
+	}, 10*time.Second, 100*time.Millisecond)
+
+	require.Len(t, msgs, 2)
+	require.Equal(t, cciptypes.SeqNum(10), msgs[0].Header.SequenceNumber)
+	require.Equal(t, cciptypes.SeqNum(15), msgs[1].Header.SequenceNumber)
+	for _, msg := range msgs {
+		require.Equal(t, chainS1, msg.Header.SourceChainSelector)
+		require.Equal(t, chainD, msg.Header.DestChainSelector)
+	}
+}
+
+// TestCCIPReader_NextSeqNum seeds per-source-chain min sequence numbers into
+// the tester contract (via testSetup's onChainSeqNums parameter) and verifies
+// the reader returns them in the order of the requested source chains.
+func TestCCIPReader_NextSeqNum(t *testing.T) {
+	ctx := testutils.Context(t)
+
+	onChainSeqNums := map[cciptypes.ChainSelector]cciptypes.SeqNum{
+		chainS1: 10,
+		chainS2: 20,
+		chainS3: 30,
+	}
+
+	cfg := evmtypes.ChainReaderConfig{
+		Contracts: map[string]evmtypes.ChainContractReader{
+			consts.ContractNameOffRamp: {
+				ContractABI: ccip_reader_tester.CCIPReaderTesterABI,
+				Configs: map[string]*evmtypes.ChainReaderDefinition{
+					consts.MethodNameGetSourceChainConfig: {
+						ChainSpecificName: "getSourceChainConfig",
+						ReadType:          evmtypes.Method,
+					},
+				},
+			},
+		},
+	}
+
+	s := testSetup(ctx, t, chainD, chainD, onChainSeqNums, cfg)
+
+	seqNums, err := s.reader.NextSeqNum(ctx, []cciptypes.ChainSelector{chainS1, chainS2, chainS3})
+	assert.NoError(t, err)
+	assert.Len(t, seqNums, 3)
+	assert.Equal(t, cciptypes.SeqNum(10), seqNums[0])
+	assert.Equal(t, cciptypes.SeqNum(20), seqNums[1])
+	assert.Equal(t, cciptypes.SeqNum(30), seqNums[2])
+}
+
+// testSetup deploys the CCIPReaderTester contract onto a fresh simulated
+// backend, starts a log poller and a chain reader service bound to that
+// contract, optionally seeds per-source-chain min sequence numbers
+// (onChainSeqNums), and returns the assembled fixtures. The chain reader,
+// log poller and DB are closed via t.Cleanup.
+//
+// readerChain is the selector the contract reader is registered under;
+// destChain is passed to the CCIP reader as the destination chain. cfg must
+// configure exactly one contract.
+func testSetup(ctx context.Context, t *testing.T, readerChain, destChain cciptypes.ChainSelector, onChainSeqNums map[cciptypes.ChainSelector]cciptypes.SeqNum, cfg evmtypes.ChainReaderConfig) *testSetupData {
+	const chainID = 1337
+
+	// Generate a new key pair for the simulated account
+	privateKey, err := crypto.GenerateKey()
+	assert.NoError(t, err)
+	// Set up the genesis account with balance
+	blnc, ok := big.NewInt(0).SetString("999999999999999999999999999999999999", 10)
+	assert.True(t, ok)
+	alloc := map[common.Address]core.GenesisAccount{crypto.PubkeyToAddress(privateKey.PublicKey): {Balance: blnc}}
+	simulatedBackend := backends.NewSimulatedBackend(alloc, 0)
+	// Create a transactor
+
+	auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(chainID))
+	assert.NoError(t, err)
+	auth.GasLimit = uint64(0) // 0 presumably lets the backend estimate gas — NOTE(review): confirm
+
+	// Deploy the contract
+	address, _, _, err := ccip_reader_tester.DeployCCIPReaderTester(auth, simulatedBackend)
+	assert.NoError(t, err)
+	simulatedBackend.Commit()
+
+	// Setup contract client
+	contract, err := ccip_reader_tester.NewCCIPReaderTester(address, simulatedBackend)
+	assert.NoError(t, err)
+
+	lggr := logger.TestLogger(t)
+	lggr.SetLogLevel(zapcore.ErrorLevel)
+	db := pgtest.NewSqlxDB(t)
+	lpOpts := logpoller.Opts{
+		PollPeriod:               time.Millisecond,
+		FinalityDepth:            0,
+		BackfillBatchSize:        10,
+		RpcBatchSize:             10,
+		KeepFinalizedBlocksDepth: 100000,
+	}
+	// The log poller's ORM and client are keyed by the reader chain selector.
+	cl := client.NewSimulatedBackendClient(t, simulatedBackend, big.NewInt(0).SetUint64(uint64(readerChain)))
+	headTracker := headtracker.NewSimulatedHeadTracker(cl, lpOpts.UseFinalityTag, lpOpts.FinalityDepth)
+	lp := logpoller.NewLogPoller(logpoller.NewORM(big.NewInt(0).SetUint64(uint64(readerChain)), db, lggr),
+		cl,
+		lggr,
+		headTracker,
+		lpOpts,
+	)
+	assert.NoError(t, lp.Start(ctx))
+
+	// Seed requested source-chain configs and read each one back to confirm
+	// the write landed before the tests query it.
+	for sourceChain, seqNum := range onChainSeqNums {
+		_, err1 := contract.SetSourceChainConfig(auth, uint64(sourceChain), ccip_reader_tester.EVM2EVMMultiOffRampSourceChainConfig{
+			IsEnabled: true,
+			MinSeqNr:  uint64(seqNum),
+		})
+		assert.NoError(t, err1)
+		simulatedBackend.Commit()
+		scc, err1 := contract.GetSourceChainConfig(&bind.CallOpts{Context: ctx}, uint64(sourceChain))
+		assert.NoError(t, err1)
+		assert.Equal(t, seqNum, cciptypes.SeqNum(scc.MinSeqNr))
+	}
+
+	contractNames := maps.Keys(cfg.Contracts)
+	assert.Len(t, contractNames, 1, "test setup assumes there is only one contract")
+
+	cr, err := evm.NewChainReaderService(ctx, lggr, lp, headTracker, cl, cfg)
+	require.NoError(t, err)
+
+	// Bind the deployed contract address under the single configured name.
+	extendedCr := contractreader.NewExtendedContractReader(cr)
+	err = extendedCr.Bind(ctx, []types.BoundContract{
+		{
+			Address: address.String(),
+			Name:    contractNames[0],
+		},
+	})
+	require.NoError(t, err)
+
+	err = cr.Start(ctx)
+	require.NoError(t, err)
+
+	contractReaders := map[cciptypes.ChainSelector]contractreader.Extended{readerChain: extendedCr}
+	contractWriters := make(map[cciptypes.ChainSelector]types.ChainWriter) // no writers needed by these read-only tests
+	reader := ccipreaderpkg.NewCCIPReaderWithExtendedContractReaders(lggr, contractReaders, contractWriters, destChain)
+
+	t.Cleanup(func() {
+		require.NoError(t, cr.Close())
+		require.NoError(t, lp.Close())
+		require.NoError(t, db.Close())
+	})
+
+	return &testSetupData{
+		contractAddr: address,
+		contract:     contract,
+		sb:           simulatedBackend,
+		auth:         auth,
+		lp:           lp,
+		cl:           cl,
+		reader:       reader,
+	}
+}
+
+// testSetupData bundles everything testSetup constructs so individual tests
+// can drive the tester contract and query the CCIP reader under test.
+type testSetupData struct {
+	contractAddr common.Address                       // deployed CCIPReaderTester address
+	contract     *ccip_reader_tester.CCIPReaderTester // bound contract client used to emit events
+	sb           *backends.SimulatedBackend           // simulated chain backing all components
+	auth         *bind.TransactOpts                   // transactor for the funded test key
+	lp           logpoller.LogPoller                  // running log poller (closed via t.Cleanup)
+	cl           client.Client                        // EVM client wrapping the simulated backend
+	reader       ccipreaderpkg.CCIPReader             // CCIP reader under test
+}
diff --git a/core/capabilities/ccip/ccip_integration_tests/chainreader/Makefile b/core/capabilities/ccip/ccip_integration_tests/chainreader/Makefile
new file mode 100644
index 00000000000..e9c88564e69
--- /dev/null
+++ b/core/capabilities/ccip/ccip_integration_tests/chainreader/Makefile
@@ -0,0 +1,12 @@
+
+# IMPORTANT: If you encounter any issues try using solc 0.8.18 and abigen 1.14.5
+
+# Regenerate the Go binding (mycontract.go, package chainreader) from
+# mycontract.sol: compile ABI/bytecode with solc, then wrap them with abigen.
+.PHONY: build
+build:
+	rm -rf build/
+	solc --evm-version paris --abi --bin mycontract.sol -o build
+	abigen --abi build/mycontract_sol_SimpleContract.abi --bin build/mycontract_sol_SimpleContract.bin --pkg=chainreader --out=mycontract.go
+
+# Run the playground-tagged tests in this package (rebuilds the binding first).
+.PHONY: test
+test: build
+	go test -v --tags "playground" ./...
diff --git a/core/capabilities/ccip/ccip_integration_tests/chainreader/chainreader_test.go b/core/capabilities/ccip/ccip_integration_tests/chainreader/chainreader_test.go
new file mode 100644
index 00000000000..52a3de0dae9
--- /dev/null
+++ b/core/capabilities/ccip/ccip_integration_tests/chainreader/chainreader_test.go
@@ -0,0 +1,273 @@
+//go:build playground
+// +build playground
+
+package chainreader
+
+import (
+ "context"
+ _ "embed"
+ "math/big"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/stretchr/testify/assert"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/codec"
+ types2 "github.com/smartcontractkit/chainlink-common/pkg/types"
+ query2 "github.com/smartcontractkit/chainlink-common/pkg/types/query"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
+ logger2 "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types"
+)
+
+// chainID is the chain ID used for both the simulated backend's client and
+// the keyed transactor.
+const chainID = 1337
+
+// testSetupData bundles the artifacts produced by testSetup for this test.
+type testSetupData struct {
+	contractAddr common.Address             // deployed Chainreader contract address
+	contract     *Chainreader               // generated binding used to emit events
+	sb           *backends.SimulatedBackend // simulated chain
+	auth         *bind.TransactOpts         // transactor for the funded test key
+}
+
+// TestChainReader is a playground-tagged end-to-end exercise of the EVM chain
+// reader against a locally generated SimpleContract binding: it reads a scalar,
+// an array, a struct (with a field rename modifier), and queries emitted events.
+//
+// Fixes over the original: the "read struct" subtest now asserts the
+// GetLatestValue error instead of silently ignoring it, and the readiness wait
+// sleeps between polls instead of busy-spinning a CPU core.
+func TestChainReader(t *testing.T) {
+	ctx := testutils.Context(t)
+	lggr := logger2.NullLogger
+	d := testSetup(t, ctx)
+
+	db := pgtest.NewSqlxDB(t)
+	lpOpts := logpoller.Opts{
+		PollPeriod:               time.Millisecond,
+		FinalityDepth:            0,
+		BackfillBatchSize:        10,
+		RpcBatchSize:             10,
+		KeepFinalizedBlocksDepth: 100000,
+	}
+	cl := client.NewSimulatedBackendClient(t, d.sb, big.NewInt(chainID))
+	headTracker := headtracker.NewSimulatedHeadTracker(cl, lpOpts.UseFinalityTag, lpOpts.FinalityDepth)
+	lp := logpoller.NewLogPoller(logpoller.NewORM(big.NewInt(chainID), db, lggr),
+		cl,
+		lggr,
+		headTracker,
+		lpOpts,
+	)
+	assert.NoError(t, lp.Start(ctx))
+
+	// Aliases exercise the chain reader's name-mapping: generic names on the
+	// left, chain-specific (Solidity) names on the right.
+	const (
+		ContractNameAlias = "myCoolContract"
+
+		FnAliasGetCount = "myCoolFunction"
+		FnGetCount      = "getEventCount"
+
+		FnAliasGetNumbers = "GetNumbers"
+		FnGetNumbers      = "getNumbers"
+
+		FnAliasGetPerson = "GetPerson"
+		FnGetPerson      = "getPerson"
+
+		EventNameAlias = "myCoolEvent"
+		EventName      = "SimpleEvent"
+	)
+
+	// Initialize chainReader
+	cfg := evmtypes.ChainReaderConfig{
+		Contracts: map[string]evmtypes.ChainContractReader{
+			ContractNameAlias: {
+				ContractPollingFilter: evmtypes.ContractPollingFilter{
+					GenericEventNames: []string{EventNameAlias},
+				},
+				ContractABI: ChainreaderMetaData.ABI,
+				Configs: map[string]*evmtypes.ChainReaderDefinition{
+					EventNameAlias: {
+						ChainSpecificName:       EventName,
+						ReadType:                evmtypes.Event,
+						ConfidenceConfirmations: map[string]int{"0.0": 0, "1.0": 0},
+					},
+					FnAliasGetCount: {
+						ChainSpecificName: FnGetCount,
+					},
+					FnAliasGetNumbers: {
+						ChainSpecificName:   FnGetNumbers,
+						OutputModifications: codec.ModifiersConfig{},
+					},
+					FnAliasGetPerson: {
+						ChainSpecificName: FnGetPerson,
+						OutputModifications: codec.ModifiersConfig{
+							&codec.RenameModifierConfig{
+								Fields: map[string]string{"Name": "NameField"}, // solidity name -> go struct name
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	cr, err := evm.NewChainReaderService(ctx, lggr, lp, cl, cfg)
+	assert.NoError(t, err)
+	err = cr.Bind(ctx, []types2.BoundContract{
+		{
+			Address: d.contractAddr.String(),
+			Name:    ContractNameAlias,
+			Pending: false,
+		},
+	})
+	assert.NoError(t, err)
+
+	err = cr.Start(ctx)
+	assert.NoError(t, err)
+	// Wait for the service to come up; sleep between polls instead of
+	// busy-spinning (the original loop pinned a core until Ready succeeded).
+	for cr.Ready() != nil {
+		time.Sleep(50 * time.Millisecond)
+	}
+
+	emitEvents(t, d, ctx) // Calls the contract to emit events
+
+	// (hack) Sometimes LP logs are missing, commit several times and wait few seconds to make it work.
+	for i := 0; i < 100; i++ {
+		d.sb.Commit()
+	}
+	time.Sleep(5 * time.Second)
+
+	t.Run("simple contract read", func(t *testing.T) {
+		var cnt big.Int
+		err = cr.GetLatestValue(ctx, ContractNameAlias, FnAliasGetCount, map[string]interface{}{}, &cnt)
+		assert.NoError(t, err)
+		assert.Equal(t, int64(10), cnt.Int64())
+	})
+
+	t.Run("read array", func(t *testing.T) {
+		var nums []big.Int
+		err = cr.GetLatestValue(ctx, ContractNameAlias, FnAliasGetNumbers, map[string]interface{}{}, &nums)
+		assert.NoError(t, err)
+		assert.Len(t, nums, 10)
+		for i := 1; i <= 10; i++ {
+			assert.Equal(t, int64(i), nums[i-1].Int64())
+		}
+	})
+
+	t.Run("read struct", func(t *testing.T) {
+		person := struct {
+			NameField string
+			Age       *big.Int // WARN: specifying a wrong data type e.g. int instead of *big.Int fails silently with a default value of 0
+		}{}
+		err = cr.GetLatestValue(ctx, ContractNameAlias, FnAliasGetPerson, map[string]interface{}{}, &person)
+		// Bug fix: the error was previously never asserted, so a failed read
+		// would surface as confusing zero-value field mismatches below.
+		assert.NoError(t, err)
+		assert.Equal(t, "Dim", person.NameField)
+		assert.Equal(t, int64(18), person.Age.Int64())
+	})
+
+	t.Run("read events", func(t *testing.T) {
+		var myDataType *big.Int
+		seq, err := cr.QueryKey(
+			ctx,
+			ContractNameAlias,
+			query2.KeyFilter{
+				Key:         EventNameAlias,
+				Expressions: []query2.Expression{},
+			},
+			query2.LimitAndSort{},
+			myDataType,
+		)
+		assert.NoError(t, err)
+		assert.Equal(t, 10, len(seq), "expected 10 events from chain reader")
+		for _, v := range seq {
+			// TODO: for some reason log poller does not populate event data
+			blockNum, err := strconv.ParseUint(v.Identifier, 10, 64)
+			assert.NoError(t, err)
+			assert.Positive(t, blockNum)
+			t.Logf("(chain reader) got event: (data=%v) (hash=%x)", v.Data, v.Hash)
+		}
+	})
+}
+
+// testSetup creates a funded key on a fresh simulated backend, deploys the
+// Chainreader test contract, and returns the deployment artifacts the test
+// needs (address, bound contract, backend, and transactor).
+func testSetup(t *testing.T, ctx context.Context) *testSetupData {
+	// Generate a new key pair for the simulated account
+	privateKey, err := crypto.GenerateKey()
+	assert.NoError(t, err)
+	// Set up the genesis account with balance
+	blnc, ok := big.NewInt(0).SetString("999999999999999999999999999999999999", 10)
+	assert.True(t, ok)
+	alloc := map[common.Address]core.GenesisAccount{crypto.PubkeyToAddress(privateKey.PublicKey): {Balance: blnc}}
+	simulatedBackend := backends.NewSimulatedBackend(alloc, 0)
+	// Create a transactor
+
+	auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(chainID))
+	assert.NoError(t, err)
+	auth.GasLimit = uint64(0) // 0 presumably lets the backend estimate gas — NOTE(review): confirm
+
+	// Deploy the contract
+	address, tx, _, err := DeployChainreader(auth, simulatedBackend)
+	assert.NoError(t, err)
+	simulatedBackend.Commit()
+	t.Logf("contract deployed: addr=%s tx=%s", address.Hex(), tx.Hash())
+
+	// Setup contract client
+	contract, err := NewChainreader(address, simulatedBackend)
+	assert.NoError(t, err)
+
+	return &testSetupData{
+		contractAddr: address,
+		contract:     contract,
+		sb:           simulatedBackend,
+		auth:         auth,
+	}
+}
+
+// emitEvents asks the contract to emit 10 SimpleEvent logs while a second
+// goroutine independently consumes them through a raw go-ethereum log
+// subscription; it returns once both the emitter and the listener finish.
+//
+// Fixes over the original: the subscription is now released on exit (it was
+// leaked), the listener bails out if the subscription could not be created,
+// and a subscription error terminates the listener instead of re-selecting on
+// the error channel forever and hanging the test until timeout.
+func emitEvents(t *testing.T, d *testSetupData, ctx context.Context) {
+	var wg sync.WaitGroup
+	wg.Add(2)
+
+	// Start emitting events
+	go func() {
+		defer wg.Done()
+		for i := 0; i < 10; i++ {
+			_, err := d.contract.EmitEvent(d.auth)
+			assert.NoError(t, err)
+			d.sb.Commit()
+		}
+	}()
+
+	// Listen events using go-ethereum lib
+	go func() {
+		defer wg.Done()
+		query := ethereum.FilterQuery{
+			FromBlock: big.NewInt(0),
+			Addresses: []common.Address{d.contractAddr},
+		}
+		logs := make(chan types.Log)
+		sub, err := d.sb.SubscribeFilterLogs(ctx, query, logs)
+		assert.NoError(t, err)
+		if err != nil {
+			// Without a subscription there is nothing to listen on.
+			return
+		}
+		defer sub.Unsubscribe() // previously leaked
+
+		numLogs := 0
+		for {
+			// Wait for the events
+			select {
+			case err := <-sub.Err():
+				// Record the failure and stop; looping again would spin on the
+				// error channel and block wg.Wait until the test timed out.
+				assert.NoError(t, err, "got an unexpected error")
+				return
+			case vLog := <-logs:
+				assert.Equal(t, d.contractAddr, vLog.Address, "got an unexpected address")
+				t.Logf("(geth) got new log (cnt=%d) (data=%x) (topics=%s)", numLogs, vLog.Data, vLog.Topics)
+				numLogs++
+				if numLogs == 10 {
+					return
+				}
+			}
+		}
+	}()
+
+	wg.Wait() // wait for all the events to be consumed
+}
diff --git a/core/capabilities/ccip/ccip_integration_tests/chainreader/mycontract.go b/core/capabilities/ccip/ccip_integration_tests/chainreader/mycontract.go
new file mode 100644
index 00000000000..c7d480eed46
--- /dev/null
+++ b/core/capabilities/ccip/ccip_integration_tests/chainreader/mycontract.go
@@ -0,0 +1,519 @@
+// Code generated - DO NOT EDIT.
+// This file is a generated binding and any manual changes will be lost.
+
+package chainreader
+
+import (
+ "errors"
+ "math/big"
+ "strings"
+
+ ethereum "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/event"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var (
+ _ = errors.New
+ _ = big.NewInt
+ _ = strings.NewReader
+ _ = ethereum.NotFound
+ _ = bind.Bind
+ _ = common.Big1
+ _ = types.BloomLookup
+ _ = event.NewSubscription
+ _ = abi.ConvertType
+)
+
+// SimpleContractPerson is an auto generated low-level Go binding around an user-defined struct.
+type SimpleContractPerson struct {
+ Name string
+ Age *big.Int
+}
+
+// ChainreaderMetaData contains all meta data concerning the Chainreader contract.
+var ChainreaderMetaData = &bind.MetaData{
+ ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"SimpleEvent\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"emitEvent\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"eventCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getEventCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getNumbers\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPerson\",\"outputs\":[{\"components\":[{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"internalType\":\"uint256\",\"name\":\"age\",\"type\":\"uint256\"}],\"internalType\":\"structSimpleContract.Person\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"numbers\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
+	Bin: "0x608060405234801561001057600080fd5b506105a1806100206000396000f3fe608060405234801561001057600080fd5b50600436106100625760003560e01c806371be2e4a146100675780637b0cb8391461008557806389f915f61461008f5780638ec4dc95146100ad578063d39fa233146100cb578063d9e48f5c146100fb575b600080fd5b61006f610119565b60405161007c91906102ac565b60405180910390f35b61008d61011f565b005b61009761019c565b6040516100a49190610385565b60405180910390f35b6100b56101f4565b6040516100c29190610474565b60405180910390f35b6100e560048036038101906100e091906104c7565b61024c565b6040516100f291906102ac565b60405180910390f35b610103610270565b60405161011091906102ac565b60405180910390f35b60005481565b60008081548092919061013190610523565b9190505550600160005490806001815401808255809150506001900390600052602060002001600090919091909150557f12d199749b3f4c44df8d9386c63d725b7756ec47204f3aa0bf05ea832f89effb60005460405161019291906102ac565b60405180910390a1565b606060018054806020026020016040519081016040528092919081815260200182805480156101ea57602002820191906000526020600020905b8154815260200190600101908083116101d6575b5050505050905090565b6101fc610279565b60405180604001604052806040518060400160405280600381526020017f44696d000000000000000000000000000000000000000000000000000000000081525081526020016012815250905090565b6001818154811061025c57600080fd5b906000526020600020016000915090505481565b60008054905090565b604051806040016040528060608152602001600081525090565b6000819050919050565b6102a681610293565b82525050565b60006020820190506102c1600083018461029d565b92915050565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b6102fc81610293565b82525050565b600061030e83836102f3565b60208301905092915050565b6000602082019050919050565b6000610332826102c7565b61033c81856102d2565b9350610347836102e3565b8060005b8381101561037857815161035f8882610302565b975061036a8361031a565b92505060018101905061034b565b5085935050505092915050565b6000602082019050818103600083015261039f8184610327565b905092915050565b600081519050919050565b60008282526020820190509291505056 5b60005b838110156103e15780820151818401526020810190506103c6565b60008484015250505050565b6000601f19601f8301169050919050565b6000610409826103a7565b61041381856103b2565b93506104238185602086016103c3565b61042c816103ed565b840191505092915050565b6000604083016000830151848203600086015261045482826103fe565b915050602083015161046960208601826102f3565b508091505092915050565b6000602082019050818103600083015261048e8184610437565b905092915050565b600080fd5b6104a481610293565b81146104af57600080fd5b50565b6000813590506104c18161049b565b92915050565b6000602082840312156104dd576104dc610496565b5b60006104eb848285016104b2565b91505092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600061052e82610293565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036105605761055f6104f4565b5b60018201905091905056fea2646970667358221220f7986dc9efbc0d9ef58e2925ffddc62ea13a6bab8b3a2c03ad2d85d50653129664736f6c63430008120033",
+}
+
+// ChainreaderABI is the input ABI used to generate the binding from.
+// Deprecated: Use ChainreaderMetaData.ABI instead.
+var ChainreaderABI = ChainreaderMetaData.ABI
+
+// ChainreaderBin is the compiled bytecode used for deploying new contracts.
+// Deprecated: Use ChainreaderMetaData.Bin instead.
+var ChainreaderBin = ChainreaderMetaData.Bin
+
+// DeployChainreader deploys a new Ethereum contract, binding an instance of Chainreader to it.
+// It parses the embedded ABI metadata, submits the creation transaction signed
+// by auth with the compiled ChainreaderBin bytecode, and returns the contract
+// address, the deployment transaction, and a Chainreader binding that wraps
+// caller, transactor and filterer views of the deployed contract.
+func DeployChainreader(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Chainreader, error) {
+	parsed, err := ChainreaderMetaData.GetAbi()
+	if err != nil {
+		return common.Address{}, nil, nil, err
+	}
+	if parsed == nil {
+		return common.Address{}, nil, nil, errors.New("GetABI returned nil")
+	}
+
+	address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(ChainreaderBin), backend)
+	if err != nil {
+		return common.Address{}, nil, nil, err
+	}
+	return address, tx, &Chainreader{ChainreaderCaller: ChainreaderCaller{contract: contract}, ChainreaderTransactor: ChainreaderTransactor{contract: contract}, ChainreaderFilterer: ChainreaderFilterer{contract: contract}}, nil
+}
+
+// Chainreader is an auto generated Go binding around an Ethereum contract.
+type Chainreader struct {
+ ChainreaderCaller // Read-only binding to the contract
+ ChainreaderTransactor // Write-only binding to the contract
+ ChainreaderFilterer // Log filterer for contract events
+}
+
+// ChainreaderCaller is an auto generated read-only Go binding around an Ethereum contract.
+type ChainreaderCaller struct {
+ contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// ChainreaderTransactor is an auto generated write-only Go binding around an Ethereum contract.
+type ChainreaderTransactor struct {
+ contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// ChainreaderFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
+type ChainreaderFilterer struct {
+ contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// ChainreaderSession is an auto generated Go binding around an Ethereum contract,
+// with pre-set call and transact options.
+type ChainreaderSession struct {
+ Contract *Chainreader // Generic contract binding to set the session for
+ CallOpts bind.CallOpts // Call options to use throughout this session
+ TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
+}
+
+// ChainreaderCallerSession is an auto generated read-only Go binding around an Ethereum contract,
+// with pre-set call options.
+type ChainreaderCallerSession struct {
+ Contract *ChainreaderCaller // Generic contract caller binding to set the session for
+ CallOpts bind.CallOpts // Call options to use throughout this session
+}
+
+// ChainreaderTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
+// with pre-set transact options.
+type ChainreaderTransactorSession struct {
+ Contract *ChainreaderTransactor // Generic contract transactor binding to set the session for
+ TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
+}
+
+// ChainreaderRaw is an auto generated low-level Go binding around an Ethereum contract.
+type ChainreaderRaw struct {
+ Contract *Chainreader // Generic contract binding to access the raw methods on
+}
+
+// ChainreaderCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
+type ChainreaderCallerRaw struct {
+ Contract *ChainreaderCaller // Generic read-only contract binding to access the raw methods on
+}
+
+// ChainreaderTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
+type ChainreaderTransactorRaw struct {
+ Contract *ChainreaderTransactor // Generic write-only contract binding to access the raw methods on
+}
+
+// NewChainreader creates a new instance of Chainreader, bound to a specific deployed contract.
+func NewChainreader(address common.Address, backend bind.ContractBackend) (*Chainreader, error) {
+ contract, err := bindChainreader(address, backend, backend, backend)
+ if err != nil {
+ return nil, err
+ }
+ return &Chainreader{ChainreaderCaller: ChainreaderCaller{contract: contract}, ChainreaderTransactor: ChainreaderTransactor{contract: contract}, ChainreaderFilterer: ChainreaderFilterer{contract: contract}}, nil
+}
+
+// NewChainreaderCaller creates a new read-only instance of Chainreader, bound to a specific deployed contract.
+func NewChainreaderCaller(address common.Address, caller bind.ContractCaller) (*ChainreaderCaller, error) {
+ contract, err := bindChainreader(address, caller, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &ChainreaderCaller{contract: contract}, nil
+}
+
+// NewChainreaderTransactor creates a new write-only instance of Chainreader, bound to a specific deployed contract.
+func NewChainreaderTransactor(address common.Address, transactor bind.ContractTransactor) (*ChainreaderTransactor, error) {
+ contract, err := bindChainreader(address, nil, transactor, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &ChainreaderTransactor{contract: contract}, nil
+}
+
+// NewChainreaderFilterer creates a new log filterer instance of Chainreader, bound to a specific deployed contract.
+func NewChainreaderFilterer(address common.Address, filterer bind.ContractFilterer) (*ChainreaderFilterer, error) {
+ contract, err := bindChainreader(address, nil, nil, filterer)
+ if err != nil {
+ return nil, err
+ }
+ return &ChainreaderFilterer{contract: contract}, nil
+}
+
+// bindChainreader binds a generic wrapper to an already deployed contract.
+// Any of caller, transactor or filterer may be nil when the corresponding
+// capability is not needed (the NewChainreaderCaller/Transactor/Filterer
+// constructors pass nil for the roles they do not expose).
+func bindChainreader(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
+	parsed, err := ChainreaderMetaData.GetAbi()
+	if err != nil {
+		return nil, err
+	}
+	return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil
+}
+
+// Call invokes the (constant) contract method with params as input values and
+// sets the output to result. The result type might be a single field for simple
+// returns, a slice of interfaces for anonymous returns and a struct for named
+// returns.
+func (_Chainreader *ChainreaderRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+ return _Chainreader.Contract.ChainreaderCaller.contract.Call(opts, result, method, params...)
+}
+
+// Transfer initiates a plain transaction to move funds to the contract, calling
+// its default method if one is available.
+func (_Chainreader *ChainreaderRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
+ return _Chainreader.Contract.ChainreaderTransactor.contract.Transfer(opts)
+}
+
+// Transact invokes the (paid) contract method with params as input values.
+func (_Chainreader *ChainreaderRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
+ return _Chainreader.Contract.ChainreaderTransactor.contract.Transact(opts, method, params...)
+}
+
+// Call invokes the (constant) contract method with params as input values and
+// sets the output to result. The result type might be a single field for simple
+// returns, a slice of interfaces for anonymous returns and a struct for named
+// returns.
+func (_Chainreader *ChainreaderCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+ return _Chainreader.Contract.contract.Call(opts, result, method, params...)
+}
+
+// Transfer initiates a plain transaction to move funds to the contract, calling
+// its default method if one is available.
+func (_Chainreader *ChainreaderTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
+ return _Chainreader.Contract.contract.Transfer(opts)
+}
+
+// Transact invokes the (paid) contract method with params as input values.
+func (_Chainreader *ChainreaderTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
+ return _Chainreader.Contract.contract.Transact(opts, method, params...)
+}
+
+// EventCount is a free data retrieval call binding the contract method 0x71be2e4a.
+//
+// Solidity: function eventCount() view returns(uint256)
+func (_Chainreader *ChainreaderCaller) EventCount(opts *bind.CallOpts) (*big.Int, error) {
+ var out []interface{}
+ err := _Chainreader.contract.Call(opts, &out, "eventCount")
+
+ if err != nil {
+ return *new(*big.Int), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+ return out0, err
+
+}
+
+// EventCount is a free data retrieval call binding the contract method 0x71be2e4a.
+//
+// Solidity: function eventCount() view returns(uint256)
+func (_Chainreader *ChainreaderSession) EventCount() (*big.Int, error) {
+ return _Chainreader.Contract.EventCount(&_Chainreader.CallOpts)
+}
+
+// EventCount is a free data retrieval call binding the contract method 0x71be2e4a.
+//
+// Solidity: function eventCount() view returns(uint256)
+func (_Chainreader *ChainreaderCallerSession) EventCount() (*big.Int, error) {
+ return _Chainreader.Contract.EventCount(&_Chainreader.CallOpts)
+}
+
+// GetEventCount is a free data retrieval call binding the contract method 0xd9e48f5c.
+//
+// Solidity: function getEventCount() view returns(uint256)
+func (_Chainreader *ChainreaderCaller) GetEventCount(opts *bind.CallOpts) (*big.Int, error) {
+ var out []interface{}
+ err := _Chainreader.contract.Call(opts, &out, "getEventCount")
+
+ if err != nil {
+ return *new(*big.Int), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+ return out0, err
+
+}
+
+// GetEventCount is a free data retrieval call binding the contract method 0xd9e48f5c.
+//
+// Solidity: function getEventCount() view returns(uint256)
+func (_Chainreader *ChainreaderSession) GetEventCount() (*big.Int, error) {
+ return _Chainreader.Contract.GetEventCount(&_Chainreader.CallOpts)
+}
+
+// GetEventCount is a free data retrieval call binding the contract method 0xd9e48f5c.
+//
+// Solidity: function getEventCount() view returns(uint256)
+func (_Chainreader *ChainreaderCallerSession) GetEventCount() (*big.Int, error) {
+ return _Chainreader.Contract.GetEventCount(&_Chainreader.CallOpts)
+}
+
+// GetNumbers is a free data retrieval call binding the contract method 0x89f915f6.
+//
+// Solidity: function getNumbers() view returns(uint256[])
+func (_Chainreader *ChainreaderCaller) GetNumbers(opts *bind.CallOpts) ([]*big.Int, error) {
+ var out []interface{}
+ err := _Chainreader.contract.Call(opts, &out, "getNumbers")
+
+ if err != nil {
+ return *new([]*big.Int), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int)
+
+ return out0, err
+
+}
+
+// GetNumbers is a free data retrieval call binding the contract method 0x89f915f6.
+//
+// Solidity: function getNumbers() view returns(uint256[])
+func (_Chainreader *ChainreaderSession) GetNumbers() ([]*big.Int, error) {
+ return _Chainreader.Contract.GetNumbers(&_Chainreader.CallOpts)
+}
+
+// GetNumbers is a free data retrieval call binding the contract method 0x89f915f6.
+//
+// Solidity: function getNumbers() view returns(uint256[])
+func (_Chainreader *ChainreaderCallerSession) GetNumbers() ([]*big.Int, error) {
+ return _Chainreader.Contract.GetNumbers(&_Chainreader.CallOpts)
+}
+
+// GetPerson is a free data retrieval call binding the contract method 0x8ec4dc95.
+//
+// Solidity: function getPerson() pure returns((string,uint256))
+func (_Chainreader *ChainreaderCaller) GetPerson(opts *bind.CallOpts) (SimpleContractPerson, error) {
+ var out []interface{}
+ err := _Chainreader.contract.Call(opts, &out, "getPerson")
+
+ if err != nil {
+ return *new(SimpleContractPerson), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new(SimpleContractPerson)).(*SimpleContractPerson)
+
+ return out0, err
+
+}
+
+// GetPerson is a free data retrieval call binding the contract method 0x8ec4dc95.
+//
+// Solidity: function getPerson() pure returns((string,uint256))
+func (_Chainreader *ChainreaderSession) GetPerson() (SimpleContractPerson, error) {
+ return _Chainreader.Contract.GetPerson(&_Chainreader.CallOpts)
+}
+
+// GetPerson is a free data retrieval call binding the contract method 0x8ec4dc95.
+//
+// Solidity: function getPerson() pure returns((string,uint256))
+func (_Chainreader *ChainreaderCallerSession) GetPerson() (SimpleContractPerson, error) {
+ return _Chainreader.Contract.GetPerson(&_Chainreader.CallOpts)
+}
+
+// Numbers is a free data retrieval call binding the contract method 0xd39fa233.
+//
+// Solidity: function numbers(uint256 ) view returns(uint256)
+func (_Chainreader *ChainreaderCaller) Numbers(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) {
+ var out []interface{}
+ err := _Chainreader.contract.Call(opts, &out, "numbers", arg0)
+
+ if err != nil {
+ return *new(*big.Int), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+ return out0, err
+
+}
+
+// Numbers is a free data retrieval call binding the contract method 0xd39fa233.
+//
+// Solidity: function numbers(uint256 ) view returns(uint256)
+func (_Chainreader *ChainreaderSession) Numbers(arg0 *big.Int) (*big.Int, error) {
+ return _Chainreader.Contract.Numbers(&_Chainreader.CallOpts, arg0)
+}
+
+// Numbers is a free data retrieval call binding the contract method 0xd39fa233.
+//
+// Solidity: function numbers(uint256 ) view returns(uint256)
+func (_Chainreader *ChainreaderCallerSession) Numbers(arg0 *big.Int) (*big.Int, error) {
+ return _Chainreader.Contract.Numbers(&_Chainreader.CallOpts, arg0)
+}
+
+// EmitEvent is a paid mutator transaction binding the contract method 0x7b0cb839.
+//
+// Solidity: function emitEvent() returns()
+func (_Chainreader *ChainreaderTransactor) EmitEvent(opts *bind.TransactOpts) (*types.Transaction, error) {
+ return _Chainreader.contract.Transact(opts, "emitEvent")
+}
+
+// EmitEvent is a paid mutator transaction binding the contract method 0x7b0cb839.
+//
+// Solidity: function emitEvent() returns()
+func (_Chainreader *ChainreaderSession) EmitEvent() (*types.Transaction, error) {
+ return _Chainreader.Contract.EmitEvent(&_Chainreader.TransactOpts)
+}
+
+// EmitEvent is a paid mutator transaction binding the contract method 0x7b0cb839.
+//
+// Solidity: function emitEvent() returns()
+func (_Chainreader *ChainreaderTransactorSession) EmitEvent() (*types.Transaction, error) {
+ return _Chainreader.Contract.EmitEvent(&_Chainreader.TransactOpts)
+}
+
+// ChainreaderSimpleEventIterator is returned from FilterSimpleEvent and is used to iterate over the raw logs and unpacked data for SimpleEvent events raised by the Chainreader contract.
+type ChainreaderSimpleEventIterator struct {
+ Event *ChainreaderSimpleEvent // Event containing the contract specifics and raw log
+
+ contract *bind.BoundContract // Generic contract to use for unpacking event data
+ event string // Event name to use for unpacking event data
+
+ logs chan types.Log // Log channel receiving the found contract events
+ sub ethereum.Subscription // Subscription for errors, completion and termination
+ done bool // Whether the subscription completed delivering logs
+ fail error // Occurred error to stop iteration
+}
+
+// Next advances the iterator to the subsequent event, returning whether there
+// are any more events found. In case of a retrieval or parsing error, false is
+// returned and Error() can be queried for the exact failure.
+// Next advances the iterator to the subsequent event, returning whether there
+// are any more events found. In case of a retrieval or parsing error, false is
+// returned and Error() can be queried for the exact failure.
+func (it *ChainreaderSimpleEventIterator) Next() bool {
+	// If the iterator failed, stop iterating
+	if it.fail != nil {
+		return false
+	}
+	// If the iterator completed, deliver directly whatever's available
+	// (non-blocking drain of any logs still buffered in the channel).
+	if it.done {
+		select {
+		case log := <-it.logs:
+			it.Event = new(ChainreaderSimpleEvent)
+			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+				it.fail = err
+				return false
+			}
+			it.Event.Raw = log
+			return true
+
+		default:
+			return false
+		}
+	}
+	// Iterator still in progress, wait for either a data or an error event
+	select {
+	case log := <-it.logs:
+		it.Event = new(ChainreaderSimpleEvent)
+		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+			it.fail = err
+			return false
+		}
+		it.Event.Raw = log
+		return true
+
+	case err := <-it.sub.Err():
+		// Subscription terminated: record the (possibly nil) error, mark the
+		// iterator done, and recurse once so the non-blocking branch above can
+		// drain any remaining buffered logs before reporting exhaustion.
+		it.done = true
+		it.fail = err
+		return it.Next()
+	}
+}
+
+// Error returns any retrieval or parsing error occurred during filtering.
+func (it *ChainreaderSimpleEventIterator) Error() error {
+ return it.fail
+}
+
+// Close terminates the iteration process, releasing any pending underlying
+// resources.
+func (it *ChainreaderSimpleEventIterator) Close() error {
+ it.sub.Unsubscribe()
+ return nil
+}
+
+// ChainreaderSimpleEvent represents a SimpleEvent event raised by the Chainreader contract.
+type ChainreaderSimpleEvent struct {
+ Value *big.Int
+ Raw types.Log // Blockchain specific contextual infos
+}
+
+// FilterSimpleEvent is a free log retrieval operation binding the contract event 0x12d199749b3f4c44df8d9386c63d725b7756ec47204f3aa0bf05ea832f89effb.
+//
+// Solidity: event SimpleEvent(uint256 value)
+func (_Chainreader *ChainreaderFilterer) FilterSimpleEvent(opts *bind.FilterOpts) (*ChainreaderSimpleEventIterator, error) {
+
+ logs, sub, err := _Chainreader.contract.FilterLogs(opts, "SimpleEvent")
+ if err != nil {
+ return nil, err
+ }
+ return &ChainreaderSimpleEventIterator{contract: _Chainreader.contract, event: "SimpleEvent", logs: logs, sub: sub}, nil
+}
+
+// WatchSimpleEvent is a free log subscription operation binding the contract event 0x12d199749b3f4c44df8d9386c63d725b7756ec47204f3aa0bf05ea832f89effb.
+//
+// Solidity: event SimpleEvent(uint256 value)
+func (_Chainreader *ChainreaderFilterer) WatchSimpleEvent(opts *bind.WatchOpts, sink chan<- *ChainreaderSimpleEvent) (event.Subscription, error) {
+
+ logs, sub, err := _Chainreader.contract.WatchLogs(opts, "SimpleEvent")
+ if err != nil {
+ return nil, err
+ }
+ return event.NewSubscription(func(quit <-chan struct{}) error {
+ defer sub.Unsubscribe()
+ for {
+ select {
+ case log := <-logs:
+ // New log arrived, parse the event and forward to the user
+ event := new(ChainreaderSimpleEvent)
+ if err := _Chainreader.contract.UnpackLog(event, "SimpleEvent", log); err != nil {
+ return err
+ }
+ event.Raw = log
+
+ select {
+ case sink <- event:
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ }
+ }), nil
+}
+
+// ParseSimpleEvent is a log parse operation binding the contract event 0x12d199749b3f4c44df8d9386c63d725b7756ec47204f3aa0bf05ea832f89effb.
+//
+// Solidity: event SimpleEvent(uint256 value)
+func (_Chainreader *ChainreaderFilterer) ParseSimpleEvent(log types.Log) (*ChainreaderSimpleEvent, error) {
+ event := new(ChainreaderSimpleEvent)
+ if err := _Chainreader.contract.UnpackLog(event, "SimpleEvent", log); err != nil {
+ return nil, err
+ }
+ event.Raw = log
+ return event, nil
+}
diff --git a/core/capabilities/ccip/ccip_integration_tests/chainreader/mycontract.sol b/core/capabilities/ccip/ccip_integration_tests/chainreader/mycontract.sol
new file mode 100644
index 00000000000..0fae1f4baac
--- /dev/null
+++ b/core/capabilities/ccip/ccip_integration_tests/chainreader/mycontract.sol
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.18;
+
+/// @title SimpleContract - minimal fixture used by the chainreader integration tests.
+/// @notice Each emitEvent() call increments eventCount, records it in numbers,
+///         and emits SimpleEvent with the updated count.
+contract SimpleContract {
+    /// @notice Emitted once per emitEvent() call; value is the updated eventCount.
+    event SimpleEvent(uint256 value);
+    // Total number of SimpleEvent emissions so far.
+    uint256 public eventCount;
+    // History of eventCount values, one entry pushed per emitEvent() call.
+    uint[] public numbers;
+
+    // Sample tuple type returned by getPerson() (exercises struct decoding).
+    struct Person {
+        string name;
+        uint age;
+    }
+
+    /// @notice Increments the counter, appends it to numbers, and emits SimpleEvent.
+    function emitEvent() public {
+        eventCount++;
+        numbers.push(eventCount);
+        emit SimpleEvent(eventCount);
+    }
+
+    /// @return The current value of eventCount.
+    function getEventCount() public view returns (uint256) {
+        return eventCount;
+    }
+
+    /// @return The full numbers array (one entry per emitted event).
+    function getNumbers() public view returns (uint256[] memory) {
+        return numbers;
+    }
+
+    /// @return A fixed Person tuple ("Dim", 18) for struct-decoding tests.
+    function getPerson() public pure returns (Person memory) {
+        return Person("Dim", 18);
+    }
+}
diff --git a/core/capabilities/ccip/ccip_integration_tests/helpers.go b/core/capabilities/ccip/ccip_integration_tests/helpers.go
new file mode 100644
index 00000000000..7606c8bbebc
--- /dev/null
+++ b/core/capabilities/ccip/ccip_integration_tests/helpers.go
@@ -0,0 +1,938 @@
+package ccip_integration_tests
+
+import (
+ "bytes"
+ "encoding/hex"
+ "math/big"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/smartcontractkit/chainlink-ccip/chainconfig"
+ "github.com/smartcontractkit/chainlink-ccip/pluginconfig"
+ commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
+ "github.com/smartcontractkit/chainlink-common/pkg/types/ccipocr3"
+ "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/ccip_integration_tests/integrationhelpers"
+ cctypes "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core"
+
+ confighelper2 "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper"
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3confighelper"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/arm_proxy_contract"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/ccip_config"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_multi_offramp"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_multi_onramp"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/maybe_revert_message_receiver"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_arm_contract"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/nonce_manager"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/ocr3_config_encoder"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/token_admin_registry"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/weth9"
+ kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/shared/generated/link_token"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+
+ chainsel "github.com/smartcontractkit/chain-selectors"
+
+ "github.com/stretchr/testify/require"
+)
+
+var (
+	// Chain ID of the home chain — the chain hosting the capability registry
+	// and the CCIP config contract.
+	homeChainID = chainsel.GETH_TESTNET.EvmChainID
+	// Topic hashes of the CCIP events that tests log for debugging (see createUniverses).
+	ccipSendRequestedTopic     = evm_2_evm_multi_onramp.EVM2EVMMultiOnRampCCIPSendRequested{}.Topic()
+	commitReportAcceptedTopic  = evm_2_evm_multi_offramp.EVM2EVMMultiOffRampCommitReportAccepted{}.Topic()
+	executionStateChangedTopic = evm_2_evm_multi_offramp.EVM2EVMMultiOffRampExecutionStateChanged{}.Topic()
+)
+
+const (
+	// Identity of the CCIP capability as registered in the capability registry.
+	CapabilityLabelledName = "ccip"
+	CapabilityVersion      = "v1.0.0"
+	// ID of the single node operator registered in tests (the first operator gets ID 1).
+	NodeOperatorID = 1
+
+	// These constants drive what is set in the plugin offchain configs.
+	FirstBlockAge                     = 8 * time.Hour
+	RemoteGasPriceBatchWriteFrequency = 30 * time.Minute
+	BatchGasLimit                     = 6_500_000
+	RelativeBoostPerWaitHour          = 1.5
+	InflightCacheExpiry               = 10 * time.Minute
+	RootSnoozeTime                    = 30 * time.Minute
+	BatchingStrategyID                = 0
+	// OCR3 round/timing parameters passed to ContractSetConfigArgsForTests.
+	DeltaProgress                           = 30 * time.Second
+	DeltaResend                             = 10 * time.Second
+	DeltaInitial                            = 20 * time.Second
+	DeltaRound                              = 2 * time.Second
+	DeltaGrace                              = 2 * time.Second
+	DeltaCertifiedCommitRequest             = 10 * time.Second
+	DeltaStage                              = 10 * time.Second
+	Rmax                                    = 3
+	MaxDurationQuery                        = 50 * time.Millisecond
+	MaxDurationObservation                  = 5 * time.Second
+	MaxDurationShouldAcceptAttestedReport   = 10 * time.Second
+	MaxDurationShouldTransmitAcceptedReport = 10 * time.Second
+)
+
+// e18Mult returns amount scaled by 1e18, i.e. a whole-token amount expressed
+// in the token's smallest unit.
+func e18Mult(amount uint64) *big.Int {
+	scaled := new(big.Int).SetUint64(amount)
+	return scaled.Mul(scaled, new(big.Int).SetUint64(1e18))
+}
+
+func uBigInt(i uint64) *big.Int {
+ return new(big.Int).SetUint64(i)
+}
+
+// homeChain bundles the home-chain simulated backend with the two registry
+// contracts (capability registry + CCIP config) that configure the CCIP
+// capability for all chains.
+type homeChain struct {
+	backend            *backends.SimulatedBackend
+	owner              *bind.TransactOpts
+	chainID            uint64
+	capabilityRegistry *kcr.CapabilitiesRegistry
+	ccipConfig         *ccip_config.CCIPConfig
+}
+
+// onchainUniverse is one chain with the full set of CCIP contracts deployed on
+// it: tokens, router, RMN (+proxy), single onramp/offramp, price registry,
+// token admin registry, nonce manager, and a test message receiver.
+type onchainUniverse struct {
+	backend            *backends.SimulatedBackend
+	owner              *bind.TransactOpts
+	chainID            uint64
+	linkToken          *link_token.LinkToken
+	weth               *weth9.WETH9
+	router             *router.Router
+	rmnProxy           *arm_proxy_contract.ARMProxyContract
+	rmn                *mock_arm_contract.MockARMContract
+	onramp             *evm_2_evm_multi_onramp.EVM2EVMMultiOnRamp
+	offramp            *evm_2_evm_multi_offramp.EVM2EVMMultiOffRamp
+	priceRegistry      *price_registry.PriceRegistry
+	tokenAdminRegistry *token_admin_registry.TokenAdminRegistry
+	nonceManager       *nonce_manager.NonceManager
+	receiver           *maybe_revert_message_receiver.MaybeRevertMessageReceiver
+}
+
+// requestData describes a single CCIP message to send: the destination chain
+// selector, the receiver on that chain, and the message payload.
+type requestData struct {
+	destChainSelector uint64
+	receiverAddress   common.Address
+	data              []byte
+}
+
+// SendCCIPRequests submits one CCIP message per requestData through the local
+// router, paying fees in WETH. Each transaction is committed to the simulated
+// backend so subsequent log filtering sees it mined.
+func (u *onchainUniverse) SendCCIPRequests(t *testing.T, requestDatas []requestData) {
+	for _, reqData := range requestDatas {
+		msg := router.ClientEVM2AnyMessage{
+			// Receiver is ABI-encoded as a 32-byte left-padded address.
+			Receiver:     common.LeftPadBytes(reqData.receiverAddress.Bytes(), 32),
+			Data:         reqData.data,
+			TokenAmounts: nil, // TODO: no tokens for now
+			FeeToken:     u.weth.Address(),
+			ExtraArgs:    nil, // TODO: no extra args for now, falls back to default
+		}
+		// Quote the exact fee, then deposit that much WETH and approve the
+		// router to pull it before sending.
+		fee, err := u.router.GetFee(&bind.CallOpts{Context: testutils.Context(t)}, reqData.destChainSelector, msg)
+		require.NoError(t, err)
+		_, err = u.weth.Deposit(&bind.TransactOpts{
+			From:   u.owner.From,
+			Signer: u.owner.Signer,
+			Value:  fee,
+		})
+		require.NoError(t, err)
+		u.backend.Commit()
+		_, err = u.weth.Approve(u.owner, u.router.Address(), fee)
+		require.NoError(t, err)
+		u.backend.Commit()
+
+		t.Logf("Sending CCIP request from chain %d (selector %d) to chain selector %d",
+			u.chainID, getSelector(u.chainID), reqData.destChainSelector)
+		_, err = u.router.CcipSend(u.owner, reqData.destChainSelector, msg)
+		require.NoError(t, err)
+		u.backend.Commit()
+	}
+}
+
+// chainBase is the minimal per-chain state before any contracts are deployed:
+// a funded deployer account and its simulated backend.
+type chainBase struct {
+	backend *backends.SimulatedBackend
+	owner   *bind.TransactOpts
+}
+
+// createUniverses does the following:
+// 1. Creates 1 home chain and `numChains`-1 non-home chains
+// 2. Sets up home chain with the capability registry and the CCIP config contract
+// 3. Deploys the CCIP contracts to all chains.
+// 4. Sets up the initial configurations for the contracts on all chains.
+// 5. Wires the chains together.
+//
+// Conceptually one universe is ONE chain with all the contracts deployed on it and all the dependencies initialized.
+func createUniverses(
+	t *testing.T,
+	numChains int,
+) (homeChainUni homeChain, universes map[uint64]onchainUniverse) {
+	chains := createChains(t, numChains)
+
+	homeChainBase, ok := chains[homeChainID]
+	require.True(t, ok, "home chain backend not available")
+	// Set up home chain first
+	homeChainUniverse := setupHomeChain(t, homeChainBase.owner, homeChainBase.backend)
+
+	// deploy the ccip contracts on all chains
+	universes = make(map[uint64]onchainUniverse)
+	for chainID, base := range chains {
+		owner := base.owner
+		backend := base.backend
+		// deploy the CCIP contracts
+		linkToken := deployLinkToken(t, owner, backend, chainID)
+		rmn := deployMockARMContract(t, owner, backend, chainID)
+		rmnProxy := deployARMProxyContract(t, owner, backend, rmn.Address(), chainID)
+		weth := deployWETHContract(t, owner, backend, chainID)
+		rout := deployRouter(t, owner, backend, weth.Address(), rmnProxy.Address(), chainID)
+		priceRegistry := deployPriceRegistry(t, owner, backend, linkToken.Address(), weth.Address(), big.NewInt(1e18), chainID)
+		tokenAdminRegistry := deployTokenAdminRegistry(t, owner, backend, chainID)
+		nonceManager := deployNonceManager(t, owner, backend, chainID)
+
+		// ======================================================================
+		//				OnRamp
+		// ======================================================================
+		onRampAddr, _, _, err := evm_2_evm_multi_onramp.DeployEVM2EVMMultiOnRamp(
+			owner,
+			backend,
+			evm_2_evm_multi_onramp.EVM2EVMMultiOnRampStaticConfig{
+				ChainSelector:      getSelector(chainID),
+				RmnProxy:           rmnProxy.Address(),
+				NonceManager:       nonceManager.Address(),
+				TokenAdminRegistry: tokenAdminRegistry.Address(),
+			},
+			evm_2_evm_multi_onramp.EVM2EVMMultiOnRampDynamicConfig{
+				Router:        rout.Address(),
+				PriceRegistry: priceRegistry.Address(),
+				// `withdrawFeeTokens` onRamp function is not part of the message flow
+				// so we can set this to any address
+				FeeAggregator: testutils.NewAddress(),
+			},
+		)
+		require.NoErrorf(t, err, "failed to deploy onramp on chain id %d", chainID)
+		backend.Commit()
+		onramp, err := evm_2_evm_multi_onramp.NewEVM2EVMMultiOnRamp(onRampAddr, backend)
+		require.NoError(t, err)
+
+		// ======================================================================
+		//				OffRamp
+		// ======================================================================
+		offrampAddr, _, _, err := evm_2_evm_multi_offramp.DeployEVM2EVMMultiOffRamp(
+			owner,
+			backend,
+			evm_2_evm_multi_offramp.EVM2EVMMultiOffRampStaticConfig{
+				ChainSelector:      getSelector(chainID),
+				RmnProxy:           rmnProxy.Address(),
+				TokenAdminRegistry: tokenAdminRegistry.Address(),
+				NonceManager:       nonceManager.Address(),
+			},
+			evm_2_evm_multi_offramp.EVM2EVMMultiOffRampDynamicConfig{
+				Router:        rout.Address(),
+				PriceRegistry: priceRegistry.Address(),
+			},
+			// Source chain configs will be set up later once we have all chains
+			[]evm_2_evm_multi_offramp.EVM2EVMMultiOffRampSourceChainConfigArgs{},
+		)
+		require.NoErrorf(t, err, "failed to deploy offramp on chain id %d", chainID)
+		backend.Commit()
+		offramp, err := evm_2_evm_multi_offramp.NewEVM2EVMMultiOffRamp(offrampAddr, backend)
+		require.NoError(t, err)
+
+		// Non-reverting receiver for exec tests (the `false` disables reverts).
+		receiverAddress, _, _, err := maybe_revert_message_receiver.DeployMaybeRevertMessageReceiver(
+			owner,
+			backend,
+			false,
+		)
+		require.NoError(t, err, "failed to deploy MaybeRevertMessageReceiver on chain id %d", chainID)
+		backend.Commit()
+		receiver, err := maybe_revert_message_receiver.NewMaybeRevertMessageReceiver(receiverAddress, backend)
+		require.NoError(t, err)
+
+		universe := onchainUniverse{
+			backend:            backend,
+			owner:              owner,
+			chainID:            chainID,
+			linkToken:          linkToken,
+			weth:               weth,
+			router:             rout,
+			rmnProxy:           rmnProxy,
+			rmn:                rmn,
+			onramp:             onramp,
+			offramp:            offramp,
+			priceRegistry:      priceRegistry,
+			tokenAdminRegistry: tokenAdminRegistry,
+			nonceManager:       nonceManager,
+			receiver:           receiver,
+		}
+		// Set up the initial configurations for the contracts
+		setupUniverseBasics(t, universe)
+
+		universes[chainID] = universe
+	}
+
+	// Once we have all chains created and contracts deployed, we can set up the initial configurations and wire chains together
+	connectUniverses(t, universes)
+
+	// print out all contract addresses for debugging purposes
+	for chainID, uni := range universes {
+		t.Logf("Chain ID: %d\n Chain Selector: %d\n LinkToken: %s\n WETH: %s\n Router: %s\n RMNProxy: %s\n RMN: %s\n OnRamp: %s\n OffRamp: %s\n PriceRegistry: %s\n TokenAdminRegistry: %s\n NonceManager: %s\n",
+			chainID,
+			getSelector(chainID),
+			uni.linkToken.Address().Hex(),
+			uni.weth.Address().Hex(),
+			uni.router.Address().Hex(),
+			uni.rmnProxy.Address().Hex(),
+			uni.rmn.Address().Hex(),
+			uni.onramp.Address().Hex(),
+			uni.offramp.Address().Hex(),
+			uni.priceRegistry.Address().Hex(),
+			uni.tokenAdminRegistry.Address().Hex(),
+			uni.nonceManager.Address().Hex(),
+		)
+	}
+
+	// print out topic hashes of relevant events for debugging purposes
+	t.Logf("Topic hash of CommitReportAccepted: %s", commitReportAcceptedTopic.Hex())
+	t.Logf("Topic hash of ExecutionStateChanged: %s", executionStateChangedTopic.Hex())
+	t.Logf("Topic hash of CCIPSendRequested: %s", ccipSendRequestedTopic.Hex())
+
+	return homeChainUniverse, universes
+}
+
+// createChains creates the home chain plus enough additional simulated chains
+// to reach `numChains` in total. Every chain gets a deployer funded with
+// 10,000 ETH and a block clock advanced close to the present
+// (see tweakChainTimestamp). Fails the test if `numChains` exceeds the number
+// of available test chain IDs instead of silently returning fewer chains.
+func createChains(t *testing.T, numChains int) map[uint64]chainBase {
+	chains := map[uint64]chainBase{
+		homeChainID: newChainBase(t),
+	}
+
+	for chainID := chainsel.TEST_90000001.EvmChainID; len(chains) < numChains && chainID < chainsel.TEST_90000020.EvmChainID; chainID++ {
+		chains[chainID] = newChainBase(t)
+	}
+
+	// The loop above stops at TEST_90000020 regardless of numChains; fail
+	// loudly rather than hand back fewer chains than the caller asked for.
+	require.Lenf(t, chains, numChains, "only %d test chain IDs available but %d chains were requested", len(chains), numChains)
+
+	return chains
+}
+
+// newChainBase spins up one simulated chain: a transactor funded with
+// 10,000 ETH, a 30M-gas-limit backend, and a block timestamp moved near the
+// current wall clock.
+func newChainBase(t *testing.T) chainBase {
+	owner := testutils.MustNewSimTransactor(t)
+	backend := backends.NewSimulatedBackend(core.GenesisAlloc{
+		owner.From: core.GenesisAccount{
+			Balance: assets.Ether(10_000).ToInt(),
+		},
+	}, 30e6)
+	tweakChainTimestamp(t, backend, FirstBlockAge)
+	return chainBase{owner: owner, backend: backend}
+}
+
+// CCIP relies on block timestamps, but SimulatedBackend uses by default clock starting from 1970-01-01
+// This trick is used to move the clock closer to the current time. We set first block to be X hours ago.
+// Tests create plenty of transactions so this number can't be too low, every new block mined will tick the clock,
+// if you mine more than "X hours" transactions, SimulatedBackend will panic because generated timestamps will be in the future.
+func tweakChainTimestamp(t *testing.T, backend *backends.SimulatedBackend, tweak time.Duration) {
+	// Timestamp of the current head block (seconds since the epoch).
+	blockTime := time.Unix(int64(backend.Blockchain().CurrentHeader().Time), 0)
+	sinceBlockTime := time.Since(blockTime)
+	// Advance the chain clock so the head ends up `tweak` behind the wall clock.
+	diff := sinceBlockTime - tweak
+	err := backend.AdjustTime(diff)
+	require.NoError(t, err, "unable to adjust time on simulated chain")
+	// NOTE(review): two commits mine the time adjustment plus one extra block
+	// on top — presumably so the adjusted timestamp is in a mined parent; confirm.
+	backend.Commit()
+	backend.Commit()
+}
+
+// setupHomeChain deploys the capability registry and the CCIP config contract
+// on the home chain, registers the CCIP capability (pointing at the config
+// contract), and registers a single node operator. Returns the resulting
+// homeChain handle.
+func setupHomeChain(t *testing.T, owner *bind.TransactOpts, backend *backends.SimulatedBackend) homeChain {
+	// deploy the capability registry on the home chain
+	crAddress, _, _, err := kcr.DeployCapabilitiesRegistry(owner, backend)
+	require.NoError(t, err, "failed to deploy capability registry on home chain")
+	backend.Commit()
+
+	capabilityRegistry, err := kcr.NewCapabilitiesRegistry(crAddress, backend)
+	require.NoError(t, err)
+
+	// The CCIP config contract is the capability's configuration contract.
+	ccAddress, _, _, err := ccip_config.DeployCCIPConfig(owner, backend, crAddress)
+	require.NoError(t, err)
+	backend.Commit()
+
+	capabilityConfig, err := ccip_config.NewCCIPConfig(ccAddress, backend)
+	require.NoError(t, err)
+
+	_, err = capabilityRegistry.AddCapabilities(owner, []kcr.CapabilitiesRegistryCapability{
+		{
+			LabelledName:          CapabilityLabelledName,
+			Version:               CapabilityVersion,
+			CapabilityType:        2, // consensus. not used (?)
+			ResponseType:          0, // report. not used (?)
+			ConfigurationContract: ccAddress,
+		},
+	})
+	require.NoError(t, err, "failed to add capabilities to the capability registry")
+	backend.Commit()
+
+	// Add NodeOperator, for simplicity we'll add one NodeOperator only
+	// First NodeOperator will have NodeOperatorId = 1
+	_, err = capabilityRegistry.AddNodeOperators(owner, []kcr.CapabilitiesRegistryNodeOperator{
+		{
+			Admin: owner.From,
+			Name:  "NodeOperator",
+		},
+	})
+	require.NoError(t, err, "failed to add node operator to the capability registry")
+	backend.Commit()
+
+	return homeChain{
+		backend:            backend,
+		owner:              owner,
+		chainID:            homeChainID,
+		capabilityRegistry: capabilityRegistry,
+		ccipConfig:         capabilityConfig,
+	}
+}
+
+// sortP2PIDS orders p2pIDs in place, ascending by byte-wise comparison.
+// On-chain validation (_checkIsValidUniqueSubset) requires sorted input.
+func sortP2PIDS(p2pIDs [][32]byte) {
+	sort.SliceStable(p2pIDs, func(a, b int) bool {
+		return bytes.Compare(p2pIDs[a][:], p2pIDs[b][:]) == -1
+	})
+}
+
+// AddNodes registers the given p2pIDs as nodes of the single test node
+// operator in the capability registry, each advertising capabilityIDs.
+// p2pIDs are sorted in place before submission.
+func (h *homeChain) AddNodes(
+	t *testing.T,
+	p2pIDs [][32]byte,
+	capabilityIDs [][32]byte,
+) {
+	// Need to sort, otherwise _checkIsValidUniqueSubset onChain will fail
+	sortP2PIDS(p2pIDs)
+	var nodeParams []kcr.CapabilitiesRegistryNodeParams
+	for _, p2pID := range p2pIDs {
+		nodeParam := kcr.CapabilitiesRegistryNodeParams{
+			NodeOperatorId:      NodeOperatorID,
+			Signer:              p2pID, // Not used in tests
+			P2pId:               p2pID,
+			HashedCapabilityIds: capabilityIDs,
+		}
+		nodeParams = append(nodeParams, nodeParam)
+	}
+	_, err := h.capabilityRegistry.AddNodes(h.owner, nodeParams)
+	require.NoError(t, err, "failed to add node operator oracles")
+	h.backend.Commit()
+}
+
+// AddChainConfig registers chainSelector in the home chain's CCIP config
+// contract with all p2pIDs as readers and fault tolerance f, and returns the
+// chain config that was stored. p2pIDs are sorted in place before submission.
+func AddChainConfig(
+	t *testing.T,
+	h homeChain,
+	chainSelector uint64,
+	p2pIDs [][32]byte,
+	f uint8,
+) ccip_config.CCIPConfigTypesChainConfigInfo {
+	// Need to sort, otherwise _checkIsValidUniqueSubset onChain will fail
+	sortP2PIDS(p2pIDs)
+	// First Add ChainConfig that includes all p2pIDs as readers
+	encodedExtraChainConfig, err := chainconfig.EncodeChainConfig(chainconfig.ChainConfig{
+		GasPriceDeviationPPB:    ccipocr3.NewBigIntFromInt64(1000),
+		DAGasPriceDeviationPPB:  ccipocr3.NewBigIntFromInt64(0),
+		FinalityDepth:           10,
+		OptimisticConfirmations: 1,
+	})
+	require.NoError(t, err)
+	chainConfig := integrationhelpers.SetupConfigInfo(chainSelector, p2pIDs, f, encodedExtraChainConfig)
+	inputConfig := []ccip_config.CCIPConfigTypesChainConfigInfo{
+		chainConfig,
+	}
+	// nil removals: this helper only ever adds configs.
+	_, err = h.ccipConfig.ApplyChainConfigUpdates(h.owner, nil, inputConfig)
+	require.NoError(t, err)
+	h.backend.Commit()
+	return chainConfig
+}
+
+// AddDON builds commit and exec OCR3 configs for the given oracle set, adds a
+// DON for the CCIP capability on the home chain's capability registry, then
+// reads the resulting config digests back from the CCIP config contract and
+// installs them on the destination universe's offramp. It finishes by reading
+// the offramp config back and asserting it matches what was set.
+func (h *homeChain) AddDON(
+	t *testing.T,
+	ccipCapabilityID [32]byte,
+	chainSelector uint64,
+	uni onchainUniverse,
+	f uint8,
+	bootstrapP2PID [32]byte,
+	p2pIDs [][32]byte,
+	oracles []confighelper2.OracleIdentityExtra,
+) {
+	// Get OCR3 Config from helper
+	// Transmission schedule: one transmitter per stage, one stage per oracle.
+	var schedule []int
+	for range oracles {
+		schedule = append(schedule, 1)
+	}
+
+	tabi, err := ocr3_config_encoder.IOCR3ConfigEncoderMetaData.GetAbi()
+	require.NoError(t, err)
+
+	// Add DON on capability registry contract
+	// Build one OCR3 config per plugin (commit + exec).
+	var ocr3Configs []ocr3_config_encoder.CCIPConfigTypesOCR3Config
+	for _, pluginType := range []cctypes.PluginType{cctypes.PluginTypeCCIPCommit, cctypes.PluginTypeCCIPExec} {
+		var encodedOffchainConfig []byte
+		var err2 error
+		if pluginType == cctypes.PluginTypeCCIPCommit {
+			encodedOffchainConfig, err2 = pluginconfig.EncodeCommitOffchainConfig(pluginconfig.CommitOffchainConfig{
+				RemoteGasPriceBatchWriteFrequency: *commonconfig.MustNewDuration(RemoteGasPriceBatchWriteFrequency),
+				// TODO: implement token price writes
+				// TokenPriceBatchWriteFrequency: *commonconfig.MustNewDuration(tokenPriceBatchWriteFrequency),
+			})
+			require.NoError(t, err2)
+		} else {
+			encodedOffchainConfig, err2 = pluginconfig.EncodeExecuteOffchainConfig(pluginconfig.ExecuteOffchainConfig{
+				BatchGasLimit:             BatchGasLimit,
+				RelativeBoostPerWaitHour:  RelativeBoostPerWaitHour,
+				MessageVisibilityInterval: *commonconfig.MustNewDuration(FirstBlockAge),
+				InflightCacheExpiry:       *commonconfig.MustNewDuration(InflightCacheExpiry),
+				RootSnoozeTime:            *commonconfig.MustNewDuration(RootSnoozeTime),
+				BatchingStrategyID:        BatchingStrategyID,
+			})
+			require.NoError(t, err2)
+		}
+		// Derive signers/transmitters/F and the serialized offchain config
+		// from the oracle identities and the timing constants above.
+		signers, transmitters, configF, _, offchainConfigVersion, offchainConfig, err2 := ocr3confighelper.ContractSetConfigArgsForTests(
+			DeltaProgress,
+			DeltaResend,
+			DeltaInitial,
+			DeltaRound,
+			DeltaGrace,
+			DeltaCertifiedCommitRequest,
+			DeltaStage,
+			Rmax,
+			schedule,
+			oracles,
+			encodedOffchainConfig,
+			MaxDurationQuery,
+			MaxDurationObservation,
+			MaxDurationShouldAcceptAttestedReport,
+			MaxDurationShouldTransmitAcceptedReport,
+			int(f),
+			[]byte{}, // empty OnChainConfig
+		)
+		require.NoError(t, err2, "failed to create contract config")
+
+		signersBytes := make([][]byte, len(signers))
+		for i, signer := range signers {
+			signersBytes[i] = signer
+		}
+
+		transmittersBytes := make([][]byte, len(transmitters))
+		for i, transmitter := range transmitters {
+			// anotherErr because linting doesn't want to shadow err
+			parsed, anotherErr := common.ParseHexOrString(string(transmitter))
+			require.NoError(t, anotherErr)
+			transmittersBytes[i] = parsed
+		}
+
+		ocr3Configs = append(ocr3Configs, ocr3_config_encoder.CCIPConfigTypesOCR3Config{
+			PluginType:            uint8(pluginType),
+			ChainSelector:         chainSelector,
+			F:                     configF,
+			OffchainConfigVersion: offchainConfigVersion,
+			OfframpAddress:        uni.offramp.Address().Bytes(),
+			BootstrapP2PIds:       [][32]byte{bootstrapP2PID},
+			P2pIds:                p2pIDs,
+			Signers:               signersBytes,
+			Transmitters:          transmittersBytes,
+			OffchainConfig:        offchainConfig,
+		})
+	}
+
+	encodedCall, err := tabi.Pack("exposeOCR3Config", ocr3Configs)
+	require.NoError(t, err)
+
+	// Trim first four bytes to remove function selector.
+	encodedConfigs := encodedCall[4:]
+
+	// commit so that we have an empty block to filter events from
+	h.backend.Commit()
+
+	_, err = h.capabilityRegistry.AddDON(h.owner, p2pIDs, []kcr.CapabilitiesRegistryCapabilityConfiguration{
+		{
+			CapabilityId: ccipCapabilityID,
+			Config:       encodedConfigs,
+		},
+	}, false, false, f)
+	require.NoError(t, err)
+	h.backend.Commit()
+
+	// Recover the assigned DON ID from the ConfigSet event emitted by AddDON.
+	endBlock := h.backend.Blockchain().CurrentBlock().Number.Uint64()
+	iter, err := h.capabilityRegistry.FilterConfigSet(&bind.FilterOpts{
+		Start: h.backend.Blockchain().CurrentBlock().Number.Uint64() - 1,
+		End:   &endBlock,
+	})
+	require.NoError(t, err, "failed to filter config set events")
+	var donID uint32
+	for iter.Next() {
+		donID = iter.Event.DonId
+		break
+	}
+	require.NotZero(t, donID, "failed to get donID from config set event")
+
+	var signerAddresses []common.Address
+	for _, oracle := range oracles {
+		signerAddresses = append(signerAddresses, common.BytesToAddress(oracle.OnchainPublicKey))
+	}
+
+	var transmitterAddresses []common.Address
+	for _, oracle := range oracles {
+		transmitterAddresses = append(transmitterAddresses, common.HexToAddress(string(oracle.TransmitAccount)))
+	}
+
+	// get the config digest from the ccip config contract and set config on the offramp.
+	var offrampOCR3Configs []evm_2_evm_multi_offramp.MultiOCR3BaseOCRConfigArgs
+	for _, pluginType := range []cctypes.PluginType{cctypes.PluginTypeCCIPCommit, cctypes.PluginTypeCCIPExec} {
+		ocrConfig, err1 := h.ccipConfig.GetOCRConfig(&bind.CallOpts{
+			Context: testutils.Context(t),
+		}, donID, uint8(pluginType))
+		require.NoError(t, err1, "failed to get OCR3 config from ccip config contract")
+		require.Len(t, ocrConfig, 1, "expected exactly one OCR3 config")
+		offrampOCR3Configs = append(offrampOCR3Configs, evm_2_evm_multi_offramp.MultiOCR3BaseOCRConfigArgs{
+			ConfigDigest:                   ocrConfig[0].ConfigDigest,
+			OcrPluginType:                  uint8(pluginType),
+			F:                              f,
+			IsSignatureVerificationEnabled: pluginType == cctypes.PluginTypeCCIPCommit,
+			Signers:                        signerAddresses,
+			Transmitters:                   transmitterAddresses,
+		})
+	}
+
+	uni.backend.Commit()
+
+	_, err = uni.offramp.SetOCR3Configs(uni.owner, offrampOCR3Configs)
+	require.NoError(t, err, "failed to set ocr3 configs on offramp")
+	uni.backend.Commit()
+
+	// Read the configs back from the offramp and verify they match.
+	// Indexing offrampOCR3Configs by pluginType relies on commit=0, exec=1.
+	for _, pluginType := range []cctypes.PluginType{cctypes.PluginTypeCCIPCommit, cctypes.PluginTypeCCIPExec} {
+		ocrConfig, err := uni.offramp.LatestConfigDetails(&bind.CallOpts{
+			Context: testutils.Context(t),
+		}, uint8(pluginType))
+		require.NoError(t, err, "failed to get latest commit OCR3 config")
+		require.Equalf(t, offrampOCR3Configs[pluginType].ConfigDigest, ocrConfig.ConfigInfo.ConfigDigest, "%s OCR3 config digest mismatch", pluginType.String())
+		require.Equalf(t, offrampOCR3Configs[pluginType].F, ocrConfig.ConfigInfo.F, "%s OCR3 config F mismatch", pluginType.String())
+		require.Equalf(t, offrampOCR3Configs[pluginType].IsSignatureVerificationEnabled, ocrConfig.ConfigInfo.IsSignatureVerificationEnabled, "%s OCR3 config signature verification mismatch", pluginType.String())
+		if pluginType == cctypes.PluginTypeCCIPCommit {
+			// only commit will set signers, exec doesn't need them.
+			require.Equalf(t, offrampOCR3Configs[pluginType].Signers, ocrConfig.Signers, "%s OCR3 config signers mismatch", pluginType.String())
+		}
+		require.Equalf(t, offrampOCR3Configs[pluginType].Transmitters, ocrConfig.Transmitters, "%s OCR3 config transmitters mismatch", pluginType.String())
+	}
+
+	t.Logf("set ocr3 config on the offramp, signers: %+v, transmitters: %+v", signerAddresses, transmitterAddresses)
+}
+
+// connectUniverses wires every chain to every other chain: router ramp
+// mappings, price-registry dest-chain configs, offramp source-chain configs,
+// and initial remote gas prices.
+func connectUniverses(
+	t *testing.T,
+	universes map[uint64]onchainUniverse,
+) {
+	for _, universe := range universes {
+		wireRouter(t, universe, universes)
+		wirePriceRegistry(t, universe, universes)
+		wireOffRamp(t, universe, universes)
+		initRemoteChainsGasPrices(t, universe, universes)
+	}
+}
+
+// setupUniverseBasics sets up the initial configurations for the CCIP contracts on a single chain.
+// 1. Mint 1000 LINK to the owner
+// 2. Set the price registry with local token prices
+// 3. Authorize the onRamp and offRamp on the nonce manager
+func setupUniverseBasics(t *testing.T, uni onchainUniverse) {
+	// =============================================================================
+	//			Universe specific updates/configs
+	//		These updates are specific to each universe and are set up here
+	//		These updates don't depend on other chains
+	// =============================================================================
+	owner := uni.owner
+	// =============================================================================
+	//			Mint 1000 LINK to owner
+	// =============================================================================
+	_, err := uni.linkToken.GrantMintRole(owner, owner.From)
+	require.NoError(t, err)
+	_, err = uni.linkToken.Mint(owner, owner.From, e18Mult(1000))
+	require.NoError(t, err)
+	uni.backend.Commit()
+
+	// =============================================================================
+	//			Price updates for tokens
+	//		These are the prices of the fee tokens of local chain in USD
+	// =============================================================================
+	tokenPriceUpdates := []price_registry.InternalTokenPriceUpdate{
+		{
+			SourceToken: uni.linkToken.Address(),
+			UsdPerToken: e18Mult(20), // $20 per LINK (1e18-scaled)
+		},
+		{
+			SourceToken: uni.weth.Address(),
+			UsdPerToken: e18Mult(4000), // $4000 per WETH (1e18-scaled)
+		},
+	}
+	_, err = uni.priceRegistry.UpdatePrices(owner, price_registry.InternalPriceUpdates{
+		TokenPriceUpdates: tokenPriceUpdates,
+	})
+	require.NoErrorf(t, err, "failed to update prices in price registry on chain id %d", uni.chainID)
+	uni.backend.Commit()
+
+	// Allow the offramp to push price updates into the price registry.
+	_, err = uni.priceRegistry.ApplyAuthorizedCallerUpdates(owner, price_registry.AuthorizedCallersAuthorizedCallerArgs{
+		AddedCallers: []common.Address{
+			uni.offramp.Address(),
+		},
+	})
+	require.NoError(t, err, "failed to authorize offramp on price registry")
+	uni.backend.Commit()
+
+	// =============================================================================
+	//		Authorize OnRamp & OffRamp on NonceManager
+	//	Otherwise the onramp will not be able to call the nonceManager to get next Nonce
+	// =============================================================================
+	authorizedCallersAuthorizedCallerArgs := nonce_manager.AuthorizedCallersAuthorizedCallerArgs{
+		AddedCallers: []common.Address{
+			uni.onramp.Address(),
+			uni.offramp.Address(),
+		},
+	}
+	_, err = uni.nonceManager.ApplyAuthorizedCallerUpdates(owner, authorizedCallersAuthorizedCallerArgs)
+	require.NoError(t, err)
+	uni.backend.Commit()
+}
+
+// wireRouter registers, on the local router, the local onramp as the ramp for
+// every remote destination and the local offramp as the ramp for every remote
+// source. The legacy router expects one onRamp/offRamp per lane, while the
+// new architecture has a single onramp and offramp per chain, so the same
+// local ramp is mapped to every remote chain.
+func wireRouter(t *testing.T, uni onchainUniverse, universes map[uint64]onchainUniverse) {
+	var (
+		onRampUpdates  []router.RouterOnRamp
+		offRampUpdates []router.RouterOffRamp
+	)
+	for remoteChainID := range universes {
+		if remoteChainID == uni.chainID {
+			continue // no lane from a chain to itself
+		}
+		remoteSelector := getSelector(remoteChainID)
+		onRampUpdates = append(onRampUpdates, router.RouterOnRamp{
+			DestChainSelector: remoteSelector,
+			OnRamp:            uni.onramp.Address(),
+		})
+		offRampUpdates = append(offRampUpdates, router.RouterOffRamp{
+			SourceChainSelector: remoteSelector,
+			OffRamp:             uni.offramp.Address(),
+		})
+	}
+	// No offramp removals — only additions.
+	_, err := uni.router.ApplyRampUpdates(uni.owner, onRampUpdates, []router.RouterOffRamp{}, offRampUpdates)
+	require.NoErrorf(t, err, "failed to apply ramp updates on router on chain id %d", uni.chainID)
+	uni.backend.Commit()
+}
+
+// wirePriceRegistry installs a default dest-chain fee config on the local
+// price registry for every remote chain, enabling fee quoting per lane.
+func wirePriceRegistry(t *testing.T, uni onchainUniverse, universes map[uint64]onchainUniverse) {
+	var destChainConfigs []price_registry.PriceRegistryDestChainConfigArgs
+	for remoteChainID := range universes {
+		if remoteChainID == uni.chainID {
+			continue // no lane from a chain to itself
+		}
+		destChainConfigs = append(destChainConfigs, price_registry.PriceRegistryDestChainConfigArgs{
+			DestChainSelector: getSelector(remoteChainID),
+			DestChainConfig:   defaultPriceRegistryDestChainConfig(t),
+		})
+	}
+	_, err := uni.priceRegistry.ApplyDestChainConfigUpdates(uni.owner, destChainConfigs)
+	require.NoErrorf(t, err, "failed to apply dest chain config updates on price registry on chain id %d", uni.chainID)
+	uni.backend.Commit()
+}
+
+// wireOffRamp enables, on the local offramp, one source-chain config per
+// remote chain pointing at that remote chain's onramp, then reads every
+// stored config back and verifies it.
+func wireOffRamp(t *testing.T, uni onchainUniverse, universes map[uint64]onchainUniverse) {
+	var sourceConfigs []evm_2_evm_multi_offramp.EVM2EVMMultiOffRampSourceChainConfigArgs
+	for remoteChainID, remoteUniverse := range universes {
+		if remoteChainID == uni.chainID {
+			continue // a chain is never its own source
+		}
+		sourceConfigs = append(sourceConfigs, evm_2_evm_multi_offramp.EVM2EVMMultiOffRampSourceChainConfigArgs{
+			SourceChainSelector: getSelector(remoteChainID), // for each destination chain, add a source chain config
+			IsEnabled:           true,
+			OnRamp:              remoteUniverse.onramp.Address().Bytes(),
+		})
+	}
+	_, err := uni.offramp.ApplySourceChainConfigUpdates(uni.owner, sourceConfigs)
+	require.NoErrorf(t, err, "failed to apply source chain config updates on offramp on chain id %d", uni.chainID)
+	uni.backend.Commit()
+
+	// Sanity-check every stored source-chain config.
+	for remoteChainID, remoteUniverse := range universes {
+		if remoteChainID == uni.chainID {
+			continue
+		}
+		sourceCfg, readErr := uni.offramp.GetSourceChainConfig(&bind.CallOpts{}, getSelector(remoteChainID))
+		require.NoError(t, readErr)
+		require.True(t, sourceCfg.IsEnabled, "source chain config should be enabled")
+		require.Equal(t, remoteUniverse.onramp.Address(), common.BytesToAddress(sourceCfg.OnRamp), "source chain config onRamp address mismatch")
+	}
+}
+
+// getSelector translates an EVM chain ID into its CCIP chain selector.
+// Panics on unknown chain IDs, which is acceptable here because every chain
+// ID used by these tests is registered in the chain-selectors package.
+func getSelector(chainID uint64) uint64 {
+	selector, err := chainsel.SelectorFromChainId(chainID)
+	if err != nil {
+		panic(err)
+	}
+	return selector
+}
+
+// initRemoteChainsGasPrices sets the gas prices for all chains except the
+// local chain in the local price registry (a flat 2e12 USD-wei per unit gas
+// for every remote destination), and commits the update.
+func initRemoteChainsGasPrices(t *testing.T, uni onchainUniverse, universes map[uint64]onchainUniverse) {
+	var gasPriceUpdates []price_registry.InternalGasPriceUpdate
+	for remoteChainID := range universes {
+		if remoteChainID == uni.chainID {
+			// Gas prices are only tracked for remote destinations.
+			continue
+		}
+		gasPriceUpdates = append(gasPriceUpdates,
+			price_registry.InternalGasPriceUpdate{
+				DestChainSelector: getSelector(remoteChainID),
+				UsdPerUnitGas:     big.NewInt(2e12),
+			},
+		)
+	}
+	_, err := uni.priceRegistry.UpdatePrices(uni.owner, price_registry.InternalPriceUpdates{
+		GasPriceUpdates: gasPriceUpdates,
+	})
+	require.NoError(t, err)
+	// Mine the update: every other wiring helper commits its transaction, and
+	// a pending (unmined) price update would be invisible to later reads.
+	uni.backend.Commit()
+}
+
+// defaultPriceRegistryDestChainConfig returns the dest-chain fee/limit config
+// used for every lane in these tests: enabled, EVM chain family, and fixed
+// message/gas limits suitable for small test payloads.
+func defaultPriceRegistryDestChainConfig(t *testing.T) price_registry.PriceRegistryDestChainConfig {
+	// https://github.com/smartcontractkit/ccip/blob/c4856b64bd766f1ddbaf5d13b42d3c4b12efde3a/contracts/src/v0.8/ccip/libraries/Internal.sol#L337-L337
+	/*
+		```Solidity
+			// bytes4(keccak256("CCIP ChainFamilySelector EVM"))
+			bytes4 public constant CHAIN_FAMILY_SELECTOR_EVM = 0x2812d52c;
+		```
+	*/
+	evmFamilySelector, err := hex.DecodeString("2812d52c")
+	require.NoError(t, err)
+	return price_registry.PriceRegistryDestChainConfig{
+		IsEnabled:                         true,
+		MaxNumberOfTokensPerMsg:           10,
+		MaxDataBytes:                      256,
+		MaxPerMsgGasLimit:                 3_000_000,
+		DestGasOverhead:                   50_000,
+		DefaultTokenFeeUSDCents:           1,
+		DestGasPerPayloadByte:             10,
+		DestDataAvailabilityOverheadGas:   0,
+		DestGasPerDataAvailabilityByte:    100,
+		DestDataAvailabilityMultiplierBps: 1,
+		DefaultTokenDestGasOverhead:       125_000,
+		DefaultTokenDestBytesOverhead:     32,
+		DefaultTxGasLimit:                 200_000,
+		GasMultiplierWeiPerEth:            1,
+		NetworkFeeUSDCents:                1,
+		ChainFamilySelector:               [4]byte(evmFamilySelector),
+	}
+}
+
+// deployLinkToken deploys a fresh LINK token contract onto the simulated
+// backend for the given chain and returns a bound wrapper for it.
+func deployLinkToken(t *testing.T, owner *bind.TransactOpts, backend *backends.SimulatedBackend, chainID uint64) *link_token.LinkToken {
+	addr, _, _, err := link_token.DeployLinkToken(owner, backend)
+	require.NoErrorf(t, err, "failed to deploy link token on chain id %d", chainID)
+	backend.Commit()
+	token, err := link_token.NewLinkToken(addr, backend)
+	require.NoError(t, err)
+	return token
+}
+
+// deployMockARMContract deploys a mock RMN (ARM) contract onto the simulated
+// backend and returns a bound wrapper for it.
+func deployMockARMContract(t *testing.T, owner *bind.TransactOpts, backend *backends.SimulatedBackend, chainID uint64) *mock_arm_contract.MockARMContract {
+	addr, _, _, err := mock_arm_contract.DeployMockARMContract(owner, backend)
+	require.NoErrorf(t, err, "failed to deploy mock arm on chain id %d", chainID)
+	backend.Commit()
+	mockARM, err := mock_arm_contract.NewMockARMContract(addr, backend)
+	require.NoError(t, err)
+	return mockARM
+}
+
+// deployARMProxyContract deploys an ARM proxy pointing at the given RMN
+// address and returns a bound wrapper for it.
+func deployARMProxyContract(t *testing.T, owner *bind.TransactOpts, backend *backends.SimulatedBackend, rmnAddr common.Address, chainID uint64) *arm_proxy_contract.ARMProxyContract {
+	addr, _, _, err := arm_proxy_contract.DeployARMProxyContract(owner, backend, rmnAddr)
+	require.NoErrorf(t, err, "failed to deploy arm proxy on chain id %d", chainID)
+	backend.Commit()
+	proxy, err := arm_proxy_contract.NewARMProxyContract(addr, backend)
+	require.NoError(t, err)
+	return proxy
+}
+
+// deployWETHContract deploys a WETH9 contract onto the simulated backend and
+// returns a bound wrapper for it.
+func deployWETHContract(t *testing.T, owner *bind.TransactOpts, backend *backends.SimulatedBackend, chainID uint64) *weth9.WETH9 {
+	addr, _, _, err := weth9.DeployWETH9(owner, backend)
+	require.NoErrorf(t, err, "failed to deploy weth contract on chain id %d", chainID)
+	backend.Commit()
+	wrapped, err := weth9.NewWETH9(addr, backend)
+	require.NoError(t, err)
+	return wrapped
+}
+
+// deployRouter deploys a CCIP Router wired to the given WETH and RMN-proxy
+// addresses and returns a bound wrapper for it.
+func deployRouter(t *testing.T, owner *bind.TransactOpts, backend *backends.SimulatedBackend, wethAddr, rmnProxyAddr common.Address, chainID uint64) *router.Router {
+	addr, _, _, err := router.DeployRouter(owner, backend, wethAddr, rmnProxyAddr)
+	require.NoErrorf(t, err, "failed to deploy router on chain id %d", chainID)
+	backend.Commit()
+	routerWrapper, err := router.NewRouter(addr, backend)
+	require.NoError(t, err)
+	return routerWrapper
+}
+
+// deployPriceRegistry deploys a PriceRegistry on the given simulated backend
+// and returns a bound wrapper. The test owner account is registered as the
+// initial price updater and LINK/WETH are the fee tokens; token price feeds,
+// token-transfer fee configs and destination-chain configs are left empty and
+// are filled in by later setup steps.
+func deployPriceRegistry(
+	t *testing.T,
+	owner *bind.TransactOpts,
+	backend *backends.SimulatedBackend,
+	linkAddr,
+	wethAddr common.Address,
+	maxFeeJuelsPerMsg *big.Int,
+	chainID uint64,
+) *price_registry.PriceRegistry {
+	priceRegistryAddr, _, _, err := price_registry.DeployPriceRegistry(
+		owner,
+		backend,
+		price_registry.PriceRegistryStaticConfig{
+			MaxFeeJuelsPerMsg:  maxFeeJuelsPerMsg,
+			LinkToken:          linkAddr,
+			StalenessThreshold: 24 * 60 * 60, // 24 hours
+		},
+		[]common.Address{
+			owner.From, // owner can update prices in this test
+		}, // price updaters, will be set to offramp later
+		[]common.Address{linkAddr, wethAddr}, // fee tokens
+		// empty for now, need to fill in when testing token transfers
+		[]price_registry.PriceRegistryTokenPriceFeedUpdate{},
+		// empty for now, need to fill in when testing token transfers
+		[]price_registry.PriceRegistryTokenTransferFeeConfigArgs{},
+		[]price_registry.PriceRegistryPremiumMultiplierWeiPerEthArgs{
+			{
+				PremiumMultiplierWeiPerEth: 9e17, // 0.9 ETH
+				Token:                      linkAddr,
+			},
+			{
+				PremiumMultiplierWeiPerEth: 1e18,
+				Token:                      wethAddr,
+			},
+		},
+		// Destination chain configs will be set up later once we have all chains
+		[]price_registry.PriceRegistryDestChainConfigArgs{},
+	)
+	require.NoErrorf(t, err, "failed to deploy price registry on chain id %d", chainID)
+	backend.Commit()
+	priceRegistry, err := price_registry.NewPriceRegistry(priceRegistryAddr, backend)
+	require.NoError(t, err)
+	return priceRegistry
+}
+
+// deployTokenAdminRegistry deploys a TokenAdminRegistry onto the simulated
+// backend and returns a bound wrapper for it.
+func deployTokenAdminRegistry(t *testing.T, owner *bind.TransactOpts, backend *backends.SimulatedBackend, chainID uint64) *token_admin_registry.TokenAdminRegistry {
+	addr, _, _, err := token_admin_registry.DeployTokenAdminRegistry(owner, backend)
+	require.NoErrorf(t, err, "failed to deploy token admin registry on chain id %d", chainID)
+	backend.Commit()
+	registry, err := token_admin_registry.NewTokenAdminRegistry(addr, backend)
+	require.NoError(t, err)
+	return registry
+}
+
+// deployNonceManager deploys a NonceManager with the owner as its only
+// authorized caller and returns a bound wrapper for it.
+func deployNonceManager(t *testing.T, owner *bind.TransactOpts, backend *backends.SimulatedBackend, chainID uint64) *nonce_manager.NonceManager {
+	addr, _, _, err := nonce_manager.DeployNonceManager(owner, backend, []common.Address{owner.From})
+	require.NoErrorf(t, err, "failed to deploy nonce_manager on chain id %d", chainID)
+	backend.Commit()
+	manager, err := nonce_manager.NewNonceManager(addr, backend)
+	require.NoError(t, err)
+	return manager
+}
diff --git a/core/capabilities/ccip/ccip_integration_tests/home_chain_test.go b/core/capabilities/ccip/ccip_integration_tests/home_chain_test.go
new file mode 100644
index 00000000000..c78fd37b809
--- /dev/null
+++ b/core/capabilities/ccip/ccip_integration_tests/home_chain_test.go
@@ -0,0 +1,103 @@
+package ccip_integration_tests
+
+import (
+ "testing"
+ "time"
+
+ "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/ccip_integration_tests/integrationhelpers"
+
+ mapset "github.com/deckarep/golang-set/v2"
+ "github.com/onsi/gomega"
+
+ libocrtypes "github.com/smartcontractkit/libocr/ragep2p/types"
+
+ "github.com/smartcontractkit/chainlink-ccip/chainconfig"
+ ccipreader "github.com/smartcontractkit/chainlink-ccip/pkg/reader"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccipocr3"
+
+ "github.com/stretchr/testify/require"
+
+ capcfg "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/ccip_config"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+// TestHomeChainReader applies chain configs for chains A/B/C to the CCIPConfig
+// contract, verifies the HomeChainReader converges to exactly those configs,
+// then removes chain C's config and verifies the reader drops it.
+func TestHomeChainReader(t *testing.T) {
+	ctx := testutils.Context(t)
+	lggr := logger.TestLogger(t)
+	uni := integrationhelpers.NewTestUniverse(ctx, t, lggr)
+	// We need 3*f + 1 p2pIDs to have enough nodes to bootstrap
+	var arr []int64
+	n := int(integrationhelpers.FChainA*3 + 1)
+	// NOTE(review): `i <= n` yields n+1 (= 3*f + 2) IDs, one more than the
+	// comment above states is needed — confirm whether the extra ID is
+	// intentional (e.g. to cover the [1:]/[2:] reader slices below).
+	for i := 0; i <= n; i++ {
+		arr = append(arr, int64(i))
+	}
+	p2pIDs := integrationhelpers.P2pIDsFromInts(arr)
+	uni.AddCapability(p2pIDs)
+	//==============================Apply configs to Capability Contract=================================
+	encodedChainConfig, err := chainconfig.EncodeChainConfig(chainconfig.ChainConfig{
+		GasPriceDeviationPPB:    cciptypes.NewBigIntFromInt64(1000),
+		DAGasPriceDeviationPPB:  cciptypes.NewBigIntFromInt64(1_000_000),
+		FinalityDepth:           -1,
+		OptimisticConfirmations: 1,
+	})
+	require.NoError(t, err)
+	// Chains B and C use progressively smaller reader sets (p2pIDs[1:], [2:]).
+	chainAConf := integrationhelpers.SetupConfigInfo(integrationhelpers.ChainA, p2pIDs, integrationhelpers.FChainA, encodedChainConfig)
+	chainBConf := integrationhelpers.SetupConfigInfo(integrationhelpers.ChainB, p2pIDs[1:], integrationhelpers.FChainB, encodedChainConfig)
+	chainCConf := integrationhelpers.SetupConfigInfo(integrationhelpers.ChainC, p2pIDs[2:], integrationhelpers.FChainC, encodedChainConfig)
+	inputConfig := []capcfg.CCIPConfigTypesChainConfigInfo{
+		chainAConf,
+		chainBConf,
+		chainCConf,
+	}
+	_, err = uni.CcipCfg.ApplyChainConfigUpdates(uni.Transactor, nil, inputConfig)
+	require.NoError(t, err)
+	uni.Backend.Commit()
+	//================================Setup HomeChainReader===============================
+
+	pollDuration := time.Second
+	homeChain := uni.HomeChainReader
+
+	// Block until the reader has polled at least one non-nil config set.
+	gomega.NewWithT(t).Eventually(func() bool {
+		configs, _ := homeChain.GetAllChainConfigs()
+		return configs != nil
+	}, testutils.WaitTimeout(t), pollDuration*5).Should(gomega.BeTrue())
+
+	t.Logf("homchain reader is ready")
+	//================================Test HomeChain Reader===============================
+	// Build the expected reader-side view from the configs we applied on-chain.
+	expectedChainConfigs := map[cciptypes.ChainSelector]ccipreader.ChainConfig{}
+	for _, c := range inputConfig {
+		expectedChainConfigs[cciptypes.ChainSelector(c.ChainSelector)] = ccipreader.ChainConfig{
+			FChain:         int(c.ChainConfig.FChain),
+			SupportedNodes: toPeerIDs(c.ChainConfig.Readers),
+			Config:         mustDecodeChainConfig(t, c.ChainConfig.Config),
+		}
+	}
+	configs, err := homeChain.GetAllChainConfigs()
+	require.NoError(t, err)
+	require.Equal(t, expectedChainConfigs, configs)
+	//=================================Remove ChainC from OnChainConfig=========================================
+	_, err = uni.CcipCfg.ApplyChainConfigUpdates(uni.Transactor, []uint64{integrationhelpers.ChainC}, nil)
+	require.NoError(t, err)
+	uni.Backend.Commit()
+	time.Sleep(pollDuration * 5) // Wait for the chain reader to update
+	configs, err = homeChain.GetAllChainConfigs()
+	require.NoError(t, err)
+	delete(expectedChainConfigs, cciptypes.ChainSelector(integrationhelpers.ChainC))
+	require.Equal(t, expectedChainConfigs, configs)
+}
+
+// toPeerIDs converts a slice of raw 32-byte reader IDs into a set of peer IDs.
+func toPeerIDs(readers [][32]byte) mapset.Set[libocrtypes.PeerID] {
+	out := mapset.NewSet[libocrtypes.PeerID]()
+	for _, reader := range readers {
+		out.Add(reader)
+	}
+	return out
+}
+
+// mustDecodeChainConfig decodes an encoded chain config, failing the test on
+// any decode error.
+func mustDecodeChainConfig(t *testing.T, encodedChainConfig []byte) chainconfig.ChainConfig {
+	decoded, err := chainconfig.DecodeChainConfig(encodedChainConfig)
+	require.NoError(t, err)
+	return decoded
+}
diff --git a/core/capabilities/ccip/ccip_integration_tests/integrationhelpers/integration_helpers.go b/core/capabilities/ccip/ccip_integration_tests/integrationhelpers/integration_helpers.go
new file mode 100644
index 00000000000..7520b126336
--- /dev/null
+++ b/core/capabilities/ccip/ccip_integration_tests/integrationhelpers/integration_helpers.go
@@ -0,0 +1,304 @@
+package integrationhelpers
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"math/big"
+	"sort"
+	"testing"
+	"time"
+
+	configsevm "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/configs/evm"
+	cctypes "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
+
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+
+	"github.com/smartcontractkit/chainlink-ccip/pkg/consts"
+	ccipreader "github.com/smartcontractkit/chainlink-ccip/pkg/reader"
+	"github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
+	"github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker"
+	"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/ccip_config"
+	"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/ocr3_config_encoder"
+	kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry"
+	"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+	"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
+
+	"github.com/smartcontractkit/chainlink-common/pkg/types"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+	"github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+	"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
+	"github.com/smartcontractkit/chainlink/v2/core/logger"
+	"github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
+	evmrelaytypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types"
+)
+
+const chainID = 1337
+
+// NewReader constructs a ChainReaderService over the given log poller, head
+// tracker and client, binds it to the CCIPConfig contract at address, starts
+// it, and blocks until the service reports Ready.
+func NewReader(
+	t *testing.T,
+	logPoller logpoller.LogPoller,
+	headTracker logpoller.HeadTracker,
+	client client.Client,
+	address common.Address,
+	chainReaderConfig evmrelaytypes.ChainReaderConfig,
+) types.ContractReader {
+	cr, err := evm.NewChainReaderService(testutils.Context(t), logger.TestLogger(t), logPoller, headTracker, client, chainReaderConfig)
+	require.NoError(t, err)
+	err = cr.Bind(testutils.Context(t), []types.BoundContract{
+		{
+			Address: address.String(),
+			Name:    consts.ContractNameCCIPConfig,
+		},
+	})
+	require.NoError(t, err)
+	require.NoError(t, cr.Start(testutils.Context(t)))
+	// Poll readiness with a short sleep rather than busy-spinning a CPU core
+	// while the service starts up.
+	for {
+		if err := cr.Ready(); err == nil {
+			break
+		}
+		time.Sleep(50 * time.Millisecond)
+	}
+
+	return cr
+}
+
+const (
+ ChainA uint64 = 1
+ FChainA uint8 = 1
+
+ ChainB uint64 = 2
+ FChainB uint8 = 2
+
+ ChainC uint64 = 3
+ FChainC uint8 = 3
+
+ CcipCapabilityLabelledName = "ccip"
+ CcipCapabilityVersion = "v1.0"
+)
+
+var CapabilityID = fmt.Sprintf("%s@%s", CcipCapabilityLabelledName, CcipCapabilityVersion)
+
+// TestUniverse bundles everything needed to exercise the home-chain contracts
+// against a simulated backend: the deployer account, the chain itself, bound
+// contract wrappers, and the log-poller/reader services built on top of them.
+type TestUniverse struct {
+	Transactor      *bind.TransactOpts         // funded deployer/admin account
+	Backend         *backends.SimulatedBackend // in-memory simulated chain
+	CapReg          *kcr.CapabilitiesRegistry  // deployed capabilities registry
+	CcipCfg         *ccip_config.CCIPConfig    // deployed CCIP config contract
+	TestingT        *testing.T
+	LogPoller       logpoller.LogPoller
+	HeadTracker     logpoller.HeadTracker
+	SimClient       client.Client
+	HomeChainReader ccipreader.HomeChain // polls CcipCfg for chain configs
+}
+
+// NewTestUniverse spins up a simulated backend funded for the test transactor,
+// deploys the capabilities registry and CCIPConfig contracts, starts a log
+// poller backed by a test database, and wires a HomeChain reader over it all.
+// The log poller is closed via t.Cleanup.
+func NewTestUniverse(ctx context.Context, t *testing.T, lggr logger.Logger) TestUniverse {
+	transactor := testutils.MustNewSimTransactor(t)
+	backend := backends.NewSimulatedBackend(core.GenesisAlloc{
+		transactor.From: {Balance: assets.Ether(1000).ToInt()},
+	}, 30e6)
+
+	crAddress, _, _, err := kcr.DeployCapabilitiesRegistry(transactor, backend)
+	require.NoError(t, err)
+	backend.Commit()
+
+	capReg, err := kcr.NewCapabilitiesRegistry(crAddress, backend)
+	require.NoError(t, err)
+
+	// The CCIPConfig contract is deployed pointing at the capability registry.
+	ccAddress, _, _, err := ccip_config.DeployCCIPConfig(transactor, backend, crAddress)
+	require.NoError(t, err)
+	backend.Commit()
+
+	cc, err := ccip_config.NewCCIPConfig(ccAddress, backend)
+	require.NoError(t, err)
+
+	db := pgtest.NewSqlxDB(t)
+	lpOpts := logpoller.Opts{
+		PollPeriod:               time.Millisecond,
+		FinalityDepth:            0,
+		BackfillBatchSize:        10,
+		RpcBatchSize:             10,
+		KeepFinalizedBlocksDepth: 100000,
+	}
+	cl := client.NewSimulatedBackendClient(t, backend, big.NewInt(chainID))
+	headTracker := headtracker.NewSimulatedHeadTracker(cl, lpOpts.UseFinalityTag, lpOpts.FinalityDepth)
+	// NOTE(review): PollPeriod is set to time.Millisecond above, so this
+	// defensive fallback is unreachable as written.
+	if lpOpts.PollPeriod == 0 {
+		lpOpts.PollPeriod = 1 * time.Hour
+	}
+	lp := logpoller.NewLogPoller(logpoller.NewORM(big.NewInt(chainID), db, lggr), cl, logger.NullLogger, headTracker, lpOpts)
+	require.NoError(t, lp.Start(ctx))
+	t.Cleanup(func() { require.NoError(t, lp.Close()) })
+
+	hcr := NewHomeChainReader(t, lp, headTracker, cl, ccAddress)
+	return TestUniverse{
+		Transactor:      transactor,
+		Backend:         backend,
+		CapReg:          capReg,
+		CcipCfg:         cc,
+		TestingT:        t,
+		SimClient:       cl,
+		LogPoller:       lp,
+		HeadTracker:     headTracker,
+		HomeChainReader: hcr,
+	}
+}
+
+// NewContractReader builds a fresh ChainReaderService from a JSON-encoded
+// ChainReaderConfig, reusing the universe's log poller, head tracker and
+// simulated client.
+func (t TestUniverse) NewContractReader(ctx context.Context, cfg []byte) (types.ContractReader, error) {
+	var chainReaderConfig evmrelaytypes.ChainReaderConfig
+	require.NoError(t.TestingT, json.Unmarshal(cfg, &chainReaderConfig))
+	return evm.NewChainReaderService(ctx, logger.TestLogger(t.TestingT), t.LogPoller, t.HeadTracker, t.SimClient, chainReaderConfig)
+}
+
+func P2pIDsFromInts(ints []int64) [][32]byte {
+ var p2pIDs [][32]byte
+ for _, i := range ints {
+ p2pID := p2pkey.MustNewV2XXXTestingOnly(big.NewInt(i)).PeerID()
+ p2pIDs = append(p2pIDs, p2pID)
+ }
+ sort.Slice(p2pIDs, func(i, j int) bool {
+ for k := 0; k < 32; k++ {
+ if p2pIDs[i][k] < p2pIDs[j][k] {
+ return true
+ } else if p2pIDs[i][k] > p2pIDs[j][k] {
+ return false
+ }
+ }
+ return false
+ })
+ return p2pIDs
+}
+
+// AddCapability registers the CCIP capability in the capabilities registry
+// and then, for every p2p ID: creates a dedicated node operator, adds the
+// node under it with the CCIP capability assigned, and verifies the
+// registration by reading the node back from the registry.
+func (t *TestUniverse) AddCapability(p2pIDs [][32]byte) {
+	_, err := t.CapReg.AddCapabilities(t.Transactor, []kcr.CapabilitiesRegistryCapability{
+		{
+			LabelledName:          CcipCapabilityLabelledName,
+			Version:               CcipCapabilityVersion,
+			CapabilityType:        0,
+			ResponseType:          0,
+			ConfigurationContract: t.CcipCfg.Address(),
+		},
+	})
+	require.NoError(t.TestingT, err, "failed to add capability to registry")
+	t.Backend.Commit()
+
+	ccipCapabilityID, err := t.CapReg.GetHashedCapabilityId(nil, CcipCapabilityLabelledName, CcipCapabilityVersion)
+	require.NoError(t.TestingT, err)
+
+	for i := 0; i < len(p2pIDs); i++ {
+		// One node operator per node, named "nop-<i>" so it can be found again
+		// via the NodeOperatorAdded event below.
+		_, err = t.CapReg.AddNodeOperators(t.Transactor, []kcr.CapabilitiesRegistryNodeOperator{
+			{
+				Admin: t.Transactor.From,
+				Name:  fmt.Sprintf("nop-%d", i),
+			},
+		})
+		require.NoError(t.TestingT, err)
+		t.Backend.Commit()
+
+		// get the node operator id from the event
+		it, err := t.CapReg.FilterNodeOperatorAdded(nil, nil, nil)
+		require.NoError(t.TestingT, err)
+		var nodeOperatorID uint32
+		for it.Next() {
+			if it.Event.Name == fmt.Sprintf("nop-%d", i) {
+				nodeOperatorID = it.Event.NodeOperatorId
+				break
+			}
+		}
+		require.NotZero(t.TestingT, nodeOperatorID)
+
+		_, err = t.CapReg.AddNodes(t.Transactor, []kcr.CapabilitiesRegistryNodeParams{
+			{
+				NodeOperatorId:      nodeOperatorID,
+				Signer:              testutils.Random32Byte(),
+				P2pId:               p2pIDs[i],
+				HashedCapabilityIds: [][32]byte{ccipCapabilityID},
+			},
+		})
+		require.NoError(t.TestingT, err)
+		t.Backend.Commit()
+
+		// verify that the node was added successfully
+		nodeInfo, err := t.CapReg.GetNode(nil, p2pIDs[i])
+		require.NoError(t.TestingT, err)
+
+		require.Equal(t.TestingT, nodeOperatorID, nodeInfo.NodeOperatorId)
+		require.Equal(t.TestingT, p2pIDs[i][:], nodeInfo.P2pId[:])
+	}
+}
+
+// NewHomeChainReader wires a contract reader for the CCIPConfig contract into
+// a polling HomeChain reader, starts it, and closes it on test cleanup.
+func NewHomeChainReader(t *testing.T, logPoller logpoller.LogPoller, headTracker logpoller.HeadTracker, client client.Client, ccAddress common.Address) ccipreader.HomeChain {
+	contractReader := NewReader(t, logPoller, headTracker, client, ccAddress, configsevm.HomeChainReaderConfigRaw())
+
+	homeChain := ccipreader.NewHomeChainReader(contractReader, logger.TestLogger(t), 500*time.Millisecond)
+	require.NoError(t, homeChain.Start(testutils.Context(t)))
+	t.Cleanup(func() { require.NoError(t, homeChain.Close()) })
+
+	return homeChain
+}
+
+// AddDONToRegistry builds commit and exec OCR3 configs (with fresh random
+// signer/transmitter addresses per node), ABI-encodes them via the
+// exposeOCR3Config helper, and registers a DON for the given chain selector
+// in the capabilities registry.
+func (t *TestUniverse) AddDONToRegistry(
+	ccipCapabilityID [32]byte,
+	chainSelector uint64,
+	f uint8,
+	bootstrapP2PID [32]byte,
+	p2pIDs [][32]byte,
+) {
+	tabi, err := ocr3_config_encoder.IOCR3ConfigEncoderMetaData.GetAbi()
+	require.NoError(t.TestingT, err)
+
+	var (
+		signers      [][]byte
+		transmitters [][]byte
+	)
+	for range p2pIDs {
+		signers = append(signers, testutils.NewAddress().Bytes())
+		transmitters = append(transmitters, testutils.NewAddress().Bytes())
+	}
+
+	// One OCR3 config per plugin type: commit and exec.
+	var ocr3Configs []ocr3_config_encoder.CCIPConfigTypesOCR3Config
+	for _, pluginType := range []cctypes.PluginType{cctypes.PluginTypeCCIPCommit, cctypes.PluginTypeCCIPExec} {
+		ocr3Configs = append(ocr3Configs, ocr3_config_encoder.CCIPConfigTypesOCR3Config{
+			PluginType:            uint8(pluginType),
+			ChainSelector:         chainSelector,
+			F:                     f,
+			OffchainConfigVersion: 30,
+			OfframpAddress:        testutils.NewAddress().Bytes(),
+			BootstrapP2PIds:       [][32]byte{bootstrapP2PID},
+			P2pIds:                p2pIDs,
+			Signers:               signers,
+			Transmitters:          transmitters,
+			OffchainConfig:        []byte("offchain config"),
+		})
+	}
+
+	encodedCall, err := tabi.Pack("exposeOCR3Config", ocr3Configs)
+	require.NoError(t.TestingT, err)
+
+	// Trim first four bytes to remove function selector.
+	encodedConfigs := encodedCall[4:]
+
+	_, err = t.CapReg.AddDON(t.Transactor, p2pIDs, []kcr.CapabilitiesRegistryCapabilityConfiguration{
+		{
+			CapabilityId: ccipCapabilityID,
+			Config:       encodedConfigs,
+		},
+	}, false, false, f)
+	require.NoError(t.TestingT, err)
+	t.Backend.Commit()
+}
+
+// SetupConfigInfo packs the given readers, fChain value and encoded config
+// into the on-chain ChainConfigInfo struct for chainSelector.
+func SetupConfigInfo(chainSelector uint64, readers [][32]byte, fChain uint8, cfg []byte) ccip_config.CCIPConfigTypesChainConfigInfo {
+	chainConfig := ccip_config.CCIPConfigTypesChainConfig{
+		Readers: readers,
+		FChain:  fChain,
+		Config:  cfg,
+	}
+	return ccip_config.CCIPConfigTypesChainConfigInfo{
+		ChainSelector: chainSelector,
+		ChainConfig:   chainConfig,
+	}
+}
diff --git a/core/capabilities/ccip/ccip_integration_tests/ocr3_node_test.go b/core/capabilities/ccip/ccip_integration_tests/ocr3_node_test.go
new file mode 100644
index 00000000000..8cafb901724
--- /dev/null
+++ b/core/capabilities/ccip/ccip_integration_tests/ocr3_node_test.go
@@ -0,0 +1,281 @@
+package ccip_integration_tests
+
+import (
+ "fmt"
+ "math/big"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/hashicorp/consul/sdk/freeport"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/types/ccipocr3"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_multi_offramp"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
+
+ confighelper2 "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper"
+ ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ "github.com/stretchr/testify/require"
+)
+
+const STATE_SUCCESS = uint8(2)
+
+/*
+* If you want to debug, set log level to info and use the following commands for easier logs filtering.
+*
+* // Run the test and redirect logs to logs.txt
+* go test -v -run "^TestIntegration_OCR3Nodes" ./core/capabilities/ccip/ccip_integration_tests 2>&1 > logs.txt
+*
+* // Reads logs.txt as a stream and applies filters using grep
+* tail -fn0 logs.txt | grep "CCIPExecPlugin"
+ */
+// TestIntegration_OCR3Nodes spins up numChains simulated chains and numNodes
+// OCR3 nodes (every node supports every chain), registers the CCIP capability,
+// chain configs and per-chain DONs on the home chain, sends one CCIP request
+// from every chain to every other chain, and then waits for the matching
+// commit reports and ExecutionStateChanged events on all destinations.
+func TestIntegration_OCR3Nodes(t *testing.T) {
+	const (
+		numChains = 3 // number of chains that this test will run on
+		numNodes  = 4 // number of OCR3 nodes, test assumes that every node supports every chain
+
+		simulatedBackendBlockTime = 900 * time.Millisecond // Simulated backend blocks committing interval
+		oraclesBootWaitTime       = 30 * time.Second       // Time to wait for oracles to come up (HACK)
+		fChain                    = 1                      // fChain value for all the chains
+		oracleLogLevel            = zapcore.InfoLevel      // Log level for the oracle / plugins.
+	)
+
+	t.Logf("creating %d universes", numChains)
+	homeChainUni, universes := createUniverses(t, numChains)
+
+	var (
+		oracles = make(map[uint64][]confighelper2.OracleIdentityExtra)
+		apps    []chainlink.Application
+		nodes   []*ocr3Node
+		p2pIDs  [][32]byte
+
+		// The bootstrap node will be: nodes[0]
+		bootstrapPort  int
+		bootstrapP2PID p2pkey.PeerID
+	)
+
+	ports := freeport.GetN(t, numNodes)
+	ctx := testutils.Context(t)
+	callCtx := &bind.CallOpts{Context: ctx}
+
+	for i := 0; i < numNodes; i++ {
+		t.Logf("Setting up ocr3 node:%d at port:%d", i, ports[i])
+		node := setupNodeOCR3(t, ports[i], universes, homeChainUni, oracleLogLevel)
+
+		for chainID, transmitter := range node.transmitters {
+			identity := confighelper2.OracleIdentityExtra{
+				OracleIdentity: confighelper2.OracleIdentity{
+					OnchainPublicKey:  node.keybundle.PublicKey(), // Different for each chain
+					TransmitAccount:   ocrtypes.Account(transmitter.Hex()),
+					OffchainPublicKey: node.keybundle.OffchainPublicKey(), // Same for each family
+					PeerID:            node.peerID,
+				},
+				ConfigEncryptionPublicKey: node.keybundle.ConfigEncryptionPublicKey(), // Different for each chain
+			}
+			oracles[chainID] = append(oracles[chainID], identity)
+		}
+
+		apps = append(apps, node.app)
+		nodes = append(nodes, node)
+
+		peerID, err := p2pkey.MakePeerID(node.peerID)
+		require.NoError(t, err)
+		p2pIDs = append(p2pIDs, peerID)
+	}
+
+	bootstrapPort = ports[0]
+	bootstrapP2PID = p2pIDs[0]
+	bootstrapAddr := fmt.Sprintf("127.0.0.1:%d", bootstrapPort)
+	// %s (not %d) so PeerID's Stringer is used instead of dumping the raw
+	// byte array element-by-element.
+	t.Logf("[bootstrap node] peerID:%s p2pID:%s address:%s", nodes[0].peerID, bootstrapP2PID, bootstrapAddr)
+
+	// Start committing periodically in the background for all the chains
+	tick := time.NewTicker(simulatedBackendBlockTime)
+	defer tick.Stop()
+	commitBlocksBackground(t, universes, tick)
+
+	ccipCapabilityID, err := homeChainUni.capabilityRegistry.GetHashedCapabilityId(
+		callCtx, CapabilityLabelledName, CapabilityVersion)
+	require.NoError(t, err, "failed to get hashed capability id for ccip")
+	require.NotEqual(t, [32]byte{}, ccipCapabilityID, "ccip capability id is empty")
+
+	// Need to Add nodes and assign capabilities to them before creating DONS
+	homeChainUni.AddNodes(t, p2pIDs, [][32]byte{ccipCapabilityID})
+
+	for _, uni := range universes {
+		t.Logf("Adding chainconfig for chain %d", uni.chainID)
+		AddChainConfig(t, homeChainUni, getSelector(uni.chainID), p2pIDs, fChain)
+	}
+
+	cfgs, err := homeChainUni.ccipConfig.GetAllChainConfigs(callCtx)
+	require.NoError(t, err)
+	require.Len(t, cfgs, numChains)
+
+	// Create a DON for each chain
+	for _, uni := range universes {
+		// Add nodes and give them the capability
+		t.Log("Adding DON for universe: ", uni.chainID)
+		chainSelector := getSelector(uni.chainID)
+		homeChainUni.AddDON(
+			t,
+			ccipCapabilityID,
+			chainSelector,
+			uni,
+			fChain,
+			bootstrapP2PID,
+			p2pIDs,
+			oracles[uni.chainID],
+		)
+	}
+
+	t.Log("Creating ocr3 jobs, starting oracles")
+	for i := 0; i < len(nodes); i++ {
+		err1 := nodes[i].app.Start(ctx)
+		require.NoError(t, err1)
+		tApp := apps[i]
+		t.Cleanup(func() { require.NoError(t, tApp.Stop()) })
+
+		jb := mustGetJobSpec(t, bootstrapP2PID, bootstrapPort, nodes[i].peerID, nodes[i].keybundle.ID())
+		require.NoErrorf(t, tApp.AddJobV2(ctx, &jb), "Wasn't able to create ccip job for node %d", i)
+	}
+
+	t.Logf("Sending ccip requests from each chain to all other chains")
+	for _, uni := range universes {
+		requests := genRequestData(uni.chainID, universes)
+		uni.SendCCIPRequests(t, requests)
+	}
+
+	// Wait for the oracles to come up.
+	// TODO: We need some data driven way to do this e.g. wait until LP filters to be registered.
+	time.Sleep(oraclesBootWaitTime)
+
+	// Replay the log poller on all the chains so that the logs are in the db.
+	// otherwise the plugins won't pick them up.
+	for _, node := range nodes {
+		for chainID := range universes {
+			t.Logf("Replaying logs for chain %d from block %d", chainID, 1)
+			require.NoError(t, node.app.ReplayFromBlock(big.NewInt(int64(chainID)), 1, false), "failed to replay logs")
+		}
+	}
+
+	// with only one request sent from each chain to each other chain,
+	// and with sequence numbers on incrementing by 1 on a per-dest chain
+	// basis, we expect the min sequence number to be 1 on all chains.
+	expectedSeqNrRange := ccipocr3.NewSeqNumRange(1, 1)
+	var wg sync.WaitGroup
+	for _, uni := range universes {
+		// The map keys are chain IDs, not selectors; convert via getSelector.
+		for remoteChainID := range universes {
+			if remoteChainID == uni.chainID {
+				continue
+			}
+			wg.Add(1)
+			go func(uni onchainUniverse, remoteChainID uint64) {
+				defer wg.Done()
+				waitForCommitWithInterval(t, uni, getSelector(remoteChainID), expectedSeqNrRange)
+			}(uni, remoteChainID)
+		}
+	}
+
+	start := time.Now()
+	wg.Wait()
+	t.Logf("All chains received the expected commit report in %s", time.Since(start))
+
+	// with only one request sent from each chain to each other chain,
+	// all ExecutionStateChanged events should have the sequence number 1.
+	expectedSeqNr := uint64(1)
+	for _, uni := range universes {
+		for remoteChainID := range universes {
+			if remoteChainID == uni.chainID {
+				continue
+			}
+			wg.Add(1)
+			go func(uni onchainUniverse, remoteChainID uint64) {
+				defer wg.Done()
+				waitForExecWithSeqNr(t, uni, getSelector(remoteChainID), expectedSeqNr)
+			}(uni, remoteChainID)
+		}
+	}
+
+	start = time.Now()
+	wg.Wait()
+	t.Logf("All chains received the expected ExecutionStateChanged event in %s", time.Since(start))
+}
+
+// genRequestData builds one CCIP request from chainID to every other chain in
+// universes, addressed to each destination's receiver contract.
+func genRequestData(chainID uint64, universes map[uint64]onchainUniverse) []requestData {
+	var out []requestData
+	for destID, destUni := range universes {
+		if destID == chainID {
+			// No self-sends.
+			continue
+		}
+		out = append(out, requestData{
+			destChainSelector: getSelector(destID),
+			receiverAddress:   destUni.receiver.Address(),
+			data:              []byte(fmt.Sprintf("msg from chain %d to chain %d", chainID, destID)),
+		})
+	}
+	return out
+}
+
+// waitForCommitWithInterval blocks until a CommitReportAccepted event is
+// observed on uni's offramp containing a merkle root for
+// expectedSourceChainSelector whose sequence-number interval exactly matches
+// expectedSeqNumRange. Progress is logged every 10s while waiting; a
+// subscription error fails the test.
+func waitForCommitWithInterval(
+	t *testing.T,
+	uni onchainUniverse,
+	expectedSourceChainSelector uint64,
+	expectedSeqNumRange ccipocr3.SeqNumRange,
+) {
+	sink := make(chan *evm_2_evm_multi_offramp.EVM2EVMMultiOffRampCommitReportAccepted)
+	subscription, err := uni.offramp.WatchCommitReportAccepted(&bind.WatchOpts{
+		Context: testutils.Context(t),
+	}, sink)
+	require.NoError(t, err)
+	// Tear the subscription down on return instead of leaking it for the
+	// remainder of the test binary.
+	defer subscription.Unsubscribe()
+
+	// NOTE(review): this helper is invoked from goroutines and t.Fatalf is
+	// only valid from the main test goroutine — confirm/adjust at call sites.
+	for {
+		select {
+		case <-time.After(10 * time.Second):
+			t.Logf("Waiting for commit report on chain id %d (selector %d) from source selector %d expected seq nr range %s",
+				uni.chainID, getSelector(uni.chainID), expectedSourceChainSelector, expectedSeqNumRange.String())
+		case subErr := <-subscription.Err():
+			t.Fatalf("Subscription error: %+v", subErr)
+		case report := <-sink:
+			if len(report.Report.MerkleRoots) > 0 {
+				// Check the interval of sequence numbers and make sure it matches
+				// the expected range.
+				for _, mr := range report.Report.MerkleRoots {
+					if mr.SourceChainSelector == expectedSourceChainSelector &&
+						uint64(expectedSeqNumRange.Start()) == mr.Interval.Min &&
+						uint64(expectedSeqNumRange.End()) == mr.Interval.Max {
+						t.Logf("Received commit report on chain id %d (selector %d) from source selector %d expected seq nr range %s",
+							uni.chainID, getSelector(uni.chainID), expectedSourceChainSelector, expectedSeqNumRange.String())
+						return
+					}
+				}
+			}
+		}
+	}
+}
+
+// waitForExecWithSeqNr polls uni's offramp until exactly one
+// ExecutionStateChanged event is found for the given source chain selector and
+// sequence number, retrying every 5 seconds.
+func waitForExecWithSeqNr(t *testing.T, uni onchainUniverse, expectedSourceChainSelector, expectedSeqNr uint64) {
+	for {
+		scc, err := uni.offramp.GetSourceChainConfig(nil, expectedSourceChainSelector)
+		require.NoError(t, err)
+		t.Logf("Waiting for ExecutionStateChanged on chain %d (selector %d) from chain %d with expected sequence number %d, current onchain minSeqNr: %d",
+			uni.chainID, getSelector(uni.chainID), expectedSourceChainSelector, expectedSeqNr, scc.MinSeqNr)
+		iter, err := uni.offramp.FilterExecutionStateChanged(nil, []uint64{expectedSourceChainSelector}, []uint64{expectedSeqNr}, nil)
+		require.NoError(t, err)
+		var count int
+		for iter.Next() {
+			if iter.Event.SequenceNumber == expectedSeqNr && iter.Event.SourceChainSelector == expectedSourceChainSelector {
+				count++
+			}
+		}
+		// Release the iterator's underlying filter each pass instead of
+		// leaking one per poll.
+		require.NoError(t, iter.Close())
+		if count == 1 {
+			t.Logf("Received ExecutionStateChanged on chain %d (selector %d) from chain %d with expected sequence number %d",
+				uni.chainID, getSelector(uni.chainID), expectedSourceChainSelector, expectedSeqNr)
+			return
+		}
+		time.Sleep(5 * time.Second)
+	}
+}
diff --git a/core/capabilities/ccip/ccip_integration_tests/ocr_node_helper.go b/core/capabilities/ccip/ccip_integration_tests/ocr_node_helper.go
new file mode 100644
index 00000000000..75b0e0ee947
--- /dev/null
+++ b/core/capabilities/ccip/ccip_integration_tests/ocr_node_helper.go
@@ -0,0 +1,316 @@
+package ccip_integration_tests
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "net/http"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ coretypes "github.com/smartcontractkit/chainlink-common/pkg/types/core/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/validate"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/common"
+ gethtypes "github.com/ethereum/go-ethereum/core/types"
+ "github.com/jmoiron/sqlx"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/config"
+ "github.com/smartcontractkit/chainlink-common/pkg/loop"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ v2toml "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml"
+ evmutils "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm"
+ configv2 "github.com/smartcontractkit/chainlink/v2/core/config/toml"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/logger/audit"
+ "github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+ "github.com/smartcontractkit/chainlink/v2/plugins"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zapcore"
+)
+
+// ocr3Node bundles a started chainlink application with the identifiers and
+// keys the integration tests need to wire it into an OCR3 DON.
+type ocr3Node struct {
+	app chainlink.Application
+	// peerID is the node's raw p2p peer ID, used to build bootstrapper addresses.
+	peerID string
+	// transmitters maps chain id -> the funded sending address on that chain.
+	transmitters map[uint64]common.Address
+	keybundle    ocr2key.KeyBundle
+	db           *sqlx.DB
+}
+
+// setupNodeOCR3 creates a chainlink node and any associated keys in order to run
+// ccip.
+// It spins up a test DB-backed application configured for OCR2/P2PV2 with one
+// EVM chain per entry in `universes`, creates a P2P key, an OCR2 EVM key
+// bundle, and one funded transmitter address per chain.
+func setupNodeOCR3(
+	t *testing.T,
+	port int,
+	universes map[uint64]onchainUniverse,
+	homeChainUniverse homeChain,
+	logLevel zapcore.Level,
+) *ocr3Node {
+	// Do not want to load fixtures as they contain a dummy chainID.
+	cfg, db := heavyweight.FullTestDBNoFixturesV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
+		c.Insecure.OCRDevelopmentMode = ptr(true) // Disables ocr spec validation so we can have fast polling for the test.
+
+		c.Feature.LogPoller = ptr(true)
+
+		// P2P V2 configs.
+		c.P2P.V2.Enabled = ptr(true)
+		c.P2P.V2.DeltaDial = config.MustNewDuration(500 * time.Millisecond)
+		c.P2P.V2.DeltaReconcile = config.MustNewDuration(5 * time.Second)
+		c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", port)}
+
+		// Enable Capabilities, This is a pre-requisite for registrySyncer to work.
+		c.Capabilities.ExternalRegistry.NetworkID = ptr(relay.NetworkEVM)
+		c.Capabilities.ExternalRegistry.ChainID = ptr(strconv.FormatUint(homeChainUniverse.chainID, 10))
+		c.Capabilities.ExternalRegistry.Address = ptr(homeChainUniverse.capabilityRegistry.Address().String())
+
+		// OCR configs
+		c.OCR.Enabled = ptr(false)
+		c.OCR.DefaultTransactionQueueDepth = ptr(uint32(200))
+		c.OCR2.Enabled = ptr(true)
+		c.OCR2.ContractPollInterval = config.MustNewDuration(5 * time.Second)
+
+		c.Log.Level = ptr(configv2.LogLevel(logLevel))
+
+		// Register one EVM chain config per universe so the node can reach
+		// every simulated chain.
+		var chains v2toml.EVMConfigs
+		for chainID := range universes {
+			chains = append(chains, createConfigV2Chain(uBigInt(chainID)))
+		}
+		c.EVM = chains
+	})
+
+	lggr := logger.TestLogger(t)
+	lggr.SetLogLevel(logLevel)
+	ctx := testutils.Context(t)
+	clients := make(map[uint64]client.Client)
+
+	// One simulated-backend client per chain, keyed by chain id.
+	for chainID, uni := range universes {
+		clients[chainID] = client.NewSimulatedBackendClient(t, uni.backend, uBigInt(chainID))
+	}
+
+	master := keystore.New(db, utils.FastScryptParams, lggr)
+
+	// Wrap the eth keystore so transactions are signed with the simulated
+	// backend's chain id (see EthKeystoreSim).
+	kStore := KeystoreSim{
+		eks: &EthKeystoreSim{
+			Eth: master.Eth(),
+			t:   t,
+		},
+		csa: master.CSA(),
+	}
+	mailMon := mailbox.NewMonitor("ccip", lggr.Named("mailbox"))
+	evmOpts := chainlink.EVMFactoryConfig{
+		ChainOpts: legacyevm.ChainOpts{
+			AppConfig: cfg,
+			GenEthClient: func(i *big.Int) client.Client {
+				client, ok := clients[i.Uint64()]
+				if !ok {
+					t.Fatal("no backend for chainID", i)
+				}
+				return client
+			},
+			MailMon: mailMon,
+			DS:      db,
+		},
+		CSAETHKeystore: kStore,
+	}
+	relayerFactory := chainlink.RelayerFactory{
+		Logger:               lggr,
+		LoopRegistry:         plugins.NewLoopRegistry(lggr.Named("LoopRegistry"), cfg.Tracing()),
+		GRPCOpts:             loop.GRPCOpts{},
+		CapabilitiesRegistry: coretypes.NewCapabilitiesRegistry(t),
+	}
+	initOps := []chainlink.CoreRelayerChainInitFunc{chainlink.InitEVM(testutils.Context(t), relayerFactory, evmOpts)}
+	rci, err := chainlink.NewCoreRelayerChainInteroperators(initOps...)
+	require.NoError(t, err)
+
+	app, err := chainlink.NewApplication(chainlink.ApplicationOpts{
+		Config:                     cfg,
+		DS:                         db,
+		KeyStore:                   master,
+		RelayerChainInteroperators: rci,
+		Logger:                     lggr,
+		ExternalInitiatorManager:   nil,
+		CloseLogger:                lggr.Sync,
+		UnrestrictedHTTPClient:     &http.Client{},
+		RestrictedHTTPClient:       &http.Client{},
+		AuditLogger:                audit.NoopLogger,
+		MailMon:                    mailMon,
+		LoopRegistry:               plugins.NewLoopRegistry(lggr, cfg.Tracing()),
+	})
+	require.NoError(t, err)
+	// "password" is the test keystore password used throughout this setup.
+	require.NoError(t, app.GetKeyStore().Unlock(ctx, "password"))
+	_, err = app.GetKeyStore().P2P().Create(ctx)
+	require.NoError(t, err)
+
+	p2pIDs, err := app.GetKeyStore().P2P().GetAll()
+	require.NoError(t, err)
+	require.Len(t, p2pIDs, 1)
+	peerID := p2pIDs[0].PeerID()
+	// create a transmitter for each chain
+	transmitters := make(map[uint64]common.Address)
+	for chainID, uni := range universes {
+		backend := uni.backend
+		owner := uni.owner
+		cID := uBigInt(chainID)
+		addrs, err2 := app.GetKeyStore().Eth().EnabledAddressesForChain(testutils.Context(t), cID)
+		require.NoError(t, err2)
+		if len(addrs) == 1 {
+			// just fund the address
+			fundAddress(t, owner, addrs[0], assets.Ether(10).ToInt(), backend)
+			transmitters[chainID] = addrs[0]
+		} else {
+			// create key and fund it
+			_, err3 := app.GetKeyStore().Eth().Create(testutils.Context(t), cID)
+			require.NoError(t, err3, "failed to create key for chain", chainID)
+			sendingKeys, err3 := app.GetKeyStore().Eth().EnabledAddressesForChain(testutils.Context(t), cID)
+			require.NoError(t, err3)
+			require.Len(t, sendingKeys, 1)
+			fundAddress(t, owner, sendingKeys[0], assets.Ether(10).ToInt(), backend)
+			transmitters[chainID] = sendingKeys[0]
+		}
+	}
+	require.Len(t, transmitters, len(universes))
+
+	keybundle, err := app.GetKeyStore().OCR2().Create(ctx, chaintype.EVM)
+	require.NoError(t, err)
+
+	t.Cleanup(func() {
+		require.NoError(t, db.Close())
+	})
+
+	return &ocr3Node{
+		// can't use this app because it doesn't have the right toml config
+		// missing bootstrapp
+		app:          app,
+		peerID:       peerID.Raw(),
+		transmitters: transmitters,
+		keybundle:    keybundle,
+		db:           db,
+	}
+}
+
+// ptr returns a pointer to a copy of the supplied value; handy for populating
+// config fields that are pointers to primitives.
+func ptr[T any](v T) *T {
+	out := v
+	return &out
+}
+
+// Compile-time check that EthKeystoreSim satisfies keystore.Eth.
+var _ keystore.Eth = &EthKeystoreSim{}
+
+// EthKeystoreSim wraps a real eth keystore but pins transaction signing to the
+// geth simulated backend's chain id (1337), regardless of the chain id the
+// caller asks for.
+type EthKeystoreSim struct {
+	keystore.Eth
+	// NOTE(review): t is unused in the code visible here — confirm before removing.
+	t *testing.T
+}
+
+// override
+// SignTx ignores the provided chainID and delegates to the embedded keystore
+// with chain id 1337, which is what the simulated backend expects.
+func (e *EthKeystoreSim) SignTx(ctx context.Context, address common.Address, tx *gethtypes.Transaction, chainID *big.Int) (*gethtypes.Transaction, error) {
+	// always sign with chain id 1337 for the simulated backend
+	return e.Eth.SignTx(ctx, address, tx, big.NewInt(1337))
+}
+
+// KeystoreSim satisfies the combined CSA/ETH keystore requirement of
+// chainlink.EVMFactoryConfig by delegating to the supplied keystores.
+type KeystoreSim struct {
+	eks keystore.Eth
+	csa keystore.CSA
+}
+
+// Eth exposes the wrapped eth keystore.
+func (k KeystoreSim) Eth() keystore.Eth {
+	return k.eks
+}
+
+// CSA exposes the wrapped CSA keystore.
+func (k KeystoreSim) CSA() keystore.CSA {
+	return k.csa
+}
+
+// fundAddress sends `amount` wei from `from` to `to` on the given simulated
+// backend via a plain legacy transfer, then commits a block so the transfer
+// is mined immediately.
+func fundAddress(t *testing.T, from *bind.TransactOpts, to common.Address, amount *big.Int, backend *backends.SimulatedBackend) {
+	nonce, err := backend.PendingNonceAt(testutils.Context(t), from.From)
+	require.NoError(t, err)
+	gp, err := backend.SuggestGasPrice(testutils.Context(t))
+	require.NoError(t, err)
+	rawTx := gethtypes.NewTx(&gethtypes.LegacyTx{
+		Nonce:    nonce,
+		GasPrice: gp,
+		Gas:      21000, // standard gas limit for a plain value transfer
+		To:       &to,
+		Value:    amount,
+	})
+	signedTx, err := from.Signer(from.From, rawTx)
+	require.NoError(t, err)
+	err = backend.SendTransaction(testutils.Context(t), signedTx)
+	require.NoError(t, err)
+	backend.Commit()
+}
+
+// createConfigV2Chain returns an enabled EVM chain config tuned for the
+// simulated backend: fast log polling, shallow finality, forwarders disabled,
+// and a raised default gas limit.
+func createConfigV2Chain(chainID *big.Int) *v2toml.EVMConfig {
+	chain := v2toml.Defaults((*evmutils.Big)(chainID))
+	chain.GasEstimator.LimitDefault = ptr(uint64(5e6))
+	// Poll quickly; the simulated backend only mines when Commit is called.
+	chain.LogPollInterval = config.MustNewDuration(100 * time.Millisecond)
+	chain.Transactions.ForwardersEnabled = ptr(false)
+	chain.FinalityDepth = ptr(uint32(2))
+	return &v2toml.EVMConfig{
+		ChainID: (*evmutils.Big)(chainID),
+		Enabled: ptr(true),
+		Chain:   chain,
+		Nodes:   v2toml.EVMNodes{&v2toml.Node{}},
+	}
+}
+
+// Commit blocks periodically in the background for all chains
+// A t.Cleanup hook cancels the goroutine and waits for it to exit. Note the
+// ticker is owned by the caller and is NOT stopped here.
+func commitBlocksBackground(t *testing.T, universes map[uint64]onchainUniverse, tick *time.Ticker) {
+	t.Log("starting ticker to commit blocks")
+	tickCtx, tickCancel := context.WithCancel(testutils.Context(t))
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for {
+			select {
+			case <-tick.C:
+				// Mine one block on every simulated chain per tick.
+				for _, uni := range universes {
+					uni.backend.Commit()
+				}
+			case <-tickCtx.Done():
+				return
+			}
+		}
+	}()
+	t.Cleanup(func() {
+		tickCancel()
+		wg.Wait()
+	})
+}
+
+// mustGetJobSpec builds and validates a CCIP job spec pointing at the given
+// bootstrap node; it fails the test on any spec-generation or validation error.
+// p2pKeyID: nodes p2p id
+// ocrKeyBundleID: nodes ocr key bundle id
+func mustGetJobSpec(t *testing.T, bootstrapP2PID p2pkey.PeerID, bootstrapPort int, p2pKeyID string, ocrKeyBundleID string) job.Job {
+	specArgs := validate.SpecArgs{
+		P2PV2Bootstrappers: []string{
+			fmt.Sprintf("%s@127.0.0.1:%d", bootstrapP2PID.Raw(), bootstrapPort),
+		},
+		CapabilityVersion:      CapabilityVersion,
+		CapabilityLabelledName: CapabilityLabelledName,
+		OCRKeyBundleIDs: map[string]string{
+			relay.NetworkEVM: ocrKeyBundleID,
+		},
+		P2PKeyID:     p2pKeyID,
+		PluginConfig: map[string]any{},
+	}
+	specToml, err := validate.NewCCIPSpecToml(specArgs)
+	require.NoError(t, err)
+	jb, err := validate.ValidatedCCIPSpec(specToml)
+	require.NoError(t, err)
+	return jb
+}
diff --git a/core/capabilities/ccip/ccip_integration_tests/ping_pong_test.go b/core/capabilities/ccip/ccip_integration_tests/ping_pong_test.go
new file mode 100644
index 00000000000..8a65ff5167d
--- /dev/null
+++ b/core/capabilities/ccip/ccip_integration_tests/ping_pong_test.go
@@ -0,0 +1,95 @@
+package ccip_integration_tests
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ gethcommon "github.com/ethereum/go-ethereum/common"
+
+ "github.com/stretchr/testify/require"
+
+ "golang.org/x/exp/maps"
+
+ pp "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/ping_pong_demo"
+)
+
+/*
+* Test is setting up 3 chains (let's call them A, B, C), each chain deploys and starts 2 ping pong contracts for the other 2.
+* A ---deploy+start---> (pingPongB, pingPongC)
+* B ---deploy+start---> (pingPongA, pingPongC)
+* C ---deploy+start---> (pingPongA, pingPongB)
+* and then checks that each ping pong contract emitted `CCIPSendRequested` event from the expected source to destination.
+* Test fails if any wiring between contracts is not correct.
+ */
+// TestPingPong starts every ping-pong pair and asserts that the latest
+// CCIPSendRequested event on each source chain targets the expected
+// destination selector, sender, and (padded) receiver address.
+func TestPingPong(t *testing.T) {
+	_, universes := createUniverses(t, 3)
+	pingPongs := initializePingPongContracts(t, universes)
+	for chainID, universe := range universes {
+		for otherChain, pingPong := range pingPongs[chainID] {
+			t.Log("PingPong From: ", chainID, " To: ", otherChain)
+			_, err := pingPong.StartPingPong(universe.owner)
+			require.NoError(t, err)
+			universe.backend.Commit()
+
+			logIter, err := universe.onramp.FilterCCIPSendRequested(&bind.FilterOpts{Start: 0}, nil)
+			require.NoError(t, err)
+			// Iterate until latest event
+			for logIter.Next() {
+			}
+			// Surface iteration errors, and guard against zero events: in that
+			// case logIter.Event is nil and the assertions below would panic
+			// with a nil dereference instead of failing cleanly.
+			require.NoError(t, logIter.Error())
+			log := logIter.Event
+			require.NotNil(t, log, "expected at least one CCIPSendRequested event")
+			require.Equal(t, getSelector(otherChain), log.DestChainSelector)
+			require.Equal(t, pingPong.Address(), log.Message.Sender)
+			chainPingPongAddr := pingPongs[otherChain][chainID].Address().Bytes()
+			// With chain agnostic addresses we need to pad the address to the correct length if the receiver is zero prefixed
+			paddedAddr := gethcommon.LeftPadBytes(chainPingPongAddr, len(log.Message.Receiver))
+			require.Equal(t, paddedAddr, log.Message.Receiver)
+		}
+	}
+}
+
+// InitializeContracts initializes ping pong contracts on all chains and
+// connects them all to each other.
+func initializePingPongContracts(
+	t *testing.T,
+	chainUniverses map[uint64]onchainUniverse,
+) map[uint64]map[uint64]*pp.PingPongDemo {
+	pingPongs := make(map[uint64]map[uint64]*pp.PingPongDemo)
+	chainIDs := maps.Keys(chainUniverses)
+	// For each chain initialize N ping pong contracts, where N is the (number of chains - 1)
+	for chainID, universe := range chainUniverses {
+		pingPongs[chainID] = make(map[uint64]*pp.PingPongDemo)
+		for _, chainToConnect := range chainIDs {
+			if chainToConnect == chainID {
+				continue // don't connect chain to itself
+			}
+			backend := universe.backend
+			owner := universe.owner
+			pingPongAddr, _, _, err := pp.DeployPingPongDemo(owner, backend, universe.router.Address(), universe.linkToken.Address())
+			require.NoError(t, err)
+			backend.Commit()
+			pingPong, err := pp.NewPingPongDemo(pingPongAddr, backend)
+			require.NoError(t, err)
+			backend.Commit()
+			// Fund the ping pong contract with LINK. Check the transfer error
+			// BEFORE committing the block, so a failed transfer is reported at
+			// the call site rather than being mined and surfacing later.
+			_, err = universe.linkToken.Transfer(owner, pingPong.Address(), e18Mult(10))
+			require.NoError(t, err)
+			backend.Commit()
+			pingPongs[chainID][chainToConnect] = pingPong
+		}
+	}
+
+	// Set up each ping pong contract to its counterpart on the other chain
+	for chainID, universe := range chainUniverses {
+		for chainToConnect, pingPong := range pingPongs[chainID] {
+			_, err := pingPong.SetCounterpart(
+				universe.owner,
+				getSelector(chainUniverses[chainToConnect].chainID),
+				// This is the address of the ping pong contract on the other chain
+				pingPongs[chainToConnect][chainID].Address(),
+			)
+			require.NoError(t, err)
+			universe.backend.Commit()
+		}
+	}
+	return pingPongs
+}
diff --git a/core/capabilities/ccip/ccipevm/commitcodec.go b/core/capabilities/ccip/ccipevm/commitcodec.go
new file mode 100644
index 00000000000..928cecd0a41
--- /dev/null
+++ b/core/capabilities/ccip/ccipevm/commitcodec.go
@@ -0,0 +1,138 @@
+package ccipevm
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccipocr3"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_multi_offramp"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+)
+
+// CommitPluginCodecV1 is a codec for encoding and decoding commit plugin reports.
+// Compatible with:
+// - "EVM2EVMMultiOffRamp 1.6.0-dev"
+type CommitPluginCodecV1 struct {
+	// commitReportAcceptedEventInputs holds the ABI arguments of the
+	// CommitReportAccepted event, used to pack/unpack reports.
+	commitReportAcceptedEventInputs abi.Arguments
+}
+
+// NewCommitPluginCodecV1 builds a codec from the embedded multi-offramp ABI.
+// It panics if the ABI cannot be parsed, since that indicates a build-time
+// error in the generated bindings rather than a runtime condition.
+func NewCommitPluginCodecV1() *CommitPluginCodecV1 {
+	abiParsed, err := abi.JSON(strings.NewReader(evm_2_evm_multi_offramp.EVM2EVMMultiOffRampABI))
+	if err != nil {
+		panic(fmt.Errorf("parse multi offramp abi: %s", err))
+	}
+	eventInputs := abihelpers.MustGetEventInputs("CommitReportAccepted", abiParsed)
+	return &CommitPluginCodecV1{commitReportAcceptedEventInputs: eventInputs}
+}
+
+// Encode converts a chain-agnostic commit plugin report into ABI-encoded bytes,
+// packed as the inputs of the offramp's CommitReportAccepted event.
+// It returns an error for a token price update whose token id is not a valid
+// hex address or whose price is empty, and for a gas price update with an
+// empty price.
+func (c *CommitPluginCodecV1) Encode(ctx context.Context, report cciptypes.CommitPluginReport) ([]byte, error) {
+	merkleRoots := make([]evm_2_evm_multi_offramp.EVM2EVMMultiOffRampMerkleRoot, 0, len(report.MerkleRoots))
+	for _, root := range report.MerkleRoots {
+		merkleRoots = append(merkleRoots, evm_2_evm_multi_offramp.EVM2EVMMultiOffRampMerkleRoot{
+			SourceChainSelector: uint64(root.ChainSel),
+			Interval: evm_2_evm_multi_offramp.EVM2EVMMultiOffRampInterval{
+				Min: uint64(root.SeqNumsRange.Start()),
+				Max: uint64(root.SeqNumsRange.End()),
+			},
+			MerkleRoot: root.MerkleRoot,
+		})
+	}
+
+	tokenPriceUpdates := make([]evm_2_evm_multi_offramp.InternalTokenPriceUpdate, 0, len(report.PriceUpdates.TokenPriceUpdates))
+	for _, update := range report.PriceUpdates.TokenPriceUpdates {
+		// TokenID must be a hex EVM address for this chain family.
+		if !common.IsHexAddress(string(update.TokenID)) {
+			return nil, fmt.Errorf("invalid token address: %s", update.TokenID)
+		}
+		if update.Price.IsEmpty() {
+			return nil, fmt.Errorf("empty price for token: %s", update.TokenID)
+		}
+		tokenPriceUpdates = append(tokenPriceUpdates, evm_2_evm_multi_offramp.InternalTokenPriceUpdate{
+			SourceToken: common.HexToAddress(string(update.TokenID)),
+			UsdPerToken: update.Price.Int,
+		})
+	}
+
+	gasPriceUpdates := make([]evm_2_evm_multi_offramp.InternalGasPriceUpdate, 0, len(report.PriceUpdates.GasPriceUpdates))
+	for _, update := range report.PriceUpdates.GasPriceUpdates {
+		if update.GasPrice.IsEmpty() {
+			return nil, fmt.Errorf("empty gas price for chain: %d", update.ChainSel)
+		}
+
+		gasPriceUpdates = append(gasPriceUpdates, evm_2_evm_multi_offramp.InternalGasPriceUpdate{
+			DestChainSelector: uint64(update.ChainSel),
+			UsdPerUnitGas:     update.GasPrice.Int,
+		})
+	}
+
+	evmReport := evm_2_evm_multi_offramp.EVM2EVMMultiOffRampCommitReport{
+		PriceUpdates: evm_2_evm_multi_offramp.InternalPriceUpdates{
+			TokenPriceUpdates: tokenPriceUpdates,
+			GasPriceUpdates:   gasPriceUpdates,
+		},
+		MerkleRoots: merkleRoots,
+	}
+
+	return c.commitReportAcceptedEventInputs.PackValues([]interface{}{evmReport})
+}
+
+// Decode unpacks ABI-encoded commit report bytes (as produced by Encode) back
+// into the chain-agnostic representation. It fails if the payload does not
+// unpack to exactly one CommitReport argument.
+func (c *CommitPluginCodecV1) Decode(ctx context.Context, bytes []byte) (cciptypes.CommitPluginReport, error) {
+	unpacked, err := c.commitReportAcceptedEventInputs.Unpack(bytes)
+	if err != nil {
+		return cciptypes.CommitPluginReport{}, err
+	}
+	if len(unpacked) != 1 {
+		return cciptypes.CommitPluginReport{}, fmt.Errorf("expected 1 argument, got %d", len(unpacked))
+	}
+
+	// abi.ConvertType is needed because Unpack returns an anonymous struct.
+	commitReportRaw := abi.ConvertType(unpacked[0], new(evm_2_evm_multi_offramp.EVM2EVMMultiOffRampCommitReport))
+	commitReport, is := commitReportRaw.(*evm_2_evm_multi_offramp.EVM2EVMMultiOffRampCommitReport)
+	if !is {
+		return cciptypes.CommitPluginReport{},
+			fmt.Errorf("expected EVM2EVMMultiOffRampCommitReport, got %T", unpacked[0])
+	}
+
+	merkleRoots := make([]cciptypes.MerkleRootChain, 0, len(commitReport.MerkleRoots))
+	for _, root := range commitReport.MerkleRoots {
+		merkleRoots = append(merkleRoots, cciptypes.MerkleRootChain{
+			ChainSel: cciptypes.ChainSelector(root.SourceChainSelector),
+			SeqNumsRange: cciptypes.NewSeqNumRange(
+				cciptypes.SeqNum(root.Interval.Min),
+				cciptypes.SeqNum(root.Interval.Max),
+			),
+			MerkleRoot: root.MerkleRoot,
+		})
+	}
+
+	tokenPriceUpdates := make([]cciptypes.TokenPrice, 0, len(commitReport.PriceUpdates.TokenPriceUpdates))
+	for _, update := range commitReport.PriceUpdates.TokenPriceUpdates {
+		tokenPriceUpdates = append(tokenPriceUpdates, cciptypes.TokenPrice{
+			TokenID: types.Account(update.SourceToken.String()),
+			// Deep-copy the big.Int so the decoded report does not alias the
+			// unpacked ABI value.
+			Price: cciptypes.NewBigInt(big.NewInt(0).Set(update.UsdPerToken)),
+		})
+	}
+
+	gasPriceUpdates := make([]cciptypes.GasPriceChain, 0, len(commitReport.PriceUpdates.GasPriceUpdates))
+	for _, update := range commitReport.PriceUpdates.GasPriceUpdates {
+		gasPriceUpdates = append(gasPriceUpdates, cciptypes.GasPriceChain{
+			GasPrice: cciptypes.NewBigInt(big.NewInt(0).Set(update.UsdPerUnitGas)),
+			ChainSel: cciptypes.ChainSelector(update.DestChainSelector),
+		})
+	}
+
+	return cciptypes.CommitPluginReport{
+		MerkleRoots: merkleRoots,
+		PriceUpdates: cciptypes.PriceUpdates{
+			TokenPriceUpdates: tokenPriceUpdates,
+			GasPriceUpdates:   gasPriceUpdates,
+		},
+	}, nil
+}
+
+// Ensure CommitPluginCodec implements the CommitPluginCodec interface
+var _ cciptypes.CommitPluginCodec = (*CommitPluginCodecV1)(nil)
diff --git a/core/capabilities/ccip/ccipevm/commitcodec_test.go b/core/capabilities/ccip/ccipevm/commitcodec_test.go
new file mode 100644
index 00000000000..737f7be1d6e
--- /dev/null
+++ b/core/capabilities/ccip/ccipevm/commitcodec_test.go
@@ -0,0 +1,135 @@
+package ccipevm
+
+import (
+ "math/big"
+ "math/rand"
+ "testing"
+
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccipocr3"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+)
+
+// randomCommitReport builds a commit plugin report populated with random
+// merkle roots, one token price update and three gas price updates, for use
+// in round-trip codec tests.
+// NOTE(review): both ends of each SeqNumsRange are independent random
+// uint64s, so Start may exceed End — acceptable for codec round-trips, but
+// do not rely on the range being well-formed.
+var randomCommitReport = func() cciptypes.CommitPluginReport {
+	return cciptypes.CommitPluginReport{
+		MerkleRoots: []cciptypes.MerkleRootChain{
+			{
+				ChainSel: cciptypes.ChainSelector(rand.Uint64()),
+				SeqNumsRange: cciptypes.NewSeqNumRange(
+					cciptypes.SeqNum(rand.Uint64()),
+					cciptypes.SeqNum(rand.Uint64()),
+				),
+				MerkleRoot: utils.RandomBytes32(),
+			},
+			{
+				ChainSel: cciptypes.ChainSelector(rand.Uint64()),
+				SeqNumsRange: cciptypes.NewSeqNumRange(
+					cciptypes.SeqNum(rand.Uint64()),
+					cciptypes.SeqNum(rand.Uint64()),
+				),
+				MerkleRoot: utils.RandomBytes32(),
+			},
+		},
+		PriceUpdates: cciptypes.PriceUpdates{
+			TokenPriceUpdates: []cciptypes.TokenPrice{
+				{
+					TokenID: types.Account(utils.RandomAddress().String()),
+					Price:   cciptypes.NewBigInt(utils.RandUint256()),
+				},
+			},
+			GasPriceUpdates: []cciptypes.GasPriceChain{
+				{GasPrice: cciptypes.NewBigInt(utils.RandUint256()), ChainSel: cciptypes.ChainSelector(rand.Uint64())},
+				{GasPrice: cciptypes.NewBigInt(utils.RandUint256()), ChainSel: cciptypes.ChainSelector(rand.Uint64())},
+				{GasPrice: cciptypes.NewBigInt(utils.RandUint256()), ChainSel: cciptypes.ChainSelector(rand.Uint64())},
+			},
+		},
+	}
+}
+
+// TestCommitPluginCodecV1 round-trips randomized commit reports through
+// Encode/Decode and asserts equality; cases with expErr set verify that
+// invalid reports (e.g. empty token address) are rejected at encode time.
+func TestCommitPluginCodecV1(t *testing.T) {
+	testCases := []struct {
+		name   string
+		report func(report cciptypes.CommitPluginReport) cciptypes.CommitPluginReport
+		expErr bool
+	}{
+		{
+			name: "base report",
+			report: func(report cciptypes.CommitPluginReport) cciptypes.CommitPluginReport {
+				return report
+			},
+		},
+		{
+			name: "empty token address",
+			report: func(report cciptypes.CommitPluginReport) cciptypes.CommitPluginReport {
+				report.PriceUpdates.TokenPriceUpdates[0].TokenID = ""
+				return report
+			},
+			expErr: true,
+		},
+		{
+			// Zero-value merkle root still encodes successfully.
+			name: "empty merkle root",
+			report: func(report cciptypes.CommitPluginReport) cciptypes.CommitPluginReport {
+				report.MerkleRoots[0].MerkleRoot = cciptypes.Bytes32{}
+				return report
+			},
+		},
+		{
+			// A zero price is distinct from an empty (unset) price and is valid.
+			name: "zero token price",
+			report: func(report cciptypes.CommitPluginReport) cciptypes.CommitPluginReport {
+				report.PriceUpdates.TokenPriceUpdates[0].Price = cciptypes.NewBigInt(big.NewInt(0))
+				return report
+			},
+		},
+		{
+			name: "zero gas price",
+			report: func(report cciptypes.CommitPluginReport) cciptypes.CommitPluginReport {
+				report.PriceUpdates.GasPriceUpdates[0].GasPrice = cciptypes.NewBigInt(big.NewInt(0))
+				return report
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			report := tc.report(randomCommitReport())
+			commitCodec := NewCommitPluginCodecV1()
+			ctx := testutils.Context(t)
+			encodedReport, err := commitCodec.Encode(ctx, report)
+			if tc.expErr {
+				assert.Error(t, err)
+				return
+			}
+			require.NoError(t, err)
+			decodedReport, err := commitCodec.Decode(ctx, encodedReport)
+			require.NoError(t, err)
+			require.Equal(t, report, decodedReport)
+		})
+	}
+}
+
+// BenchmarkCommitPluginCodecV1_Encode measures encoding of a single randomized
+// commit report (generated once, outside the timed loop).
+func BenchmarkCommitPluginCodecV1_Encode(b *testing.B) {
+	commitCodec := NewCommitPluginCodecV1()
+	ctx := testutils.Context(b)
+
+	rep := randomCommitReport()
+	for i := 0; i < b.N; i++ {
+		_, err := commitCodec.Encode(ctx, rep)
+		require.NoError(b, err)
+	}
+}
+
+// BenchmarkCommitPluginCodecV1_Decode measures decoding of a single encoded
+// commit report (encoded once, outside the timed loop).
+func BenchmarkCommitPluginCodecV1_Decode(b *testing.B) {
+	commitCodec := NewCommitPluginCodecV1()
+	ctx := testutils.Context(b)
+	encodedReport, err := commitCodec.Encode(ctx, randomCommitReport())
+	require.NoError(b, err)
+
+	for i := 0; i < b.N; i++ {
+		_, err := commitCodec.Decode(ctx, encodedReport)
+		require.NoError(b, err)
+	}
+}
diff --git a/core/capabilities/ccip/ccipevm/executecodec.go b/core/capabilities/ccip/ccipevm/executecodec.go
new file mode 100644
index 00000000000..a64c775112c
--- /dev/null
+++ b/core/capabilities/ccip/ccipevm/executecodec.go
@@ -0,0 +1,181 @@
+package ccipevm
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/common"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccipocr3"
+
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_multi_offramp"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers"
+)
+
+// ExecutePluginCodecV1 is a codec for encoding and decoding execute plugin reports.
+// Compatible with:
+// - "EVM2EVMMultiOffRamp 1.6.0-dev"
+type ExecutePluginCodecV1 struct {
+	// executeReportMethodInputs holds the first ABI input of the offramp's
+	// manuallyExecute method (the execution report), used to pack/unpack.
+	executeReportMethodInputs abi.Arguments
+}
+
+// NewExecutePluginCodecV1 builds a codec from the embedded multi-offramp ABI.
+// It panics on a malformed ABI or a missing manuallyExecute method, since
+// either indicates a build-time error in the generated bindings.
+func NewExecutePluginCodecV1() *ExecutePluginCodecV1 {
+	abiParsed, err := abi.JSON(strings.NewReader(evm_2_evm_multi_offramp.EVM2EVMMultiOffRampABI))
+	if err != nil {
+		panic(fmt.Errorf("parse multi offramp abi: %s", err))
+	}
+	methodInputs := abihelpers.MustGetMethodInputs("manuallyExecute", abiParsed)
+	if len(methodInputs) == 0 {
+		panic("no inputs found for method: manuallyExecute")
+	}
+
+	return &ExecutePluginCodecV1{
+		// Only the first input (the report itself) is used for (un)packing.
+		executeReportMethodInputs: methodInputs[:1],
+	}
+}
+
+// Encode converts a chain-agnostic execute plugin report into ABI-encoded
+// bytes, packed as the report argument of the offramp's manuallyExecute
+// method. It errors on empty proof-flag bits, an empty token amount, or
+// extra args that cannot be decoded for a gas limit.
+func (e *ExecutePluginCodecV1) Encode(ctx context.Context, report cciptypes.ExecutePluginReport) ([]byte, error) {
+	evmReport := make([]evm_2_evm_multi_offramp.InternalExecutionReportSingleChain, 0, len(report.ChainReports))
+
+	for _, chainReport := range report.ChainReports {
+		if chainReport.ProofFlagBits.IsEmpty() {
+			return nil, fmt.Errorf("proof flag bits are empty")
+		}
+
+		evmProofs := make([][32]byte, 0, len(chainReport.Proofs))
+		for _, proof := range chainReport.Proofs {
+			evmProofs = append(evmProofs, proof)
+		}
+
+		evmMessages := make([]evm_2_evm_multi_offramp.InternalAny2EVMRampMessage, 0, len(chainReport.Messages))
+		for _, message := range chainReport.Messages {
+			// NOTE(review): common.BytesToAddress truncates inputs longer than
+			// 20 bytes — receivers are assumed to be EVM addresses here.
+			receiver := common.BytesToAddress(message.Receiver)
+
+			tokenAmounts := make([]evm_2_evm_multi_offramp.InternalRampTokenAmount, 0, len(message.TokenAmounts))
+			for _, tokenAmount := range message.TokenAmounts {
+				if tokenAmount.Amount.IsEmpty() {
+					return nil, fmt.Errorf("empty amount for token: %s", tokenAmount.DestTokenAddress)
+				}
+
+				tokenAmounts = append(tokenAmounts, evm_2_evm_multi_offramp.InternalRampTokenAmount{
+					SourcePoolAddress: tokenAmount.SourcePoolAddress,
+					DestTokenAddress:  tokenAmount.DestTokenAddress,
+					ExtraData:         tokenAmount.ExtraData,
+					Amount:            tokenAmount.Amount.Int,
+				})
+			}
+
+			// Extra args are only decoded here to extract the gas limit.
+			gasLimit, err := decodeExtraArgsV1V2(message.ExtraArgs)
+			if err != nil {
+				return nil, fmt.Errorf("decode extra args to get gas limit: %w", err)
+			}
+
+			evmMessages = append(evmMessages, evm_2_evm_multi_offramp.InternalAny2EVMRampMessage{
+				Header: evm_2_evm_multi_offramp.InternalRampMessageHeader{
+					MessageId:           message.Header.MessageID,
+					SourceChainSelector: uint64(message.Header.SourceChainSelector),
+					DestChainSelector:   uint64(message.Header.DestChainSelector),
+					SequenceNumber:      uint64(message.Header.SequenceNumber),
+					Nonce:               message.Header.Nonce,
+				},
+				Sender:       message.Sender,
+				Data:         message.Data,
+				Receiver:     receiver,
+				GasLimit:     gasLimit,
+				TokenAmounts: tokenAmounts,
+			})
+		}
+
+		evmChainReport := evm_2_evm_multi_offramp.InternalExecutionReportSingleChain{
+			SourceChainSelector: uint64(chainReport.SourceChainSelector),
+			Messages:            evmMessages,
+			OffchainTokenData:   chainReport.OffchainTokenData,
+			Proofs:              evmProofs,
+			ProofFlagBits:       chainReport.ProofFlagBits.Int,
+		}
+		evmReport = append(evmReport, evmChainReport)
+	}
+
+	return e.executeReportMethodInputs.PackValues([]interface{}{&evmReport})
+}
+
+// Decode unpacks ABI-encoded execute report bytes (as produced by Encode)
+// back into the chain-agnostic representation. Fields that are not present
+// in the onchain struct (MsgHash, OnRamp, ExtraArgs, FeeToken,
+// FeeTokenAmount) are left zero-valued — see the inline todo notes.
+func (e *ExecutePluginCodecV1) Decode(ctx context.Context, encodedReport []byte) (cciptypes.ExecutePluginReport, error) {
+	unpacked, err := e.executeReportMethodInputs.Unpack(encodedReport)
+	if err != nil {
+		return cciptypes.ExecutePluginReport{}, fmt.Errorf("unpack encoded report: %w", err)
+	}
+	if len(unpacked) != 1 {
+		return cciptypes.ExecutePluginReport{}, fmt.Errorf("unpacked report is empty")
+	}
+
+	// abi.ConvertType is needed because Unpack returns an anonymous struct.
+	evmReportRaw := abi.ConvertType(unpacked[0], new([]evm_2_evm_multi_offramp.InternalExecutionReportSingleChain))
+	evmReportPtr, is := evmReportRaw.(*[]evm_2_evm_multi_offramp.InternalExecutionReportSingleChain)
+	if !is {
+		return cciptypes.ExecutePluginReport{}, fmt.Errorf("got an unexpected report type %T", unpacked[0])
+	}
+	if evmReportPtr == nil {
+		return cciptypes.ExecutePluginReport{}, fmt.Errorf("evm report is nil")
+	}
+
+	evmReport := *evmReportPtr
+	executeReport := cciptypes.ExecutePluginReport{
+		ChainReports: make([]cciptypes.ExecutePluginReportSingleChain, 0, len(evmReport)),
+	}
+
+	for _, evmChainReport := range evmReport {
+		proofs := make([]cciptypes.Bytes32, 0, len(evmChainReport.Proofs))
+		for _, proof := range evmChainReport.Proofs {
+			proofs = append(proofs, proof)
+		}
+
+		messages := make([]cciptypes.Message, 0, len(evmChainReport.Messages))
+		for _, evmMessage := range evmChainReport.Messages {
+			tokenAmounts := make([]cciptypes.RampTokenAmount, 0, len(evmMessage.TokenAmounts))
+			for _, tokenAmount := range evmMessage.TokenAmounts {
+				tokenAmounts = append(tokenAmounts, cciptypes.RampTokenAmount{
+					SourcePoolAddress: tokenAmount.SourcePoolAddress,
+					DestTokenAddress:  tokenAmount.DestTokenAddress,
+					ExtraData:         tokenAmount.ExtraData,
+					Amount:            cciptypes.NewBigInt(tokenAmount.Amount),
+				})
+			}
+
+			message := cciptypes.Message{
+				Header: cciptypes.RampMessageHeader{
+					MessageID:           evmMessage.Header.MessageId,
+					SourceChainSelector: cciptypes.ChainSelector(evmMessage.Header.SourceChainSelector),
+					DestChainSelector:   cciptypes.ChainSelector(evmMessage.Header.DestChainSelector),
+					SequenceNumber:      cciptypes.SeqNum(evmMessage.Header.SequenceNumber),
+					Nonce:               evmMessage.Header.Nonce,
+					MsgHash:             cciptypes.Bytes32{}, // <-- todo: info not available, but not required atm
+					OnRamp:              cciptypes.Bytes{},   // <-- todo: info not available, but not required atm
+				},
+				Sender:         evmMessage.Sender,
+				Data:           evmMessage.Data,
+				Receiver:       evmMessage.Receiver.Bytes(),
+				ExtraArgs:      cciptypes.Bytes{},  // <-- todo: info not available, but not required atm
+				FeeToken:       cciptypes.Bytes{},  // <-- todo: info not available, but not required atm
+				FeeTokenAmount: cciptypes.BigInt{}, // <-- todo: info not available, but not required atm
+				TokenAmounts:   tokenAmounts,
+			}
+			messages = append(messages, message)
+		}
+
+		chainReport := cciptypes.ExecutePluginReportSingleChain{
+			SourceChainSelector: cciptypes.ChainSelector(evmChainReport.SourceChainSelector),
+			Messages:            messages,
+			OffchainTokenData:   evmChainReport.OffchainTokenData,
+			Proofs:              proofs,
+			ProofFlagBits:       cciptypes.NewBigInt(evmChainReport.ProofFlagBits),
+		}
+
+		executeReport.ChainReports = append(executeReport.ChainReports, chainReport)
+	}
+
+	return executeReport, nil
+}
+
+// Ensure ExecutePluginCodec implements the ExecutePluginCodec interface
+var _ cciptypes.ExecutePluginCodec = (*ExecutePluginCodecV1)(nil)
diff --git a/core/capabilities/ccip/ccipevm/executecodec_test.go b/core/capabilities/ccip/ccipevm/executecodec_test.go
new file mode 100644
index 00000000000..4f207fdb0e2
--- /dev/null
+++ b/core/capabilities/ccip/ccipevm/executecodec_test.go
@@ -0,0 +1,174 @@
+package ccipevm
+
+import (
+ "math/rand"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/core"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccipocr3"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/message_hasher"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/report_codec"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var randomExecuteReport = func(t *testing.T, d *testSetupData) cciptypes.ExecutePluginReport {
+ const numChainReports = 10
+ const msgsPerReport = 10
+ const numTokensPerMsg = 3
+
+ chainReports := make([]cciptypes.ExecutePluginReportSingleChain, numChainReports)
+ for i := 0; i < numChainReports; i++ {
+ reportMessages := make([]cciptypes.Message, msgsPerReport)
+ for j := 0; j < msgsPerReport; j++ {
+ data, err := cciptypes.NewBytesFromString(utils.RandomAddress().String())
+ assert.NoError(t, err)
+
+ tokenAmounts := make([]cciptypes.RampTokenAmount, numTokensPerMsg)
+ for z := 0; z < numTokensPerMsg; z++ {
+ tokenAmounts[z] = cciptypes.RampTokenAmount{
+ SourcePoolAddress: utils.RandomAddress().Bytes(),
+ DestTokenAddress: utils.RandomAddress().Bytes(),
+ ExtraData: data,
+ Amount: cciptypes.NewBigInt(utils.RandUint256()),
+ }
+ }
+
+ extraArgs, err := d.contract.EncodeEVMExtraArgsV1(nil, message_hasher.ClientEVMExtraArgsV1{
+ GasLimit: utils.RandUint256(),
+ })
+ assert.NoError(t, err)
+
+ reportMessages[j] = cciptypes.Message{
+ Header: cciptypes.RampMessageHeader{
+ MessageID: utils.RandomBytes32(),
+ SourceChainSelector: cciptypes.ChainSelector(rand.Uint64()),
+ DestChainSelector: cciptypes.ChainSelector(rand.Uint64()),
+ SequenceNumber: cciptypes.SeqNum(rand.Uint64()),
+ Nonce: rand.Uint64(),
+ MsgHash: utils.RandomBytes32(),
+ OnRamp: utils.RandomAddress().Bytes(),
+ },
+ Sender: utils.RandomAddress().Bytes(),
+ Data: data,
+ Receiver: utils.RandomAddress().Bytes(),
+ ExtraArgs: extraArgs,
+ FeeToken: utils.RandomAddress().Bytes(),
+ FeeTokenAmount: cciptypes.NewBigInt(utils.RandUint256()),
+ TokenAmounts: tokenAmounts,
+ }
+ }
+
+ tokenData := make([][][]byte, numTokensPerMsg)
+ for j := 0; j < numTokensPerMsg; j++ {
+ tokenData[j] = [][]byte{{0x1}, {0x2, 0x3}}
+ }
+
+ chainReports[i] = cciptypes.ExecutePluginReportSingleChain{
+ SourceChainSelector: cciptypes.ChainSelector(rand.Uint64()),
+ Messages: reportMessages,
+ OffchainTokenData: tokenData,
+ Proofs: []cciptypes.Bytes32{utils.RandomBytes32(), utils.RandomBytes32()},
+ ProofFlagBits: cciptypes.NewBigInt(utils.RandUint256()),
+ }
+ }
+
+ return cciptypes.ExecutePluginReport{ChainReports: chainReports}
+}
+
+func TestExecutePluginCodecV1(t *testing.T) {
+	d := testSetup(t)
+
+	testCases := []struct {
+		name   string
+		report func(report cciptypes.ExecutePluginReport) cciptypes.ExecutePluginReport
+		expErr bool
+	}{
+		{
+			name:   "base report",
+			report: func(report cciptypes.ExecutePluginReport) cciptypes.ExecutePluginReport { return report },
+			expErr: false,
+		},
+		{
+			name: "reports have empty msgs",
+			report: func(report cciptypes.ExecutePluginReport) cciptypes.ExecutePluginReport {
+				report.ChainReports[0].Messages = []cciptypes.Message{}
+				report.ChainReports[4].Messages = []cciptypes.Message{}
+				return report
+			},
+			expErr: false,
+		},
+		{
+			name: "reports have empty offchain token data",
+			report: func(report cciptypes.ExecutePluginReport) cciptypes.ExecutePluginReport {
+				report.ChainReports[0].OffchainTokenData = [][][]byte{}
+				report.ChainReports[4].OffchainTokenData[1] = [][]byte{}
+				return report
+			},
+			expErr: false,
+		},
+	}
+
+	ctx := testutils.Context(t)
+
+	// Deploy the contract
+	transactor := testutils.MustNewSimTransactor(t)
+	simulatedBackend := backends.NewSimulatedBackend(core.GenesisAlloc{
+		transactor.From: {Balance: assets.Ether(1000).ToInt()},
+	}, 30e6)
+	address, _, _, err := report_codec.DeployReportCodec(transactor, simulatedBackend)
+	require.NoError(t, err)
+	simulatedBackend.Commit()
+	contract, err := report_codec.NewReportCodec(address, simulatedBackend)
+	require.NoError(t, err)
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			codec := NewExecutePluginCodecV1()
+			report := tc.report(randomExecuteReport(t, d))
+			bytes, err := codec.Encode(ctx, report)
+			if tc.expErr {
+				assert.Error(t, err)
+				return
+			}
+			assert.NoError(t, err)
+
+			// zero out the fields the codec does not serialize before comparing
+
+			// ignore msg hash in comparison
+			for i := range report.ChainReports {
+				for j := range report.ChainReports[i].Messages {
+					report.ChainReports[i].Messages[j].Header.MsgHash = cciptypes.Bytes32{}
+					report.ChainReports[i].Messages[j].Header.OnRamp = cciptypes.Bytes{}
+					report.ChainReports[i].Messages[j].FeeToken = cciptypes.Bytes{}
+					report.ChainReports[i].Messages[j].ExtraArgs = cciptypes.Bytes{}
+					report.ChainReports[i].Messages[j].FeeTokenAmount = cciptypes.BigInt{}
+				}
+			}
+
+			// decode using the contract
+			contractDecodedReport, err := contract.DecodeExecuteReport(&bind.CallOpts{Context: ctx}, bytes)
+			assert.NoError(t, err)
+			assert.Equal(t, len(report.ChainReports), len(contractDecodedReport))
+			for i, expReport := range report.ChainReports {
+				actReport := contractDecodedReport[i]
+				assert.Equal(t, expReport.OffchainTokenData, actReport.OffchainTokenData)
+				assert.Equal(t, len(expReport.Messages), len(actReport.Messages))
+				assert.Equal(t, uint64(expReport.SourceChainSelector), actReport.SourceChainSelector)
+			}
+
+			// decode using the codec
+			codecDecoded, err := codec.Decode(ctx, bytes)
+			assert.NoError(t, err)
+			assert.Equal(t, report, codecDecoded)
+		})
+	}
+}
diff --git a/core/capabilities/ccip/ccipevm/helpers.go b/core/capabilities/ccip/ccipevm/helpers.go
new file mode 100644
index 00000000000..ee83230a4ce
--- /dev/null
+++ b/core/capabilities/ccip/ccipevm/helpers.go
@@ -0,0 +1,33 @@
+package ccipevm
+
+import (
+ "bytes"
+ "fmt"
+ "math/big"
+)
+
+func decodeExtraArgsV1V2(extraArgs []byte) (gasLimit *big.Int, err error) {
+	if len(extraArgs) < 4 {
+		return nil, fmt.Errorf("extra args too short: %d, should be at least 4 (i.e the extraArgs tag)", len(extraArgs))
+	}
+
+	var method string
+	if bytes.Equal(extraArgs[:4], evmExtraArgsV1Tag) {
+		method = "decodeEVMExtraArgsV1"
+	} else if bytes.Equal(extraArgs[:4], evmExtraArgsV2Tag) {
+		method = "decodeEVMExtraArgsV2"
+	} else {
+		return nil, fmt.Errorf("unknown extra args tag: %x", extraArgs[:4])
+	}
+	ifaces, err := messageHasherABI.Methods[method].Inputs.UnpackValues(extraArgs[4:])
+	if err != nil {
+		return nil, fmt.Errorf("abi decode extra args (%s): %w", method, err)
+	}
+	// gas limit is always the first argument, and allow OOO isn't set explicitly
+	// on the message.
+	gasLimit, ok := ifaces[0].(*big.Int)
+	if !ok {
+		return nil, fmt.Errorf("expected *big.Int, got %T", ifaces[0])
+	}
+	return gasLimit, nil
+}
diff --git a/core/capabilities/ccip/ccipevm/helpers_test.go b/core/capabilities/ccip/ccipevm/helpers_test.go
new file mode 100644
index 00000000000..95a5d4439bb
--- /dev/null
+++ b/core/capabilities/ccip/ccipevm/helpers_test.go
@@ -0,0 +1,41 @@
+package ccipevm
+
+import (
+ "math/big"
+ "math/rand"
+ "testing"
+
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/message_hasher"
+
+ "github.com/stretchr/testify/require"
+)
+
+func Test_decodeExtraArgs(t *testing.T) {
+ d := testSetup(t)
+ gasLimit := big.NewInt(rand.Int63())
+
+ t.Run("v1", func(t *testing.T) {
+ encoded, err := d.contract.EncodeEVMExtraArgsV1(nil, message_hasher.ClientEVMExtraArgsV1{
+ GasLimit: gasLimit,
+ })
+ require.NoError(t, err)
+
+ decodedGasLimit, err := decodeExtraArgsV1V2(encoded)
+ require.NoError(t, err)
+
+ require.Equal(t, gasLimit, decodedGasLimit)
+ })
+
+ t.Run("v2", func(t *testing.T) {
+ encoded, err := d.contract.EncodeEVMExtraArgsV2(nil, message_hasher.ClientEVMExtraArgsV2{
+ GasLimit: gasLimit,
+ AllowOutOfOrderExecution: true,
+ })
+ require.NoError(t, err)
+
+ decodedGasLimit, err := decodeExtraArgsV1V2(encoded)
+ require.NoError(t, err)
+
+ require.Equal(t, gasLimit, decodedGasLimit)
+ })
+}
diff --git a/core/capabilities/ccip/ccipevm/msghasher.go b/core/capabilities/ccip/ccipevm/msghasher.go
new file mode 100644
index 00000000000..0df0a8254ac
--- /dev/null
+++ b/core/capabilities/ccip/ccipevm/msghasher.go
@@ -0,0 +1,127 @@
+package ccipevm
+
+import (
+ "context"
+ "fmt"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccipocr3"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/message_hasher"
+)
+
+var (
+ // bytes32 internal constant LEAF_DOMAIN_SEPARATOR = 0x0000000000000000000000000000000000000000000000000000000000000000;
+ leafDomainSeparator = [32]byte{}
+
+ // bytes32 internal constant ANY_2_EVM_MESSAGE_HASH = keccak256("Any2EVMMessageHashV1");
+ ANY_2_EVM_MESSAGE_HASH = utils.Keccak256Fixed([]byte("Any2EVMMessageHashV1"))
+
+ messageHasherABI = types.MustGetABI(message_hasher.MessageHasherABI)
+
+ // bytes4 public constant EVM_EXTRA_ARGS_V1_TAG = 0x97a657c9;
+ evmExtraArgsV1Tag = hexutil.MustDecode("0x97a657c9")
+
+ // bytes4 public constant EVM_EXTRA_ARGS_V2_TAG = 0x181dcf10;
+ evmExtraArgsV2Tag = hexutil.MustDecode("0x181dcf10")
+)
+
+// MessageHasherV1 implements the MessageHasher interface.
+// Compatible with:
+// - "EVM2EVMMultiOnRamp 1.6.0-dev"
+type MessageHasherV1 struct{}
+
+func NewMessageHasherV1() *MessageHasherV1 {
+ return &MessageHasherV1{}
+}
+
+// Hash implements the MessageHasher interface.
+// It constructs all of the inputs to the final keccak256 hash in Internal._hash(Any2EVMRampMessage).
+// The main structure of the hash is as follows:
+/*
+ keccak256(
+ leafDomainSeparator,
+ keccak256(any_2_evm_message_hash, header.sourceChainSelector, header.destinationChainSelector, onRamp),
+ keccak256(fixedSizeMessageFields),
+ keccak256(messageData),
+ keccak256(encodedRampTokenAmounts),
+ )
+*/
+func (h *MessageHasherV1) Hash(_ context.Context, msg cciptypes.Message) (cciptypes.Bytes32, error) {
+ var rampTokenAmounts []message_hasher.InternalRampTokenAmount
+ for _, rta := range msg.TokenAmounts {
+ rampTokenAmounts = append(rampTokenAmounts, message_hasher.InternalRampTokenAmount{
+ SourcePoolAddress: rta.SourcePoolAddress,
+ DestTokenAddress: rta.DestTokenAddress,
+ ExtraData: rta.ExtraData,
+ Amount: rta.Amount.Int,
+ })
+ }
+ encodedRampTokenAmounts, err := abiEncode("encodeTokenAmountsHashPreimage", rampTokenAmounts)
+ if err != nil {
+ return [32]byte{}, fmt.Errorf("abi encode token amounts: %w", err)
+ }
+
+ metaDataHashInput, err := abiEncode(
+ "encodeMetadataHashPreimage",
+ ANY_2_EVM_MESSAGE_HASH,
+ uint64(msg.Header.SourceChainSelector),
+ uint64(msg.Header.DestChainSelector),
+ []byte(msg.Header.OnRamp),
+ )
+ if err != nil {
+ return [32]byte{}, fmt.Errorf("abi encode metadata hash input: %w", err)
+ }
+
+ // Need to decode the extra args to get the gas limit.
+ // TODO: we assume that extra args is always abi-encoded for now, but we need
+ // to decode according to source chain selector family. We should add a family
+ // lookup API to the chain-selectors library.
+ gasLimit, err := decodeExtraArgsV1V2(msg.ExtraArgs)
+ if err != nil {
+ return [32]byte{}, fmt.Errorf("decode extra args: %w", err)
+ }
+
+ fixedSizeFieldsEncoded, err := abiEncode(
+ "encodeFixedSizeFieldsHashPreimage",
+ msg.Header.MessageID,
+ []byte(msg.Sender),
+ common.BytesToAddress(msg.Receiver),
+ uint64(msg.Header.SequenceNumber),
+ gasLimit,
+ msg.Header.Nonce,
+ )
+ if err != nil {
+ return [32]byte{}, fmt.Errorf("abi encode fixed size values: %w", err)
+ }
+
+ packedValues, err := abiEncode(
+ "encodeFinalHashPreimage",
+ leafDomainSeparator,
+ utils.Keccak256Fixed(metaDataHashInput),
+ utils.Keccak256Fixed(fixedSizeFieldsEncoded),
+ utils.Keccak256Fixed(msg.Data),
+ utils.Keccak256Fixed(encodedRampTokenAmounts),
+ )
+ if err != nil {
+ return [32]byte{}, fmt.Errorf("abi encode packed values: %w", err)
+ }
+
+ return utils.Keccak256Fixed(packedValues), nil
+}
+
+func abiEncode(method string, values ...interface{}) ([]byte, error) {
+ res, err := messageHasherABI.Pack(method, values...)
+ if err != nil {
+ return nil, err
+ }
+ // trim the method selector.
+ return res[4:], nil
+}
+
+// Interface compliance check
+var _ cciptypes.MessageHasher = (*MessageHasherV1)(nil)
diff --git a/core/capabilities/ccip/ccipevm/msghasher_test.go b/core/capabilities/ccip/ccipevm/msghasher_test.go
new file mode 100644
index 00000000000..911a10b26a5
--- /dev/null
+++ b/core/capabilities/ccip/ccipevm/msghasher_test.go
@@ -0,0 +1,189 @@
+package ccipevm
+
+import (
+ "context"
+ cryptorand "crypto/rand"
+ "fmt"
+ "math/big"
+ "math/rand"
+ "strings"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/stretchr/testify/require"
+
+ cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccipocr3"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/message_hasher"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+)
+
+// NOTE: these test cases are only EVM <-> EVM.
+// Update these cases once we have non-EVM examples.
+func TestMessageHasher_EVM2EVM(t *testing.T) {
+ ctx := testutils.Context(t)
+ d := testSetup(t)
+
+ testCases := []evmExtraArgs{
+ {version: "v1", gasLimit: big.NewInt(rand.Int63())},
+ {version: "v2", gasLimit: big.NewInt(rand.Int63()), allowOOO: false},
+ {version: "v2", gasLimit: big.NewInt(rand.Int63()), allowOOO: true},
+ }
+ for i, tc := range testCases {
+ t.Run(fmt.Sprintf("tc_%d", i), func(tt *testing.T) {
+ testHasherEVM2EVM(ctx, tt, d, tc)
+ })
+ }
+}
+
+func testHasherEVM2EVM(ctx context.Context, t *testing.T, d *testSetupData, evmExtraArgs evmExtraArgs) {
+ ccipMsg := createEVM2EVMMessage(t, d.contract, evmExtraArgs)
+
+ var tokenAmounts []message_hasher.InternalRampTokenAmount
+ for _, rta := range ccipMsg.TokenAmounts {
+ tokenAmounts = append(tokenAmounts, message_hasher.InternalRampTokenAmount{
+ SourcePoolAddress: rta.SourcePoolAddress,
+ DestTokenAddress: rta.DestTokenAddress,
+ ExtraData: rta.ExtraData[:],
+ Amount: rta.Amount.Int,
+ })
+ }
+ evmMsg := message_hasher.InternalAny2EVMRampMessage{
+ Header: message_hasher.InternalRampMessageHeader{
+ MessageId: ccipMsg.Header.MessageID,
+ SourceChainSelector: uint64(ccipMsg.Header.SourceChainSelector),
+ DestChainSelector: uint64(ccipMsg.Header.DestChainSelector),
+ SequenceNumber: uint64(ccipMsg.Header.SequenceNumber),
+ Nonce: ccipMsg.Header.Nonce,
+ },
+ Sender: ccipMsg.Sender,
+ Receiver: common.BytesToAddress(ccipMsg.Receiver),
+ GasLimit: evmExtraArgs.gasLimit,
+ Data: ccipMsg.Data,
+ TokenAmounts: tokenAmounts,
+ }
+
+ expectedHash, err := d.contract.Hash(&bind.CallOpts{Context: ctx}, evmMsg, ccipMsg.Header.OnRamp)
+ require.NoError(t, err)
+
+ evmMsgHasher := NewMessageHasherV1()
+ actualHash, err := evmMsgHasher.Hash(ctx, ccipMsg)
+ require.NoError(t, err)
+
+ require.Equal(t, fmt.Sprintf("%x", expectedHash), strings.TrimPrefix(actualHash.String(), "0x"))
+}
+
+type evmExtraArgs struct {
+ version string
+ gasLimit *big.Int
+ allowOOO bool
+}
+
+func createEVM2EVMMessage(t *testing.T, messageHasher *message_hasher.MessageHasher, evmExtraArgs evmExtraArgs) cciptypes.Message {
+ messageID := utils.RandomBytes32()
+
+ sourceTokenData := make([]byte, rand.Intn(2048))
+ _, err := cryptorand.Read(sourceTokenData)
+ require.NoError(t, err)
+
+ sourceChain := rand.Uint64()
+ seqNum := rand.Uint64()
+ nonce := rand.Uint64()
+ destChain := rand.Uint64()
+
+ var extraArgsBytes []byte
+ if evmExtraArgs.version == "v1" {
+ extraArgsBytes, err = messageHasher.EncodeEVMExtraArgsV1(nil, message_hasher.ClientEVMExtraArgsV1{
+ GasLimit: evmExtraArgs.gasLimit,
+ })
+ require.NoError(t, err)
+ } else if evmExtraArgs.version == "v2" {
+ extraArgsBytes, err = messageHasher.EncodeEVMExtraArgsV2(nil, message_hasher.ClientEVMExtraArgsV2{
+ GasLimit: evmExtraArgs.gasLimit,
+ AllowOutOfOrderExecution: evmExtraArgs.allowOOO,
+ })
+ require.NoError(t, err)
+ } else {
+ require.FailNowf(t, "unknown extra args version", "version: %s", evmExtraArgs.version)
+ }
+
+ messageData := make([]byte, rand.Intn(2048))
+ _, err = cryptorand.Read(messageData)
+ require.NoError(t, err)
+
+ numTokens := rand.Intn(10)
+ var sourceTokenDatas [][]byte
+ for i := 0; i < numTokens; i++ {
+ sourceTokenDatas = append(sourceTokenDatas, sourceTokenData)
+ }
+
+ var tokenAmounts []cciptypes.RampTokenAmount
+ for i := 0; i < len(sourceTokenDatas); i++ {
+ extraData := utils.RandomBytes32()
+ tokenAmounts = append(tokenAmounts, cciptypes.RampTokenAmount{
+ SourcePoolAddress: abiEncodedAddress(t),
+ DestTokenAddress: abiEncodedAddress(t),
+ ExtraData: extraData[:],
+ Amount: cciptypes.NewBigInt(big.NewInt(0).SetUint64(rand.Uint64())),
+ })
+ }
+
+ return cciptypes.Message{
+ Header: cciptypes.RampMessageHeader{
+ MessageID: messageID,
+ SourceChainSelector: cciptypes.ChainSelector(sourceChain),
+ DestChainSelector: cciptypes.ChainSelector(destChain),
+ SequenceNumber: cciptypes.SeqNum(seqNum),
+ Nonce: nonce,
+ OnRamp: abiEncodedAddress(t),
+ },
+ Sender: abiEncodedAddress(t),
+ Receiver: abiEncodedAddress(t),
+ Data: messageData,
+ TokenAmounts: tokenAmounts,
+ FeeToken: abiEncodedAddress(t),
+ FeeTokenAmount: cciptypes.NewBigInt(big.NewInt(0).SetUint64(rand.Uint64())),
+ ExtraArgs: extraArgsBytes,
+ }
+}
+
+func abiEncodedAddress(t *testing.T) []byte {
+ addr := utils.RandomAddress()
+ encoded, err := utils.ABIEncode(`[{"type": "address"}]`, addr)
+ require.NoError(t, err)
+ return encoded
+}
+
+type testSetupData struct {
+ contractAddr common.Address
+ contract *message_hasher.MessageHasher
+ sb *backends.SimulatedBackend
+ auth *bind.TransactOpts
+}
+
+func testSetup(t *testing.T) *testSetupData {
+ transactor := testutils.MustNewSimTransactor(t)
+ simulatedBackend := backends.NewSimulatedBackend(core.GenesisAlloc{
+ transactor.From: {Balance: assets.Ether(1000).ToInt()},
+ }, 30e6)
+
+ // Deploy the contract
+ address, _, _, err := message_hasher.DeployMessageHasher(transactor, simulatedBackend)
+ require.NoError(t, err)
+ simulatedBackend.Commit()
+
+ // Setup contract client
+ contract, err := message_hasher.NewMessageHasher(address, simulatedBackend)
+ require.NoError(t, err)
+
+ return &testSetupData{
+ contractAddr: address,
+ contract: contract,
+ sb: simulatedBackend,
+ auth: transactor,
+ }
+}
diff --git a/core/capabilities/ccip/common/common.go b/core/capabilities/ccip/common/common.go
new file mode 100644
index 00000000000..6409345ed93
--- /dev/null
+++ b/core/capabilities/ccip/common/common.go
@@ -0,0 +1,23 @@
+package common
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/crypto"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+)
+
+// HashedCapabilityID returns the hashed capability id in a manner equivalent to the capability registry.
+func HashedCapabilityID(capabilityLabelledName, capabilityVersion string) (r [32]byte, err error) {
+ // TODO: investigate how to avoid parsing the ABI everytime.
+ tabi := `[{"type": "string"}, {"type": "string"}]`
+ abiEncoded, err := utils.ABIEncode(tabi, capabilityLabelledName, capabilityVersion)
+ if err != nil {
+ return r, fmt.Errorf("failed to ABI encode capability version and labelled name: %w", err)
+ }
+
+ h := crypto.Keccak256(abiEncoded)
+ copy(r[:], h)
+ return r, nil
+}
diff --git a/core/capabilities/ccip/common/common_test.go b/core/capabilities/ccip/common/common_test.go
new file mode 100644
index 00000000000..a7484a83ad9
--- /dev/null
+++ b/core/capabilities/ccip/common/common_test.go
@@ -0,0 +1,51 @@
+package common_test
+
+import (
+ "testing"
+
+ capcommon "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/common"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
+ kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+)
+
+func Test_HashedCapabilityId(t *testing.T) {
+ transactor := testutils.MustNewSimTransactor(t)
+ sb := backends.NewSimulatedBackend(core.GenesisAlloc{
+ transactor.From: {Balance: assets.Ether(1000).ToInt()},
+ }, 30e6)
+
+ crAddress, _, _, err := kcr.DeployCapabilitiesRegistry(transactor, sb)
+ require.NoError(t, err)
+ sb.Commit()
+
+ cr, err := kcr.NewCapabilitiesRegistry(crAddress, sb)
+ require.NoError(t, err)
+
+ // add a capability, ignore cap config for simplicity.
+ _, err = cr.AddCapabilities(transactor, []kcr.CapabilitiesRegistryCapability{
+ {
+ LabelledName: "ccip",
+ Version: "v1.0.0",
+ CapabilityType: 0,
+ ResponseType: 0,
+ ConfigurationContract: common.Address{},
+ },
+ })
+ require.NoError(t, err)
+ sb.Commit()
+
+ hidExpected, err := cr.GetHashedCapabilityId(nil, "ccip", "v1.0.0")
+ require.NoError(t, err)
+
+ hid, err := capcommon.HashedCapabilityID("ccip", "v1.0.0")
+ require.NoError(t, err)
+
+ require.Equal(t, hidExpected, hid)
+}
diff --git a/core/capabilities/ccip/configs/evm/chain_writer.go b/core/capabilities/ccip/configs/evm/chain_writer.go
new file mode 100644
index 00000000000..6d3b73c6f5c
--- /dev/null
+++ b/core/capabilities/ccip/configs/evm/chain_writer.go
@@ -0,0 +1,75 @@
+package evm
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/smartcontractkit/chainlink-ccip/pkg/consts"
+ "github.com/smartcontractkit/chainlink/v2/common/txmgr"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_multi_offramp"
+ evmrelaytypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types"
+)
+
+var (
+ offrampABI = evmtypes.MustGetABI(evm_2_evm_multi_offramp.EVM2EVMMultiOffRampABI)
+)
+
+func MustChainWriterConfig(
+ fromAddress common.Address,
+ maxGasPrice *assets.Wei,
+ commitGasLimit,
+ execBatchGasLimit uint64,
+) []byte {
+ rawConfig := ChainWriterConfigRaw(fromAddress, maxGasPrice, commitGasLimit, execBatchGasLimit)
+ encoded, err := json.Marshal(rawConfig)
+ if err != nil {
+ panic(fmt.Errorf("failed to marshal ChainWriterConfig: %w", err))
+ }
+
+ return encoded
+}
+
+// ChainWriterConfigRaw returns a ChainWriterConfig that can be used to transmit commit and execute reports.
+func ChainWriterConfigRaw(
+ fromAddress common.Address,
+ maxGasPrice *assets.Wei,
+ commitGasLimit,
+ execBatchGasLimit uint64,
+) evmrelaytypes.ChainWriterConfig {
+ return evmrelaytypes.ChainWriterConfig{
+ Contracts: map[string]*evmrelaytypes.ContractConfig{
+ consts.ContractNameOffRamp: {
+ ContractABI: evm_2_evm_multi_offramp.EVM2EVMMultiOffRampABI,
+ Configs: map[string]*evmrelaytypes.ChainWriterDefinition{
+ consts.MethodCommit: {
+ ChainSpecificName: mustGetMethodName("commit", offrampABI),
+ FromAddress: fromAddress,
+ GasLimit: commitGasLimit,
+ },
+ consts.MethodExecute: {
+ ChainSpecificName: mustGetMethodName("execute", offrampABI),
+ FromAddress: fromAddress,
+ GasLimit: execBatchGasLimit,
+ },
+ },
+ },
+ },
+ SendStrategy: txmgr.NewSendEveryStrategy(),
+ MaxGasPrice: maxGasPrice,
+ }
+}
+
+// mustGetMethodName panics if the method name is not found in the provided ABI.
+func mustGetMethodName(name string, tabi abi.ABI) (methodName string) {
+ m, ok := tabi.Methods[name]
+ if !ok {
+ panic(fmt.Sprintf("missing method %s in the abi", name))
+ }
+ return m.Name
+}
diff --git a/core/capabilities/ccip/configs/evm/contract_reader.go b/core/capabilities/ccip/configs/evm/contract_reader.go
new file mode 100644
index 00000000000..085729690d5
--- /dev/null
+++ b/core/capabilities/ccip/configs/evm/contract_reader.go
@@ -0,0 +1,219 @@
+package evm
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+
+ "github.com/smartcontractkit/chainlink-ccip/pkg/consts"
+
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/ccip_config"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_multi_offramp"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_multi_onramp"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry"
+ kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry"
+ evmrelaytypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types"
+)
+
+var (
+ onrampABI = evmtypes.MustGetABI(evm_2_evm_multi_onramp.EVM2EVMMultiOnRampABI)
+ capabilitiesRegsitryABI = evmtypes.MustGetABI(kcr.CapabilitiesRegistryABI)
+ ccipConfigABI = evmtypes.MustGetABI(ccip_config.CCIPConfigABI)
+ priceRegistryABI = evmtypes.MustGetABI(price_registry.PriceRegistryABI)
+)
+
+// MustSourceReaderConfig returns a ChainReaderConfig that can be used to read from the onramp.
+// The configuration is marshaled into JSON so that it can be passed to the relayer NewContractReader() method.
+func MustSourceReaderConfig() []byte {
+ rawConfig := SourceReaderConfig()
+ encoded, err := json.Marshal(rawConfig)
+ if err != nil {
+ panic(fmt.Errorf("failed to marshal ChainReaderConfig into JSON: %w", err))
+ }
+
+ return encoded
+}
+
+// MustDestReaderConfig returns a ChainReaderConfig that can be used to read from the offramp.
+// The configuration is marshaled into JSON so that it can be passed to the relayer NewContractReader() method.
+func MustDestReaderConfig() []byte {
+ rawConfig := DestReaderConfig()
+ encoded, err := json.Marshal(rawConfig)
+ if err != nil {
+ panic(fmt.Errorf("failed to marshal ChainReaderConfig into JSON: %w", err))
+ }
+
+ return encoded
+}
+
+// DestReaderConfig returns a ChainReaderConfig that can be used to read from the offramp.
+func DestReaderConfig() evmrelaytypes.ChainReaderConfig {
+ return evmrelaytypes.ChainReaderConfig{
+ Contracts: map[string]evmrelaytypes.ChainContractReader{
+ consts.ContractNameOffRamp: {
+ ContractABI: evm_2_evm_multi_offramp.EVM2EVMMultiOffRampABI,
+ ContractPollingFilter: evmrelaytypes.ContractPollingFilter{
+ GenericEventNames: []string{
+ mustGetEventName(consts.EventNameExecutionStateChanged, offrampABI),
+ mustGetEventName(consts.EventNameCommitReportAccepted, offrampABI),
+ },
+ },
+ Configs: map[string]*evmrelaytypes.ChainReaderDefinition{
+ consts.MethodNameGetExecutionState: {
+ ChainSpecificName: mustGetMethodName("getExecutionState", offrampABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ consts.MethodNameGetMerkleRoot: {
+ ChainSpecificName: mustGetMethodName("getMerkleRoot", offrampABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ consts.MethodNameIsBlessed: {
+ ChainSpecificName: mustGetMethodName("isBlessed", offrampABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ consts.MethodNameGetLatestPriceSequenceNumber: {
+ ChainSpecificName: mustGetMethodName("getLatestPriceSequenceNumber", offrampABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ consts.MethodNameOfframpGetStaticConfig: {
+ ChainSpecificName: mustGetMethodName("getStaticConfig", offrampABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ consts.MethodNameOfframpGetDynamicConfig: {
+ ChainSpecificName: mustGetMethodName("getDynamicConfig", offrampABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ consts.MethodNameGetSourceChainConfig: {
+ ChainSpecificName: mustGetMethodName("getSourceChainConfig", offrampABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ consts.EventNameCommitReportAccepted: {
+ ChainSpecificName: mustGetEventName(consts.EventNameCommitReportAccepted, offrampABI),
+ ReadType: evmrelaytypes.Event,
+ },
+ consts.EventNameExecutionStateChanged: {
+ ChainSpecificName: mustGetEventName(consts.EventNameExecutionStateChanged, offrampABI),
+ ReadType: evmrelaytypes.Event,
+ },
+ },
+ },
+ },
+ }
+}
+
+// SourceReaderConfig returns a ChainReaderConfig that can be used to read from the onramp.
+func SourceReaderConfig() evmrelaytypes.ChainReaderConfig {
+ return evmrelaytypes.ChainReaderConfig{
+ Contracts: map[string]evmrelaytypes.ChainContractReader{
+ consts.ContractNameOnRamp: {
+ ContractABI: evm_2_evm_multi_onramp.EVM2EVMMultiOnRampABI,
+ ContractPollingFilter: evmrelaytypes.ContractPollingFilter{
+ GenericEventNames: []string{
+ mustGetEventName(consts.EventNameCCIPSendRequested, onrampABI),
+ },
+ },
+ Configs: map[string]*evmrelaytypes.ChainReaderDefinition{
+ // all "{external|public} view" functions in the onramp except for getFee and getPoolBySourceToken are here.
+ // getFee is not expected to get called offchain and is only called by end-user contracts.
+ consts.MethodNameGetExpectedNextSequenceNumber: {
+ ChainSpecificName: mustGetMethodName("getExpectedNextSequenceNumber", onrampABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ consts.MethodNameOnrampGetStaticConfig: {
+ ChainSpecificName: mustGetMethodName("getStaticConfig", onrampABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ consts.MethodNameOnrampGetDynamicConfig: {
+ ChainSpecificName: mustGetMethodName("getDynamicConfig", onrampABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ consts.EventNameCCIPSendRequested: {
+ ChainSpecificName: mustGetEventName(consts.EventNameCCIPSendRequested, onrampABI),
+ ReadType: evmrelaytypes.Event,
+ EventDefinitions: &evmrelaytypes.EventDefinitions{
+ GenericDataWordNames: map[string]uint8{
+ consts.EventAttributeSequenceNumber: 5,
+ },
+ },
+ },
+ },
+ },
+ consts.ContractNamePriceRegistry: {
+ ContractABI: price_registry.PriceRegistryABI,
+ Configs: map[string]*evmrelaytypes.ChainReaderDefinition{
+ // TODO: update with the consts from https://github.com/smartcontractkit/chainlink-ccip/pull/39
+ // in a followup.
+ "GetStaticConfig": {
+ ChainSpecificName: mustGetMethodName("getStaticConfig", priceRegistryABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ "GetDestChainConfig": {
+ ChainSpecificName: mustGetMethodName("getDestChainConfig", priceRegistryABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ "GetPremiumMultiplierWeiPerEth": {
+ ChainSpecificName: mustGetMethodName("getPremiumMultiplierWeiPerEth", priceRegistryABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ "GetTokenTransferFeeConfig": {
+ ChainSpecificName: mustGetMethodName("getTokenTransferFeeConfig", priceRegistryABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ "ProcessMessageArgs": {
+ ChainSpecificName: mustGetMethodName("processMessageArgs", priceRegistryABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ "ValidatePoolReturnData": {
+ ChainSpecificName: mustGetMethodName("validatePoolReturnData", priceRegistryABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ "GetValidatedTokenPrice": {
+ ChainSpecificName: mustGetMethodName("getValidatedTokenPrice", priceRegistryABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ "GetFeeTokens": {
+ ChainSpecificName: mustGetMethodName("getFeeTokens", priceRegistryABI),
+ ReadType: evmrelaytypes.Method,
+ },
+ },
+ },
+ },
+ }
+}
+
+// HomeChainReaderConfigRaw returns a ChainReaderConfig that can be used to read from the home chain.
+func HomeChainReaderConfigRaw() evmrelaytypes.ChainReaderConfig {
+ return evmrelaytypes.ChainReaderConfig{
+ Contracts: map[string]evmrelaytypes.ChainContractReader{
+ consts.ContractNameCapabilitiesRegistry: {
+ ContractABI: kcr.CapabilitiesRegistryABI,
+ Configs: map[string]*evmrelaytypes.ChainReaderDefinition{
+ consts.MethodNameGetCapability: {
+ ChainSpecificName: mustGetMethodName("getCapability", capabilitiesRegsitryABI),
+ },
+ },
+ },
+ consts.ContractNameCCIPConfig: {
+ ContractABI: ccip_config.CCIPConfigABI,
+ Configs: map[string]*evmrelaytypes.ChainReaderDefinition{
+ consts.MethodNameGetAllChainConfigs: {
+ ChainSpecificName: mustGetMethodName("getAllChainConfigs", ccipConfigABI),
+ },
+ consts.MethodNameGetOCRConfig: {
+ ChainSpecificName: mustGetMethodName("getOCRConfig", ccipConfigABI),
+ },
+ },
+ },
+ },
+ }
+}
+
+func mustGetEventName(event string, tabi abi.ABI) string {
+	e, ok := tabi.Events[event]
+	if !ok {
+		panic(fmt.Sprintf("missing event %s in the abi", event))
+	}
+	return e.Name
+}
diff --git a/core/capabilities/ccip/delegate.go b/core/capabilities/ccip/delegate.go
new file mode 100644
index 00000000000..c9974d62e99
--- /dev/null
+++ b/core/capabilities/ccip/delegate.go
@@ -0,0 +1,321 @@
+package ccip
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/loop"
+ "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/common"
+ configsevm "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/configs/evm"
+ "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/launcher"
+ "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/oraclecreator"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
+ p2ptypes "github.com/smartcontractkit/chainlink/v2/core/services/p2p/types"
+ "github.com/smartcontractkit/chainlink/v2/core/services/registrysyncer"
+
+ ragep2ptypes "github.com/smartcontractkit/libocr/ragep2p/types"
+
+ "github.com/smartcontractkit/chainlink-ccip/pkg/consts"
+ ccipreaderpkg "github.com/smartcontractkit/chainlink-ccip/pkg/reader"
+ "github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
+ "github.com/smartcontractkit/chainlink-common/pkg/types"
+ "github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm"
+ "github.com/smartcontractkit/chainlink/v2/core/config"
+ kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2"
+ "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
+ "github.com/smartcontractkit/chainlink/v2/core/services/telemetry"
+ "github.com/smartcontractkit/chainlink/v2/plugins"
+)
+
+// RelayGetter resolves loop.Relayer instances by relay ID.
+type RelayGetter interface {
+	// Get returns the relayer registered under the given relay ID.
+	Get(types.RelayID) (loop.Relayer, error)
+	// GetIDToRelayerMap returns all known relayers keyed by relay ID.
+	GetIDToRelayerMap() (map[types.RelayID]loop.Relayer, error)
+}
+
+// Delegate implements job.Delegate for CCIP capability jobs. It assembles the
+// registry syncer, home chain reader, and capability launcher services for a
+// CCIP job spec.
+type Delegate struct {
+	lggr                  logger.Logger
+	registrarConfig       plugins.RegistrarConfig
+	pipelineRunner        pipeline.Runner
+	chains                legacyevm.LegacyChainContainer
+	relayers              RelayGetter
+	keystore              keystore.Master
+	ds                    sqlutil.DataSource
+	peerWrapper           *ocrcommon.SingletonPeerWrapper
+	monitoringEndpointGen telemetry.MonitoringEndpointGenerator
+	capabilityConfig      config.Capabilities
+
+	// isNewlyCreatedJob is false until BeforeJobCreated is invoked, i.e. it is
+	// true only for the first creation of the job, not for restarts.
+	isNewlyCreatedJob bool
+}
+
+// NewDelegate constructs a CCIP job delegate wired with the given
+// dependencies. isNewlyCreatedJob is left at its zero value (false) and is
+// flipped by BeforeJobCreated on first job creation.
+func NewDelegate(
+	lggr logger.Logger,
+	registrarConfig plugins.RegistrarConfig,
+	pipelineRunner pipeline.Runner,
+	chains legacyevm.LegacyChainContainer,
+	relayers RelayGetter,
+	keystore keystore.Master,
+	ds sqlutil.DataSource,
+	peerWrapper *ocrcommon.SingletonPeerWrapper,
+	monitoringEndpointGen telemetry.MonitoringEndpointGenerator,
+	capabilityConfig config.Capabilities,
+) *Delegate {
+	// Fields are listed in struct-declaration order for readability.
+	d := &Delegate{
+		lggr:                  lggr,
+		registrarConfig:       registrarConfig,
+		pipelineRunner:        pipelineRunner,
+		chains:                chains,
+		relayers:              relayers,
+		keystore:              keystore,
+		ds:                    ds,
+		peerWrapper:           peerWrapper,
+		monitoringEndpointGen: monitoringEndpointGen,
+		capabilityConfig:      capabilityConfig,
+	}
+	return d
+}
+
+// JobType identifies this delegate as handling CCIP job specs.
+func (d *Delegate) JobType() job.Type {
+	return job.CCIP
+}
+
+// BeforeJobCreated marks the delegate as serving a newly created job.
+func (d *Delegate) BeforeJobCreated(job.Job) {
+	// This is only called first time the job is created
+	d.isNewlyCreatedJob = true
+}
+
+// ServicesForSpec builds the services needed to run a CCIP capability job:
+// a registry syncer (watches the capabilities registry), a home chain reader,
+// and the capability launcher that spins OCR instances up and down. The
+// returned services are started/stopped by the job framework in order.
+func (d *Delegate) ServicesForSpec(ctx context.Context, spec job.Job) (services []job.ServiceCtx, err error) {
+	// In general there should only be one P2P key but the node may have multiple.
+	// The job spec should specify the correct P2P key to use.
+	peerID, err := p2pkey.MakePeerID(spec.CCIPSpec.P2PKeyID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to make peer ID from provided spec p2p id (%s): %w", spec.CCIPSpec.P2PKeyID, err)
+	}
+
+	p2pID, err := d.keystore.P2P().Get(peerID)
+	if err != nil {
+		// Previous message said "failed to get all p2p keys" although only the
+		// single key for peerID is fetched here.
+		return nil, fmt.Errorf("failed to get p2p key for peer ID %s: %w", peerID, err)
+	}
+
+	// The capabilities registry lives on the "external registry" chain from config.
+	cfg := d.capabilityConfig
+	rid := cfg.ExternalRegistry().RelayID()
+	relayer, err := d.relayers.Get(rid)
+	if err != nil {
+		return nil, fmt.Errorf("could not fetch relayer %s configured for capabilities registry: %w", rid, err)
+	}
+	registrySyncer, err := registrysyncer.New(
+		d.lggr,
+		func() (p2ptypes.PeerID, error) {
+			return p2ptypes.PeerID(p2pID.PeerID()), nil
+		},
+		relayer,
+		cfg.ExternalRegistry().Address(),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("could not configure syncer: %w", err)
+	}
+
+	ocrKeys, err := d.getOCRKeys(spec.CCIPSpec.OCRKeyBundleIDs)
+	if err != nil {
+		return nil, err
+	}
+
+	transmitterKeys, err := d.getTransmitterKeys(ctx, d.chains)
+	if err != nil {
+		return nil, err
+	}
+
+	bootstrapperLocators, err := ocrcommon.ParseBootstrapPeers(spec.CCIPSpec.P2PV2Bootstrappers)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse bootstrapper locators: %w", err)
+	}
+
+	// NOTE: we can use the same DB for all plugin instances,
+	// since all queries are scoped by config digest.
+	ocrDB := ocr2.NewDB(d.ds, spec.ID, 0, d.lggr)
+
+	homeChainContractReader, err := d.getHomeChainContractReader(
+		ctx,
+		d.chains,
+		spec.CCIPSpec.CapabilityLabelledName,
+		spec.CCIPSpec.CapabilityVersion)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get home chain contract reader: %w", err)
+	}
+
+	// Polls the home chain configuration every 100ms.
+	hcr := ccipreaderpkg.NewHomeChainReader(
+		homeChainContractReader,
+		d.lggr.Named("HomeChainReader"),
+		100*time.Millisecond,
+	)
+
+	oracleCreator := oraclecreator.New(
+		ocrKeys,
+		transmitterKeys,
+		d.chains,
+		d.peerWrapper,
+		spec.ExternalJobID,
+		spec.ID,
+		d.isNewlyCreatedJob,
+		spec.CCIPSpec.PluginConfig,
+		ocrDB,
+		d.lggr,
+		d.monitoringEndpointGen,
+		bootstrapperLocators,
+		hcr,
+	)
+
+	// Capability IDs follow the "<name>@<version>" convention.
+	capabilityID := fmt.Sprintf("%s@%s", spec.CCIPSpec.CapabilityLabelledName, spec.CCIPSpec.CapabilityVersion)
+	capLauncher := launcher.New(
+		capabilityID,
+		ragep2ptypes.PeerID(p2pID.PeerID()),
+		d.lggr,
+		hcr,
+		oracleCreator,
+		12*time.Second,
+	)
+
+	// register the capability launcher with the registry syncer
+	registrySyncer.AddLauncher(capLauncher)
+
+	return []job.ServiceCtx{
+		registrySyncer,
+		hcr,
+		capLauncher,
+	}, nil
+}
+
+// AfterJobCreated is a no-op for CCIP jobs.
+func (d *Delegate) AfterJobCreated(spec job.Job) {}
+
+// BeforeJobDeleted is a no-op for CCIP jobs.
+func (d *Delegate) BeforeJobDeleted(spec job.Job) {}
+
+// OnDeleteJob is invoked when the job is deleted. It currently performs no
+// cleanup; service shutdown is handled by the job framework stopping the
+// services returned from ServicesForSpec.
+func (d *Delegate) OnDeleteJob(ctx context.Context, spec job.Job) error {
+	// TODO: shut down needed services?
+	return nil
+}
+
+// getOCRKeys resolves the OCR2 key bundles referenced in the job spec's
+// OCRKeyBundleIDs map (network type -> bundle ID). Only the EVM network type
+// is supported; any other network type results in an error.
+func (d *Delegate) getOCRKeys(ocrKeyBundleIDs job.JSONConfig) (map[string]ocr2key.KeyBundle, error) {
+	result := make(map[string]ocr2key.KeyBundle)
+	for networkType, rawID := range ocrKeyBundleIDs {
+		if networkType != relay.NetworkEVM {
+			return nil, fmt.Errorf("unsupported chain type: %s", networkType)
+		}
+
+		id, isString := rawID.(string)
+		if !isString {
+			return nil, fmt.Errorf("OCRKeyBundleIDs must be a map of chain types to OCR key bundle IDs, got: %T", rawID)
+		}
+
+		kb, lookupErr := d.keystore.OCR2().Get(id)
+		if lookupErr != nil {
+			return nil, fmt.Errorf("OCR key bundle with ID %s not found: %w", id, lookupErr)
+		}
+
+		result[networkType] = kb
+	}
+	return result, nil
+}
+
+func (d *Delegate) getTransmitterKeys(ctx context.Context, chains legacyevm.LegacyChainContainer) (map[types.RelayID][]string, error) {
+ transmitterKeys := make(map[types.RelayID][]string)
+ for _, chain := range chains.Slice() {
+ relayID := types.NewRelayID(relay.NetworkEVM, chain.ID().String())
+ ethKeys, err2 := d.keystore.Eth().EnabledAddressesForChain(ctx, chain.ID())
+ if err2 != nil {
+ return nil, fmt.Errorf("error getting enabled addresses for chain: %s %w", chain.ID().String(), err2)
+ }
+
+ transmitterKeys[relayID] = func() (r []string) {
+ for _, key := range ethKeys {
+ r = append(r, key.Hex())
+ }
+ return
+ }()
+ }
+ return transmitterKeys, nil
+}
+
+// getHomeChainContractReader constructs a contract reader for the home chain
+// (where the capabilities registry is deployed, per toml config) and binds it
+// to the capabilities registry and the CCIP config contract for the given
+// capability (labelled name, version) pair.
+func (d *Delegate) getHomeChainContractReader(
+	ctx context.Context,
+	chains legacyevm.LegacyChainContainer,
+	capabilityLabelledName,
+	capabilityVersion string,
+) (types.ContractReader, error) {
+	// home chain is where the capability registry is deployed,
+	// which should be set correctly in toml config.
+	homeChainRelayID := d.capabilityConfig.ExternalRegistry().RelayID()
+	homeChain, err := chains.Get(homeChainRelayID.ChainID)
+	if err != nil {
+		return nil, fmt.Errorf("home chain relayer not found, chain id: %s, err: %w", homeChainRelayID.String(), err)
+	}
+
+	// Use the caller's ctx (previously context.Background() was passed here,
+	// which ignored caller cancellation).
+	reader, err := evm.NewChainReaderService(
+		ctx,
+		d.lggr,
+		homeChain.LogPoller(),
+		homeChain.HeadTracker(),
+		homeChain.Client(),
+		configsevm.HomeChainReaderConfigRaw(),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create home chain contract reader: %w", err)
+	}
+
+	reader, err = bindReader(ctx, reader, d.capabilityConfig.ExternalRegistry().Address(), capabilityLabelledName, capabilityVersion)
+	if err != nil {
+		return nil, fmt.Errorf("failed to bind home chain contract reader: %w", err)
+	}
+
+	return reader, nil
+}
+
+// bindReader binds the reader to the capabilities registry at capRegAddress,
+// looks up the CCIP capability (labelled name, version) in that registry to
+// discover its configuration contract, and finally binds the reader to that
+// CCIP config contract as well. The returned reader can then serve both
+// registry and CCIP-config reads.
+func bindReader(ctx context.Context,
+	reader types.ContractReader,
+	capRegAddress,
+	capabilityLabelledName,
+	capabilityVersion string) (types.ContractReader, error) {
+	err := reader.Bind(ctx, []types.BoundContract{
+		{
+			Address: capRegAddress,
+			Name:    consts.ContractNameCapabilitiesRegistry,
+		},
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to bind home chain contract reader: %w", err)
+	}
+
+	// The registry keys capabilities by a hash of (labelled name, version).
+	hid, err := common.HashedCapabilityID(capabilityLabelledName, capabilityVersion)
+	if err != nil {
+		return nil, fmt.Errorf("failed to hash capability id: %w", err)
+	}
+
+	var ccipCapabilityInfo kcr.CapabilitiesRegistryCapabilityInfo
+	err = reader.GetLatestValue(ctx, consts.ContractNameCapabilitiesRegistry, consts.MethodNameGetCapability, primitives.Unconfirmed, map[string]any{
+		"hashedId": hid,
+	}, &ccipCapabilityInfo)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get CCIP capability info from chain reader: %w", err)
+	}
+
+	// bind the ccip capability configuration contract
+	err = reader.Bind(ctx, []types.BoundContract{
+		{
+			Address: ccipCapabilityInfo.ConfigurationContract.String(),
+			Name:    consts.ContractNameCCIPConfig,
+		},
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to bind CCIP capability configuration contract: %w", err)
+	}
+
+	return reader, nil
+}
diff --git a/core/capabilities/ccip/delegate_test.go b/core/capabilities/ccip/delegate_test.go
new file mode 100644
index 00000000000..dd8a5124b57
--- /dev/null
+++ b/core/capabilities/ccip/delegate_test.go
@@ -0,0 +1 @@
+package ccip
diff --git a/core/capabilities/ccip/launcher/README.md b/core/capabilities/ccip/launcher/README.md
new file mode 100644
index 00000000000..41fbecfdbd8
--- /dev/null
+++ b/core/capabilities/ccip/launcher/README.md
@@ -0,0 +1,69 @@
+# CCIP Capability Launcher
+
+The CCIP capability launcher is responsible for listening to
+[Capabilities Registry](../../../../contracts/src/v0.8/keystone/CapabilitiesRegistry.sol) (CR) updates
+for the particular CCIP capability (labelled name, version) pair and reacting to them. In
+particular, there are three kinds of events that would affect a particular capability:
+
+1. DON Creation: when `addDON` is called on the CR, the capabilities of this new DON are specified.
+If CCIP is one of those capabilities, the launcher will launch a commit and an execution plugin
+with the OCR configuration specified in the DON creation process. See
+[Types.sol](../../../../contracts/src/v0.8/ccip/capability/libraries/Types.sol) for more details
+on what the OCR configuration contains.
+2. DON update: when `updateDON` is called on the CR, capabilities of the DON can be updated. In the
+CCIP use case specifically, `updateDON` is used to update OCR configuration of that DON. Updates
+follow the blue/green deployment pattern (explained in detail below with a state diagram). In this
+scenario the launcher must either launch brand new instances of the commit and execution plugins
+(in the event a green deployment is made) or promote the currently running green instance to be
+the blue instance.
+3. DON deletion: when `deleteDON` is called on the CR, the launcher must shut down all running plugins
+related to that DON. When a DON is deleted it effectively means that it should no longer function.
+DON deletion is permanent.
+
+## Architecture Diagram
+
+![CCIP Capability Launcher](ccip_capability_launcher.png)
+
+The above diagram shows how the CCIP capability launcher interacts with the rest of the components
+in the CCIP system.
+
+The CCIP capability job, which is created on the Chainlink node, will spin up the CCIP capability
+launcher alongside the home chain reader, which reads the [CCIPConfig.sol](../../../../contracts/src/v0.8/ccip/capability/CCIPConfig.sol)
+contract deployed on the home chain (typically Ethereum Mainnet, though could be "any chain" in theory).
+
+Injected into the launcher is the [OracleCreator](../types/types.go) object which knows how to spin up CCIP
+oracles (both bootstrap and plugin oracles). This is used by the launcher at the appropriate time in order
+to create oracle instances but not start them right away.
+
+After all the required oracles have been created, the launcher will start and shut them down as required
+in order to match the configuration that was posted on-chain in the CR and the CCIPConfig.sol contract.
+
+
+## Config State Diagram
+
+![CCIP Config State Machine](ccip_config_state_machine.png)
+
+CCIP's blue/green deployment paradigm is intentionally kept as simple as possible.
+
+Every CCIP DON starts in the `Init` state. Upon DON creation, which must provide a valid OCR
+configuration, the CCIP DON will move into the `Running` state. In this state, the DON is
+presumed to be fully functional from a configuration standpoint.
+
+When we want to update configuration, we propose a new configuration to the CR that consists of
+an array of two OCR configurations:
+
+1. The first element of the array is the current OCR configuration that is running (termed "blue").
+2. The second element of the array is the future OCR configuration that we want to run (termed "green").
+
+Various checks are done on-chain in order to validate this particular state transition, in particular,
+related to config counts. Doing this will move the state of the configuration to the `Staging` state.
+
+In the `Staging` state, there are effectively four plugins running - one (commit, execution) pair for the
+blue configuration, and one (commit, execution) pair for the green configuration. However, only the blue
+configuration will actually be writing on-chain, whereas the green configuration will be "dry running",
+i.e. doing everything except transmitting.
+
+This allows us to test out new configurations without committing to them immediately.
+
+Finally, from the `Staging` state, there is only one transition, which is to promote the green configuration
+to be the new blue configuration, and go back into the `Running` state.
diff --git a/core/capabilities/ccip/launcher/bluegreen.go b/core/capabilities/ccip/launcher/bluegreen.go
new file mode 100644
index 00000000000..62458466291
--- /dev/null
+++ b/core/capabilities/ccip/launcher/bluegreen.go
@@ -0,0 +1,178 @@
+package launcher
+
+import (
+ "fmt"
+
+ cctypes "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
+
+ "go.uber.org/multierr"
+
+ ccipreaderpkg "github.com/smartcontractkit/chainlink-ccip/pkg/reader"
+)
+
+// blueGreenDeployment represents a blue-green deployment of OCR instances
+// for a single plugin (commit or exec). The blue instance is the live,
+// transmitting configuration; the green instance, when present, is the
+// candidate configuration being staged.
+type blueGreenDeployment struct {
+	// blue is the blue OCR instance.
+	// blue must always be present.
+	blue cctypes.CCIPOracle
+
+	// bootstrapBlue is the bootstrap node of the blue OCR instance.
+	// Only a subset of the DON will be running bootstrap instances,
+	// so this may be nil.
+	bootstrapBlue cctypes.CCIPOracle
+
+	// green is the green OCR instance.
+	// green may or may not be present.
+	// green must never be present if blue is not present.
+	// TODO: should we enforce this invariant somehow?
+	green cctypes.CCIPOracle
+
+	// bootstrapGreen is the bootstrap node of the green OCR instance.
+	// Only a subset of the DON will be running bootstrap instances,
+	// so this may be nil, even when green is not nil.
+	bootstrapGreen cctypes.CCIPOracle
+}
+
+// ccipDeployment represents blue-green deployments of both commit and exec
+// OCR instances for a single CCIP DON.
+type ccipDeployment struct {
+	commit blueGreenDeployment
+	exec   blueGreenDeployment
+}
+
+// Close shuts down all OCR instances in the deployment, aggregating any
+// shutdown errors with multierr. Blue instances are always closed (they must
+// always be present); green and bootstrap instances are closed only when set.
+// Shutdown order: commit pair first, then exec pair.
+func (c *ccipDeployment) Close() error {
+	var err error
+
+	for _, bg := range []blueGreenDeployment{c.commit, c.exec} {
+		err = multierr.Append(err, bg.blue.Close())
+		if bg.bootstrapBlue != nil {
+			err = multierr.Append(err, bg.bootstrapBlue.Close())
+		}
+		if bg.green != nil {
+			err = multierr.Append(err, bg.green.Close())
+		}
+		if bg.bootstrapGreen != nil {
+			err = multierr.Append(err, bg.bootstrapGreen.Close())
+		}
+	}
+
+	return err
+}
+
+// StartBlue starts the blue OCR instances (commit first, then exec),
+// including their bootstrap counterparts when present. Errors are aggregated
+// with multierr rather than aborting on the first failure.
+func (c *ccipDeployment) StartBlue() error {
+	var err error
+
+	for _, bg := range []blueGreenDeployment{c.commit, c.exec} {
+		err = multierr.Append(err, bg.blue.Start())
+		if bg.bootstrapBlue != nil {
+			err = multierr.Append(err, bg.bootstrapBlue.Start())
+		}
+	}
+
+	return err
+}
+
+// CloseBlue shuts down only the blue OCR instances (commit first, then exec),
+// including their bootstrap counterparts when present, leaving any green
+// instances untouched. Errors are aggregated with multierr.
+func (c *ccipDeployment) CloseBlue() error {
+	var err error
+
+	for _, bg := range []blueGreenDeployment{c.commit, c.exec} {
+		err = multierr.Append(err, bg.blue.Close())
+		if bg.bootstrapBlue != nil {
+			err = multierr.Append(err, bg.bootstrapBlue.Close())
+		}
+	}
+
+	return err
+}
+
+// HandleBlueGreen handles the blue-green deployment transition.
+// prevDeployment is the previous deployment state.
+// there are two possible cases:
+//
+// 1. both blue and green are present in prevDeployment, but only blue is present in c.
+// this is a promotion of green to blue, so we need to shut down the blue deployment
+// and make green the new blue. In this case green is already running, so there's no
+// need to start it. However, we need to shut down the blue deployment.
+//
+// 2. only blue is present in prevDeployment, both blue and green are present in c.
+// In this case, blue is already running, so there's no need to start it. We need to
+// start green.
+//
+// Commit and exec transition independently; each must match one of the two
+// cases above or the whole call fails with "invalid blue-green deployment
+// transition".
+func (c *ccipDeployment) HandleBlueGreen(prevDeployment *ccipDeployment) error {
+	if prevDeployment == nil {
+		return fmt.Errorf("previous deployment is nil")
+	}
+
+	var err error
+	// Commit plugin: promotion (case 1) closes the old blue; staging (case 2)
+	// starts the new green.
+	if prevDeployment.commit.green != nil && c.commit.green == nil {
+		err = multierr.Append(err, prevDeployment.commit.blue.Close())
+		if prevDeployment.commit.bootstrapBlue != nil {
+			err = multierr.Append(err, prevDeployment.commit.bootstrapBlue.Close())
+		}
+	} else if prevDeployment.commit.green == nil && c.commit.green != nil {
+		err = multierr.Append(err, c.commit.green.Start())
+		if c.commit.bootstrapGreen != nil {
+			err = multierr.Append(err, c.commit.bootstrapGreen.Start())
+		}
+	} else {
+		return fmt.Errorf("invalid blue-green deployment transition")
+	}
+
+	// Exec plugin: same two-case handling as commit above.
+	if prevDeployment.exec.green != nil && c.exec.green == nil {
+		err = multierr.Append(err, prevDeployment.exec.blue.Close())
+		if prevDeployment.exec.bootstrapBlue != nil {
+			err = multierr.Append(err, prevDeployment.exec.bootstrapBlue.Close())
+		}
+	} else if prevDeployment.exec.green == nil && c.exec.green != nil {
+		err = multierr.Append(err, c.exec.green.Start())
+		if c.exec.bootstrapGreen != nil {
+			err = multierr.Append(err, c.exec.bootstrapGreen.Start())
+		}
+	} else {
+		return fmt.Errorf("invalid blue-green deployment transition")
+	}
+
+	return err
+}
+
+// HasGreenInstance reports whether the deployment currently has a green
+// instance for the given plugin type. Unknown plugin types yield false.
+func (c *ccipDeployment) HasGreenInstance(pluginType cctypes.PluginType) bool {
+	if pluginType == cctypes.PluginTypeCCIPCommit {
+		return c.commit.green != nil
+	}
+	if pluginType == cctypes.PluginTypeCCIPExec {
+		return c.exec.green != nil
+	}
+	return false
+}
+
+// isNewGreenInstance reports whether the on-chain config describes a staging
+// transition: two OCR configs (blue + green) while the previous deployment
+// had no green instance for this plugin type.
+func isNewGreenInstance(pluginType cctypes.PluginType, ocrConfigs []ccipreaderpkg.OCR3ConfigWithMeta, prevDeployment ccipDeployment) bool {
+	return len(ocrConfigs) == 2 && !prevDeployment.HasGreenInstance(pluginType)
+}
+
+// isPromotion reports whether the on-chain config describes a promotion:
+// a single OCR config remains while the previous deployment still had a
+// green instance for this plugin type (green becomes the new blue).
+func isPromotion(pluginType cctypes.PluginType, ocrConfigs []ccipreaderpkg.OCR3ConfigWithMeta, prevDeployment ccipDeployment) bool {
+	return len(ocrConfigs) == 1 && prevDeployment.HasGreenInstance(pluginType)
+}
diff --git a/core/capabilities/ccip/launcher/bluegreen_test.go b/core/capabilities/ccip/launcher/bluegreen_test.go
new file mode 100644
index 00000000000..9fd71a0cb44
--- /dev/null
+++ b/core/capabilities/ccip/launcher/bluegreen_test.go
@@ -0,0 +1,1043 @@
+package launcher
+
+import (
+ "errors"
+ "testing"
+
+ cctypes "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
+ mocktypes "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types/mocks"
+
+ "github.com/stretchr/testify/require"
+
+ ccipreaderpkg "github.com/smartcontractkit/chainlink-ccip/pkg/reader"
+)
+
+// Test_ccipDeployment_Close is a table-driven test for ccipDeployment.Close.
+// Each case wires mock oracles into an optional subset of the eight slots
+// (blue/green x commit/exec x plugin/bootstrap), sets Close expectations via
+// expect, verifies them via asserts, and checks error aggregation.
+func Test_ccipDeployment_Close(t *testing.T) {
+	type args struct {
+		commitBlue           *mocktypes.CCIPOracle
+		commitBlueBootstrap  *mocktypes.CCIPOracle
+		commitGreen          *mocktypes.CCIPOracle
+		commitGreenBootstrap *mocktypes.CCIPOracle
+		execBlue             *mocktypes.CCIPOracle
+		execBlueBootstrap    *mocktypes.CCIPOracle
+		execGreen            *mocktypes.CCIPOracle
+		execGreenBootstrap   *mocktypes.CCIPOracle
+	}
+	tests := []struct {
+		name    string
+		args    args
+		expect  func(t *testing.T, args args)
+		asserts func(t *testing.T, args args)
+		wantErr bool
+	}{
+		{
+			name: "no errors, blue only",
+			args: args{
+				commitBlue:           mocktypes.NewCCIPOracle(t),
+				commitGreen:          nil,
+				commitGreenBootstrap: nil,
+				execBlue:             mocktypes.NewCCIPOracle(t),
+				execGreen:            nil,
+				execGreenBootstrap:   nil,
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Close").Return(nil).Once()
+				args.execBlue.On("Close").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+			},
+			wantErr: false,
+		},
+		{
+			name: "no errors, blue and green",
+			args: args{
+				commitBlue:  mocktypes.NewCCIPOracle(t),
+				commitGreen: mocktypes.NewCCIPOracle(t),
+				execBlue:    mocktypes.NewCCIPOracle(t),
+				execGreen:   mocktypes.NewCCIPOracle(t),
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Close").Return(nil).Once()
+				args.commitGreen.On("Close").Return(nil).Once()
+				args.execBlue.On("Close").Return(nil).Once()
+				args.execGreen.On("Close").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.commitGreen.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+				args.execGreen.AssertExpectations(t)
+			},
+			wantErr: false,
+		},
+		{
+			name: "error on commit blue",
+			args: args{
+				commitBlue:  mocktypes.NewCCIPOracle(t),
+				commitGreen: nil,
+				execBlue:    mocktypes.NewCCIPOracle(t),
+				execGreen:   nil,
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Close").Return(errors.New("failed")).Once()
+				args.execBlue.On("Close").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+			},
+			wantErr: true,
+		},
+		{
+			name: "bootstrap blue also closed",
+			args: args{
+				commitBlue:          mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap: mocktypes.NewCCIPOracle(t),
+				execBlue:            mocktypes.NewCCIPOracle(t),
+				execBlueBootstrap:   mocktypes.NewCCIPOracle(t),
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Close").Return(nil).Once()
+				args.commitBlueBootstrap.On("Close").Return(nil).Once()
+				args.execBlue.On("Close").Return(nil).Once()
+				args.execBlueBootstrap.On("Close").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.commitBlueBootstrap.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+				args.execBlueBootstrap.AssertExpectations(t)
+			},
+			wantErr: false,
+		},
+		{
+			name: "bootstrap green also closed",
+			args: args{
+				commitBlue:           mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap:  mocktypes.NewCCIPOracle(t),
+				commitGreen:          mocktypes.NewCCIPOracle(t),
+				commitGreenBootstrap: mocktypes.NewCCIPOracle(t),
+				execBlue:             mocktypes.NewCCIPOracle(t),
+				execBlueBootstrap:    mocktypes.NewCCIPOracle(t),
+				execGreen:            mocktypes.NewCCIPOracle(t),
+				execGreenBootstrap:   mocktypes.NewCCIPOracle(t),
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Close").Return(nil).Once()
+				args.commitBlueBootstrap.On("Close").Return(nil).Once()
+				args.commitGreen.On("Close").Return(nil).Once()
+				args.commitGreenBootstrap.On("Close").Return(nil).Once()
+				args.execBlue.On("Close").Return(nil).Once()
+				args.execBlueBootstrap.On("Close").Return(nil).Once()
+				args.execGreen.On("Close").Return(nil).Once()
+				args.execGreenBootstrap.On("Close").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.commitBlueBootstrap.AssertExpectations(t)
+				args.commitGreen.AssertExpectations(t)
+				args.commitGreenBootstrap.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+				args.execBlueBootstrap.AssertExpectations(t)
+				args.execGreen.AssertExpectations(t)
+				args.execGreenBootstrap.AssertExpectations(t)
+			},
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Blue instances are mandatory; green/bootstrap slots are only
+			// populated when the case provides them.
+			c := &ccipDeployment{
+				commit: blueGreenDeployment{
+					blue: tt.args.commitBlue,
+				},
+				exec: blueGreenDeployment{
+					blue: tt.args.execBlue,
+				},
+			}
+			if tt.args.commitGreen != nil {
+				c.commit.green = tt.args.commitGreen
+			}
+			if tt.args.commitBlueBootstrap != nil {
+				c.commit.bootstrapBlue = tt.args.commitBlueBootstrap
+			}
+			if tt.args.commitGreenBootstrap != nil {
+				c.commit.bootstrapGreen = tt.args.commitGreenBootstrap
+			}
+
+			if tt.args.execGreen != nil {
+				c.exec.green = tt.args.execGreen
+			}
+			if tt.args.execBlueBootstrap != nil {
+				c.exec.bootstrapBlue = tt.args.execBlueBootstrap
+			}
+			if tt.args.execGreenBootstrap != nil {
+				c.exec.bootstrapGreen = tt.args.execGreenBootstrap
+			}
+
+			tt.expect(t, tt.args)
+			defer tt.asserts(t, tt.args)
+			err := c.Close()
+			if tt.wantErr {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
+
+// Test_ccipDeployment_StartBlue is a table-driven test for
+// ccipDeployment.StartBlue. Cases cover starting with and without bootstrap
+// instances, and error propagation from each of the four possible Start
+// calls (commit/exec x plugin/bootstrap).
+func Test_ccipDeployment_StartBlue(t *testing.T) {
+	type args struct {
+		commitBlue          *mocktypes.CCIPOracle
+		commitBlueBootstrap *mocktypes.CCIPOracle
+		execBlue            *mocktypes.CCIPOracle
+		execBlueBootstrap   *mocktypes.CCIPOracle
+	}
+	tests := []struct {
+		name    string
+		args    args
+		expect  func(t *testing.T, args args)
+		asserts func(t *testing.T, args args)
+		wantErr bool
+	}{
+		{
+			name: "no errors, no bootstrap",
+			args: args{
+				commitBlue:          mocktypes.NewCCIPOracle(t),
+				execBlue:            mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap: nil,
+				execBlueBootstrap:   nil,
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Start").Return(nil).Once()
+				args.execBlue.On("Start").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+			},
+			wantErr: false,
+		},
+		{
+			name: "no errors, with bootstrap",
+			args: args{
+				commitBlue:          mocktypes.NewCCIPOracle(t),
+				execBlue:            mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap: mocktypes.NewCCIPOracle(t),
+				execBlueBootstrap:   mocktypes.NewCCIPOracle(t),
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Start").Return(nil).Once()
+				args.commitBlueBootstrap.On("Start").Return(nil).Once()
+				args.execBlue.On("Start").Return(nil).Once()
+				args.execBlueBootstrap.On("Start").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.commitBlueBootstrap.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+				args.execBlueBootstrap.AssertExpectations(t)
+			},
+			wantErr: false,
+		},
+		{
+			name: "error on commit blue",
+			args: args{
+				commitBlue:          mocktypes.NewCCIPOracle(t),
+				execBlue:            mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap: nil,
+				execBlueBootstrap:   nil,
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Start").Return(errors.New("failed")).Once()
+				args.execBlue.On("Start").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+			},
+			wantErr: true,
+		},
+		{
+			name: "error on exec blue",
+			args: args{
+				commitBlue:          mocktypes.NewCCIPOracle(t),
+				execBlue:            mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap: nil,
+				execBlueBootstrap:   nil,
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Start").Return(nil).Once()
+				args.execBlue.On("Start").Return(errors.New("failed")).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+			},
+			wantErr: true,
+		},
+		{
+			name: "error on commit blue bootstrap",
+			args: args{
+				commitBlue:          mocktypes.NewCCIPOracle(t),
+				execBlue:            mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap: mocktypes.NewCCIPOracle(t),
+				execBlueBootstrap:   nil,
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Start").Return(nil).Once()
+				args.commitBlueBootstrap.On("Start").Return(errors.New("failed")).Once()
+				args.execBlue.On("Start").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.commitBlueBootstrap.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+			},
+			wantErr: true,
+		},
+		{
+			name: "error on exec blue bootstrap",
+			args: args{
+				commitBlue:          mocktypes.NewCCIPOracle(t),
+				execBlue:            mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap: nil,
+				execBlueBootstrap:   mocktypes.NewCCIPOracle(t),
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Start").Return(nil).Once()
+				args.execBlue.On("Start").Return(nil).Once()
+				args.execBlueBootstrap.On("Start").Return(errors.New("failed")).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+				args.execBlueBootstrap.AssertExpectations(t)
+			},
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Blue instances are mandatory; bootstrap slots are populated
+			// only when the case provides them.
+			c := &ccipDeployment{
+				commit: blueGreenDeployment{
+					blue: tt.args.commitBlue,
+				},
+				exec: blueGreenDeployment{
+					blue: tt.args.execBlue,
+				},
+			}
+			if tt.args.commitBlueBootstrap != nil {
+				c.commit.bootstrapBlue = tt.args.commitBlueBootstrap
+			}
+			if tt.args.execBlueBootstrap != nil {
+				c.exec.bootstrapBlue = tt.args.execBlueBootstrap
+			}
+
+			tt.expect(t, tt.args)
+			defer tt.asserts(t, tt.args)
+			err := c.StartBlue()
+			if tt.wantErr {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
+
+// Test_ccipDeployment_CloseBlue is a table-driven test for ccipDeployment.CloseBlue.
+// It covers closing the blue commit/exec oracles with and without the optional
+// bootstrap oracles, and verifies that an error from any single Close call is
+// propagated by CloseBlue.
+func Test_ccipDeployment_CloseBlue(t *testing.T) {
+	type args struct {
+		commitBlue          *mocktypes.CCIPOracle
+		commitBlueBootstrap *mocktypes.CCIPOracle
+		execBlue            *mocktypes.CCIPOracle
+		execBlueBootstrap   *mocktypes.CCIPOracle
+	}
+	tests := []struct {
+		name string
+		args args
+		// expect registers the mock Close expectations for this case.
+		expect func(t *testing.T, args args)
+		// asserts verifies the registered mock expectations after CloseBlue returns.
+		asserts func(t *testing.T, args args)
+		wantErr bool
+	}{
+		{
+			name: "no errors, no bootstrap",
+			args: args{
+				commitBlue:          mocktypes.NewCCIPOracle(t),
+				execBlue:            mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap: nil,
+				execBlueBootstrap:   nil,
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Close").Return(nil).Once()
+				args.execBlue.On("Close").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+			},
+			wantErr: false,
+		},
+		{
+			name: "no errors, with bootstrap",
+			args: args{
+				commitBlue:          mocktypes.NewCCIPOracle(t),
+				execBlue:            mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap: mocktypes.NewCCIPOracle(t),
+				execBlueBootstrap:   mocktypes.NewCCIPOracle(t),
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Close").Return(nil).Once()
+				args.commitBlueBootstrap.On("Close").Return(nil).Once()
+				args.execBlue.On("Close").Return(nil).Once()
+				args.execBlueBootstrap.On("Close").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.commitBlueBootstrap.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+				args.execBlueBootstrap.AssertExpectations(t)
+			},
+			wantErr: false,
+		},
+		{
+			name: "error on commit blue",
+			args: args{
+				commitBlue:          mocktypes.NewCCIPOracle(t),
+				execBlue:            mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap: nil,
+				execBlueBootstrap:   nil,
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Close").Return(errors.New("failed")).Once()
+				args.execBlue.On("Close").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+			},
+			wantErr: true,
+		},
+		{
+			name: "error on exec blue",
+			args: args{
+				commitBlue:          mocktypes.NewCCIPOracle(t),
+				execBlue:            mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap: nil,
+				execBlueBootstrap:   nil,
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Close").Return(nil).Once()
+				args.execBlue.On("Close").Return(errors.New("failed")).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+			},
+			wantErr: true,
+		},
+		{
+			name: "error on commit blue bootstrap",
+			args: args{
+				commitBlue:          mocktypes.NewCCIPOracle(t),
+				execBlue:            mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap: mocktypes.NewCCIPOracle(t),
+				execBlueBootstrap:   nil,
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Close").Return(nil).Once()
+				args.commitBlueBootstrap.On("Close").Return(errors.New("failed")).Once()
+				args.execBlue.On("Close").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.commitBlueBootstrap.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+			},
+			wantErr: true,
+		},
+		{
+			name: "error on exec blue bootstrap",
+			args: args{
+				commitBlue:          mocktypes.NewCCIPOracle(t),
+				execBlue:            mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap: nil,
+				execBlueBootstrap:   mocktypes.NewCCIPOracle(t),
+			},
+			expect: func(t *testing.T, args args) {
+				args.commitBlue.On("Close").Return(nil).Once()
+				args.execBlue.On("Close").Return(nil).Once()
+				args.execBlueBootstrap.On("Close").Return(errors.New("failed")).Once()
+			},
+			asserts: func(t *testing.T, args args) {
+				args.commitBlue.AssertExpectations(t)
+				args.execBlue.AssertExpectations(t)
+				args.execBlueBootstrap.AssertExpectations(t)
+			},
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			c := &ccipDeployment{
+				commit: blueGreenDeployment{
+					blue: tt.args.commitBlue,
+				},
+				exec: blueGreenDeployment{
+					blue: tt.args.execBlue,
+				},
+			}
+			// Bootstrap oracles are optional; only wire them up when the case provides one.
+			if tt.args.commitBlueBootstrap != nil {
+				c.commit.bootstrapBlue = tt.args.commitBlueBootstrap
+			}
+			if tt.args.execBlueBootstrap != nil {
+				c.exec.bootstrapBlue = tt.args.execBlueBootstrap
+			}
+
+			tt.expect(t, tt.args)
+			// Deferred so mock expectations are still checked if a require call fails the test.
+			defer tt.asserts(t, tt.args)
+			err := c.CloseBlue()
+			if tt.wantErr {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
+
+// Test_ccipDeployment_HandleBlueGreen_PrevDeploymentNil verifies that passing a nil
+// previous deployment to HandleBlueGreen is rejected with an error.
+func Test_ccipDeployment_HandleBlueGreen_PrevDeploymentNil(t *testing.T) {
+	deployment := &ccipDeployment{}
+	err := deployment.HandleBlueGreen(nil)
+	require.Error(t, err)
+}
+
+// Test_ccipDeployment_HandleBlueGreen is a table-driven test for
+// ccipDeployment.HandleBlueGreen. Each case builds a "previous" and a "future"
+// deployment from mock oracles and checks the blue/green transition: promoting
+// green to blue closes the previous blue oracles, while introducing a new green
+// instance starts the future green oracles. Error-propagation and invalid
+// transition cases (green present on both sides) are covered as well.
+func Test_ccipDeployment_HandleBlueGreen(t *testing.T) {
+	type args struct {
+		commitBlue           *mocktypes.CCIPOracle
+		commitBlueBootstrap  *mocktypes.CCIPOracle
+		commitGreen          *mocktypes.CCIPOracle
+		commitGreenBootstrap *mocktypes.CCIPOracle
+		execBlue             *mocktypes.CCIPOracle
+		execBlueBootstrap    *mocktypes.CCIPOracle
+		execGreen            *mocktypes.CCIPOracle
+		execGreenBootstrap   *mocktypes.CCIPOracle
+	}
+	tests := []struct {
+		name                 string
+		argsPrevDeployment   args
+		argsFutureDeployment args
+		// expect registers mock Start/Close expectations on the future (args) and
+		// previous (argsPrevDeployment) oracles for this case.
+		expect func(t *testing.T, args args, argsPrevDeployment args)
+		// asserts verifies the registered mock expectations after HandleBlueGreen returns.
+		asserts func(t *testing.T, args args, argsPrevDeployment args)
+		wantErr bool
+	}{
+		{
+			name: "promotion blue to green, no bootstrap",
+			argsPrevDeployment: args{
+				commitBlue:  mocktypes.NewCCIPOracle(t),
+				commitGreen: mocktypes.NewCCIPOracle(t),
+				execBlue:    mocktypes.NewCCIPOracle(t),
+				execGreen:   mocktypes.NewCCIPOracle(t),
+			},
+			argsFutureDeployment: args{
+				commitBlue:  mocktypes.NewCCIPOracle(t),
+				commitGreen: nil,
+				execBlue:    mocktypes.NewCCIPOracle(t),
+				execGreen:   nil,
+			},
+			expect: func(t *testing.T, args args, argsPrevDeployment args) {
+				argsPrevDeployment.commitBlue.On("Close").Return(nil).Once()
+				argsPrevDeployment.execBlue.On("Close").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args, argsPrevDeployment args) {
+				argsPrevDeployment.commitBlue.AssertExpectations(t)
+				argsPrevDeployment.execBlue.AssertExpectations(t)
+			},
+			wantErr: false,
+		},
+		{
+			name: "promotion blue to green, with bootstrap",
+			argsPrevDeployment: args{
+				commitBlue:           mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap:  mocktypes.NewCCIPOracle(t),
+				commitGreen:          mocktypes.NewCCIPOracle(t),
+				commitGreenBootstrap: mocktypes.NewCCIPOracle(t),
+				execBlue:             mocktypes.NewCCIPOracle(t),
+				execBlueBootstrap:    mocktypes.NewCCIPOracle(t),
+				execGreen:            mocktypes.NewCCIPOracle(t),
+				execGreenBootstrap:   mocktypes.NewCCIPOracle(t),
+			},
+			argsFutureDeployment: args{
+				commitBlue:           mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap:  mocktypes.NewCCIPOracle(t),
+				commitGreen:          nil,
+				commitGreenBootstrap: nil,
+				execBlue:             mocktypes.NewCCIPOracle(t),
+				execBlueBootstrap:    mocktypes.NewCCIPOracle(t),
+				execGreen:            nil,
+				execGreenBootstrap:   nil,
+			},
+			expect: func(t *testing.T, args args, argsPrevDeployment args) {
+				argsPrevDeployment.commitBlue.On("Close").Return(nil).Once()
+				argsPrevDeployment.commitBlueBootstrap.On("Close").Return(nil).Once()
+				argsPrevDeployment.execBlue.On("Close").Return(nil).Once()
+				argsPrevDeployment.execBlueBootstrap.On("Close").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args, argsPrevDeployment args) {
+				argsPrevDeployment.commitBlue.AssertExpectations(t)
+				argsPrevDeployment.commitBlueBootstrap.AssertExpectations(t)
+				argsPrevDeployment.execBlue.AssertExpectations(t)
+				argsPrevDeployment.execBlueBootstrap.AssertExpectations(t)
+			},
+			wantErr: false,
+		},
+		{
+			name: "new green deployment, no bootstrap",
+			argsPrevDeployment: args{
+				commitBlue:  mocktypes.NewCCIPOracle(t),
+				commitGreen: nil,
+				execBlue:    mocktypes.NewCCIPOracle(t),
+				execGreen:   nil,
+			},
+			argsFutureDeployment: args{
+				commitBlue:  mocktypes.NewCCIPOracle(t),
+				commitGreen: mocktypes.NewCCIPOracle(t),
+				execBlue:    mocktypes.NewCCIPOracle(t),
+				execGreen:   mocktypes.NewCCIPOracle(t),
+			},
+			expect: func(t *testing.T, args args, argsPrevDeployment args) {
+				args.commitGreen.On("Start").Return(nil).Once()
+				args.execGreen.On("Start").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args, argsPrevDeployment args) {
+				args.commitGreen.AssertExpectations(t)
+				args.execGreen.AssertExpectations(t)
+			},
+			wantErr: false,
+		},
+		{
+			name: "new green deployment, with bootstrap",
+			argsPrevDeployment: args{
+				commitBlue:           mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap:  mocktypes.NewCCIPOracle(t),
+				commitGreen:          nil,
+				commitGreenBootstrap: nil,
+				execBlue:             mocktypes.NewCCIPOracle(t),
+				execBlueBootstrap:    mocktypes.NewCCIPOracle(t),
+				execGreen:            nil,
+				execGreenBootstrap:   nil,
+			},
+			argsFutureDeployment: args{
+				commitBlue:           mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap:  mocktypes.NewCCIPOracle(t),
+				commitGreen:          mocktypes.NewCCIPOracle(t),
+				commitGreenBootstrap: mocktypes.NewCCIPOracle(t),
+				execBlue:             mocktypes.NewCCIPOracle(t),
+				execBlueBootstrap:    mocktypes.NewCCIPOracle(t),
+				execGreen:            mocktypes.NewCCIPOracle(t),
+				execGreenBootstrap:   mocktypes.NewCCIPOracle(t),
+			},
+			expect: func(t *testing.T, args args, argsPrevDeployment args) {
+				args.commitGreen.On("Start").Return(nil).Once()
+				args.commitGreenBootstrap.On("Start").Return(nil).Once()
+				args.execGreen.On("Start").Return(nil).Once()
+				args.execGreenBootstrap.On("Start").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args, argsPrevDeployment args) {
+				args.commitGreen.AssertExpectations(t)
+				args.commitGreenBootstrap.AssertExpectations(t)
+				args.execGreen.AssertExpectations(t)
+				args.execGreenBootstrap.AssertExpectations(t)
+			},
+			wantErr: false,
+		},
+		{
+			name: "error on commit green start",
+			argsPrevDeployment: args{
+				commitBlue:  mocktypes.NewCCIPOracle(t),
+				commitGreen: nil,
+				execBlue:    mocktypes.NewCCIPOracle(t),
+				execGreen:   nil,
+			},
+			argsFutureDeployment: args{
+				commitBlue:  mocktypes.NewCCIPOracle(t),
+				commitGreen: mocktypes.NewCCIPOracle(t),
+				execBlue:    mocktypes.NewCCIPOracle(t),
+				execGreen:   mocktypes.NewCCIPOracle(t),
+			},
+			expect: func(t *testing.T, args args, argsPrevDeployment args) {
+				args.commitGreen.On("Start").Return(errors.New("failed")).Once()
+				args.execGreen.On("Start").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args, argsPrevDeployment args) {
+				args.commitGreen.AssertExpectations(t)
+				args.execGreen.AssertExpectations(t)
+			},
+			wantErr: true,
+		},
+		{
+			name: "error on exec green start",
+			argsPrevDeployment: args{
+				commitBlue:  mocktypes.NewCCIPOracle(t),
+				commitGreen: nil,
+				execBlue:    mocktypes.NewCCIPOracle(t),
+				execGreen:   nil,
+			},
+			argsFutureDeployment: args{
+				commitBlue:  mocktypes.NewCCIPOracle(t),
+				commitGreen: mocktypes.NewCCIPOracle(t),
+				execBlue:    mocktypes.NewCCIPOracle(t),
+				execGreen:   mocktypes.NewCCIPOracle(t),
+			},
+			expect: func(t *testing.T, args args, argsPrevDeployment args) {
+				args.commitGreen.On("Start").Return(nil).Once()
+				args.execGreen.On("Start").Return(errors.New("failed")).Once()
+			},
+			asserts: func(t *testing.T, args args, argsPrevDeployment args) {
+				args.commitGreen.AssertExpectations(t)
+				args.execGreen.AssertExpectations(t)
+			},
+			wantErr: true,
+		},
+		{
+			name: "error on commit green bootstrap start",
+			argsPrevDeployment: args{
+				commitBlue:           mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap:  mocktypes.NewCCIPOracle(t),
+				commitGreen:          nil,
+				commitGreenBootstrap: nil,
+				execBlue:             mocktypes.NewCCIPOracle(t),
+				execBlueBootstrap:    mocktypes.NewCCIPOracle(t),
+				execGreen:            nil,
+				execGreenBootstrap:   nil,
+			},
+			argsFutureDeployment: args{
+				commitBlue:           mocktypes.NewCCIPOracle(t),
+				commitBlueBootstrap:  mocktypes.NewCCIPOracle(t),
+				commitGreen:          mocktypes.NewCCIPOracle(t),
+				commitGreenBootstrap: mocktypes.NewCCIPOracle(t),
+				execBlue:             mocktypes.NewCCIPOracle(t),
+				execBlueBootstrap:    mocktypes.NewCCIPOracle(t),
+				execGreen:            mocktypes.NewCCIPOracle(t),
+				execGreenBootstrap:   mocktypes.NewCCIPOracle(t),
+			},
+			expect: func(t *testing.T, args args, argsPrevDeployment args) {
+				args.commitGreen.On("Start").Return(nil).Once()
+				args.commitGreenBootstrap.On("Start").Return(errors.New("failed")).Once()
+				args.execGreen.On("Start").Return(nil).Once()
+				args.execGreenBootstrap.On("Start").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args, argsPrevDeployment args) {
+				args.commitGreen.AssertExpectations(t)
+				args.commitGreenBootstrap.AssertExpectations(t)
+				args.execGreen.AssertExpectations(t)
+				args.execGreenBootstrap.AssertExpectations(t)
+			},
+			wantErr: true,
+		},
+		{
+			name: "invalid blue-green deployment transition commit: both prev and future deployment have green",
+			argsPrevDeployment: args{
+				commitBlue:  mocktypes.NewCCIPOracle(t),
+				commitGreen: mocktypes.NewCCIPOracle(t),
+				execBlue:    mocktypes.NewCCIPOracle(t),
+				execGreen:   mocktypes.NewCCIPOracle(t),
+			},
+			argsFutureDeployment: args{
+				commitBlue:  mocktypes.NewCCIPOracle(t),
+				commitGreen: mocktypes.NewCCIPOracle(t),
+				execBlue:    mocktypes.NewCCIPOracle(t),
+				execGreen:   mocktypes.NewCCIPOracle(t),
+			},
+			expect:  func(t *testing.T, args args, argsPrevDeployment args) {},
+			asserts: func(t *testing.T, args args, argsPrevDeployment args) {},
+			wantErr: true,
+		},
+		{
+			name: "invalid blue-green deployment transition exec: both prev and future deployment have green",
+			argsPrevDeployment: args{
+				commitBlue:  mocktypes.NewCCIPOracle(t),
+				commitGreen: nil,
+				execBlue:    mocktypes.NewCCIPOracle(t),
+				execGreen:   mocktypes.NewCCIPOracle(t),
+			},
+			argsFutureDeployment: args{
+				commitBlue:  mocktypes.NewCCIPOracle(t),
+				commitGreen: mocktypes.NewCCIPOracle(t),
+				execBlue:    mocktypes.NewCCIPOracle(t),
+				execGreen:   mocktypes.NewCCIPOracle(t),
+			},
+			expect: func(t *testing.T, args args, argsPrevDeployment args) {
+				args.commitGreen.On("Start").Return(nil).Once()
+			},
+			asserts: func(t *testing.T, args args, argsPrevDeployment args) {
+				args.commitGreen.AssertExpectations(t)
+			},
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Build the future deployment; green and bootstrap oracles are optional
+			// and only wired up when the case provides them.
+			futDeployment := &ccipDeployment{
+				commit: blueGreenDeployment{
+					blue: tt.argsFutureDeployment.commitBlue,
+				},
+				exec: blueGreenDeployment{
+					blue: tt.argsFutureDeployment.execBlue,
+				},
+			}
+			if tt.argsFutureDeployment.commitGreen != nil {
+				futDeployment.commit.green = tt.argsFutureDeployment.commitGreen
+			}
+			if tt.argsFutureDeployment.commitBlueBootstrap != nil {
+				futDeployment.commit.bootstrapBlue = tt.argsFutureDeployment.commitBlueBootstrap
+			}
+			if tt.argsFutureDeployment.commitGreenBootstrap != nil {
+				futDeployment.commit.bootstrapGreen = tt.argsFutureDeployment.commitGreenBootstrap
+			}
+			if tt.argsFutureDeployment.execGreen != nil {
+				futDeployment.exec.green = tt.argsFutureDeployment.execGreen
+			}
+			if tt.argsFutureDeployment.execBlueBootstrap != nil {
+				futDeployment.exec.bootstrapBlue = tt.argsFutureDeployment.execBlueBootstrap
+			}
+			if tt.argsFutureDeployment.execGreenBootstrap != nil {
+				futDeployment.exec.bootstrapGreen = tt.argsFutureDeployment.execGreenBootstrap
+			}
+
+			// Build the previous deployment the same way.
+			prevDeployment := &ccipDeployment{
+				commit: blueGreenDeployment{
+					blue: tt.argsPrevDeployment.commitBlue,
+				},
+				exec: blueGreenDeployment{
+					blue: tt.argsPrevDeployment.execBlue,
+				},
+			}
+			if tt.argsPrevDeployment.commitGreen != nil {
+				prevDeployment.commit.green = tt.argsPrevDeployment.commitGreen
+			}
+			if tt.argsPrevDeployment.commitBlueBootstrap != nil {
+				prevDeployment.commit.bootstrapBlue = tt.argsPrevDeployment.commitBlueBootstrap
+			}
+			if tt.argsPrevDeployment.commitGreenBootstrap != nil {
+				prevDeployment.commit.bootstrapGreen = tt.argsPrevDeployment.commitGreenBootstrap
+			}
+			if tt.argsPrevDeployment.execGreen != nil {
+				prevDeployment.exec.green = tt.argsPrevDeployment.execGreen
+			}
+			if tt.argsPrevDeployment.execBlueBootstrap != nil {
+				prevDeployment.exec.bootstrapBlue = tt.argsPrevDeployment.execBlueBootstrap
+			}
+			if tt.argsPrevDeployment.execGreenBootstrap != nil {
+				prevDeployment.exec.bootstrapGreen = tt.argsPrevDeployment.execGreenBootstrap
+			}
+
+			tt.expect(t, tt.argsFutureDeployment, tt.argsPrevDeployment)
+			// Deferred so mock expectations are still checked if a require call fails the test.
+			defer tt.asserts(t, tt.argsFutureDeployment, tt.argsPrevDeployment)
+			err := futDeployment.HandleBlueGreen(prevDeployment)
+			if tt.wantErr {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
+
+func Test_isNewGreenInstance(t *testing.T) {
+ type args struct {
+ pluginType cctypes.PluginType
+ ocrConfigs []ccipreaderpkg.OCR3ConfigWithMeta
+ prevDeployment ccipDeployment
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ "prev deployment only blue",
+ args{
+ pluginType: cctypes.PluginTypeCCIPCommit,
+ ocrConfigs: []ccipreaderpkg.OCR3ConfigWithMeta{
+ {}, {},
+ },
+ prevDeployment: ccipDeployment{
+ commit: blueGreenDeployment{
+ blue: mocktypes.NewCCIPOracle(t),
+ },
+ },
+ },
+ true,
+ },
+ {
+ "green -> blue promotion",
+ args{
+ pluginType: cctypes.PluginTypeCCIPCommit,
+ ocrConfigs: []ccipreaderpkg.OCR3ConfigWithMeta{
+ {},
+ },
+ prevDeployment: ccipDeployment{
+ commit: blueGreenDeployment{
+ blue: mocktypes.NewCCIPOracle(t),
+ green: mocktypes.NewCCIPOracle(t),
+ },
+ },
+ },
+ false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := isNewGreenInstance(tt.args.pluginType, tt.args.ocrConfigs, tt.args.prevDeployment)
+ require.Equal(t, tt.want, got)
+ })
+ }
+}
+
+// Test_isPromotion checks that isPromotion reports false when the previous
+// deployment ran only a blue instance (two OCR configs means a fresh green is
+// being introduced, not a promotion), and true for a single OCR config arriving
+// while both blue and green were running (green -> blue promotion).
+func Test_isPromotion(t *testing.T) {
+	type args struct {
+		pluginType     cctypes.PluginType
+		ocrConfigs     []ccipreaderpkg.OCR3ConfigWithMeta
+		prevDeployment ccipDeployment
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			"prev deployment only blue",
+			args{
+				pluginType: cctypes.PluginTypeCCIPCommit,
+				ocrConfigs: []ccipreaderpkg.OCR3ConfigWithMeta{
+					{}, {},
+				},
+				prevDeployment: ccipDeployment{
+					commit: blueGreenDeployment{
+						blue: mocktypes.NewCCIPOracle(t),
+					},
+				},
+			},
+			false,
+		},
+		{
+			"green -> blue promotion",
+			args{
+				pluginType: cctypes.PluginTypeCCIPCommit,
+				ocrConfigs: []ccipreaderpkg.OCR3ConfigWithMeta{
+					{},
+				},
+				prevDeployment: ccipDeployment{
+					commit: blueGreenDeployment{
+						blue:  mocktypes.NewCCIPOracle(t),
+						green: mocktypes.NewCCIPOracle(t),
+					},
+				},
+			},
+			true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Use require.Equal for consistency with the other tests in this file
+			// (the original used a bare t.Errorf here).
+			got := isPromotion(tt.args.pluginType, tt.args.ocrConfigs, tt.args.prevDeployment)
+			require.Equal(t, tt.want, got)
+		})
+	}
+}
+
+// Test_ccipDeployment_HasGreenInstance checks HasGreenInstance for the commit and
+// exec plugin types, with and without a green oracle present, and for an invalid
+// plugin type value.
+func Test_ccipDeployment_HasGreenInstance(t *testing.T) {
+	type fields struct {
+		commit blueGreenDeployment
+		exec   blueGreenDeployment
+	}
+	type args struct {
+		pluginType cctypes.PluginType
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   bool
+	}{
+		{
+			"commit green present",
+			fields{
+				commit: blueGreenDeployment{
+					blue:  mocktypes.NewCCIPOracle(t),
+					green: mocktypes.NewCCIPOracle(t),
+				},
+			},
+			args{
+				pluginType: cctypes.PluginTypeCCIPCommit,
+			},
+			true,
+		},
+		{
+			"commit green not present",
+			fields{
+				commit: blueGreenDeployment{
+					blue: mocktypes.NewCCIPOracle(t),
+				},
+			},
+			args{
+				pluginType: cctypes.PluginTypeCCIPCommit,
+			},
+			false,
+		},
+		{
+			"exec green present",
+			fields{
+				exec: blueGreenDeployment{
+					blue:  mocktypes.NewCCIPOracle(t),
+					green: mocktypes.NewCCIPOracle(t),
+				},
+			},
+			args{
+				pluginType: cctypes.PluginTypeCCIPExec,
+			},
+			true,
+		},
+		{
+			"exec green not present",
+			fields{
+				exec: blueGreenDeployment{
+					blue: mocktypes.NewCCIPOracle(t),
+				},
+			},
+			args{
+				pluginType: cctypes.PluginTypeCCIPExec,
+			},
+			false,
+		},
+		{
+			"invalid plugin type",
+			fields{},
+			args{
+				pluginType: cctypes.PluginType(100),
+			},
+			false,
+		},
+	}
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			// Copying the whole field structs is equivalent to the conditional
+			// nil-guarded assignments: assigning a nil oracle is a no-op on a
+			// zero-valued deployment.
+			c := &ccipDeployment{
+				commit: tc.fields.commit,
+				exec:   tc.fields.exec,
+			}
+			require.Equal(t, tc.want, c.HasGreenInstance(tc.args.pluginType))
+		})
+	}
+}
diff --git a/core/capabilities/ccip/launcher/ccip_capability_launcher.png b/core/capabilities/ccip/launcher/ccip_capability_launcher.png
new file mode 100644
index 0000000000000000000000000000000000000000..5e90d5ff7daa3643fde8185f49b4c83840b2c298
GIT binary patch
literal 253433
zcmeEubyQSe^e-TaiinB=(kh68lyrkADWPdt&eX*>U#SM;|#EF}#Z;7cnp}@Fc_^KgYnhIE#US
zZGQe7&|>^CKLi8gl8}jrh@6Co2$h_*rJ;$r0S1P+Pna@}ihMI+oO(#GkTGTwfiuBl
zQ;hq!3NgC;bnZ}NKJsC?qOd&w>6IDL)!dI6w|&X4@ah>oGan#&a{awso{sWp(#!1q
z7Ub;yY`gmr_eMPHTszDK2ct-y?A<*aMI0*SNTxu%3kg9|G9N>oF))cS=yJ~uk%8jg
z-Xtf-)VXPTq%}H%^|r7QPSv|-bmY!J!Uv%s!k{LZWYePHB$^b#7+2JI$3=I&}#
zC#+i-rnYb0Y;023sAN0`&JDe#3SVUHf^;_TL>ms}-t`V;@t?7IcOzLyIPRV|>(tTH
z#v{g3?vPuQO1bGTd`0w=ZM>YyWBF+`d=;tS1SwBMg)=W8a4=;J@mVCB-@m!rwDL>@
zM1ebdXn&DYaqoCeMm=LjK8Q}RD_jL%JMv0u=Pp$X{@bvi`um?^j6L~54ILNW
zts7n`pS@9czhTm6*Gjl$3t{^VVq3u{Q^D*QSMx8-3snp$^9dK!8(XhWU!AMbI7eA+oD@i
zdRJJ2i6t-H9h%%LVS)#U)no`}wsW09l0`K-xO|m=#5=(ucch
zW6|b2O@}X{yt!z0*hYbSAO7In7mfoXDRUP4E4)Fm4UBq?
zx#xr$W%L46=r?rkGsZr@UERZq&ok`
zGGDo^}ji0=!kM~`2%J|d(_ZlX)V
z8hq75wg2u;lgae$u0WVD#jVOFvuOg~k9402P2Z?}Oq4s<^0@Z9qwVb|51A*p-#zCa
z7LsL&IwWq*n`}OGCRll_op6S@<~<8}>vfwNVq#psug`OBpZd3bk<5LVNs>_;HEvTC
zXs72kAQtns+lmK{p=@<8?xhVPZHLVDzLv*o-4cTEfamc1YGd`)&)6cb_`mqo-1Wcn
z;!4U@-wOtBR^HUUDX?O)BN)Bv^O*b-`}Z^O`WJeH;Je`a;4@$*#Pz(`fhS~C@5G&{
zT|)5h`_|o>3H6m&k|>v;mFO2ANy3+Gk-RI}F8M|BzT~6i+sO};H6>}^XW!?S-+QV=
zs~%F>^y~$N(>W(@C(=?MS5KJq;nVL4D)O%P5bqzfxJq&-2U>?)$FGa3st!IKl;;*z
zP2iI))o_YmKRYAVLaE56xU8T;N1#AW_f5h6bIkM7#C0#pWVL6gwEI1-iw{llwk~bm
z+ImeV-TN#ot3^IGLoH1$$60zSnMAeic|aP6M3!9E)0Kpw8$|+k`DI}|Qn85wVwKNP
zZ=JpOUY?WIFBE+*n%(d`K$C=s-I+}yN-0V>im%NxN}avK*#0|n6nDUrsV>6K;_vNU
zWkqk*`qa#`9%Mbw;#ad!Lk&d@We@obbr;bW1(`W4>hLJ>n8%XE&hX&af)-p!blF~U
zzY^4Iep`06U$mm1y>qL7W+{9g-dmZrFALHMAnM#=`DD9qjsEQIe
z!*Is(Ot!Gk7tGh?UY|e4>Sj(ntFpU{VA*q+?&?UxRql)ooqb{dS&&6ZJPxjid0{zz8lmkz?r=cnC!A$
zk`2QR2rhwN9$~=)QwF$N2*b_r58+D>I@GJ6`Nfj?YA&JL7o$U?O;Nk-WAiV9*``%Ala
zy>NSJ1`M*L62H+ayC_#JiS`eJ*r0hJKUmaARhdhP-^dcoh5rHHbN+Nb9%40rR*Laa*WLtBQ8x
zMLk7vL3$xUs5X?-;V>$m#o~i%>qu(^rVQrf%UaA;RjtgyOnDC#9dR8A9ra*CxkqwO
zI%{?9z}
z8XaVOwS|hH{7Er(Am;`rIKIGVvIk&V9yYxiGyD!su5X
zxc_RdL%nusW`9i?5ud!Dlq0#?ywR)h$03D?JDqDu%}ghRv3id
zA-hXOu_WDWYoK?_Y~D<=uR@d8X{G*Qf4=n6y(Q`;o^N+rNc+_@wet?8o*34*wY+Sp
zRP<7mN*}cxw~herW5~SYy*|H
zZ%KY$u+uddoC^=0`Iuf?SWB}dj`H6-^lJ5LZ-vDh>{4>E9l-(us@<>h9ir
zrUb+M{Q78>#cgdh<;+XB;c*1+VumyZRn$eH3R)pe4X>q6RcZU|_k_3kaLfbuqQaxk
z9ZJ88=XQU)KSwC8sQIO1r5a;P%hh(E&%F!SQurFENEk{>W6%N5=P|G`Nifa=Pnf`u
z04C|LXHm?17-x>#u`n>aO)#*3d?N$=M*j&0e$ahRexC{P!oUIkbq)A&Ou+j2^~KqQ
zGe4iP&4G6q!tx>#62NbHeQN^)kd3jWZG^F23ea%DN?g?j1A~wT{evm-oO%ryf51dR
z#a2c7DUZIT1*7f@OFaWdM++KV*9NkOBCD!Nv(>tLw-Bvbl3I$j@;e8`$Vun^@VJSc0g~6E{9RXOeYkdvGF_pZOa`n#*5je)g@r3Emlt-z_kesuo*<&Tbh
zOlaDF`&nLL(N0Dc27L;u0z0RFmn@*8-@d=u3Z
ze;xpa5QfBKVFgFbWtBHOi!5t#Cc@kI6#w7j98q5n8`}^}XPySGO;6C^+Ak
zR=k3JT><-kmx7#d-K)lLD9nN8N+ZX;w=hc+a
zQ$?1;1$bF$e|9zgh*j^7T?HR)KI<_@n|*$!Snk@lyFyg79^`u$AajpRh|xJgaqI*4
z^nP^i&5l6)$lcYdI@{Cljop%AD(9c6LSsp+SEbSmCXu+m+C50HC2LFA&ytZmlvC~|
zGmWgOJ;?+9;BKcsHpzNNeB0ZshUA`2qh&oge8e`O;R7A~Bh<)}2$6Fu$U
zf}LGu^c-f}kzvs2dvxBp6;qus5L!s-q3*4Q1+sD9>k~U28er2>*-IUG*&%jY
z{7*zCs9sc1YTm-PY+Gkhzy6EY#}6vWLVL(F3rea_`o{<|L)s+%1XW^6bwcelS-qRLF#@Ulx`o(G33Wd$g>$(5#z+XP>S|>Ds+>HYnnj
zFk`a6ScWZ$Eh-yHK&!#d!8=Ln%6bv+ufgWggB_lKddim?9IWzKzWmNBty`jh5`c-7
zK_JKOVE3q<;E!oM8mvjk4i+1Gj!dD4ICaE1xr?co*|0*f(we
z&eqF;j^Ekd(1vKhR8R9R*QDo!J%Wd5!xVd
zG+c-6%+AQ(Alx3gbFSf*hzm#c6Sb?y^BaG~N>;Q%#Xcf{$fby$vJH(-Ng-zF6Mm52
zFFqKjW7X?qQ}PZf#0NVauM!ih_rk&9$NS*=6@rVYWp*Z{$T-2wGEm@;w_Eq*4tURg
zfjK|sVEYpipI^V!0g>n0{u-Q0juI=m*8BBD4~~kXL4dtle2OSvVBPS`sIo@9Mtnw!ZHXUr8n0uy
zJKCCRY|nu`or=}mD;*EIP*Ddl(!s~9X5$3JH@p2f%li+`_t^o>cBl(cfO@7v!W>MLZQK$!UE}(@7@b#(Zj9M<8GrQL*w^~mk(_w&KU#=IH
zcD1!RpCDL=VBtdezkQ$PU8g3m)B@1!NBK0tKY}=bB
ziwVB)zztc+wU)L#ds;bWIdW!Y$++k!p(w{pNl5@aQKb+jyL13N}*Xfq&Vk0
z8#c*ar7=kc2QI+ILNb81@Quu@_kWU%hbDQ7`vc8Sl7(2<>B%zv~2LXK)piax14v
zdFus+Efg#ZN)EL{r@VNzn;YB#o_2jyyk64cSI?-SR-S9c?Q*BJu!2I%Dhy5W8mRu6y`edgiWw*j&}
z*l!H1OuzG#%HaZ8U0TT*I*8&a3;c6U(==3((Gf}DW8oja%KQlUQCw$d%%8e=BARPH^@0Aa_aceLJFt
z-?^dG8}9*s(#Rw1ZSIXPnO!T{!ysa1iUONhgI_q&K9
zsm2%kCi<(foIXyLOn&`F48Beq$MEiGPub{6xV_Uf)iY=b>_WdFL87)}MyEwnO?t^<^y_>n3b%!DuxY@y#{P-`|^B)p$|Yx3$G-?a^*
zaP`9jb>2pbVeu%6+5JK
zpgSKA1MWP;d`;|=UB=ebVXC2FV+&*YRC}MgZNOL@Us9kly9Sb$w%vyy2U2F9wZdV1
z!y#7i0ngp|t;(5QtsbOCfGpYw*%1-*>&nCcJAkjAbhCXU9XEc6@`nTy1OVceM0$V3
zI%r-jQUP`g=Yb|#&2_@C{2~~c6eu*^E8iaS^NK|)N!0e~Vc(_3T)D$^Gb{_)(_umG
zy*kJ{tlixJ*dzVzSIK$0bH(2?(%lA`#k6?t-hKK(o6CM&%mq*c{0F?+FbVo88JYCS
zstvdc*yvglYDpxANVa!@6|eB}(^nEq3iGG)jd=(3H$3|GjTL4|pjrIeYvQUybHT1{$(w
z1LXlusEA=}MU_zd6z(IeEVK1crQw``
zi764z0L?u+zmYv<8g2CzUvIO6ry{wWf^0N~ZR#X0mot5@zIN^zfI8mByDZ4-1S&MC
zgrF_@Yr!zuYMGRj?=O;JV%H6u7y$D1s+sG1$>
zw=OMs9d*$*qiOb+&xEGORUd387?0U+31%N=C;84lad*5QPBT%ySX_%7ejvkN;%=9^
zXuCW^{rDhsxUtdwZd+TnR7NnIbLS3X3k6k4yKUN+MnUStLzwzu`a!79Y)B-Fkwis&
zVL2`4bh?a1q7*}|e?6E^aH*Clr!-|^jdEoDIme7P8RzFKhHaO7M5bQaQ23N?L=cJ!
z?4hF!`~lMBgNwBX9pObzh^%k%zHG$tAX2+Aszcj8u|$z|yYB#8?)a
z8dAR`@LRUlL-b0?>W8BSe8~PI!r2Uyi_svM0EpWSN+JR=G2ZNQfBJS99{;@i${+&e
zFv7;TGg&+c(In**SW`P#5A?~XSVlXe`|*p_eVBcUqe;^*v;ymI5izP#R8w$sgUGp)
zhm1nKZUzBPfoo4cnusZF%V-qh)Ug#(9S~H_x8qzKXC?!qIcL$!Fq{+AAYddwEH$dC
zMZ8xcvey`a95F9~sumxhxVHK`8s>>ceJMzbg7~3h_XOBqbQ2Gh+f01G<3)Ka4=UOX
zQ^60vXl0NW6xLFtj>c=oQyjsso|y{%$S+m?z9^{F)SQo3_en4(|KJsSgV4{0b8)Fg
z4fXD1M1J8F>TQGTA|@q=ow}7%C1>_UmwexV^ec#F3JQ)BD_i#g+ZA8da2kIcN6=<>
zfe_vhbR1ZoU^^IX=0Wez0imYQ>kF{m9)GSQ2d2I~NXEqnaU0%5Q=`vVP5Jv)s|(aO
zR|id!4^3*2TZ4XyvWojEIZ-Vis@<$4HMXD^=rxr`JPP&Z+fwlZK;*+p4m?nOC!Unh
zsDp-2JB!KsN~ABI)|M*AGjrLG=5K%Otw1g6XKPn2HxnZlE0%XP><-2UKn08}E)!jm
z1y-u9+V2n4Djreun)+J#4d{@kkD>A~
zes1Lkx!Gq(wAULXxBinWC5{s)qE+=N_Id%cjnaD;SfQXIt@qB?8>8bCoCKCqj^6Ws
zn=I{h+m?+(vNv81lvzH&C;LmLYcB^_v}7$L~99{gG#$YJ8S*f`ud*}MFT
zkCd(fR@bNQH`9U#btkrb%R>-b@^?Ia&9Z8rhU~w6;>7
z%?7o&i~6t?Rd(Z4%hPK&52r0KlxDK;^g7?({W=}
z>uHZ75o@TB4V;Mh`0AYzVnj85L|fYC-r~>#8HigSIuEO|vrFyYQ;7>)cyY*T0C8O+
zlC95o7|Q^I7Fg+&9He>Oz(;LUb+7k&2dW)!TUPHyE!`p#pkOs*V3a9WiXTR<5XKK@
zDEkxl=qDd4!`Sab+{ZG$-@VX13Tdav17DlXg82$3Z0S26SQ0ht?B7rTHtt!SdLo!wAX=o*PcSR->y=7AgymBX|=aOX}j<^Oj)}$jU}HB(=W!!|tCi)B*ef
z;lUci@wN~x#{?5}+Ad&DhY)ZEp95;#L(}7Nj>hxC$|%C$=*sTBs9qLps^+s3Cfn-%
zu3BA)wGulA4_iqMaEafwjIo(=ZwX8e)9BdkDmIvCXpzjqfq!Rh=Qr0Gw8O7jGmLRU
z`R1+>2vTY(u|rcZta!xLo^5)ISx_QrHqtU-nji>Q3rK>TvnNpiGyr$_n&EZqMmKn
zR^vvgt++d{j)MC>sz+=w;Mr23d>E#Nam*F%4@anN%jeRNMvai|wpjff4^l%Qv^Kf+
zp2B|b%wV3G?2tiGm~T+kL9?UDx^GK$(7~pDuTps1IR+C2h^E`zn#Ck5Wmc81rSqU_
zHAackfToGBfdIz>1necVqso(<4zi5p>rFwHS82>PS!lQR@qi&amZ2
zV-yn1n}Mq<&+Ij4f+A#PwIIt018zc!s?`SH#)6Kx_dh;pQr0}%*`fm{m&j$t`$P!s
zINb3GqgzdxaF^en0vu+Mhs
zz(;v4uL*KHd*_1*0moI_;mpjdq&Z*wFw$jq4yWN`IkGi7Ejy3n3v7&^jV8Oo?GeYa
z9WB03Y>1GV-SDfVkWI`RVM+KBSS@2&=kF;ytPGLuAIOR}PMhFf6iL!O5OXiqrl#~<
zK|7pM&l1)nqa!)%bXU5f$;<)#+GMU8!*J|fV^(=x~L&>P-O+kUY5vT46k1+2_~
zX16gNu-lcMk%+|k&CUVsJLn3<=u?ZB@D9Cw-o-582fkaRvg&jS3P;WF6nS~2fhd-8#+);bje58v@m+*IPejX0n+O?;$?eq@)z8<}xcdpR-S3K4
zuPMz;?6_T%fsYk*OEz|&aoX!eYIxJ$>h97p1Ph}$m#%r!dUUTiJNUGPbgt(o`L3)a
zrl3uh_Hu~ZsNI*`WfaeH$TP&yrl+Tk`t~9jG>(OBDT2cgh+74m+7k!!#%Kc1(2;dl
zY>LU={IJD6>CVBV5Yc{Fp_}XBg-Dsmhl0Et<23uC`^8!LC`ES2Hu3^^7$u+2$QdWV
zc5WLUvl77p*-4F_()pRU2Q~q*5%f(h`Op(jx3b6NFpXMa2pJUo{
z7}VUpSty}lt>AR8EVI^|x7&~@utRfgb=j;^J)#)Ax?6jD75S;mh4(l&9f0;*C5LdC
zUt!wu105hz`X|e$X?$a}0X=ulffqnV=0_nl%?tPUSb)NscxS#}IC>{;vyH|5!8n;0WHHWm+6+3URA%zHQaA!avRCJ1gk1m4Z}2T
z#&Rxoxg+0tfSWq;)6Xk&=HOgIUiNXfD6VCc`P#!Oa9%zYMJZKfF9>eGV-WVff4lN?
z8{a1S6vV>|*q>OaSpFSvWB_S4&Dh|L-?lBPfVGVCzytQh9x>Q-8L#b)`*|qDflO}x
z2#cYphV6#mV6w|f1^8fjp1c56#xYC#aRMdvsMck<vOCnIcQ}#J{Wwpx
zq&~Pa$mtMRc@CuF#I?Teaxep3bj!w}#%u+)D~4tRE_2e8v|>4BU8Xba+B`abT!#d1
zxcV{s-Vu{=N%ld@vwk8_Sm_D
zQ>WVSt0}5?mUhfZdKJaom@dr3fxB%G^Q;B=kY4sKjOi7l)vGH*+1mCzOw}&&ki^kB
z=a!uKfW~+i)@XMUj>@G9-R~)iTe{t)mP`R>LKmC9$tU4MJMhOI$m}W{eH{&Rko6rK
z**8Tf#KgT36O4Ex?l(*?kvz-vUcof`KC#<$vcBHe_q+AqCYk1zuU2L#VY?O(P;ykc
zfAa&6&EFiHSd23xu@#t!y3q?^5(M(K0ts}7h-v?laU}htRf~{Z_!i*VJSdfVq{ph6
zw0U?^D)a7<3R=W$d3qogMg%^qsoQ^wvll?T7SrE`Ib2M%jB~{s4R>WoQ8!?QqRpD+
zHDaxia4)xRp$(}}Un$u
z#PsYd?JsBNAqqtK7SxXD3Wgez^9o9FPrbPoSkFx2x@`GurFRG@xkR+2=b5Ck3GMh%
zYj8PcfO@qG%3f3QE`3xfoF2&>9wEBq2W;-A%G{*`wMTX(?JEl39g6Rh%95f3xm&y%
zs-7JDh?@F+QST&6Ep@HHurIKceS1H%K=PQg&yGUh2s4$f%tW%;2@m#k`+gm^9A(em
zZ%oRcs=VgCBnzYAcnb4X*MdN4!;GXv>b6+=-*r@8H9Feco$SGlUr=-34vYPif5n^x
zgP%PuN#mv*>U+QaTa^7klGTbukZeHw2nQpn<&h?50B%)@MX}&aRz0GWQXp=H@@U}@
zk?h9tZQC!PMQ6TLdx-HeELjY(DH*R!<1s!Kj{PbF_dpBWpR|K=%Q5Uf-1Z+qA57?t
zelkG#tnxo6&u4BsN+?
zY*B!;62F)KdB}g|Gy1?sjS%NlwrnxLhqbEZ1VFDorEv#*X&-va_}ngLbjm@XKzVh*
zXg@%|0M9HNTg_S5ggIw(Cm}0iGC|X&F4u!p6dFj6^(omc-Hg(DVv0q-Z*sY(8d4~0
zYcj?IIp_z2_iN<~rosZ)u5+2EN{5(Ml20Y~6V(C9y=Q$iSp!ZE&+UpH4yg8}1@by^
zD=864r@mA2S*et0jNEG&W^td$4>MFEd5aotV``{w$o$+IWe@MklZFMqvFR4lFPoX+
zoN0Rk-tjzQ^jAG#SS@8#KZGpXG@LQ`*uJS{GVYj4T?pgPbK*=3+@IgD%)Z_vy}z~a
zo#oJW7OvWK@P%m9pEYz11SWl_@SdM3?SVRM6k_DG>@fSfe&q8dIHBypJtv$arS;2k
z?wi}-ns&{Y!_*N0C?DpFa9pzcPyJZJ@nMgfh^8A;fq4%}E}Nnw1CW
z>EyJ3FK*jov1tz>W(CW@kkhS>`r9Fs%!5WI?5lu$m+u~zS9HF>u7b=AI122pe*JOE
z40^cn@amtIw(~gE5LP0;2m{g>%oB4#7)-H0ED
zb3LXqCKwxCIq>&48D$@(kIBI3a%3|C(EBr7MM-M&+XlpJjGN6nC?rlTRiE=KCwJ
zdT=3e10`M?EPxxC)u
z)TUL!CJ?Kzt{H;uZC0SU7Pg{)6l;3>qt+6VCHs#X(YIUUQ*^^eW|)VBK{
zUK}p$tM)UvIk-n+$9u2t2=`7+ZaGyE)w8%AC$78El^#4DU!R`(KeU+@qN9pj;y<;w
zolWSIv;zznI^|+_UA(|ruw<*g#6iI+xmOV{R4G=vwyb<}g(6;-ZYgD=)HWJwtm-=D
zn}-iYVBGZ**c0Mezg2?E-P=We9or31}+OHrgV2&
z+sX&iJ9Ket9c;zw4HT0q#kCj+GBuhM5H(&U3R1H;zd;Y|;M8%hAu?7=DOfUPb@A|N!a&vX&ll{IJr&9398j)^eyk8BqR^%=(2FVMkjFp0@`B)GH}4yhk+O3)7cm14lnvRXmJD
z7xZ{eAq6(Zb{2r+j|0zd7x(MOy5v^BxNQ3~t(2>p_c*5S
zxJoo~pl823v2Q_`N&yobX5t8Z`l^&5_@EfXmWo>Y@QN|Uge^f;hAJFqsx4^=m!7QR4c9A0+xoVJq^GY8^<_&gzlRbw4*RUsjK*e^
zKhXd}4`Nn`jQK#abc~fw^*}l`m4>sh8aFiEW?0cH$q!}Js(Q^k2B`vuF0qbpxquC^2^q)9q$%i*Levqu%U+Ya9CLvg4ZO{d5
z_XE0gOh}AffeInIa<)6wh
zIoN#$$QJuaIP#Bl<#>BT*7J$EPa)KDE7-U8v-RH!Qr~r0!LR(#b3+q71vM_tIWZ}zw
z@YdMf7u~2CpYpo#tTOcEKl707AeXTxF8w@K@tR2|{|&%3bV=6dtLk-mclI9sUF%4K
z-p9YAd^voYX#vVAFCV*1w~HDsWAuona-}y%hpS@ykLR|tmNvrMOwA!}b6^VF(L}^-
zjKf9TMId7IudrH$5pfEp!S_3H3nr^=BI5>9w{fb=e~56W`3
zZPlF)XTh$XMa0AdA-n6;oUEbW?&M)~qYus&P&Z~=0c6l!x4KyJ4l0qjwdA?ODd0n*
z;wyv&{7C}>`>R{+Qhf?-9njiF3!?OvuuhtBVmKErc`GnQ3M^cR=w5g#@v
zJ1)m=e!v9Re6|0qnI!F8m5PB*+m)U?|f<+v8&mT339
zt-IJ66OzGcy2uYFX1&8$wp(u=HOB24z0)e@VzBSTez;e+S+dg?kxmZpc0mRb;4dmG
z@jIaRe~1Fg_PtefB=S9?_}e!7iS@GpB6TzRWXfOacKl#W3~jW|-
zrsm6?IW91(gK?65?h{SwQy9;-83vSfHuqj0q`Oe-Zrz~;>~*3a13m^QWswO`KU^)!T=k$_
zxj9E6z&zZ45~UX107|A$>h;Ddiz62PtaS&9XF>ra@P8o*|8>wLWO3H_3#{l
z?T5$4=cQ2AS;%3~1@KbSebplmb^8EPSrP!~nQAY`(%qug2Y+U~r1NBh0ugfs_8r?x
zi7eoTVMOd0A5r(HibCo{&`x~YRFdp=MmIapv6=8t021&nzvnIhW=4er>*;o;FckTj
z)9A!HD+yFa4;R)D)r0_%Lnj8-QEWN&<-;$&R(j=0YV#rzy>jX+*gL)LY4^9&FDX)%
z$;NYSbe~ytOJAkwwQ=7cHv`wk=G38&5qnV(Ff3sp%Uv;RZ}IW*6q0Ci6GegApul-`
zXUXVaj5v8P#s*|7D&+91-X0Sx?6`2;N}L(`yg>jBfN2`59}yMZrJq#`PIx1?8z!J2
zfmsnCo2@!CM1W6zMH^Nw`_wFf9psa{JEbv6MdY*`+^C`Ug;p(|`{
z9DVzKVJ5d7_j<-~67Ef)cGM2SPx$kN@kK0x6efRt2twh36rCt#p;*Tb{G&Rjr^$yi
z#~Rxtt+V-t8~jyKA9w1)>}-Q6)v4;D-9@ev;5*Ff=k!jSchB#57rK%KoEpbt_^5@{
z^dg`7M#G(oa$k(i4~N866rD;3P(mSjbU}2BY$NQKEY;o%A;e_iKMVt5k^N~U{fX9M
zFEWm=URwmsUOb&2y)3ygU|CxX=B}sBMb~%s!t|$heQYjPqIR0qJ}vxNtdFhsS;(a5
zC%}gv(LDGI;cN^5z*DyPTD^WUJK-$uPva$ABlx*!oajr}X(kHoa%)VnB}Gb2l*h))
zNtjdrd?$1U@Wvce#p*vQ{eg9>#J)`bGmJc*s?gk{MK0SCZZWv}y7x44K=Z5{I4ZDk
z?x^!Wbs^@Z&^Lm6sPb1`y&2)^=^u|zcTXdUC@-{~a=dJK
zf?D)46Q0%tB|n(T{bjjM(5RLqMAB3V-1$Nr)N#{ijd5;dKc4)_>K{hV_!=Nr=}DHz
zl+P~Q1
zF^i_0FD_B>7v*gF9xaQd3(x&-jofm}xJXYqRPDk_n9FvGCp3wjuD~^^R%vMALr|Hb
zGC9<0vUV&RsQKP;dHMXR#b4hRqK^;yHke6I+j}o29f>2SlLo=v{5eW45NZ6B8}{Z5
zQaZp@HU_gVO2aSC`i`k|zMwVMJLaekIidK`vd$ejx?m-(BKE+V#Ye?Ov5JU})kd=1
zWI2$<_rD{R-%ihekgFqo*7w$kB1-%lJNdBPsi2MlB-DLHytc&T3g?JIlLbjf-Gg7S
zI(cn@j+qmaev6{*9w+QfF_BuANhgyi%V%PQDH|ktxyHed%gB#MPoM#8Pe?KL>)GvY
zw!WG8CMw(EYgLn(?@k2(EKMy0z=@ReP~Y!$Wgd#>*+g?)Zge-J6yz_wo2$xorpUeT
zHM-=~>_sa_?=iDlsXgCsirO;m)uzR2W59rB^MAMF&h766ZaLsFz#)H;2h7+e4MgCj
z{LhyDGRwf1pE*6w`i%udq%gkK8*r~);qZ({B~33
zC&vP)m@T#^(OFOb_$uZGSxsWrP!yz+r}3%_(q~8^B8J07LbtCneTe<*^2o_uh+pVC
zeK5X|o}yChaIDec5ir2~v>djvE^9I5ba
z1y;31*B|C(ol|OL4dZln2BIu=SL^wdJhSTbD_xp2(V_qH=(t5E7T5!5(o#lC{9$p9
zInWa!#1gy@;1+#_F_O(f_<`C1=vFZAXKmyF&4qq=l$&n
z-?{mxSw>IUC!?e20sDd~YY47VWXW}yL)TlyTE0JLvr5d*50^}zX_p*YP|6HfC+TFM
zIGTN%g>8!~`H$uK35BH;^i9cKSZmbZ^a$xNoLyuMR_884?~lgBI{Z^<+X*|f>hg{&
zp#L|bi2_Q$L9EV^;eWYTV6I$e53ftpO6_;?<^v_ZmL~UFcV8@9*gmK$H5lohqMHwY
z_s`wCpDUEs0<@x8@HPSU?{@K+QB5K`L(BCN_w0Y+I5dUf-lbuv@*FWN$L!LA`VHW6
zNgz;?I|nZB4Sd%=GvuPxj8#giTtu(YJyqMGC&!655BUMP`K^i4-pko1!Nm_B{&Nv%
zYCG=%20q{`Jmp`~@ovdw2J(XTUJ(mg=<>zB(KTw*x<($dW}$!9n!aE2W+y{jPG(Xx
z9A5^Kj5(sYZ^vZ6@$;VAKQpo20`%(cCi7Fze?&q>>7hr3Y?dg{U#@fVTrBY;tR7Cn
zDh;F0VoXrKG!+=j1pt_Z0fp|MgV6OeRxbZQ(BlBx^x`x~
zqnGgKWDJ1{_)j@F|6p*o+pW)H%d1Ws_h#gKbbDD(V>y$~{
zrxC;NgbDcp8f)<`XynsBh;EZgC=&veeGO753ObY^c8GwCT%73DYCbH>2c}|?;DQzc
z3e+hLoeRBKmaOiqAp});{8#+*%P6r00uY=kDgTuYI;HqB7W-N2eIVKNdfzS}E40HR
zM0@Eu6_h8OEy-ntKj(!9tPsnptSzcCDYN_~=%zRMKb$#H&dzZF26!@XuYX}+MxR?I
z1V7cwt)bC$2~BN4{4&}3A)vCO=8w^L6<=k9$gSht8r9Q2KD+-1)d1gwP{7UYzWA(n
z|8NW2nlO^X1)Ma|1Kh7_0Mc{%U4>^od9Tw?bMy;=X!JF)%3g;C1N<5((nhxE{#G{g
zyEp)@a039Cy=2Pu50xH3;ILTIJ*q~=B&&TzRyu}Mmi7xwwIJfLcM=Im3U^wmMTw|W
zI%6h97nbMKyV0CuOHV=2nRUJJ|6~`|(WUPF-NyeijQc{82Mq1hXldI55#59hb<+_-
zpsf-UA&1twL3&Sc_h`Kp2+_ipwBL4BJuN{U8}WZq8*2byZCAVH{++~(Nsgq6oENP1
zG^&`2kmi==4QpjfUWPR4m~*yA+|HteP13JR7VajLI}7svuJ6AIrUspz%Faqy`rpXo
z>gW5xGTw=?{^3umg9WG;MhQ
zx*LQB-85Bhx)3}8GNK=mG`Yk9+!x8g8^)c^_HdBK9BGj(!zwd-x%AHk?;p^Y0|H2O
zOudL-n(*@>K^Hq6cRIxZKAd55X+Bn=X=U&}a9&pm_e$(kqkPr{s>5yUH-4=TYqems?l!`$JU9nV?ssCuO?5=D*wi=nS!4im}
zV&j6PT+)Ww!ZYHm@9k7(=J=
z)V7-xBp7Fp;G~KtCU!3`X0EbT&Ok@P_6I?`gQ5eTGWrU0>ix|Q3D}jR5<1_&;ZVev
z8`d4el)4UwOB`2$&L^h!47xI7_dR#Mi#8Tcr4e#{$gB*9z;dYF`?j;^$|v0tB;Pa=
zVXGR3i183uSTO<%`dO5=zMj+tqMmeS?=VIGfK1Tn6IgbZ#nF|zhc$9moD|O6TrlO^ivWIW~^l|FlcXhg6-~aNmz_|Ze@7IB5;lQHp3^i
z=U}>SwpDc7q&u1gJmKPIQFtj}o3P1ceVk1980Y9b-Whf3UQx$c
zdf1HztrxyOXC=9yDtGU8$CcuUcC{95^Dp(;8Cc^cwNb5jh4|aCw6RUioYsaKa)_(a
zS75tQ`}powoZb1A+e)XHKi#ZYHwdg(hU6ICVu7yM}%5{r*r9_n|i*Ll5%)hym62>8DBuU
z&A*Ml9uFTG<3=nsrOzEThz+}|Wvxsl&zjL2HQ@ypdiTV{=IvFE@H~rR2NH|~UCY*I
z{T48V_o^mIviLrJ6S-#VC#=XKSZ!pmS;Z>ITlA8CrwH4mKl
zFr&)m`{W7shremq->UvCt3shbFm}(3`R0mra1-xcG740OHMen6{$r%Fbw^&%9#RYT
zG3xWb3sat8+Xu!}+h`dZqYAE)VV3TVZR5rj)r7)>k~=u_gL;N7qmsbx(@>RhpBx}H
zl0qP1GTFzs`w`F+5VryJync5Z4&zNC1CyCq@cc29;)RWA3fL1LidGTl^KBYz)Nj6S
zB@wVQYan!fg58sR)Ba}Lgx+(odZ~Y$ybx@6IusuKpiyPp9V?I2VFf#avO2akkU;LP
z2gKT3B#Tx!^%JR8N&=4*t(9_uU*b~}mzpq>oy8fk43NwsT4)7W)zN)(G|xM{o_fREXRI-wsLC{bV&0v*`K`m>LGcJKjsvzx
zNj@}V+DE=kWv=kDyEMePmrU)pgnY=U7<>?PWqJ2++Q%6_a*J&CcqkKc)(g(9SQH;y
zepSNQ`eIcqT^W;4Z>LCPVI
zs2lur+h8szt9I$uKGa#S0GGpwva}MWhU2g78`2TjAAa^>W8AEv
zX!H-}9Un&}lnbW*vZV{N4JJOLNR5)p%rLsqWS#+ZCPkmgs?N+#2nY%}>Ncu>RYKbO
ztltOu<48Gac6Uh<5)57_wq*5mV|~k4FHlLK8V@coD4n=v7}Q=?Ir9Dlz}M5&IbjiL
zc`8hV=MIX2zD1cgNAydQX{zFHpy^=LnmW-Wn34o>!Px|HI5no~&z9%>W%W>KHNU2R
z9^K0N-Ckt7Df*k6`adT*T|0ZdD=Nwpfjw?%sB?@0a8g>eYN#Wm1@e8bas+aPqCdDh
z?!41xkVF0Q^Pt*RY63;@;&7SfUs+L<=IjgNzpCRvk(xd}N-^Z*T%GQ;E4!@>=-vw2
zNyR#*5Kq+Y9aWr;0x&O<@6pv;NJs+90~*{upj9HY1Exy5$|t^zO7=TXR1RtsQgrv(
zRx`5t2hQUFLmwzq5NpYuYI;gg?Yq_?iK@Shbu>#}Q$o&|<98fYV8ec=c=
zxPpf@RNYCQdm9v8SsmiJPD0dXSOq@krPyw$v#~(qQBZf&kR9jQerOJ3;y(N=p&c}6@vjH1!p4Y8Z^`FCZEt+ky-6QT5W@2D^q0yDc0D3
z1FiM8s>N%!LJJVsRGQpXdl2IVPt1;Oe;}Q2@MG@NT^Q*BO`Wd*KE1rkp;G|h6Rp+3
zAa%+eSHM6Iw!KgH=>9DQfK);q&;l4o^?x*EJDiQH{^DVu$zHj*cb_~0f%Man*%8eY
zl7b_spu)GNLt6$b{6tCK&z${?|3*pyp5}
zFiYUh7N!h!hj@}BG=tB79Y?Mh|5c`Rp~$-bqnQRpK}}T`|8;Nu&QTHtFM#LCq+cX#
zHAnBcy%bB0^9uM(Fzw7OQ3f9w@9-jl%}I~}afO+VVJ4LK;j$~)u}Y@QU!MYlS*4;B
zA{(N5vT6VLe{j7EScg*Cze8KMSuP8v%1ZF%9#2Y$;PzL8f4<~MYQcrfpK>qkp`xl2
zJvM^f=4?O)7gYf@lF7YPe{w5fu8?lF=xv{`n7G{XF`iB}27_`c}qAXZ8=4#xIZbzsE*gY(GPajzV}&OpA;DV0|H
z)0J`W`pQO5btYgl%rY#$@|O{Lj_u4LawO1Fc=u+nFYfQfJ8cAO#zLgpDS%-A?jf0#
zq-V!UD~oN`MUD=zb26{NR%bMt26!Vc@4C;Lx$f=+i3Z)
zI(K(hgfpt+5r?h)m3&T_n|C`-PxI-iQVcYQ8$%5^OOgxaVA&teg#;`Ge2`tP4iNTz
z8H3xA8yMaK3hG(3TWvi!d1yT9em1l@HSMH-y4Alo4eiIqMW|F+At8ZWQ^TAJ;f4zc~f)SE|);0Z0-Hqz*J@FI-QLTX`1xfa=gAKV+_z)8}GNkEd{RR8#4C
zLw>sCh5v{OFAqJXp`~}MzrvEtL+GHJT5i?R&FZ26k!0%MpLp->MAixj1k+u6vz090
z0fx*{Ju@~p#nrUxE7{PM1^GVBVDFFZO{FQ(Y*0)ySNgdsVC3%Zg$NRi?XX=-){&DDId1Klis)z*a$mOm$lrJS)k`^g|*MDomZ3=<@AUTKCCG3F*2F?AFl0h
z;58?(Z%$p>(KxQ{Ef5vN5n#|NpG0qM!@Esu68#2{|7lk67L&F4`|T`xA2eFx-t`$M
zeqsx_MHg)Q3RHsb{y+Q0JMiAIjD4^Cw>|2A92ZpA9Qi-{h1_mnMJ})!Y{kj{^HabE
z>Hq(bb_?nL?`}}2Giq@U^J_R3!-z(XyPfnDIuUF;m*ky)7Bs$1$eYc7*w%SS_R{=w
zq2bmTA3XtRuXKiBojDR2{$=`hIGO%7L7Q!og7OA#0n?+q_XWQ0?QQ7!*@a^^UGUL*N>&QJh+_m_^}=Bhn&ra
z7i^p4Wtp1E^OxLm`s9}z6>&TZ@j;_%ec17X`-V-ZPy5-==h^E`grEE@pv`~kgOvCq
zOaCr{!{R{Zc!KDnjrB;qWHCR><2wbV^TNN=3~?Tn1&b=Ygm@ky(WR+e|9qJ=Pe`3=
z720PkOM<_g1F(|0$Q(nJ9UMA2C&2R)0S22gji|45=g;ivo4{rK@N~*CF2pli_a8Fv
zxl~$uNb=aB7EJ2Nqx4m-&JN`t&vTOrm5xr-eRqEK!BN+^qqDt}|A*U$s@Dhe#?1BX
zGPVDdkK>D3jF=l3#3tuTRMf-`uMa+8*EFBj#DC;6eB>Bt9rB^rI_#dRaSvq~=d^6Y`*t$0K?6;>utfi`=27
z)L{k{fA;sxxR`w(&(ZxAmB4e!s_k@CiDAh3f_q&mZGVQ|OXJ0#RUrvUfogBvOmmGN
z6Et@vI1k6bb(;uku7kP1IoONUgRRxf;p%P9-QIo}>W%gw#31Vtm2ovkDj
z`&gW(FPVQkQaaP#)#AakG&S?5h`0JOIlp2X>G*I;gjQFll2@!
ziyq?YR9tkm9M}wCrcijc?mYO^k?GDizdM=cCVOw~u%MHmYz5v>)K1J!g+f$+q~+Cf
zhf^4LA1hC$DRP*Vm+?4low8fR^u)0$r-+$Y@f0zW&fM60nX1uK^eoO6_qz1%W{n^T
zB{>DDN#U_gK(f!bvEY)eWJwb#)I)2>T&`@%rJ43xM
zKj@&ZhIs3>@8yksv8Jj5t&co_=Fz!DVT(Ip+MbKdL%t;Z5ZWO=!L~l{bHYCP(OTgE#&SQe4bn
zQnA?4<$b|I$BWFR=JcWm%76Z3%W7zJn2ffvBrbRCH1Ulwf05eV#{9|vSv5`EuZhUqOl$JykDktP~iP`@$&MOf>+9*2AR^QZX
zJ}+;_6#JY>b;+_XC`S)n19;*w>$IgsRgfZW)G_BIauRlVYB
zXnm1=?^CDXGWt!@cz%uTL~mbTA`lCF!`XLs$aZc1U(pZ!lr=lF%emHA)J?N0X2D79
zap2>Isos1nSJw?iQ}Kn64680$j6@mFLaTqKsA8F@-~E9p4)f-PkLD#evZ4dznJ9gF
z_VPtB-;#$i!wdbW053ZDLz}wCSt@XAkQ^xB?%yE#!puzEO&$8(;mpyc8rvT6rR-X%
zsK!VcS7mKAdgqDf{y>Be-DgyuG5iAjVZ#oqmf+T;`EPz7iwdSEJzndd`^XpWn$o47
z!7VoNM!oOTF3+j9s1(@%T2m(*$3XMmu*;3wrUq`HR`P|9>{Zr{}Dt2S-~=S4qLb
z*V9XVrAV5C&V3l!SMr`AN*&B272N9WeH9n61I->noR1$|?wzbHD{ZV1yBjS%C5&qi
zz`1E-?3|4Z(e#p0F&R;Tyxixste}Y8w6GxBTd^|lPmuD5DuTEM&;pBrOY>&h^Uv^A
z1@rfB*e}m0SGr#Aoa~yLbQ=8J|2frLQP*jJdZ4%=`(sk77)xU;+qB4ih4d61WW`B!
z$P$eJQ~vC&JTcdFTw
z;5Ponz^wK(Wsg|kl~&BIpY#!~-&qzXuZqetHQ3B7SChX+C*Qj#|kvljlSZe&m)!6wa-P7Eu*ZlaHsNln;wL;kijdL9ZdbQ8+
zF_nopzQVT2r|iL^a51Mg)jUprlh#}=t+(wAH5Y)y=Ps#Q;{~-j_YdUbAMeNeP|6-l
z{o<+FM$?r&hwXi=dVMyECD*JBRmak0CYy_M?fO7!@Ak%8y?kzVIwP2|@s6K$=r084
z^xRCMk-Ih~Q!lhJ+k+`20p4;VBPFQ+z?2+Xpgl0&tzwJj+pO
z#g-xF*bduex0*912lanRmQRRcV9m1HVuDLO#k(@zljVx#*jZ{k9{1JCs3@w`cC1X+
znjeQ4_aV70ZYF*#Z$8?MVA56lGH(m;hFp^aAc~0X2__+IvR(`{Dj`LQA+wincKl2>
zEbI(%t{MANNo##uaL|vWw(>w{SCY8aLVZ_Bg6RAn(G0Vqg4~4jz9C9#?`Q6ZXfR46
zXAT@_IVR4XTxcXYC~;a*lhRl^*y`TTV6$5topUOz(^>RlY9BMqWVE}O=9xXB3g-QN
zWuzGYZ{5Z_h=cYSAg!fRg*OAFk
zdg2`bp{i;$^Y!=F)OW{M%}v~td7~`u=v2^Cqj_-5^FugiJYn47>yP8&lS8)Ee51WT
zX<`;1#gxpA8>at|*u%^bJXtkq&@0L#l$YrU0CJxT%Yrq4WS8J%Nb!%H8O)9G6-G7%
zxOg;kY<|m~1}&vw!hC@Nx0beYz)f0%5A+%HYFP_=s_|(d9jYQ12Q$G-3ruT9DMFTv
z`ttC>i3|Co%*&}0fkMuAA2qcZn&$I#&df5pwa_U=M&^r|*N?T@IqNHz2~ncyeBTZ#
zmOAGxb`(7*N7C6X#`1JAv@BxteHK+SXOd~7!l^991S0DuTAZEqUt25j6YO!1bzAt6
zm<++Csa>8&X5t*w8_YfmB=?B+a4XZQ!{2eHDR-rw?8Fy$(V{Dqk~wR%=VI@h+xgcP
zB@d(r;X4x-#oBG&i&?t#IZr)*jL#PLXDR*lLU}sNEDs{T`|~FRljj8NCvnxzoR=oi
zWIJrhE|AK3AiR%Zlas&ihE8elo<7Ecj>BE~HaEvG$k1BD`%HY|H!3f1=i>GyC=xq?|IsFF|9a2Ea%hLBbNH&N1t+!{du{ZpLR+mK(TBQYahap
z6?4NyUMfEy(^jKcJHGNPny~ji>W>;M||CE+e-9`K#SV<*=ZFg($cA{WJtx(xoDG
z-x5{!+M(Wwy6Qs%91LnZ4CFbDC3@v)D1XHr%|TdCu2~r*5(%FU7o*ZK?IrzE&{Y^_!uSvvc`<
zRiW8YoSj3}M{_HOez^4M+8YGw<)PfiUn(XS{6sZbGjiOlVWgZl3+)L>v%fnXt8|47
zJ9EC*Tb$cOZ-7+*$nJr*qPDV)SU6aDpCFWyraxqKuqZE#)%?+%tkKdB#_)0}<)tUZ
z`Wy9hXIpiSisj{hZJS>Fk)mJv68HqSZrrUX@=>%{bgNAC)U={ZO4X~K%Zunq
zyP(F-$E@dObF|+n{1dDtdMQ6RX4K~3#|L-&Gn{6Tr!No1UBN*vt8#!Ue~OCt>t})lcOFKB25#g
zq*=u8Npv%Y3)=^M7#Gr8suD7pdzZ=MVos@OloQg*XFK1I)RMoGL?~s+<$Z7~pkvYJ
za*MrEvSWFpjEA6DN`8yA_EH-YJ`0ylL(uPOUUsZ~e16iel(W&P*FK|I5S-fD8vTqu
zyVeV!CQ*F$o$fS7e`%2i@?z*b@6);{FW!IjFw5-g{rO$^+Ar}pzO?_oPakqvKWZdL
z^uxiW+Q>g$+9GN}8G}i02bD@+vnv#T=lJTc=;J6f&(w_ZWj`x?ZfW+tK61SLO8kl?
zkW4X;UwTRT*`VTfvE#|422s|Z+J9_?2-UDM=NWJ94-5orN1H^?yoETi?=6l&rchXd
z6F?QgB75YJY!m@o7azF8v7}BWh_HO8wcMQIvrjejOWH{-e0kbwMGfUZIM343Rmz-p
zmAt$5xdg5P77KffY#=;`Gk-dP{YUso&ofvtT%wcGVc|v97?}Mn3f}|BtbWG=_jFCq
z7Y|Z6W0f6;KI?`G7{(@Y7KAudUo!6;F7@pg5l_hCTKWagpd~m?ng&xvRRzQtqG;*LIdQpZ_@6
z^aCE!B6sIortQ~TT$o)~*3`$m(dH}**^1q8?5TQKh==^(rPKv&hKuKa#-u&yO(Lyh_Q`OoJR
zHS@;G*(Jg8>erIx?;s-3!i-O`@n}JnZuHR4iVZXE(*}GKW^zEtbx5sJqeAfe9oSlQ8X6?ywz(-}2
z(&ras**>OItBE;eT9Am7`r$&IB8PpcZLOqXTVn2_soACab@V>Vym5JcTs6~{
z^77QGScsjohLl$OO|#y
zze-$K@c2_3i{;!~@L>O~#NcDHnitqRr>f@sgnyOk2DDXl?qTs@FX$WbaTLGt-X!WE
z-}OL#vp%MrzNY2egjOX1Y;A7-c>~W_*GjP@0jx1vSww(+ypoe3BQ}x*!11$DkPy+L
z<>YvmuLeGv`KO<>2z$V5xlPV{3Eg^oay|9{0g>AE;usCObz*?
zN{eQeNpq$>&y7SXYsyN|%eI$qM06D4$L387Y%CYMpP6I1`4O#bG{^y
zt;|sa147>0t5xkZRuKtB$mj{nb8lrizTDFg@lwLJrSWLZ;rWxI`aWTEZZ?Zge{k!=
z@1ON?gkrzIImtz`G!Z%-e}1zkm@PfB+6nLN0dF?4|827=d%ZA|{?E9vF}|eMXo$at
zZ}dUQ>6?T9>QOul<~1}>J~Qntd4jpsnU(oEN!wYK)P%?S3lot{Nyd_onwklV1*OHO
z-&pG2RM!_-UTmR7I~9HI>kZLE;j%<-Te@(q~DYM_y
zxgJ=5eli7+yKr-6zja@-Y`mMgNtbB%uB8&&*As*xF2a`qt8fr{~=Ot$`~nXC*$0M8OxRa
z(u<4b(WakoND*qCkmag1Xxo_$!)*}YJ@l(dh*&$7
zq!;Gpu(<2z5kA~!{BhOI)6;Wl_H)9Fol=CTNpn5^86nuYj?kVo_$_lvG{SMQn~Sbk
zzD9T{v^^JOtZMjjCLBn0=>DZ&0)iNyy5whAY)_rD9j$dM&GE#?wsg>jEY%nn>$$ui
zQ&tvVUTDu8s%l%@?L;|9mtX5Rsp!&_fMap$ekzo1Csm&RtPO0&08b}FOILgI;4f97
zbnFiek-EpMSlL=B?aQr$;s4-rLJC4_M{{rv=mJxBL(|NROz>6_4H|kLJ0ooJNCC?q
z&j7bldY&?%-EuNBgW|_-EdKdP2PCD(xX=PO-3ZbR7vztlwc>L_}qAUZ<5h3
zzC=^^i0Ks9dFW)wmheG>#
zgH*W~Bhb$b#*l#oCbE7>esG6EW6}!HYFS-NHl8>&Oh~b_tiZ}XoS{8n(
z(=R5Jxng3jwyTayWIl3G@P`|ZkoooM{Dq0pzJY4b>>y;bIlf#uP%{_neeZX^X-5Z_
zm72&W&-Wi2qpV!4Km4qArH`!0R{cHo=1a`DscCj%|5)(1CZA_}lDwyw_-?vQG3JIJ8ZQ;dYa~}-q)An@Ezi;3j2<(f|Rg8HdtUvH`>>bGm$V5dpi+V4P
z;Pe_qoik1B7aSHviw*PV(k;V2MP_F@@^a0~r=bav6tM;9T)t#4d_V}AO4hE!T*71|
zTOH?_5l;0mJ-5$=vhT4c&;B#|^C_W2dzKC5JF?H6zC9}?wl{8;ukVIFGd+Fac{p2?zV4Zu&Dh3eEd8?=PLq6|9;UgA9e(GHEt1+2
zHSLiaOXHEfHg|tD4qsAhj*l_9>pYVOMb)rePAmnopN7XeRsDuzzhK^a#E&@nL$L3d
z(qFFrJ||gMO?-^-SNR?TjkK_rx4wF+;`HwZ*|;}oYKk6SRg&RHh#WVX->AMa%Wu#V6
z%$SxnhSMdQrjo)v+bt_E%JMu_K&iECmxMHIASCg720%NU)dbDdNe!|0vLU!o6s~Z>c7R
zq|2%Wn|Z%7ICC~(=3#q7r(#e#Z!{F251DgI0mpr}HFJ_uuxX9=E#Ua7Yx*A`9
zx0a>Pt0l|fEyc|=&sQxVc|^lvM@+Q`+d%~)>h-zqJT!qC1jDYk*f-l};0C{I5zFFV
zTl^i$Tk`O=+{7n3|B?z5!~?HH6u9jVXg`p>csgpZk2+BZ9w~EaA`~4xMVaFIO&w_y
z>6*k5C(@TBRfL&eY-(!I`lkV0@CIO&-K}2(v>c6}8`~ZS0LFYcdz?tLv=>gO4A%r}(;7
z$oiUyH6D+Ti?DQ4w0&un^cGlJgiaMWqVv@s$wgN7Mbqv;u)eDxrzp27+?*K9=me7A
zYZ^|-n$7<+yq{3QL=d5hCt%;~k-v%wi#kSY2_U!pz%gn!8PK#+6r1TMc=P^GRAe4-
zOY^4#njf~cA5ym&do$zlQg@>Nq@Ilh9RDXq#kO}UQ>VxLONG%n4t$LIh3oI({SgQGGfYUez||C%N(=<}w2UjEZb
zV&n8*H?QBL2b%mL&(;R#6CZhOA@rs-L7{tc1(0uOQhdwBWBXVC!zcf5!!{9i=MD4P
zJ0|}7`j}n!r`CMnp4B=4mE;(Xw;pGip?rZ=
zcB(5V^bkE*QjN?gbsWz5dQ5O}NT~0JMwLw0j`dClKz|uR-GavEJe~W8AL2x)GK?O1
z{;jr5O-(QDOilc{b79rm*t`y?v}(8>$ns1|daR7^y$TX(f4R;k#W^XNhwP+ZNMAtt
z7$Q9Sn^+I=kxUIO+}BJ~gn1_h%er6EbnLIF=um-bhItG>T2I1Kee5B+#oC6hF3)PV
zgQl)pQrc;Rm0M_AaWmk51QMtT?9okW22kB)GQxz8G)qkJQBHi?N$d?YhO56u=D1h!
z7d7?u(yN~jO8!=6qF$fzT3k*};|R)#b+GT!+oT<#;cp&6*B5IU|+8x_He^+}*Ai&Kb*
z9|^_&I6Y-l-Ra|gVu
zl^R-A+H_2PL%Y@;CP7N5Z0pX|^dm{Ejv>K-;4cl+JJwnQ{EkM{J~2rbjQyMg6$Wb_
zTzR4AW(h|*fh5OyRMO@Q4^R{%gzAW}o>fr^aMPd|R$C_mrzA+)w-~r0yZ*`g1*bMh
zFPs>>evo+1yaS}97Ns5UhX1}n9ibx!YT+)w4%})Ez_eINEk#M!RU336Cjj!!lW!ZW
zy?hj8?5iROIfm(_F9y