diff --git a/.github/actions/setup-create-base64-upgrade-config /action.yml b/.github/actions/setup-create-base64-upgrade-config/action.yml
similarity index 100%
rename from .github/actions/setup-create-base64-upgrade-config /action.yml
rename to .github/actions/setup-create-base64-upgrade-config/action.yml
diff --git a/.github/workflows/certora.yml b/.github/workflows/certora.yml
new file mode 100644
index 0000000000..9f8d223370
--- /dev/null
+++ b/.github/workflows/certora.yml
@@ -0,0 +1,53 @@
+name: certora
+
+on:
+ push:
+ branches:
+ - main
+ - certora
+ pull_request:
+ branches:
+ - main
+ - certora
+
+ workflow_dispatch:
+
+jobs:
+ verify:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ submodules: recursive
+
+ - name: Install python
+ uses: actions/setup-python@v2
+ with: { python-version: 3.9 }
+
+ - name: Install java
+ uses: actions/setup-java@v1
+ with: { java-version: '11', java-package: jre }
+
+ - name: Install certora cli
+ run: pip install certora-cli==7.6.3
+
+ - name: Install solc
+ run: |
+ wget https://github.com/ethereum/solidity/releases/download/v0.8.10/solc-static-linux
+ chmod +x solc-static-linux
+ sudo mv solc-static-linux /usr/local/bin/solc8.10
+
+ - name: Verify rule ${{ matrix.rule }}
+ run: |
+ echo "key length" ${#CERTORAKEY}
+ certoraRun certora/confs/${{ matrix.rule }}
+ env:
+ CERTORAKEY: ${{ secrets.CERTORAKEY }}
+
+ strategy:
+ fail-fast: false
+ max-parallel: 16
+ matrix:
+ rule:
+ - ccip.conf
diff --git a/.gitmodules b/.gitmodules
index e3d87871cb..21fec76324 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,6 @@
[submodule "contracts/foundry-lib/forge-std"]
path = contracts/foundry-lib/forge-std
- url = https://github.com/foundry-rs/forge-std
\ No newline at end of file
+ url = https://github.com/foundry-rs/forge-std
+[submodule "contracts/foundry-lib/solidity-utils"]
+ path = contracts/foundry-lib/solidity-utils
+ url = https://github.com/bgd-labs/solidity-utils
diff --git a/README.md b/README.md
index eff798d46b..db9ffe5360 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,5 @@
+> ❗️ Forked repository of CCIP contracts ([version 2.8.0 release](https://github.com/smartcontractkit/ccip/tree/v2.8.0-ccip1.4.0-release)) that includes modifications for developing custom TokenPool contracts tailored for the [GHO cross-chain strategy](https://governance.aave.com/t/arfc-gho-cross-chain-launch/17616). All relevant code and tests are located in the [GHO pools directory](./contracts/v0.8/ccip/pools/GHO).
+
@@ -232,9 +234,11 @@ flowchart RL
github.com/smartcontractkit/chainlink/core/scripts --> github.com/smartcontractkit/chainlink/v2
```
+
The `integration-tests` and `core/scripts` modules import the root module using a relative replace in their `go.mod` files,
so dependency changes in the root `go.mod` often require changes in those modules as well. After making a change, `go mod tidy`
can be run on all three modules using:
+
```
make gomodtidy
```
@@ -254,6 +258,7 @@ pnpm i
```bash
pnpm test
```
+
NOTE: Chainlink is currently in the process of migrating to Foundry and contains both Foundry and Hardhat tests in some versions. More information can be found here: [Chainlink Foundry Documentation](https://github.com/smartcontractkit/chainlink/blob/develop/contracts/foundry.md).
Any 't.sol' files associated with Foundry tests, contained within the src directories will be ignored by Hardhat.
diff --git a/certora/Makefile b/certora/Makefile
new file mode 100644
index 0000000000..0e33459cab
--- /dev/null
+++ b/certora/Makefile
@@ -0,0 +1,24 @@
+default: help
+
+PATCH = applyHarness.patch
+CONTRACTS_DIR = ../contracts
+MUNGED_DIR = munged
+
+help:
+ @echo "usage:"
+ @echo " make clean: remove all generated files (those ignored by git)"
+ @echo " make $(MUNGED_DIR): create $(MUNGED_DIR) directory by applying the patch file to $(CONTRACTS_DIR)"
+ @echo " make record: record a new patch file capturing the differences between $(CONTRACTS_DIR) and $(MUNGED_DIR)"
+
+munged: $(wildcard $(CONTRACTS_DIR)/*.sol) $(PATCH)
+ rm -rf $@
+ cp -r $(CONTRACTS_DIR) $@
+ patch -p0 -d $@ < $(PATCH)
+
+record:
+ diff -ruN $(CONTRACTS_DIR) $(MUNGED_DIR) | sed 's+\.\./contracts/++g' | sed 's+munged/++g' > $(PATCH)
+
+clean:
+ git clean -fdX
+ touch $(PATCH)
+
diff --git a/certora/confs/ccip.conf b/certora/confs/ccip.conf
new file mode 100644
index 0000000000..003f089502
--- /dev/null
+++ b/certora/confs/ccip.conf
@@ -0,0 +1,18 @@
+{
+ "files": [
+ "contracts/src/v0.8/ccip/pools/GHO/UpgradeableLockReleaseTokenPool.sol",
+ "certora/harness/SimpleERC20.sol"
+ ],
+ "link": [
+ "UpgradeableLockReleaseTokenPool:i_token=SimpleERC20"
+ ],
+ "optimistic_loop": true,
+ "process": "emv",
+ "prover_args": ["-depth 10","-mediumTimeout 700"],
+ "smt_timeout": "600",
+ "solc": "solc8.10",
+ "verify": "UpgradeableLockReleaseTokenPool:certora/specs/ccip.spec",
+ "rule_sanity": "basic",
+ "msg": "CCIP"
+}
+
diff --git a/certora/harness/SimpleERC20.sol b/certora/harness/SimpleERC20.sol
new file mode 100644
index 0000000000..f9d14a7ff6
--- /dev/null
+++ b/certora/harness/SimpleERC20.sol
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: agpl-3.0
+pragma solidity ^0.8.0;
+
+import {IERC20} from "../../contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol";
+
+/**
+A simple ERC20 implementation used as the underlying_asset for the verification process.
+ */
+contract SimpleERC20 is IERC20 {
+ uint256 t;
+ mapping(address => uint256) b;
+ mapping(address => mapping(address => uint256)) a;
+
+ function add(uint a, uint b) internal pure returns (uint256) {
+ uint c = a + b;
+ require(c >= a);
+ return c;
+ }
+
+ function sub(uint a, uint b) internal pure returns (uint256) {
+ require(a >= b);
+ return a - b;
+ }
+
+ function totalSupply() external view override returns (uint256) {
+ return t;
+ }
+
+ function balanceOf(address account) external view override returns (uint256) {
+ return b[account];
+ }
+
+ function transfer(address recipient, uint256 amount) external override returns (bool) {
+ b[msg.sender] = sub(b[msg.sender], amount);
+ b[recipient] = add(b[recipient], amount);
+ return true;
+ }
+
+ function allowance(address owner, address spender) external view override returns (uint256) {
+ return a[owner][spender];
+ }
+
+ function approve(address spender, uint256 amount) external override returns (bool) {
+ a[msg.sender][spender] = amount;
+ return true;
+ }
+
+ function transferFrom(
+ address sender,
+ address recipient,
+ uint256 amount
+ ) external override returns (bool) {
+ b[sender] = sub(b[sender], amount);
+ b[recipient] = add(b[recipient], amount);
+ a[sender][msg.sender] = sub(a[sender][msg.sender], amount);
+ return true;
+ }
+}
diff --git a/certora/munged/.gitignore b/certora/munged/.gitignore
new file mode 100644
index 0000000000..d6b7ef32c8
--- /dev/null
+++ b/certora/munged/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
diff --git a/certora/specs/ccip.spec b/certora/specs/ccip.spec
new file mode 100644
index 0000000000..1eee1bebe1
--- /dev/null
+++ b/certora/specs/ccip.spec
@@ -0,0 +1,113 @@
+/*
+ This is a Specification File for Smart Contract Verification with the Certora Prover.
+ Contract name: UpgradeableLockReleaseTokenPool
+*/
+
+using SimpleERC20 as erc20;
+
+methods {
+ function getCurrentBridgedAmount() external returns (uint256) envfree;
+ function getBridgeLimit() external returns (uint256) envfree;
+ function getRebalancer() external returns (address) envfree;
+}
+
+
+rule sanity {
+ env e;
+ calldataarg arg;
+ method f;
+ f(e, arg);
+ satisfy true;
+}
+
+
+
+/* ==============================================================================
+ invariant: currentBridge_LEQ_bridgeLimit.
+ Description: The value of s_currentBridged is less than or equal to (LEQ) the value of s_bridgeLimit.
+ Note: this may be violated if one calls to setBridgeLimit(newBridgeLimit) with
+ newBridgeLimit < s_currentBridged.
+ ============================================================================*/
+invariant currentBridge_LEQ_bridgeLimit()
+ getCurrentBridgedAmount() <= getBridgeLimit()
+ filtered { f ->
+ !f.isView &&
+ f.selector != sig:setBridgeLimit(uint256).selector}
+ {
+ preserved initialize(address owner, address[] allowlist, address router, uint256 bridgeLimit) with (env e2) {
+ require getCurrentBridgedAmount()==0;
+ }
+ }
+
+
+/* ==============================================================================
+ rule: withdrawLiquidity_correctness
+ description: The rule checks that:
+ - only the rebalancer can call to withdrawLiquidity
+ - the balance of the contract is as expected.
+ ============================================================================*/
+rule withdrawLiquidity_correctness(env e) {
+ uint256 amount;
+
+ require e.msg.sender != currentContract;
+ uint256 bal_before = erc20.balanceOf(e, currentContract);
+ withdrawLiquidity(e, amount);
+ uint256 bal_after = erc20.balanceOf(e, currentContract);
+
+ assert e.msg.sender == getRebalancer();
+ assert (to_mathint(bal_after) == bal_before - amount);
+}
+
+
+/* ==============================================================================
+ rule: provideLiquidity_correctness
+ description: The rule checks that:
+ - only the rebalancer can call to provideLiquidity
+ - the balance of the contract is as expected.
+ ============================================================================*/
+rule provideLiquidity_correctness(env e) {
+ uint256 amount;
+
+ require e.msg.sender != currentContract;
+ uint256 bal_before = erc20.balanceOf(e, currentContract);
+ provideLiquidity(e, amount);
+ uint256 bal_after = erc20.balanceOf(e, currentContract);
+
+ assert e.msg.sender == getRebalancer();
+ assert (to_mathint(bal_after) == bal_before + amount);
+}
+
+
+/* ==============================================================================
+ rule: only_lockOrBurn_can_increase_currentBridged
+ ============================================================================*/
+rule only_lockOrBurn_can_increase_currentBridged(env e) {
+ method f;
+ calldataarg args;
+
+ uint256 curr_bridge_before = getCurrentBridgedAmount();
+ f (e,args);
+ uint256 curr_bridge_after = getCurrentBridgedAmount();
+
+ assert
+ curr_bridge_after > curr_bridge_before =>
+ f.selector==sig:lockOrBurn(address,bytes calldata,uint256,uint64,bytes calldata).selector;
+}
+
+
+/* ==============================================================================
+ rule: only_releaseOrMint_can_decrease_currentBridged
+ ============================================================================*/
+rule only_releaseOrMint_can_decrease_currentBridged(env e) {
+ method f;
+ calldataarg args;
+
+ uint256 curr_bridge_before = getCurrentBridgedAmount();
+ f (e,args);
+ uint256 curr_bridge_after = getCurrentBridgedAmount();
+
+ assert
+ curr_bridge_after < curr_bridge_before =>
+ f.selector==sig:releaseOrMint(bytes memory,address,uint256,uint64,bytes memory).selector;
+}
+
diff --git a/contracts/foundry-lib/forge-std b/contracts/foundry-lib/forge-std
index f73c73d201..4513bc2063 160000
--- a/contracts/foundry-lib/forge-std
+++ b/contracts/foundry-lib/forge-std
@@ -1 +1 @@
-Subproject commit f73c73d2018eb6a111f35e4dae7b4f27401e9421
+Subproject commit 4513bc2063f23c57bee6558799584b518d387a39
diff --git a/contracts/foundry-lib/solidity-utils b/contracts/foundry-lib/solidity-utils
new file mode 160000
index 0000000000..9d4d041562
--- /dev/null
+++ b/contracts/foundry-lib/solidity-utils
@@ -0,0 +1 @@
+Subproject commit 9d4d041562f7ac2918e216e2e7c74172afe3d2af
diff --git a/contracts/remappings.txt b/contracts/remappings.txt
index a314323752..8fbfa33413 100644
--- a/contracts/remappings.txt
+++ b/contracts/remappings.txt
@@ -6,3 +6,5 @@ forge-std/=foundry-lib/forge-std/src/
hardhat/=node_modules/hardhat/
@eth-optimism/=node_modules/@eth-optimism/
@scroll-tech/=node_modules/@scroll-tech/
+@aave/gho-core/=node_modules/@aave/gho/src/contracts/
+solidity-utils/=foundry-lib/solidity-utils/src/
diff --git a/contracts/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPool.sol b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPool.sol
new file mode 100644
index 0000000000..cc0f24af39
--- /dev/null
+++ b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPool.sol
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity ^0.8.0;
+
+import {ITypeAndVersion} from "../../../shared/interfaces/ITypeAndVersion.sol";
+import {IBurnMintERC20} from "../../../shared/token/ERC20/IBurnMintERC20.sol";
+
+import {UpgradeableTokenPool} from "./UpgradeableTokenPool.sol";
+import {UpgradeableBurnMintTokenPoolAbstract} from "./UpgradeableBurnMintTokenPoolAbstract.sol";
+
+import {IRouter} from "../../interfaces/IRouter.sol";
+import {VersionedInitializable} from "./VersionedInitializable.sol";
+
+/// @title UpgradeableBurnMintTokenPool
+/// @author Aave Labs
+/// @notice Upgradeable version of Chainlink's CCIP BurnMintTokenPool
+/// @dev Contract adaptations:
+/// - Implementation of VersionedInitializable to allow upgrades
+/// - Move of allowlist and router definition to initialization stage
+contract UpgradeableBurnMintTokenPool is VersionedInitializable, UpgradeableBurnMintTokenPoolAbstract, ITypeAndVersion {
+ string public constant override typeAndVersion = "BurnMintTokenPool 1.4.0";
+
+ /// @dev Constructor
+ /// @param token The bridgeable token that is managed by this pool.
+ /// @param armProxy The address of the arm proxy
+ /// @param allowlistEnabled True if pool is set to access-controlled mode, false otherwise
+ constructor(
+ address token,
+ address armProxy,
+ bool allowlistEnabled
+ ) UpgradeableTokenPool(IBurnMintERC20(token), armProxy, allowlistEnabled) {}
+
+ /// @dev Initializer
+ /// @dev The address passed as `owner` must accept ownership after initialization.
+ /// @dev The `allowlist` is only effective if pool is set to access-controlled mode
+ /// @param owner The address of the owner
+ /// @param allowlist A set of addresses allowed to trigger lockOrBurn as original senders
+ /// @param router The address of the router
+ function initialize(address owner, address[] memory allowlist, address router) public virtual initializer {
+ if (owner == address(0)) revert ZeroAddressNotAllowed();
+ if (router == address(0)) revert ZeroAddressNotAllowed();
+ _transferOwnership(owner);
+
+ s_router = IRouter(router);
+
+ // Pool can be set as permissioned or permissionless at deployment time only to save hot-path gas.
+ if (i_allowlistEnabled) {
+ _applyAllowListUpdates(new address[](0), allowlist);
+ }
+ }
+
+ /// @inheritdoc UpgradeableBurnMintTokenPoolAbstract
+ function _burn(uint256 amount) internal virtual override {
+ IBurnMintERC20(address(i_token)).burn(amount);
+ }
+
+ /// @notice Returns the revision number
+ /// @return The revision number
+ function REVISION() public pure virtual returns (uint256) {
+ return 1;
+ }
+
+ /// @inheritdoc VersionedInitializable
+ function getRevision() internal pure virtual override returns (uint256) {
+ return REVISION();
+ }
+}
diff --git a/contracts/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPoolAbstract.sol b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPoolAbstract.sol
new file mode 100644
index 0000000000..e228732855
--- /dev/null
+++ b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPoolAbstract.sol
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity ^0.8.0;
+
+import {IBurnMintERC20} from "../../../shared/token/ERC20/IBurnMintERC20.sol";
+
+import {UpgradeableTokenPool} from "./UpgradeableTokenPool.sol";
+
+abstract contract UpgradeableBurnMintTokenPoolAbstract is UpgradeableTokenPool {
+ /// @notice Contains the specific burn call for a pool.
+ /// @dev overriding this method allows us to create pools with different burn signatures
+ /// without duplicating the underlying logic.
+ function _burn(uint256 amount) internal virtual;
+
+ /// @notice Burn the token in the pool
+ /// @param amount Amount to burn
+ /// @dev The whenHealthy check is important to ensure that even if a ramp is compromised
+ /// we're able to stop token movement via ARM.
+ function lockOrBurn(
+ address originalSender,
+ bytes calldata,
+ uint256 amount,
+ uint64 remoteChainSelector,
+ bytes calldata
+ )
+ external
+ virtual
+ override
+ onlyOnRamp(remoteChainSelector)
+ checkAllowList(originalSender)
+ whenHealthy
+ returns (bytes memory)
+ {
+ _consumeOutboundRateLimit(remoteChainSelector, amount);
+ _burn(amount);
+ emit Burned(msg.sender, amount);
+ return "";
+ }
+
+ /// @notice Mint tokens from the pool to the recipient
+ /// @param receiver Recipient address
+ /// @param amount Amount to mint
+ /// @dev The whenHealthy check is important to ensure that even if a ramp is compromised
+ /// we're able to stop token movement via ARM.
+ function releaseOrMint(
+ bytes memory,
+ address receiver,
+ uint256 amount,
+ uint64 remoteChainSelector,
+ bytes memory
+ ) external virtual override whenHealthy onlyOffRamp(remoteChainSelector) {
+ _consumeInboundRateLimit(remoteChainSelector, amount);
+ IBurnMintERC20(address(i_token)).mint(receiver, amount);
+ emit Minted(msg.sender, receiver, amount);
+ }
+}
diff --git a/contracts/src/v0.8/ccip/pools/GHO/UpgradeableLockReleaseTokenPool.sol b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableLockReleaseTokenPool.sol
new file mode 100644
index 0000000000..0fac98c708
--- /dev/null
+++ b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableLockReleaseTokenPool.sol
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity ^0.8.0;
+
+import {ITypeAndVersion} from "../../../shared/interfaces/ITypeAndVersion.sol";
+import {ILiquidityContainer} from "../../../rebalancer/interfaces/ILiquidityContainer.sol";
+
+import {UpgradeableTokenPool} from "./UpgradeableTokenPool.sol";
+import {RateLimiter} from "../../libraries/RateLimiter.sol";
+
+import {IERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol";
+import {SafeERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol";
+
+import {IRouter} from "../../interfaces/IRouter.sol";
+import {VersionedInitializable} from "./VersionedInitializable.sol";
+
+/// @title UpgradeableLockReleaseTokenPool
+/// @author Aave Labs
+/// @notice Upgradeable version of Chainlink's CCIP LockReleaseTokenPool
+/// @dev Contract adaptations:
+/// - Implementation of VersionedInitializable to allow upgrades
+/// - Move of allowlist and router definition to initialization stage
+/// - Addition of a bridge limit to regulate the maximum amount of tokens that can be transferred out (burned/locked)
+contract UpgradeableLockReleaseTokenPool is
+ VersionedInitializable,
+ UpgradeableTokenPool,
+ ILiquidityContainer,
+ ITypeAndVersion
+{
+ using SafeERC20 for IERC20;
+
+ error InsufficientLiquidity();
+ error LiquidityNotAccepted();
+ error Unauthorized(address caller);
+
+ error BridgeLimitExceeded(uint256 bridgeLimit);
+ error NotEnoughBridgedAmount();
+ event BridgeLimitUpdated(uint256 oldBridgeLimit, uint256 newBridgeLimit);
+
+ string public constant override typeAndVersion = "LockReleaseTokenPool 1.4.0";
+
+ /// @dev The unique lock release pool flag to signal through EIP 165.
+ bytes4 private constant LOCK_RELEASE_INTERFACE_ID = bytes4(keccak256("LockReleaseTokenPool"));
+
+ /// @dev Whether or not the pool accepts liquidity.
+ /// External liquidity is not required when there is one canonical token deployed to a chain,
+ /// and CCIP is facilitating mint/burn on all the other chains, in which case the invariant
+ /// balanceOf(pool) on home chain == sum(totalSupply(mint/burn "wrapped" token) on all remote chains) should always hold
+ bool internal immutable i_acceptLiquidity;
+ /// @notice The address of the rebalancer.
+ address internal s_rebalancer;
+ /// @notice The address of the rate limiter admin.
+ /// @dev Can be address(0) if none is configured.
+ address internal s_rateLimitAdmin;
+
+ /// @notice Maximum amount of tokens that can be bridged to other chains
+ uint256 private s_bridgeLimit;
+ /// @notice Amount of tokens bridged (transferred out)
+ /// @dev Must always be equal to or below the bridge limit
+ uint256 private s_currentBridged;
+ /// @notice The address of the bridge limit admin.
+ /// @dev Can be address(0) if none is configured.
+ address internal s_bridgeLimitAdmin;
+
+ /// @dev Constructor
+ /// @param token The bridgeable token that is managed by this pool.
+ /// @param armProxy The address of the arm proxy
+ /// @param allowlistEnabled True if pool is set to access-controlled mode, false otherwise
+ /// @param acceptLiquidity True if the pool accepts liquidity, false otherwise
+ constructor(
+ address token,
+ address armProxy,
+ bool allowlistEnabled,
+ bool acceptLiquidity
+ ) UpgradeableTokenPool(IERC20(token), armProxy, allowlistEnabled) {
+ i_acceptLiquidity = acceptLiquidity;
+ }
+
+ /// @dev Initializer
+ /// @dev The address passed as `owner` must accept ownership after initialization.
+ /// @dev The `allowlist` is only effective if pool is set to access-controlled mode
+ /// @param owner The address of the owner
+ /// @param allowlist A set of addresses allowed to trigger lockOrBurn as original senders
+ /// @param router The address of the router
+ /// @param bridgeLimit The maximum amount of tokens that can be bridged to other chains
+ function initialize(
+ address owner,
+ address[] memory allowlist,
+ address router,
+ uint256 bridgeLimit
+ ) public virtual initializer {
+ if (owner == address(0)) revert ZeroAddressNotAllowed();
+ if (router == address(0)) revert ZeroAddressNotAllowed();
+ _transferOwnership(owner);
+
+ s_router = IRouter(router);
+
+ // Pool can be set as permissioned or permissionless at deployment time only to save hot-path gas.
+ if (i_allowlistEnabled) {
+ _applyAllowListUpdates(new address[](0), allowlist);
+ }
+ s_bridgeLimit = bridgeLimit;
+ }
+
+ /// @notice Locks the token in the pool
+ /// @param amount Amount to lock
+ /// @dev The whenHealthy check is important to ensure that even if a ramp is compromised
+ /// we're able to stop token movement via ARM.
+ function lockOrBurn(
+ address originalSender,
+ bytes calldata,
+ uint256 amount,
+ uint64 remoteChainSelector,
+ bytes calldata
+ )
+ external
+ virtual
+ override
+ onlyOnRamp(remoteChainSelector)
+ checkAllowList(originalSender)
+ whenHealthy
+ returns (bytes memory)
+ {
+ // Increase bridged amount because tokens are leaving the source chain
+ if ((s_currentBridged += amount) > s_bridgeLimit) revert BridgeLimitExceeded(s_bridgeLimit);
+
+ _consumeOutboundRateLimit(remoteChainSelector, amount);
+ emit Locked(msg.sender, amount);
+ return "";
+ }
+
+ /// @notice Release tokens from the pool to the recipient
+ /// @param receiver Recipient address
+ /// @param amount Amount to release
+ /// @dev The whenHealthy check is important to ensure that even if a ramp is compromised
+ /// we're able to stop token movement via ARM.
+ function releaseOrMint(
+ bytes memory,
+ address receiver,
+ uint256 amount,
+ uint64 remoteChainSelector,
+ bytes memory
+ ) external virtual override onlyOffRamp(remoteChainSelector) whenHealthy {
+ // This should never occur. Amount should never exceed the current bridged amount
+ if (amount > s_currentBridged) revert NotEnoughBridgedAmount();
+ // Reduce bridged amount because tokens are back to source chain
+ s_currentBridged -= amount;
+
+ _consumeInboundRateLimit(remoteChainSelector, amount);
+ getToken().safeTransfer(receiver, amount);
+ emit Released(msg.sender, receiver, amount);
+ }
+
+ /// @notice returns the lock release interface flag used for EIP165 identification.
+ function getLockReleaseInterfaceId() public pure returns (bytes4) {
+ return LOCK_RELEASE_INTERFACE_ID;
+ }
+
+ // @inheritdoc IERC165
+ function supportsInterface(bytes4 interfaceId) public pure virtual override returns (bool) {
+ return
+ interfaceId == LOCK_RELEASE_INTERFACE_ID ||
+ interfaceId == type(ILiquidityContainer).interfaceId ||
+ super.supportsInterface(interfaceId);
+ }
+
+ /// @notice Gets Rebalancer, can be address(0) if none is configured.
+ /// @return The current liquidity manager.
+ function getRebalancer() external view returns (address) {
+ return s_rebalancer;
+ }
+
+ /// @notice Sets the Rebalancer address.
+ /// @dev Only callable by the owner.
+ function setRebalancer(address rebalancer) external onlyOwner {
+ s_rebalancer = rebalancer;
+ }
+
+ /// @notice Sets the rate limiter admin address.
+ /// @dev Only callable by the owner.
+ /// @param rateLimitAdmin The new rate limiter admin address.
+ function setRateLimitAdmin(address rateLimitAdmin) external onlyOwner {
+ s_rateLimitAdmin = rateLimitAdmin;
+ }
+
+ /// @notice Sets the bridge limit, the maximum amount of tokens that can be bridged out
+ /// @dev Only callable by the owner or the bridge limit admin.
+ /// @dev Bridge limit changes should be carefully managed, specially when reducing below the current bridged amount
+ /// @param newBridgeLimit The new bridge limit
+ function setBridgeLimit(uint256 newBridgeLimit) external {
+ if (msg.sender != s_bridgeLimitAdmin && msg.sender != owner()) revert Unauthorized(msg.sender);
+ uint256 oldBridgeLimit = s_bridgeLimit;
+ s_bridgeLimit = newBridgeLimit;
+ emit BridgeLimitUpdated(oldBridgeLimit, newBridgeLimit);
+ }
+
+ /// @notice Sets the bridge limit admin address.
+ /// @dev Only callable by the owner.
+ /// @param bridgeLimitAdmin The new bridge limit admin address.
+ function setBridgeLimitAdmin(address bridgeLimitAdmin) external onlyOwner {
+ s_bridgeLimitAdmin = bridgeLimitAdmin;
+ }
+
+ /// @notice Gets the bridge limit
+ /// @return The maximum amount of tokens that can be transferred out to other chains
+ function getBridgeLimit() external view virtual returns (uint256) {
+ return s_bridgeLimit;
+ }
+
+ /// @notice Gets the current bridged amount to other chains
+ /// @return The amount of tokens transferred out to other chains
+ function getCurrentBridgedAmount() external view virtual returns (uint256) {
+ return s_currentBridged;
+ }
+
+ /// @notice Gets the rate limiter admin address.
+ function getRateLimitAdmin() external view returns (address) {
+ return s_rateLimitAdmin;
+ }
+
+ /// @notice Gets the bridge limiter admin address.
+ function getBridgeLimitAdmin() external view returns (address) {
+ return s_bridgeLimitAdmin;
+ }
+
+ /// @notice Checks if the pool can accept liquidity.
+ /// @return true if the pool can accept liquidity, false otherwise.
+ function canAcceptLiquidity() external view returns (bool) {
+ return i_acceptLiquidity;
+ }
+
+ /// @notice Adds liquidity to the pool. The tokens should be approved first.
+ /// @param amount The amount of liquidity to provide.
+ function provideLiquidity(uint256 amount) external {
+ if (!i_acceptLiquidity) revert LiquidityNotAccepted();
+ if (s_rebalancer != msg.sender) revert Unauthorized(msg.sender);
+
+ i_token.safeTransferFrom(msg.sender, address(this), amount);
+ emit LiquidityAdded(msg.sender, amount);
+ }
+
+ /// @notice Removes liquidity from the pool. The tokens will be sent to msg.sender.
+ /// @param amount The amount of liquidity to remove.
+ function withdrawLiquidity(uint256 amount) external {
+ if (s_rebalancer != msg.sender) revert Unauthorized(msg.sender);
+
+ if (i_token.balanceOf(address(this)) < amount) revert InsufficientLiquidity();
+ i_token.safeTransfer(msg.sender, amount);
+ emit LiquidityRemoved(msg.sender, amount);
+ }
+
+ /// @notice Sets the rate limiter admin address.
+ /// @dev Only callable by the owner or the rate limiter admin. NOTE: overwrites the normal
+ /// onlyAdmin check in the base implementation to also allow the rate limiter admin.
+ /// @param remoteChainSelector The remote chain selector for which the rate limits apply.
+ /// @param outboundConfig The new outbound rate limiter config.
+ /// @param inboundConfig The new inbound rate limiter config.
+ function setChainRateLimiterConfig(
+ uint64 remoteChainSelector,
+ RateLimiter.Config memory outboundConfig,
+ RateLimiter.Config memory inboundConfig
+ ) external override {
+ if (msg.sender != s_rateLimitAdmin && msg.sender != owner()) revert Unauthorized(msg.sender);
+
+ _setRateLimitConfig(remoteChainSelector, outboundConfig, inboundConfig);
+ }
+
+ /// @notice Returns the revision number
+ /// @return The revision number
+ function REVISION() public pure virtual returns (uint256) {
+ return 1;
+ }
+
+ /// @inheritdoc VersionedInitializable
+ function getRevision() internal pure virtual override returns (uint256) {
+ return REVISION();
+ }
+}
diff --git a/contracts/src/v0.8/ccip/pools/GHO/UpgradeableTokenPool.sol b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableTokenPool.sol
new file mode 100644
index 0000000000..ee359ac1f8
--- /dev/null
+++ b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableTokenPool.sol
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity ^0.8.0;
+
+import {IPool} from "../../interfaces/pools/IPool.sol";
+import {IARM} from "../../interfaces/IARM.sol";
+import {IRouter} from "../../interfaces/IRouter.sol";
+
+import {OwnerIsCreator} from "../../../shared/access/OwnerIsCreator.sol";
+import {RateLimiter} from "../../libraries/RateLimiter.sol";
+
+import {IERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol";
+import {IERC165} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/IERC165.sol";
+import {EnumerableSet} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/structs/EnumerableSet.sol";
+
+/// @notice Base abstract class with common functions for all token pools.
+/// A token pool serves as isolated place for holding tokens and token specific logic
+/// that may execute as tokens move across the bridge.
+abstract contract UpgradeableTokenPool is IPool, OwnerIsCreator, IERC165 {
+ using EnumerableSet for EnumerableSet.AddressSet;
+ using EnumerableSet for EnumerableSet.UintSet;
+ using RateLimiter for RateLimiter.TokenBucket;
+
+ error CallerIsNotARampOnRouter(address caller);
+ error ZeroAddressNotAllowed();
+ error SenderNotAllowed(address sender);
+ error AllowListNotEnabled();
+ error NonExistentChain(uint64 remoteChainSelector);
+ error ChainNotAllowed(uint64 remoteChainSelector);
+ error BadARMSignal();
+ error ChainAlreadyExists(uint64 chainSelector);
+
+ event Locked(address indexed sender, uint256 amount);
+ event Burned(address indexed sender, uint256 amount);
+ event Released(address indexed sender, address indexed recipient, uint256 amount);
+ event Minted(address indexed sender, address indexed recipient, uint256 amount);
+ event ChainAdded(
+ uint64 remoteChainSelector,
+ RateLimiter.Config outboundRateLimiterConfig,
+ RateLimiter.Config inboundRateLimiterConfig
+ );
+ event ChainConfigured(
+ uint64 remoteChainSelector,
+ RateLimiter.Config outboundRateLimiterConfig,
+ RateLimiter.Config inboundRateLimiterConfig
+ );
+ event ChainRemoved(uint64 remoteChainSelector);
+ event AllowListAdd(address sender);
+ event AllowListRemove(address sender);
+ event RouterUpdated(address oldRouter, address newRouter);
+
+ struct ChainUpdate {
+ uint64 remoteChainSelector; // ──╮ Remote chain selector
+ bool allowed; // ────────────────╯ Whether the chain is allowed
+ RateLimiter.Config outboundRateLimiterConfig; // Outbound rate limited config, meaning the rate limits for all of the onRamps for the given chain
+ RateLimiter.Config inboundRateLimiterConfig; // Inbound rate limited config, meaning the rate limits for all of the offRamps for the given chain
+ }
+
+ /// @dev The bridgeable token that is managed by this pool.
+ IERC20 internal immutable i_token;
+ /// @dev The address of the arm proxy
+ address internal immutable i_armProxy;
+ /// @dev The immutable flag that indicates if the pool is access-controlled.
+ bool internal immutable i_allowlistEnabled;
+ /// @dev A set of addresses allowed to trigger lockOrBurn as original senders.
+ /// Only takes effect if i_allowlistEnabled is true.
+ /// This can be used to ensure only token-issuer specified addresses can
+ /// move tokens.
+ EnumerableSet.AddressSet internal s_allowList;
+ /// @dev The address of the router
+ IRouter internal s_router;
+ /// @dev A set of allowed chain selectors. We want the allowlist to be enumerable to
+ /// be able to quickly determine (without parsing logs) who can access the pool.
+ /// @dev The chain selectors are in uin256 format because of the EnumerableSet implementation.
+ EnumerableSet.UintSet internal s_remoteChainSelectors;
+ /// @dev Outbound rate limits. Corresponds to the inbound rate limit for the pool
+ /// on the remote chain.
+ mapping(uint64 => RateLimiter.TokenBucket) internal s_outboundRateLimits;
+ /// @dev Inbound rate limits. This allows per destination chain
+ /// token issuer specified rate limiting (e.g. issuers may trust chains to varying
+ /// degrees and prefer different limits)
+ mapping(uint64 => RateLimiter.TokenBucket) internal s_inboundRateLimits;
+
+ constructor(IERC20 token, address armProxy, bool allowlistEnabled) {
+ if (address(token) == address(0)) revert ZeroAddressNotAllowed();
+ i_token = token;
+ i_armProxy = armProxy;
+ i_allowlistEnabled = allowlistEnabled;
+ }
+
+ /// @notice Get ARM proxy address
+ /// @return armProxy Address of arm proxy
+ function getArmProxy() public view returns (address armProxy) {
+ return i_armProxy;
+ }
+
+ /// @inheritdoc IPool
+ function getToken() public view override returns (IERC20 token) {
+ return i_token;
+ }
+
+ /// @notice Gets the pool's Router
+ /// @return router The pool's Router
+ function getRouter() public view returns (address router) {
+ return address(s_router);
+ }
+
+ /// @notice Sets the pool's Router
+ /// @param newRouter The new Router
+ function setRouter(address newRouter) public onlyOwner {
+ if (newRouter == address(0)) revert ZeroAddressNotAllowed();
+ address oldRouter = address(s_router);
+ s_router = IRouter(newRouter);
+
+ emit RouterUpdated(oldRouter, newRouter);
+ }
+
+ /// @inheritdoc IERC165
+ function supportsInterface(bytes4 interfaceId) public pure virtual override returns (bool) {
+ return interfaceId == type(IPool).interfaceId || interfaceId == type(IERC165).interfaceId;
+ }
+
+ // ================================================================
+ // │ Chain permissions │
+ // ================================================================
+
+ /// @notice Checks whether a chain selector is permissioned on this contract.
+ /// @return true if the given chain selector is a permissioned remote chain.
+ function isSupportedChain(uint64 remoteChainSelector) public view returns (bool) {
+ return s_remoteChainSelectors.contains(remoteChainSelector);
+ }
+
+ /// @notice Get list of allowed chains
+ /// @return list of chains.
+ function getSupportedChains() public view returns (uint64[] memory) {
+ uint256[] memory uint256ChainSelectors = s_remoteChainSelectors.values();
+ uint64[] memory chainSelectors = new uint64[](uint256ChainSelectors.length);
+ for (uint256 i = 0; i < uint256ChainSelectors.length; ++i) {
+ chainSelectors[i] = uint64(uint256ChainSelectors[i]);
+ }
+
+ return chainSelectors;
+ }
+
+ /// @notice Sets the permissions for a list of chains selectors. Actual senders for these chains
+ /// need to be allowed on the Router to interact with this pool.
+ /// @dev Only callable by the owner
+ /// @param chains A list of chains and their new permission status & rate limits. Rate limits
+ /// are only used when the chain is being added through `allowed` being true.
+ function applyChainUpdates(ChainUpdate[] calldata chains) external virtual onlyOwner {
+ for (uint256 i = 0; i < chains.length; ++i) {
+ ChainUpdate memory update = chains[i];
+ RateLimiter._validateTokenBucketConfig(update.outboundRateLimiterConfig, !update.allowed);
+ RateLimiter._validateTokenBucketConfig(update.inboundRateLimiterConfig, !update.allowed);
+
+ if (update.allowed) {
+ // If the chain already exists, revert
+ if (!s_remoteChainSelectors.add(update.remoteChainSelector)) {
+ revert ChainAlreadyExists(update.remoteChainSelector);
+ }
+
+ s_outboundRateLimits[update.remoteChainSelector] = RateLimiter.TokenBucket({
+ rate: update.outboundRateLimiterConfig.rate,
+ capacity: update.outboundRateLimiterConfig.capacity,
+ tokens: update.outboundRateLimiterConfig.capacity,
+ lastUpdated: uint32(block.timestamp),
+ isEnabled: update.outboundRateLimiterConfig.isEnabled
+ });
+
+ s_inboundRateLimits[update.remoteChainSelector] = RateLimiter.TokenBucket({
+ rate: update.inboundRateLimiterConfig.rate,
+ capacity: update.inboundRateLimiterConfig.capacity,
+ tokens: update.inboundRateLimiterConfig.capacity,
+ lastUpdated: uint32(block.timestamp),
+ isEnabled: update.inboundRateLimiterConfig.isEnabled
+ });
+ emit ChainAdded(update.remoteChainSelector, update.outboundRateLimiterConfig, update.inboundRateLimiterConfig);
+ } else {
+ // If the chain doesn't exist, revert
+ if (!s_remoteChainSelectors.remove(update.remoteChainSelector)) {
+ revert NonExistentChain(update.remoteChainSelector);
+ }
+
+ delete s_inboundRateLimits[update.remoteChainSelector];
+ delete s_outboundRateLimits[update.remoteChainSelector];
+ emit ChainRemoved(update.remoteChainSelector);
+ }
+ }
+ }
+
+ // ================================================================
+ // │ Rate limiting │
+ // ================================================================
+
+ /// @notice Consumes outbound rate limiting capacity in this pool
+ function _consumeOutboundRateLimit(uint64 remoteChainSelector, uint256 amount) internal {
+ s_outboundRateLimits[remoteChainSelector]._consume(amount, address(i_token));
+ }
+
+ /// @notice Consumes inbound rate limiting capacity in this pool
+ function _consumeInboundRateLimit(uint64 remoteChainSelector, uint256 amount) internal {
+ s_inboundRateLimits[remoteChainSelector]._consume(amount, address(i_token));
+ }
+
+ /// @notice Gets the token bucket with its values for the block it was requested at.
+ /// @return The token bucket.
+ function getCurrentOutboundRateLimiterState(
+ uint64 remoteChainSelector
+ ) external view returns (RateLimiter.TokenBucket memory) {
+ return s_outboundRateLimits[remoteChainSelector]._currentTokenBucketState();
+ }
+
+ /// @notice Gets the token bucket with its values for the block it was requested at.
+ /// @return The token bucket.
+ function getCurrentInboundRateLimiterState(
+ uint64 remoteChainSelector
+ ) external view returns (RateLimiter.TokenBucket memory) {
+ return s_inboundRateLimits[remoteChainSelector]._currentTokenBucketState();
+ }
+
+ /// @notice Sets the chain rate limiter config.
+ /// @param remoteChainSelector The remote chain selector for which the rate limits apply.
+ /// @param outboundConfig The new outbound rate limiter config, meaning the onRamp rate limits for the given chain.
+ /// @param inboundConfig The new inbound rate limiter config, meaning the offRamp rate limits for the given chain.
+ function setChainRateLimiterConfig(
+ uint64 remoteChainSelector,
+ RateLimiter.Config memory outboundConfig,
+ RateLimiter.Config memory inboundConfig
+ ) external virtual onlyOwner {
+ _setRateLimitConfig(remoteChainSelector, outboundConfig, inboundConfig);
+ }
+
+ function _setRateLimitConfig(
+ uint64 remoteChainSelector,
+ RateLimiter.Config memory outboundConfig,
+ RateLimiter.Config memory inboundConfig
+ ) internal {
+ if (!isSupportedChain(remoteChainSelector)) revert NonExistentChain(remoteChainSelector);
+ RateLimiter._validateTokenBucketConfig(outboundConfig, false);
+ s_outboundRateLimits[remoteChainSelector]._setTokenBucketConfig(outboundConfig);
+ RateLimiter._validateTokenBucketConfig(inboundConfig, false);
+ s_inboundRateLimits[remoteChainSelector]._setTokenBucketConfig(inboundConfig);
+ emit ChainConfigured(remoteChainSelector, outboundConfig, inboundConfig);
+ }
+
+ // ================================================================
+ // │ Access │
+ // ================================================================
+
+ /// @notice Checks whether remote chain selector is configured on this contract, and if the msg.sender
+ /// is a permissioned onRamp for the given chain on the Router.
+ modifier onlyOnRamp(uint64 remoteChainSelector) {
+ if (!isSupportedChain(remoteChainSelector)) revert ChainNotAllowed(remoteChainSelector);
+ if (!(msg.sender == s_router.getOnRamp(remoteChainSelector))) revert CallerIsNotARampOnRouter(msg.sender);
+ _;
+ }
+
+ /// @notice Checks whether remote chain selector is configured on this contract, and if the msg.sender
+ /// is a permissioned offRamp for the given chain on the Router.
+ modifier onlyOffRamp(uint64 remoteChainSelector) {
+ if (!isSupportedChain(remoteChainSelector)) revert ChainNotAllowed(remoteChainSelector);
+ if (!s_router.isOffRamp(remoteChainSelector, msg.sender)) revert CallerIsNotARampOnRouter(msg.sender);
+ _;
+ }
+
+ // ================================================================
+ // │ Allowlist │
+ // ================================================================
+
+ modifier checkAllowList(address sender) {
+ if (i_allowlistEnabled && !s_allowList.contains(sender)) revert SenderNotAllowed(sender);
+ _;
+ }
+
+ /// @notice Gets whether the allowList functionality is enabled.
+ /// @return true is enabled, false if not.
+ function getAllowListEnabled() external view returns (bool) {
+ return i_allowlistEnabled;
+ }
+
+ /// @notice Gets the allowed addresses.
+ /// @return The allowed addresses.
+ function getAllowList() external view returns (address[] memory) {
+ return s_allowList.values();
+ }
+
+ /// @notice Apply updates to the allow list.
+ /// @param removes The addresses to be removed.
+ /// @param adds The addresses to be added.
+ /// @dev allowListing will be removed before public launch
+ function applyAllowListUpdates(address[] calldata removes, address[] calldata adds) external onlyOwner {
+ _applyAllowListUpdates(removes, adds);
+ }
+
+ /// @notice Internal version of applyAllowListUpdates to allow for reuse in the constructor.
+ function _applyAllowListUpdates(address[] memory removes, address[] memory adds) internal {
+ if (!i_allowlistEnabled) revert AllowListNotEnabled();
+
+ for (uint256 i = 0; i < removes.length; ++i) {
+ address toRemove = removes[i];
+ if (s_allowList.remove(toRemove)) {
+ emit AllowListRemove(toRemove);
+ }
+ }
+ for (uint256 i = 0; i < adds.length; ++i) {
+ address toAdd = adds[i];
+ if (toAdd == address(0)) {
+ continue;
+ }
+ if (s_allowList.add(toAdd)) {
+ emit AllowListAdd(toAdd);
+ }
+ }
+ }
+
+ /// @notice Ensure that there is no active curse.
+ modifier whenHealthy() {
+ if (IARM(i_armProxy).isCursed()) revert BadARMSignal();
+ _;
+ }
+}
diff --git a/contracts/src/v0.8/ccip/pools/GHO/VersionedInitializable.sol b/contracts/src/v0.8/ccip/pools/GHO/VersionedInitializable.sol
new file mode 100644
index 0000000000..b9fb054fa0
--- /dev/null
+++ b/contracts/src/v0.8/ccip/pools/GHO/VersionedInitializable.sol
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: AGPL-3.0
+pragma solidity ^0.8.0;
+
+/**
+ * @title VersionedInitializable
+ * @author Aave, inspired by the OpenZeppelin Initializable contract
+ * @notice Helper contract to implement initializer functions. To use it, replace
+ * the constructor with a function that has the `initializer` modifier.
+ * @dev WARNING: Unlike constructors, initializer functions must be manually
+ * invoked. This applies both to deploying an Initializable contract, as well
+ * as extending an Initializable contract via inheritance.
+ * WARNING: When used with inheritance, manual care must be taken to not invoke
+ * a parent initializer twice, or ensure that all initializers are idempotent,
+ * because this is not dealt with automatically as with constructors.
+ */
+abstract contract VersionedInitializable {
+ /**
+ * @dev Indicates that the contract has been initialized.
+ */
+ uint256 private lastInitializedRevision = 0;
+
+ /**
+ * @dev Indicates that the contract is in the process of being initialized.
+ */
+ bool private initializing;
+
+ /**
+ * @dev Modifier to use in the initializer function of a contract.
+ */
+ modifier initializer() {
+ uint256 revision = getRevision();
+ require(
+ initializing || isConstructor() || revision > lastInitializedRevision,
+ "Contract instance has already been initialized"
+ );
+
+ bool isTopLevelCall = !initializing;
+ if (isTopLevelCall) {
+ initializing = true;
+ lastInitializedRevision = revision;
+ }
+
+ _;
+
+ if (isTopLevelCall) {
+ initializing = false;
+ }
+ }
+
+ /**
+ * @notice Returns the revision number of the contract
+ * @dev Needs to be defined in the inherited class as a constant.
+ * @return The revision number
+ */
+ function getRevision() internal pure virtual returns (uint256);
+
+ /**
+ * @notice Returns true if and only if the function is running in the constructor
+ * @return True if the function is running in the constructor
+ */
+ function isConstructor() private view returns (bool) {
+ // extcodesize checks the size of the code stored in an address, and
+ // address returns the current address. Since the code is still not
+ // deployed when running a constructor, any checks on its code size will
+ // yield zero, making it an effective way to detect if a contract is
+ // under construction or not.
+ uint256 cs;
+ //solium-disable-next-line
+ assembly {
+ cs := extcodesize(address())
+ }
+ return cs == 0;
+ }
+
+ // Reserved storage space to allow for layout changes in the future.
+ uint256[50] private ______gap;
+}
diff --git a/contracts/src/v0.8/ccip/pools/GHO/diffs/BurnMintTokenPoolAbstract_diff.md b/contracts/src/v0.8/ccip/pools/GHO/diffs/BurnMintTokenPoolAbstract_diff.md
new file mode 100644
index 0000000000..11c20c0a6d
--- /dev/null
+++ b/contracts/src/v0.8/ccip/pools/GHO/diffs/BurnMintTokenPoolAbstract_diff.md
@@ -0,0 +1,21 @@
+```diff
+diff --git a/src/v0.8/ccip/pools/BurnMintTokenPoolAbstract.sol b/src/v0.8/ccip/pools/UpgradeableBurnMintTokenPoolAbstract.sol
+index f5eb135186..651965e40b 100644
+--- a/src/v0.8/ccip/pools/BurnMintTokenPoolAbstract.sol
++++ b/src/v0.8/ccip/pools/UpgradeableBurnMintTokenPoolAbstract.sol
+@@ -1,11 +1,11 @@
+ // SPDX-License-Identifier: BUSL-1.1
+-pragma solidity 0.8.19;
++pragma solidity ^0.8.0;
+
+ import {IBurnMintERC20} from "../../shared/token/ERC20/IBurnMintERC20.sol";
+
+-import {TokenPool} from "./TokenPool.sol";
++import {UpgradeableTokenPool} from "./UpgradeableTokenPool.sol";
+
+-abstract contract BurnMintTokenPoolAbstract is TokenPool {
++abstract contract UpgradeableBurnMintTokenPoolAbstract is UpgradeableTokenPool {
+ /// @notice Contains the specific burn call for a pool.
+ /// @dev overriding this method allows us to create pools with different burn signatures
+ /// without duplicating the underlying logic.
+```
diff --git a/contracts/src/v0.8/ccip/pools/GHO/diffs/BurnMintTokenPool_diff.md b/contracts/src/v0.8/ccip/pools/GHO/diffs/BurnMintTokenPool_diff.md
new file mode 100644
index 0000000000..1dfabb1e60
--- /dev/null
+++ b/contracts/src/v0.8/ccip/pools/GHO/diffs/BurnMintTokenPool_diff.md
@@ -0,0 +1,87 @@
+```diff
+diff --git a/src/v0.8/ccip/pools/BurnMintTokenPool.sol b/src/v0.8/ccip/pools/UpgradeableBurnMintTokenPool.sol
+index 9af0f22f4c..f07f8c3a28 100644
+--- a/src/v0.8/ccip/pools/BurnMintTokenPool.sol
++++ b/src/v0.8/ccip/pools/UpgradeableBurnMintTokenPool.sol
+@@ -1,29 +1,66 @@
+ // SPDX-License-Identifier: BUSL-1.1
+-pragma solidity 0.8.19;
++pragma solidity ^0.8.0;
+
+ import {ITypeAndVersion} from "../../shared/interfaces/ITypeAndVersion.sol";
+ import {IBurnMintERC20} from "../../shared/token/ERC20/IBurnMintERC20.sol";
+
+-import {TokenPool} from "./TokenPool.sol";
+-import {BurnMintTokenPoolAbstract} from "./BurnMintTokenPoolAbstract.sol";
++import {UpgradeableTokenPool} from "./UpgradeableTokenPool.sol";
++import {UpgradeableBurnMintTokenPoolAbstract} from "./UpgradeableBurnMintTokenPoolAbstract.sol";
+
+-/// @notice This pool mints and burns a 3rd-party token.
+-/// @dev Pool whitelisting mode is set in the constructor and cannot be modified later.
+-/// It either accepts any address as originalSender, or only accepts whitelisted originalSender.
+-/// The only way to change whitelisting mode is to deploy a new pool.
+-/// If that is expected, please make sure the token's burner/minter roles are adjustable.
+-contract BurnMintTokenPool is BurnMintTokenPoolAbstract, ITypeAndVersion {
++import {IRouter} from "../interfaces/IRouter.sol";
++import {VersionedInitializable} from "./VersionedInitializable.sol";
++
++/// @title UpgradeableBurnMintTokenPool
++/// @author Aave Labs
++/// @notice Upgradeable version of Chainlink's CCIP BurnMintTokenPool
++/// @dev Contract adaptations:
++/// - Implementation of VersionedInitializable to allow upgrades
++/// - Move of allowlist and router definition to initialization stage
++contract UpgradeableBurnMintTokenPool is VersionedInitializable, UpgradeableBurnMintTokenPoolAbstract, ITypeAndVersion {
+ string public constant override typeAndVersion = "BurnMintTokenPool 1.4.0";
+
++ /// @dev Constructor
++ /// @param token The bridgeable token that is managed by this pool.
++ /// @param armProxy The address of the arm proxy
++ /// @param allowlistEnabled True if pool is set to access-controlled mode, false otherwise
+ constructor(
+- IBurnMintERC20 token,
+- address[] memory allowlist,
++ address token,
+ address armProxy,
+- address router
+- ) TokenPool(token, allowlist, armProxy, router) {}
++ bool allowlistEnabled
++ ) UpgradeableTokenPool(IBurnMintERC20(token), armProxy, allowlistEnabled) {}
+
+- /// @inheritdoc BurnMintTokenPoolAbstract
++ /// @dev Initializer
++ /// @dev The address passed as `owner` must accept ownership after initialization.
++ /// @dev The `allowlist` is only effective if pool is set to access-controlled mode
++ /// @param owner The address of the owner
++ /// @param allowlist A set of addresses allowed to trigger lockOrBurn as original senders
++ /// @param router The address of the router
++ function initialize(address owner, address[] memory allowlist, address router) public virtual initializer {
++ if (owner == address(0)) revert ZeroAddressNotAllowed();
++ if (router == address(0)) revert ZeroAddressNotAllowed();
++ _transferOwnership(owner);
++
++ s_router = IRouter(router);
++
++ // Pool can be set as permissioned or permissionless at deployment time only to save hot-path gas.
++ if (i_allowlistEnabled) {
++ _applyAllowListUpdates(new address[](0), allowlist);
++ }
++ }
++
++ /// @inheritdoc UpgradeableBurnMintTokenPoolAbstract
+ function _burn(uint256 amount) internal virtual override {
+ IBurnMintERC20(address(i_token)).burn(amount);
+ }
++
++ /// @notice Returns the revision number
++ /// @return The revision number
++ function REVISION() public pure virtual returns (uint256) {
++ return 1;
++ }
++
++ /// @inheritdoc VersionedInitializable
++ function getRevision() internal pure virtual override returns (uint256) {
++ return REVISION();
++ }
+ }
+```
diff --git a/contracts/src/v0.8/ccip/pools/GHO/diffs/LockReleaseTokenPool_diff.md b/contracts/src/v0.8/ccip/pools/GHO/diffs/LockReleaseTokenPool_diff.md
new file mode 100644
index 0000000000..ac5d7bf30e
--- /dev/null
+++ b/contracts/src/v0.8/ccip/pools/GHO/diffs/LockReleaseTokenPool_diff.md
@@ -0,0 +1,201 @@
+```diff
+diff --git a/src/v0.8/ccip/pools/LockReleaseTokenPool.sol b/src/v0.8/ccip/pools/UpgradeableLockReleaseTokenPool.sol
+index 1a17fa0398..7ca3d5f389 100644
+--- a/src/v0.8/ccip/pools/LockReleaseTokenPool.sol
++++ b/src/v0.8/ccip/pools/UpgradeableLockReleaseTokenPool.sol
+@@ -1,26 +1,41 @@
+ // SPDX-License-Identifier: BUSL-1.1
+-pragma solidity 0.8.19;
++pragma solidity ^0.8.0;
+
+ import {ITypeAndVersion} from "../../shared/interfaces/ITypeAndVersion.sol";
+ import {ILiquidityContainer} from "../../rebalancer/interfaces/ILiquidityContainer.sol";
+
+-import {TokenPool} from "./TokenPool.sol";
++import {UpgradeableTokenPool} from "./UpgradeableTokenPool.sol";
+ import {RateLimiter} from "../libraries/RateLimiter.sol";
+
+ import {IERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol";
+ import {SafeERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol";
+
+-/// @notice Token pool used for tokens on their native chain. This uses a lock and release mechanism.
+-/// Because of lock/unlock requiring liquidity, this pool contract also has function to add and remove
+-/// liquidity. This allows for proper bookkeeping for both user and liquidity provider balances.
+-/// @dev One token per LockReleaseTokenPool.
+-contract LockReleaseTokenPool is TokenPool, ILiquidityContainer, ITypeAndVersion {
++import {IRouter} from "../interfaces/IRouter.sol";
++import {VersionedInitializable} from "./VersionedInitializable.sol";
++
++/// @title UpgradeableLockReleaseTokenPool
++/// @author Aave Labs
++/// @notice Upgradeable version of Chainlink's CCIP LockReleaseTokenPool
++/// @dev Contract adaptations:
++/// - Implementation of VersionedInitializable to allow upgrades
++/// - Move of allowlist and router definition to initialization stage
++/// - Addition of a bridge limit to regulate the maximum amount of tokens that can be transferred out (burned/locked)
++contract UpgradeableLockReleaseTokenPool is
++ VersionedInitializable,
++ UpgradeableTokenPool,
++ ILiquidityContainer,
++ ITypeAndVersion
++{
+ using SafeERC20 for IERC20;
+
+ error InsufficientLiquidity();
+ error LiquidityNotAccepted();
+ error Unauthorized(address caller);
+
++ error BridgeLimitExceeded(uint256 bridgeLimit);
++ error NotEnoughBridgedAmount();
++ event BridgeLimitUpdated(uint256 oldBridgeLimit, uint256 newBridgeLimit);
++
+ string public constant override typeAndVersion = "LockReleaseTokenPool 1.4.0";
+
+ /// @dev The unique lock release pool flag to signal through EIP 165.
+@@ -37,16 +52,55 @@ contract LockReleaseTokenPool is TokenPool, ILiquidityContainer, ITypeAndVersion
+ /// @dev Can be address(0) if none is configured.
+ address internal s_rateLimitAdmin;
+
++ /// @notice Maximum amount of tokens that can be bridged to other chains
++ uint256 private s_bridgeLimit;
++ /// @notice Amount of tokens bridged (transferred out)
++ /// @dev Must always be equal to or below the bridge limit
++ uint256 private s_currentBridged;
++ /// @notice The address of the bridge limit admin.
++ /// @dev Can be address(0) if none is configured.
++ address internal s_bridgeLimitAdmin;
++
++ /// @dev Constructor
++ /// @param token The bridgeable token that is managed by this pool.
++ /// @param armProxy The address of the arm proxy
++ /// @param allowlistEnabled True if pool is set to access-controlled mode, false otherwise
++ /// @param acceptLiquidity True if the pool accepts liquidity, false otherwise
+ constructor(
+- IERC20 token,
+- address[] memory allowlist,
++ address token,
+ address armProxy,
+- bool acceptLiquidity,
+- address router
+- ) TokenPool(token, allowlist, armProxy, router) {
++ bool allowlistEnabled,
++ bool acceptLiquidity
++ ) UpgradeableTokenPool(IERC20(token), armProxy, allowlistEnabled) {
+ i_acceptLiquidity = acceptLiquidity;
+ }
+
++ /// @dev Initializer
++ /// @dev The address passed as `owner` must accept ownership after initialization.
++ /// @dev The `allowlist` is only effective if pool is set to access-controlled mode
++ /// @param owner The address of the owner
++ /// @param allowlist A set of addresses allowed to trigger lockOrBurn as original senders
++ /// @param router The address of the router
++ /// @param bridgeLimit The maximum amount of tokens that can be bridged to other chains
++ function initialize(
++ address owner,
++ address[] memory allowlist,
++ address router,
++ uint256 bridgeLimit
++ ) public virtual initializer {
++ if (owner == address(0)) revert ZeroAddressNotAllowed();
++ if (router == address(0)) revert ZeroAddressNotAllowed();
++ _transferOwnership(owner);
++
++ s_router = IRouter(router);
++
++ // Pool can be set as permissioned or permissionless at deployment time only to save hot-path gas.
++ if (i_allowlistEnabled) {
++ _applyAllowListUpdates(new address[](0), allowlist);
++ }
++ s_bridgeLimit = bridgeLimit;
++ }
++
+ /// @notice Locks the token in the pool
+ /// @param amount Amount to lock
+ /// @dev The whenHealthy check is important to ensure that even if a ramp is compromised
+@@ -66,6 +120,9 @@ contract LockReleaseTokenPool is TokenPool, ILiquidityContainer, ITypeAndVersion
+ whenHealthy
+ returns (bytes memory)
+ {
++ // Increase bridged amount because tokens are leaving the source chain
++ if ((s_currentBridged += amount) > s_bridgeLimit) revert BridgeLimitExceeded(s_bridgeLimit);
++
+ _consumeOutboundRateLimit(remoteChainSelector, amount);
+ emit Locked(msg.sender, amount);
+ return "";
+@@ -83,6 +140,11 @@ contract LockReleaseTokenPool is TokenPool, ILiquidityContainer, ITypeAndVersion
+ uint64 remoteChainSelector,
+ bytes memory
+ ) external virtual override onlyOffRamp(remoteChainSelector) whenHealthy {
++ // This should never occur. Amount should never exceed the current bridged amount
++ if (amount > s_currentBridged) revert NotEnoughBridgedAmount();
++ // Reduce bridged amount because tokens are back to source chain
++ s_currentBridged -= amount;
++
+ _consumeInboundRateLimit(remoteChainSelector, amount);
+ getToken().safeTransfer(receiver, amount);
+ emit Released(msg.sender, receiver, amount);
+@@ -120,11 +182,46 @@ contract LockReleaseTokenPool is TokenPool, ILiquidityContainer, ITypeAndVersion
+ s_rateLimitAdmin = rateLimitAdmin;
+ }
+
++ /// @notice Sets the bridge limit, the maximum amount of tokens that can be bridged out
++ /// @dev Only callable by the owner or the bridge limit admin.
++ /// @dev Bridge limit changes should be carefully managed, specially when reducing below the current bridged amount
++ /// @param newBridgeLimit The new bridge limit
++ function setBridgeLimit(uint256 newBridgeLimit) external {
++ if (msg.sender != s_bridgeLimitAdmin && msg.sender != owner()) revert Unauthorized(msg.sender);
++ uint256 oldBridgeLimit = s_bridgeLimit;
++ s_bridgeLimit = newBridgeLimit;
++ emit BridgeLimitUpdated(oldBridgeLimit, newBridgeLimit);
++ }
++
++ /// @notice Sets the bridge limit admin address.
++ /// @dev Only callable by the owner.
++ /// @param bridgeLimitAdmin The new bridge limit admin address.
++ function setBridgeLimitAdmin(address bridgeLimitAdmin) external onlyOwner {
++ s_bridgeLimitAdmin = bridgeLimitAdmin;
++ }
++
++ /// @notice Gets the bridge limit
++ /// @return The maximum amount of tokens that can be transferred out to other chains
++ function getBridgeLimit() external view virtual returns (uint256) {
++ return s_bridgeLimit;
++ }
++
++ /// @notice Gets the current bridged amount to other chains
++ /// @return The amount of tokens transferred out to other chains
++ function getCurrentBridgedAmount() external view virtual returns (uint256) {
++ return s_currentBridged;
++ }
++
+ /// @notice Gets the rate limiter admin address.
+ function getRateLimitAdmin() external view returns (address) {
+ return s_rateLimitAdmin;
+ }
+
++ /// @notice Gets the bridge limiter admin address.
++ function getBridgeLimitAdmin() external view returns (address) {
++ return s_bridgeLimitAdmin;
++ }
++
+ /// @notice Checks if the pool can accept liquidity.
+ /// @return true if the pool can accept liquidity, false otherwise.
+ function canAcceptLiquidity() external view returns (bool) {
+@@ -166,4 +263,15 @@ contract LockReleaseTokenPool is TokenPool, ILiquidityContainer, ITypeAndVersion
+
+ _setRateLimitConfig(remoteChainSelector, outboundConfig, inboundConfig);
+ }
++
++ /// @notice Returns the revision number
++ /// @return The revision number
++ function REVISION() public pure virtual returns (uint256) {
++ return 1;
++ }
++
++ /// @inheritdoc VersionedInitializable
++ function getRevision() internal pure virtual override returns (uint256) {
++ return REVISION();
++ }
+ }
+```
diff --git a/contracts/src/v0.8/ccip/pools/GHO/diffs/TokenPool_diff.md b/contracts/src/v0.8/ccip/pools/GHO/diffs/TokenPool_diff.md
new file mode 100644
index 0000000000..6ff8893172
--- /dev/null
+++ b/contracts/src/v0.8/ccip/pools/GHO/diffs/TokenPool_diff.md
@@ -0,0 +1,51 @@
+```diff
+diff --git a/src/v0.8/ccip/pools/TokenPool.sol b/src/v0.8/ccip/pools/UpgradeableTokenPool.sol
+index b3571bb449..fcd8948098 100644
+--- a/src/v0.8/ccip/pools/TokenPool.sol
++++ b/src/v0.8/ccip/pools/UpgradeableTokenPool.sol
+@@ -1,5 +1,5 @@
+ // SPDX-License-Identifier: BUSL-1.1
+-pragma solidity 0.8.19;
++pragma solidity ^0.8.0;
+
+ import {IPool} from "../interfaces/pools/IPool.sol";
+ import {IARM} from "../interfaces/IARM.sol";
+@@ -15,7 +15,7 @@ import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts
+ /// @notice Base abstract class with common functions for all token pools.
+ /// A token pool serves as isolated place for holding tokens and token specific logic
+ /// that may execute as tokens move across the bridge.
+-abstract contract TokenPool is IPool, OwnerIsCreator, IERC165 {
++abstract contract UpgradeableTokenPool is IPool, OwnerIsCreator, IERC165 {
+ using EnumerableSet for EnumerableSet.AddressSet;
+ using EnumerableSet for EnumerableSet.UintSet;
+ using RateLimiter for RateLimiter.TokenBucket;
+@@ -74,23 +74,17 @@ abstract contract TokenPool is IPool, OwnerIsCreator, IERC165 {
+ EnumerableSet.UintSet internal s_remoteChainSelectors;
+ /// @dev Outbound rate limits. Corresponds to the inbound rate limit for the pool
+ /// on the remote chain.
+- mapping(uint64 remoteChainSelector => RateLimiter.TokenBucket) internal s_outboundRateLimits;
++ mapping(uint64 => RateLimiter.TokenBucket) internal s_outboundRateLimits;
+ /// @dev Inbound rate limits. This allows per destination chain
+ /// token issuer specified rate limiting (e.g. issuers may trust chains to varying
+ /// degrees and prefer different limits)
+- mapping(uint64 remoteChainSelector => RateLimiter.TokenBucket) internal s_inboundRateLimits;
++ mapping(uint64 => RateLimiter.TokenBucket) internal s_inboundRateLimits;
+
+- constructor(IERC20 token, address[] memory allowlist, address armProxy, address router) {
+- if (address(token) == address(0) || router == address(0)) revert ZeroAddressNotAllowed();
++ constructor(IERC20 token, address armProxy, bool allowlistEnabled) {
++ if (address(token) == address(0)) revert ZeroAddressNotAllowed();
+ i_token = token;
+ i_armProxy = armProxy;
+- s_router = IRouter(router);
+-
+- // Pool can be set as permissioned or permissionless at deployment time only to save hot-path gas.
+- i_allowlistEnabled = allowlist.length > 0;
+- if (i_allowlistEnabled) {
+- _applyAllowListUpdates(new address[](0), allowlist);
+- }
++ i_allowlistEnabled = allowlistEnabled;
+ }
+
+ /// @notice Get ARM proxy address
+```
diff --git a/contracts/src/v0.8/ccip/test/BaseTest.t.sol b/contracts/src/v0.8/ccip/test/BaseTest.t.sol
index 33d2e649c4..f645a6e612 100644
--- a/contracts/src/v0.8/ccip/test/BaseTest.t.sol
+++ b/contracts/src/v0.8/ccip/test/BaseTest.t.sol
@@ -2,9 +2,17 @@
pragma solidity 0.8.19;
import {Test, stdError} from "forge-std/Test.sol";
+import {StdInvariant} from "forge-std/StdInvariant.sol";
+import {StdCheats} from "forge-std/StdCheats.sol";
+import {StdUtils} from "forge-std/StdUtils.sol";
import {MockARM} from "./mocks/MockARM.sol";
import {StructFactory} from "./StructFactory.sol";
+import {TransparentUpgradeableProxy} from "solidity-utils/contracts/transparent-proxy/TransparentUpgradeableProxy.sol";
+import {UpgradeableLockReleaseTokenPool} from "../pools/GHO/UpgradeableLockReleaseTokenPool.sol";
+import {UpgradeableBurnMintTokenPool} from "../pools/GHO/UpgradeableBurnMintTokenPool.sol";
+import {IBurnMintERC20} from "../../shared/token/ERC20/IBurnMintERC20.sol";
+
contract BaseTest is Test, StructFactory {
bool private s_baseTestInitialized;
@@ -26,4 +34,91 @@ contract BaseTest is Test, StructFactory {
s_mockARM = new MockARM();
}
+
+ function _deployUpgradeableBurnMintTokenPool(
+ address ghoToken,
+ address arm,
+ address router,
+ address owner,
+ address proxyAdmin
+ ) internal returns (address) {
+ // Deploy BurnMintTokenPool for GHO token on source chain
+ UpgradeableBurnMintTokenPool tokenPoolImpl = new UpgradeableBurnMintTokenPool(ghoToken, arm, false);
+    // Impl init
+ address[] memory emptyArray = new address[](0);
+ tokenPoolImpl.initialize(owner, emptyArray, router);
+ // proxy deploy and init
+ bytes memory tokenPoolInitParams = abi.encodeWithSignature(
+ "initialize(address,address[],address)",
+ owner,
+ emptyArray,
+ router
+ );
+ TransparentUpgradeableProxy tokenPoolProxy = new TransparentUpgradeableProxy(
+ address(tokenPoolImpl),
+ proxyAdmin,
+ tokenPoolInitParams
+ );
+ // Manage ownership
+ vm.stopPrank();
+ vm.prank(owner);
+ UpgradeableBurnMintTokenPool(address(tokenPoolProxy)).acceptOwnership();
+ vm.startPrank(OWNER);
+
+ return address(tokenPoolProxy);
+ }
+
+ function _deployUpgradeableLockReleaseTokenPool(
+ address ghoToken,
+ address arm,
+ address router,
+ address owner,
+ uint256 bridgeLimit,
+ address proxyAdmin
+ ) internal returns (address) {
+ UpgradeableLockReleaseTokenPool tokenPoolImpl = new UpgradeableLockReleaseTokenPool(ghoToken, arm, false, true);
+ // Imple init
+ address[] memory emptyArray = new address[](0);
+ tokenPoolImpl.initialize(owner, emptyArray, router, bridgeLimit);
+ // proxy deploy and init
+ bytes memory tokenPoolInitParams = abi.encodeWithSignature(
+ "initialize(address,address[],address,uint256)",
+ owner,
+ emptyArray,
+ router,
+ bridgeLimit
+ );
+ TransparentUpgradeableProxy tokenPoolProxy = new TransparentUpgradeableProxy(
+ address(tokenPoolImpl),
+ proxyAdmin,
+ tokenPoolInitParams
+ );
+
+ // Manage ownership
+ vm.stopPrank();
+ vm.prank(owner);
+ UpgradeableLockReleaseTokenPool(address(tokenPoolProxy)).acceptOwnership();
+ vm.startPrank(OWNER);
+
+ return address(tokenPoolProxy);
+ }
+
+ function _inflateFacilitatorLevel(address tokenPool, address ghoToken, uint256 amount) internal {
+ vm.stopPrank();
+ vm.prank(tokenPool);
+ IBurnMintERC20(ghoToken).mint(address(0), amount);
+ }
+
+ function _getProxyAdminAddress(address proxy) internal view returns (address) {
+ bytes32 ERC1967_ADMIN_SLOT = 0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103;
+ bytes32 adminSlot = vm.load(proxy, ERC1967_ADMIN_SLOT);
+ return address(uint160(uint256(adminSlot)));
+ }
+
+ function _getProxyImplementationAddress(address proxy) internal view returns (address) {
+ bytes32 ERC1967_IMPLEMENTATION_SLOT = 0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc;
+ bytes32 implSlot = vm.load(proxy, ERC1967_IMPLEMENTATION_SLOT);
+ return address(uint160(uint256(implSlot)));
+ }
+
}
diff --git a/contracts/src/v0.8/ccip/test/mocks/MockUpgradeable.sol b/contracts/src/v0.8/ccip/test/mocks/MockUpgradeable.sol
new file mode 100644
index 0000000000..bd80dee812
--- /dev/null
+++ b/contracts/src/v0.8/ccip/test/mocks/MockUpgradeable.sol
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.0;
+
+import {VersionedInitializable} from "../../pools/GHO/VersionedInitializable.sol";
+
+/**
+ * @dev Mock contract to test upgrades, not to be used in production.
+ */
+contract MockUpgradeable is VersionedInitializable {
+ /**
+ * @dev Constructor
+ */
+ constructor() {
+    // Intentionally left blank
+ }
+
+ /**
+ * @dev Initializer
+ */
+ function initialize() public initializer {
+    // Intentionally left blank
+ }
+
+ /**
+ * @notice Returns the revision number
+ * @return The revision number
+ */
+ function REVISION() public pure returns (uint256) {
+ return 2;
+ }
+
+ /// @inheritdoc VersionedInitializable
+ function getRevision() internal pure virtual override returns (uint256) {
+ return REVISION();
+ }
+}
diff --git a/contracts/src/v0.8/ccip/test/pools/End2End.t.sol b/contracts/src/v0.8/ccip/test/pools/End2End.t.sol
new file mode 100644
index 0000000000..9abbef2ac9
--- /dev/null
+++ b/contracts/src/v0.8/ccip/test/pools/End2End.t.sol
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import "../helpers/MerkleHelper.sol";
+import "../commitStore/CommitStore.t.sol";
+import "../onRamp/EVM2EVMOnRampSetup.t.sol";
+import "../offRamp/EVM2EVMOffRampSetup.t.sol";
+
+contract E2E is EVM2EVMOnRampSetup, CommitStoreSetup, EVM2EVMOffRampSetup {
+ using Internal for Internal.EVM2EVMMessage;
+
+ function setUp() public virtual override(EVM2EVMOnRampSetup, CommitStoreSetup, EVM2EVMOffRampSetup) {
+ EVM2EVMOnRampSetup.setUp();
+ CommitStoreSetup.setUp();
+ EVM2EVMOffRampSetup.setUp();
+
+ deployOffRamp(s_commitStore, s_destRouter, address(0));
+ }
+}
diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereum.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereum.t.sol
new file mode 100644
index 0000000000..8eacd4232e
--- /dev/null
+++ b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereum.t.sol
@@ -0,0 +1,678 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import {GhoToken} from "@aave/gho-core/gho/GhoToken.sol";
+import {TransparentUpgradeableProxy} from "solidity-utils/contracts/transparent-proxy/TransparentUpgradeableProxy.sol";
+
+import {stdError} from "forge-std/Test.sol";
+import {MockUpgradeable} from "../../mocks/MockUpgradeable.sol";
+import {IPool} from "../../../interfaces/pools/IPool.sol";
+import {LockReleaseTokenPool} from "../../../pools/LockReleaseTokenPool.sol";
+import {UpgradeableLockReleaseTokenPool} from "../../../pools/GHO/UpgradeableLockReleaseTokenPool.sol";
+import {UpgradeableTokenPool} from "../../../pools/GHO/UpgradeableTokenPool.sol";
+import {EVM2EVMOffRamp} from "../../../offRamp/EVM2EVMOffRamp.sol";
+import {RateLimiter} from "../../../libraries/RateLimiter.sol";
+import {IERC165} from "../../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/IERC165.sol";
+import {GHOTokenPoolEthereumSetup} from "./GHOTokenPoolEthereumSetup.t.sol";
+
+contract GHOTokenPoolEthereum_setRebalancer is GHOTokenPoolEthereumSetup {
+ function testSetRebalancerSuccess() public {
+ assertEq(address(s_ghoTokenPool.getRebalancer()), OWNER);
+ changePrank(AAVE_DAO);
+ s_ghoTokenPool.setRebalancer(STRANGER);
+ assertEq(address(s_ghoTokenPool.getRebalancer()), STRANGER);
+ }
+
+ function testSetRebalancerReverts() public {
+ vm.startPrank(STRANGER);
+
+ vm.expectRevert("Only callable by owner");
+ s_ghoTokenPool.setRebalancer(STRANGER);
+ }
+}
+
+contract GHOTokenPoolEthereum_lockOrBurn is GHOTokenPoolEthereumSetup {
+ error SenderNotAllowed(address sender);
+
+ event Locked(address indexed sender, uint256 amount);
+ event TokensConsumed(uint256 tokens);
+
+ function testFuzz_LockOrBurnNoAllowListSuccess(uint256 amount, uint256 bridgedAmount) public {
+ uint256 maxAmount = getOutboundRateLimiterConfig().capacity < INITIAL_BRIDGE_LIMIT
+ ? getOutboundRateLimiterConfig().capacity
+ : INITIAL_BRIDGE_LIMIT;
+ amount = bound(amount, 1, maxAmount);
+ bridgedAmount = bound(bridgedAmount, 0, INITIAL_BRIDGE_LIMIT - amount);
+
+ changePrank(s_allowedOnRamp);
+ if (bridgedAmount > 0) {
+ s_ghoTokenPool.lockOrBurn(STRANGER, bytes(""), bridgedAmount, DEST_CHAIN_SELECTOR, bytes(""));
+ assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), bridgedAmount);
+ }
+
+ vm.expectEmit();
+ emit TokensConsumed(amount);
+ vm.expectEmit();
+ emit Locked(s_allowedOnRamp, amount);
+
+ s_ghoTokenPool.lockOrBurn(STRANGER, bytes(""), amount, DEST_CHAIN_SELECTOR, bytes(""));
+
+ assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), bridgedAmount + amount);
+ }
+
+ function testTokenMaxCapacityExceededReverts() public {
+ RateLimiter.Config memory rateLimiterConfig = getOutboundRateLimiterConfig();
+ uint256 capacity = rateLimiterConfig.capacity;
+ uint256 amount = 10 * capacity;
+
+ // increase bridge limit to hit the rate limit error
+ vm.startPrank(AAVE_DAO);
+ s_ghoTokenPool.setBridgeLimit(amount);
+
+ vm.expectRevert(
+ abi.encodeWithSelector(RateLimiter.TokenMaxCapacityExceeded.selector, capacity, amount, address(s_token))
+ );
+ vm.startPrank(s_allowedOnRamp);
+ s_ghoTokenPool.lockOrBurn(STRANGER, bytes(""), amount, DEST_CHAIN_SELECTOR, bytes(""));
+ }
+
+ function testTokenBridgeLimitExceededReverts() public {
+ uint256 bridgeLimit = s_ghoTokenPool.getBridgeLimit();
+ uint256 amount = bridgeLimit + 1;
+
+ vm.expectRevert(abi.encodeWithSelector(UpgradeableLockReleaseTokenPool.BridgeLimitExceeded.selector, bridgeLimit));
+ vm.startPrank(s_allowedOnRamp);
+ s_ghoTokenPool.lockOrBurn(STRANGER, bytes(""), amount, DEST_CHAIN_SELECTOR, bytes(""));
+ }
+}
+
+contract GHOTokenPoolEthereum_releaseOrMint is GHOTokenPoolEthereumSetup {
+ event TokensConsumed(uint256 tokens);
+ event Released(address indexed sender, address indexed recipient, uint256 amount);
+
+ function setUp() public virtual override {
+ GHOTokenPoolEthereumSetup.setUp();
+
+ UpgradeableTokenPool.ChainUpdate[] memory chainUpdate = new UpgradeableTokenPool.ChainUpdate[](1);
+ chainUpdate[0] = UpgradeableTokenPool.ChainUpdate({
+ remoteChainSelector: SOURCE_CHAIN_SELECTOR,
+ allowed: true,
+ outboundRateLimiterConfig: getOutboundRateLimiterConfig(),
+ inboundRateLimiterConfig: getInboundRateLimiterConfig()
+ });
+
+ changePrank(AAVE_DAO);
+ s_ghoTokenPool.applyChainUpdates(chainUpdate);
+ }
+
+ function test_ReleaseOrMintSuccess() public {
+ uint256 amount = 100;
+ deal(address(s_token), address(s_ghoTokenPool), amount);
+
+ // Inflate current bridged amount so it can be reduced in `releaseOrMint` function
+ vm.startPrank(s_allowedOnRamp);
+ s_ghoTokenPool.lockOrBurn(STRANGER, bytes(""), amount, DEST_CHAIN_SELECTOR, bytes(""));
+
+ vm.expectEmit();
+ emit TokensConsumed(amount);
+ vm.expectEmit();
+ emit Released(s_allowedOffRamp, OWNER, amount);
+
+ vm.startPrank(s_allowedOffRamp);
+ s_ghoTokenPool.releaseOrMint(bytes(""), OWNER, amount, SOURCE_CHAIN_SELECTOR, bytes(""));
+
+ assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), 0);
+ }
+
+ function testFuzz_ReleaseOrMintSuccess(address recipient, uint256 amount, uint256 bridgedAmount) public {
+ // Since the owner already has tokens this would break the checks
+ vm.assume(recipient != OWNER);
+ vm.assume(recipient != address(0));
+ vm.assume(recipient != address(s_token));
+
+ amount = uint128(bound(amount, 2, type(uint128).max));
+ bridgedAmount = uint128(bound(bridgedAmount, amount, type(uint128).max));
+
+ // Inflate current bridged amount so it can be reduced in `releaseOrMint` function
+ vm.startPrank(AAVE_DAO);
+ s_ghoTokenPool.setBridgeLimit(bridgedAmount);
+ s_ghoTokenPool.setChainRateLimiterConfig(
+ DEST_CHAIN_SELECTOR,
+ RateLimiter.Config({isEnabled: true, capacity: type(uint128).max, rate: 1e15}),
+ RateLimiter.Config({isEnabled: true, capacity: type(uint128).max, rate: 1e15})
+ );
+ vm.warp(block.timestamp + 1e50); // wait to refill capacity
+ vm.startPrank(s_allowedOnRamp);
+ s_ghoTokenPool.lockOrBurn(STRANGER, bytes(""), bridgedAmount, DEST_CHAIN_SELECTOR, bytes(""));
+
+ // Makes sure the pool always has enough funds
+ deal(address(s_token), address(s_ghoTokenPool), amount);
+ vm.startPrank(s_allowedOffRamp);
+
+ uint256 capacity = getInboundRateLimiterConfig().capacity;
+ uint256 bridgedAmountAfter = bridgedAmount;
+ // Determine if we hit the rate limit or the txs should succeed.
+ if (amount > capacity) {
+ vm.expectRevert(
+ abi.encodeWithSelector(RateLimiter.TokenMaxCapacityExceeded.selector, capacity, amount, address(s_token))
+ );
+ } else {
+ // Only rate limit if the amount is >0
+ if (amount > 0) {
+ vm.expectEmit();
+ emit TokensConsumed(amount);
+ }
+
+ vm.expectEmit();
+ emit Released(s_allowedOffRamp, recipient, amount);
+
+ bridgedAmountAfter -= amount;
+ }
+
+ s_ghoTokenPool.releaseOrMint(bytes(""), recipient, amount, SOURCE_CHAIN_SELECTOR, bytes(""));
+
+ assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), bridgedAmountAfter);
+ }
+
+ function testChainNotAllowedReverts() public {
+ UpgradeableTokenPool.ChainUpdate[] memory chainUpdate = new UpgradeableTokenPool.ChainUpdate[](1);
+ chainUpdate[0] = UpgradeableTokenPool.ChainUpdate({
+ remoteChainSelector: SOURCE_CHAIN_SELECTOR,
+ allowed: false,
+ outboundRateLimiterConfig: RateLimiter.Config({isEnabled: false, capacity: 0, rate: 0}),
+ inboundRateLimiterConfig: RateLimiter.Config({isEnabled: false, capacity: 0, rate: 0})
+ });
+
+ changePrank(AAVE_DAO);
+ s_ghoTokenPool.applyChainUpdates(chainUpdate);
+ vm.stopPrank();
+
+ vm.startPrank(s_allowedOffRamp);
+
+ vm.expectRevert(abi.encodeWithSelector(UpgradeableTokenPool.ChainNotAllowed.selector, SOURCE_CHAIN_SELECTOR));
+ s_ghoTokenPool.releaseOrMint(bytes(""), OWNER, 1e5, SOURCE_CHAIN_SELECTOR, bytes(""));
+ }
+
+ function testPoolMintNotHealthyReverts() public {
+ // Should not mint tokens if cursed.
+ s_mockARM.voteToCurse(bytes32(0));
+ uint256 before = s_token.balanceOf(OWNER);
+ vm.startPrank(s_allowedOffRamp);
+ vm.expectRevert(EVM2EVMOffRamp.BadARMSignal.selector);
+ s_ghoTokenPool.releaseOrMint(bytes(""), OWNER, 1e5, SOURCE_CHAIN_SELECTOR, bytes(""));
+ assertEq(s_token.balanceOf(OWNER), before);
+ }
+
+ function testReleaseNoFundsReverts() public {
+ uint256 amount = 1;
+
+ // Inflate current bridged amount so it can be reduced in `releaseOrMint` function
+ vm.startPrank(s_allowedOnRamp);
+ s_ghoTokenPool.lockOrBurn(STRANGER, bytes(""), amount, DEST_CHAIN_SELECTOR, bytes(""));
+
+ vm.expectRevert(stdError.arithmeticError);
+ vm.startPrank(s_allowedOffRamp);
+ s_ghoTokenPool.releaseOrMint(bytes(""), STRANGER, amount, SOURCE_CHAIN_SELECTOR, bytes(""));
+ }
+
+ function testTokenMaxCapacityExceededReverts() public {
+ RateLimiter.Config memory rateLimiterConfig = getInboundRateLimiterConfig();
+ uint256 capacity = rateLimiterConfig.capacity;
+ uint256 amount = 10 * capacity;
+
+ // Inflate current bridged amount so it can be reduced in `releaseOrMint` function
+ vm.startPrank(AAVE_DAO);
+ s_ghoTokenPool.setBridgeLimit(amount);
+ s_ghoTokenPool.setChainRateLimiterConfig(
+ DEST_CHAIN_SELECTOR,
+ RateLimiter.Config({isEnabled: true, capacity: type(uint128).max, rate: 1e15}),
+ getInboundRateLimiterConfig()
+ );
+ vm.warp(block.timestamp + 1e50); // wait to refill capacity
+ vm.startPrank(s_allowedOnRamp);
+ s_ghoTokenPool.lockOrBurn(STRANGER, bytes(""), amount, DEST_CHAIN_SELECTOR, bytes(""));
+
+ vm.expectRevert(
+ abi.encodeWithSelector(RateLimiter.TokenMaxCapacityExceeded.selector, capacity, amount, address(s_token))
+ );
+ vm.startPrank(s_allowedOffRamp);
+ s_ghoTokenPool.releaseOrMint(bytes(""), STRANGER, amount, SOURCE_CHAIN_SELECTOR, bytes(""));
+ }
+
+ function testBridgedAmountNoEnoughReverts() public {
+ uint256 amount = 10;
+ vm.expectRevert(abi.encodeWithSelector(UpgradeableLockReleaseTokenPool.NotEnoughBridgedAmount.selector));
+ vm.startPrank(s_allowedOffRamp);
+ s_ghoTokenPool.releaseOrMint(bytes(""), STRANGER, amount, SOURCE_CHAIN_SELECTOR, bytes(""));
+ }
+}
+
+contract GHOTokenPoolEthereum_canAcceptLiquidity is GHOTokenPoolEthereumSetup {
+ function test_CanAcceptLiquiditySuccess() public {
+ assertEq(true, s_ghoTokenPool.canAcceptLiquidity());
+
+ s_ghoTokenPool = new UpgradeableLockReleaseTokenPool(address(s_token), address(s_mockARM), false, false);
+
+ assertEq(false, s_ghoTokenPool.canAcceptLiquidity());
+ }
+}
+
+contract GHOTokenPoolEthereum_provideLiquidity is GHOTokenPoolEthereumSetup {
+ function testFuzz_ProvideLiquiditySuccess(uint256 amount) public {
+ vm.assume(amount < type(uint128).max);
+
+ uint256 balancePre = s_token.balanceOf(OWNER);
+ s_token.approve(address(s_ghoTokenPool), amount);
+
+ s_ghoTokenPool.provideLiquidity(amount);
+
+ assertEq(s_token.balanceOf(OWNER), balancePre - amount);
+ assertEq(s_token.balanceOf(address(s_ghoTokenPool)), amount);
+ }
+
+ // Reverts
+
+ function test_UnauthorizedReverts() public {
+ vm.startPrank(STRANGER);
+ vm.expectRevert(abi.encodeWithSelector(LockReleaseTokenPool.Unauthorized.selector, STRANGER));
+
+ s_ghoTokenPool.provideLiquidity(1);
+ }
+
+ function testFuzz_ExceedsAllowance(uint256 amount) public {
+ vm.assume(amount > 0);
+ vm.expectRevert(stdError.arithmeticError);
+ s_ghoTokenPool.provideLiquidity(amount);
+ }
+
+ function testLiquidityNotAcceptedReverts() public {
+ s_ghoTokenPool = new UpgradeableLockReleaseTokenPool(address(s_token), address(s_mockARM), false, false);
+
+ vm.expectRevert(LockReleaseTokenPool.LiquidityNotAccepted.selector);
+ s_ghoTokenPool.provideLiquidity(1);
+ }
+}
+
+contract GHOTokenPoolEthereum_withdrawalLiquidity is GHOTokenPoolEthereumSetup {
+ function testFuzz_WithdrawalLiquiditySuccess(uint256 amount) public {
+ vm.assume(amount < type(uint128).max);
+
+ uint256 balancePre = s_token.balanceOf(OWNER);
+ s_token.approve(address(s_ghoTokenPool), amount);
+ s_ghoTokenPool.provideLiquidity(amount);
+
+ s_ghoTokenPool.withdrawLiquidity(amount);
+
+ assertEq(s_token.balanceOf(OWNER), balancePre);
+ }
+
+ // Reverts
+
+ function test_UnauthorizedReverts() public {
+ vm.startPrank(STRANGER);
+ vm.expectRevert(abi.encodeWithSelector(LockReleaseTokenPool.Unauthorized.selector, STRANGER));
+
+ s_ghoTokenPool.withdrawLiquidity(1);
+ }
+
+ function testInsufficientLiquidityReverts() public {
+ uint256 maxUint128 = 2 ** 128 - 1;
+ s_token.approve(address(s_ghoTokenPool), maxUint128);
+ s_ghoTokenPool.provideLiquidity(maxUint128);
+
+ changePrank(address(s_ghoTokenPool));
+ s_token.transfer(OWNER, maxUint128);
+ changePrank(OWNER);
+
+ vm.expectRevert(LockReleaseTokenPool.InsufficientLiquidity.selector);
+ s_ghoTokenPool.withdrawLiquidity(1);
+ }
+}
+
+contract GHOTokenPoolEthereum_supportsInterface is GHOTokenPoolEthereumSetup {
+ function testSupportsInterfaceSuccess() public {
+ assertTrue(s_ghoTokenPool.supportsInterface(s_ghoTokenPool.getLockReleaseInterfaceId()));
+ assertTrue(s_ghoTokenPool.supportsInterface(type(IPool).interfaceId));
+ assertTrue(s_ghoTokenPool.supportsInterface(type(IERC165).interfaceId));
+ }
+}
+
+contract GHOTokenPoolEthereum_setChainRateLimiterConfig is GHOTokenPoolEthereumSetup {
+ event ConfigChanged(RateLimiter.Config);
+ event ChainConfigured(
+ uint64 chainSelector,
+ RateLimiter.Config outboundRateLimiterConfig,
+ RateLimiter.Config inboundRateLimiterConfig
+ );
+
+ uint64 internal s_remoteChainSelector;
+
+ function setUp() public virtual override {
+ GHOTokenPoolEthereumSetup.setUp();
+ UpgradeableTokenPool.ChainUpdate[] memory chainUpdates = new UpgradeableTokenPool.ChainUpdate[](1);
+ s_remoteChainSelector = 123124;
+ chainUpdates[0] = UpgradeableTokenPool.ChainUpdate({
+ remoteChainSelector: s_remoteChainSelector,
+ allowed: true,
+ outboundRateLimiterConfig: getOutboundRateLimiterConfig(),
+ inboundRateLimiterConfig: getInboundRateLimiterConfig()
+ });
+ changePrank(AAVE_DAO);
+ s_ghoTokenPool.applyChainUpdates(chainUpdates);
+ changePrank(OWNER);
+ }
+
+ function testFuzz_SetChainRateLimiterConfigSuccess(uint128 capacity, uint128 rate, uint32 newTime) public {
+ // Cap the lower bound to 4 so 4/2 is still >= 2
+ vm.assume(capacity >= 4);
+ // Cap the lower bound to 2 so 2/2 is still >= 1
+ rate = uint128(bound(rate, 2, capacity - 2));
+ // Bucket updates only work on increasing time
+ newTime = uint32(bound(newTime, block.timestamp + 1, type(uint32).max));
+ vm.warp(newTime);
+
+ uint256 oldOutboundTokens = s_ghoTokenPool.getCurrentOutboundRateLimiterState(s_remoteChainSelector).tokens;
+ uint256 oldInboundTokens = s_ghoTokenPool.getCurrentInboundRateLimiterState(s_remoteChainSelector).tokens;
+
+ RateLimiter.Config memory newOutboundConfig = RateLimiter.Config({isEnabled: true, capacity: capacity, rate: rate});
+ RateLimiter.Config memory newInboundConfig = RateLimiter.Config({
+ isEnabled: true,
+ capacity: capacity / 2,
+ rate: rate / 2
+ });
+
+ vm.expectEmit();
+ emit ConfigChanged(newOutboundConfig);
+ vm.expectEmit();
+ emit ConfigChanged(newInboundConfig);
+ vm.expectEmit();
+ emit ChainConfigured(s_remoteChainSelector, newOutboundConfig, newInboundConfig);
+
+ changePrank(AAVE_DAO);
+ s_ghoTokenPool.setChainRateLimiterConfig(s_remoteChainSelector, newOutboundConfig, newInboundConfig);
+
+ uint256 expectedTokens = RateLimiter._min(newOutboundConfig.capacity, oldOutboundTokens);
+
+ RateLimiter.TokenBucket memory bucket = s_ghoTokenPool.getCurrentOutboundRateLimiterState(s_remoteChainSelector);
+ assertEq(bucket.capacity, newOutboundConfig.capacity);
+ assertEq(bucket.rate, newOutboundConfig.rate);
+ assertEq(bucket.tokens, expectedTokens);
+ assertEq(bucket.lastUpdated, newTime);
+
+ expectedTokens = RateLimiter._min(newInboundConfig.capacity, oldInboundTokens);
+
+ bucket = s_ghoTokenPool.getCurrentInboundRateLimiterState(s_remoteChainSelector);
+ assertEq(bucket.capacity, newInboundConfig.capacity);
+ assertEq(bucket.rate, newInboundConfig.rate);
+ assertEq(bucket.tokens, expectedTokens);
+ assertEq(bucket.lastUpdated, newTime);
+ }
+
+ function testOnlyOwnerOrRateLimitAdminSuccess() public {
+ address rateLimiterAdmin = address(28973509103597907);
+
+ changePrank(AAVE_DAO);
+ s_ghoTokenPool.setRateLimitAdmin(rateLimiterAdmin);
+
+ changePrank(rateLimiterAdmin);
+
+ s_ghoTokenPool.setChainRateLimiterConfig(
+ s_remoteChainSelector,
+ getOutboundRateLimiterConfig(),
+ getInboundRateLimiterConfig()
+ );
+
+ changePrank(AAVE_DAO);
+
+ s_ghoTokenPool.setChainRateLimiterConfig(
+ s_remoteChainSelector,
+ getOutboundRateLimiterConfig(),
+ getInboundRateLimiterConfig()
+ );
+ }
+
+ // Reverts
+
+ function testOnlyOwnerReverts() public {
+ changePrank(STRANGER);
+
+ vm.expectRevert(abi.encodeWithSelector(LockReleaseTokenPool.Unauthorized.selector, STRANGER));
+ s_ghoTokenPool.setChainRateLimiterConfig(
+ s_remoteChainSelector,
+ getOutboundRateLimiterConfig(),
+ getInboundRateLimiterConfig()
+ );
+ }
+
+ function testNonExistentChainReverts() public {
+ uint64 wrongChainSelector = 9084102894;
+
+ vm.expectRevert(abi.encodeWithSelector(UpgradeableTokenPool.NonExistentChain.selector, wrongChainSelector));
+ changePrank(AAVE_DAO);
+ s_ghoTokenPool.setChainRateLimiterConfig(
+ wrongChainSelector,
+ getOutboundRateLimiterConfig(),
+ getInboundRateLimiterConfig()
+ );
+ }
+}
+
+contract GHOTokenPoolEthereum_setRateLimitAdmin is GHOTokenPoolEthereumSetup {
+ function testSetRateLimitAdminSuccess() public {
+ assertEq(address(0), s_ghoTokenPool.getRateLimitAdmin());
+ changePrank(AAVE_DAO);
+ s_ghoTokenPool.setRateLimitAdmin(OWNER);
+ assertEq(OWNER, s_ghoTokenPool.getRateLimitAdmin());
+ }
+
+ // Reverts
+
+ function testSetRateLimitAdminReverts() public {
+ vm.startPrank(STRANGER);
+
+ vm.expectRevert("Only callable by owner");
+ s_ghoTokenPool.setRateLimitAdmin(STRANGER);
+ }
+}
+
+contract GHOTokenPoolEthereum_setBridgeLimit is GHOTokenPoolEthereumSetup {
+ event BridgeLimitUpdated(uint256 oldBridgeLimit, uint256 newBridgeLimit);
+
+ function testSetBridgeLimitAdminSuccess() public {
+ assertEq(INITIAL_BRIDGE_LIMIT, s_ghoTokenPool.getBridgeLimit());
+
+ uint256 newBridgeLimit = INITIAL_BRIDGE_LIMIT * 2;
+
+ vm.expectEmit();
+ emit BridgeLimitUpdated(INITIAL_BRIDGE_LIMIT, newBridgeLimit);
+
+ vm.startPrank(AAVE_DAO);
+ s_ghoTokenPool.setBridgeLimit(newBridgeLimit);
+
+ assertEq(newBridgeLimit, s_ghoTokenPool.getBridgeLimit());
+
+ // Bridge Limit Admin
+ address bridgeLimitAdmin = address(28973509103597907);
+ s_ghoTokenPool.setBridgeLimitAdmin(bridgeLimitAdmin);
+
+ vm.startPrank(bridgeLimitAdmin);
+ newBridgeLimit += 1;
+
+ s_ghoTokenPool.setBridgeLimit(newBridgeLimit);
+
+ assertEq(newBridgeLimit, s_ghoTokenPool.getBridgeLimit());
+ }
+
+ function testZeroBridgeLimitReverts() public {
+ vm.stopPrank();
+ vm.startPrank(AAVE_DAO);
+ s_ghoTokenPool.setBridgeLimit(0);
+
+ uint256 amount = 1;
+
+ vm.expectRevert(abi.encodeWithSelector(UpgradeableLockReleaseTokenPool.BridgeLimitExceeded.selector, 0));
+ vm.startPrank(s_allowedOnRamp);
+ s_ghoTokenPool.lockOrBurn(STRANGER, bytes(""), amount, DEST_CHAIN_SELECTOR, bytes(""));
+ }
+
+ function testBridgeLimitBelowCurrent() public {
+ // Increase current bridged amount to 10
+ uint256 amount = 10e18;
+ vm.startPrank(s_allowedOnRamp);
+ s_ghoTokenPool.lockOrBurn(STRANGER, bytes(""), amount, DEST_CHAIN_SELECTOR, bytes(""));
+
+ // Reduce bridge limit below current bridged amount
+ vm.startPrank(AAVE_DAO);
+ uint256 newBridgeLimit = amount - 1;
+ s_ghoTokenPool.setBridgeLimit(newBridgeLimit);
+ assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), amount);
+ assertEq(s_ghoTokenPool.getBridgeLimit(), newBridgeLimit);
+ assertGt(s_ghoTokenPool.getCurrentBridgedAmount(), s_ghoTokenPool.getBridgeLimit());
+
+ // Lock reverts due to maxed out bridge limit
+ vm.expectRevert(
+ abi.encodeWithSelector(UpgradeableLockReleaseTokenPool.BridgeLimitExceeded.selector, newBridgeLimit)
+ );
+ vm.startPrank(s_allowedOnRamp);
+ s_ghoTokenPool.lockOrBurn(STRANGER, bytes(""), 1, DEST_CHAIN_SELECTOR, bytes(""));
+
+ // Increase bridge limit by 1
+ vm.startPrank(AAVE_DAO);
+ newBridgeLimit = amount + 1;
+ s_ghoTokenPool.setBridgeLimit(newBridgeLimit);
+ assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), amount);
+ assertEq(s_ghoTokenPool.getBridgeLimit(), newBridgeLimit);
+ assertGt(s_ghoTokenPool.getBridgeLimit(), s_ghoTokenPool.getCurrentBridgedAmount());
+
+ // Bridge limit maxed out again
+ vm.startPrank(s_allowedOnRamp);
+ s_ghoTokenPool.lockOrBurn(STRANGER, bytes(""), 1, DEST_CHAIN_SELECTOR, bytes(""));
+ assertEq(s_ghoTokenPool.getBridgeLimit(), s_ghoTokenPool.getCurrentBridgedAmount());
+ }
+
+ function testCurrentBridgedAmountRecover() public {
+ // Reach maximum
+ vm.startPrank(s_allowedOnRamp);
+ s_ghoTokenPool.lockOrBurn(STRANGER, bytes(""), INITIAL_BRIDGE_LIMIT, DEST_CHAIN_SELECTOR, bytes(""));
+ assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), INITIAL_BRIDGE_LIMIT);
+ assertEq(s_ghoTokenPool.getBridgeLimit(), s_ghoTokenPool.getCurrentBridgedAmount());
+
+ // Lock reverts due to maxed out bridge limit
+ vm.expectRevert(
+ abi.encodeWithSelector(UpgradeableLockReleaseTokenPool.BridgeLimitExceeded.selector, INITIAL_BRIDGE_LIMIT)
+ );
+ s_ghoTokenPool.lockOrBurn(STRANGER, bytes(""), 1, DEST_CHAIN_SELECTOR, bytes(""));
+
+ // Amount available to bridge recovers thanks to liquidity coming back
+ UpgradeableTokenPool.ChainUpdate[] memory chainUpdate = new UpgradeableTokenPool.ChainUpdate[](1);
+ chainUpdate[0] = UpgradeableTokenPool.ChainUpdate({
+ remoteChainSelector: SOURCE_CHAIN_SELECTOR,
+ allowed: true,
+ outboundRateLimiterConfig: getOutboundRateLimiterConfig(),
+ inboundRateLimiterConfig: getInboundRateLimiterConfig()
+ });
+
+ changePrank(AAVE_DAO);
+ s_ghoTokenPool.applyChainUpdates(chainUpdate);
+
+ uint256 amount = 10;
+ deal(address(s_token), address(s_ghoTokenPool), amount);
+ vm.startPrank(s_allowedOffRamp);
+ s_ghoTokenPool.releaseOrMint(bytes(""), OWNER, amount, SOURCE_CHAIN_SELECTOR, bytes(""));
+ assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), INITIAL_BRIDGE_LIMIT - amount);
+ }
+
+ // Reverts
+
+ function testSetBridgeLimitAdminReverts() public {
+ vm.startPrank(STRANGER);
+
+ vm.expectRevert(abi.encodeWithSelector(LockReleaseTokenPool.Unauthorized.selector, STRANGER));
+ s_ghoTokenPool.setBridgeLimit(0);
+ }
+}
+
+contract GHOTokenPoolEthereum_setBridgeLimitAdmin is GHOTokenPoolEthereumSetup {
+ function testSetBridgeLimitAdminSuccess() public {
+ assertEq(address(0), s_ghoTokenPool.getBridgeLimitAdmin());
+
+ address bridgeLimitAdmin = address(28973509103597907);
+ changePrank(AAVE_DAO);
+ s_ghoTokenPool.setBridgeLimitAdmin(bridgeLimitAdmin);
+
+ assertEq(bridgeLimitAdmin, s_ghoTokenPool.getBridgeLimitAdmin());
+ }
+
+ // Reverts
+
+ function testSetBridgeLimitAdminReverts() public {
+ vm.startPrank(STRANGER);
+
+ vm.expectRevert("Only callable by owner");
+ s_ghoTokenPool.setBridgeLimitAdmin(STRANGER);
+ }
+}
+
+contract GHOTokenPoolEthereum_upgradeability is GHOTokenPoolEthereumSetup {
+ function testInitialization() public {
+ // Upgradeability
+ assertEq(s_ghoTokenPool.REVISION(), 1);
+ vm.startPrank(PROXY_ADMIN);
+ (bool ok, bytes memory result) = address(s_ghoTokenPool).staticcall(
+ abi.encodeWithSelector(TransparentUpgradeableProxy.admin.selector)
+ );
+ assertTrue(ok, "proxy admin fetch failed");
+ address decodedProxyAdmin = abi.decode(result, (address));
+ assertEq(decodedProxyAdmin, PROXY_ADMIN, "proxy admin is wrong");
+ assertEq(decodedProxyAdmin, _getProxyAdminAddress(address(s_ghoTokenPool)), "proxy admin is wrong");
+
+ // TokenPool
+ vm.startPrank(OWNER);
+ assertEq(s_ghoTokenPool.getAllowList().length, 0);
+ assertEq(s_ghoTokenPool.getAllowListEnabled(), false);
+ assertEq(s_ghoTokenPool.getArmProxy(), address(s_mockARM));
+ assertEq(s_ghoTokenPool.getRouter(), address(s_sourceRouter));
+ assertEq(address(s_ghoTokenPool.getToken()), address(s_token));
+ assertEq(s_ghoTokenPool.owner(), AAVE_DAO, "owner is wrong");
+ }
+
+ function testUpgrade() public {
+ MockUpgradeable newImpl = new MockUpgradeable();
+ bytes memory mockImpleParams = abi.encodeWithSignature("initialize()");
+ vm.startPrank(PROXY_ADMIN);
+ TransparentUpgradeableProxy(payable(address(s_ghoTokenPool))).upgradeToAndCall(address(newImpl), mockImpleParams);
+
+ vm.startPrank(OWNER);
+ assertEq(s_ghoTokenPool.REVISION(), 2);
+ }
+
+ function testUpgradeAdminReverts() public {
+ vm.expectRevert();
+ TransparentUpgradeableProxy(payable(address(s_ghoTokenPool))).upgradeToAndCall(address(0), bytes(""));
+ assertEq(s_ghoTokenPool.REVISION(), 1);
+
+ vm.expectRevert();
+ TransparentUpgradeableProxy(payable(address(s_ghoTokenPool))).upgradeTo(address(0));
+ assertEq(s_ghoTokenPool.REVISION(), 1);
+ }
+
+ function testChangeAdmin() public {
+ assertEq(_getProxyAdminAddress(address(s_ghoTokenPool)), PROXY_ADMIN);
+
+ address newAdmin = makeAddr("newAdmin");
+ vm.startPrank(PROXY_ADMIN);
+ TransparentUpgradeableProxy(payable(address(s_ghoTokenPool))).changeAdmin(newAdmin);
+
+ assertEq(_getProxyAdminAddress(address(s_ghoTokenPool)), newAdmin, "Admin change failed");
+ }
+
+ function testChangeAdminAdminReverts() public {
+ assertEq(_getProxyAdminAddress(address(s_ghoTokenPool)), PROXY_ADMIN);
+
+ address newAdmin = makeAddr("newAdmin");
+ vm.expectRevert();
+ TransparentUpgradeableProxy(payable(address(s_ghoTokenPool))).changeAdmin(newAdmin);
+
+ assertEq(_getProxyAdminAddress(address(s_ghoTokenPool)), PROXY_ADMIN, "Unauthorized admin change");
+ }
+}
diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereumBridgeLimit.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereumBridgeLimit.t.sol
new file mode 100644
index 0000000000..6bf367d5a1
--- /dev/null
+++ b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereumBridgeLimit.t.sol
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import {GhoToken} from "@aave/gho-core/gho/GhoToken.sol";
+
+import {IPool} from "../../../interfaces/pools/IPool.sol";
+import {GHOTokenPoolEthereumBridgeLimitSetup} from "./GHOTokenPoolEthereumBridgeLimitSetup.t.sol";
+
+contract GHOTokenPoolEthereumBridgeLimitSimpleScenario is GHOTokenPoolEthereumBridgeLimitSetup {
+ function setUp() public virtual override {
+ super.setUp();
+
+ // Arbitrum
+ _addBridge(1, INITIAL_BRIDGE_LIMIT);
+ _enableLane(0, 1);
+ }
+
+ function testFuzz_Bridge(uint256 amount) public {
+ uint256 maxAmount = _getMaxToBridgeOut(0);
+ amount = bound(amount, 1, maxAmount);
+
+ _assertInvariant();
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount);
+ assertEq(_getMaxToBridgeIn(0), 0);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1]);
+
+ deal(tokens[0], USER, amount);
+ _moveGhoOrigin(0, 1, USER, amount);
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount - amount);
+ assertEq(_getMaxToBridgeIn(0), amount);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1]);
+
+ _moveGhoDestination(0, 1, USER, amount);
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount - amount);
+ assertEq(_getMaxToBridgeIn(0), amount);
+ assertEq(_getMaxToBridgeOut(1), bucketLevels[1]);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1] - bucketLevels[1]);
+
+ _assertInvariant();
+ }
+
+ function testBridgeAll() public {
+ _assertInvariant();
+
+ uint256 maxAmount = _getMaxToBridgeOut(0);
+ assertEq(_getMaxToBridgeIn(0), 0);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1]);
+
+ deal(tokens[0], USER, maxAmount);
+ _moveGhoOrigin(0, 1, USER, maxAmount);
+
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(0), maxAmount);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1]);
+
+ _moveGhoDestination(0, 1, USER, maxAmount);
+
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(0), maxAmount);
+ assertEq(_getMaxToBridgeOut(1), bucketCapacities[1]);
+ assertEq(_getMaxToBridgeIn(1), 0);
+
+ _assertInvariant();
+ }
+
+ /// @dev Bridge out two times
+ function testFuzz_BridgeTwoSteps(uint256 amount1, uint256 amount2) public {
+ uint256 maxAmount = _getMaxToBridgeOut(0);
+ amount1 = bound(amount1, 1, maxAmount);
+ amount2 = bound(amount2, 1, maxAmount);
+
+ _assertInvariant();
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount);
+ assertEq(_getMaxToBridgeIn(0), 0);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1]);
+
+ deal(tokens[0], USER, amount1);
+ _moveGhoOrigin(0, 1, USER, amount1);
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount - amount1);
+ assertEq(_getMaxToBridgeIn(0), amount1);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1]);
+
+ _moveGhoDestination(0, 1, USER, amount1);
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount - amount1);
+ assertEq(_getMaxToBridgeIn(0), amount1);
+ assertEq(_getMaxToBridgeOut(1), bucketLevels[1]);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1] - bucketLevels[1]);
+
+ _assertInvariant();
+
+ // Bridge up to bridge limit amount
+ if (amount1 + amount2 > maxAmount) {
+ vm.expectRevert();
+ vm.prank(RAMP);
+ IPool(pools[0]).lockOrBurn(USER, bytes(""), amount2, uint64(1), bytes(""));
+
+ amount2 = maxAmount - amount1;
+ }
+
+ if (amount2 > 0) {
+ _assertInvariant();
+
+ uint256 acc = amount1 + amount2;
+ deal(tokens[0], USER, amount2);
+ _moveGhoOrigin(0, 1, USER, amount2);
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount - acc);
+ assertEq(_getMaxToBridgeIn(0), acc);
+ assertEq(_getMaxToBridgeOut(1), amount1);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1] - amount1);
+
+ _moveGhoDestination(0, 1, USER, amount2);
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount - acc);
+ assertEq(_getMaxToBridgeIn(0), acc);
+ assertEq(_getMaxToBridgeOut(1), acc);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1] - acc);
+
+ _assertInvariant();
+ }
+ }
+
+ /// @dev Bridge some tokens out and later, bridge them back in
+ function testFuzz_BridgeBackAndForth(uint256 amountOut, uint256 amountIn) public {
+ uint256 maxAmount = _getMaxToBridgeOut(0);
+ amountOut = bound(amountOut, 1, maxAmount);
+ amountIn = bound(amountIn, 1, _getCapacity(1));
+
+ _assertInvariant();
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount);
+ assertEq(_getMaxToBridgeIn(0), 0);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1]);
+
+ deal(tokens[0], USER, amountOut);
+ _moveGhoOrigin(0, 1, USER, amountOut);
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount - amountOut);
+ assertEq(_getMaxToBridgeIn(0), amountOut);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1]);
+
+ _moveGhoDestination(0, 1, USER, amountOut);
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount - amountOut);
+ assertEq(_getMaxToBridgeIn(0), amountOut);
+ assertEq(_getMaxToBridgeOut(1), bucketLevels[1]);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1] - bucketLevels[1]);
+
+ _assertInvariant();
+
+ // Bridge up to current bridged amount
+ if (amountIn > amountOut) {
+ // Simulate revert on destination
+ vm.expectRevert();
+ vm.prank(RAMP);
+ IPool(pools[0]).releaseOrMint(bytes(""), USER, amountIn, uint64(1), bytes(""));
+
+ amountIn = amountOut;
+ }
+
+ if (amountIn > 0) {
+ _assertInvariant();
+
+ uint256 acc = amountOut - amountIn;
+ deal(tokens[1], USER, amountIn);
+ _moveGhoOrigin(1, 0, USER, amountIn);
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount - amountOut);
+ assertEq(_getMaxToBridgeIn(0), amountOut);
+ assertEq(_getMaxToBridgeOut(1), acc);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1] - acc);
+
+ _moveGhoDestination(1, 0, USER, amountIn);
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount - acc);
+ assertEq(_getMaxToBridgeIn(0), acc);
+ assertEq(_getMaxToBridgeOut(1), acc);
+ assertEq(_getMaxToBridgeIn(1), maxAmount - acc);
+
+ _assertInvariant();
+ }
+ }
+
+ /// @dev Bridge from Ethereum to Arbitrum reverts if amount is higher than bridge limit
+ function testFuzz_BridgeBridgeLimitExceededSourceReverts(uint256 amount, uint256 bridgeAmount) public {
+ vm.assume(amount < type(uint128).max);
+ vm.assume(bridgeAmount < INITIAL_BRIDGE_LIMIT);
+
+ // Inflate bridgeAmount
+ if (bridgeAmount > 0) {
+ deal(tokens[0], USER, bridgeAmount);
+ _bridgeGho(0, 1, USER, bridgeAmount);
+ }
+
+ deal(tokens[0], USER, amount);
+ // Simulate CCIP pull of funds
+ vm.prank(USER);
+ GhoToken(tokens[0]).transfer(pools[0], amount);
+
+ if (bridgeAmount + amount > INITIAL_BRIDGE_LIMIT) {
+ vm.expectRevert();
+ }
+ vm.prank(RAMP);
+ IPool(pools[0]).lockOrBurn(USER, bytes(""), amount, uint64(1), bytes(""));
+ }
+
+ /// @dev Bridge from Ethereum to Arbitrum reverts if amount is higher than capacity available
+ function testFuzz_BridgeCapacityExceededDestinationReverts(uint256 amount, uint256 level) public {
+ (uint256 capacity, ) = GhoToken(tokens[1]).getFacilitatorBucket(pools[1]);
+ vm.assume(level < capacity);
+ amount = bound(amount, 1, type(uint128).max);
+
+ // Inflate level
+ if (level > 0) {
+ _inflateFacilitatorLevel(pools[1], tokens[1], level);
+ }
+
+ // Skip origin move
+
+ // Destination execution
+ if (amount > capacity - level) {
+ vm.expectRevert();
+ }
+ vm.prank(RAMP);
+ IPool(pools[1]).releaseOrMint(bytes(""), USER, amount, uint64(0), bytes(""));
+ }
+
+  /// @dev Bridge from Arbitrum to Ethereum reverts if Arbitrum level is lower than amount
+ function testFuzz_BridgeBackZeroLevelSourceReverts(uint256 amount, uint256 level) public {
+ (uint256 capacity, ) = GhoToken(tokens[1]).getFacilitatorBucket(pools[1]);
+ vm.assume(level < capacity);
+ amount = bound(amount, 1, capacity - level);
+
+ // Inflate level
+ if (level > 0) {
+ _inflateFacilitatorLevel(pools[1], tokens[1], level);
+ }
+
+ deal(tokens[1], USER, amount);
+ // Simulate CCIP pull of funds
+ vm.prank(USER);
+ GhoToken(tokens[1]).transfer(pools[1], amount);
+
+ if (amount > level) {
+ vm.expectRevert();
+ }
+ vm.prank(RAMP);
+ IPool(pools[1]).lockOrBurn(USER, bytes(""), amount, uint64(0), bytes(""));
+ }
+
+  /// @dev Bridge from Arbitrum to Ethereum reverts if Ethereum current bridged amount is lower than amount
+ function testFuzz_BridgeBackZeroBridgeLimitDestinationReverts(uint256 amount, uint256 bridgeAmount) public {
+ (uint256 capacity, ) = GhoToken(tokens[1]).getFacilitatorBucket(pools[1]);
+ amount = bound(amount, 1, capacity);
+ bridgeAmount = bound(bridgeAmount, 0, capacity - amount);
+
+ // Inflate bridgeAmount
+ if (bridgeAmount > 0) {
+ deal(tokens[0], USER, bridgeAmount);
+ _bridgeGho(0, 1, USER, bridgeAmount);
+ }
+
+ // Inflate level on Arbitrum
+ _inflateFacilitatorLevel(pools[1], tokens[1], amount);
+
+ // Skip origin move
+
+ // Destination execution
+ if (amount > bridgeAmount) {
+ vm.expectRevert();
+ }
+ vm.prank(RAMP);
+ IPool(pools[0]).releaseOrMint(bytes(""), USER, amount, uint64(1), bytes(""));
+ }
+
+ /// @dev Bucket capacity reduction. Caution: bridge limit reduction must happen first
+ function testReduceBucketCapacity() public {
+ // Max out capacity
+ uint256 maxAmount = _getMaxToBridgeOut(0);
+ deal(tokens[0], USER, maxAmount);
+ _bridgeGho(0, 1, USER, maxAmount);
+
+ assertEq(_getMaxToBridgeIn(1), 0);
+ assertEq(_getCapacity(1), maxAmount);
+ assertEq(_getLevel(1), maxAmount);
+
+ _assertInvariant();
+
+ uint256 newBucketCapacity = bucketCapacities[1] - 10;
+ // 1. Reduce bridge limit
+ _updateBridgeLimit(newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(1), 0);
+
+ // 2. Reduce bucket capacity
+ _updateBucketCapacity(1, newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(1), 0);
+
+ // Maximum to bridge in is all minted on Arbitrum
+ assertEq(_getMaxToBridgeIn(0), maxAmount);
+ assertEq(_getMaxToBridgeOut(1), maxAmount);
+
+ _bridgeGho(1, 0, USER, maxAmount);
+ assertEq(_getMaxToBridgeOut(0), newBucketCapacity);
+ assertEq(_getMaxToBridgeIn(0), 0);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), newBucketCapacity);
+
+ _assertInvariant();
+ }
+
+  /// @dev Bucket capacity reduction, performed with the procedure steps in the wrong order
+ function testReduceBucketCapacityIncorrectProcedure() public {
+ // Bridge a third of the capacity
+ uint256 amount = _getMaxToBridgeOut(0) / 3;
+ uint256 availableToBridge = _getMaxToBridgeOut(0) - amount;
+
+ deal(tokens[0], USER, amount);
+ _bridgeGho(0, 1, USER, amount);
+
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1] - amount);
+ assertEq(_getLevel(1), amount);
+
+ _assertInvariant();
+
+ uint256 newBucketCapacity = bucketCapacities[1] - 10;
+ /// @dev INCORRECT ORDER PROCEDURE!! bridge limit reduction should happen first
+ // 1. Reduce bucket capacity
+ _updateBucketCapacity(1, newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(0), availableToBridge); // this is the UX issue
+ assertEq(_getMaxToBridgeIn(1), availableToBridge - 10);
+
+ // User can come and try to max bridge on Arbitrum
+ // Transaction will succeed on Ethereum, but revert on Arbitrum
+ deal(tokens[0], USER, availableToBridge);
+ _moveGhoOrigin(0, 1, USER, availableToBridge);
+ assertEq(_getMaxToBridgeOut(0), 0);
+
+ vm.expectRevert();
+ vm.prank(RAMP);
+ IPool(pools[1]).releaseOrMint(bytes(""), USER, availableToBridge, uint64(0), bytes(""));
+
+ // User can only bridge up to new bucket capacity (10 units less)
+ assertEq(_getMaxToBridgeIn(1), availableToBridge - 10);
+ vm.prank(RAMP);
+ IPool(pools[1]).releaseOrMint(bytes(""), USER, availableToBridge - 10, uint64(0), bytes(""));
+ assertEq(_getMaxToBridgeIn(1), 0);
+
+ // 2. Reduce bridge limit
+ _updateBridgeLimit(newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(1), 0);
+ }
+
+ /// @dev Bucket capacity reduction, with a bridge out in between
+ function testReduceBucketCapacityWithBridgeOutInBetween() public {
+ // Bridge a third of the capacity
+ uint256 amount = _getMaxToBridgeOut(0) / 3;
+ uint256 availableToBridge = _getMaxToBridgeOut(0) - amount;
+
+ deal(tokens[0], USER, amount);
+ _bridgeGho(0, 1, USER, amount);
+
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1] - amount);
+ assertEq(_getLevel(1), amount);
+
+ _assertInvariant();
+
+ uint256 newBucketCapacity = bucketCapacities[1] - 10;
+ // 1. Reduce bridge limit
+ _updateBridgeLimit(newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(0), availableToBridge - 10);
+ assertEq(_getMaxToBridgeIn(1), availableToBridge);
+
+ // User initiates bridge out action
+ uint256 amount2 = _getMaxToBridgeOut(0);
+ deal(tokens[0], USER, amount2);
+ _moveGhoOrigin(0, 1, USER, amount2);
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(0), newBucketCapacity);
+
+ // 2. Reduce bucket capacity
+ _updateBucketCapacity(1, newBucketCapacity);
+ // Destination execution can happen, no more bridge out actions can be initiated
+ assertEq(_getMaxToBridgeOut(1), amount);
+ assertEq(_getMaxToBridgeIn(1), amount2);
+
+ // Finalize bridge out action
+ _moveGhoDestination(0, 1, USER, amount2);
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(0), newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(1), newBucketCapacity);
+ assertEq(_getMaxToBridgeIn(1), 0);
+
+ _assertInvariant();
+ }
+
+ /// @dev Bucket capacity reduction, with a bridge in in between
+ function testReduceBucketCapacityWithBridgeInInBetween() public {
+ // Bridge max amount
+ uint256 maxAmount = _getMaxToBridgeOut(0);
+
+ deal(tokens[0], USER, maxAmount);
+ _bridgeGho(0, 1, USER, maxAmount);
+
+ assertEq(_getMaxToBridgeIn(1), 0);
+ assertEq(_getCapacity(1), maxAmount);
+ assertEq(_getLevel(1), maxAmount);
+
+ _assertInvariant();
+
+ uint256 newBucketCapacity = bucketCapacities[1] - 10;
+ // 1. Reduce bridge limit
+ _updateBridgeLimit(newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(1), 0);
+
+ // User initiates bridge in action
+ _moveGhoOrigin(1, 0, USER, maxAmount);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), maxAmount);
+
+ // 2. Reduce bucket capacity
+ _updateBucketCapacity(1, newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(0), maxAmount);
+
+ // Finalize bridge in action
+ _moveGhoDestination(1, 0, USER, maxAmount);
+ assertEq(_getMaxToBridgeOut(0), newBucketCapacity);
+ assertEq(_getMaxToBridgeIn(0), 0);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), newBucketCapacity);
+
+ _assertInvariant();
+ }
+
+ /// @dev Bucket capacity increase. Caution: bridge limit increase must happen afterwards
+ function testIncreaseBucketCapacity() public {
+ // Max out capacity
+ uint256 maxAmount = _getMaxToBridgeOut(0);
+ deal(tokens[0], USER, maxAmount);
+ _bridgeGho(0, 1, USER, maxAmount);
+
+ assertEq(_getMaxToBridgeIn(1), 0);
+ assertEq(_getCapacity(1), maxAmount);
+ assertEq(_getLevel(1), maxAmount);
+
+ _assertInvariant();
+
+ uint256 newBucketCapacity = bucketCapacities[1] + 10;
+    // 1. Increase bucket capacity
+ _updateBucketCapacity(1, newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(1), 10);
+
+ // Reverts if a user tries to bridge out 10
+ vm.expectRevert();
+ vm.prank(RAMP);
+ IPool(pools[0]).lockOrBurn(USER, bytes(""), 10, uint64(1), bytes(""));
+
+ // 2. Increase bridge limit
+ _updateBridgeLimit(newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(0), 10);
+ assertEq(_getMaxToBridgeIn(1), 10);
+
+ _assertInvariant();
+
+ // Now it is possible to bridge some again
+ _bridgeGho(1, 0, USER, maxAmount);
+ assertEq(_getMaxToBridgeOut(0), newBucketCapacity);
+ assertEq(_getMaxToBridgeIn(0), 0);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), newBucketCapacity);
+
+ _assertInvariant();
+ }
+
+  /// @dev Bucket capacity increase, performed with the procedure steps in the wrong order
+ function testIncreaseBucketCapacityIncorrectProcedure() public {
+ // Max out capacity
+ uint256 maxAmount = _getMaxToBridgeOut(0);
+ deal(tokens[0], USER, maxAmount);
+ _bridgeGho(0, 1, USER, maxAmount);
+
+ assertEq(_getMaxToBridgeIn(1), 0);
+ assertEq(_getCapacity(1), maxAmount);
+ assertEq(_getLevel(1), maxAmount);
+
+ _assertInvariant();
+
+ uint256 newBucketCapacity = bucketCapacities[1] + 10;
+
+ /// @dev INCORRECT ORDER PROCEDURE!! bucket capacity increase should happen first
+ // 1. Increase bridge limit
+ _updateBridgeLimit(newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(0), 10);
+ assertEq(_getMaxToBridgeIn(1), 0); // this is the UX issue
+
+ // User can come and try to max bridge on Arbitrum
+ // Transaction will succeed on Ethereum, but revert on Arbitrum
+ deal(tokens[0], USER, 10);
+ _moveGhoOrigin(0, 1, USER, 10);
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(0), newBucketCapacity);
+
+ // Execution on destination will revert until bucket capacity gets increased
+ vm.expectRevert();
+ vm.prank(RAMP);
+ IPool(pools[1]).releaseOrMint(bytes(""), USER, 10, uint64(0), bytes(""));
+
+ // 2. Increase bucket capacity
+ _updateBucketCapacity(1, newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(1), maxAmount);
+ assertEq(_getMaxToBridgeIn(1), 10);
+
+ // Now it is possible to execute on destination
+ _moveGhoDestination(0, 1, USER, 10);
+
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(0), newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(1), newBucketCapacity);
+ assertEq(_getMaxToBridgeIn(1), 0);
+
+ _assertInvariant();
+ }
+
+ /// @dev Bucket capacity increase, with a bridge out in between
+ function testIncreaseBucketCapacityWithBridgeOutInBetween() public {
+ // Bridge a third of the capacity
+ uint256 amount = _getMaxToBridgeOut(0) / 3;
+ uint256 availableToBridge = _getMaxToBridgeOut(0) - amount;
+ deal(tokens[0], USER, amount);
+ _bridgeGho(0, 1, USER, amount);
+
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1] - amount);
+ assertEq(_getLevel(1), amount);
+
+ _assertInvariant();
+
+ uint256 newBucketCapacity = bucketCapacities[1] + 10;
+ // 1. Increase bucket capacity
+ _updateBucketCapacity(1, newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(0), availableToBridge);
+ assertEq(_getMaxToBridgeIn(1), availableToBridge + 10);
+
+ // Reverts if a user tries to bridge out all up to new bucket capacity
+ vm.expectRevert();
+ vm.prank(RAMP);
+ IPool(pools[0]).lockOrBurn(USER, bytes(""), availableToBridge + 10, uint64(1), bytes(""));
+
+ // User initiates bridge out action
+ deal(tokens[0], USER, availableToBridge);
+ _bridgeGho(0, 1, USER, availableToBridge);
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(1), 10);
+
+ // 2. Increase bridge limit
+ _updateBridgeLimit(newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(0), 10);
+ assertEq(_getMaxToBridgeIn(1), 10);
+
+ _assertInvariant();
+
+ // Now it is possible to bridge some again
+ deal(tokens[0], USER, 10);
+ _bridgeGho(0, 1, USER, 10);
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(0), newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(1), newBucketCapacity);
+ assertEq(_getMaxToBridgeIn(1), 0);
+
+ _assertInvariant();
+ }
+
+ /// @dev Bucket capacity increase, with a bridge in in between
+ function testIncreaseBucketCapacityWithBridgeInInBetween() public {
+ // Max out capacity
+ uint256 maxAmount = _getMaxToBridgeOut(0);
+ deal(tokens[0], USER, maxAmount);
+ _bridgeGho(0, 1, USER, maxAmount);
+
+ assertEq(_getMaxToBridgeIn(1), 0);
+ assertEq(_getCapacity(1), maxAmount);
+ assertEq(_getLevel(1), maxAmount);
+
+ _assertInvariant();
+
+ uint256 newBucketCapacity = bucketCapacities[1] + 10;
+ // 1. Increase bucket capacity
+ _updateBucketCapacity(1, newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(0), maxAmount);
+ assertEq(_getMaxToBridgeOut(1), maxAmount);
+ assertEq(_getMaxToBridgeIn(1), 10);
+
+ // User initiates bridge in action
+ _moveGhoOrigin(1, 0, USER, maxAmount);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), newBucketCapacity);
+
+ // 2. Increase bridge limit
+ _updateBridgeLimit(newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(0), 10);
+ assertEq(_getMaxToBridgeIn(0), maxAmount);
+
+ // User finalizes bridge in action
+ _moveGhoDestination(1, 0, USER, maxAmount);
+ assertEq(_getMaxToBridgeOut(0), newBucketCapacity);
+ assertEq(_getMaxToBridgeIn(0), 0);
+
+ _assertInvariant();
+
+ // Now it is possible to bridge new bucket capacity
+ deal(tokens[0], USER, newBucketCapacity);
+ _bridgeGho(0, 1, USER, newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(0), 0);
+ assertEq(_getMaxToBridgeIn(0), newBucketCapacity);
+ assertEq(_getMaxToBridgeOut(1), newBucketCapacity);
+ assertEq(_getMaxToBridgeIn(1), 0);
+
+ _assertInvariant();
+ }
+}
+
+contract GHOTokenPoolEthereumBridgeLimitTripleScenario is GHOTokenPoolEthereumBridgeLimitSetup {
+ function setUp() public virtual override {
+ super.setUp();
+
+ // Arbitrum
+ _addBridge(1, INITIAL_BRIDGE_LIMIT);
+ _enableLane(0, 1);
+
+ // Avalanche
+ _addBridge(2, INITIAL_BRIDGE_LIMIT);
+ _enableLane(1, 2);
+ _enableLane(0, 2);
+ }
+
+ /// @dev Bridge out some tokens to third chain via second chain (Ethereum to Arbitrum, Arbitrum to Avalanche)
+ function testFuzz_BridgeToTwoToThree(uint256 amount) public {
+ uint256 maxAmount = _getMaxToBridgeOut(0);
+ amount = bound(amount, 1, maxAmount);
+
+ _assertInvariant();
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount);
+ assertEq(_getMaxToBridgeIn(0), 0);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1]);
+ assertEq(_getMaxToBridgeOut(2), 0);
+ assertEq(_getMaxToBridgeIn(2), bucketCapacities[2]);
+
+ deal(tokens[0], USER, amount);
+ _moveGhoOrigin(0, 1, USER, amount);
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount - amount);
+ assertEq(_getMaxToBridgeIn(0), amount);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1]);
+ assertEq(_getMaxToBridgeOut(2), 0);
+ assertEq(_getMaxToBridgeIn(2), bucketCapacities[2]);
+
+ _moveGhoDestination(0, 1, USER, amount);
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount - amount);
+ assertEq(_getMaxToBridgeIn(0), amount);
+ assertEq(_getMaxToBridgeOut(1), amount);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1] - bucketLevels[1]);
+ assertEq(_getMaxToBridgeOut(2), 0);
+ assertEq(_getMaxToBridgeIn(2), bucketCapacities[2]);
+
+ _assertInvariant();
+
+ _moveGhoOrigin(1, 2, USER, amount);
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount - amount);
+ assertEq(_getMaxToBridgeIn(0), amount);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1]);
+ assertEq(_getMaxToBridgeOut(2), 0);
+ assertEq(_getMaxToBridgeIn(2), bucketCapacities[2]);
+
+ _moveGhoDestination(1, 2, USER, amount);
+
+ assertEq(_getMaxToBridgeOut(0), maxAmount - amount);
+ assertEq(_getMaxToBridgeIn(0), amount);
+ assertEq(_getMaxToBridgeOut(1), 0);
+ assertEq(_getMaxToBridgeIn(1), bucketCapacities[1]);
+ assertEq(_getMaxToBridgeOut(2), amount);
+ assertEq(_getMaxToBridgeIn(2), bucketCapacities[2] - amount);
+
+ _assertInvariant();
+ }
+
+ /// @dev Bridge out some tokens to second and third chain randomly
+ function testFuzz_BridgeRandomlyToTwoAndThree(uint64[] memory amounts) public {
+ vm.assume(amounts.length < 30);
+
+ uint256 maxAmount = _getMaxToBridgeOut(0);
+ uint256 sourceAcc;
+ uint256 amount;
+ uint256 dest;
+ bool lastTime;
+ for (uint256 i = 0; i < amounts.length && !lastTime; i++) {
+ amount = amounts[i];
+
+ if (amount == 0) amount += 1;
+ if (sourceAcc + amount > maxAmount) {
+ amount = maxAmount - sourceAcc;
+ lastTime = true;
+ }
+
+ dest = (amount % 2) + 1;
+ deal(tokens[0], USER, amount);
+ _bridgeGho(0, dest, USER, amount);
+
+ sourceAcc += amount;
+ }
+ assertEq(sourceAcc, bridged);
+
+ // Bridge all to Avalanche
+ uint256 toBridge = _getMaxToBridgeOut(1);
+ if (toBridge > 0) {
+ _bridgeGho(1, 2, USER, toBridge);
+ assertEq(sourceAcc, bridged);
+ assertEq(_getLevel(2), bridged);
+ assertEq(_getLevel(1), 0);
+ }
+ }
+
+ /// @dev All remote liquidity is on one chain or the other
+ function testLiquidityUnbalanced() public {
+ // Bridge all out to Arbitrum
+ uint256 amount = _getMaxToBridgeOut(0);
+ deal(tokens[0], USER, amount);
+ _bridgeGho(0, 1, USER, amount);
+
+ // No more liquidity can go remotely
+ assertEq(_getMaxToBridgeOut(0), 0);
+ vm.expectRevert();
+ vm.prank(RAMP);
+ IPool(pools[0]).lockOrBurn(USER, bytes(""), 1, uint64(1), bytes(""));
+ vm.prank(RAMP);
+ vm.expectRevert();
+ IPool(pools[0]).lockOrBurn(USER, bytes(""), 1, uint64(2), bytes(""));
+
+ // All liquidity on Arbitrum, 0 on Avalanche
+ assertEq(_getLevel(1), bridged);
+ assertEq(_getLevel(1), _getCapacity(1));
+ assertEq(_getLevel(2), 0);
+
+ // Move all liquidity to Avalanche
+ _bridgeGho(1, 2, USER, amount);
+ assertEq(_getLevel(1), 0);
+ assertEq(_getLevel(2), bridged);
+ assertEq(_getLevel(2), _getCapacity(2));
+
+ // Move all liquidity back to Ethereum
+ _bridgeGho(2, 0, USER, amount);
+ assertEq(_getLevel(1), 0);
+ assertEq(_getLevel(2), 0);
+ assertEq(bridged, 0);
+ assertEq(_getMaxToBridgeOut(0), amount);
+ }
+
+ /// @dev Test showcasing incorrect bridge limit and bucket capacity configuration
+ function testIncorrectBridgeLimitBucketConfig() public {
+ // BridgeLimit 10, Arbitrum 9, Avalanche Bucket 10
+ _updateBridgeLimit(10);
+ _updateBucketCapacity(1, 9);
+ _updateBucketCapacity(2, 10);
+
+ assertEq(_getMaxToBridgeOut(0), 10);
+ assertEq(_getMaxToBridgeIn(1), 9); // here the issue
+ assertEq(_getMaxToBridgeIn(2), 10);
+
+ // Possible to bridge 10 out to 2
+ deal(tokens[0], USER, 10);
+ _bridgeGho(0, 2, USER, 10);
+
+ // Liquidity comes back
+ _bridgeGho(2, 0, USER, 10);
+
+ // Not possible to bridge 10 out to 1
+ _moveGhoOrigin(0, 1, USER, 10);
+ // Reverts on destination
+ vm.expectRevert();
+ vm.prank(RAMP);
+ IPool(pools[1]).releaseOrMint(bytes(""), USER, 10, uint64(0), bytes(""));
+
+ // Only if bucket capacity gets increased, execution can succeed
+ _updateBucketCapacity(1, 10);
+ _moveGhoDestination(0, 1, USER, 10);
+ }
+
+ /// @dev Test showcasing a user locked due to a bridge limit reduction below current bridged amount
+ function testUserLockedBridgeLimitReductionBelowLevel() public {
+ // Bridge all out to Arbitrum
+ uint256 amount = _getMaxToBridgeOut(0);
+ deal(tokens[0], USER, amount);
+ _bridgeGho(0, 1, USER, amount);
+
+ // Reduce bridge limit below current bridged amount
+ uint256 newBridgeLimit = amount / 2;
+ _updateBridgeLimit(newBridgeLimit);
+ _updateBucketCapacity(1, newBridgeLimit);
+ // _updateBucketCapacity(2, newBridgeLimit);
+
+ // assertEq(_getMaxToBridgeIn(2), newBridgeLimit);
+
+ // Reverts
+ _bridgeGho(1, 2, USER, amount);
+ }
+
+ /// @dev Test showcasing a user locked due to a bridge limit reduction below current bridged amount
+ function testUserLockedBridgeLimitReductionBelowLevel2() public {
+ // Bridge all out to Arbitrum
+ uint256 amount = _getMaxToBridgeOut(0);
+ deal(tokens[0], USER, amount);
+ _bridgeGho(0, 1, USER, amount);
+
+ // Reduce bridge limit below current bridged amount
+ uint256 newBridgeLimit = amount / 2;
+ _updateBridgeLimit(newBridgeLimit);
+ _updateBucketCapacity(2, newBridgeLimit);
+
+ // assertEq(_getMaxToBridgeIn(2), newBridgeLimit);
+
+ // Reverts
+ _bridgeGho(1, 2, USER, amount);
+ }
+}
diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereumBridgeLimitInvariant.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereumBridgeLimitInvariant.t.sol
new file mode 100644
index 0000000000..300b2caf21
--- /dev/null
+++ b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereumBridgeLimitInvariant.t.sol
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import {GhoToken} from "@aave/gho-core/gho/GhoToken.sol";
+
+import {IPool} from "../../../interfaces/pools/IPool.sol";
+import {UpgradeableLockReleaseTokenPool} from "../../../pools/GHO/UpgradeableLockReleaseTokenPool.sol";
+import {UpgradeableTokenPool} from "../../../pools/GHO/UpgradeableTokenPool.sol";
+import {RateLimiter} from "../../../libraries/RateLimiter.sol";
+
+import {StdInvariant} from "forge-std/StdInvariant.sol";
+import {BaseTest} from "../../BaseTest.t.sol";
+
+import {console2} from "forge-std/console2.sol";
+
+contract GHOTokenPoolHandler is BaseTest {
+ address internal ARM_PROXY = makeAddr("ARM_PROXY");
+ address internal ROUTER = makeAddr("ROUTER");
+ address internal RAMP = makeAddr("RAMP");
+ address internal AAVE_DAO = makeAddr("AAVE_DAO");
+ address internal PROXY_ADMIN = makeAddr("PROXY_ADMIN");
+ address internal USER = makeAddr("USER");
+
+ uint256 public immutable INITIAL_BRIDGE_LIMIT = 100e6 * 1e18;
+
+ uint256[] public chainsList;
+ mapping(uint256 => address) public pools; // chainId => bridgeTokenPool
+ mapping(uint256 => address) public tokens; // chainId => ghoToken
+ mapping(uint256 => uint256) public bucketCapacities; // chainId => bucketCapacities
+ mapping(uint256 => uint256) public bucketLevels; // chainId => bucketLevels
+ mapping(uint256 => uint256) public liquidity; // chainId => liquidity
+ uint256 public remoteLiquidity;
+ uint256 public bridged;
+ bool public capacityBelowLevelUpdate;
+
+ constructor() {
+ // Ethereum with id 0
+ chainsList.push(0);
+ tokens[0] = address(new GhoToken(AAVE_DAO));
+ pools[0] = _deployUpgradeableLockReleaseTokenPool(
+ tokens[0],
+ ARM_PROXY,
+ ROUTER,
+ OWNER,
+ INITIAL_BRIDGE_LIMIT,
+ PROXY_ADMIN
+ );
+
+ // Mock calls for bridging
+ vm.mockCall(ROUTER, abi.encodeWithSelector(bytes4(keccak256("getOnRamp(uint64)"))), abi.encode(RAMP));
+ vm.mockCall(ROUTER, abi.encodeWithSelector(bytes4(keccak256("isOffRamp(uint64,address)"))), abi.encode(true));
+ vm.mockCall(ARM_PROXY, abi.encodeWithSelector(bytes4(keccak256("isCursed()"))), abi.encode(false));
+
+ // Arbitrum
+ _addBridge(1, INITIAL_BRIDGE_LIMIT);
+ _enableLane(0, 1);
+
+ // Avalanche
+ _addBridge(2, INITIAL_BRIDGE_LIMIT);
+ _enableLane(0, 2);
+ _enableLane(1, 2);
+ }
+
+ /// forge-config: ccip.fuzz.runs = 500
+ function bridgeGho(uint256 fromChain, uint256 toChain, uint256 amount) public {
+ fromChain = bound(fromChain, 0, 2);
+ toChain = bound(toChain, 0, 2);
+ vm.assume(fromChain != toChain);
+ uint256 maxBalance = GhoToken(tokens[fromChain]).balanceOf(address(this));
+ uint256 maxToBridge = _getMaxToBridgeOut(fromChain);
+ uint256 maxAmount = maxBalance > maxToBridge ? maxToBridge : maxBalance;
+ amount = bound(amount, 0, maxAmount);
+
+ console2.log("bridgeGho", fromChain, toChain, amount);
+ console2.log("bridgeLimit", UpgradeableLockReleaseTokenPool(pools[0]).getBridgeLimit());
+ console2.log("currentBridged", UpgradeableLockReleaseTokenPool(pools[0]).getCurrentBridgedAmount());
+ if (!_isEthereumChain(fromChain)) {
+ console2.log("bucket from", fromChain, _getCapacity(fromChain), _getLevel(fromChain));
+ }
+ if (!_isEthereumChain(toChain)) {
+ console2.log("bucket to", toChain, _getCapacity(toChain), _getLevel(toChain));
+ }
+
+ if (amount > 0) {
+ _bridgeGho(fromChain, toChain, address(this), amount);
+ }
+ }
+
+ /// forge-config: ccip.fuzz.runs = 500
+ function updateBucketCapacity(uint256 chain, uint128 newCapacity) public {
+ chain = bound(chain, 1, 2);
+ uint256 otherChain = (chain % 2) + 1;
+ vm.assume(newCapacity >= bridged);
+
+ uint256 oldCapacity = bucketCapacities[chain];
+
+ console2.log("updateBucketCapacity", chain, oldCapacity, newCapacity);
+ if (newCapacity < bucketLevels[chain]) {
+ capacityBelowLevelUpdate = true;
+ } else {
+ capacityBelowLevelUpdate = false;
+ }
+
+ if (newCapacity > oldCapacity) {
+ // Increase
+ _updateBucketCapacity(chain, newCapacity);
+ // keep bridge limit as the minimum bucket capacity
+ if (newCapacity < bucketCapacities[otherChain]) {
+ _updateBridgeLimit(newCapacity);
+ }
+ } else {
+ // Reduction
+ // keep bridge limit as the minimum bucket capacity
+ if (newCapacity < bucketCapacities[otherChain]) {
+ _updateBridgeLimit(newCapacity);
+ }
+ _updateBucketCapacity(chain, newCapacity);
+ }
+ }
+
+ function _enableLane(uint256 fromId, uint256 toId) internal {
+ // from
+ UpgradeableTokenPool.ChainUpdate[] memory chainUpdate = new UpgradeableTokenPool.ChainUpdate[](1);
+ RateLimiter.Config memory emptyRateConfig = RateLimiter.Config(false, 0, 0);
+ chainUpdate[0] = UpgradeableTokenPool.ChainUpdate({
+ remoteChainSelector: uint64(toId),
+ allowed: true,
+ outboundRateLimiterConfig: emptyRateConfig,
+ inboundRateLimiterConfig: emptyRateConfig
+ });
+
+ vm.startPrank(OWNER);
+ UpgradeableTokenPool(pools[fromId]).applyChainUpdates(chainUpdate);
+
+ // to
+ chainUpdate[0].remoteChainSelector = uint64(fromId);
+ UpgradeableTokenPool(pools[toId]).applyChainUpdates(chainUpdate);
+ vm.stopPrank();
+ }
+
+ function _addBridge(uint256 chainId, uint256 bucketCapacity) internal {
+ require(tokens[chainId] == address(0), "BRIDGE_ALREADY_EXISTS");
+
+ chainsList.push(chainId);
+
+ // GHO Token
+ GhoToken ghoToken = new GhoToken(AAVE_DAO);
+ tokens[chainId] = address(ghoToken);
+
+ // UpgradeableTokenPool
+ address bridgeTokenPool = _deployUpgradeableBurnMintTokenPool(
+ address(ghoToken),
+ ARM_PROXY,
+ ROUTER,
+ OWNER,
+ PROXY_ADMIN
+ );
+ pools[chainId] = bridgeTokenPool;
+
+ // Facilitator
+ bucketCapacities[chainId] = bucketCapacity;
+ vm.stopPrank();
+ vm.startPrank(AAVE_DAO);
+ ghoToken.grantRole(ghoToken.FACILITATOR_MANAGER_ROLE(), AAVE_DAO);
+ ghoToken.addFacilitator(bridgeTokenPool, "UpgradeableTokenPool", uint128(bucketCapacity));
+ vm.stopPrank();
+ }
+
+ function _updateBridgeLimit(uint256 newBridgeLimit) internal {
+ vm.stopPrank();
+ vm.startPrank(OWNER);
+ UpgradeableLockReleaseTokenPool(pools[0]).setBridgeLimit(newBridgeLimit);
+ vm.stopPrank();
+ }
+
+ function _updateBucketCapacity(uint256 chainId, uint256 newBucketCapacity) internal {
+ bucketCapacities[chainId] = newBucketCapacity;
+ vm.stopPrank();
+ vm.startPrank(AAVE_DAO);
+ GhoToken(tokens[chainId]).grantRole(GhoToken(tokens[chainId]).BUCKET_MANAGER_ROLE(), AAVE_DAO);
+ GhoToken(tokens[chainId]).setFacilitatorBucketCapacity(pools[chainId], uint128(newBucketCapacity));
+ vm.stopPrank();
+ }
+
+ function _getCapacity(uint256 chain) internal view returns (uint256) {
+ require(!_isEthereumChain(chain), "No bucket on Ethereum");
+ (uint256 capacity, ) = GhoToken(tokens[chain]).getFacilitatorBucket(pools[chain]);
+ return capacity;
+ }
+
+ function _getLevel(uint256 chain) internal view returns (uint256) {
+ require(!_isEthereumChain(chain), "No bucket on Ethereum");
+ (, uint256 level) = GhoToken(tokens[chain]).getFacilitatorBucket(pools[chain]);
+ return level;
+ }
+
+ function _getMaxToBridgeOut(uint256 fromChain) internal view returns (uint256) {
+ if (_isEthereumChain(fromChain)) {
+ UpgradeableLockReleaseTokenPool ethTokenPool = UpgradeableLockReleaseTokenPool(pools[0]);
+ uint256 bridgeLimit = ethTokenPool.getBridgeLimit();
+ uint256 currentBridged = ethTokenPool.getCurrentBridgedAmount();
+ return currentBridged > bridgeLimit ? 0 : bridgeLimit - currentBridged;
+ } else {
+ (, uint256 level) = GhoToken(tokens[fromChain]).getFacilitatorBucket(pools[fromChain]);
+ return level;
+ }
+ }
+
+ function _bridgeGho(uint256 fromChain, uint256 toChain, address user, uint256 amount) internal {
+ _moveGhoOrigin(fromChain, toChain, user, amount);
+ _moveGhoDestination(fromChain, toChain, user, amount);
+ }
+
+ function _moveGhoOrigin(uint256 fromChain, uint256 toChain, address user, uint256 amount) internal {
+ // Simulate CCIP pull of funds
+ vm.startPrank(user);
+ GhoToken(tokens[fromChain]).transfer(pools[fromChain], amount);
+
+ vm.startPrank(RAMP);
+ IPool(pools[fromChain]).lockOrBurn(user, bytes(""), amount, uint64(toChain), bytes(""));
+
+ if (_isEthereumChain(fromChain)) {
+ // Lock
+ bridged += amount;
+ } else {
+ // Burn
+ bucketLevels[fromChain] -= amount;
+ liquidity[fromChain] -= amount;
+ remoteLiquidity -= amount;
+ }
+ }
+
+ function _moveGhoDestination(uint256 fromChain, uint256 toChain, address user, uint256 amount) internal {
+ vm.startPrank(RAMP);
+ IPool(pools[toChain]).releaseOrMint(bytes(""), user, amount, uint64(fromChain), bytes(""));
+
+ if (_isEthereumChain(toChain)) {
+ // Release
+ bridged -= amount;
+ } else {
+ // Mint
+ bucketLevels[toChain] += amount;
+ liquidity[toChain] += amount;
+ remoteLiquidity += amount;
+ }
+ }
+
+ function _isEthereumChain(uint256 chainId) internal pure returns (bool) {
+ return chainId == 0;
+ }
+
+ function getChainsList() public view returns (uint256[] memory) {
+ return chainsList;
+ }
+}
+
+contract GHOTokenPoolEthereumBridgeLimitInvariant is BaseTest {
+ GHOTokenPoolHandler handler;
+
+ function setUp() public override {
+ super.setUp();
+
+ handler = new GHOTokenPoolHandler();
+ handler.getChainsList();
+ deal(handler.tokens(0), address(handler), handler.INITIAL_BRIDGE_LIMIT());
+
+ targetContract(address(handler));
+ }
+
+ /// forge-config: ccip.invariant.fail-on-revert = true
+ /// forge-config: ccip.invariant.runs = 2000
+ /// forge-config: ccip.invariant.depth = 50
+ function invariant_bridgeLimit() public {
+ // Check bridged
+ assertEq(UpgradeableLockReleaseTokenPool(handler.pools(0)).getCurrentBridgedAmount(), handler.bridged());
+
+ // Check levels and buckets
+ uint256 sumLevels;
+ uint256 chainId;
+ uint256 capacity;
+ uint256 level;
+ uint256[] memory chainsListLocal = handler.getChainsList();
+ for (uint i = 1; i < chainsListLocal.length; i++) {
+ // not counting Ethereum -{0}
+ chainId = chainsListLocal[i];
+ (capacity, level) = GhoToken(handler.tokens(chainId)).getFacilitatorBucket(handler.pools(chainId));
+
+ // Aggregate levels
+ sumLevels += level;
+
+ assertEq(capacity, handler.bucketCapacities(chainId), "wrong bucket capacity");
+ assertEq(level, handler.bucketLevels(chainId), "wrong bucket level");
+
+ assertGe(
+ capacity,
+ UpgradeableLockReleaseTokenPool(handler.pools(0)).getBridgeLimit(),
+ "capacity must be greater than or equal to bridgeLimit"
+ );
+
+ // This invariant only holds if there were no bridge limit reductions below the current bridged amount
+ if (!handler.capacityBelowLevelUpdate()) {
+ assertLe(
+ level,
+ UpgradeableLockReleaseTokenPool(handler.pools(0)).getBridgeLimit(),
+ "level cannot be higher than bridgeLimit"
+ );
+ }
+ }
+ // Check bridged is equal to sum of levels
+ assertEq(UpgradeableLockReleaseTokenPool(handler.pools(0)).getCurrentBridgedAmount(), sumLevels, "wrong bridged");
+ assertEq(handler.remoteLiquidity(), sumLevels, "wrong remote liquidity");
+ }
+}
diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereumBridgeLimitSetup.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereumBridgeLimitSetup.t.sol
new file mode 100644
index 0000000000..5e9b5f147a
--- /dev/null
+++ b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereumBridgeLimitSetup.t.sol
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import {GhoToken} from "@aave/gho-core/gho/GhoToken.sol";
+
+import {BaseTest} from "../../BaseTest.t.sol";
+import {IPool} from "../../../interfaces/pools/IPool.sol";
+import {UpgradeableLockReleaseTokenPool} from "../../../pools/GHO/UpgradeableLockReleaseTokenPool.sol";
+import {UpgradeableBurnMintTokenPool} from "../../../pools/GHO/UpgradeableBurnMintTokenPool.sol";
+import {UpgradeableTokenPool} from "../../../pools/GHO/UpgradeableTokenPool.sol";
+import {RateLimiter} from "../../../libraries/RateLimiter.sol";
+
+import {console2} from "forge-std/console2.sol";
+contract GHOTokenPoolEthereumBridgeLimitSetup is BaseTest {
+ address internal ARM_PROXY = makeAddr("ARM_PROXY");
+ address internal ROUTER = makeAddr("ROUTER");
+ address internal RAMP = makeAddr("RAMP");
+ address internal AAVE_DAO = makeAddr("AAVE_DAO");
+ address internal PROXY_ADMIN = makeAddr("PROXY_ADMIN");
+ address internal USER = makeAddr("USER");
+
+ uint256 public immutable INITIAL_BRIDGE_LIMIT = 100e6 * 1e18;
+
+ uint256[] public chainsList;
+ mapping(uint256 => address) public pools; // chainId => bridgeTokenPool
+ mapping(uint256 => address) public tokens; // chainId => ghoToken
+ mapping(uint256 => uint256) public bucketCapacities; // chainId => bucketCapacities
+ mapping(uint256 => uint256) public bucketLevels; // chainId => bucketLevels
+ mapping(uint256 => uint256) public liquidity; // chainId => liquidity
+ uint256 public remoteLiquidity;
+ uint256 public bridged;
+
+ function setUp() public virtual override {
+
+ // Ethereum with id 0
+ chainsList.push(0);
+ tokens[0] = address(new GhoToken(AAVE_DAO));
+ pools[0] = _deployUpgradeableLockReleaseTokenPool(
+ tokens[0],
+ ARM_PROXY,
+ ROUTER,
+ OWNER,
+ INITIAL_BRIDGE_LIMIT,
+ PROXY_ADMIN
+ );
+
+ // Mock calls for bridging
+ vm.mockCall(ROUTER, abi.encodeWithSelector(bytes4(keccak256("getOnRamp(uint64)"))), abi.encode(RAMP));
+ vm.mockCall(ROUTER, abi.encodeWithSelector(bytes4(keccak256("isOffRamp(uint64,address)"))), abi.encode(true));
+ vm.mockCall(ARM_PROXY, abi.encodeWithSelector(bytes4(keccak256("isCursed()"))), abi.encode(false));
+ }
+
+ function _enableLane(uint256 fromId, uint256 toId) internal {
+ // from
+ UpgradeableTokenPool.ChainUpdate[] memory chainUpdate = new UpgradeableTokenPool.ChainUpdate[](1);
+ RateLimiter.Config memory emptyRateConfig = RateLimiter.Config(false, 0, 0);
+ chainUpdate[0] = UpgradeableTokenPool.ChainUpdate({
+ remoteChainSelector: uint64(toId),
+ allowed: true,
+ outboundRateLimiterConfig: emptyRateConfig,
+ inboundRateLimiterConfig: emptyRateConfig
+ });
+
+ vm.startPrank(OWNER);
+ UpgradeableTokenPool(pools[fromId]).applyChainUpdates(chainUpdate);
+
+ // to
+ chainUpdate[0].remoteChainSelector = uint64(fromId);
+ UpgradeableTokenPool(pools[toId]).applyChainUpdates(chainUpdate);
+ vm.stopPrank();
+ }
+
+ function _addBridge(uint256 chainId, uint256 bucketCapacity) internal {
+ require(tokens[chainId] == address(0), "BRIDGE_ALREADY_EXISTS");
+
+ chainsList.push(chainId);
+
+ // GHO Token
+ GhoToken ghoToken = new GhoToken(AAVE_DAO);
+ tokens[chainId] = address(ghoToken);
+
+ // UpgradeableTokenPool
+ address bridgeTokenPool = _deployUpgradeableBurnMintTokenPool(
+ address(ghoToken),
+ ARM_PROXY,
+ ROUTER,
+ OWNER,
+ PROXY_ADMIN
+ );
+ pools[chainId] = bridgeTokenPool;
+
+ // Facilitator
+ bucketCapacities[chainId] = bucketCapacity;
+ vm.stopPrank();
+ vm.startPrank(AAVE_DAO);
+ ghoToken.grantRole(ghoToken.FACILITATOR_MANAGER_ROLE(), AAVE_DAO);
+ ghoToken.addFacilitator(bridgeTokenPool, "UpgradeableTokenPool", uint128(bucketCapacity));
+ vm.stopPrank();
+ }
+
+ function _updateBridgeLimit(uint256 newBridgeLimit) internal {
+ vm.prank(OWNER);
+ UpgradeableLockReleaseTokenPool(pools[0]).setBridgeLimit(newBridgeLimit);
+ }
+
+ function _updateBucketCapacity(uint256 chainId, uint256 newBucketCapacity) internal {
+ bucketCapacities[chainId] = newBucketCapacity;
+ vm.startPrank(AAVE_DAO);
+ GhoToken(tokens[chainId]).grantRole(GhoToken(tokens[chainId]).BUCKET_MANAGER_ROLE(), AAVE_DAO);
+ GhoToken(tokens[chainId]).setFacilitatorBucketCapacity(pools[chainId], uint128(newBucketCapacity));
+ vm.stopPrank();
+ }
+
+ function _getMaxToBridgeOut(uint256 fromChain) internal view returns (uint256) {
+ if (_isEthereumChain(fromChain)) {
+ UpgradeableLockReleaseTokenPool ethTokenPool = UpgradeableLockReleaseTokenPool(pools[0]);
+ uint256 bridgeLimit = ethTokenPool.getBridgeLimit();
+ uint256 currentBridged = ethTokenPool.getCurrentBridgedAmount();
+ return currentBridged > bridgeLimit ? 0 : bridgeLimit - currentBridged;
+ } else {
+ (, uint256 level) = GhoToken(tokens[fromChain]).getFacilitatorBucket(pools[fromChain]);
+ return level;
+ }
+ }
+
+ function _getMaxToBridgeIn(uint256 toChain) internal view returns (uint256) {
+ if (_isEthereumChain(toChain)) {
+ UpgradeableLockReleaseTokenPool ethTokenPool = UpgradeableLockReleaseTokenPool(pools[0]);
+ return ethTokenPool.getCurrentBridgedAmount();
+ } else {
+ (uint256 capacity, uint256 level) = GhoToken(tokens[toChain]).getFacilitatorBucket(pools[toChain]);
+ return level > capacity ? 0 : capacity - level;
+ }
+ }
+
+ function _getCapacity(uint256 chain) internal view returns (uint256) {
+ require(!_isEthereumChain(chain), "No bucket on Ethereum");
+ (uint256 capacity, ) = GhoToken(tokens[chain]).getFacilitatorBucket(pools[chain]);
+ return capacity;
+ }
+
+ function _getLevel(uint256 chain) internal view returns (uint256) {
+ require(!_isEthereumChain(chain), "No bucket on Ethereum");
+ (, uint256 level) = GhoToken(tokens[chain]).getFacilitatorBucket(pools[chain]);
+ return level;
+ }
+
+ function _bridgeGho(uint256 fromChain, uint256 toChain, address user, uint256 amount) internal {
+ _moveGhoOrigin(fromChain, toChain, user, amount);
+ _moveGhoDestination(fromChain, toChain, user, amount);
+ }
+
+ function _moveGhoOrigin(uint256 fromChain, uint256 toChain, address user, uint256 amount) internal {
+ // Simulate CCIP pull of funds
+ vm.prank(user);
+ GhoToken(tokens[fromChain]).transfer(pools[fromChain], amount);
+
+ vm.prank(RAMP);
+ IPool(pools[fromChain]).lockOrBurn(user, bytes(""), amount, uint64(toChain), bytes(""));
+
+ if (_isEthereumChain(fromChain)) {
+ // Lock
+ bridged += amount;
+ } else {
+ // Burn
+ bucketLevels[fromChain] -= amount;
+ liquidity[fromChain] -= amount;
+ remoteLiquidity -= amount;
+ }
+ }
+
+ function _moveGhoDestination(uint256 fromChain, uint256 toChain, address user, uint256 amount) internal {
+ vm.prank(RAMP);
+ IPool(pools[toChain]).releaseOrMint(bytes(""), user, amount, uint64(fromChain), bytes(""));
+
+ if (_isEthereumChain(toChain)) {
+ // Release
+ bridged -= amount;
+ } else {
+ // Mint
+ bucketLevels[toChain] += amount;
+ liquidity[toChain] += amount;
+ remoteLiquidity += amount;
+ }
+ }
+
+ function _isEthereumChain(uint256 chainId) internal pure returns (bool) {
+ return chainId == 0;
+ }
+
+ function _assertInvariant() internal {
+ // Check bridged
+ assertEq(UpgradeableLockReleaseTokenPool(pools[0]).getCurrentBridgedAmount(), bridged);
+
+ // Check levels and buckets
+ uint256 sumLevels;
+ uint256 chainId;
+ uint256 capacity;
+ uint256 level;
+ for (uint i = 1; i < chainsList.length; i++) {
+ // not counting Ethereum -{0}
+ chainId = chainsList[i];
+ (capacity, level) = GhoToken(tokens[chainId]).getFacilitatorBucket(pools[chainId]);
+
+ // Aggregate levels
+ sumLevels += level;
+
+ assertEq(capacity, bucketCapacities[chainId], "wrong bucket capacity");
+ assertEq(level, bucketLevels[chainId], "wrong bucket level");
+
+ assertEq(
+ capacity,
+ UpgradeableLockReleaseTokenPool(pools[0]).getBridgeLimit(),
+ "capacity must be equal to bridgeLimit"
+ );
+ assertLe(
+ level,
+ UpgradeableLockReleaseTokenPool(pools[0]).getBridgeLimit(),
+ "level cannot be higher than bridgeLimit"
+ );
+ }
+ // Check bridged is equal to sum of levels
+ assertEq(UpgradeableLockReleaseTokenPool(pools[0]).getCurrentBridgedAmount(), sumLevels, "wrong bridged");
+ assertEq(remoteLiquidity, sumLevels, "wrong remote liquidity");
+ }
+}
diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereumE2E.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereumE2E.t.sol
new file mode 100644
index 0000000000..52ef9f5d6d
--- /dev/null
+++ b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereumE2E.t.sol
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import {GhoToken} from "@aave/gho-core/gho/GhoToken.sol";
+import {TransparentUpgradeableProxy} from "solidity-utils/contracts/transparent-proxy/TransparentUpgradeableProxy.sol";
+
+import "../../helpers/MerkleHelper.sol";
+import "../../commitStore/CommitStore.t.sol";
+import "../../onRamp/EVM2EVMOnRampSetup.t.sol";
+import "../../offRamp/EVM2EVMOffRampSetup.t.sol";
+import {IBurnMintERC20} from "../../../../shared/token/ERC20/IBurnMintERC20.sol";
+import {UpgradeableLockReleaseTokenPool} from "../../../pools/GHO/UpgradeableLockReleaseTokenPool.sol";
+import {UpgradeableBurnMintTokenPool} from "../../../pools/GHO/UpgradeableBurnMintTokenPool.sol";
+import {UpgradeableTokenPool} from "../../../pools/GHO/UpgradeableTokenPool.sol";
+import {IPool} from "../../../interfaces/pools/IPool.sol";
+import {RateLimiter} from "../../../libraries/RateLimiter.sol";
+import {E2E} from "../End2End.t.sol";
+
+contract GHOTokenPoolEthereumE2E is E2E {
+ using Internal for Internal.EVM2EVMMessage;
+
+ address internal USER = makeAddr("user");
+ address internal AAVE_DAO = makeAddr("AAVE_DAO");
+ address internal PROXY_ADMIN = makeAddr("PROXY_ADMIN");
+
+ uint256 internal INITIAL_BRIDGE_LIMIT = 100e6 * 1e18;
+
+ IBurnMintERC20 internal srcGhoToken;
+ IBurnMintERC20 internal dstGhoToken;
+ UpgradeableLockReleaseTokenPool internal srcGhoTokenPool;
+ UpgradeableBurnMintTokenPool internal dstGhoTokenPool;
+
+ function setUp() public virtual override {
+ E2E.setUp();
+
+ // Deploy GHO Token on source chain
+ srcGhoToken = IBurnMintERC20(address(new GhoToken(AAVE_DAO)));
+ deal(address(srcGhoToken), OWNER, type(uint128).max);
+ // Add GHO token to source token list
+ s_sourceTokens.push(address(srcGhoToken));
+
+ // Deploy GHO Token on destination chain
+ dstGhoToken = IBurnMintERC20(address(new GhoToken(AAVE_DAO)));
+ deal(address(dstGhoToken), OWNER, type(uint128).max);
+ // Add GHO token to destination token list
+ s_destTokens.push(address(dstGhoToken));
+
+ // Deploy LockReleaseTokenPool for GHO token on source chain
+ srcGhoTokenPool = UpgradeableLockReleaseTokenPool(
+ _deployUpgradeableLockReleaseTokenPool(
+ address(srcGhoToken),
+ address(s_mockARM),
+ address(s_sourceRouter),
+ AAVE_DAO,
+ INITIAL_BRIDGE_LIMIT,
+ PROXY_ADMIN
+ )
+ );
+
+ // Add GHO UpgradeableTokenPool to source token pool list
+ s_sourcePools.push(address(srcGhoTokenPool));
+
+ // Deploy BurnMintTokenPool for GHO token on destination chain
+ dstGhoTokenPool = UpgradeableBurnMintTokenPool(
+ _deployUpgradeableBurnMintTokenPool(
+ address(dstGhoToken),
+ address(s_mockARM),
+ address(s_destRouter),
+ AAVE_DAO,
+ PROXY_ADMIN
+ )
+ );
+
+ // Add GHO UpgradeableTokenPool to destination token pool list
+ s_destPools.push(address(dstGhoTokenPool));
+
+ // Give mint and burn privileges to destination UpgradeableTokenPool (GHO-specific related)
+ vm.stopPrank();
+ vm.startPrank(AAVE_DAO);
+ GhoToken(address(dstGhoToken)).grantRole(GhoToken(address(dstGhoToken)).FACILITATOR_MANAGER_ROLE(), AAVE_DAO);
+ GhoToken(address(dstGhoToken)).addFacilitator(address(dstGhoTokenPool), "UpgradeableTokenPool", type(uint128).max);
+ vm.stopPrank();
+ vm.startPrank(OWNER);
+
+ // Add config for source and destination chains
+ UpgradeableTokenPool.ChainUpdate[] memory srcChainUpdates = new UpgradeableTokenPool.ChainUpdate[](1);
+ srcChainUpdates[0] = UpgradeableTokenPool.ChainUpdate({
+ remoteChainSelector: DEST_CHAIN_SELECTOR,
+ allowed: true,
+ outboundRateLimiterConfig: getOutboundRateLimiterConfig(),
+ inboundRateLimiterConfig: getInboundRateLimiterConfig()
+ });
+ UpgradeableTokenPool.ChainUpdate[] memory dstChainUpdates = new UpgradeableTokenPool.ChainUpdate[](1);
+ dstChainUpdates[0] = UpgradeableTokenPool.ChainUpdate({
+ remoteChainSelector: SOURCE_CHAIN_SELECTOR,
+ allowed: true,
+ outboundRateLimiterConfig: getOutboundRateLimiterConfig(),
+ inboundRateLimiterConfig: getInboundRateLimiterConfig()
+ });
+ vm.stopPrank();
+ vm.startPrank(AAVE_DAO);
+ srcGhoTokenPool.applyChainUpdates(srcChainUpdates);
+ dstGhoTokenPool.applyChainUpdates(dstChainUpdates);
+ vm.stopPrank();
+ vm.startPrank(OWNER);
+
+ // Update GHO Token price on source PriceRegistry
+ EVM2EVMOnRamp.DynamicConfig memory onRampDynamicConfig = s_onRamp.getDynamicConfig();
+ PriceRegistry onRampPriceRegistry = PriceRegistry(onRampDynamicConfig.priceRegistry);
+ onRampPriceRegistry.updatePrices(getSingleTokenPriceUpdateStruct(address(srcGhoToken), 1e18));
+
+ // Update GHO Token price on destination PriceRegistry
+ EVM2EVMOffRamp.DynamicConfig memory offRampDynamicConfig = s_offRamp.getDynamicConfig();
+ PriceRegistry offRampPriceRegistry = PriceRegistry(offRampDynamicConfig.priceRegistry);
+ offRampPriceRegistry.updatePrices(getSingleTokenPriceUpdateStruct(address(dstGhoToken), 1e18));
+
+ // Add UpgradeableTokenPool to OnRamp
+ address[] memory srcTokens = new address[](1);
+ IPool[] memory srcPools = new IPool[](1);
+ srcTokens[0] = address(srcGhoToken);
+ srcPools[0] = IPool(address(srcGhoTokenPool));
+ s_onRamp.applyPoolUpdates(new Internal.PoolUpdate[](0), getTokensAndPools(srcTokens, srcPools));
+
+ // Add UpgradeableTokenPool to OffRamp, matching source token with destination UpgradeableTokenPool
+ IPool[] memory dstPools = new IPool[](1);
+ dstPools[0] = IPool(address(dstGhoTokenPool));
+ s_offRamp.applyPoolUpdates(new Internal.PoolUpdate[](0), getTokensAndPools(srcTokens, dstPools));
+ }
+
+ function testE2E_MessagesSuccess_gas() public {
+ vm.pauseGasMetering();
+ uint256 preGhoTokenBalanceOwner = srcGhoToken.balanceOf(OWNER);
+ uint256 preGhoTokenBalancePool = srcGhoToken.balanceOf(address(srcGhoTokenPool));
+ uint256 preBridgedAmount = srcGhoTokenPool.getCurrentBridgedAmount();
+ uint256 preBridgeLimit = srcGhoTokenPool.getBridgeLimit();
+
+ Internal.EVM2EVMMessage[] memory messages = new Internal.EVM2EVMMessage[](1);
+ messages[0] = sendRequestGho(1, 1000 * 1e18, false, false);
+
+ uint256 expectedFee = s_sourceRouter.getFee(DEST_CHAIN_SELECTOR, _generateTokenMessage());
+ // Asserts that the tokens have been sent and the fee has been paid.
+ assertEq(preGhoTokenBalanceOwner - 1000 * 1e18, srcGhoToken.balanceOf(OWNER));
+ assertEq(preGhoTokenBalancePool + 1000 * 1e18, srcGhoToken.balanceOf(address(srcGhoTokenPool)));
+ assertGt(expectedFee, 0);
+
+ assertEq(preBridgedAmount + 1000 * 1e18, srcGhoTokenPool.getCurrentBridgedAmount());
+ assertEq(preBridgeLimit, srcGhoTokenPool.getBridgeLimit());
+
+ bytes32 metaDataHash = s_offRamp.metadataHash();
+
+ bytes32[] memory hashedMessages = new bytes32[](1);
+ hashedMessages[0] = messages[0]._hash(metaDataHash);
+ messages[0].messageId = hashedMessages[0];
+
+ bytes32[] memory merkleRoots = new bytes32[](1);
+ merkleRoots[0] = MerkleHelper.getMerkleRoot(hashedMessages);
+
+ address[] memory onRamps = new address[](1);
+ onRamps[0] = ON_RAMP_ADDRESS;
+
+ bytes memory commitReport = abi.encode(
+ CommitStore.CommitReport({
+ priceUpdates: getEmptyPriceUpdates(),
+ interval: CommitStore.Interval(messages[0].sequenceNumber, messages[0].sequenceNumber),
+ merkleRoot: merkleRoots[0]
+ })
+ );
+
+ vm.resumeGasMetering();
+ s_commitStore.report(commitReport, ++s_latestEpochAndRound);
+ vm.pauseGasMetering();
+
+ bytes32[] memory proofs = new bytes32[](0);
+ uint256 timestamp = s_commitStore.verify(merkleRoots, proofs, 2 ** 2 - 1);
+ assertEq(BLOCK_TIME, timestamp);
+
+ // We change the block time so when execute would e.g. use the current
+ // block time instead of the committed block time the value would be
+ // incorrect in the checks below.
+ vm.warp(BLOCK_TIME + 2000);
+
+ vm.expectEmit();
+ emit ExecutionStateChanged(
+ messages[0].sequenceNumber,
+ messages[0].messageId,
+ Internal.MessageExecutionState.SUCCESS,
+ ""
+ );
+
+ Internal.ExecutionReport memory execReport = _generateReportFromMessages(messages);
+
+ uint256 preGhoTokenBalanceUser = dstGhoToken.balanceOf(USER);
+ (uint256 preCapacity, uint256 preLevel) = GhoToken(address(dstGhoToken)).getFacilitatorBucket(
+ address(dstGhoTokenPool)
+ );
+
+ vm.resumeGasMetering();
+ s_offRamp.execute(execReport, new uint256[](0));
+ vm.pauseGasMetering();
+
+ assertEq(preGhoTokenBalanceUser + 1000 * 1e18, dstGhoToken.balanceOf(USER), "Wrong balance on destination");
+ // Facilitator checks
+ (uint256 postCapacity, uint256 postLevel) = GhoToken(address(dstGhoToken)).getFacilitatorBucket(
+ address(dstGhoTokenPool)
+ );
+ assertEq(postCapacity, preCapacity);
+ assertEq(preLevel + 1000 * 1e18, postLevel, "wrong facilitator bucket level");
+ }
+
+ function testE2E_3MessagesSuccess_gas() public {
+ vm.pauseGasMetering();
+ uint256 preGhoTokenBalanceOwner = srcGhoToken.balanceOf(OWNER);
+ uint256 preGhoTokenBalancePool = srcGhoToken.balanceOf(address(srcGhoTokenPool));
+ uint256 preBridgedAmount = srcGhoTokenPool.getCurrentBridgedAmount();
+ uint256 preBridgeLimit = srcGhoTokenPool.getBridgeLimit();
+
+ Internal.EVM2EVMMessage[] memory messages = new Internal.EVM2EVMMessage[](3);
+ messages[0] = sendRequestGho(1, 1000 * 1e18, false, false);
+ messages[1] = sendRequestGho(2, 2000 * 1e18, false, false);
+ messages[2] = sendRequestGho(3, 3000 * 1e18, false, false);
+
+ uint256 expectedFee = s_sourceRouter.getFee(DEST_CHAIN_SELECTOR, _generateTokenMessage());
+ // Asserts that the tokens have been sent and the fee has been paid.
+ assertEq(preGhoTokenBalanceOwner - 6000 * 1e18, srcGhoToken.balanceOf(OWNER));
+ assertEq(preGhoTokenBalancePool + 6000 * 1e18, srcGhoToken.balanceOf(address(srcGhoTokenPool)));
+ assertGt(expectedFee, 0);
+
+ assertEq(preBridgedAmount + 6000 * 1e18, srcGhoTokenPool.getCurrentBridgedAmount());
+ assertEq(preBridgeLimit, srcGhoTokenPool.getBridgeLimit());
+
+ bytes32 metaDataHash = s_offRamp.metadataHash();
+
+ bytes32[] memory hashedMessages = new bytes32[](3);
+ hashedMessages[0] = messages[0]._hash(metaDataHash);
+ messages[0].messageId = hashedMessages[0];
+ hashedMessages[1] = messages[1]._hash(metaDataHash);
+ messages[1].messageId = hashedMessages[1];
+ hashedMessages[2] = messages[2]._hash(metaDataHash);
+ messages[2].messageId = hashedMessages[2];
+
+ bytes32[] memory merkleRoots = new bytes32[](1);
+ merkleRoots[0] = MerkleHelper.getMerkleRoot(hashedMessages);
+
+ address[] memory onRamps = new address[](1);
+ onRamps[0] = ON_RAMP_ADDRESS;
+
+ bytes memory commitReport = abi.encode(
+ CommitStore.CommitReport({
+ priceUpdates: getEmptyPriceUpdates(),
+ interval: CommitStore.Interval(messages[0].sequenceNumber, messages[2].sequenceNumber),
+ merkleRoot: merkleRoots[0]
+ })
+ );
+
+ vm.resumeGasMetering();
+ s_commitStore.report(commitReport, ++s_latestEpochAndRound);
+ vm.pauseGasMetering();
+
+ bytes32[] memory proofs = new bytes32[](0);
+ uint256 timestamp = s_commitStore.verify(merkleRoots, proofs, 2 ** 2 - 1);
+ assertEq(BLOCK_TIME, timestamp);
+
+ // We change the block time so when execute would e.g. use the current
+ // block time instead of the committed block time the value would be
+ // incorrect in the checks below.
+ vm.warp(BLOCK_TIME + 2000);
+
+ vm.expectEmit();
+ emit ExecutionStateChanged(
+ messages[0].sequenceNumber,
+ messages[0].messageId,
+ Internal.MessageExecutionState.SUCCESS,
+ ""
+ );
+
+ vm.expectEmit();
+ emit ExecutionStateChanged(
+ messages[1].sequenceNumber,
+ messages[1].messageId,
+ Internal.MessageExecutionState.SUCCESS,
+ ""
+ );
+
+ vm.expectEmit();
+ emit ExecutionStateChanged(
+ messages[2].sequenceNumber,
+ messages[2].messageId,
+ Internal.MessageExecutionState.SUCCESS,
+ ""
+ );
+
+ Internal.ExecutionReport memory execReport = _generateReportFromMessages(messages);
+
+ uint256 preGhoTokenBalanceUser = dstGhoToken.balanceOf(USER);
+ (uint256 preCapacity, uint256 preLevel) = GhoToken(address(dstGhoToken)).getFacilitatorBucket(
+ address(dstGhoTokenPool)
+ );
+
+ vm.resumeGasMetering();
+ s_offRamp.execute(execReport, new uint256[](0));
+ vm.pauseGasMetering();
+
+ assertEq(preGhoTokenBalanceUser + 6000 * 1e18, dstGhoToken.balanceOf(USER), "Wrong balance on destination");
+ // Facilitator checks
+ (uint256 postCapacity, uint256 postLevel) = GhoToken(address(dstGhoToken)).getFacilitatorBucket(
+ address(dstGhoTokenPool)
+ );
+ assertEq(postCapacity, preCapacity);
+ assertEq(preLevel + 6000 * 1e18, postLevel, "wrong facilitator bucket level");
+ }
+
+ function testRevertRateLimitReached() public {
+ // increase bridge limit to hit the rate limit error
+ vm.startPrank(AAVE_DAO);
+ srcGhoTokenPool.setBridgeLimit(type(uint256).max);
+ vm.startPrank(OWNER);
+
+ RateLimiter.Config memory rateLimiterConfig = getOutboundRateLimiterConfig();
+
+ // will revert due to rate limit of tokenPool
+ sendRequestGho(1, rateLimiterConfig.capacity + 1, true, false);
+
+ // max capacity, won't revert
+ sendRequestGho(1, rateLimiterConfig.capacity, false, false);
+
+ // revert due to capacity exceed
+ sendRequestGho(2, 100, true, false);
+
+ // increase blocktime to refill capacity
+ vm.warp(BLOCK_TIME + 1);
+
+ // won't revert due to refill
+ sendRequestGho(2, 100, false, false);
+ }
+
+ function testRevertOnLessTokenToCoverFee() public {
+ sendRequestGho(1, 1000, false, true);
+ }
+
+ function testRevertBridgeLimitReached() public {
+ // increase ccip rate limit to hit the bridge limit error
+ vm.startPrank(AAVE_DAO);
+ srcGhoTokenPool.setChainRateLimiterConfig(
+ DEST_CHAIN_SELECTOR,
+ RateLimiter.Config({isEnabled: true, capacity: uint128(INITIAL_BRIDGE_LIMIT * 2), rate: 1e15}),
+ getInboundRateLimiterConfig()
+ );
+ vm.warp(block.timestamp + 100); // wait to refill capacity
+ vm.startPrank(OWNER);
+
+ // will revert due to bridge limit
+ sendRequestGho(1, uint128(INITIAL_BRIDGE_LIMIT + 1), true, false);
+
+ // max bridge limit, won't revert
+ sendRequestGho(1, uint128(INITIAL_BRIDGE_LIMIT), false, false);
+ assertEq(srcGhoTokenPool.getCurrentBridgedAmount(), INITIAL_BRIDGE_LIMIT);
+
+ // revert due to bridge limit exceed
+ sendRequestGho(2, 1, true, false);
+
+ // increase bridge limit
+ vm.startPrank(AAVE_DAO);
+ srcGhoTokenPool.setBridgeLimit(INITIAL_BRIDGE_LIMIT + 1);
+ vm.startPrank(OWNER);
+
+ // won't revert due to refill
+ sendRequestGho(2, 1, false, false);
+ assertEq(srcGhoTokenPool.getCurrentBridgedAmount(), INITIAL_BRIDGE_LIMIT + 1);
+ }
+
+ function sendRequestGho(
+ uint64 expectedSeqNum,
+ uint256 amount,
+ bool expectRevert,
+ bool sendLessFee
+ ) public returns (Internal.EVM2EVMMessage memory) {
+ Client.EVM2AnyMessage memory message = _generateSingleTokenMessage(address(srcGhoToken), amount);
+ uint256 expectedFee = s_sourceRouter.getFee(DEST_CHAIN_SELECTOR, message);
+
+ // err mgmt
+ uint256 feeToSend = sendLessFee ? expectedFee - 1 : expectedFee;
+ expectRevert = sendLessFee ? true : expectRevert;
+
+ IERC20(s_sourceTokens[0]).approve(address(s_sourceRouter), feeToSend); // fee
+ IERC20(srcGhoToken).approve(address(s_sourceRouter), amount); // amount
+
+ message.receiver = abi.encode(USER);
+ Internal.EVM2EVMMessage memory geEvent = _messageToEvent(
+ message,
+ expectedSeqNum,
+ expectedSeqNum,
+ expectedFee,
+ OWNER
+ );
+
+ if (!expectRevert) {
+ vm.expectEmit();
+ emit CCIPSendRequested(geEvent);
+ } else {
+ vm.expectRevert();
+ }
+ vm.resumeGasMetering();
+ s_sourceRouter.ccipSend(DEST_CHAIN_SELECTOR, message);
+ vm.pauseGasMetering();
+
+ return geEvent;
+ }
+}
diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereumSetup.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereumSetup.t.sol
new file mode 100644
index 0000000000..44038b9eb4
--- /dev/null
+++ b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolEthereumSetup.t.sol
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import {GhoToken} from "@aave/gho-core/gho/GhoToken.sol";
+import {TransparentUpgradeableProxy} from "solidity-utils/contracts/transparent-proxy/TransparentUpgradeableProxy.sol";
+
+import {stdError} from "forge-std/Test.sol";
+import {BaseTest} from "../../BaseTest.t.sol";
+import {IPool} from "../../../interfaces/pools/IPool.sol";
+import {UpgradeableLockReleaseTokenPool} from "../../../pools/GHO/UpgradeableLockReleaseTokenPool.sol";
+import {UpgradeableTokenPool} from "../../../pools/GHO/UpgradeableTokenPool.sol";
+import {EVM2EVMOnRamp} from "../../../onRamp/EVM2EVMOnRamp.sol";
+import {EVM2EVMOffRamp} from "../../../offRamp/EVM2EVMOffRamp.sol";
+import {RateLimiter} from "../../../libraries/RateLimiter.sol";
+import {BurnMintERC677} from "../../../../shared/token/ERC677/BurnMintERC677.sol";
+import {Router} from "../../../Router.sol";
+import {IERC165} from "../../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/IERC165.sol";
+import {IERC20} from "../../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol";
+import {RouterSetup} from "../../router/RouterSetup.t.sol";
+
+contract GHOTokenPoolEthereumSetup is RouterSetup {
+ IERC20 internal s_token;
+ UpgradeableLockReleaseTokenPool internal s_ghoTokenPool;
+
+ address internal s_allowedOnRamp = address(123);
+ address internal s_allowedOffRamp = address(234);
+
+ address internal AAVE_DAO = makeAddr("AAVE_DAO");
+ address internal PROXY_ADMIN = makeAddr("PROXY_ADMIN");
+
+ uint256 internal INITIAL_BRIDGE_LIMIT = 100e6 * 1e18;
+
+ function setUp() public virtual override {
+ RouterSetup.setUp();
+
+ // GHO deployment
+ GhoToken ghoToken = new GhoToken(AAVE_DAO);
+ s_token = IERC20(address(ghoToken));
+ deal(address(s_token), OWNER, type(uint128).max);
+
+ // Set up UpgradeableLockReleaseTokenPool (lock/release pool; no mint/burn privileges required)
+ s_ghoTokenPool = UpgradeableLockReleaseTokenPool(
+ _deployUpgradeableLockReleaseTokenPool(
+ address(s_token),
+ address(s_mockARM),
+ address(s_sourceRouter),
+ AAVE_DAO,
+ INITIAL_BRIDGE_LIMIT,
+ PROXY_ADMIN
+ )
+ );
+
+ UpgradeableTokenPool.ChainUpdate[] memory chainUpdate = new UpgradeableTokenPool.ChainUpdate[](1);
+ chainUpdate[0] = UpgradeableTokenPool.ChainUpdate({
+ remoteChainSelector: DEST_CHAIN_SELECTOR,
+ allowed: true,
+ outboundRateLimiterConfig: getOutboundRateLimiterConfig(),
+ inboundRateLimiterConfig: getInboundRateLimiterConfig()
+ });
+
+ changePrank(AAVE_DAO);
+ s_ghoTokenPool.applyChainUpdates(chainUpdate);
+ s_ghoTokenPool.setRebalancer(OWNER);
+ changePrank(OWNER);
+
+ Router.OnRamp[] memory onRampUpdates = new Router.OnRamp[](1);
+ Router.OffRamp[] memory offRampUpdates = new Router.OffRamp[](1);
+ onRampUpdates[0] = Router.OnRamp({destChainSelector: DEST_CHAIN_SELECTOR, onRamp: s_allowedOnRamp});
+ offRampUpdates[0] = Router.OffRamp({sourceChainSelector: SOURCE_CHAIN_SELECTOR, offRamp: s_allowedOffRamp});
+ s_sourceRouter.applyRampUpdates(onRampUpdates, new Router.OffRamp[](0), offRampUpdates);
+ }
+}
diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolRemote.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolRemote.t.sol
new file mode 100644
index 0000000000..b1027365c2
--- /dev/null
+++ b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolRemote.t.sol
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import {GhoToken} from "@aave/gho-core/gho/GhoToken.sol";
+import {TransparentUpgradeableProxy} from "solidity-utils/contracts/transparent-proxy/TransparentUpgradeableProxy.sol";
+
+import {stdError} from "forge-std/Test.sol";
+import {MockUpgradeable} from "../../mocks/MockUpgradeable.sol";
+import {UpgradeableTokenPool} from "../../../pools/GHO/UpgradeableTokenPool.sol";
+import {EVM2EVMOnRamp} from "../../../onRamp/EVM2EVMOnRamp.sol";
+import {EVM2EVMOffRamp} from "../../../offRamp/EVM2EVMOffRamp.sol";
+import {BurnMintTokenPool} from "../../../pools/BurnMintTokenPool.sol";
+import {UpgradeableBurnMintTokenPool} from "../../../pools/GHO/UpgradeableBurnMintTokenPool.sol";
+import {RateLimiter} from "../../../libraries/RateLimiter.sol";
+import {GHOTokenPoolRemoteSetup} from "./GHOTokenPoolRemoteSetup.t.sol";
+
+contract GHOTokenPoolRemote_lockOrBurn is GHOTokenPoolRemoteSetup {
+ function testSetupSuccess() public {
+ assertEq(address(s_burnMintERC677), address(s_pool.getToken()));
+ assertEq(address(s_mockARM), s_pool.getArmProxy());
+ assertEq(false, s_pool.getAllowListEnabled());
+ assertEq("BurnMintTokenPool 1.4.0", s_pool.typeAndVersion());
+ }
+
+ function testPoolBurnSuccess() public {
+ uint256 burnAmount = 20_000e18;
+ // inflate facilitator level
+ _inflateFacilitatorLevel(address(s_pool), address(s_burnMintERC677), burnAmount);
+
+ deal(address(s_burnMintERC677), address(s_pool), burnAmount);
+ assertEq(s_burnMintERC677.balanceOf(address(s_pool)), burnAmount);
+
+ vm.startPrank(s_burnMintOnRamp);
+
+ vm.expectEmit();
+ emit TokensConsumed(burnAmount);
+
+ vm.expectEmit();
+ emit Transfer(address(s_pool), address(0), burnAmount);
+
+ vm.expectEmit();
+ emit Burned(address(s_burnMintOnRamp), burnAmount);
+
+ bytes4 expectedSignature = bytes4(keccak256("burn(uint256)"));
+ vm.expectCall(address(s_burnMintERC677), abi.encodeWithSelector(expectedSignature, burnAmount));
+
+ (uint256 preCapacity, uint256 preLevel) = GhoToken(address(s_burnMintERC677)).getFacilitatorBucket(address(s_pool));
+
+ s_pool.lockOrBurn(OWNER, bytes(""), burnAmount, DEST_CHAIN_SELECTOR, bytes(""));
+
+ // Facilitator checks
+ (uint256 postCapacity, uint256 postLevel) = GhoToken(address(s_burnMintERC677)).getFacilitatorBucket(
+ address(s_pool)
+ );
+ assertEq(postCapacity, preCapacity);
+ assertEq(preLevel - burnAmount, postLevel, "wrong facilitator bucket level");
+
+ assertEq(s_burnMintERC677.balanceOf(address(s_pool)), 0);
+ }
+
+ // Should not burn tokens if cursed.
+ function testPoolBurnRevertNotHealthyReverts() public {
+ s_mockARM.voteToCurse(bytes32(0));
+ uint256 before = s_burnMintERC677.balanceOf(address(s_pool));
+ vm.startPrank(s_burnMintOnRamp);
+
+ vm.expectRevert(EVM2EVMOnRamp.BadARMSignal.selector);
+ s_pool.lockOrBurn(OWNER, bytes(""), 1e5, DEST_CHAIN_SELECTOR, bytes(""));
+
+ assertEq(s_burnMintERC677.balanceOf(address(s_pool)), before);
+ }
+
+ function testChainNotAllowedReverts() public {
+ uint64 wrongChainSelector = 8838833;
+ vm.expectRevert(abi.encodeWithSelector(UpgradeableTokenPool.ChainNotAllowed.selector, wrongChainSelector));
+ s_pool.lockOrBurn(OWNER, bytes(""), 1, wrongChainSelector, bytes(""));
+ }
+
+ function testPoolBurnNoPrivilegesReverts() public {
+ // Remove privileges
+ vm.startPrank(AAVE_DAO);
+ GhoToken(address(s_burnMintERC677)).removeFacilitator(address(s_pool));
+ vm.stopPrank();
+
+ uint256 amount = 1;
+ vm.startPrank(s_burnMintOnRamp);
+ vm.expectRevert(stdError.arithmeticError);
+ s_pool.lockOrBurn(STRANGER, bytes(""), amount, DEST_CHAIN_SELECTOR, bytes(""));
+ }
+
+ function testBucketLevelNotEnoughReverts() public {
+ (, uint256 bucketLevel) = GhoToken(address(s_burnMintERC677)).getFacilitatorBucket(address(s_pool));
+ assertEq(bucketLevel, 0);
+
+ uint256 amount = 1;
+ vm.expectCall(address(s_burnMintERC677), abi.encodeWithSelector(GhoToken.burn.selector, amount));
+ vm.expectRevert(stdError.arithmeticError);
+ vm.startPrank(s_burnMintOnRamp);
+ s_pool.lockOrBurn(STRANGER, bytes(""), amount, DEST_CHAIN_SELECTOR, bytes(""));
+ }
+
+ function testTokenMaxCapacityExceededReverts() public {
+ RateLimiter.Config memory rateLimiterConfig = getOutboundRateLimiterConfig();
+ uint256 capacity = rateLimiterConfig.capacity;
+ uint256 amount = 10 * capacity;
+
+ vm.expectRevert(
+ abi.encodeWithSelector(RateLimiter.TokenMaxCapacityExceeded.selector, capacity, amount, address(s_burnMintERC677))
+ );
+ vm.startPrank(s_burnMintOnRamp);
+ s_pool.lockOrBurn(STRANGER, bytes(""), amount, DEST_CHAIN_SELECTOR, bytes(""));
+ }
+}
+
+contract GHOTokenPoolRemote_releaseOrMint is GHOTokenPoolRemoteSetup {
+ function testPoolMintSuccess() public {
+ uint256 amount = 1e19;
+ vm.startPrank(s_burnMintOffRamp);
+ vm.expectEmit();
+ emit Transfer(address(0), OWNER, amount);
+ s_pool.releaseOrMint(bytes(""), OWNER, amount, DEST_CHAIN_SELECTOR, bytes(""));
+ assertEq(s_burnMintERC677.balanceOf(OWNER), amount);
+ }
+
+ function testPoolMintNotHealthyReverts() public {
+ // Should not mint tokens if cursed.
+ s_mockARM.voteToCurse(bytes32(0));
+ uint256 before = s_burnMintERC677.balanceOf(OWNER);
+ vm.startPrank(s_burnMintOffRamp);
+ vm.expectRevert(EVM2EVMOffRamp.BadARMSignal.selector);
+ s_pool.releaseOrMint(bytes(""), OWNER, 1e5, DEST_CHAIN_SELECTOR, bytes(""));
+ assertEq(s_burnMintERC677.balanceOf(OWNER), before);
+ }
+
+ function testChainNotAllowedReverts() public {
+ uint64 wrongChainSelector = 8838833;
+ vm.expectRevert(abi.encodeWithSelector(UpgradeableTokenPool.ChainNotAllowed.selector, wrongChainSelector));
+ s_pool.releaseOrMint(bytes(""), STRANGER, 1, wrongChainSelector, bytes(""));
+ }
+
+ function testPoolMintNoPrivilegesReverts() public {
+ // Remove privileges
+ vm.startPrank(AAVE_DAO);
+ GhoToken(address(s_burnMintERC677)).removeFacilitator(address(s_pool));
+ vm.stopPrank();
+
+ uint256 amount = 1;
+ vm.startPrank(s_burnMintOffRamp);
+ vm.expectRevert("FACILITATOR_BUCKET_CAPACITY_EXCEEDED");
+ s_pool.releaseOrMint(bytes(""), STRANGER, amount, DEST_CHAIN_SELECTOR, bytes(""));
+ }
+
+ function testBucketCapacityExceededReverts() public {
+ // Mint all the bucket capacity
+ (uint256 bucketCapacity, ) = GhoToken(address(s_burnMintERC677)).getFacilitatorBucket(address(s_pool));
+ _inflateFacilitatorLevel(address(s_pool), address(s_burnMintERC677), bucketCapacity);
+ (uint256 currCapacity, uint256 currLevel) = GhoToken(address(s_burnMintERC677)).getFacilitatorBucket(
+ address(s_pool)
+ );
+ assertEq(currCapacity, currLevel);
+
+ uint256 amount = 1;
+ vm.expectCall(address(s_burnMintERC677), abi.encodeWithSelector(GhoToken.mint.selector, STRANGER, amount));
+ vm.expectRevert("FACILITATOR_BUCKET_CAPACITY_EXCEEDED");
+ vm.startPrank(s_burnMintOffRamp);
+ s_pool.releaseOrMint(bytes(""), STRANGER, amount, DEST_CHAIN_SELECTOR, bytes(""));
+ }
+
+ function testTokenMaxCapacityExceededReverts() public {
+ RateLimiter.Config memory rateLimiterConfig = getInboundRateLimiterConfig();
+ uint256 capacity = rateLimiterConfig.capacity;
+ uint256 amount = 10 * capacity;
+
+ vm.expectRevert(
+ abi.encodeWithSelector(RateLimiter.TokenMaxCapacityExceeded.selector, capacity, amount, address(s_burnMintERC677))
+ );
+ vm.startPrank(s_burnMintOffRamp);
+ s_pool.releaseOrMint(bytes(""), STRANGER, amount, DEST_CHAIN_SELECTOR, bytes(""));
+ }
+}
+
+contract GHOTokenPoolEthereum_upgradeability is GHOTokenPoolRemoteSetup {
+ function testInitialization() public {
+ // Upgradeability
+ assertEq(s_pool.REVISION(), 1);
+ vm.startPrank(PROXY_ADMIN);
+ (bool ok, bytes memory result) = address(s_pool).staticcall(
+ abi.encodeWithSelector(TransparentUpgradeableProxy.admin.selector)
+ );
+ assertTrue(ok, "proxy admin fetch failed");
+ address decodedProxyAdmin = abi.decode(result, (address));
+ assertEq(decodedProxyAdmin, PROXY_ADMIN, "proxy admin is wrong");
+ assertEq(decodedProxyAdmin, _getProxyAdminAddress(address(s_pool)), "proxy admin is wrong");
+
+ // TokenPool
+ vm.startPrank(OWNER);
+ assertEq(s_pool.getAllowList().length, 0);
+ assertEq(s_pool.getAllowListEnabled(), false);
+ assertEq(s_pool.getArmProxy(), address(s_mockARM));
+ assertEq(s_pool.getRouter(), address(s_sourceRouter));
+ assertEq(address(s_pool.getToken()), address(s_burnMintERC677));
+ assertEq(s_pool.owner(), AAVE_DAO, "owner is wrong");
+ }
+
+ function testUpgrade() public {
+ MockUpgradeable newImpl = new MockUpgradeable();
+ bytes memory mockImpleParams = abi.encodeWithSignature("initialize()");
+ vm.startPrank(PROXY_ADMIN);
+ TransparentUpgradeableProxy(payable(address(s_pool))).upgradeToAndCall(address(newImpl), mockImpleParams);
+
+ vm.startPrank(OWNER);
+ assertEq(s_pool.REVISION(), 2);
+ }
+
+ function testUpgradeAdminReverts() public {
+ vm.expectRevert();
+ TransparentUpgradeableProxy(payable(address(s_pool))).upgradeToAndCall(address(0), bytes(""));
+ assertEq(s_pool.REVISION(), 1);
+
+ vm.expectRevert();
+ TransparentUpgradeableProxy(payable(address(s_pool))).upgradeTo(address(0));
+ assertEq(s_pool.REVISION(), 1);
+ }
+
+ function testChangeAdmin() public {
+ assertEq(_getProxyAdminAddress(address(s_pool)), PROXY_ADMIN);
+
+ address newAdmin = makeAddr("newAdmin");
+ vm.startPrank(PROXY_ADMIN);
+ TransparentUpgradeableProxy(payable(address(s_pool))).changeAdmin(newAdmin);
+
+ assertEq(_getProxyAdminAddress(address(s_pool)), newAdmin, "Admin change failed");
+ }
+
+ function testChangeAdminAdminReverts() public {
+ assertEq(_getProxyAdminAddress(address(s_pool)), PROXY_ADMIN);
+
+ address newAdmin = makeAddr("newAdmin");
+ vm.expectRevert();
+ TransparentUpgradeableProxy(payable(address(s_pool))).changeAdmin(newAdmin);
+
+ assertEq(_getProxyAdminAddress(address(s_pool)), PROXY_ADMIN, "Unauthorized admin change");
+ }
+}
diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolRemoteE2E.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolRemoteE2E.t.sol
new file mode 100644
index 0000000000..ccad39ce6c
--- /dev/null
+++ b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolRemoteE2E.t.sol
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import {GhoToken} from "@aave/gho-core/gho/GhoToken.sol";
+import {TransparentUpgradeableProxy} from "solidity-utils/contracts/transparent-proxy/TransparentUpgradeableProxy.sol";
+
+import "../../helpers/MerkleHelper.sol";
+import "../../commitStore/CommitStore.t.sol";
+import "../../onRamp/EVM2EVMOnRampSetup.t.sol";
+import "../../offRamp/EVM2EVMOffRampSetup.t.sol";
+import {IBurnMintERC20} from "../../../../shared/token/ERC20/IBurnMintERC20.sol";
+import {UpgradeableLockReleaseTokenPool} from "../../../pools/GHO/UpgradeableLockReleaseTokenPool.sol";
+import {UpgradeableBurnMintTokenPool} from "../../../pools/GHO/UpgradeableBurnMintTokenPool.sol";
+import {UpgradeableTokenPool} from "../../../pools/GHO/UpgradeableTokenPool.sol";
+import {IPool} from "../../../interfaces/pools/IPool.sol";
+import {RateLimiter} from "../../../libraries/RateLimiter.sol";
+import {E2E} from "../End2End.t.sol";
+
+contract GHOTokenPoolRemoteE2E is E2E {
+ using Internal for Internal.EVM2EVMMessage;
+
+ address internal USER = makeAddr("user");
+ address internal AAVE_DAO = makeAddr("AAVE_DAO");
+ address internal PROXY_ADMIN = makeAddr("PROXY_ADMIN");
+
+ uint256 internal INITIAL_BRIDGE_LIMIT = 100e6 * 1e18;
+
+ IBurnMintERC20 internal srcGhoToken;
+ IBurnMintERC20 internal dstGhoToken;
+ UpgradeableBurnMintTokenPool internal srcGhoTokenPool;
+ UpgradeableLockReleaseTokenPool internal dstGhoTokenPool;
+
+ function setUp() public virtual override {
+ E2E.setUp();
+
+ // Deploy GHO Token on source chain
+ srcGhoToken = IBurnMintERC20(address(new GhoToken(AAVE_DAO)));
+ deal(address(srcGhoToken), OWNER, type(uint128).max);
+ // Add GHO token to source token list
+ s_sourceTokens.push(address(srcGhoToken));
+
+ // Deploy GHO Token on destination chain
+ dstGhoToken = IBurnMintERC20(address(new GhoToken(AAVE_DAO)));
+ deal(address(dstGhoToken), OWNER, type(uint128).max);
+ // Add GHO token to destination token list
+ s_destTokens.push(address(dstGhoToken));
+
+ // Deploy BurnMintTokenPool for GHO token on source chain
+ srcGhoTokenPool = UpgradeableBurnMintTokenPool(
+ _deployUpgradeableBurnMintTokenPool(
+ address(srcGhoToken),
+ address(s_mockARM),
+ address(s_sourceRouter),
+ AAVE_DAO,
+ PROXY_ADMIN
+ )
+ );
+
+ // Add GHO UpgradeableTokenPool to source token pool list
+ s_sourcePools.push(address(srcGhoTokenPool));
+
+ // Deploy LockReleaseTokenPool for GHO token on destination chain
+ dstGhoTokenPool = UpgradeableLockReleaseTokenPool(
+ _deployUpgradeableLockReleaseTokenPool(
+ address(dstGhoToken),
+ address(s_mockARM),
+ address(s_destRouter),
+ AAVE_DAO,
+ INITIAL_BRIDGE_LIMIT,
+ PROXY_ADMIN
+ )
+ );
+
+ // Add GHO UpgradeableTokenPool to destination token pool list
+ s_destPools.push(address(dstGhoTokenPool));
+
+ // Give mint and burn privileges to the source token pool by registering it as a GHO facilitator (GHO-specific)
+ vm.stopPrank();
+ vm.startPrank(AAVE_DAO);
+ GhoToken(address(srcGhoToken)).grantRole(GhoToken(address(srcGhoToken)).FACILITATOR_MANAGER_ROLE(), AAVE_DAO);
+ GhoToken(address(srcGhoToken)).addFacilitator(address(srcGhoTokenPool), "UpgradeableTokenPool", type(uint128).max);
+ vm.stopPrank();
+ vm.startPrank(OWNER);
+
+ // Add config for source and destination chains
+ UpgradeableTokenPool.ChainUpdate[] memory srcChainUpdates = new UpgradeableTokenPool.ChainUpdate[](1);
+ srcChainUpdates[0] = UpgradeableTokenPool.ChainUpdate({
+ remoteChainSelector: DEST_CHAIN_SELECTOR,
+ allowed: true,
+ outboundRateLimiterConfig: getOutboundRateLimiterConfig(),
+ inboundRateLimiterConfig: getInboundRateLimiterConfig()
+ });
+ UpgradeableTokenPool.ChainUpdate[] memory dstChainUpdates = new UpgradeableTokenPool.ChainUpdate[](1);
+ dstChainUpdates[0] = UpgradeableTokenPool.ChainUpdate({
+ remoteChainSelector: SOURCE_CHAIN_SELECTOR,
+ allowed: true,
+ outboundRateLimiterConfig: getOutboundRateLimiterConfig(),
+ inboundRateLimiterConfig: getInboundRateLimiterConfig()
+ });
+ vm.stopPrank();
+ vm.startPrank(AAVE_DAO);
+ srcGhoTokenPool.applyChainUpdates(srcChainUpdates);
+ dstGhoTokenPool.applyChainUpdates(dstChainUpdates);
+ vm.stopPrank();
+ vm.startPrank(OWNER);
+
+ // Update GHO Token price on source PriceRegistry
+ EVM2EVMOnRamp.DynamicConfig memory onRampDynamicConfig = s_onRamp.getDynamicConfig();
+ PriceRegistry onRampPriceRegistry = PriceRegistry(onRampDynamicConfig.priceRegistry);
+ onRampPriceRegistry.updatePrices(getSingleTokenPriceUpdateStruct(address(srcGhoToken), 1e18));
+
+ // Update GHO Token price on destination PriceRegistry
+ EVM2EVMOffRamp.DynamicConfig memory offRampDynamicConfig = s_offRamp.getDynamicConfig();
+ PriceRegistry offRampPriceRegistry = PriceRegistry(offRampDynamicConfig.priceRegistry);
+ offRampPriceRegistry.updatePrices(getSingleTokenPriceUpdateStruct(address(dstGhoToken), 1e18));
+
+ // Add UpgradeableTokenPool to OnRamp
+ address[] memory srcTokens = new address[](1);
+ IPool[] memory srcPools = new IPool[](1);
+ srcTokens[0] = address(srcGhoToken);
+ srcPools[0] = IPool(address(srcGhoTokenPool));
+ s_onRamp.applyPoolUpdates(new Internal.PoolUpdate[](0), getTokensAndPools(srcTokens, srcPools));
+
+ // Add UpgradeableTokenPool to OffRamp, matching source token with destination UpgradeableTokenPool
+ IPool[] memory dstPools = new IPool[](1);
+ dstPools[0] = IPool(address(dstGhoTokenPool));
+ s_offRamp.applyPoolUpdates(new Internal.PoolUpdate[](0), getTokensAndPools(srcTokens, dstPools));
+
+ address[] memory dstTokens = new address[](1);
+ dstTokens[0] = address(dstGhoToken);
+ s_onRamp.applyPoolUpdates(new Internal.PoolUpdate[](0), getTokensAndPools(dstTokens, dstPools));
+ }
+
+ function testE2E_MessagesSuccess_gas() public {
+ vm.pauseGasMetering();
+
+ // Mint some GHO to inflate UpgradeableBurnMintTokenPool facilitator level
+ _inflateFacilitatorLevel(address(srcGhoTokenPool), address(srcGhoToken), 1000 * 1e18);
+ vm.startPrank(OWNER);
+
+ // Lock some GHO on destination so it can be released later on
+ dstGhoToken.transfer(address(dstGhoTokenPool), 1000 * 1e18);
+ // Inflate current bridged amount so it can be reduced in `releaseOrMint` function
+ vm.stopPrank();
+ vm.startPrank(address(s_onRamp));
+ vm.mockCall(
+ address(s_destRouter),
+ abi.encodeWithSelector(bytes4(keccak256("getOnRamp(uint64)"))),
+ abi.encode(s_onRamp)
+ );
+ dstGhoTokenPool.lockOrBurn(STRANGER, bytes(""), 1000 * 1e18, SOURCE_CHAIN_SELECTOR, bytes(""));
+ assertEq(dstGhoTokenPool.getCurrentBridgedAmount(), 1000 * 1e18);
+ vm.startPrank(address(OWNER));
+
+ uint256 preGhoTokenBalanceOwner = srcGhoToken.balanceOf(OWNER);
+ uint256 preGhoTokenBalancePool = srcGhoToken.balanceOf(address(srcGhoTokenPool));
+ (uint256 preCapacity, uint256 preLevel) = GhoToken(address(srcGhoToken)).getFacilitatorBucket(
+ address(srcGhoTokenPool)
+ );
+
+ Internal.EVM2EVMMessage[] memory messages = new Internal.EVM2EVMMessage[](1);
+ messages[0] = sendRequestGho(1, 1000 * 1e18, false, false);
+
+ uint256 expectedFee = s_sourceRouter.getFee(DEST_CHAIN_SELECTOR, _generateTokenMessage());
+ // Asserts that the tokens have been sent and the fee has been paid.
+ assertEq(preGhoTokenBalanceOwner - 1000 * 1e18, srcGhoToken.balanceOf(OWNER));
+ assertEq(preGhoTokenBalancePool, srcGhoToken.balanceOf(address(srcGhoTokenPool))); // GHO gets burned
+ assertGt(expectedFee, 0);
+ assertEq(dstGhoTokenPool.getCurrentBridgedAmount(), 1000 * 1e18);
+
+ // Facilitator checks
+ (uint256 postCapacity, uint256 postLevel) = GhoToken(address(srcGhoToken)).getFacilitatorBucket(
+ address(srcGhoTokenPool)
+ );
+ assertEq(postCapacity, preCapacity);
+ assertEq(preLevel - 1000 * 1e18, postLevel, "wrong facilitator bucket level");
+
+ bytes32 metaDataHash = s_offRamp.metadataHash();
+
+ bytes32[] memory hashedMessages = new bytes32[](1);
+ hashedMessages[0] = messages[0]._hash(metaDataHash);
+ messages[0].messageId = hashedMessages[0];
+
+ bytes32[] memory merkleRoots = new bytes32[](1);
+ merkleRoots[0] = MerkleHelper.getMerkleRoot(hashedMessages);
+
+ address[] memory onRamps = new address[](1);
+ onRamps[0] = ON_RAMP_ADDRESS;
+
+ bytes memory commitReport = abi.encode(
+ CommitStore.CommitReport({
+ priceUpdates: getEmptyPriceUpdates(),
+ interval: CommitStore.Interval(messages[0].sequenceNumber, messages[0].sequenceNumber),
+ merkleRoot: merkleRoots[0]
+ })
+ );
+
+ vm.resumeGasMetering();
+ s_commitStore.report(commitReport, ++s_latestEpochAndRound);
+ vm.pauseGasMetering();
+
+ bytes32[] memory proofs = new bytes32[](0);
+ uint256 timestamp = s_commitStore.verify(merkleRoots, proofs, 2 ** 2 - 1);
+ assertEq(BLOCK_TIME, timestamp);
+
+ // We change the block time so that if `execute` were to use the current
+ // block time instead of the committed block time, the values in the
+ // checks below would be incorrect.
+ vm.warp(BLOCK_TIME + 2000);
+
+ vm.expectEmit();
+ emit ExecutionStateChanged(
+ messages[0].sequenceNumber,
+ messages[0].messageId,
+ Internal.MessageExecutionState.SUCCESS,
+ ""
+ );
+
+ Internal.ExecutionReport memory execReport = _generateReportFromMessages(messages);
+
+ uint256 preGhoTokenBalanceUser = dstGhoToken.balanceOf(USER);
+
+ vm.resumeGasMetering();
+ s_offRamp.execute(execReport, new uint256[](0));
+ vm.pauseGasMetering();
+
+ assertEq(preGhoTokenBalanceUser + 1000 * 1e18, dstGhoToken.balanceOf(USER), "Wrong balance on destination");
+ assertEq(dstGhoTokenPool.getCurrentBridgedAmount(), 0);
+ }
+
+ function testE2E_3MessagesSuccess_gas() public {
+ vm.pauseGasMetering();
+
+ // Mint some GHO to inflate UpgradeableTokenPool facilitator level
+ _inflateFacilitatorLevel(address(srcGhoTokenPool), address(srcGhoToken), 6000 * 1e18);
+ vm.startPrank(OWNER);
+
+ // Lock some GHO on destination so it can be released later on
+ dstGhoToken.transfer(address(dstGhoTokenPool), 6000 * 1e18);
+ // Inflate current bridged amount so it can be reduced in `releaseOrMint` function
+ vm.stopPrank();
+ vm.startPrank(address(s_onRamp));
+ vm.mockCall(
+ address(s_destRouter),
+ abi.encodeWithSelector(bytes4(keccak256("getOnRamp(uint64)"))),
+ abi.encode(s_onRamp)
+ );
+ dstGhoTokenPool.lockOrBurn(STRANGER, bytes(""), 6000 * 1e18, SOURCE_CHAIN_SELECTOR, bytes(""));
+ assertEq(dstGhoTokenPool.getCurrentBridgedAmount(), 6000 * 1e18);
+ vm.startPrank(address(OWNER));
+
+ uint256 preGhoTokenBalanceOwner = srcGhoToken.balanceOf(OWNER);
+ uint256 preGhoTokenBalancePool = srcGhoToken.balanceOf(address(srcGhoTokenPool));
+ (uint256 preCapacity, uint256 preLevel) = GhoToken(address(srcGhoToken)).getFacilitatorBucket(
+ address(srcGhoTokenPool)
+ );
+
+ Internal.EVM2EVMMessage[] memory messages = new Internal.EVM2EVMMessage[](3);
+ messages[0] = sendRequestGho(1, 1000 * 1e18, false, false);
+ messages[1] = sendRequestGho(2, 2000 * 1e18, false, false);
+ messages[2] = sendRequestGho(3, 3000 * 1e18, false, false);
+
+ uint256 expectedFee = s_sourceRouter.getFee(DEST_CHAIN_SELECTOR, _generateTokenMessage());
+ // Asserts that the tokens have been sent and the fee has been paid.
+ assertEq(preGhoTokenBalanceOwner - 6000 * 1e18, srcGhoToken.balanceOf(OWNER));
+ assertEq(preGhoTokenBalancePool, srcGhoToken.balanceOf(address(srcGhoTokenPool))); // GHO gets burned
+ assertGt(expectedFee, 0);
+ assertEq(dstGhoTokenPool.getCurrentBridgedAmount(), 6000 * 1e18);
+
+ // Facilitator checks
+ (uint256 postCapacity, uint256 postLevel) = GhoToken(address(srcGhoToken)).getFacilitatorBucket(
+ address(srcGhoTokenPool)
+ );
+ assertEq(postCapacity, preCapacity);
+ assertEq(preLevel - 6000 * 1e18, postLevel, "wrong facilitator bucket level");
+
+ bytes32 metaDataHash = s_offRamp.metadataHash();
+
+ bytes32[] memory hashedMessages = new bytes32[](3);
+ hashedMessages[0] = messages[0]._hash(metaDataHash);
+ messages[0].messageId = hashedMessages[0];
+ hashedMessages[1] = messages[1]._hash(metaDataHash);
+ messages[1].messageId = hashedMessages[1];
+ hashedMessages[2] = messages[2]._hash(metaDataHash);
+ messages[2].messageId = hashedMessages[2];
+
+ bytes32[] memory merkleRoots = new bytes32[](1);
+ merkleRoots[0] = MerkleHelper.getMerkleRoot(hashedMessages);
+
+ address[] memory onRamps = new address[](1);
+ onRamps[0] = ON_RAMP_ADDRESS;
+
+ bytes memory commitReport = abi.encode(
+ CommitStore.CommitReport({
+ priceUpdates: getEmptyPriceUpdates(),
+ interval: CommitStore.Interval(messages[0].sequenceNumber, messages[2].sequenceNumber),
+ merkleRoot: merkleRoots[0]
+ })
+ );
+
+ vm.resumeGasMetering();
+ s_commitStore.report(commitReport, ++s_latestEpochAndRound);
+ vm.pauseGasMetering();
+
+ bytes32[] memory proofs = new bytes32[](0);
+ uint256 timestamp = s_commitStore.verify(merkleRoots, proofs, 2 ** 2 - 1);
+ assertEq(BLOCK_TIME, timestamp);
+
+ // We change the block time so that if `execute` were to use the current
+ // block time instead of the committed block time, the values in the
+ // checks below would be incorrect.
+ vm.warp(BLOCK_TIME + 2000);
+
+ vm.expectEmit();
+ emit ExecutionStateChanged(
+ messages[0].sequenceNumber,
+ messages[0].messageId,
+ Internal.MessageExecutionState.SUCCESS,
+ ""
+ );
+
+ vm.expectEmit();
+ emit ExecutionStateChanged(
+ messages[1].sequenceNumber,
+ messages[1].messageId,
+ Internal.MessageExecutionState.SUCCESS,
+ ""
+ );
+
+ vm.expectEmit();
+ emit ExecutionStateChanged(
+ messages[2].sequenceNumber,
+ messages[2].messageId,
+ Internal.MessageExecutionState.SUCCESS,
+ ""
+ );
+
+ Internal.ExecutionReport memory execReport = _generateReportFromMessages(messages);
+
+ uint256 preGhoTokenBalanceUser = dstGhoToken.balanceOf(USER);
+
+ vm.resumeGasMetering();
+ s_offRamp.execute(execReport, new uint256[](0));
+ vm.pauseGasMetering();
+
+ assertEq(preGhoTokenBalanceUser + 6000 * 1e18, dstGhoToken.balanceOf(USER), "Wrong balance on destination");
+ assertEq(dstGhoTokenPool.getCurrentBridgedAmount(), 0);
+ }
+
+ function testRevertRateLimitReached() public {
+ RateLimiter.Config memory rateLimiterConfig = getOutboundRateLimiterConfig();
+
+ // will revert due to rate limit of tokenPool
+ sendRequestGho(1, rateLimiterConfig.capacity + 1, true, false);
+
+ // max capacity, won't revert
+
+ // Mint some GHO to inflate UpgradeableTokenPool facilitator level
+ _inflateFacilitatorLevel(address(srcGhoTokenPool), address(srcGhoToken), rateLimiterConfig.capacity);
+ vm.startPrank(OWNER);
+ sendRequestGho(1, rateLimiterConfig.capacity, false, false);
+
+ // revert due to the rate limiter capacity being exceeded
+ sendRequestGho(2, 100, true, false);
+
+ // advance block time so the rate limiter capacity refills
+ vm.warp(BLOCK_TIME + 1);
+
+ // won't revert due to refill
+ _inflateFacilitatorLevel(address(srcGhoTokenPool), address(srcGhoToken), 100);
+ vm.startPrank(OWNER);
+ sendRequestGho(2, 100, false, false);
+ }
+
+ function testRevertOnLessTokenToCoverFee() public {
+ sendRequestGho(1, 1000, false, true);
+ }
+
+ function sendRequestGho(
+ uint64 expectedSeqNum,
+ uint256 amount,
+ bool expectRevert,
+ bool sendLessFee
+ ) public returns (Internal.EVM2EVMMessage memory) {
+ Client.EVM2AnyMessage memory message = _generateSingleTokenMessage(address(srcGhoToken), amount);
+ uint256 expectedFee = s_sourceRouter.getFee(DEST_CHAIN_SELECTOR, message);
+
+ // error management: sending less than the required fee must always revert
+ uint256 feeToSend = sendLessFee ? expectedFee - 1 : expectedFee;
+ expectRevert = sendLessFee ? true : expectRevert;
+
+ IERC20(s_sourceTokens[0]).approve(address(s_sourceRouter), feeToSend); // fee
+ IERC20(srcGhoToken).approve(address(s_sourceRouter), amount); // amount
+
+ message.receiver = abi.encode(USER);
+ Internal.EVM2EVMMessage memory geEvent = _messageToEvent(
+ message,
+ expectedSeqNum,
+ expectedSeqNum,
+ expectedFee,
+ OWNER
+ );
+
+ if (!expectRevert) {
+ vm.expectEmit();
+ emit CCIPSendRequested(geEvent);
+ } else {
+ vm.expectRevert();
+ }
+ vm.resumeGasMetering();
+ s_sourceRouter.ccipSend(DEST_CHAIN_SELECTOR, message);
+ vm.pauseGasMetering();
+
+ return geEvent;
+ }
+}
diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolRemoteSetup.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolRemoteSetup.t.sol
new file mode 100644
index 0000000000..402ca41b17
--- /dev/null
+++ b/contracts/src/v0.8/ccip/test/pools/GHO/GHOTokenPoolRemoteSetup.t.sol
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import {GhoToken} from "@aave/gho-core/gho/GhoToken.sol";
+import {TransparentUpgradeableProxy} from "solidity-utils/contracts/transparent-proxy/TransparentUpgradeableProxy.sol";
+
+import {stdError} from "forge-std/Test.sol";
+import {UpgradeableTokenPool} from "../../../pools/GHO/UpgradeableTokenPool.sol";
+import {Router} from "../../../Router.sol";
+import {BurnMintERC677} from "../../../../shared/token/ERC677/BurnMintERC677.sol";
+import {UpgradeableBurnMintTokenPool} from "../../../pools/GHO/UpgradeableBurnMintTokenPool.sol";
+import {RouterSetup} from "../../router/RouterSetup.t.sol";
+
+contract GHOTokenPoolRemoteSetup is RouterSetup {
+ event Transfer(address indexed from, address indexed to, uint256 value);
+ event TokensConsumed(uint256 tokens);
+ event Burned(address indexed sender, uint256 amount);
+
+ BurnMintERC677 internal s_burnMintERC677;
+ address internal s_burnMintOffRamp = makeAddr("burn_mint_offRamp");
+ address internal s_burnMintOnRamp = makeAddr("burn_mint_onRamp");
+
+ UpgradeableBurnMintTokenPool internal s_pool;
+
+ address internal AAVE_DAO = makeAddr("AAVE_DAO");
+ address internal PROXY_ADMIN = makeAddr("PROXY_ADMIN");
+
+ function setUp() public virtual override {
+ RouterSetup.setUp();
+
+ // GHO deployment
+ GhoToken ghoToken = new GhoToken(AAVE_DAO);
+ s_burnMintERC677 = BurnMintERC677(address(ghoToken));
+
+ s_pool = UpgradeableBurnMintTokenPool(
+ _deployUpgradeableBurnMintTokenPool(
+ address(s_burnMintERC677),
+ address(s_mockARM),
+ address(s_sourceRouter),
+ AAVE_DAO,
+ PROXY_ADMIN
+ )
+ );
+
+ // Give mint and burn privileges to the token pool by registering it as a GHO facilitator (GHO-specific)
+ vm.stopPrank();
+ vm.startPrank(AAVE_DAO);
+ GhoToken(address(s_burnMintERC677)).grantRole(
+ GhoToken(address(s_burnMintERC677)).FACILITATOR_MANAGER_ROLE(),
+ AAVE_DAO
+ );
+ GhoToken(address(s_burnMintERC677)).addFacilitator(address(s_pool), "UpgradeableTokenPool", type(uint128).max);
+ vm.stopPrank();
+
+ _applyChainUpdates(address(s_pool));
+ }
+
+ function _applyChainUpdates(address pool) internal {
+ UpgradeableTokenPool.ChainUpdate[] memory chains = new UpgradeableTokenPool.ChainUpdate[](1);
+ chains[0] = UpgradeableTokenPool.ChainUpdate({
+ remoteChainSelector: DEST_CHAIN_SELECTOR,
+ allowed: true,
+ outboundRateLimiterConfig: getOutboundRateLimiterConfig(),
+ inboundRateLimiterConfig: getInboundRateLimiterConfig()
+ });
+
+ vm.startPrank(AAVE_DAO);
+ UpgradeableBurnMintTokenPool(pool).applyChainUpdates(chains);
+ vm.stopPrank();
+ vm.startPrank(OWNER);
+
+ Router.OnRamp[] memory onRampUpdates = new Router.OnRamp[](1);
+ onRampUpdates[0] = Router.OnRamp({destChainSelector: DEST_CHAIN_SELECTOR, onRamp: s_burnMintOnRamp});
+ Router.OffRamp[] memory offRampUpdates = new Router.OffRamp[](1);
+ offRampUpdates[0] = Router.OffRamp({sourceChainSelector: DEST_CHAIN_SELECTOR, offRamp: s_burnMintOffRamp});
+ s_sourceRouter.applyRampUpdates(onRampUpdates, new Router.OffRamp[](0), offRampUpdates);
+ }
+}
diff --git a/contracts/src/v0.8/shared/access/ConfirmedOwnerWithProposal.sol b/contracts/src/v0.8/shared/access/ConfirmedOwnerWithProposal.sol
index 7b68418754..b02296f9ff 100644
--- a/contracts/src/v0.8/shared/access/ConfirmedOwnerWithProposal.sol
+++ b/contracts/src/v0.8/shared/access/ConfirmedOwnerWithProposal.sol
@@ -45,10 +45,7 @@ contract ConfirmedOwnerWithProposal is IOwnable {
}
/// @notice validate, transfer ownership, and emit relevant events
- function _transferOwnership(address to) private {
- // solhint-disable-next-line custom-errors
- require(to != msg.sender, "Cannot transfer to self");
-
+ function _transferOwnership(address to) internal {
s_pendingOwner = to;
emit OwnershipTransferRequested(s_owner, to);