From 1d6f4ce9e96cf5604617314049f56e2e6fb5fcdf Mon Sep 17 00:00:00 2001 From: Sam Davies Date: Thu, 14 Sep 2023 13:52:10 -0400 Subject: [PATCH] Implement Data Streams plugin --- .../scripts/native_solc_compile_all_llo-feeds | 8 +- .../v0.8/llo-feeds/dev/StreamConfigStore.sol | 107 ++ .../dev/interfaces/IStreamConfigStore.sol | 34 + .../stream_config_store.go | 1322 +++++++++++++++++ ...rapper-dependency-versions-do-not-edit.txt | 1 + core/gethwrappers/llo-feeds/go_generate.go | 1 + core/scripts/go.sum | 4 +- core/services/functions/connector_handler.go | 4 +- core/services/gateway/connector/connector.go | 4 +- core/services/job/orm.go | 2 +- core/services/job/spawner.go | 10 +- .../keystore/keys/ocr2key/cosmos_keyring.go | 59 +- .../keystore/keys/ocr2key/evm_keyring.go | 49 +- .../keys/ocr2key/generic_key_bundle.go | 9 + .../keystore/keys/ocr2key/key_bundle.go | 8 + .../keystore/keys/ocr2key/solana_keyring.go | 46 +- .../keystore/keys/starkkey/ocr2key.go | 9 +- core/services/ocr2/delegate.go | 99 +- .../evmregistry/v21/logprovider/recoverer.go | 2 +- core/services/ocr2/plugins/s4/plugin_test.go | 2 +- .../ocr2/plugins/streams/config/config.go | 52 + .../ocr2/plugins/streams/helpers_test.go | 463 ++++++ .../ocr2/plugins/streams/integration_test.go | 364 +++++ core/services/ocr2/plugins/streams/plugin.go | 1 + core/services/ocr2/validate/validate.go | 12 + core/services/relay/evm/evm.go | 49 +- .../relay/evm/functions/logpoller_wrapper.go | 4 +- .../services/relay/evm/mercury/transmitter.go | 6 +- .../relay/evm/mercury/transmitter_test.go | 33 +- .../relay/evm/mercury/v1/data_source.go | 2 +- .../relay/evm/mocks/loop_relay_adapter.go | 30 + core/services/relay/evm/streams_provider.go | 86 ++ .../streams/channel_definition_cache.go | 228 +++ .../channel_definition_cache_factory.go | 37 + .../streams/channel_definition_cache_test.go | 9 + core/services/streams/data_source.go | 91 ++ core/services/streams/data_source_test.go | 90 ++ core/services/streams/delegate.go | 92 
++ core/services/streams/keyring.go | 60 + core/services/streams/keyring_test.go | 7 + core/services/streams/orm.go | 53 + core/services/streams/orm_test.go | 71 + core/services/streams/stream.go | 118 ++ core/services/streams/stream_cache.go | 43 + core/services/streams/stream_cache_test.go | 73 + core/services/streams/transmitter.go | 74 + core/services/vrf/v1/listener_v1.go | 2 +- .../migrations/0213_create_streams.sql | 10 + .../0214_create_channel_definition_caches.sql | 14 + go.md | 3 + go.mod | 11 +- go.sum | 18 +- integration-tests/go.sum | 7 + 53 files changed, 3886 insertions(+), 107 deletions(-) create mode 100644 contracts/src/v0.8/llo-feeds/dev/StreamConfigStore.sol create mode 100644 contracts/src/v0.8/llo-feeds/dev/interfaces/IStreamConfigStore.sol create mode 100644 core/gethwrappers/llo-feeds/generated/stream_config_store/stream_config_store.go create mode 100644 core/services/ocr2/plugins/streams/config/config.go create mode 100644 core/services/ocr2/plugins/streams/helpers_test.go create mode 100644 core/services/ocr2/plugins/streams/integration_test.go create mode 100644 core/services/ocr2/plugins/streams/plugin.go create mode 100644 core/services/relay/evm/streams_provider.go create mode 100644 core/services/streams/channel_definition_cache.go create mode 100644 core/services/streams/channel_definition_cache_factory.go create mode 100644 core/services/streams/channel_definition_cache_test.go create mode 100644 core/services/streams/data_source.go create mode 100644 core/services/streams/data_source_test.go create mode 100644 core/services/streams/delegate.go create mode 100644 core/services/streams/keyring.go create mode 100644 core/services/streams/keyring_test.go create mode 100644 core/services/streams/orm.go create mode 100644 core/services/streams/orm_test.go create mode 100644 core/services/streams/stream.go create mode 100644 core/services/streams/stream_cache.go create mode 100644 core/services/streams/stream_cache_test.go create mode 
100644 core/services/streams/transmitter.go create mode 100644 core/store/migrate/migrations/0213_create_streams.sql create mode 100644 core/store/migrate/migrations/0214_create_channel_definition_caches.sql diff --git a/contracts/scripts/native_solc_compile_all_llo-feeds b/contracts/scripts/native_solc_compile_all_llo-feeds index 2caa6fb98de..9410d815757 100755 --- a/contracts/scripts/native_solc_compile_all_llo-feeds +++ b/contracts/scripts/native_solc_compile_all_llo-feeds @@ -33,6 +33,10 @@ compileContract llo-feeds/VerifierProxy.sol compileContract llo-feeds/FeeManager.sol compileContract llo-feeds/RewardManager.sol -#Test | Mocks + +# Test | Mocks compileContract llo-feeds/test/mocks/ErroredVerifier.sol -compileContract llo-feeds/test/mocks/ExposedVerifier.sol \ No newline at end of file +compileContract llo-feeds/test/mocks/ExposedVerifier.sol + +# Streams +compileContract llo-feeds/dev/StreamConfigStore.sol diff --git a/contracts/src/v0.8/llo-feeds/dev/StreamConfigStore.sol b/contracts/src/v0.8/llo-feeds/dev/StreamConfigStore.sol new file mode 100644 index 00000000000..4b1954db5a9 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/dev/StreamConfigStore.sol @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity ^0.8.16; + +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; +import {IStreamConfigStore} from "./interfaces/IStreamConfigStore.sol"; +import {TypeAndVersionInterface} from "../../interfaces/TypeAndVersionInterface.sol"; + +contract StreamConfigStore is ConfirmedOwner, IStreamConfigStore, TypeAndVersionInterface { + + mapping(bytes32 => ChannelDefinition) private s_channelDefinitions; + + mapping(bytes32 => ChannelConfiguration) private s_channelProductionConfigurations; + mapping(bytes32 => ChannelConfiguration) private s_channelStagingConfigurations; + + event NewChannelDefinition(bytes32 channelId, ChannelDefinition channelDefinition); + event ChannelDefinitionRemoved(bytes32 channelId); + event 
NewProductionConfig(ChannelConfiguration channelConfig); + event NewStagingConfig(ChannelConfiguration channelConfig); + event PromoteStagingConfig(bytes32 channelId); + + error OnlyCallableByEOA(); + error StagingConfigAlreadyPromoted(); + error EmptyStreamIDs(); + error ZeroReportFormat(); + error ZeroChainSelector(); + error ChannelDefinitionNotFound(); + + constructor() ConfirmedOwner(msg.sender) {} + + function setStagingConfig(bytes32 channelId, ChannelConfiguration calldata channelConfig) external onlyOwner { + s_channelStagingConfigurations[channelId] = channelConfig; + + emit NewStagingConfig(channelConfig); + } + + //// this will trigger the following: + //// - offchain ShouldRetireCache will start returning true for the old (production) + //// protocol instance + //// - once the old production instance retires it will generate a handover + //// retirement report + //// - the staging instance will become the new production instance once + //// any honest oracle that is on both instances forward the retirement + //// report from the old instance to the new instace via the + //// PredecessorRetirementReportCache + //// + //// Note: the promotion flow only works if the previous production instance + //// is working correctly & generating reports. If that's not the case, the + //// owner is expected to "setProductionConfig" directly instead. This will + //// cause "gaps" to be created, but that seems unavoidable in such a scenario. 
+ function promoteStagingConfig(bytes32 channelId) external onlyOwner { + ChannelConfiguration memory stagingConfig = s_channelStagingConfigurations[channelId]; + + if(stagingConfig.channelConfigId.length == 0) { + revert StagingConfigAlreadyPromoted(); + } + + s_channelProductionConfigurations[channelId] = s_channelStagingConfigurations[channelId]; + + emit PromoteStagingConfig(channelId); + } + + function addChannel(bytes32 channelId, ChannelDefinition calldata channelDefinition) external onlyOwner { + + if(channelDefinition.streamIDs.length == 0) { + revert EmptyStreamIDs(); + } + + if(channelDefinition.chainSelector == 0) { + revert ZeroChainSelector(); + } + + if(channelDefinition.reportFormat == 0) { + revert ZeroReportFormat(); + } + + s_channelDefinitions[channelId] = channelDefinition; + + emit NewChannelDefinition(channelId, channelDefinition); + } + + function removeChannel(bytes32 channelId) external onlyOwner { + if(s_channelDefinitions[channelId].streamIDs.length == 0) { + revert ChannelDefinitionNotFound(); + } + + delete s_channelDefinitions[channelId]; + + emit ChannelDefinitionRemoved(channelId); + } + + function getChannelDefinitions(bytes32 channelId) external view returns (ChannelDefinition memory) { + if(msg.sender != tx.origin) { + revert OnlyCallableByEOA(); + } + + return s_channelDefinitions[channelId]; + } + + function typeAndVersion() external override pure returns (string memory) { + return "StreamConfigStore 0.0.0"; + } + + function supportsInterface(bytes4 interfaceId) external pure returns (bool) { + return interfaceId == type(IStreamConfigStore).interfaceId; + } +} + diff --git a/contracts/src/v0.8/llo-feeds/dev/interfaces/IStreamConfigStore.sol b/contracts/src/v0.8/llo-feeds/dev/interfaces/IStreamConfigStore.sol new file mode 100644 index 00000000000..8ae67432787 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/dev/interfaces/IStreamConfigStore.sol @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.16; + +import 
{IERC165} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol"; + +interface IStreamConfigStore is IERC165 { + + function setStagingConfig(bytes32 channelId, ChannelConfiguration calldata channelConfig) external; + + function promoteStagingConfig(bytes32 channelId) external; + + function addChannel(bytes32 channelId, ChannelDefinition calldata channelDefinition) external; + + function removeChannel(bytes32 channelId) external; + + function getChannelDefinitions(bytes32 channelId) external view returns (ChannelDefinition memory); + + struct ChannelConfiguration { + bytes32 channelConfigId; + } + + struct ChannelDefinition { + // e.g. evm, solana, CosmWasm, kalechain, etc... + bytes8 reportFormat; + // Specifies the chain on which this channel can be verified. Currently uses + // CCIP chain selectors, but lots of other schemes are possible as well. + uint64 chainSelector; + // We assume that StreamIDs is always non-empty and that the 0-th stream + // contains the verification price in LINK and the 1-st stream contains the + // verification price in the native coin. + bytes32[] streamIDs; + } + +} diff --git a/core/gethwrappers/llo-feeds/generated/stream_config_store/stream_config_store.go b/core/gethwrappers/llo-feeds/generated/stream_config_store/stream_config_store.go new file mode 100644 index 00000000000..95617672aa1 --- /dev/null +++ b/core/gethwrappers/llo-feeds/generated/stream_config_store/stream_config_store.go @@ -0,0 +1,1322 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package stream_config_store + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type IStreamConfigStoreChannelConfiguration struct { + ChannelConfigId [32]byte +} + +type IStreamConfigStoreChannelDefinition struct { + ReportFormat [8]byte + ChainSelector uint64 + StreamIDs [][32]byte +} + +var StreamConfigStoreMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ChannelDefinitionNotFound\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptyStreamIDs\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByEOA\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"StagingConfigAlreadyPromoted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroChainSelector\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroReportFormat\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"channelId\",\"type\":\"bytes32\"}],\"name\":\"ChannelDefinitionRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"channelId\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"bytes8\",\"name\":\"reportFormat\",\"type\":\"bytes8\"},{\"internalType\":\"uint64\",\"name\":\"chainSelector\",\"type\":\"uint64\"},{\"internalType\":\"bytes32[]\",\"name\":\"streamIDs\",\"type\":\"bytes32[]\"}],\"indexed\":false,\"internalType\"
:\"structIStreamConfigStore.ChannelDefinition\",\"name\":\"channelDefinition\",\"type\":\"tuple\"}],\"name\":\"NewChannelDefinition\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"channelConfigId\",\"type\":\"bytes32\"}],\"indexed\":false,\"internalType\":\"structIStreamConfigStore.ChannelConfiguration\",\"name\":\"channelConfig\",\"type\":\"tuple\"}],\"name\":\"NewProductionConfig\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"channelConfigId\",\"type\":\"bytes32\"}],\"indexed\":false,\"internalType\":\"structIStreamConfigStore.ChannelConfiguration\",\"name\":\"channelConfig\",\"type\":\"tuple\"}],\"name\":\"NewStagingConfig\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"channelId\",\"type\":\"bytes32\"}],\"name\":\"PromoteStagingConfig\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"channelId\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"bytes8\",\"name\":\"reportFormat\",\"type\":\"bytes8\"},{\"internalType\":\"uint64\",\"name\":\"chainSelector\",\"type\":\"uint64\"},{\"internalType\":\"bytes32[]\",\"name\":\"streamIDs\",\"type\":\"bytes32[]\"}],\"internalType\":\"structIStreamConfigStore.Channel
Definition\",\"name\":\"channelDefinition\",\"type\":\"tuple\"}],\"name\":\"addChannel\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"channelId\",\"type\":\"bytes32\"}],\"name\":\"getChannelDefinitions\",\"outputs\":[{\"components\":[{\"internalType\":\"bytes8\",\"name\":\"reportFormat\",\"type\":\"bytes8\"},{\"internalType\":\"uint64\",\"name\":\"chainSelector\",\"type\":\"uint64\"},{\"internalType\":\"bytes32[]\",\"name\":\"streamIDs\",\"type\":\"bytes32[]\"}],\"internalType\":\"structIStreamConfigStore.ChannelDefinition\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"channelId\",\"type\":\"bytes32\"}],\"name\":\"promoteStagingConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"channelId\",\"type\":\"bytes32\"}],\"name\":\"removeChannel\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"channelId\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"channelConfigId\",\"type\":\"bytes32\"}],\"internalType\":\"structIStreamConfigStore.ChannelConfiguration\",\"name\":\"channelConfig\",\"type\":\"tuple\"}],\"name\":\"setStagingConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes4\",\"name\":\"interfaceId\",\"type\":\"bytes4\"}],\"name\":\"supportsInterface\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"trans
ferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b5033806000816100675760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615610097576100978161009f565b505050610148565b336001600160a01b038216036100f75760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161005e565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b610eef806101576000396000f3fe608060405234801561001057600080fd5b50600436106100be5760003560e01c806379ba5097116100765780639a223b8e1161005b5780639a223b8e146101d7578063dd2ef3ef146101ea578063f2fde38b1461020a57600080fd5b806379ba5097146101a75780638da5cb5b146101af57600080fd5b8063181f5a77116100a7578063181f5a7714610142578063414aa93414610181578063567ed3371461019457600080fd5b806301ffc9a7146100c357806302da0e511461012d575b600080fd5b6101186100d13660046108c8565b7fffffffff00000000000000000000000000000000000000000000000000000000167f52e2bc33000000000000000000000000000000000000000000000000000000001490565b60405190151581526020015b60405180910390f35b61014061013b366004610911565b61021d565b005b604080518082018252601781527f53747265616d436f6e66696753746f726520302e302e3000000000000000000060208201529051610124919061092a565b61014061018f366004610996565b6102e9565b6101406101a2366004610911565b610340565b6101406103a0565b60005460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610124565b6101406101e53660046109ec565b6104a2565b6101fd6101f8366004610911565b6105fb565b6040516101249190610a
3a565b610140610218366004610acc565b61070a565b61022561071e565b600081815260026020526040812060010154900361026f576040517fd1a751e200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600081815260026020526040812080547fffffffffffffffffffffffffffffffff00000000000000000000000000000000168155906102b16001830182610896565b50506040518181527fa65638d745b306456ab0961a502338c1f24d1f962615be3d154c4e86e879fc749060200160405180910390a150565b6102f161071e565b60008281526004602052604090208135815581905050604051813581527f56d7e0e88863044d9b3e139e6e9c18977c6e81c387e44ce9661611f53035c7ed906020015b60405180910390a15050565b61034861071e565b6000818152600460209081526040808320815180840183529054808252858552600384529382902093909355518381527fe644aaaa8169119e133c9b338279b4305419a255ace92b4383df2f45f7daa7a89101610334565b60015473ffffffffffffffffffffffffffffffffffffffff163314610426576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6104aa61071e565b6104b76040820182610b02565b90506000036104f2576040517f4b620e2400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6105026040820160208301610b87565b67ffffffffffffffff16600003610545576040517ff89d762900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6105526020820182610bd2565b7fffffffffffffffff000000000000000000000000000000000000000000000000166000036105ad576040517febd3ef0200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082815260026020526040902081906105c78282610cb7565b9050507f3f9f883dedd481d6ebe8e3efd5ef57f4b0293a3cf1d85946913dd82723542cc682826040
51610334929190610e05565b60408051606080820183526000808352602083015291810191909152333214610650576040517f74e2cd5100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000828152600260209081526040918290208251606081018452815460c081901b7fffffffffffffffff00000000000000000000000000000000000000000000000016825268010000000000000000900467ffffffffffffffff1681840152600182018054855181860281018601875281815292959394938601938301828280156106fa57602002820191906000526020600020905b8154815260200190600101908083116106e6575b5050505050815250509050919050565b61071261071e565b61071b816107a1565b50565b60005473ffffffffffffffffffffffffffffffffffffffff16331461079f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640161041d565b565b3373ffffffffffffffffffffffffffffffffffffffff821603610820576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161041d565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b508054600082559060005260206000209081019061071b91905b808211156108c457600081556001016108b0565b5090565b6000602082840312156108da57600080fd5b81357fffffffff000000000000000000000000000000000000000000000000000000008116811461090a57600080fd5b9392505050565b60006020828403121561092357600080fd5b5035919050565b600060208083528351808285015260005b818110156109575785810183015185820160400152820161093b565b5060006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b60008082840360408112156109aa57600080fd5b8335925060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0820112156109
de57600080fd5b506020830190509250929050565b600080604083850312156109ff57600080fd5b82359150602083013567ffffffffffffffff811115610a1d57600080fd5b830160608186031215610a2f57600080fd5b809150509250929050565b60006020808352608083017fffffffffffffffff0000000000000000000000000000000000000000000000008551168285015267ffffffffffffffff82860151166040850152604085015160608086015281815180845260a0870191508483019350600092505b80831015610ac15783518252928401926001929092019190840190610aa1565b509695505050505050565b600060208284031215610ade57600080fd5b813573ffffffffffffffffffffffffffffffffffffffff8116811461090a57600080fd5b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112610b3757600080fd5b83018035915067ffffffffffffffff821115610b5257600080fd5b6020019150600581901b3603821315610b6a57600080fd5b9250929050565b67ffffffffffffffff8116811461071b57600080fd5b600060208284031215610b9957600080fd5b813561090a81610b71565b7fffffffffffffffff0000000000000000000000000000000000000000000000008116811461071b57600080fd5b600060208284031215610be457600080fd5b813561090a81610ba4565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b67ffffffffffffffff831115610c3657610c36610bef565b68010000000000000000831115610c4f57610c4f610bef565b805483825580841015610c86576000828152602081208581019083015b80821015610c8257828255600182019150610c6c565b5050505b5060008181526020812083915b85811015610caf57823582820155602090920191600101610c93565b505050505050565b8135610cc281610ba4565b8060c01c90508154817fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000082161783556020840135610cff81610b71565b6fffffffffffffffff00000000000000008160401b16837fffffffffffffffffffffffffffffffff0000000000000000000000000000000084161717845550505060408201357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1833603018112610d7557600080fd5b8201803567ffffffffffffffff811115610d8e57600080fd5b6020820191508060051b3603821315610da657600080fd5b610db4818360018601610c1e565b50505050565b8183526000
7f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff831115610dec57600080fd5b8260051b80836020870137939093016020019392505050565b8281526040602082015260008235610e1c81610ba4565b7fffffffffffffffff0000000000000000000000000000000000000000000000001660408301526020830135610e5181610b71565b67ffffffffffffffff8082166060850152604085013591507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1853603018212610e9957600080fd5b6020918501918201913581811115610eb057600080fd5b8060051b3603831315610ec257600080fd5b60606080860152610ed760a086018285610dba565b97965050505050505056fea164736f6c6343000810000a", +} + +var StreamConfigStoreABI = StreamConfigStoreMetaData.ABI + +var StreamConfigStoreBin = StreamConfigStoreMetaData.Bin + +func DeployStreamConfigStore(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *StreamConfigStore, error) { + parsed, err := StreamConfigStoreMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(StreamConfigStoreBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &StreamConfigStore{address: address, abi: *parsed, StreamConfigStoreCaller: StreamConfigStoreCaller{contract: contract}, StreamConfigStoreTransactor: StreamConfigStoreTransactor{contract: contract}, StreamConfigStoreFilterer: StreamConfigStoreFilterer{contract: contract}}, nil +} + +type StreamConfigStore struct { + address common.Address + abi abi.ABI + StreamConfigStoreCaller + StreamConfigStoreTransactor + StreamConfigStoreFilterer +} + +type StreamConfigStoreCaller struct { + contract *bind.BoundContract +} + +type StreamConfigStoreTransactor struct { + contract *bind.BoundContract +} + +type StreamConfigStoreFilterer struct { + contract *bind.BoundContract +} + +type 
StreamConfigStoreSession struct { + Contract *StreamConfigStore + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type StreamConfigStoreCallerSession struct { + Contract *StreamConfigStoreCaller + CallOpts bind.CallOpts +} + +type StreamConfigStoreTransactorSession struct { + Contract *StreamConfigStoreTransactor + TransactOpts bind.TransactOpts +} + +type StreamConfigStoreRaw struct { + Contract *StreamConfigStore +} + +type StreamConfigStoreCallerRaw struct { + Contract *StreamConfigStoreCaller +} + +type StreamConfigStoreTransactorRaw struct { + Contract *StreamConfigStoreTransactor +} + +func NewStreamConfigStore(address common.Address, backend bind.ContractBackend) (*StreamConfigStore, error) { + abi, err := abi.JSON(strings.NewReader(StreamConfigStoreABI)) + if err != nil { + return nil, err + } + contract, err := bindStreamConfigStore(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &StreamConfigStore{address: address, abi: abi, StreamConfigStoreCaller: StreamConfigStoreCaller{contract: contract}, StreamConfigStoreTransactor: StreamConfigStoreTransactor{contract: contract}, StreamConfigStoreFilterer: StreamConfigStoreFilterer{contract: contract}}, nil +} + +func NewStreamConfigStoreCaller(address common.Address, caller bind.ContractCaller) (*StreamConfigStoreCaller, error) { + contract, err := bindStreamConfigStore(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &StreamConfigStoreCaller{contract: contract}, nil +} + +func NewStreamConfigStoreTransactor(address common.Address, transactor bind.ContractTransactor) (*StreamConfigStoreTransactor, error) { + contract, err := bindStreamConfigStore(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &StreamConfigStoreTransactor{contract: contract}, nil +} + +func NewStreamConfigStoreFilterer(address common.Address, filterer bind.ContractFilterer) (*StreamConfigStoreFilterer, error) { + contract, err := 
bindStreamConfigStore(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &StreamConfigStoreFilterer{contract: contract}, nil +} + +func bindStreamConfigStore(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := StreamConfigStoreMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_StreamConfigStore *StreamConfigStoreRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _StreamConfigStore.Contract.StreamConfigStoreCaller.contract.Call(opts, result, method, params...) +} + +func (_StreamConfigStore *StreamConfigStoreRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _StreamConfigStore.Contract.StreamConfigStoreTransactor.contract.Transfer(opts) +} + +func (_StreamConfigStore *StreamConfigStoreRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _StreamConfigStore.Contract.StreamConfigStoreTransactor.contract.Transact(opts, method, params...) +} + +func (_StreamConfigStore *StreamConfigStoreCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _StreamConfigStore.Contract.contract.Call(opts, result, method, params...) +} + +func (_StreamConfigStore *StreamConfigStoreTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _StreamConfigStore.Contract.contract.Transfer(opts) +} + +func (_StreamConfigStore *StreamConfigStoreTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _StreamConfigStore.Contract.contract.Transact(opts, method, params...) 
+} + +func (_StreamConfigStore *StreamConfigStoreCaller) GetChannelDefinitions(opts *bind.CallOpts, channelId [32]byte) (IStreamConfigStoreChannelDefinition, error) { + var out []interface{} + err := _StreamConfigStore.contract.Call(opts, &out, "getChannelDefinitions", channelId) + + if err != nil { + return *new(IStreamConfigStoreChannelDefinition), err + } + + out0 := *abi.ConvertType(out[0], new(IStreamConfigStoreChannelDefinition)).(*IStreamConfigStoreChannelDefinition) + + return out0, err + +} + +func (_StreamConfigStore *StreamConfigStoreSession) GetChannelDefinitions(channelId [32]byte) (IStreamConfigStoreChannelDefinition, error) { + return _StreamConfigStore.Contract.GetChannelDefinitions(&_StreamConfigStore.CallOpts, channelId) +} + +func (_StreamConfigStore *StreamConfigStoreCallerSession) GetChannelDefinitions(channelId [32]byte) (IStreamConfigStoreChannelDefinition, error) { + return _StreamConfigStore.Contract.GetChannelDefinitions(&_StreamConfigStore.CallOpts, channelId) +} + +func (_StreamConfigStore *StreamConfigStoreCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _StreamConfigStore.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_StreamConfigStore *StreamConfigStoreSession) Owner() (common.Address, error) { + return _StreamConfigStore.Contract.Owner(&_StreamConfigStore.CallOpts) +} + +func (_StreamConfigStore *StreamConfigStoreCallerSession) Owner() (common.Address, error) { + return _StreamConfigStore.Contract.Owner(&_StreamConfigStore.CallOpts) +} + +func (_StreamConfigStore *StreamConfigStoreCaller) SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) { + var out []interface{} + err := _StreamConfigStore.contract.Call(opts, &out, "supportsInterface", interfaceId) + + if err != nil { + return *new(bool), err + } + + out0 := 
*abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_StreamConfigStore *StreamConfigStoreSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _StreamConfigStore.Contract.SupportsInterface(&_StreamConfigStore.CallOpts, interfaceId) +} + +func (_StreamConfigStore *StreamConfigStoreCallerSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _StreamConfigStore.Contract.SupportsInterface(&_StreamConfigStore.CallOpts, interfaceId) +} + +func (_StreamConfigStore *StreamConfigStoreCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _StreamConfigStore.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_StreamConfigStore *StreamConfigStoreSession) TypeAndVersion() (string, error) { + return _StreamConfigStore.Contract.TypeAndVersion(&_StreamConfigStore.CallOpts) +} + +func (_StreamConfigStore *StreamConfigStoreCallerSession) TypeAndVersion() (string, error) { + return _StreamConfigStore.Contract.TypeAndVersion(&_StreamConfigStore.CallOpts) +} + +func (_StreamConfigStore *StreamConfigStoreTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _StreamConfigStore.contract.Transact(opts, "acceptOwnership") +} + +func (_StreamConfigStore *StreamConfigStoreSession) AcceptOwnership() (*types.Transaction, error) { + return _StreamConfigStore.Contract.AcceptOwnership(&_StreamConfigStore.TransactOpts) +} + +func (_StreamConfigStore *StreamConfigStoreTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _StreamConfigStore.Contract.AcceptOwnership(&_StreamConfigStore.TransactOpts) +} + +func (_StreamConfigStore *StreamConfigStoreTransactor) AddChannel(opts *bind.TransactOpts, channelId [32]byte, channelDefinition IStreamConfigStoreChannelDefinition) (*types.Transaction, error) { + 
return _StreamConfigStore.contract.Transact(opts, "addChannel", channelId, channelDefinition) +} + +func (_StreamConfigStore *StreamConfigStoreSession) AddChannel(channelId [32]byte, channelDefinition IStreamConfigStoreChannelDefinition) (*types.Transaction, error) { + return _StreamConfigStore.Contract.AddChannel(&_StreamConfigStore.TransactOpts, channelId, channelDefinition) +} + +func (_StreamConfigStore *StreamConfigStoreTransactorSession) AddChannel(channelId [32]byte, channelDefinition IStreamConfigStoreChannelDefinition) (*types.Transaction, error) { + return _StreamConfigStore.Contract.AddChannel(&_StreamConfigStore.TransactOpts, channelId, channelDefinition) +} + +func (_StreamConfigStore *StreamConfigStoreTransactor) PromoteStagingConfig(opts *bind.TransactOpts, channelId [32]byte) (*types.Transaction, error) { + return _StreamConfigStore.contract.Transact(opts, "promoteStagingConfig", channelId) +} + +func (_StreamConfigStore *StreamConfigStoreSession) PromoteStagingConfig(channelId [32]byte) (*types.Transaction, error) { + return _StreamConfigStore.Contract.PromoteStagingConfig(&_StreamConfigStore.TransactOpts, channelId) +} + +func (_StreamConfigStore *StreamConfigStoreTransactorSession) PromoteStagingConfig(channelId [32]byte) (*types.Transaction, error) { + return _StreamConfigStore.Contract.PromoteStagingConfig(&_StreamConfigStore.TransactOpts, channelId) +} + +func (_StreamConfigStore *StreamConfigStoreTransactor) RemoveChannel(opts *bind.TransactOpts, channelId [32]byte) (*types.Transaction, error) { + return _StreamConfigStore.contract.Transact(opts, "removeChannel", channelId) +} + +func (_StreamConfigStore *StreamConfigStoreSession) RemoveChannel(channelId [32]byte) (*types.Transaction, error) { + return _StreamConfigStore.Contract.RemoveChannel(&_StreamConfigStore.TransactOpts, channelId) +} + +func (_StreamConfigStore *StreamConfigStoreTransactorSession) RemoveChannel(channelId [32]byte) (*types.Transaction, error) { + return 
_StreamConfigStore.Contract.RemoveChannel(&_StreamConfigStore.TransactOpts, channelId) +} + +func (_StreamConfigStore *StreamConfigStoreTransactor) SetStagingConfig(opts *bind.TransactOpts, channelId [32]byte, channelConfig IStreamConfigStoreChannelConfiguration) (*types.Transaction, error) { + return _StreamConfigStore.contract.Transact(opts, "setStagingConfig", channelId, channelConfig) +} + +func (_StreamConfigStore *StreamConfigStoreSession) SetStagingConfig(channelId [32]byte, channelConfig IStreamConfigStoreChannelConfiguration) (*types.Transaction, error) { + return _StreamConfigStore.Contract.SetStagingConfig(&_StreamConfigStore.TransactOpts, channelId, channelConfig) +} + +func (_StreamConfigStore *StreamConfigStoreTransactorSession) SetStagingConfig(channelId [32]byte, channelConfig IStreamConfigStoreChannelConfiguration) (*types.Transaction, error) { + return _StreamConfigStore.Contract.SetStagingConfig(&_StreamConfigStore.TransactOpts, channelId, channelConfig) +} + +func (_StreamConfigStore *StreamConfigStoreTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _StreamConfigStore.contract.Transact(opts, "transferOwnership", to) +} + +func (_StreamConfigStore *StreamConfigStoreSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _StreamConfigStore.Contract.TransferOwnership(&_StreamConfigStore.TransactOpts, to) +} + +func (_StreamConfigStore *StreamConfigStoreTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _StreamConfigStore.Contract.TransferOwnership(&_StreamConfigStore.TransactOpts, to) +} + +type StreamConfigStoreChannelDefinitionRemovedIterator struct { + Event *StreamConfigStoreChannelDefinitionRemoved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StreamConfigStoreChannelDefinitionRemovedIterator) Next() bool { + + if 
it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StreamConfigStoreChannelDefinitionRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StreamConfigStoreChannelDefinitionRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StreamConfigStoreChannelDefinitionRemovedIterator) Error() error { + return it.fail +} + +func (it *StreamConfigStoreChannelDefinitionRemovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StreamConfigStoreChannelDefinitionRemoved struct { + ChannelId [32]byte + Raw types.Log +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) FilterChannelDefinitionRemoved(opts *bind.FilterOpts) (*StreamConfigStoreChannelDefinitionRemovedIterator, error) { + + logs, sub, err := _StreamConfigStore.contract.FilterLogs(opts, "ChannelDefinitionRemoved") + if err != nil { + return nil, err + } + return &StreamConfigStoreChannelDefinitionRemovedIterator{contract: _StreamConfigStore.contract, event: "ChannelDefinitionRemoved", logs: logs, sub: sub}, nil +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) WatchChannelDefinitionRemoved(opts *bind.WatchOpts, sink chan<- *StreamConfigStoreChannelDefinitionRemoved) (event.Subscription, error) { + + logs, sub, err := _StreamConfigStore.contract.WatchLogs(opts, "ChannelDefinitionRemoved") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StreamConfigStoreChannelDefinitionRemoved) + if err := 
_StreamConfigStore.contract.UnpackLog(event, "ChannelDefinitionRemoved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) ParseChannelDefinitionRemoved(log types.Log) (*StreamConfigStoreChannelDefinitionRemoved, error) { + event := new(StreamConfigStoreChannelDefinitionRemoved) + if err := _StreamConfigStore.contract.UnpackLog(event, "ChannelDefinitionRemoved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StreamConfigStoreNewChannelDefinitionIterator struct { + Event *StreamConfigStoreNewChannelDefinition + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StreamConfigStoreNewChannelDefinitionIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StreamConfigStoreNewChannelDefinition) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StreamConfigStoreNewChannelDefinition) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StreamConfigStoreNewChannelDefinitionIterator) Error() error { + return it.fail +} + +func (it *StreamConfigStoreNewChannelDefinitionIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StreamConfigStoreNewChannelDefinition struct { + ChannelId [32]byte + ChannelDefinition 
IStreamConfigStoreChannelDefinition + Raw types.Log +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) FilterNewChannelDefinition(opts *bind.FilterOpts) (*StreamConfigStoreNewChannelDefinitionIterator, error) { + + logs, sub, err := _StreamConfigStore.contract.FilterLogs(opts, "NewChannelDefinition") + if err != nil { + return nil, err + } + return &StreamConfigStoreNewChannelDefinitionIterator{contract: _StreamConfigStore.contract, event: "NewChannelDefinition", logs: logs, sub: sub}, nil +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) WatchNewChannelDefinition(opts *bind.WatchOpts, sink chan<- *StreamConfigStoreNewChannelDefinition) (event.Subscription, error) { + + logs, sub, err := _StreamConfigStore.contract.WatchLogs(opts, "NewChannelDefinition") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StreamConfigStoreNewChannelDefinition) + if err := _StreamConfigStore.contract.UnpackLog(event, "NewChannelDefinition", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) ParseNewChannelDefinition(log types.Log) (*StreamConfigStoreNewChannelDefinition, error) { + event := new(StreamConfigStoreNewChannelDefinition) + if err := _StreamConfigStore.contract.UnpackLog(event, "NewChannelDefinition", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StreamConfigStoreNewProductionConfigIterator struct { + Event *StreamConfigStoreNewProductionConfig + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*StreamConfigStoreNewProductionConfigIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StreamConfigStoreNewProductionConfig) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StreamConfigStoreNewProductionConfig) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StreamConfigStoreNewProductionConfigIterator) Error() error { + return it.fail +} + +func (it *StreamConfigStoreNewProductionConfigIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StreamConfigStoreNewProductionConfig struct { + ChannelConfig IStreamConfigStoreChannelConfiguration + Raw types.Log +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) FilterNewProductionConfig(opts *bind.FilterOpts) (*StreamConfigStoreNewProductionConfigIterator, error) { + + logs, sub, err := _StreamConfigStore.contract.FilterLogs(opts, "NewProductionConfig") + if err != nil { + return nil, err + } + return &StreamConfigStoreNewProductionConfigIterator{contract: _StreamConfigStore.contract, event: "NewProductionConfig", logs: logs, sub: sub}, nil +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) WatchNewProductionConfig(opts *bind.WatchOpts, sink chan<- *StreamConfigStoreNewProductionConfig) (event.Subscription, error) { + + logs, sub, err := _StreamConfigStore.contract.WatchLogs(opts, "NewProductionConfig") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(StreamConfigStoreNewProductionConfig) + if err := _StreamConfigStore.contract.UnpackLog(event, "NewProductionConfig", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) ParseNewProductionConfig(log types.Log) (*StreamConfigStoreNewProductionConfig, error) { + event := new(StreamConfigStoreNewProductionConfig) + if err := _StreamConfigStore.contract.UnpackLog(event, "NewProductionConfig", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StreamConfigStoreNewStagingConfigIterator struct { + Event *StreamConfigStoreNewStagingConfig + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StreamConfigStoreNewStagingConfigIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StreamConfigStoreNewStagingConfig) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StreamConfigStoreNewStagingConfig) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StreamConfigStoreNewStagingConfigIterator) Error() error { + return it.fail +} + +func (it *StreamConfigStoreNewStagingConfigIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StreamConfigStoreNewStagingConfig struct { + ChannelConfig IStreamConfigStoreChannelConfiguration + Raw 
types.Log +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) FilterNewStagingConfig(opts *bind.FilterOpts) (*StreamConfigStoreNewStagingConfigIterator, error) { + + logs, sub, err := _StreamConfigStore.contract.FilterLogs(opts, "NewStagingConfig") + if err != nil { + return nil, err + } + return &StreamConfigStoreNewStagingConfigIterator{contract: _StreamConfigStore.contract, event: "NewStagingConfig", logs: logs, sub: sub}, nil +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) WatchNewStagingConfig(opts *bind.WatchOpts, sink chan<- *StreamConfigStoreNewStagingConfig) (event.Subscription, error) { + + logs, sub, err := _StreamConfigStore.contract.WatchLogs(opts, "NewStagingConfig") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StreamConfigStoreNewStagingConfig) + if err := _StreamConfigStore.contract.UnpackLog(event, "NewStagingConfig", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) ParseNewStagingConfig(log types.Log) (*StreamConfigStoreNewStagingConfig, error) { + event := new(StreamConfigStoreNewStagingConfig) + if err := _StreamConfigStore.contract.UnpackLog(event, "NewStagingConfig", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StreamConfigStoreOwnershipTransferRequestedIterator struct { + Event *StreamConfigStoreOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StreamConfigStoreOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + 
select { + case log := <-it.logs: + it.Event = new(StreamConfigStoreOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StreamConfigStoreOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StreamConfigStoreOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *StreamConfigStoreOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StreamConfigStoreOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*StreamConfigStoreOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _StreamConfigStore.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &StreamConfigStoreOwnershipTransferRequestedIterator{contract: _StreamConfigStore.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *StreamConfigStoreOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := 
range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _StreamConfigStore.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StreamConfigStoreOwnershipTransferRequested) + if err := _StreamConfigStore.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) ParseOwnershipTransferRequested(log types.Log) (*StreamConfigStoreOwnershipTransferRequested, error) { + event := new(StreamConfigStoreOwnershipTransferRequested) + if err := _StreamConfigStore.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StreamConfigStoreOwnershipTransferredIterator struct { + Event *StreamConfigStoreOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StreamConfigStoreOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StreamConfigStoreOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StreamConfigStoreOwnershipTransferred) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StreamConfigStoreOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *StreamConfigStoreOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StreamConfigStoreOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*StreamConfigStoreOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _StreamConfigStore.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &StreamConfigStoreOwnershipTransferredIterator{contract: _StreamConfigStore.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *StreamConfigStoreOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _StreamConfigStore.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(StreamConfigStoreOwnershipTransferred) + if err := _StreamConfigStore.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) ParseOwnershipTransferred(log types.Log) (*StreamConfigStoreOwnershipTransferred, error) { + event := new(StreamConfigStoreOwnershipTransferred) + if err := _StreamConfigStore.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StreamConfigStorePromoteStagingConfigIterator struct { + Event *StreamConfigStorePromoteStagingConfig + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StreamConfigStorePromoteStagingConfigIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StreamConfigStorePromoteStagingConfig) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StreamConfigStorePromoteStagingConfig) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StreamConfigStorePromoteStagingConfigIterator) Error() error { + return it.fail +} + +func (it *StreamConfigStorePromoteStagingConfigIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StreamConfigStorePromoteStagingConfig struct { + ChannelId [32]byte + 
Raw types.Log +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) FilterPromoteStagingConfig(opts *bind.FilterOpts) (*StreamConfigStorePromoteStagingConfigIterator, error) { + + logs, sub, err := _StreamConfigStore.contract.FilterLogs(opts, "PromoteStagingConfig") + if err != nil { + return nil, err + } + return &StreamConfigStorePromoteStagingConfigIterator{contract: _StreamConfigStore.contract, event: "PromoteStagingConfig", logs: logs, sub: sub}, nil +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) WatchPromoteStagingConfig(opts *bind.WatchOpts, sink chan<- *StreamConfigStorePromoteStagingConfig) (event.Subscription, error) { + + logs, sub, err := _StreamConfigStore.contract.WatchLogs(opts, "PromoteStagingConfig") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StreamConfigStorePromoteStagingConfig) + if err := _StreamConfigStore.contract.UnpackLog(event, "PromoteStagingConfig", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StreamConfigStore *StreamConfigStoreFilterer) ParsePromoteStagingConfig(log types.Log) (*StreamConfigStorePromoteStagingConfig, error) { + event := new(StreamConfigStorePromoteStagingConfig) + if err := _StreamConfigStore.contract.UnpackLog(event, "PromoteStagingConfig", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_StreamConfigStore *StreamConfigStore) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _StreamConfigStore.abi.Events["ChannelDefinitionRemoved"].ID: + return _StreamConfigStore.ParseChannelDefinitionRemoved(log) + case _StreamConfigStore.abi.Events["NewChannelDefinition"].ID: + 
return _StreamConfigStore.ParseNewChannelDefinition(log) + case _StreamConfigStore.abi.Events["NewProductionConfig"].ID: + return _StreamConfigStore.ParseNewProductionConfig(log) + case _StreamConfigStore.abi.Events["NewStagingConfig"].ID: + return _StreamConfigStore.ParseNewStagingConfig(log) + case _StreamConfigStore.abi.Events["OwnershipTransferRequested"].ID: + return _StreamConfigStore.ParseOwnershipTransferRequested(log) + case _StreamConfigStore.abi.Events["OwnershipTransferred"].ID: + return _StreamConfigStore.ParseOwnershipTransferred(log) + case _StreamConfigStore.abi.Events["PromoteStagingConfig"].ID: + return _StreamConfigStore.ParsePromoteStagingConfig(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (StreamConfigStoreChannelDefinitionRemoved) Topic() common.Hash { + return common.HexToHash("0xa65638d745b306456ab0961a502338c1f24d1f962615be3d154c4e86e879fc74") +} + +func (StreamConfigStoreNewChannelDefinition) Topic() common.Hash { + return common.HexToHash("0x3f9f883dedd481d6ebe8e3efd5ef57f4b0293a3cf1d85946913dd82723542cc6") +} + +func (StreamConfigStoreNewProductionConfig) Topic() common.Hash { + return common.HexToHash("0xf484d8aa0665a5502456cd66a8bf6268922b4da7dc29f3bb1fcf67a7da444a2a") +} + +func (StreamConfigStoreNewStagingConfig) Topic() common.Hash { + return common.HexToHash("0x56d7e0e88863044d9b3e139e6e9c18977c6e81c387e44ce9661611f53035c7ed") +} + +func (StreamConfigStoreOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (StreamConfigStoreOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (StreamConfigStorePromoteStagingConfig) Topic() common.Hash { + return common.HexToHash("0xe644aaaa8169119e133c9b338279b4305419a255ace92b4383df2f45f7daa7a8") +} + +func 
(_StreamConfigStore *StreamConfigStore) Address() common.Address { + return _StreamConfigStore.address +} + +type StreamConfigStoreInterface interface { + GetChannelDefinitions(opts *bind.CallOpts, channelId [32]byte) (IStreamConfigStoreChannelDefinition, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AddChannel(opts *bind.TransactOpts, channelId [32]byte, channelDefinition IStreamConfigStoreChannelDefinition) (*types.Transaction, error) + + PromoteStagingConfig(opts *bind.TransactOpts, channelId [32]byte) (*types.Transaction, error) + + RemoveChannel(opts *bind.TransactOpts, channelId [32]byte) (*types.Transaction, error) + + SetStagingConfig(opts *bind.TransactOpts, channelId [32]byte, channelConfig IStreamConfigStoreChannelConfiguration) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterChannelDefinitionRemoved(opts *bind.FilterOpts) (*StreamConfigStoreChannelDefinitionRemovedIterator, error) + + WatchChannelDefinitionRemoved(opts *bind.WatchOpts, sink chan<- *StreamConfigStoreChannelDefinitionRemoved) (event.Subscription, error) + + ParseChannelDefinitionRemoved(log types.Log) (*StreamConfigStoreChannelDefinitionRemoved, error) + + FilterNewChannelDefinition(opts *bind.FilterOpts) (*StreamConfigStoreNewChannelDefinitionIterator, error) + + WatchNewChannelDefinition(opts *bind.WatchOpts, sink chan<- *StreamConfigStoreNewChannelDefinition) (event.Subscription, error) + + ParseNewChannelDefinition(log types.Log) (*StreamConfigStoreNewChannelDefinition, error) + + FilterNewProductionConfig(opts *bind.FilterOpts) (*StreamConfigStoreNewProductionConfigIterator, error) + + WatchNewProductionConfig(opts *bind.WatchOpts, sink chan<- 
*StreamConfigStoreNewProductionConfig) (event.Subscription, error) + + ParseNewProductionConfig(log types.Log) (*StreamConfigStoreNewProductionConfig, error) + + FilterNewStagingConfig(opts *bind.FilterOpts) (*StreamConfigStoreNewStagingConfigIterator, error) + + WatchNewStagingConfig(opts *bind.WatchOpts, sink chan<- *StreamConfigStoreNewStagingConfig) (event.Subscription, error) + + ParseNewStagingConfig(log types.Log) (*StreamConfigStoreNewStagingConfig, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*StreamConfigStoreOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *StreamConfigStoreOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*StreamConfigStoreOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*StreamConfigStoreOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *StreamConfigStoreOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*StreamConfigStoreOwnershipTransferred, error) + + FilterPromoteStagingConfig(opts *bind.FilterOpts) (*StreamConfigStorePromoteStagingConfigIterator, error) + + WatchPromoteStagingConfig(opts *bind.WatchOpts, sink chan<- *StreamConfigStorePromoteStagingConfig) (event.Subscription, error) + + ParsePromoteStagingConfig(log types.Log) (*StreamConfigStorePromoteStagingConfig, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/llo-feeds/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/llo-feeds/generation/generated-wrapper-dependency-versions-do-not-edit.txt index 
293defcfbe0..6fa700cc22c 100644 --- a/core/gethwrappers/llo-feeds/generation/generated-wrapper-dependency-versions-do-not-edit.txt +++ b/core/gethwrappers/llo-feeds/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -5,6 +5,7 @@ fee_manager: ../../../contracts/solc/v0.8.16/FeeManager/FeeManager.abi ../../../ llo_feeds: ../../../contracts/solc/v0.8.16/FeeManager.abi ../../../contracts/solc/v0.8.16/FeeManager.bin cb71e018f67e49d7bc0e194c822204dfd59f79ff42e4fc8fd8ab63f3acd71361 llo_feeds_test: ../../../contracts/solc/v0.8.16/ExposedVerifier.abi ../../../contracts/solc/v0.8.16/ExposedVerifier.bin 6932cea8f2738e874d3ec9e1a4231d2421704030c071d9e15dd2f7f08482c246 reward_manager: ../../../contracts/solc/v0.8.16/RewardManager/RewardManager.abi ../../../contracts/solc/v0.8.16/RewardManager/RewardManager.bin db73e9062b17a1d5aa14c06881fe2be49bd95b00b7f1a8943910c5e4ded5b221 +stream_config_store: ../../../contracts/solc/v0.8.16/StreamConfigStore/StreamConfigStore.abi ../../../contracts/solc/v0.8.16/StreamConfigStore/StreamConfigStore.bin 45ae1b0a45a90b3dee076023052aef73c212c8ef8825b829397f751f6b0a1598 verifier: ../../../contracts/solc/v0.8.16/Verifier/Verifier.abi ../../../contracts/solc/v0.8.16/Verifier/Verifier.bin df12786bbeccf3a8f3389479cf93c055b4efd5904b9f99a4835f81af43fe62bf verifier_proxy: ../../../contracts/solc/v0.8.16/VerifierProxy/VerifierProxy.abi ../../../contracts/solc/v0.8.16/VerifierProxy/VerifierProxy.bin 6393443d0a323f2dbe9687dc30fd77f8dfa918944b61c651759746ff2d76e4e5 werc20_mock: ../../../contracts/solc/v0.8.19/WERC20Mock.abi ../../../contracts/solc/v0.8.19/WERC20Mock.bin ff2ca3928b2aa9c412c892cb8226c4d754c73eeb291bb7481c32c48791b2aa94 diff --git a/core/gethwrappers/llo-feeds/go_generate.go b/core/gethwrappers/llo-feeds/go_generate.go index 5b2088f43a0..c1549da742f 100644 --- a/core/gethwrappers/llo-feeds/go_generate.go +++ b/core/gethwrappers/llo-feeds/go_generate.go @@ -9,3 +9,4 @@ package gethwrappers //go:generate go run 
../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/ExposedVerifier/ExposedVerifier.abi ../../../contracts/solc/v0.8.16/ExposedVerifier/ExposedVerifier.bin ExposedVerifier exposed_verifier //go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/RewardManager/RewardManager.abi ../../../contracts/solc/v0.8.16/RewardManager/RewardManager.bin RewardManager reward_manager //go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/FeeManager/FeeManager.abi ../../../contracts/solc/v0.8.16/FeeManager/FeeManager.bin FeeManager fee_manager +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/StreamConfigStore/StreamConfigStore.abi ../../../contracts/solc/v0.8.16/StreamConfigStore/StreamConfigStore.bin StreamConfigStore stream_config_store diff --git a/core/scripts/go.sum b/core/scripts/go.sum index dcf47d392cc..eca642e1a1e 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1233,8 +1233,8 @@ github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoM github.com/teris-io/shortid v0.0.0-20171029131806-771a37caa5cf/go.mod h1:M8agBzgqHIhgj7wEn9/0hJUZcrvt9VY+Ln+S1I5Mha0= github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125 h1:3SNcvBmEPE1YlB1JpVZouslJpI3GBNoiqW7+wb0Rz7w= github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125/go.mod h1:M8agBzgqHIhgj7wEn9/0hJUZcrvt9VY+Ln+S1I5Mha0= -github.com/test-go/testify v1.1.4 h1:Tf9lntrKUMHiXQ07qBScBTSA0dhYQlu83hswqelv1iE= -github.com/test-go/testify v1.1.4/go.mod h1:rH7cfJo/47vWGdi4GPj16x3/t1xGOj2YxzmNQzk2ghU= +github.com/test-go/testify v1.1.4 h1:Tf9lntrKUMHiXQ07qBScBTSA0dhYQlu83hswqelv1iE= +github.com/test-go/testify v1.1.4/go.mod h1:rH7cfJo/47vWGdi4GPj16x3/t1xGOj2YxzmNQzk2ghU= github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a h1:YuO+afVc3eqrjiCUizNCxI53bl/BnPiVwXqLzqYTqgU= github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a/go.mod
h1:/sfW47zCZp9FrtGcWyo1VjbgDaodxX9ovZvgLb/MxaA= github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= diff --git a/core/services/functions/connector_handler.go b/core/services/functions/connector_handler.go index 1594dc6eb56..4a0511873cf 100644 --- a/core/services/functions/connector_handler.go +++ b/core/services/functions/connector_handler.go @@ -254,7 +254,7 @@ func (h *functionsConnectorHandler) handleOffchainRequest(request *OffchainReque defer cancel() err := h.listener.HandleOffchainRequest(ctx, request) if err != nil { - h.lggr.Errorw("internal error while processing", "id", request.RequestId, "error", err) + h.lggr.Errorw("internal error while processing", "id", request.RequestId, "err", err) h.mu.Lock() defer h.mu.Unlock() state, ok := h.heartbeatRequests[RequestID(request.RequestId)] @@ -313,7 +313,7 @@ func (h *functionsConnectorHandler) cacheNewRequestLocked(requestId RequestID, r func (h *functionsConnectorHandler) sendResponseAndLog(ctx context.Context, gatewayId string, requestBody *api.MessageBody, payload any) { err := h.sendResponse(ctx, gatewayId, requestBody, payload) if err != nil { - h.lggr.Errorw("failed to send response to gateway", "id", gatewayId, "error", err) + h.lggr.Errorw("failed to send response to gateway", "id", gatewayId, "err", err) } else { h.lggr.Debugw("sent to gateway", "id", gatewayId, "messageId", requestBody.MessageId, "donId", requestBody.DonId, "method", requestBody.Method) } diff --git a/core/services/gateway/connector/connector.go b/core/services/gateway/connector/connector.go index 27db8fd44b6..7ee1572bb40 100644 --- a/core/services/gateway/connector/connector.go +++ b/core/services/gateway/connector/connector.go @@ -158,7 +158,7 @@ func (c *gatewayConnector) readLoop(gatewayState *gatewayState) { break } if err = msg.Validate(); err != nil { - c.lggr.Errorw("failed to validate message signature", "id", gatewayState.config.Id, "error", err) + c.lggr.Errorw("failed to validate message 
signature", "id", gatewayState.config.Id, "err", err) break } c.handler.HandleGatewayMessage(ctx, gatewayState.config.Id, msg) @@ -174,7 +174,7 @@ func (c *gatewayConnector) reconnectLoop(gatewayState *gatewayState) { for { conn, err := gatewayState.wsClient.Connect(ctx, gatewayState.url) if err != nil { - c.lggr.Errorw("connection error", "url", gatewayState.url, "error", err) + c.lggr.Errorw("connection error", "url", gatewayState.url, "err", err) } else { c.lggr.Infow("connected successfully", "url", gatewayState.url) closeCh := gatewayState.conn.Reset(conn) diff --git a/core/services/job/orm.go b/core/services/job/orm.go index 6c5a879ebd0..2ead3f6a6f1 100644 --- a/core/services/job/orm.go +++ b/core/services/job/orm.go @@ -464,7 +464,7 @@ func (o *orm) CreateJob(jb *Job, qopts ...pg.QOpt) error { // ValidateKeyStoreMatch confirms that the key has a valid match in the keystore func ValidateKeyStoreMatch(spec *OCR2OracleSpec, keyStore keystore.Master, key string) error { - if spec.PluginType == types.Mercury { + if spec.PluginType == types.Mercury || spec.PluginType == types.Streams { _, err := keyStore.CSA().Get(key) if err != nil { return errors.Errorf("no CSA key matching: %q", key) diff --git a/core/services/job/spawner.go b/core/services/job/spawner.go index 5ed017b8743..9ee34f70f10 100644 --- a/core/services/job/spawner.go +++ b/core/services/job/spawner.go @@ -65,20 +65,20 @@ type ( Delegate interface { JobType() Type // BeforeJobCreated is only called once on first time job create. - BeforeJobCreated(spec Job) + BeforeJobCreated(Job) // ServicesForSpec returns services to be started and stopped for this // job. In case a given job type relies upon well-defined startup/shutdown // ordering for services, they are started in the order they are given // and stopped in reverse order. 
- ServicesForSpec(spec Job) ([]ServiceCtx, error) - AfterJobCreated(spec Job) - BeforeJobDeleted(spec Job) + ServicesForSpec(Job) ([]ServiceCtx, error) + AfterJobCreated(Job) + BeforeJobDeleted(Job) // OnDeleteJob will be called from within DELETE db transaction. Any db // commands issued within OnDeleteJob() should be performed first, before any // non-db side effects. This is required in order to guarantee mutual atomicity between // all tasks intended to happen during job deletion. For the same reason, the job will // not show up in the db within OnDeleteJob(), even though it is still actively running. - OnDeleteJob(spec Job, q pg.Queryer) error + OnDeleteJob(jb Job, q pg.Queryer) error } activeJob struct { diff --git a/core/services/keystore/keys/ocr2key/cosmos_keyring.go b/core/services/keystore/keys/ocr2key/cosmos_keyring.go index 490fa0cbfcb..19d475673c9 100644 --- a/core/services/keystore/keys/ocr2key/cosmos_keyring.go +++ b/core/services/keystore/keys/ocr2key/cosmos_keyring.go @@ -7,6 +7,7 @@ import ( "github.com/hdevalence/ed25519consensus" "github.com/pkg/errors" + "github.com/smartcontractkit/libocr/offchainreporting2/types" "github.com/smartcontractkit/libocr/offchainreporting2plus/chains/evmutil" ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" "golang.org/x/crypto/blake2s" @@ -29,11 +30,11 @@ func newCosmosKeyring(material io.Reader) (*cosmosKeyring, error) { return &cosmosKeyring{pubKey: pubKey, privKey: privKey}, nil } -func (tk *cosmosKeyring) PublicKey() ocrtypes.OnchainPublicKey { - return []byte(tk.pubKey) +func (ckr *cosmosKeyring) PublicKey() ocrtypes.OnchainPublicKey { + return []byte(ckr.pubKey) } -func (tk *cosmosKeyring) reportToSigData(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) ([]byte, error) { +func (ckr *cosmosKeyring) reportToSigData(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) ([]byte, error) { rawReportContext := evmutil.RawReportContext(reportCtx) h, err := blake2s.New256(nil) 
if err != nil { @@ -49,48 +50,64 @@ func (tk *cosmosKeyring) reportToSigData(reportCtx ocrtypes.ReportContext, repor return h.Sum(nil), nil } -func (tk *cosmosKeyring) Sign(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) ([]byte, error) { - sigData, err := tk.reportToSigData(reportCtx, report) +func (ckr *cosmosKeyring) Sign(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) ([]byte, error) { + sigData, err := ckr.reportToSigData(reportCtx, report) if err != nil { return nil, err } - signedMsg := ed25519.Sign(tk.privKey, sigData) + return ckr.signBlob(sigData) +} + +func (ckr *cosmosKeyring) Sign3(digest types.ConfigDigest, seqNr uint64, r ocrtypes.Report) (signature []byte, err error) { + panic("TODO") +} + +func (ckr *cosmosKeyring) signBlob(b []byte) ([]byte, error) { + signedMsg := ed25519.Sign(ckr.privKey, b) // match on-chain parsing (first 32 bytes are for pubkey, remaining are for signature) - return utils.ConcatBytes(tk.PublicKey(), signedMsg), nil + return utils.ConcatBytes(ckr.PublicKey(), signedMsg), nil +} + +func (ckr *cosmosKeyring) Verify(publicKey ocrtypes.OnchainPublicKey, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signature []byte) bool { + hash, err := ckr.reportToSigData(reportCtx, report) + if err != nil { + return false + } + return ckr.verifyBlob(publicKey, hash, signature) +} + +func (ckr *cosmosKeyring) Verify3(publicKey ocrtypes.OnchainPublicKey, cd ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report, signature []byte) bool { + panic("TODO") } -func (tk *cosmosKeyring) Verify(publicKey ocrtypes.OnchainPublicKey, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signature []byte) bool { +func (ckr *cosmosKeyring) verifyBlob(pubkey ocrtypes.OnchainPublicKey, b, sig []byte) bool { // Ed25519 signatures are always 64 bytes and the // public key (always prefixed, see Sign above) is always, // 32 bytes, so we always require the max signature length. 
- if len(signature) != tk.MaxSignatureLength() { + if len(sig) != ckr.MaxSignatureLength() { return false } - if len(publicKey) != ed25519.PublicKeySize { - return false - } - hash, err := tk.reportToSigData(reportCtx, report) - if err != nil { + if len(pubkey) != ed25519.PublicKeySize { return false } - return ed25519consensus.Verify(ed25519.PublicKey(publicKey), hash, signature[32:]) + return ed25519consensus.Verify(ed25519.PublicKey(pubkey), b, sig[32:]) } -func (tk *cosmosKeyring) MaxSignatureLength() int { +func (ckr *cosmosKeyring) MaxSignatureLength() int { // Reference: https://pkg.go.dev/crypto/ed25519 return ed25519.PublicKeySize + ed25519.SignatureSize // 32 + 64 } -func (tk *cosmosKeyring) Marshal() ([]byte, error) { - return tk.privKey.Seed(), nil +func (ckr *cosmosKeyring) Marshal() ([]byte, error) { + return ckr.privKey.Seed(), nil } -func (tk *cosmosKeyring) Unmarshal(in []byte) error { +func (ckr *cosmosKeyring) Unmarshal(in []byte) error { if len(in) != ed25519.SeedSize { return errors.Errorf("unexpected seed size, got %d want %d", len(in), ed25519.SeedSize) } privKey := ed25519.NewKeyFromSeed(in) - tk.privKey = privKey - tk.pubKey = privKey.Public().(ed25519.PublicKey) + ckr.privKey = privKey + ckr.pubKey = privKey.Public().(ed25519.PublicKey) return nil } diff --git a/core/services/keystore/keys/ocr2key/evm_keyring.go b/core/services/keystore/keys/ocr2key/evm_keyring.go index cc4076391b4..345c86a673f 100644 --- a/core/services/keystore/keys/ocr2key/evm_keyring.go +++ b/core/services/keystore/keys/ocr2key/evm_keyring.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/smartcontractkit/libocr/offchainreporting2/types" "github.com/smartcontractkit/libocr/offchainreporting2plus/chains/evmutil" ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" ) @@ -26,12 +27,12 @@ func newEVMKeyring(material io.Reader) (*evmKeyring, error) { } // XXX: PublicKey returns 
the address of the public key not the public key itself -func (ok *evmKeyring) PublicKey() ocrtypes.OnchainPublicKey { - address := ok.signingAddress() +func (ekr *evmKeyring) PublicKey() ocrtypes.OnchainPublicKey { + address := ekr.signingAddress() return address[:] } -func (ok *evmKeyring) reportToSigData(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) []byte { +func (ekr *evmKeyring) reportToSigData(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) []byte { rawReportContext := evmutil.RawReportContext(reportCtx) sigData := crypto.Keccak256(report) sigData = append(sigData, rawReportContext[0][:]...) @@ -40,38 +41,54 @@ func (ok *evmKeyring) reportToSigData(reportCtx ocrtypes.ReportContext, report o return crypto.Keccak256(sigData) } -func (ok *evmKeyring) Sign(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) ([]byte, error) { - return crypto.Sign(ok.reportToSigData(reportCtx, report), &ok.privateKey) +func (ekr *evmKeyring) Sign(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) ([]byte, error) { + return ekr.signBlob(ekr.reportToSigData(reportCtx, report)) +} + +func (ekr *evmKeyring) Sign3(digest types.ConfigDigest, seqNr uint64, r ocrtypes.Report) (signature []byte, err error) { + panic("TODO") +} + +func (ekr *evmKeyring) signBlob(b []byte) (sig []byte, err error) { + return crypto.Sign(b, &ekr.privateKey) +} + +func (ekr *evmKeyring) Verify(publicKey ocrtypes.OnchainPublicKey, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signature []byte) bool { + hash := ekr.reportToSigData(reportCtx, report) + return ekr.verifyBlob(publicKey, hash, signature) +} +func (ekr *evmKeyring) Verify3(publicKey ocrtypes.OnchainPublicKey, cd ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report, signature []byte) bool { + panic("TODO") } -func (ok *evmKeyring) Verify(publicKey ocrtypes.OnchainPublicKey, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signature []byte) bool { - hash := ok.reportToSigData(reportCtx, report) - 
authorPubkey, err := crypto.SigToPub(hash, signature) +func (ekr *evmKeyring) verifyBlob(pubkey types.OnchainPublicKey, b, sig []byte) bool { + authorPubkey, err := crypto.SigToPub(b, sig) if err != nil { return false } authorAddress := crypto.PubkeyToAddress(*authorPubkey) - return bytes.Equal(publicKey[:], authorAddress[:]) + // no need for constant time compare since neither arg is sensitive + return bytes.Equal(pubkey[:], authorAddress[:]) } -func (ok *evmKeyring) MaxSignatureLength() int { +func (ekr *evmKeyring) MaxSignatureLength() int { return 65 } -func (ok *evmKeyring) signingAddress() common.Address { - return crypto.PubkeyToAddress(*(&ok.privateKey).Public().(*ecdsa.PublicKey)) +func (ekr *evmKeyring) signingAddress() common.Address { + return crypto.PubkeyToAddress(*(&ekr.privateKey).Public().(*ecdsa.PublicKey)) } -func (ok *evmKeyring) Marshal() ([]byte, error) { - return crypto.FromECDSA(&ok.privateKey), nil +func (ekr *evmKeyring) Marshal() ([]byte, error) { + return crypto.FromECDSA(&ekr.privateKey), nil } -func (ok *evmKeyring) Unmarshal(in []byte) error { +func (ekr *evmKeyring) Unmarshal(in []byte) error { privateKey, err := crypto.ToECDSA(in) if err != nil { return err } - ok.privateKey = *privateKey + ekr.privateKey = *privateKey return nil } diff --git a/core/services/keystore/keys/ocr2key/generic_key_bundle.go b/core/services/keystore/keys/ocr2key/generic_key_bundle.go index be401becfb3..2c5e4bd8559 100644 --- a/core/services/keystore/keys/ocr2key/generic_key_bundle.go +++ b/core/services/keystore/keys/ocr2key/generic_key_bundle.go @@ -18,6 +18,7 @@ import ( type ( keyring interface { ocrtypes.OnchainKeyring + OCR3SignerVerifier Marshal() ([]byte, error) Unmarshal(in []byte) error } @@ -92,10 +93,18 @@ func (kb *keyBundle[K]) Sign(reportCtx ocrtypes.ReportContext, report ocrtypes.R return kb.keyring.Sign(reportCtx, report) } +func (kb *keyBundle[K]) Sign3(digest ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report) (signature []byte, err 
error) { + return kb.keyring.Sign3(digest, seqNr, r) +} + func (kb *keyBundle[K]) Verify(publicKey ocrtypes.OnchainPublicKey, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signature []byte) bool { return kb.keyring.Verify(publicKey, reportCtx, report, signature) } +func (kb *keyBundle[K]) Verify3(publicKey ocrtypes.OnchainPublicKey, cd ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report, signature []byte) bool { + return kb.keyring.Verify3(publicKey, cd, seqNr, r, signature) +} + // OnChainPublicKey returns public component of the keypair used on chain func (kb *keyBundle[K]) OnChainPublicKey() string { return hex.EncodeToString(kb.keyring.PublicKey()) diff --git a/core/services/keystore/keys/ocr2key/key_bundle.go b/core/services/keystore/keys/ocr2key/key_bundle.go index 79d8ad70d52..2c3a4bebeb0 100644 --- a/core/services/keystore/keys/ocr2key/key_bundle.go +++ b/core/services/keystore/keys/ocr2key/key_bundle.go @@ -14,12 +14,20 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/store/models" ) +type OCR3SignerVerifier interface { + Sign3(digest ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report) (signature []byte, err error) + Verify3(publicKey ocrtypes.OnchainPublicKey, cd ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report, signature []byte) bool +} + // nolint type KeyBundle interface { // OnchainKeyring is used for signing reports (groups of observations, verified onchain) ocrtypes.OnchainKeyring // OffchainKeyring is used for signing observations ocrtypes.OffchainKeyring + + OCR3SignerVerifier + ID() string ChainType() chaintype.ChainType Marshal() ([]byte, error) diff --git a/core/services/keystore/keys/ocr2key/solana_keyring.go b/core/services/keystore/keys/ocr2key/solana_keyring.go index aebe33e1d19..6ebb8d1c312 100644 --- a/core/services/keystore/keys/ocr2key/solana_keyring.go +++ b/core/services/keystore/keys/ocr2key/solana_keyring.go @@ -7,6 +7,7 @@ import ( "io" "github.com/ethereum/go-ethereum/crypto" + 
"github.com/smartcontractkit/libocr/offchainreporting2/types" "github.com/smartcontractkit/libocr/offchainreporting2plus/chains/evmutil" ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" ) @@ -26,12 +27,12 @@ func newSolanaKeyring(material io.Reader) (*solanaKeyring, error) { } // XXX: PublicKey returns the evm-style address of the public key not the public key itself -func (ok *solanaKeyring) PublicKey() ocrtypes.OnchainPublicKey { - address := crypto.PubkeyToAddress(*(&ok.privateKey).Public().(*ecdsa.PublicKey)) +func (skr *solanaKeyring) PublicKey() ocrtypes.OnchainPublicKey { + address := crypto.PubkeyToAddress(*(&skr.privateKey).Public().(*ecdsa.PublicKey)) return address[:] } -func (ok *solanaKeyring) reportToSigData(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) []byte { +func (skr *solanaKeyring) reportToSigData(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) []byte { rawReportContext := evmutil.RawReportContext(reportCtx) h := sha256.New() h.Write([]byte{uint8(len(report))}) @@ -42,30 +43,47 @@ func (ok *solanaKeyring) reportToSigData(reportCtx ocrtypes.ReportContext, repor return h.Sum(nil) } -func (ok *solanaKeyring) Sign(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) ([]byte, error) { - return crypto.Sign(ok.reportToSigData(reportCtx, report), &ok.privateKey) +func (skr *solanaKeyring) Sign(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) ([]byte, error) { + return skr.signBlob(skr.reportToSigData(reportCtx, report)) } -func (ok *solanaKeyring) Verify(publicKey ocrtypes.OnchainPublicKey, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signature []byte) bool { - hash := ok.reportToSigData(reportCtx, report) - authorPubkey, err := crypto.SigToPub(hash, signature) +func (skr *solanaKeyring) Sign3(digest types.ConfigDigest, seqNr uint64, r ocrtypes.Report) (signature []byte, err error) { + panic("TODO") +} + +func (skr *solanaKeyring) signBlob(b []byte) (sig []byte, err error) { + 
return crypto.Sign(b, &skr.privateKey) +} + +func (skr *solanaKeyring) Verify(publicKey ocrtypes.OnchainPublicKey, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signature []byte) bool { + hash := skr.reportToSigData(reportCtx, report) + return skr.verifyBlob(publicKey, hash, signature) +} + +func (skr *solanaKeyring) Verify3(publicKey ocrtypes.OnchainPublicKey, cd ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report, signature []byte) bool { + panic("TODO") +} + +func (skr *solanaKeyring) verifyBlob(pubkey types.OnchainPublicKey, b, sig []byte) bool { + authorPubkey, err := crypto.SigToPub(b, sig) if err != nil { return false } authorAddress := crypto.PubkeyToAddress(*authorPubkey) - return bytes.Equal(publicKey[:], authorAddress[:]) + // no need for constant time compare since neither arg is sensitive + return bytes.Equal(pubkey[:], authorAddress[:]) } -func (ok *solanaKeyring) MaxSignatureLength() int { +func (skr *solanaKeyring) MaxSignatureLength() int { return 65 } -func (ok *solanaKeyring) Marshal() ([]byte, error) { - return crypto.FromECDSA(&ok.privateKey), nil +func (skr *solanaKeyring) Marshal() ([]byte, error) { + return crypto.FromECDSA(&skr.privateKey), nil } -func (ok *solanaKeyring) Unmarshal(in []byte) error { +func (skr *solanaKeyring) Unmarshal(in []byte) error { privateKey, err := crypto.ToECDSA(in) - ok.privateKey = *privateKey + skr.privateKey = *privateKey return err } diff --git a/core/services/keystore/keys/starkkey/ocr2key.go b/core/services/keystore/keys/starkkey/ocr2key.go index 41ab3a4708d..bb7db4b523c 100644 --- a/core/services/keystore/keys/starkkey/ocr2key.go +++ b/core/services/keystore/keys/starkkey/ocr2key.go @@ -58,7 +58,6 @@ func (sk *OCR2Key) Sign(reportCtx types.ReportContext, report types.Report) ([]b if err != nil { return []byte{}, err } - r, s, err := caigo.Curve.Sign(hash, sk.priv) if err != nil { return []byte{}, err @@ -85,6 +84,10 @@ func (sk *OCR2Key) Sign(reportCtx types.ReportContext, report 
types.Report) ([]b return out, nil } +func (sk *OCR2Key) Sign3(digest types.ConfigDigest, seqNr uint64, r types.Report) (signature []byte, err error) { + panic("TODO") +} + func (sk *OCR2Key) Verify(publicKey types.OnchainPublicKey, reportCtx types.ReportContext, report types.Report, signature []byte) bool { // check valid signature length if len(signature) != sk.MaxSignatureLength() { @@ -120,6 +123,10 @@ func (sk *OCR2Key) Verify(publicKey types.OnchainPublicKey, reportCtx types.Repo return caigo.Curve.Verify(hash, r, s, keys[0].X, keys[0].Y) || caigo.Curve.Verify(hash, r, s, keys[1].X, keys[1].Y) } +func (sk *OCR2Key) Verify3(publicKey types.OnchainPublicKey, cd types.ConfigDigest, seqNr uint64, r types.Report, signature []byte) bool { + panic("TODO") +} + func (sk *OCR2Key) MaxSignatureLength() int { return 32 + 32 + 32 // publickey + r + s } diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index 1b7be2b7f0e..d69a58a025b 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -69,6 +69,7 @@ import ( evmmercury "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury" mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils" evmrelaytypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types" + "github.com/smartcontractkit/chainlink/v2/core/services/streams" "github.com/smartcontractkit/chainlink/v2/core/services/synchronization" "github.com/smartcontractkit/chainlink/v2/core/services/telemetry" "github.com/smartcontractkit/chainlink/v2/plugins" @@ -431,6 +432,9 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) { case types.Mercury: return d.newServicesMercury(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger) + case types.Streams: + return d.newServicesStreams(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger) + case types.Median: return d.newServicesMedian(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, 
lc, ocrLogger) @@ -463,7 +467,7 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) { func GetEVMEffectiveTransmitterID(jb *job.Job, chain legacyevm.Chain, lggr logger.SugaredLogger) (string, error) { spec := jb.OCR2OracleSpec - if spec.PluginType == types.Mercury { + if spec.PluginType == types.Mercury || spec.PluginType == types.Streams { return spec.TransmitterID.String, nil } @@ -729,6 +733,99 @@ func (d *Delegate) newServicesMercury( return mercuryServices, err2 } +func (d *Delegate) newServicesStreams( + ctx context.Context, + lggr logger.SugaredLogger, + jb job.Job, + bootstrapPeers []commontypes.BootstrapperLocator, + kb ocr2key.KeyBundle, + ocrDB *db, + lc ocrtypes.LocalConfig, + ocrLogger commontypes.Logger, +) ([]job.ServiceCtx, error) { + lggr = logger.Sugared(d.lggr.Named("Streams")) + spec := jb.OCR2OracleSpec + transmitterID := spec.TransmitterID.String + if len(transmitterID) != 64 { + return nil, errors.Errorf("ServicesForSpec: streams job type requires transmitter ID to be a 32-byte hex string, got: %q", transmitterID) + } + if _, err := hex.DecodeString(transmitterID); err != nil { + return nil, errors.Wrapf(err, "ServicesForSpec: streams job type requires transmitter ID to be a 32-byte hex string, got: %q", transmitterID) + } + + rid, err := spec.RelayID() + if err != nil { + return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "streams"} + } + if rid.Network != relay.EVM { + return nil, fmt.Errorf("streams services: expected EVM relayer got %s", rid.Network) + } + relayer, err := d.RelayGetter.Get(rid) + if err != nil { + return nil, ErrRelayNotEnabled{Err: err, Relay: spec.Relay, PluginName: "streams"} + } + + provider, err2 := relayer.NewStreamsProvider(ctx, + types.RelayArgs{ + ExternalJobID: jb.ExternalJobID, + JobID: jb.ID, + ContractID: spec.ContractID, + New: d.isNewlyCreatedJob, + RelayConfig: spec.RelayConfig.Bytes(), + ProviderType: string(spec.PluginType), + }, types.PluginArgs{ + TransmitterID: 
transmitterID, + PluginConfig: spec.PluginConfig.Bytes(), + }) + if err2 != nil { + return nil, err2 + } + + streamsProvider, ok := provider.(types.StreamsProvider) + if !ok { + return nil, errors.New("could not coerce PluginProvider to streamsProvider") + } + + // chEnhancedTelem := make(chan ocrcommon.EnhancedTelemetryMercuryData, 100) + + // lloServices, err2 := llo.NewServices(jb, streamsProvider, d.pipelineRunner, runResults, lggr, oracleArgsNoPlugin, d.cfg.JobPipeline(), chEnhancedTelem, chain, d.mercuryORM, (mercuryutils.FeedID)(*spec.FeedID)) + + // if ocrcommon.ShouldCollectEnhancedTelemetryMercury(jb) { + // enhancedTelemService := ocrcommon.NewEnhancedTelemetryService(&jb, chEnhancedTelem, make(chan struct{}), d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.FeedID.String(), synchronization.EnhancedEAMercury), lggr.Named("EnhancedTelemetryMercury")) + // mercuryServices = append(mercuryServices, enhancedTelemService) + // } else { + // lggr.Infow("Enhanced telemetry is disabled for llo job", "job", jb.Name) + // } + + kr := streams.NewOnchainKeyring(kb) + + orm := NewORM(d.cfg.Queryer) + + cfg := streams.DelegateConfig{ + Logger: lggr, + Queryer: pg.NewQ(d.db, d.lggr, d.cfg.Database()), + Runner: d.pipelineRunner, + + ORM: orm, + ChannelDefinitionCache: streamsProvider.ChannelDefinitionCache(), + + // TODO + BinaryNetworkEndpointFactory: d.peerWrapper.Peer2, + V2Bootstrappers: bootstrapPeers, + ContractTransmitter: streamsProvider.ContractTransmitter(), + ContractConfigTracker: streamsProvider.ContractConfigTracker(), + Database: ocrDB, + LocalConfig: lc, + MonitoringEndpoint: nil, // TODO + OffchainConfigDigester: streamsProvider.OffchainConfigDigester(), + OffchainKeyring: kb, + OnchainKeyring: kr, + OCRLogger: ocrLogger, + } + oracle := streams.NewDelegate(cfg) + return []job.ServiceCtx{provider, oracle}, nil +} + func (d *Delegate) newServicesMedian( ctx context.Context, lggr logger.SugaredLogger, diff --git
a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go index c7f6884426f..aa519ce3e82 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go @@ -540,7 +540,7 @@ func (r *logRecoverer) selectFilterBatch(filters []upkeepFilter) []upkeepFilter for len(results) < batchSize && len(filters) != 0 { i, err := r.randIntn(len(filters)) if err != nil { - r.lggr.Debugw("error generating random number", "error", err.Error()) + r.lggr.Debugw("error generating random number", "err", err.Error()) continue } results = append(results, filters[i]) diff --git a/core/services/ocr2/plugins/s4/plugin_test.go b/core/services/ocr2/plugins/s4/plugin_test.go index e0aa84183e1..82235053a50 100644 --- a/core/services/ocr2/plugins/s4/plugin_test.go +++ b/core/services/ocr2/plugins/s4/plugin_test.go @@ -222,7 +222,7 @@ func TestPlugin_ShouldAcceptFinalizedReport(t *testing.T) { }) - t.Run("error", func(t *testing.T) { + t.Run("error", func(t *testing.T) { testErr := errors.New("some error") rows := generateTestRows(t, 1, time.Minute) orm.On("Update", mock.Anything, mock.Anything).Return(testErr).Once() diff --git a/core/services/ocr2/plugins/streams/config/config.go b/core/services/ocr2/plugins/streams/config/config.go new file mode 100644 index 00000000000..cb0b15d7e57 --- /dev/null +++ b/core/services/ocr2/plugins/streams/config/config.go @@ -0,0 +1,52 @@ +// config is a separate package so that we can validate +// the config in other packages, for example in job at job create time.
+ +package config + +import ( + "errors" + "fmt" + "net/url" + "regexp" + + pkgerrors "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink/v2/core/utils" +) + +type PluginConfig struct { + RawServerURL string `json:"serverURL" toml:"serverURL"` + ServerPubKey utils.PlainHexBytes `json:"serverPubKey" toml:"serverPubKey"` +} + +func (p PluginConfig) Validate() (merr error) { + if p.RawServerURL == "" { + merr = errors.New("streams: ServerURL must be specified") + } else { + var normalizedURI string + if schemeRegexp.MatchString(p.RawServerURL) { + normalizedURI = p.RawServerURL + } else { + normalizedURI = fmt.Sprintf("wss://%s", p.RawServerURL) + } + uri, err := url.ParseRequestURI(normalizedURI) + if err != nil { + merr = pkgerrors.Wrap(err, "streams: invalid value for ServerURL") + } else if uri.Scheme != "wss" { + merr = pkgerrors.Errorf(`streams: invalid scheme specified for ServerURL, got: %q (scheme: %q) but expected a websocket url e.g. "192.0.2.2:4242" or "wss://192.0.2.2:4242"`, p.RawServerURL, uri.Scheme) + } + } + + if len(p.ServerPubKey) != 32 { + merr = errors.Join(merr, errors.New("streams: ServerPubKey is required and must be a 32-byte hex string")) + } + + return merr +} + +var schemeRegexp = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9+.-]*://`) +var wssRegexp = regexp.MustCompile(`^wss://`) + +func (p PluginConfig) ServerURL() string { + return wssRegexp.ReplaceAllString(p.RawServerURL, "") +} diff --git a/core/services/ocr2/plugins/streams/helpers_test.go b/core/services/ocr2/plugins/streams/helpers_test.go new file mode 100644 index 00000000000..5cf9f6f8eb0 --- /dev/null +++ b/core/services/ocr2/plugins/streams/helpers_test.go @@ -0,0 +1,463 @@ +package streams_test + +import ( + "context" + "crypto/ed25519" + "encoding/binary" + "errors" + "fmt" + "math/big" + "net" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" +
"github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" + + "github.com/smartcontractkit/wsrpc" + "github.com/smartcontractkit/wsrpc/credentials" + "github.com/smartcontractkit/wsrpc/peer" + + "github.com/smartcontractkit/libocr/offchainreporting2/chains/evmutil" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" + "github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/keystest" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/validate" + "github.com/smartcontractkit/chainlink/v2/core/services/ocrbootstrap" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb" + "github.com/smartcontractkit/chainlink/v2/core/store/models" + "github.com/smartcontractkit/chainlink/v2/core/utils" +) + +var _ pb.MercuryServer = &mercuryServer{} + +type request struct { + pk credentials.StaticSizedPublicKey + req *pb.TransmitRequest +} + +type mercuryServer struct { + privKey ed25519.PrivateKey + reqsCh chan request + t *testing.T + buildReport func() []byte +} + +func NewMercuryServer(t *testing.T, privKey ed25519.PrivateKey, reqsCh chan request, buildReport func() []byte) *mercuryServer { + return &mercuryServer{privKey, reqsCh, t, 
buildReport} +} + +func (s *mercuryServer) Transmit(ctx context.Context, req *pb.TransmitRequest) (*pb.TransmitResponse, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("could not extract public key") + } + r := request{p.PublicKey, req} + s.reqsCh <- r + + return &pb.TransmitResponse{ + Code: 1, + Error: "", + }, nil +} + +func (s *mercuryServer) LatestReport(ctx context.Context, lrr *pb.LatestReportRequest) (*pb.LatestReportResponse, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("could not extract public key") + } + s.t.Logf("mercury server got latest report from %x for feed id 0x%x", p.PublicKey, lrr.FeedId) + + out := new(pb.LatestReportResponse) + out.Report = new(pb.Report) + out.Report.FeedId = lrr.FeedId + + report := s.buildReport() + payload, err := mercury.PayloadTypes.Pack(evmutil.RawReportContext(ocrtypes.ReportContext{}), report, [][32]byte{}, [][32]byte{}, [32]byte{}) + if err != nil { + panic(err) + } + out.Report.Payload = payload + return out, nil +} + +func startMercuryServer(t *testing.T, srv *mercuryServer, pubKeys []ed25519.PublicKey) (serverURL string) { + // Set up the wsrpc server + lis, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("[MAIN] failed to listen: %v", err) + } + serverURL = lis.Addr().String() + s := wsrpc.NewServer(wsrpc.Creds(srv.privKey, pubKeys)) + + // Register mercury implementation with the wsrpc server + pb.RegisterMercuryServer(s, srv) + + // Start serving + go s.Serve(lis) + t.Cleanup(s.Stop) + + return +} + +type Job struct { + name string + id [32]byte + baseBenchmarkPrice *big.Int + baseBid *big.Int + baseAsk *big.Int +} + +func randomFeedID(version uint16) [32]byte { + id := [32]byte(utils.NewHash()) + binary.BigEndian.PutUint16(id[:2], version) + return id +} + +type Node struct { + App chainlink.Application + ClientPubKey credentials.StaticSizedPublicKey + KeyBundle ocr2key.KeyBundle +} + +func (node *Node) AddJob(t *testing.T, 
spec string) { + c := node.App.GetConfig() + job, err := validate.ValidatedOracleSpecToml(c.OCR2(), c.Insecure(), spec) + require.NoError(t, err) + err = node.App.AddJobV2(testutils.Context(t), &job) + require.NoError(t, err) +} + +func (node *Node) AddBootstrapJob(t *testing.T, spec string) { + job, err := ocrbootstrap.ValidatedBootstrapSpecToml(spec) + require.NoError(t, err) + err = node.App.AddJobV2(testutils.Context(t), &job) + require.NoError(t, err) +} + +func setupNode( + t *testing.T, + port int, + dbName string, + backend *backends.SimulatedBackend, + csaKey csakey.KeyV2, +) (app chainlink.Application, peerID string, clientPubKey credentials.StaticSizedPublicKey, ocr2kb ocr2key.KeyBundle, observedLogs *observer.ObservedLogs) { + k := big.NewInt(int64(port)) // keys unique to port + p2pKey := p2pkey.MustNewV2XXXTestingOnly(k) + rdr := keystest.NewRandReaderFromSeed(int64(port)) + ocr2kb = ocr2key.MustNewInsecure(rdr, chaintype.EVM) + + p2paddresses := []string{fmt.Sprintf("127.0.0.1:%d", port)} + + config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { + // [JobPipeline] + // MaxSuccessfulRuns = 0 + c.JobPipeline.MaxSuccessfulRuns = ptr(uint64(0)) + + // [Feature] + // UICSAKeys=true + // LogPoller = true + // FeedsManager = false + c.Feature.UICSAKeys = ptr(true) + c.Feature.LogPoller = ptr(true) + c.Feature.FeedsManager = ptr(false) + + // [OCR] + // Enabled = false + c.OCR.Enabled = ptr(false) + + // [OCR2] + // Enabled = true + c.OCR2.Enabled = ptr(true) + + // [P2P] + // PeerID = '$PEERID' + // TraceLogging = true + c.P2P.PeerID = ptr(p2pKey.PeerID()) + c.P2P.TraceLogging = ptr(true) + + // [P2P.V2] + // Enabled = true + // AnnounceAddresses = ['$EXT_IP:17775'] + // ListenAddresses = ['127.0.0.1:17775'] + // DeltaDial = 500ms + // DeltaReconcile = 5s + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.AnnounceAddresses = &p2paddresses + c.P2P.V2.ListenAddresses = &p2paddresses + c.P2P.V2.DeltaDial = models.MustNewDuration(500 
* time.Millisecond) + c.P2P.V2.DeltaReconcile = models.MustNewDuration(5 * time.Second) + }) + + lggr, observedLogs := logger.TestLoggerObserved(t, zapcore.DebugLevel) + app = cltest.NewApplicationWithConfigV2OnSimulatedBlockchain(t, config, backend, p2pKey, ocr2kb, csaKey, lggr.Named(dbName)) + err := app.Start(testutils.Context(t)) + require.NoError(t, err) + + t.Cleanup(func() { + assert.NoError(t, app.Stop()) + }) + + return app, p2pKey.PeerID().Raw(), csaKey.StaticSizedPublicKey(), ocr2kb, observedLogs +} + +func ptr[T any](t T) *T { return &t } + +func addBootstrapJob(t *testing.T, bootstrapNode Node, chainID *big.Int, verifierAddress common.Address, name string) { + bootstrapNode.AddBootstrapJob(t, fmt.Sprintf(` +type = "bootstrap" +relay = "evm" +schemaVersion = 1 +name = "boot-%s" +contractID = "%s" +contractConfigTrackerPollInterval = "1s" + +[relayConfig] +chainID = %d + `, name, verifierAddress, chainID)) +} + +func addV1MercuryJob( + t *testing.T, + node Node, + i int, + verifierAddress common.Address, + bootstrapPeerID string, + bootstrapNodePort int, + bmBridge, + bidBridge, + askBridge, + serverURL string, + serverPubKey, + clientPubKey ed25519.PublicKey, + feedName string, + feedID [32]byte, + chainID *big.Int, + fromBlock int, +) { + node.AddJob(t, fmt.Sprintf(` +type = "offchainreporting2" +schemaVersion = 1 +name = "mercury-%[1]d-%[14]s" +forwardingAllowed = false +maxTaskDuration = "1s" +contractID = "%[2]s" +feedID = "0x%[11]x" +contractConfigTrackerPollInterval = "1s" +ocrKeyBundleID = "%[3]s" +p2pv2Bootstrappers = [ + "%[4]s" +] +relay = "evm" +pluginType = "mercury" +transmitterID = "%[10]x" +observationSource = """ + // Benchmark Price + price1 [type=bridge name="%[5]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + price1_parse [type=jsonparse path="result"]; + price1_multiply [type=multiply times=100000000 index=0]; + + price1 -> price1_parse -> price1_multiply; + + // Bid + bid [type=bridge 
name="%[6]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + bid_parse [type=jsonparse path="result"]; + bid_multiply [type=multiply times=100000000 index=1]; + + bid -> bid_parse -> bid_multiply; + + // Ask + ask [type=bridge name="%[7]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + ask_parse [type=jsonparse path="result"]; + ask_multiply [type=multiply times=100000000 index=2]; + + ask -> ask_parse -> ask_multiply; +""" + +[pluginConfig] +serverURL = "%[8]s" +serverPubKey = "%[9]x" +initialBlockNumber = %[13]d + +[relayConfig] +chainID = %[12]d + + `, + i, + verifierAddress, + node.KeyBundle.ID(), + fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), + bmBridge, + bidBridge, + askBridge, + serverURL, + serverPubKey, + clientPubKey, + feedID, + chainID, + fromBlock, + feedName, + )) +} + +func addV2MercuryJob( + t *testing.T, + node Node, + i int, + verifierAddress common.Address, + bootstrapPeerID string, + bootstrapNodePort int, + bmBridge, + serverURL string, + serverPubKey, + clientPubKey ed25519.PublicKey, + feedName string, + feedID [32]byte, + linkFeedID [32]byte, + nativeFeedID [32]byte, +) { + node.AddJob(t, fmt.Sprintf(` +type = "offchainreporting2" +schemaVersion = 1 +name = "mercury-%[1]d-%[10]s" +forwardingAllowed = false +maxTaskDuration = "1s" +contractID = "%[2]s" +feedID = "0x%[9]x" +contractConfigTrackerPollInterval = "1s" +ocrKeyBundleID = "%[3]s" +p2pv2Bootstrappers = [ + "%[4]s" +] +relay = "evm" +pluginType = "mercury" +transmitterID = "%[8]x" +observationSource = """ + // Benchmark Price + price1 [type=bridge name="%[5]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + price1_parse [type=jsonparse path="result"]; + price1_multiply [type=multiply times=100000000 index=0]; + + price1 -> price1_parse -> price1_multiply; +""" + +[pluginConfig] +serverURL = "%[6]s" +serverPubKey = "%[7]x" +linkFeedID = 
"0x%[11]x" +nativeFeedID = "0x%[12]x" + +[relayConfig] +chainID = 1337 + `, + i, + verifierAddress, + node.KeyBundle.ID(), + fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), + bmBridge, + serverURL, + serverPubKey, + clientPubKey, + feedID, + feedName, + linkFeedID, + nativeFeedID, + )) +} + +func addV3MercuryJob( + t *testing.T, + node Node, + i int, + verifierAddress common.Address, + bootstrapPeerID string, + bootstrapNodePort int, + bmBridge, + bidBridge, + askBridge, + serverURL string, + serverPubKey, + clientPubKey ed25519.PublicKey, + feedName string, + feedID [32]byte, + linkFeedID [32]byte, + nativeFeedID [32]byte, +) { + node.AddJob(t, fmt.Sprintf(` +type = "offchainreporting2" +schemaVersion = 1 +name = "mercury-%[1]d-%[12]s" +forwardingAllowed = false +maxTaskDuration = "1s" +contractID = "%[2]s" +feedID = "0x%[11]x" +contractConfigTrackerPollInterval = "1s" +ocrKeyBundleID = "%[3]s" +p2pv2Bootstrappers = [ + "%[4]s" +] +relay = "evm" +pluginType = "mercury" +transmitterID = "%[10]x" +observationSource = """ + // Benchmark Price + price1 [type=bridge name="%[5]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + price1_parse [type=jsonparse path="result"]; + price1_multiply [type=multiply times=100000000 index=0]; + + price1 -> price1_parse -> price1_multiply; + + // Bid + bid [type=bridge name="%[6]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + bid_parse [type=jsonparse path="result"]; + bid_multiply [type=multiply times=100000000 index=1]; + + bid -> bid_parse -> bid_multiply; + + // Ask + ask [type=bridge name="%[7]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + ask_parse [type=jsonparse path="result"]; + ask_multiply [type=multiply times=100000000 index=2]; + + ask -> ask_parse -> ask_multiply; +""" + +[pluginConfig] +serverURL = "%[8]s" +serverPubKey = "%[9]x" +linkFeedID = "0x%[13]x" +nativeFeedID = 
"0x%[14]x" + +[relayConfig] +chainID = 1337 + `, + i, + verifierAddress, + node.KeyBundle.ID(), + fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), + bmBridge, + bidBridge, + askBridge, + serverURL, + serverPubKey, + clientPubKey, + feedID, + feedName, + linkFeedID, + nativeFeedID, + )) +} diff --git a/core/services/ocr2/plugins/streams/integration_test.go b/core/services/ocr2/plugins/streams/integration_test.go new file mode 100644 index 00000000000..1a21dff0891 --- /dev/null +++ b/core/services/ocr2/plugins/streams/integration_test.go @@ -0,0 +1,364 @@ +package streams_test + +import ( + "crypto/ed25519" + "encoding/hex" + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" + + "github.com/smartcontractkit/chainlink-data-streams/streams" + "github.com/smartcontractkit/libocr/gethwrappers2/ocrconfigurationstoreevmsimple" + "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3confighelper" + ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/smartcontractkit/wsrpc/credentials" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/stream_config_store" + "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey" + 
"github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury" +) + +var ( + f = uint8(1) + n = 4 // number of nodes + multiplier int64 = 100000000 +) + +func setupBlockchain(t *testing.T) (*bind.TransactOpts, *backends.SimulatedBackend, *stream_config_store.StreamConfigStore, common.Address) { + steve := testutils.MustNewSimTransactor(t) // config contract deployer and owner + genesisData := core.GenesisAlloc{steve.From: {Balance: assets.Ether(1000).ToInt()}} + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + backend.Commit() // ensure starting block number at least 1 + stopMining := cltest.Mine(backend, 1*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + t.Cleanup(stopMining) + + // Deploy contracts + verifierAddress, _, verifierContract, err := stream_config_store.DeployStreamConfigStore(steve, backend) + require.NoError(t, err) + + // linkTokenAddress, _, linkToken, err := link_token_interface.DeployLinkToken(steve, backend) + // require.NoError(t, err) + // _, err = linkToken.Transfer(steve, steve.From, big.NewInt(1000)) + // require.NoError(t, err) + // nativeTokenAddress, _, nativeToken, err := link_token_interface.DeployLinkToken(steve, backend) + // require.NoError(t, err) + // _, err = nativeToken.Transfer(steve, steve.From, big.NewInt(1000)) + // require.NoError(t, err) + // verifierProxyAddr, _, verifierProxy, err := verifier_proxy.DeployVerifierProxy(steve, backend, common.Address{}) // zero address for access controller disables access control + // require.NoError(t, err) + // verifierAddress, _, verifier, err := verifier.DeployVerifier(steve, backend, verifierProxyAddr) + // require.NoError(t, err) + // _, err = verifierProxy.InitializeVerifier(steve, verifierAddress) + // require.NoError(t, err) + // rewardManagerAddr, _, rewardManager, err := 
reward_manager.DeployRewardManager(steve, backend, linkTokenAddress) + // require.NoError(t, err) + // feeManagerAddr, _, _, err := fee_manager.DeployFeeManager(steve, backend, linkTokenAddress, nativeTokenAddress, verifierProxyAddr, rewardManagerAddr) + // require.NoError(t, err) + // _, err = verifierProxy.SetFeeManager(steve, feeManagerAddr) + // require.NoError(t, err) + // _, err = rewardManager.SetFeeManager(steve, feeManagerAddr) + // require.NoError(t, err) + + backend.Commit() + + return steve, backend, verifierContract, verifierAddress +} + +func detectPanicLogs(t *testing.T, logObservers []*observer.ObservedLogs) { + var panicLines []string + for _, observedLogs := range logObservers { + panicLogs := observedLogs.Filter(func(e observer.LoggedEntry) bool { + return e.Level >= zapcore.DPanicLevel + }) + for _, log := range panicLogs.All() { + line := fmt.Sprintf("%v\t%s\t%s\t%s\t%s", log.Time.Format(time.RFC3339), log.Level.CapitalString(), log.LoggerName, log.Caller.TrimmedPath(), log.Message) + panicLines = append(panicLines, line) + } + } + if len(panicLines) > 0 { + t.Errorf("Found logs with DPANIC or higher level:\n%s", strings.Join(panicLines, "\n")) + } +} + +// type mercuryServer struct { +// privKey ed25519.PrivateKey +// reqsCh chan request +// t *testing.T +// } + +// func NewMercuryServer(t *testing.T, privKey ed25519.PrivateKey, reqsCh chan request) *mercuryServer { +// return &mercuryServer{privKey, reqsCh, t} +// } + +// func (s *mercuryServer) Transmit(ctx context.Context, req *pb.TransmitRequest) (*pb.TransmitResponse, error) { +// p, ok := peer.FromContext(ctx) +// if !ok { +// return nil, errors.New("could not extract public key") +// } +// r := request{p.PublicKey, req} +// s.reqsCh <- r + +// return &pb.TransmitResponse{ +// Code: 1, +// Error: "", +// }, nil +// } + +// func (s *mercuryServer) LatestReport(ctx context.Context, lrr *pb.LatestReportRequest) (*pb.LatestReportResponse, error) { +// panic("not needed for llo") +// } + 
+func TestIntegration_Streams(t *testing.T) { + // TODO: + + t.Parallel() + + var logObservers []*observer.ObservedLogs + t.Cleanup(func() { + detectPanicLogs(t, logObservers) + }) + const fromBlock = 1 // cannot use zero, start from block 1 + // testStartTimeStamp := uint32(time.Now().Unix()) + + reqs := make(chan request) + serverKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(-1)) + serverPubKey := serverKey.PublicKey + srv := NewMercuryServer(t, ed25519.PrivateKey(serverKey.Raw()), reqs, nil) + clientCSAKeys := make([]csakey.KeyV2, n+1) + clientPubKeys := make([]ed25519.PublicKey, n+1) + for i := 0; i < n+1; i++ { + k := big.NewInt(int64(i)) + key := csakey.MustNewV2XXXTestingOnly(k) + clientCSAKeys[i] = key + clientPubKeys[i] = key.PublicKey + } + serverURL := startMercuryServer(t, srv, clientPubKeys) + chainID := testutils.SimulatedChainID + + steve, backend, configContract, configAddress := setupBlockchain(t) + // TODO + + // Setup bootstrap + oracle nodes + bootstrapNodePort := freeport.GetOne(t) + appBootstrap, bootstrapPeerID, _, bootstrapKb, observedLogs := setupNode(t, bootstrapNodePort, "bootstrap_mercury", backend, clientCSAKeys[n]) + bootstrapNode := Node{App: appBootstrap, KeyBundle: bootstrapKb} + logObservers = append(logObservers, observedLogs) + + // Set up n oracles + var ( + oracles []confighelper.OracleIdentityExtra + nodes []Node + ) + ports := freeport.GetN(t, n) + for i := 0; i < n; i++ { + app, peerID, transmitter, kb, observedLogs := setupNode(t, ports[i], fmt.Sprintf("oracle_streams_%d", i), backend, clientCSAKeys[i]) + + nodes = append(nodes, Node{ + app, transmitter, kb, + }) + offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x")) + oracles = append(oracles, confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnchainPublicKey: offchainPublicKey, + TransmitAccount: ocr2types.Account(fmt.Sprintf("%x", transmitter[:])), + OffchainPublicKey: kb.OffchainPublicKey(), + 
PeerID:            peerID,
+			},
+			ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(),
+		})
+		logObservers = append(logObservers, observedLogs)
+	}
+
+	addBootstrapJob(t, bootstrapNode, chainID, configAddress, "job-1")
+
+	// createBridge := func(name string, i int, p *big.Int, borm bridges.ORM) (bridgeName string) {
+	// 	bridge := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
+	// 		b, err := io.ReadAll(req.Body)
+	// 		require.NoError(t, err)
+	// 		require.Equal(t, `{"data":{"from":"ETH","to":"USD"}}`, string(b))
+
+	// 		res.WriteHeader(http.StatusOK)
+	// 		val := decimal.NewFromBigInt(p, 0).Div(decimal.NewFromInt(multiplier)).Add(decimal.NewFromInt(int64(i)).Div(decimal.NewFromInt(100))).String()
+	// 		resp := fmt.Sprintf(`{"result": %s}`, val)
+	// 		_, err = res.Write([]byte(resp))
+	// 		require.NoError(t, err)
+	// 	}))
+	// 	t.Cleanup(bridge.Close)
+	// 	u, _ := url.Parse(bridge.URL)
+	// 	bridgeName = fmt.Sprintf("bridge-%s-%d", name, i)
+	// 	require.NoError(t, borm.CreateBridgeType(&bridges.BridgeType{
+	// 		Name: bridges.BridgeName(bridgeName),
+	// 		URL:  models.WebURL(*u),
+	// 	}))
+
+	// 	return bridgeName
+	// }
+
+	// Add OCR jobs - one per feed on each node
+	for i, node := range nodes {
+		addStreamsJob(
+			t,
+			node,
+			configAddress,
+			bootstrapPeerID,
+			bootstrapNodePort,
+			serverURL,
+			serverPubKey,
+			clientPubKeys[i],
+			"feed-1",
+			chainID,
+			fromBlock,
+		)
+	}
+
+	// Setup config on contract
+	rawOnchainConfig := streams.OnchainConfig{}
+	// TODO: Move away from JSON
+	onchainConfig, err := (&streams.JSONOnchainConfigCodec{}).Encode(rawOnchainConfig)
+	require.NoError(t, err)
+
+	rawReportingPluginConfig := streams.OffchainConfig{}
+	reportingPluginConfig, err := rawReportingPluginConfig.Encode()
+	require.NoError(t, err)
+
+	signers, _, _, onchainConfig, offchainConfigVersion, offchainConfig, err := ocr3confighelper.ContractSetConfigArgsForTestsMercuryV02(
+		2*time.Second,        // DeltaProgress
+		20*time.Second,       // DeltaResend
+		400*time.Millisecond, // 
DeltaInitial + 100*time.Millisecond, // DeltaRound + 0, // DeltaGrace + 300*time.Millisecond, // DeltaCertifiedCommitRequest + 1*time.Minute, // DeltaStage + 100, // rMax + []int{len(nodes)}, // S + oracles, + reportingPluginConfig, // reportingPluginConfig []byte, + 250*time.Millisecond, // Max duration observation + int(f), // f + onchainConfig, + ) + + require.NoError(t, err) + signerAddresses, err := evm.OnchainPublicKeyToAddress(signers) + require.NoError(t, err) + + offchainTransmitters := make([][32]byte, n) + for i := 0; i < n; i++ { + offchainTransmitters[i] = nodes[i].ClientPubKey + } + + cfg := ocrconfigurationstoreevmsimple.OCRConfigurationStoreEVMSimpleConfigurationEVMSimple{ + signerAddresses, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + nil, + } + _, err = configContract.AddConfig( + steve, + cfg, + ) + require.NoError(t, err) + backend.Commit() + + // Bury it with finality depth + ch, err := bootstrapNode.App.GetRelayers().LegacyEVMChains().Get(testutils.SimulatedChainID.String()) + require.NoError(t, err) + finalityDepth := ch.Config().EVM().FinalityDepth() + for i := 0; i < int(finalityDepth); i++ { + backend.Commit() + } + + t.Run("receives at least one report per feed from each oracle when EAs are at 100% reliability", func(t *testing.T) { + // Expect at least one report per feed from each oracle + seen := make(map[credentials.StaticSizedPublicKey]struct{}) + + for req := range reqs { + v := make(map[string]interface{}) + err := mercury.PayloadTypes.UnpackIntoMap(v, req.req.Payload) + require.NoError(t, err) + report, exists := v["report"] + if !exists { + t.Fatalf("expected payload %#v to contain 'report'", v) + } + + assert.Equal(t, "foo", report) + + seen[req.pk] = struct{}{} + if len(seen) == n { + t.Logf("all oracles reported") + break // saw all oracles; success! 
+ } + } + }) +} + +func addStreamsJob( + t *testing.T, + node Node, + verifierAddress common.Address, + bootstrapPeerID string, + bootstrapNodePort int, + serverURL string, + serverPubKey, + clientPubKey ed25519.PublicKey, + jobName string, + chainID *big.Int, + fromBlock int, +) { + node.AddJob(t, fmt.Sprintf(` +type = "offchainreporting2" +schemaVersion = 1 +name = "%[1]s" +forwardingAllowed = false +maxTaskDuration = "1s" +contractID = "%[2]s" +contractConfigTrackerPollInterval = "1s" +ocrKeyBundleID = "%[3]s" +p2pv2Bootstrappers = [ + "%[4]s" +] +relay = "evm" +pluginType = "streams" +transmitterID = "%[5]x" + +[pluginConfig] +serverURL = "%[6]s" +serverPubKey = "%[7]x" +fromBlock = %[8]d + +[relayConfig] +chainID = %[9]d + + `, + jobName, + verifierAddress, + node.KeyBundle.ID(), + fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), + clientPubKey, + serverURL, + serverPubKey, + fromBlock, + chainID, + )) +} diff --git a/core/services/ocr2/plugins/streams/plugin.go b/core/services/ocr2/plugins/streams/plugin.go new file mode 100644 index 00000000000..d8133764ae1 --- /dev/null +++ b/core/services/ocr2/plugins/streams/plugin.go @@ -0,0 +1 @@ +package streams diff --git a/core/services/ocr2/validate/validate.go b/core/services/ocr2/validate/validate.go index bb9bb03a8ac..c536e035869 100644 --- a/core/services/ocr2/validate/validate.go +++ b/core/services/ocr2/validate/validate.go @@ -17,6 +17,7 @@ import ( dkgconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/dkg/config" mercuryconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury/config" ocr2vrfconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2vrf/config" + streamsconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/streams/config" "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon" "github.com/smartcontractkit/chainlink/v2/core/services/relay" ) @@ -114,6 +115,8 @@ func 
validateSpec(tree *toml.Tree, spec job.Job) error { return nil case types.Mercury: return validateOCR2MercurySpec(spec.OCR2OracleSpec.PluginConfig, *spec.OCR2OracleSpec.FeedID) + case types.Streams: + return validateOCR2StreamsSpec(spec.OCR2OracleSpec.PluginConfig) case types.GenericPlugin: return validateOCR2GenericPluginSpec(spec.OCR2OracleSpec.PluginConfig) case "": @@ -255,3 +258,12 @@ func validateOCR2MercurySpec(jsonConfig job.JSONConfig, feedId [32]byte) error { } return pkgerrors.Wrap(mercuryconfig.ValidatePluginConfig(pluginConfig, feedId), "Mercury PluginConfig is invalid") } + +func validateOCR2StreamsSpec(jsonConfig job.JSONConfig) error { + var pluginConfig streamsconfig.PluginConfig + err := json.Unmarshal(jsonConfig.Bytes(), &pluginConfig) + if err != nil { + return pkgerrors.Wrap(err, "error while unmarshaling plugin config") + } + return pkgerrors.Wrap(pluginConfig.Validate(), "Streams PluginConfig is invalid") +} diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go index 303cdd3ba0e..aee5de38e03 100644 --- a/core/services/relay/evm/evm.go +++ b/core/services/relay/evm/evm.go @@ -31,6 +31,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/keystore" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ethkey" mercuryconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury/config" + streamsconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/streams/config" "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon" "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/functions" @@ -41,6 +42,7 @@ import ( reportcodecv3 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v3/reportcodec" "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc" "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types" + 
"github.com/smartcontractkit/chainlink/v2/core/services/streams" ) var _ commontypes.Relayer = &Relayer{} //nolint:staticcheck @@ -188,11 +190,56 @@ func (r *Relayer) NewMercuryProvider(rargs commontypes.RelayArgs, pargs commonty default: return nil, fmt.Errorf("invalid feed version %d", feedID.Version()) } - transmitter := mercury.NewTransmitter(lggr, cw.ContractConfigTracker(), client, privKey.PublicKey, rargs.JobID, *relayConfig.FeedID, r.db, r.pgCfg, transmitterCodec) + transmitter := mercury.NewTransmitter(lggr, client, privKey.PublicKey, rargs.JobID, *relayConfig.FeedID, r.db, r.pgCfg, transmitterCodec) return NewMercuryProvider(cw, r.chainReader, NewMercuryChainReader(r.chain.HeadTracker()), transmitter, reportCodecV1, reportCodecV2, reportCodecV3, lggr), nil } +func (r *Relayer) NewStreamsProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.StreamsProvider, error) { + // TODO + relayOpts := types.NewRelayOpts(rargs) + relayConfig, err := relayOpts.RelayConfig() + if err != nil { + return nil, fmt.Errorf("failed to get relay config: %w", err) + } + + var streamsConfig streamsconfig.PluginConfig + if err := json.Unmarshal(pargs.PluginConfig, &streamsConfig); err != nil { + return nil, pkgerrors.WithStack(err) + } + if err := streamsConfig.Validate(); err != nil { + return nil, err + } + + if relayConfig.ChainID.String() != r.chain.ID().String() { + return nil, fmt.Errorf("internal error: chain id in spec does not match this relayer's chain: have %s expected %s", relayConfig.ChainID.String(), r.chain.ID().String()) + } + configWatcher, err := newConfigProvider(r.lggr, r.chain, relayOpts, r.eventBroadcaster) + if err != nil { + return nil, pkgerrors.WithStack(err) + } + + if !relayConfig.EffectiveTransmitterID.Valid { + return nil, pkgerrors.New("EffectiveTransmitterID must be specified") + } + privKey, err := r.ks.CSA().Get(relayConfig.EffectiveTransmitterID.String) + if err != nil { + return nil, pkgerrors.Wrap(err, "failed to get 
CSA key for mercury connection") + } + + client, err := r.mercuryPool.Checkout(context.Background(), privKey, streamsConfig.ServerPubKey, streamsConfig.ServerURL()) + if err != nil { + return nil, err + } + + // FIXME + // transmitter := streamsNewTransmitter(r.lggr, configWatcher.ContractConfigTracker(), client, privKey.PublicKey, rargs.JobID, r.db, r.pgCfg) + transmitter := streams.NewTransmitter(r.lggr, client, privKey.PublicKey) + cdc := streams.NewChannelDefinitionCache(r.lggr, orm, d.logpoller) + + return NewStreamsProvider(configWatcher, transmitter, r.lggr, cdc), nil +} + func (r *Relayer) NewFunctionsProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.FunctionsProvider, error) { lggr := r.lggr.Named("FunctionsProvider").Named(rargs.ExternalJobID.String()) // TODO(FUN-668): Not ready yet (doesn't implement FunctionsEvents() properly) diff --git a/core/services/relay/evm/functions/logpoller_wrapper.go b/core/services/relay/evm/functions/logpoller_wrapper.go index e7f3a1a96af..fa85e645da5 100644 --- a/core/services/relay/evm/functions/logpoller_wrapper.go +++ b/core/services/relay/evm/functions/logpoller_wrapper.go @@ -327,7 +327,7 @@ func (l *logPollerWrapper) SubscribeToUpdates(subscriberName string, subscriber if l.pluginConfig.ContractVersion == 0 { // in V0, immediately set contract address to Oracle contract and never update again if err := subscriber.UpdateRoutes(l.routerContract.Address(), l.routerContract.Address()); err != nil { - l.lggr.Errorw("LogPollerWrapper: Failed to update routes", "subscriberName", subscriberName, "error", err) + l.lggr.Errorw("LogPollerWrapper: Failed to update routes", "subscriberName", subscriberName, "err", err) } } else if l.pluginConfig.ContractVersion == 1 { l.mu.Lock() @@ -422,7 +422,7 @@ func (l *logPollerWrapper) handleRouteUpdate(activeCoordinator common.Address, p for _, subscriber := range l.subscribers { err := subscriber.UpdateRoutes(activeCoordinator, proposedCoordinator) if err 
!= nil { - l.lggr.Errorw("LogPollerWrapper: Failed to update routes", "error", err) + l.lggr.Errorw("LogPollerWrapper: Failed to update routes", "err", err) } } } diff --git a/core/services/relay/evm/mercury/transmitter.go b/core/services/relay/evm/mercury/transmitter.go index 40a51b9d92d..7244144af81 100644 --- a/core/services/relay/evm/mercury/transmitter.go +++ b/core/services/relay/evm/mercury/transmitter.go @@ -107,7 +107,6 @@ type mercuryTransmitter struct { services.StateMachine lggr logger.Logger rpcClient wsrpc.Client - cfgTracker ConfigTracker persistenceManager *PersistenceManager codec TransmitterReportDecoder @@ -148,14 +147,13 @@ func getPayloadTypes() abi.Arguments { }) } -func NewTransmitter(lggr logger.Logger, cfgTracker ConfigTracker, rpcClient wsrpc.Client, fromAccount ed25519.PublicKey, jobID int32, feedID [32]byte, db *sqlx.DB, cfg pg.QConfig, codec TransmitterReportDecoder) *mercuryTransmitter { +func NewTransmitter(lggr logger.Logger, rpcClient wsrpc.Client, fromAccount ed25519.PublicKey, jobID int32, feedID [32]byte, db *sqlx.DB, cfg pg.QConfig, codec TransmitterReportDecoder) *mercuryTransmitter { feedIDHex := fmt.Sprintf("0x%x", feedID[:]) persistenceManager := NewPersistenceManager(lggr, NewORM(db, lggr, cfg), jobID, maxTransmitQueueSize, flushDeletesFrequency, pruneFrequency) return &mercuryTransmitter{ services.StateMachine{}, lggr.Named("MercuryTransmitter").With("feedID", feedIDHex), rpcClient, - cfgTracker, persistenceManager, codec, feedID, @@ -241,7 +239,7 @@ func (mt *mercuryTransmitter) runDeleteQueueLoop() { case req := <-mt.deleteQueue: for { if err := mt.persistenceManager.Delete(runloopCtx, req); err != nil { - mt.lggr.Errorw("Failed to delete transmit request record", "error", err, "req", req) + mt.lggr.Errorw("Failed to delete transmit request record", "err", err, "req", req) mt.transmitQueueDeleteErrorCount.Inc() select { case <-time.After(b.Duration()): diff --git a/core/services/relay/evm/mercury/transmitter_test.go 
b/core/services/relay/evm/mercury/transmitter_test.go index c8a68d41a16..942a89b291d 100644 --- a/core/services/relay/evm/mercury/transmitter_test.go +++ b/core/services/relay/evm/mercury/transmitter_test.go @@ -27,6 +27,7 @@ func Test_MercuryTransmitter_Transmit(t *testing.T) { pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) q := NewTransmitQueue(lggr, "", 0, nil, nil) + codec := new(mockCodec) t.Run("v1 report transmission successfully enqueued", func(t *testing.T) { report := sampleV1Report @@ -40,7 +41,8 @@ func Test_MercuryTransmitter_Transmit(t *testing.T) { return out, nil }, } - mt := NewTransmitter(lggr, nil, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), nil) + // func NewTransmitter(lggr logger.Logger, rpcClient wsrpc.Client, fromAccount ed25519.PublicKey, jobID int32, feedID [32]byte, db *sqlx.DB, cfg pg.QConfig, codec TransmitterReportDecoder) *mercuryTransmitter { + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) mt.queue = q err := mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs) @@ -58,7 +60,7 @@ func Test_MercuryTransmitter_Transmit(t *testing.T) { return out, nil }, } - mt := NewTransmitter(lggr, nil, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), nil) + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) mt.queue = q err := mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs) @@ -76,7 +78,7 @@ func Test_MercuryTransmitter_Transmit(t *testing.T) { return out, nil }, } - mt := NewTransmitter(lggr, nil, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), nil) + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) mt.queue = q err := 
mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs) @@ -88,6 +90,8 @@ func Test_MercuryTransmitter_LatestTimestamp(t *testing.T) { t.Parallel() lggr := logger.TestLogger(t) db := pgtest.NewSqlxDB(t) + var jobID int32 + codec := new(mockCodec) t.Run("successful query", func(t *testing.T) { c := mocks.MockWSRPCClient{ @@ -101,7 +105,7 @@ func Test_MercuryTransmitter_LatestTimestamp(t *testing.T) { return out, nil }, } - mt := NewTransmitter(lggr, nil, c, sampleClientPubKey, 0, sampleFeedID, db, pgtest.NewQConfig(true), nil) + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) ts, err := mt.LatestTimestamp(testutils.Context(t)) require.NoError(t, err) @@ -116,7 +120,7 @@ func Test_MercuryTransmitter_LatestTimestamp(t *testing.T) { return out, nil }, } - mt := NewTransmitter(lggr, nil, c, sampleClientPubKey, 0, sampleFeedID, db, pgtest.NewQConfig(true), nil) + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) ts, err := mt.LatestTimestamp(testutils.Context(t)) require.NoError(t, err) @@ -129,7 +133,7 @@ func Test_MercuryTransmitter_LatestTimestamp(t *testing.T) { return nil, errors.New("something exploded") }, } - mt := NewTransmitter(lggr, nil, c, sampleClientPubKey, 0, sampleFeedID, db, pgtest.NewQConfig(true), nil) + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) _, err := mt.LatestTimestamp(testutils.Context(t)) require.Error(t, err) assert.Contains(t, err.Error(), "something exploded") @@ -151,6 +155,7 @@ func Test_MercuryTransmitter_LatestPrice(t *testing.T) { t.Parallel() lggr := logger.TestLogger(t) db := pgtest.NewSqlxDB(t) + var jobID int32 codec := new(mockCodec) @@ -167,7 +172,7 @@ func Test_MercuryTransmitter_LatestPrice(t *testing.T) { return out, nil }, } - mt := NewTransmitter(lggr, nil, c, sampleClientPubKey, 0, sampleFeedID, db, pgtest.NewQConfig(true), 
codec) + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) t.Run("BenchmarkPriceFromReport succeeds", func(t *testing.T) { codec.val = originalPrice @@ -197,7 +202,7 @@ func Test_MercuryTransmitter_LatestPrice(t *testing.T) { return out, nil }, } - mt := NewTransmitter(lggr, nil, c, sampleClientPubKey, 0, sampleFeedID, db, pgtest.NewQConfig(true), nil) + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) price, err := mt.LatestPrice(testutils.Context(t), sampleFeedID) require.NoError(t, err) @@ -210,7 +215,7 @@ func Test_MercuryTransmitter_LatestPrice(t *testing.T) { return nil, errors.New("something exploded") }, } - mt := NewTransmitter(lggr, nil, c, sampleClientPubKey, 0, sampleFeedID, db, pgtest.NewQConfig(true), nil) + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) _, err := mt.LatestPrice(testutils.Context(t), sampleFeedID) require.Error(t, err) assert.Contains(t, err.Error(), "something exploded") @@ -222,6 +227,8 @@ func Test_MercuryTransmitter_FetchInitialMaxFinalizedBlockNumber(t *testing.T) { lggr := logger.TestLogger(t) db := pgtest.NewSqlxDB(t) + var jobID int32 + codec := new(mockCodec) t.Run("successful query", func(t *testing.T) { c := mocks.MockWSRPCClient{ @@ -235,7 +242,7 @@ func Test_MercuryTransmitter_FetchInitialMaxFinalizedBlockNumber(t *testing.T) { return out, nil }, } - mt := NewTransmitter(lggr, nil, c, sampleClientPubKey, 0, sampleFeedID, db, pgtest.NewQConfig(true), nil) + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) bn, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) require.NoError(t, err) @@ -250,7 +257,7 @@ func Test_MercuryTransmitter_FetchInitialMaxFinalizedBlockNumber(t *testing.T) { return out, nil }, } - mt := NewTransmitter(lggr, nil, c, sampleClientPubKey, 0, sampleFeedID, 
db, pgtest.NewQConfig(true), nil) + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) bn, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) require.NoError(t, err) @@ -262,7 +269,7 @@ func Test_MercuryTransmitter_FetchInitialMaxFinalizedBlockNumber(t *testing.T) { return nil, errors.New("something exploded") }, } - mt := NewTransmitter(lggr, nil, c, sampleClientPubKey, 0, sampleFeedID, db, pgtest.NewQConfig(true), nil) + mt := NewTransmitter(lggr, c, sampleClientPubKey, 0, sampleFeedID, db, pgtest.NewQConfig(true), codec) _, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) require.Error(t, err) assert.Contains(t, err.Error(), "something exploded") @@ -279,7 +286,7 @@ func Test_MercuryTransmitter_FetchInitialMaxFinalizedBlockNumber(t *testing.T) { return out, nil }, } - mt := NewTransmitter(lggr, nil, c, sampleClientPubKey, 0, sampleFeedID, db, pgtest.NewQConfig(true), nil) + mt := NewTransmitter(lggr, c, sampleClientPubKey, 0, sampleFeedID, db, pgtest.NewQConfig(true), codec) _, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) require.Error(t, err) assert.Contains(t, err.Error(), "latestReport failed; mismatched feed IDs, expected: 0x1c916b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472, got: 0x") diff --git a/core/services/relay/evm/mercury/v1/data_source.go b/core/services/relay/evm/mercury/v1/data_source.go index ce48ec6cf94..7f41bd1e36c 100644 --- a/core/services/relay/evm/mercury/v1/data_source.go +++ b/core/services/relay/evm/mercury/v1/data_source.go @@ -295,7 +295,7 @@ func (ds *datasource) setLatestBlocks(ctx context.Context, obs *v1types.Observat latestBlocks, err := ds.mercuryChainReader.LatestHeads(ctx, nBlocksObservation) if err != nil { - ds.lggr.Errorw("failed to read latest blocks", "error", err) + ds.lggr.Errorw("failed to read latest blocks", "err", err) return err } diff --git 
a/core/services/relay/evm/mocks/loop_relay_adapter.go b/core/services/relay/evm/mocks/loop_relay_adapter.go index 5b927f1b8ac..7a0bdf0b5f9 100644 --- a/core/services/relay/evm/mocks/loop_relay_adapter.go +++ b/core/services/relay/evm/mocks/loop_relay_adapter.go @@ -226,6 +226,36 @@ func (_m *LoopRelayAdapter) NewPluginProvider(_a0 context.Context, _a1 types.Rel return r0, r1 } +// NewStreamsProvider provides a mock function with given fields: _a0, _a1, _a2 +func (_m *LoopRelayAdapter) NewStreamsProvider(_a0 context.Context, _a1 types.RelayArgs, _a2 types.PluginArgs) (types.StreamsProvider, error) { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for NewStreamsProvider") + } + + var r0 types.StreamsProvider + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.RelayArgs, types.PluginArgs) (types.StreamsProvider, error)); ok { + return rf(_a0, _a1, _a2) + } + if rf, ok := ret.Get(0).(func(context.Context, types.RelayArgs, types.PluginArgs) types.StreamsProvider); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.StreamsProvider) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, types.RelayArgs, types.PluginArgs) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // Ready provides a mock function with given fields: func (_m *LoopRelayAdapter) Ready() error { ret := _m.Called() diff --git a/core/services/relay/evm/streams_provider.go b/core/services/relay/evm/streams_provider.go new file mode 100644 index 00000000000..c576718748e --- /dev/null +++ b/core/services/relay/evm/streams_provider.go @@ -0,0 +1,86 @@ +package evm + +import ( + "context" + "errors" + + "github.com/smartcontractkit/chainlink-common/pkg/services" + commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" + relaytypes "github.com/smartcontractkit/chainlink-common/pkg/types" + datastreams 
"github.com/smartcontractkit/chainlink-data-streams/streams" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/streams" +) + +var _ commontypes.StreamsProvider = (*streamsProvider)(nil) + +type streamsProvider struct { + configWatcher *configWatcher + transmitter streams.Transmitter + logger logger.Logger + channelDefinitionCache datastreams.ChannelDefinitionCache + + ms services.MultiStart +} + +func NewStreamsProvider( + configWatcher *configWatcher, + transmitter streams.Transmitter, + lggr logger.Logger, + channelDefinitionCache datastreams.ChannelDefinitionCache, +) relaytypes.StreamsProvider { + return &streamsProvider{ + configWatcher, + transmitter, + lggr, + channelDefinitionCache, + services.MultiStart{}, + } +} + +func (p *streamsProvider) Start(ctx context.Context) error { + return p.ms.Start(ctx, p.configWatcher, p.transmitter, p.channelDefinitionCache) +} + +func (p *streamsProvider) Close() error { + return p.ms.Close() +} + +func (p *streamsProvider) Ready() error { + return errors.Join(p.configWatcher.Ready(), p.transmitter.Ready(), p.channelDefinitionCache.Ready()) +} + +func (p *streamsProvider) Name() string { + return p.logger.Name() +} + +func (p *streamsProvider) HealthReport() map[string]error { + report := map[string]error{} + services.CopyHealth(report, p.configWatcher.HealthReport()) + services.CopyHealth(report, p.transmitter.HealthReport()) + services.CopyHealth(report, p.channelDefinitionCache.HealthReport()) + return report +} + +func (p *streamsProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker { + return p.configWatcher.ContractConfigTracker() +} + +func (p *streamsProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester { + return p.configWatcher.OffchainConfigDigester() +} + +func (p *streamsProvider) OnchainConfigCodec() datastreams.OnchainConfigCodec { + // TODO: 
This should probably be moved to core since its chain-specific + return &datastreams.JSONOnchainConfigCodec{} +} + +func (p *streamsProvider) ContractTransmitter() commontypes.StreamsTransmitter { + return p.transmitter +} + +func (p *streamsProvider) ChannelDefinitionCache() datastreams.ChannelDefinitionCache { + return p.channelDefinitionCache +} diff --git a/core/services/streams/channel_definition_cache.go b/core/services/streams/channel_definition_cache.go new file mode 100644 index 00000000000..5a0e94383b6 --- /dev/null +++ b/core/services/streams/channel_definition_cache.go @@ -0,0 +1,228 @@ +package streams + +import ( + "context" + "fmt" + "maps" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" + commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" + "github.com/smartcontractkit/chainlink-data-streams/streams" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/stream_config_store" + "github.com/smartcontractkit/chainlink/v2/core/services/pg" + "github.com/smartcontractkit/chainlink/v2/core/utils" +) + +// // TODO: needs to be populated asynchronously from onchain ConfigurationStore +// type ChannelDefinitionCache interface { +// // TODO: Would this necessarily need to be scoped by contract address? +// Definitions() ChannelDefinitions +// services.Service +// } + +type ChannelDefinitionCacheORM interface { + // TODO: What about delete/cleanup? 
+ LoadChannelDefinitions(ctx context.Context) (cd streams.ChannelDefinitions, blockNum int64, err error) + StoreChannelDefinitions(ctx context.Context, cd streams.ChannelDefinitions) (err error) +} + +var streamConfigStoreABI abi.ABI + +func init() { + var err error + streamConfigStoreABI, err = abi.JSON(strings.NewReader(stream_config_store.StreamConfigStoreABI)) + if err != nil { + panic(err) + } +} + +var _ streams.ChannelDefinitionCache = &channelDefinitionCache{} + +type channelDefinitionCache struct { + services.StateMachine + + orm ChannelDefinitionCacheORM + + filterName string + lp logpoller.LogPoller + fromBlock int64 + addr common.Address + lggr logger.Logger + + definitionsMu sync.RWMutex + definitions streams.ChannelDefinitions + definitionsBlockNum int64 + + wg sync.WaitGroup + chStop chan struct{} +} + +var ( + topicNewChannelDefinition = (stream_config_store.StreamConfigStoreNewChannelDefinition{}).Topic() + topicChannelDefinitionRemoved = (stream_config_store.StreamConfigStoreChannelDefinitionRemoved{}).Topic() + topicNewProductionConfig = (stream_config_store.StreamConfigStoreNewProductionConfig{}).Topic() + topicNewStagingConfig = (stream_config_store.StreamConfigStoreNewStagingConfig{}).Topic() + topicPromoteStagingConfig = (stream_config_store.StreamConfigStorePromoteStagingConfig{}).Topic() + + allTopics = []common.Hash{topicNewChannelDefinition, topicChannelDefinitionRemoved, topicNewProductionConfig, topicNewStagingConfig, topicPromoteStagingConfig} +) + +func NewChannelDefinitionCache(lggr logger.Logger, orm ChannelDefinitionCacheORM, lp logpoller.LogPoller, addr common.Address, fromBlock int64) streams.ChannelDefinitionCache { + filterName := logpoller.FilterName("OCR3 Streams ChannelDefinitionCachePoller", addr.String()) + return &channelDefinitionCache{ + services.StateMachine{}, + orm, + filterName, + lp, + 0, // TODO: fromblock needs to be loaded from DB cache somehow because we don't want to scan all logs every time we start this job 
+ addr, + lggr.Named("ChannelDefinitionCache").With("addr", addr), + sync.RWMutex{}, + make(streams.ChannelDefinitions), + fromBlock, + sync.WaitGroup{}, + make(chan struct{}), + } +} + +// TODO: Needs a way to subscribe/unsubscribe to contracts + +func (c *channelDefinitionCache) Start(ctx context.Context) error { + // TODO: Initial load, then poll + // TODO: needs to be populated asynchronously from onchain ConfigurationStore + return c.StartOnce("ChannelDefinitionCache", func() (err error) { + err = c.lp.RegisterFilter(logpoller.Filter{Name: c.filterName, EventSigs: allTopics, Addresses: []common.Address{c.addr}}, pg.WithParentCtx(ctx)) + if err != nil { + return err + } + c.definitions, c.definitionsBlockNum, err = c.orm.LoadChannelDefinitions(ctx) + if err != nil { + return err + } + c.wg.Add(1) + go c.poll() + return nil + }) +} + +// TODO: make this configurable? +const pollInterval = 5 * time.Second + +func (c *channelDefinitionCache) poll() { + defer c.wg.Done() + + pollT := time.NewTicker(utils.WithJitter(pollInterval)) + + for { + select { + case <-c.chStop: + return + case <-pollT.C: + latest, err := c.lp.LatestBlock() + if err != nil { + panic("TODO") + } + toBlock := latest.BlockNumber + // TODO: Pass context + + fromBlock := c.definitionsBlockNum + + if toBlock <= fromBlock { + continue + } + + // NOTE: We assume that log poller returns logs in ascending order chronologically + // Must query with the same topic set we registered the filter with; an empty sig list would match no logs + logs, err := c.lp.LogsWithSigs(fromBlock, toBlock, allTopics, c.addr) + if err != nil { + // TODO: retry? 
+ panic(err) + } + for _, log := range logs { + if err := c.applyLog(log); err != nil { + // TODO: handle errors + panic(err) + } + } + + c.definitionsBlockNum = toBlock + } + } +} + +func (c *channelDefinitionCache) applyLog(log logpoller.Log) error { + switch log.EventSig { + case topicNewChannelDefinition: + unpacked := new(stream_config_store.StreamConfigStoreNewChannelDefinition) + + err := streamConfigStoreABI.UnpackIntoInterface(unpacked, "NewChannelDefinition", log.Data) + if err != nil { + return fmt.Errorf("failed to unpack log data: %w", err) + } + + c.applyNewChannelDefinition(unpacked) + case topicChannelDefinitionRemoved: + unpacked := new(stream_config_store.StreamConfigStoreChannelDefinitionRemoved) + + err := streamConfigStoreABI.UnpackIntoInterface(unpacked, "ChannelDefinitionRemoved", log.Data) + if err != nil { + return fmt.Errorf("failed to unpack log data: %w", err) + } + + c.applyChannelDefinitionRemoved(unpacked) + default: + panic("TODO") + } + return nil +} + +func (c *channelDefinitionCache) applyNewChannelDefinition(log *stream_config_store.StreamConfigStoreNewChannelDefinition) { + rf := string(log.ChannelDefinition.ReportFormat[:]) + streamIDs := make([]streams.StreamID, len(log.ChannelDefinition.StreamIDs)) + for i, streamID := range log.ChannelDefinition.StreamIDs { + streamIDs[i] = streams.StreamID(string(streamID[:])) + } + c.definitionsMu.Lock() + defer c.definitionsMu.Unlock() + c.definitions[log.ChannelId] = streams.ChannelDefinition{ + ReportFormat: commontypes.StreamsReportFormat(rf), + ChainSelector: log.ChannelDefinition.ChainSelector, + StreamIDs: streamIDs, + } +} + +func (c *channelDefinitionCache) applyChannelDefinitionRemoved(log *stream_config_store.StreamConfigStoreChannelDefinitionRemoved) { + c.definitionsMu.Lock() + defer c.definitionsMu.Unlock() + delete(c.definitions, log.ChannelId) +} + +func (c *channelDefinitionCache) Close() error { + // TODO + // TODO: unregister filter (on job delete)? 
+ return c.StopOnce("ChannelDefinitionCache", func() error { + close(c.chStop) + c.wg.Wait() + return nil + }) +} + +func (c *channelDefinitionCache) HealthReport() map[string]error { + report := map[string]error{c.Name(): c.Healthy()} + return report +} + +func (c *channelDefinitionCache) Name() string { return c.lggr.Name() } + +func (c *channelDefinitionCache) Definitions() streams.ChannelDefinitions { + c.definitionsMu.RLock() + defer c.definitionsMu.RUnlock() + return maps.Clone(c.definitions) +} diff --git a/core/services/streams/channel_definition_cache_factory.go b/core/services/streams/channel_definition_cache_factory.go new file mode 100644 index 00000000000..536347af60c --- /dev/null +++ b/core/services/streams/channel_definition_cache_factory.go @@ -0,0 +1,37 @@ +package streams + +import ( + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-data-streams/streams" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" +) + +type ChannelDefinitionCacheFactory interface { + NewCache(addr common.Address, fromBlock int64) streams.ChannelDefinitionCache +} + +var _ ChannelDefinitionCacheFactory = &channelDefinitionCacheFactory{} + +type channelDefinitionCacheFactory struct { + lggr logger.Logger + orm ChannelDefinitionCacheORM // TODO: pass in a pre-scoped ORM (to EVM chain ID) + lp logpoller.LogPoller + + caches map[common.Address]struct{} + mu sync.Mutex +} + +func (f *channelDefinitionCacheFactory) NewCache(addr common.Address, fromBlock int64) streams.ChannelDefinitionCache { + f.mu.Lock() + defer f.mu.Unlock() + + if _, exists := f.caches[addr]; exists { + // TODO: can we do better? 
+ panic("cannot create duplicate cache") + } + f.caches[addr] = struct{}{} + return NewChannelDefinitionCache(f.lggr, f.orm, f.lp, addr, fromBlock) +} diff --git a/core/services/streams/channel_definition_cache_test.go b/core/services/streams/channel_definition_cache_test.go new file mode 100644 index 00000000000..b8b427fbefa --- /dev/null +++ b/core/services/streams/channel_definition_cache_test.go @@ -0,0 +1,9 @@ +package streams + +import "testing" + +func Test_ChannelDefinitionCache(t *testing.T) { + t.Run("Definitions", func(t *testing.T) { + t.Fatal("TODO") + }) +} diff --git a/core/services/streams/data_source.go b/core/services/streams/data_source.go new file mode 100644 index 00000000000..dfc3afea727 --- /dev/null +++ b/core/services/streams/data_source.go @@ -0,0 +1,91 @@ +package streams + +// TODO: llo datasource +import ( + "context" + "fmt" + "math/big" + "sync" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/smartcontractkit/chainlink-data-streams/streams" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +var ( + promMissingStreamCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "llo_stream_missing_count", + Help: "Number of times we tried to observe a stream, but it was missing", + }, + []string{"streamID"}, + ) + promObservationErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "llo_stream_observation_error_count", + Help: "Number of times we tried to observe a stream, but it failed with an error", + }, + []string{"streamID"}, + ) +) + +type ErrMissingStream struct { + id string +} + +func (e ErrMissingStream) Error() string { + return fmt.Sprintf("missing stream definition for: %q", e.id) +} + +var _ streams.DataSource = &dataSource{} + +type dataSource struct { + lggr logger.Logger + streamCache StreamCache +} + +func NewDataSource(lggr logger.Logger, streamCache StreamCache) streams.DataSource { + // TODO: lggr should include 
job ID + return &dataSource{lggr, streamCache} +} + +func (d *dataSource) Observe(ctx context.Context, streamIDs map[streams.StreamID]struct{}) (streams.StreamValues, error) { + // There is no "observationSource" (AKA pipeline) + // Need a concept of "streams" + // Streams are referenced by ID from the on-chain config + // Each stream contains its own pipeline + // See: https://docs.google.com/document/d/1l1IiDOL1QSteLTnhmiGnJAi6QpcSpyOe0nkqS7D3SvU/edit for stream ID naming + + var wg sync.WaitGroup + wg.Add(len(streamIDs)) + sv := make(streams.StreamValues) + var mu sync.Mutex + + for streamID := range streamIDs { + go func(streamID streams.StreamID) { + defer wg.Done() + + var res streams.ObsResult[*big.Int] + + stream, exists := d.streamCache.Get(streamID) + if exists { + res.Val, res.Err = stream.Observe(ctx) + if res.Err != nil { + d.lggr.Debugw("Observation failed for stream", "err", res.Err, "streamID", streamID) + promObservationErrorCount.WithLabelValues(streamID.String()).Inc() + } + } else { + d.lggr.Errorw(fmt.Sprintf("Missing stream: %q", streamID), "streamID", streamID) + promMissingStreamCount.WithLabelValues(streamID.String()).Inc() + res.Err = ErrMissingStream{streamID.String()} + } + + mu.Lock() + defer mu.Unlock() + sv[streamID] = res + }(streamID) + } + + wg.Wait() + + return sv, nil +} diff --git a/core/services/streams/data_source_test.go b/core/services/streams/data_source_test.go new file mode 100644 index 00000000000..6b78eba3d8a --- /dev/null +++ b/core/services/streams/data_source_test.go @@ -0,0 +1,90 @@ +package streams + +import ( + "context" + "errors" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/smartcontractkit/chainlink-data-streams/streams" + + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +type mockStream struct { + dp DataPoint + err error +} + +func (m *mockStream) Observe(ctx context.Context) (DataPoint, 
error) { + return m.dp, m.err +} + +func Test_DataSource(t *testing.T) { + lggr := logger.TestLogger(t) + sc := newStreamCache(nil) + ds := NewDataSource(lggr, sc) + ctx := testutils.Context(t) + + streamIDs := make(map[streams.StreamID]struct{}) + streamIDs[streams.StreamID("ETH/USD")] = struct{}{} + streamIDs[streams.StreamID("BTC/USD")] = struct{}{} + streamIDs[streams.StreamID("LINK/USD")] = struct{}{} + + t.Run("Observe", func(t *testing.T) { + t.Run("returns errors if no streams are defined", func(t *testing.T) { + vals, err := ds.Observe(ctx, streamIDs) + assert.NoError(t, err) + + assert.Equal(t, streams.StreamValues{ + "BTC/USD": streams.ObsResult[*big.Int]{Val: nil, Err: ErrMissingStream{id: "BTC/USD"}}, + "ETH/USD": streams.ObsResult[*big.Int]{Val: nil, Err: ErrMissingStream{id: "ETH/USD"}}, + "LINK/USD": streams.ObsResult[*big.Int]{Val: nil, Err: ErrMissingStream{id: "LINK/USD"}}, + }, vals) + }) + t.Run("observes each stream with success and returns values matching map argument", func(t *testing.T) { + sc.streams["ETH/USD"] = &mockStream{ + dp: big.NewInt(2181), + } + sc.streams["BTC/USD"] = &mockStream{ + dp: big.NewInt(40602), + } + sc.streams["LINK/USD"] = &mockStream{ + dp: big.NewInt(15), + } + + vals, err := ds.Observe(ctx, streamIDs) + assert.NoError(t, err) + + assert.Equal(t, streams.StreamValues{ + "BTC/USD": streams.ObsResult[*big.Int]{Val: big.NewInt(40602), Err: nil}, + "ETH/USD": streams.ObsResult[*big.Int]{Val: big.NewInt(2181), Err: nil}, + "LINK/USD": streams.ObsResult[*big.Int]{Val: big.NewInt(15), Err: nil}, + }, vals) + }) + t.Run("observes each stream and returns success/errors", func(t *testing.T) { + sc.streams["ETH/USD"] = &mockStream{ + dp: big.NewInt(2181), + err: errors.New("something exploded"), + } + sc.streams["BTC/USD"] = &mockStream{ + dp: big.NewInt(40602), + } + sc.streams["LINK/USD"] = &mockStream{ + err: errors.New("something exploded 2"), + } + + vals, err := ds.Observe(ctx, streamIDs) + assert.NoError(t, err) + + 
assert.Equal(t, streams.StreamValues{ + "BTC/USD": streams.ObsResult[*big.Int]{Val: big.NewInt(40602), Err: nil}, + "ETH/USD": streams.ObsResult[*big.Int]{Val: big.NewInt(2181), Err: errors.New("something exploded")}, + "LINK/USD": streams.ObsResult[*big.Int]{Val: nil, Err: errors.New("something exploded 2")}, + }, vals) + }) + }) +} diff --git a/core/services/streams/delegate.go b/core/services/streams/delegate.go new file mode 100644 index 00000000000..6f73e4a74b0 --- /dev/null +++ b/core/services/streams/delegate.go @@ -0,0 +1,92 @@ +package streams + +import ( + "context" + "fmt" + + commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" + "github.com/smartcontractkit/chainlink-data-streams/streams" + ocrcommontypes "github.com/smartcontractkit/libocr/commontypes" + ocr2types "github.com/smartcontractkit/libocr/offchainreporting2/types" + ocr2plus "github.com/smartcontractkit/libocr/offchainreporting2plus" + ocr3types "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types" + + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/job" + "github.com/smartcontractkit/chainlink/v2/core/services/pg" +) + +var _ job.ServiceCtx = &delegate{} + +type delegate struct { + cfg DelegateConfig + codecs map[commontypes.StreamsReportFormat]streams.ReportCodec +} + +type DelegateConfig struct { + Logger logger.Logger + Queryer pg.Queryer + Runner Runner + + // Streams + ChannelDefinitionCache streams.ChannelDefinitionCache + ORM ORM + + // OCR3 + BinaryNetworkEndpointFactory ocr2types.BinaryNetworkEndpointFactory + V2Bootstrappers []ocrcommontypes.BootstrapperLocator + ContractConfigTracker ocr2types.ContractConfigTracker + ContractTransmitter ocr3types.ContractTransmitter[commontypes.StreamsReportInfo] + Database ocr3types.Database + OCRLogger ocrcommontypes.Logger + MonitoringEndpoint ocrcommontypes.MonitoringEndpoint + OffchainConfigDigester ocr2types.OffchainConfigDigester + 
OffchainKeyring ocr2types.OffchainKeyring + OnchainKeyring ocr3types.OnchainKeyring[commontypes.StreamsReportInfo] + LocalConfig ocr2types.LocalConfig +} + +func NewDelegate(cfg DelegateConfig) job.ServiceCtx { + // TODO: add the chain codecs here + // TODO: nil checks? + codecs := make(map[commontypes.StreamsReportFormat]streams.ReportCodec) + return &delegate{cfg, codecs} +} + +func (d *delegate) Start(ctx context.Context) error { + // create the oracle from config values + // TODO: Do these services need starting? + prrc := streams.NewPredecessorRetirementReportCache() + src := streams.NewShouldRetireCache() + sc := NewStreamCache(d.cfg.ORM) + if err := sc.Load(ctx, d.cfg.Logger.Named("StreamCache"), d.cfg.Runner); err != nil { + return err + } + ds := NewDataSource(d.cfg.Logger.Named("DataSource"), sc) + llo, err := ocr2plus.NewOracle(ocr2plus.OCR3OracleArgs[commontypes.StreamsReportInfo]{ + BinaryNetworkEndpointFactory: d.cfg.BinaryNetworkEndpointFactory, + V2Bootstrappers: d.cfg.V2Bootstrappers, + ContractConfigTracker: d.cfg.ContractConfigTracker, + ContractTransmitter: d.cfg.ContractTransmitter, + Database: d.cfg.Database, + LocalConfig: d.cfg.LocalConfig, + Logger: d.cfg.OCRLogger, + MonitoringEndpoint: d.cfg.MonitoringEndpoint, + OffchainConfigDigester: d.cfg.OffchainConfigDigester, + OffchainKeyring: d.cfg.OffchainKeyring, + OnchainKeyring: d.cfg.OnchainKeyring, + ReportingPluginFactory: streams.NewPluginFactory( + prrc, src, d.cfg.ChannelDefinitionCache, ds, d.cfg.Logger.Named("StreamsReportingPlugin"), d.codecs, + ), + }) + + if err != nil { + return fmt.Errorf("%w: failed to create new OCR oracle", err) + } + + return llo.Start() +} + +func (d *delegate) Close() error { + panic("TODO") +} diff --git a/core/services/streams/keyring.go b/core/services/streams/keyring.go new file mode 100644 index 00000000000..3faab4be115 --- /dev/null +++ b/core/services/streams/keyring.go @@ -0,0 +1,60 @@ +package streams + +import ( + "fmt" + + commontypes 
"github.com/smartcontractkit/chainlink-common/pkg/types" + "github.com/smartcontractkit/chainlink-data-streams/streams" + "github.com/smartcontractkit/libocr/offchainreporting2/types" + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" +) + +type StreamsOnchainKeyring ocr3types.OnchainKeyring[commontypes.StreamsReportInfo] + +var _ StreamsOnchainKeyring = &onchainKeyring{} + +type Key interface { + Sign3(digest ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report) (signature []byte, err error) + Verify3(publicKey ocrtypes.OnchainPublicKey, cd ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report, signature []byte) bool + PublicKey() ocrtypes.OnchainPublicKey + MaxSignatureLength() int +} + +type onchainKeyring struct { + evm Key +} + +func NewOnchainKeyring(evm Key) StreamsOnchainKeyring { + return &onchainKeyring{ + evm: evm, + } +} + +func (okr *onchainKeyring) PublicKey() types.OnchainPublicKey { + // TODO: Combine this in some way for multiple chains + return okr.evm.PublicKey() +} + +func (okr *onchainKeyring) MaxSignatureLength() int { + // TODO: Needs to be max of all chain sigs + return okr.evm.MaxSignatureLength() +} + +func (okr *onchainKeyring) Sign(digest types.ConfigDigest, seqNr uint64, r ocr3types.ReportWithInfo[commontypes.StreamsReportInfo]) (signature []byte, err error) { + switch r.Info.ReportFormat { + case streams.ReportFormatEVM: + return okr.evm.Sign3(digest, seqNr, r.Report) + default: + return nil, fmt.Errorf("unsupported format: %q", r.Info.ReportFormat) + } +} + +func (okr *onchainKeyring) Verify(key types.OnchainPublicKey, digest types.ConfigDigest, seqNr uint64, r ocr3types.ReportWithInfo[commontypes.StreamsReportInfo], signature []byte) bool { + switch r.Info.ReportFormat { + case streams.ReportFormatEVM: + return okr.evm.Verify3(key, digest, seqNr, r.Report, signature) + default: + return false + } +} diff --git 
a/core/services/streams/keyring_test.go b/core/services/streams/keyring_test.go new file mode 100644 index 00000000000..b958164bdec --- /dev/null +++ b/core/services/streams/keyring_test.go @@ -0,0 +1,7 @@ +package streams + +import "testing" + +func Test_Keyring(t *testing.T) { + t.Fatal("TODO") +} diff --git a/core/services/streams/orm.go b/core/services/streams/orm.go new file mode 100644 index 00000000000..a963abe9d59 --- /dev/null +++ b/core/services/streams/orm.go @@ -0,0 +1,53 @@ +package streams + +import ( + "context" + + "github.com/smartcontractkit/chainlink-data-streams/streams" + + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/pg" +) + +type ORM interface { + StreamCacheORM + ChannelDefinitionCacheORM +} + +var _ ORM = &orm{} + +type orm struct { + q pg.Queryer +} + +func NewORM(q pg.Queryer) ORM { + return &orm{q} +} + +func (o *orm) LoadStreams(ctx context.Context, lggr logger.Logger, runner Runner, m map[streams.StreamID]Stream) error { + rows, err := o.q.QueryContext(ctx, "SELECT s.id, ps.id, ps.dot_dag_source, ps.max_task_duration FROM streams s JOIN pipeline_specs ps ON ps.id = s.pipeline_spec_id") + if err != nil { + // TODO: retries? 
+ return err + } + + for rows.Next() { + var strm stream + if err := rows.Scan(&strm.id, &strm.spec.ID, &strm.spec.DotDagSource, &strm.spec.MaxTaskDuration); err != nil { + return err + } + strm.lggr = lggr.Named("Stream").With("streamID", strm.id) + strm.runner = runner + + m[strm.id] = &strm + } + return rows.Err() +} + +func (o *orm) LoadChannelDefinitions(ctx context.Context) (cd streams.ChannelDefinitions, blockNum int64, err error) { + panic("TODO") +} + +func (o *orm) StoreChannelDefinitions(ctx context.Context, cd streams.ChannelDefinitions) error { + panic("TODO") +} diff --git a/core/services/streams/orm_test.go b/core/services/streams/orm_test.go new file mode 100644 index 00000000000..1c72e0347e9 --- /dev/null +++ b/core/services/streams/orm_test.go @@ -0,0 +1,71 @@ +package streams + +import ( + "context" + "testing" + + "github.com/smartcontractkit/chainlink-data-streams/streams" + + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/pipeline" + + "github.com/stretchr/testify/assert" +) + +type mockRunner struct{} + +func (m *mockRunner) ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error) { + return +} + +func Test_ORM(t *testing.T) { + db := pgtest.NewSqlxDB(t) + orm := NewORM(db) + ctx := testutils.Context(t) + lggr := logger.TestLogger(t) + runner := &mockRunner{} + + t.Run("LoadStreams", func(t *testing.T) { + t.Run("nothing in database", func(t *testing.T) { + m := make(map[streams.StreamID]Stream) + err := orm.LoadStreams(ctx, lggr, runner, m) + assert.NoError(t, err) + + assert.Len(t, m, 0) + }) + t.Run("loads streams from database", func(t *testing.T) { + pgtest.MustExec(t, db, ` +WITH pipeline_specs AS ( + INSERT INTO pipeline_specs 
// Runner is the subset of the pipeline runner needed to execute a stream's
// pipeline spec.
type Runner interface {
	ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error)
}

// DataPoint is the value produced by one stream observation.
// TODO: Generalize to beyond simply an int
type DataPoint *big.Int

// Stream produces a DataPoint on demand by running its pipeline.
type Stream interface {
	Observe(ctx context.Context) (DataPoint, error)
}

// stream is the concrete Stream: one pipeline spec plus the runner used to
// execute it. Fields are populated either directly (NewStream) or by the ORM
// when loading from the database.
type stream struct {
	id     streams.StreamID // stream identifier (primary key of the streams table)
	lggr   logger.Logger
	spec   pipeline.Spec // the pipeline spec whose terminal result is the stream's value
	runner Runner
}

// NewStream returns a Stream for the given spec, executed by runner.
func NewStream(lggr logger.Logger, id streams.StreamID, spec pipeline.Spec, runner Runner) Stream {
	return newStream(lggr, id, spec, runner)
}
newStream(lggr, id, spec, runner) +} + +func newStream(lggr logger.Logger, id streams.StreamID, spec pipeline.Spec, runner Runner) *stream { + return &stream{id, lggr, spec, runner} +} + +func (s *stream) Observe(ctx context.Context) (DataPoint, error) { + var run *pipeline.Run + run, trrs, err := s.executeRun(ctx) + if err != nil { + return nil, fmt.Errorf("Observe failed while executing run: %w", err) + } + s.lggr.Tracew("Observe executed run", "run", run) + // FIXME: runResults?? + // select { + // case s.runResults <- run: + // default: + // s.lggr.Warnf("unable to enqueue run save for job ID %d, buffer full", s.spec.JobID) + // } + + // NOTE: trrs comes back as _all_ tasks, but we only want the terminal ones + // They are guaranteed to be sorted by index asc so should be in the correct order + var finaltrrs []pipeline.TaskRunResult + for _, trr := range trrs { + if trr.IsTerminal() { + finaltrrs = append(finaltrrs, trr) + } + } + + // FIXME: How to handle arbitrary-shaped inputs? + // For now just assume everything is one *big.Int + parsed, err := s.parse(finaltrrs) + if err != nil { + return nil, fmt.Errorf("Observe failed while parsing run results: %w", err) + } + return parsed, nil + +} + +// The context passed in here has a timeout of (ObservationTimeout + ObservationGracePeriod). +// Upon context cancellation, its expected that we return any usable values within ObservationGracePeriod. +func (s *stream) executeRun(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) { + // TODO: does it need some kind of debugging stuff here? 
+ vars := pipeline.NewVarsFrom(map[string]interface{}{}) + + run, trrs, err := s.runner.ExecuteRun(ctx, s.spec, vars, s.lggr) + if err != nil { + return nil, nil, fmt.Errorf("error executing run for spec ID %v: %w", s.spec.ID, err) + } + + return run, trrs, err +} + +// returns error on parse errors: if something is the wrong type +func (s *stream) parse(trrs pipeline.TaskRunResults) (*big.Int, error) { + var finaltrrs []pipeline.TaskRunResult + for _, trr := range trrs { + // only return terminal trrs from executeRun + if trr.IsTerminal() { + finaltrrs = append(finaltrrs, trr) + } + } + + // pipeline.TaskRunResults comes ordered asc by index, this is guaranteed + // by the pipeline executor + if len(finaltrrs) != 1 { + return nil, fmt.Errorf("invalid number of results, expected: 1, got: %d", len(finaltrrs)) + } + res := finaltrrs[0].Result + if res.Error != nil { + return nil, res.Error + } else if val, err := toBigInt(res.Value); err != nil { + return nil, fmt.Errorf("failed to parse BenchmarkPrice: %w", err) + } else { + return val, nil + } +} + +func toBigInt(val interface{}) (*big.Int, error) { + dec, err := utils.ToDecimal(val) + if err != nil { + return nil, err + } + return dec.BigInt(), nil +} diff --git a/core/services/streams/stream_cache.go b/core/services/streams/stream_cache.go new file mode 100644 index 00000000000..1c322cb5de1 --- /dev/null +++ b/core/services/streams/stream_cache.go @@ -0,0 +1,43 @@ +package streams + +import ( + "context" + + "github.com/smartcontractkit/chainlink-data-streams/streams" + + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +type StreamCacheORM interface { + LoadStreams(ctx context.Context, lggr logger.Logger, runner Runner, m map[streams.StreamID]Stream) error +} + +type StreamCache interface { + Get(streamID streams.StreamID) (Stream, bool) + Load(ctx context.Context, lggr logger.Logger, runner Runner) error +} + +type streamCache struct { + orm StreamCacheORM + streams map[streams.StreamID]Stream +} + 
+func NewStreamCache(orm StreamCacheORM) StreamCache { + return newStreamCache(orm) +} + +func newStreamCache(orm StreamCacheORM) *streamCache { + return &streamCache{ + orm, + make(map[streams.StreamID]Stream), + } +} + +func (s *streamCache) Get(streamID streams.StreamID) (Stream, bool) { + strm, exists := s.streams[streamID] + return strm, exists +} + +func (s *streamCache) Load(ctx context.Context, lggr logger.Logger, runner Runner) error { + return s.orm.LoadStreams(ctx, lggr, runner, s.streams) +} diff --git a/core/services/streams/stream_cache_test.go b/core/services/streams/stream_cache_test.go new file mode 100644 index 00000000000..902e610a68f --- /dev/null +++ b/core/services/streams/stream_cache_test.go @@ -0,0 +1,73 @@ +package streams + +import ( + "context" + "errors" + "maps" + "math/big" + "testing" + + "github.com/smartcontractkit/chainlink-data-streams/streams" + + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + + "github.com/stretchr/testify/assert" +) + +type mockORM struct { + m map[streams.StreamID]Stream + err error +} + +func (orm *mockORM) LoadStreams(ctx context.Context, lggr logger.Logger, runner Runner, m map[streams.StreamID]Stream) error { + maps.Copy(m, orm.m) + return orm.err +} + +func Test_StreamCache(t *testing.T) { + t.Run("Load", func(t *testing.T) { + orm := &mockORM{} + sc := newStreamCache(orm) + lggr := logger.TestLogger(t) + runner := &mockRunner{} + + t.Run("populates cache from database using ORM", func(t *testing.T) { + assert.Len(t, sc.streams, 0) + err := sc.Load(testutils.Context(t), lggr, runner) + assert.NoError(t, err) + assert.Len(t, sc.streams, 0) + + v, exists := sc.Get("foo") + assert.Nil(t, v) + assert.False(t, exists) + + orm.m = make(map[streams.StreamID]Stream) + orm.m["foo"] = &mockStream{dp: big.NewInt(1)} + orm.m["bar"] = &mockStream{dp: big.NewInt(2)} + orm.m["baz"] = &mockStream{dp: big.NewInt(3)} + + err = 
sc.Load(testutils.Context(t), lggr, runner) + assert.NoError(t, err) + assert.Len(t, sc.streams, 3) + + v, exists = sc.Get("foo") + assert.True(t, exists) + assert.Equal(t, orm.m["foo"], v) + + v, exists = sc.Get("bar") + assert.True(t, exists) + assert.Equal(t, orm.m["bar"], v) + + v, exists = sc.Get("baz") + assert.True(t, exists) + assert.Equal(t, orm.m["baz"], v) + }) + + t.Run("returns error if db errors", func(t *testing.T) { + orm.err = errors.New("something exploded") + err := sc.Load(testutils.Context(t), lggr, runner) + assert.EqualError(t, err, "something exploded") + }) + }) +} diff --git a/core/services/streams/transmitter.go b/core/services/streams/transmitter.go new file mode 100644 index 00000000000..db69e06d0f3 --- /dev/null +++ b/core/services/streams/transmitter.go @@ -0,0 +1,74 @@ +package streams + +// TODO: llo transmitter + +import ( + "context" + "crypto/ed25519" + "fmt" + + "github.com/smartcontractkit/chainlink-common/pkg/services" + commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" + "github.com/smartcontractkit/libocr/offchainreporting2/types" + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc" +) + +type Transmitter interface { + commontypes.StreamsTransmitter + services.Service +} + +type transmitter struct { + services.StateMachine + lggr logger.Logger + rpcClient wsrpc.Client + fromAccount string +} + +func NewTransmitter(lggr logger.Logger, rpcClient wsrpc.Client, fromAccount ed25519.PublicKey) Transmitter { + return &transmitter{ + services.StateMachine{}, + lggr, + rpcClient, + fmt.Sprintf("%x", fromAccount), + } +} + +func (t *transmitter) Start(ctx context.Context) error { + // TODO + return nil +} + +func (t *transmitter) Close() error { + // TODO + return nil +} + +func (t 
// HealthReport merges this service's own health with the rpc client's.
func (t *transmitter) HealthReport() map[string]error {
	report := map[string]error{t.Name(): t.Healthy()}
	services.CopyHealth(report, t.rpcClient.HealthReport())
	// FIXME
	// services.CopyHealth(report, t.queue.HealthReport())
	return report
}

// Name returns the service name (taken from the logger's name).
func (t *transmitter) Name() string { return t.lggr.Name() }

// Transmit is not yet implemented.
func (t *transmitter) Transmit(
	context.Context,
	types.ConfigDigest,
	uint64,
	ocr3types.ReportWithInfo[commontypes.StreamsReportInfo],
	[]types.AttributedOnchainSignature,
) error {
	panic("TODO")
}

// FromAccount returns the stringified (hex) CSA public key
func (t *transmitter) FromAccount() (ocrtypes.Account, error) {
	return ocrtypes.Account(t.fromAccount), nil
}
-- 0213_create_streams.sql
-- +goose Up
-- streams: registry of data streams. Each stream references the pipeline spec
-- that produces its value (loaded by core/services/streams/orm.go LoadStreams).
CREATE TABLE streams (
    id text PRIMARY KEY,
    -- Deleting a pipeline spec cascades to its streams; the constraint is
    -- deferrable — presumably so spec and stream can be inserted in one
    -- transaction in either order; confirm with the job ORM.
    pipeline_spec_id INT REFERENCES pipeline_specs (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE,
    created_at timestamp with time zone NOT NULL
);


-- +goose Down
DROP TABLE streams;

-- 0214_create_channel_definition_caches.sql
-- +goose Up
-- streams_channel_definitions: cached channel definitions per on-chain
-- config store contract, keyed by its 20-byte (EVM) address.
CREATE TABLE streams_channel_definitions (
    addr bytea PRIMARY KEY CHECK (octet_length(addr) = 20),
    evm_chain_id NUMERIC(78) NOT NULL,
    definitions JSONB NOT NULL,
    -- block_num: block number the cached definitions were read at.
    block_num BIGINT NOT NULL,
    created_at timestamp with time zone NOT NULL,
    updated_at timestamp with time zone NOT NULL
);

-- NOTE(review): addr is already the primary key; this composite index
-- presumably serves chain-scoped lookups — confirm it is actually needed.
CREATE INDEX idx_streams_channel_definitions_evm_chain_id_addr ON streams_channel_definitions (evm_chain_id, addr);

-- +goose Down
DROP TABLE streams_channel_definitions;
chain-selectors chainlink-data-streams --> chainlink-common chainlink-data-streams --> libocr chainlink-feeds --> chainlink-common diff --git a/go.mod b/go.mod index c71705d9c92..0f71e42ce90 100644 --- a/go.mod +++ b/go.mod @@ -128,7 +128,6 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect github.com/blendle/zapdriver v1.3.1 // indirect - github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect github.com/bytedance/sonic v1.10.1 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect @@ -170,7 +169,6 @@ require ( github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect - github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect @@ -262,6 +260,7 @@ require ( github.com/sethvargo/go-retry v0.2.4 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/sirupsen/logrus v1.9.3 // indirect + github.com/smartcontractkit/chain-selectors v1.0.5 // indirect github.com/spf13/afero v1.9.3 // indirect github.com/spf13/cobra v1.6.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect @@ -325,4 +324,12 @@ replace ( // until merged upstream: https://github.com/mwitkow/grpc-proxy/pull/69 github.com/mwitkow/grpc-proxy => github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f + + // TODO: streams + github.com/smartcontractkit/chainlink-common => /Users/sam/code/smartcontractkit/chainlink-common + github.com/smartcontractkit/chainlink-cosmos => /Users/sam/code/smartcontractkit/chainlink-cosmos + github.com/smartcontractkit/chainlink-data-streams => /Users/sam/code/smartcontractkit/chainlink-data-streams + 
github.com/smartcontractkit/chainlink-solana => /Users/sam/code/smartcontractkit/chainlink-solana + github.com/smartcontractkit/chainlink-starknet/relayer => /Users/sam/code/smartcontractkit/chainlink-starknet/relayer + ) diff --git a/go.sum b/go.sum index b38b7ac2632..d8c367b45ea 100644 --- a/go.sum +++ b/go.sum @@ -176,8 +176,8 @@ github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ= github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.3 h1:SDlJ7bAm4ewvrmZtR0DaiYbQGdKPeaaIm7bM+qRhFeU= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.3/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= @@ -431,8 +431,6 @@ github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= -github.com/go-json-experiment/json 
v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= @@ -1132,20 +1130,12 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumvbfM1u/etVq42Afwq/jtNSBSOA8n5jntnNPo= github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M= +github.com/smartcontractkit/chain-selectors v1.0.5 h1:NOefQsogPZS4aBbWPFrgAyoke0gppN2ojfa8SQkhu8c= +github.com/smartcontractkit/chain-selectors v1.0.5/go.mod h1:WBhLlODF5b95vvx2tdKK55vGACg1+qZpuBhOGu1UXVo= github.com/smartcontractkit/chainlink-automation v1.0.1 h1:vVjBFq2Zsz21kPy1Pb0wpjF9zrbJX+zjXphDeeR4XZk= github.com/smartcontractkit/chainlink-automation v1.0.1/go.mod h1:INSchkV3ntyDdlZKGWA030MPDpp6pbeuiRkRKYFCm2k= -github.com/smartcontractkit/chainlink-common v0.1.7-0.20231213134506-b6c433e6c490 h1:lSYiaiIfAA+5ac45/UD8ciytlNw/S6fnhK7bxFHYI88= -github.com/smartcontractkit/chainlink-common v0.1.7-0.20231213134506-b6c433e6c490/go.mod h1:IdlfCN9rUs8Q/hrOYe8McNBIwEOHEsi0jilb3Cw77xs= -github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231206164210-03f8b219402e h1:xvqffqFec2HkEcUKrCkm4FDJRnn/+gHmvrE/dz3Zlw8= -github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231206164210-03f8b219402e/go.mod h1:soVgcl4CbfR6hC9UptjuCQhz19HJaFEjwnOpiySkxg0= -github.com/smartcontractkit/chainlink-data-streams v0.0.0-20231204152908-a6e3fe8ff2a1 h1:xYqRgZO0nMSO8CBCMR0r3WA+LZ4kNL8a6bnbyk/oBtQ= -github.com/smartcontractkit/chainlink-data-streams v0.0.0-20231204152908-a6e3fe8ff2a1/go.mod 
h1:GuPvyXryvbiUZIHmPeLBz4L+yJKeyGUjrDfd1KNne+o= github.com/smartcontractkit/chainlink-feeds v0.0.0-20231127231053-2232d3a6766d h1:w4MsbOtNk6nD/mcXLstHWk9hB6g7QLtcAfhPjhwvOaQ= github.com/smartcontractkit/chainlink-feeds v0.0.0-20231127231053-2232d3a6766d/go.mod h1:YPAfLNowdBwiKiYOwgwtbJHi8AJWbcxkbOY0ItAvkfc= -github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231206154215-ec1718b7df3e h1:/tCHhoAJM+ittEHPZTtJsAgXmYujKiDW0ub9HXW9qtY= -github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231206154215-ec1718b7df3e/go.mod h1:9YIi413QRRytafTzpWm+Z+5NWBNxSqokhKyeEZ3ynlA= -github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231205180940-ea2e3e916725 h1:NbhPVwxx+53WN/Uld1V6c4iLgoGvUYFOsVd2kfcexe8= -github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231205180940-ea2e3e916725/go.mod h1:vHrPBipRL52NdPp77KXNU2k1IoCUa1B33N9otZQPYko= github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 h1:FFdvEzlYwcuVHkdZ8YnZR/XomeMGbz5E2F2HZI3I3w8= github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868/go.mod h1:Kn1Hape05UzFZ7bOUnm3GVsHzP0TNrVmpfXYNHdqGGs= github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306 h1:ko88+ZznniNJZbZPWAvHQU8SwKAdHngdDZ+pvVgB5ss= diff --git a/integration-tests/go.sum b/integration-tests/go.sum index f8a9529a4cc..139524a0123 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1526,8 +1526,15 @@ github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoM github.com/teris-io/shortid v0.0.0-20171029131806-771a37caa5cf/go.mod h1:M8agBzgqHIhgj7wEn9/0hJUZcrvt9VY+Ln+S1I5Mha0= github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125 h1:3SNcvBmEPE1YlB1JpVZouslJpI3GBNoiqW7+wb0Rz7w= github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125/go.mod h1:M8agBzgqHIhgj7wEn9/0hJUZcrvt9VY+Ln+S1I5Mha0= +<<<<<<< HEAD github.com/test-go/testify v1.1.4 h1:Tf9lntrKUMHiXQ07qBScBTSA0dhYQlu83hswqelv1iE= 
github.com/test-go/testify v1.1.4/go.mod h1:rH7cfJo/47vWGdi4GPj16x3/t1xGOj2YxzmNQzk2ghU= +======= +github.com/stretchr/testify v1.1.4 h1:Tf9lntrKUMHiXQ07qBScBTSA0dhYQlu83hswqelv1iE= +github.com/stretchr/testify v1.1.4/go.mod h1:rH7cfJo/47vWGdi4GPj16x3/t1xGOj2YxzmNQzk2ghU= +github.com/testcontainers/testcontainers-go v0.23.0 h1:ERYTSikX01QczBLPZpqsETTBO7lInqEP349phDOVJVs= +github.com/testcontainers/testcontainers-go v0.23.0/go.mod h1:3gzuZfb7T9qfcH2pHpV4RLlWrPjeWNQah6XlYQ32c4I= +>>>>>>> 2cf1c5828a (Implement Data Streams plugin) github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a h1:YuO+afVc3eqrjiCUizNCxI53bl/BnPiVwXqLzqYTqgU= github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a/go.mod h1:/sfW47zCZp9FrtGcWyo1VjbgDaodxX9ovZvgLb/MxaA= github.com/thlib/go-timezone-local v0.0.0-20210907160436-ef149e42d28e h1:BuzhfgfWQbX0dWzYzT1zsORLnHRv3bcRcsaUk0VmXA8=