diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 98382d5de..2ccb2da07 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1 @@ -* @janezpodhostnik @peterargue @m-Peter @zhangchiqing @ramtinms \ No newline at end of file +* @janezpodhostnik @peterargue @m-Peter @zhangchiqing diff --git a/Makefile b/Makefile index 026a75590..5d53182cc 100644 --- a/Makefile +++ b/Makefile @@ -33,9 +33,7 @@ generate: mockery --dir=storage --name=BlockIndexer --output=storage/mocks mockery --dir=storage --name=ReceiptIndexer --output=storage/mocks mockery --dir=storage --name=TransactionIndexer --output=storage/mocks - mockery --dir=storage --name=AccountIndexer --output=storage/mocks mockery --dir=storage --name=TraceIndexer --output=storage/mocks - mockery --all --dir=services/traces --output=services/traces/mocks mockery --all --dir=services/ingestion --output=services/ingestion/mocks mockery --dir=models --name=Engine --output=models/mocks diff --git a/README.md b/README.md index 393dd6396..8cc4d35e3 100644 --- a/README.md +++ b/README.md @@ -222,14 +222,10 @@ The application can be configured using the following flags at runtime: | `stream-limit` | `10` | Rate-limit for client events sent per second | | `rate-limit` | `50` | Requests per second limit for clients over any protocol (ws/http) | | `address-header` | `""` | Header for client IP when server is behind a proxy | -| `heartbeat-interval` | `100` | Interval for AN event subscription heartbeats | | `stream-timeout` | `3` | Timeout in seconds for sending events to clients | | `force-start-height` | `0` | Force-set starting Cadence height (local/testing use only) | | `wallet-api-key` | `""` | ECDSA private key for wallet APIs (local/testing use only) | | `filter-expiry` | `5m` | Expiry time for idle filters | -| `traces-gcp-bucket` | `""` | GCP bucket name for transaction traces | -| `traces-backfill-start-height` | `0` | Start height for backfilling transaction traces | -| `traces-backfill-end-height` | 
`0` | End height for backfilling transaction traces | | `index-only` | `false` | Run in index-only mode, allowing state queries and indexing but no transaction sending | | `profiler-enabled` | `false` | Enable the pprof profiler server | | `profiler-host` | `localhost` | Host for the pprof profiler | diff --git a/api/api.go b/api/api.go index 1c0d84042..87d3fa628 100644 --- a/api/api.go +++ b/api/api.go @@ -2,12 +2,8 @@ package api import ( "context" - _ "embed" - "encoding/hex" - "errors" "fmt" "math/big" - "strings" "github.com/onflow/go-ethereum/common" "github.com/onflow/go-ethereum/common/hexutil" @@ -30,8 +26,12 @@ import ( "github.com/onflow/flow-evm-gateway/storage" ) +const BlockGasLimit uint64 = 120_000_000 + const maxFeeHistoryBlockCount = 1024 +var baseFeesPerGas = big.NewInt(1) + // A map containing all the valid method names that are found // in the Ethereum JSON-RPC API specification. // Update accordingly if any new methods are added/removed. @@ -74,6 +74,7 @@ var validMethods = map[string]struct{}{ "debug_traceTransaction": {}, "debug_traceBlockByNumber": {}, "debug_traceBlockByHash": {}, + "debug_traceCall": {}, // web3 namespace "web3_clientVersion": {}, @@ -153,7 +154,6 @@ type BlockChainAPI struct { blocks storage.BlockIndexer transactions storage.TransactionIndexer receipts storage.ReceiptIndexer - accounts storage.AccountIndexer indexingResumedHeight uint64 limiter limiter.Store collector metrics.Collector @@ -166,7 +166,6 @@ func NewBlockChainAPI( blocks storage.BlockIndexer, transactions storage.TransactionIndexer, receipts storage.ReceiptIndexer, - accounts storage.AccountIndexer, ratelimiter limiter.Store, collector metrics.Collector, ) (*BlockChainAPI, error) { @@ -183,7 +182,6 @@ func NewBlockChainAPI( blocks: blocks, transactions: transactions, receipts: receipts, - accounts: accounts, indexingResumedHeight: indexingResumedHeight, limiter: ratelimiter, collector: collector, @@ -280,12 +278,12 @@ func (b *BlockChainAPI) GetBalance( 
return nil, err } - evmHeight, err := b.getBlockNumber(&blockNumberOrHash) + height, err := resolveBlockTag(&blockNumberOrHash, b.blocks, b.logger) if err != nil { return handleError[*hexutil.Big](err, l, b.collector) } - balance, err := b.evm.GetBalance(ctx, address, evmHeight) + balance, err := b.evm.GetBalance(address, height) if err != nil { return handleError[*hexutil.Big](err, l, b.collector) } @@ -516,21 +514,12 @@ func (b *BlockChainAPI) GetBlockReceipts( return nil, err } - var ( - block *models.Block - err error - ) - if blockNumberOrHash.BlockHash != nil { - block, err = b.blocks.GetByID(*blockNumberOrHash.BlockHash) - } else if blockNumberOrHash.BlockNumber != nil { - block, err = b.blocks.GetByHeight(uint64(blockNumberOrHash.BlockNumber.Int64())) - } else { - return handleError[[]map[string]interface{}]( - fmt.Errorf("%w: block number or hash not provided", errs.ErrInvalid), - l, - b.collector, - ) + height, err := resolveBlockTag(&blockNumberOrHash, b.blocks, b.logger) + if err != nil { + return handleError[[]map[string]interface{}](err, l, b.collector) } + + block, err := b.blocks.GetByHeight(height) if err != nil { return handleError[[]map[string]interface{}](err, l, b.collector) } @@ -642,7 +631,7 @@ func (b *BlockChainAPI) Call( blockNumberOrHash = &latestBlockNumberOrHash } - evmHeight, err := b.getBlockNumber(blockNumberOrHash) + height, err := resolveBlockTag(blockNumberOrHash, b.blocks, b.logger) if err != nil { return handleError[hexutil.Bytes](err, l, b.collector) } @@ -658,7 +647,7 @@ func (b *BlockChainAPI) Call( from = *args.From } - res, err := b.evm.Call(ctx, tx, from, evmHeight) + res, err := b.evm.Call(tx, from, height) if err != nil { return handleError[hexutil.Bytes](err, l, b.collector) } @@ -760,29 +749,16 @@ func (b *BlockChainAPI) GetTransactionCount( return nil, err } - evmHeight, err := b.getBlockNumber(&blockNumberOrHash) + height, err := resolveBlockTag(&blockNumberOrHash, b.blocks, b.logger) if err != nil { return 
handleError[*hexutil.Uint64](err, l, b.collector) } - networkNonce, err := b.evm.GetNonce(ctx, address, evmHeight) + networkNonce, err := b.evm.GetNonce(address, height) if err != nil { return handleError[*hexutil.Uint64](err, l, b.collector) } - nonce, err := b.accounts.GetNonce(address) - if err != nil { - return handleError[*hexutil.Uint64](errs.ErrInternal, l, b.collector) - } - - // compare both until we gain confidence in db nonce tracking working correctly - if nonce != networkNonce { - l.Error(). - Uint64("network-nonce", networkNonce). - Uint64("db-nonce", nonce). - Msg("network nonce does not equal db nonce") - } - return (*hexutil.Uint64)(&networkNonce), nil } @@ -813,7 +789,7 @@ func (b *BlockChainAPI) EstimateGas( tx, err := encodeTxFromArgs(args) if err != nil { - return hexutil.Uint64(blockGasLimit), nil // return block gas limit + return hexutil.Uint64(BlockGasLimit), nil // return block gas limit } // Default address in case user does not provide one @@ -826,12 +802,12 @@ func (b *BlockChainAPI) EstimateGas( blockNumberOrHash = &latestBlockNumberOrHash } - evmHeight, err := b.getBlockNumber(blockNumberOrHash) + height, err := resolveBlockTag(blockNumberOrHash, b.blocks, b.logger) if err != nil { return handleError[hexutil.Uint64](err, l, b.collector) } - estimatedGas, err := b.evm.EstimateGas(ctx, tx, from, evmHeight) + estimatedGas, err := b.evm.EstimateGas(tx, from, height) if err != nil { return handleError[hexutil.Uint64](err, l, b.collector) } @@ -855,12 +831,12 @@ func (b *BlockChainAPI) GetCode( return nil, err } - evmHeight, err := b.getBlockNumber(&blockNumberOrHash) + height, err := resolveBlockTag(&blockNumberOrHash, b.blocks, b.logger) if err != nil { return handleError[hexutil.Bytes](err, l, b.collector) } - code, err := b.evm.GetCode(ctx, address, evmHeight) + code, err := b.evm.GetCode(address, height) if err != nil { return handleError[hexutil.Bytes](err, l, b.collector) } @@ -937,11 +913,11 @@ func (b *BlockChainAPI) FeeHistory( 
oldestBlock = (*hexutil.Big)(big.NewInt(int64(block.Height))) } - baseFees = append(baseFees, (*hexutil.Big)(big.NewInt(0))) + baseFees = append(baseFees, (*hexutil.Big)(baseFeesPerGas)) rewards = append(rewards, blockRewards) - gasUsedRatio := float64(block.TotalGasUsed) / float64(blockGasLimit) + gasUsedRatio := float64(block.TotalGasUsed) / float64(BlockGasLimit) gasUsedRatios = append(gasUsedRatios, gasUsedRatio) } @@ -971,7 +947,7 @@ func (b *BlockChainAPI) GetStorageAt( return nil, err } - key, _, err := decodeHash(storageSlot) + key, err := decodeHash(storageSlot) if err != nil { return handleError[hexutil.Bytes]( fmt.Errorf("%w: %w", errs.ErrInvalid, err), @@ -980,12 +956,12 @@ func (b *BlockChainAPI) GetStorageAt( ) } - evmHeight, err := b.getBlockNumber(&blockNumberOrHash) + height, err := resolveBlockTag(&blockNumberOrHash, b.blocks, b.logger) if err != nil { return handleError[hexutil.Bytes](err, l, b.collector) } - result, err := b.evm.GetStorageAt(ctx, address, key, evmHeight) + result, err := b.evm.GetStorageAt(address, key, height) if err != nil { return handleError[hexutil.Bytes](err, l, b.collector) } @@ -1050,10 +1026,10 @@ func (b *BlockChainAPI) prepareBlockResponse( TransactionsRoot: block.TransactionHashRoot, Transactions: block.TransactionHashes, Uncles: []common.Hash{}, - GasLimit: hexutil.Uint64(blockGasLimit), + GasLimit: hexutil.Uint64(BlockGasLimit), Nonce: types.BlockNonce{0x1}, Timestamp: hexutil.Uint64(block.Timestamp), - BaseFeePerGas: hexutil.Big(*big.NewInt(0)), + BaseFeePerGas: hexutil.Big(*baseFeesPerGas), LogsBloom: types.LogsBloom([]*types.Log{}), Miner: evmTypes.CoinbaseAddress.ToCommon(), Sha3Uncles: types.EmptyUncleHash, @@ -1095,76 +1071,6 @@ func (b *BlockChainAPI) prepareBlockResponse( return blockResponse, nil } -func (b *BlockChainAPI) getBlockNumber(blockNumberOrHash *rpc.BlockNumberOrHash) (int64, error) { - err := fmt.Errorf("%w: neither block number nor hash specified", errs.ErrInvalid) - if blockNumberOrHash == 
nil { - return 0, err - } - if number, ok := blockNumberOrHash.Number(); ok { - return number.Int64(), nil - } - - if hash, ok := blockNumberOrHash.Hash(); ok { - evmHeight, err := b.blocks.GetHeightByID(hash) - if err != nil { - b.logger.Error().Err(err).Msg("failed to get block by hash") - return 0, err - } - return int64(evmHeight), nil - } - - return 0, err -} - -// handleError takes in an error and in case the error is of type ErrEntityNotFound -// it returns nil instead of an error since that is according to the API spec, -// if the error is not of type ErrEntityNotFound it will return the error and the generic -// empty type. -func handleError[T any](err error, log zerolog.Logger, collector metrics.Collector) (T, error) { - var ( - zero T - revertedErr *errs.RevertError - ) - - switch { - // as per specification returning nil and nil for not found resources - case errors.Is(err, errs.ErrEntityNotFound): - return zero, nil - case errors.Is(err, errs.ErrInvalid): - return zero, err - case errors.Is(err, errs.ErrFailedTransaction): - return zero, err - case errors.As(err, &revertedErr): - return zero, revertedErr - default: - collector.ApiErrorOccurred() - log.Error().Err(err).Msg("api error") - return zero, errs.ErrInternal - } -} - -// decodeHash parses a hex-encoded 32-byte hash. The input may optionally -// be prefixed by 0x and can have a byte length up to 32. 
-func decodeHash(s string) (h common.Hash, inputLength int, err error) { - if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") { - s = s[2:] - } - if (len(s) & 1) > 0 { - s = "0" + s - } - b, err := hex.DecodeString(s) - if err != nil { - return common.Hash{}, 0, fmt.Errorf("invalid hex string: %s", s) - } - if len(b) > common.HashLength { - return common.Hash{}, len(b), fmt.Errorf( - "hex string too long, want at most 32 bytes, have %d bytes", - len(b), - ) - } - return common.BytesToHash(b), len(b), nil -} - /* Static responses section @@ -1230,8 +1136,7 @@ func (b *BlockChainAPI) GetUncleByBlockNumberAndIndex( // MaxPriorityFeePerGas returns a suggestion for a gas tip cap for dynamic fee transactions. func (b *BlockChainAPI) MaxPriorityFeePerGas(ctx context.Context) (*hexutil.Big, error) { - fee := hexutil.Big(*big.NewInt(1)) - return &fee, nil + return (*hexutil.Big)(b.config.GasPrice), nil } // Mining returns true if client is actively mining new blocks. diff --git a/api/debug.go b/api/debug.go index bf3a0f891..9d68cefae 100644 --- a/api/debug.go +++ b/api/debug.go @@ -2,16 +2,35 @@ package api import ( "context" + "errors" + "fmt" + "math/big" + "slices" "github.com/goccy/go-json" + "github.com/onflow/flow-go/fvm/evm/offchain/query" + "github.com/onflow/flow-go/fvm/evm/types" gethCommon "github.com/onflow/go-ethereum/common" "github.com/onflow/go-ethereum/eth/tracers" + "github.com/onflow/go-ethereum/eth/tracers/logger" "github.com/onflow/go-ethereum/rpc" "github.com/rs/zerolog" + "github.com/onflow/flow-evm-gateway/config" "github.com/onflow/flow-evm-gateway/metrics" "github.com/onflow/flow-evm-gateway/models" + errs "github.com/onflow/flow-evm-gateway/models/errors" + "github.com/onflow/flow-evm-gateway/services/evm" + "github.com/onflow/flow-evm-gateway/services/replayer" + "github.com/onflow/flow-evm-gateway/services/requester" "github.com/onflow/flow-evm-gateway/storage" + "github.com/onflow/flow-evm-gateway/storage/pebble" + flowEVM 
"github.com/onflow/flow-go/fvm/evm" + + // this import is needed for side-effects, because the + // tracers.DefaultDirectory is relying on the init function + _ "github.com/onflow/go-ethereum/eth/tracers/js" + _ "github.com/onflow/go-ethereum/eth/tracers/native" ) // txTraceResult is the result of a single transaction trace. @@ -22,77 +41,383 @@ type txTraceResult struct { } type DebugAPI struct { - logger zerolog.Logger - tracer storage.TraceIndexer - blocks storage.BlockIndexer - collector metrics.Collector + registerStore *pebble.RegisterStorage + logger zerolog.Logger + tracer storage.TraceIndexer + blocks storage.BlockIndexer + transactions storage.TransactionIndexer + receipts storage.ReceiptIndexer + client *requester.CrossSporkClient + config *config.Config + collector metrics.Collector } -func NewDebugAPI(tracer storage.TraceIndexer, blocks storage.BlockIndexer, logger zerolog.Logger, collector metrics.Collector) *DebugAPI { +func NewDebugAPI( + registerStore *pebble.RegisterStorage, + tracer storage.TraceIndexer, + blocks storage.BlockIndexer, + transactions storage.TransactionIndexer, + receipts storage.ReceiptIndexer, + client *requester.CrossSporkClient, + config *config.Config, + logger zerolog.Logger, + collector metrics.Collector, +) *DebugAPI { return &DebugAPI{ - logger: logger, - tracer: tracer, - blocks: blocks, - collector: collector, + registerStore: registerStore, + logger: logger, + tracer: tracer, + blocks: blocks, + transactions: transactions, + receipts: receipts, + client: client, + config: config, + collector: collector, } } -// TraceTransaction will return a debug execution trace of a transaction if it exists, -// currently we only support CALL traces, so the config is ignored. +// TraceTransaction will return a debug execution trace of a transaction, if it exists. 
func (d *DebugAPI) TraceTransaction( _ context.Context, hash gethCommon.Hash, - _ *tracers.TraceConfig, + config *tracers.TraceConfig, ) (json.RawMessage, error) { - res, err := d.tracer.GetTransaction(hash) + // If the given trace config is equal to the default call tracer used + // in block replay during ingestion, then we fetch the trace result + // from the Traces DB. + if isDefaultCallTracer(config) { + trace, err := d.tracer.GetTransaction(hash) + // If there is no error, we return the trace result from the DB. + if err == nil { + return trace, nil + } + + // If we got an error of `ErrEntityNotFound`, for whatever reason, + // we simply re-compute the trace below. If we got any other error, + // we return it. + if !errors.Is(err, errs.ErrEntityNotFound) { + d.logger.Error().Err(err).Msgf( + "failed to retrieve default call trace for tx: %s", + hash, + ) + return nil, err + } + } + + receipt, err := d.receipts.GetByTransactionID(hash) + if err != nil { + return nil, err + } + + block, err := d.blocks.GetByHeight(receipt.BlockNumber.Uint64()) + if err != nil { + return nil, err + } + + // We need to re-execute the given transaction and all the + // transactions that precede it in the same block, based on + // the previous block state, to generate the correct trace. + previousBlock, err := d.blocks.GetByHeight(block.Height - 1) + if err != nil { + return nil, err + } + + blockExecutor, err := d.executorAtBlock(previousBlock) if err != nil { - return handleError[json.RawMessage](err, d.logger, d.collector) + return nil, err + } + + tracer, err := tracerForReceipt(config, receipt) + if err != nil { + return nil, err + } + + // Re-execute the transactions in the order they appear, for the block + // that contains the given transaction. We set the tracer only for + // the given transaction, as we don't need it for the preceding + // transactions. 
Once we re-execute the desired transaction, we ignore + // the rest of the transactions in the block, and simply return the trace + // result. + txExecuted := false + var txTracer *tracers.Tracer + for _, h := range block.TransactionHashes { + if txExecuted { + break + } + + tx, err := d.transactions.Get(h) + if err != nil { + return nil, err + } + + if h == hash { + txTracer = tracer + txExecuted = true + } + + if err = blockExecutor.Run(tx, txTracer); err != nil { + return nil, err + } + } + + if txTracer != nil { + return txTracer.GetResult() } - return res, nil + + return nil, fmt.Errorf("failed to trace transaction with hash: %s", hash) } func (d *DebugAPI) TraceBlockByNumber( ctx context.Context, number rpc.BlockNumber, - cfg *tracers.TraceConfig, + config *tracers.TraceConfig, ) ([]*txTraceResult, error) { block, err := d.blocks.GetByHeight(uint64(number.Int64())) if err != nil { - return handleError[[]*txTraceResult](err, d.logger, d.collector) + return nil, err + } + + results := make([]*txTraceResult, len(block.TransactionHashes)) + + // If the given trace config is equal to the default call tracer used + // in block replay during ingestion, then we fetch the trace result + // from the Traces DB. + if isDefaultCallTracer(config) { + for i, hash := range block.TransactionHashes { + trace, err := d.TraceTransaction(ctx, hash, config) + + if err != nil { + results[i] = &txTraceResult{TxHash: hash, Error: err.Error()} + } else { + results[i] = &txTraceResult{TxHash: hash, Result: trace} + } + } + + return results, nil + } + + // We need to re-execute all the transactions from the given block, + // on top of the previous block state, to generate the correct traces. 
+ previousBlock, err := d.blocks.GetByHeight(block.Height - 1) + if err != nil { + return nil, err + } + + blockExecutor, err := d.executorAtBlock(previousBlock) + if err != nil { + return nil, err + } + + for i, h := range block.TransactionHashes { + tx, err := d.transactions.Get(h) + if err != nil { + return nil, err + } + + receipt, err := d.receipts.GetByTransactionID(tx.Hash()) + if err != nil { + return nil, err + } + + tracer, err := tracerForReceipt(config, receipt) + if err != nil { + return nil, err + } + + if err = blockExecutor.Run(tx, tracer); err != nil { + results[i] = &txTraceResult{TxHash: h, Error: err.Error()} + } else if txTrace, err := tracer.GetResult(); err != nil { + results[i] = &txTraceResult{TxHash: h, Error: err.Error()} + } else { + results[i] = &txTraceResult{TxHash: h, Result: txTrace} + } } - return d.traceBlock(ctx, block, cfg) + return results, nil } func (d *DebugAPI) TraceBlockByHash( ctx context.Context, hash gethCommon.Hash, - cfg *tracers.TraceConfig, + config *tracers.TraceConfig, ) ([]*txTraceResult, error) { block, err := d.blocks.GetByID(hash) if err != nil { - return handleError[[]*txTraceResult](err, d.logger, d.collector) + return nil, err } - return d.traceBlock(ctx, block, cfg) + return d.TraceBlockByNumber(ctx, rpc.BlockNumber(block.Height), config) } -func (d *DebugAPI) traceBlock( - ctx context.Context, - block *models.Block, - _ *tracers.TraceConfig, -) ([]*txTraceResult, error) { - results := make([]*txTraceResult, len(block.TransactionHashes)) - for i, h := range block.TransactionHashes { +func (d *DebugAPI) TraceCall( + _ context.Context, + args TransactionArgs, + blockNrOrHash rpc.BlockNumberOrHash, + config *tracers.TraceCallConfig, +) (interface{}, error) { + tx, err := encodeTxFromArgs(args) + if err != nil { + return nil, err + } - txTrace, err := d.TraceTransaction(ctx, h, nil) + // Default address in case user does not provide one + from := d.config.Coinbase + if args.From != nil { + from = *args.From + 
} - if err != nil { - results[i] = &txTraceResult{TxHash: h, Error: err.Error()} - } else { - results[i] = &txTraceResult{TxHash: h, Result: txTrace} + if config == nil { + config = &tracers.TraceCallConfig{} + } + + tracer, err := tracerForReceipt(&config.TraceConfig, nil) + if err != nil { + return nil, err + } + + height, err := resolveBlockTag(&blockNrOrHash, d.blocks, d.logger) + if err != nil { + return nil, err + } + + cdcHeight, err := d.blocks.GetCadenceHeight(height) + if err != nil { + return nil, err + } + + block, err := d.blocks.GetByHeight(height) + if err != nil { + return nil, err + } + + blocksProvider := replayer.NewBlocksProvider( + d.blocks, + d.config.FlowNetworkID, + tracer, + ) + viewProvider := query.NewViewProvider( + d.config.FlowNetworkID, + flowEVM.StorageAccountAddress(d.config.FlowNetworkID), + d.registerStore, + blocksProvider, + BlockGasLimit, + ) + + view, err := viewProvider.GetBlockView(block.Height) + if err != nil { + return nil, err + } + + to := gethCommon.Address{} + if tx.To != nil { + to = *tx.To + } + rca := requester.NewRemoteCadenceArch(cdcHeight, d.client, d.config.FlowNetworkID) + + opts := []query.DryCallOption{} + opts = append(opts, query.WithTracer(tracer)) + opts = append(opts, query.WithExtraPrecompiledContracts([]types.PrecompiledContract{rca})) + if config.StateOverrides != nil { + for addr, account := range *config.StateOverrides { + // Override account nonce. + if account.Nonce != nil { + opts = append(opts, query.WithStateOverrideNonce(addr, uint64(*account.Nonce))) + } + // Override account(contract) code. + if account.Code != nil { + opts = append(opts, query.WithStateOverrideCode(addr, *account.Code)) + } + // Override account balance. + if account.Balance != nil { + opts = append(opts, query.WithStateOverrideBalance(addr, (*big.Int)(*account.Balance))) + } + // Replace entire state if caller requires. 
+ if account.State != nil { + opts = append(opts, query.WithStateOverrideState(addr, *account.State)) + } + // Apply state diff into specified accounts. + if account.StateDiff != nil { + opts = append(opts, query.WithStateOverrideStateDiff(addr, *account.StateDiff)) + } } } + _, err = view.DryCall( + from, + to, + tx.Data, + tx.Value, + tx.Gas, + opts..., + ) - return results, nil + if err != nil { + return nil, err + } + + return tracer.GetResult() +} + +func (d *DebugAPI) executorAtBlock(block *models.Block) (*evm.BlockExecutor, error) { + snapshot, err := d.registerStore.GetSnapshotAt(block.Height) + if err != nil { + return nil, fmt.Errorf( + "failed to get register snapshot at block height %d: %w", + block.Height, + err, + ) + } + ledger := storage.NewRegisterDelta(snapshot) + + return evm.NewBlockExecutor( + block, + ledger, + d.config.FlowNetworkID, + d.blocks, + d.receipts, + d.logger, + ), nil +} + +func tracerForReceipt( + config *tracers.TraceConfig, + receipt *models.Receipt, +) (*tracers.Tracer, error) { + if config == nil { + config = &tracers.TraceConfig{} + } + + // Default tracer is the struct logger + if config.Tracer == nil { + logger := logger.NewStructLogger(config.Config) + return &tracers.Tracer{ + Hooks: logger.Hooks(), + GetResult: logger.GetResult, + Stop: logger.Stop, + }, nil + } + + tracerCtx := &tracers.Context{} + if receipt != nil { + tracerCtx = &tracers.Context{ + BlockHash: receipt.BlockHash, + BlockNumber: receipt.BlockNumber, + TxIndex: int(receipt.TransactionIndex), + TxHash: receipt.TxHash, + } + } + + return tracers.DefaultDirectory.New(*config.Tracer, tracerCtx, config.TracerConfig) +} + +func isDefaultCallTracer(config *tracers.TraceConfig) bool { + if config == nil { + return false + } + + if *config.Tracer != replayer.TracerName { + return false + } + + tracerConfig := json.RawMessage(replayer.TracerConfig) + return slices.Equal(config.TracerConfig, tracerConfig) } diff --git a/api/encode_transaction.go 
b/api/encode_transaction.go deleted file mode 100644 index 44dacec8a..000000000 --- a/api/encode_transaction.go +++ /dev/null @@ -1,56 +0,0 @@ -package api - -import ( - "fmt" - "math/big" - - "github.com/onflow/go-ethereum/core/types" - - errs "github.com/onflow/flow-evm-gateway/models/errors" -) - -const blockGasLimit uint64 = 120_000_000 - -// encodeTxFromArgs will create a transaction from the given arguments. -// The resulting unsigned transaction is only supposed to be used through -// `EVM.dryRun` inside Cadence scripts, meaning that no state change -// will occur. -// This is only useful for `eth_estimateGas` and `eth_call` endpoints. -func encodeTxFromArgs(args TransactionArgs) ([]byte, error) { - var data []byte - if args.Data != nil { - data = *args.Data - } else if args.Input != nil { - data = *args.Input - } - - // provide a high enough gas for the tx to be able to execute, - // capped by the gas set in transaction args. - gasLimit := blockGasLimit - if args.Gas != nil { - gasLimit = uint64(*args.Gas) - } - - value := big.NewInt(0) - if args.Value != nil { - value = args.Value.ToInt() - } - - tx := types.NewTx( - &types.LegacyTx{ - Nonce: 0, - To: args.To, - Value: value, - Gas: gasLimit, - GasPrice: big.NewInt(0), - Data: data, - }, - ) - - enc, err := tx.MarshalBinary() - if err != nil { - return nil, fmt.Errorf("%w: %w", errs.ErrInvalid, err) - } - - return enc, nil -} diff --git a/api/stream.go b/api/stream.go index 11e81f816..57a979de1 100644 --- a/api/stream.go +++ b/api/stream.go @@ -140,7 +140,7 @@ func (s *StreamAPI) prepareBlockHeader( TransactionsRoot: block.TransactionHashRoot, ReceiptsRoot: block.ReceiptRoot, Miner: evmTypes.CoinbaseAddress.ToCommon(), - GasLimit: hexutil.Uint64(blockGasLimit), + GasLimit: hexutil.Uint64(BlockGasLimit), Timestamp: hexutil.Uint64(block.Timestamp), } diff --git a/api/utils.go b/api/utils.go new file mode 100644 index 000000000..7e056e758 --- /dev/null +++ b/api/utils.go @@ -0,0 +1,166 @@ +package api + 
+import ( + "encoding/hex" + "errors" + "fmt" + "math/big" + "strings" + + "github.com/onflow/flow-evm-gateway/metrics" + errs "github.com/onflow/flow-evm-gateway/models/errors" + "github.com/onflow/flow-evm-gateway/storage" + "github.com/onflow/go-ethereum/common" + "github.com/onflow/go-ethereum/core/types" + "github.com/onflow/go-ethereum/rpc" + "github.com/rs/zerolog" +) + +func resolveBlockTag( + blockNumberOrHash *rpc.BlockNumberOrHash, + blocksDB storage.BlockIndexer, + logger zerolog.Logger, +) (uint64, error) { + if blockNumberOrHash == nil { + return 0, fmt.Errorf( + "%w: neither block number nor hash specified", + errs.ErrInvalid, + ) + } + if number, ok := blockNumberOrHash.Number(); ok { + height, err := resolveBlockNumber(number, blocksDB) + if err != nil { + logger.Error().Err(err). + Stringer("block_number", number). + Msg("failed to resolve block by number") + return 0, err + } + return height, nil + } + + if hash, ok := blockNumberOrHash.Hash(); ok { + height, err := blocksDB.GetHeightByID(hash) + if err != nil { + logger.Error().Err(err). + Stringer("block_hash", hash). 
+ Msg("failed to resolve block by hash") + return 0, err + } + return height, nil + } + + return 0, fmt.Errorf( + "%w: neither block number nor hash specified", + errs.ErrInvalid, + ) +} + +func resolveBlockNumber( + number rpc.BlockNumber, + blocksDB storage.BlockIndexer, +) (uint64, error) { + height := number.Int64() + + // if special values (latest) we return latest executed height + // + // all the special values are: + // SafeBlockNumber = BlockNumber(-4) + // FinalizedBlockNumber = BlockNumber(-3) + // LatestBlockNumber = BlockNumber(-2) + // PendingBlockNumber = BlockNumber(-1) + // + // EVM on Flow does not have these concepts, but the latest block is the closest fit + if height < 0 { + executed, err := blocksDB.LatestEVMHeight() + if err != nil { + return 0, err + } + height = int64(executed) + } + + return uint64(height), nil +} + +// decodeHash parses a hex-encoded 32-byte hash. The input may optionally +// be prefixed by 0x and can have a byte length up to 32. +func decodeHash(s string) (h common.Hash, err error) { + if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") { + s = s[2:] + } + if (len(s) & 1) > 0 { + s = "0" + s + } + b, err := hex.DecodeString(s) + if err != nil { + return common.Hash{}, fmt.Errorf("invalid hex string: %s", s) + } + if len(b) > common.HashLength { + return common.Hash{}, fmt.Errorf( + "hex string too long, want at most 32 bytes, have %d bytes", + len(b), + ) + } + return common.BytesToHash(b), nil +} + +// handleError takes in an error and in case the error is of type ErrEntityNotFound +// it returns nil instead of an error since that is according to the API spec, +// if the error is not of type ErrEntityNotFound it will return the error and the generic +// empty type. 
+func handleError[T any](err error, log zerolog.Logger, collector metrics.Collector) (T, error) { + var ( + zero T + revertedErr *errs.RevertError + ) + + switch { + // as per specification returning nil and nil for not found resources + case errors.Is(err, errs.ErrEntityNotFound): + return zero, nil + case errors.Is(err, errs.ErrInvalid): + return zero, err + case errors.Is(err, errs.ErrFailedTransaction): + return zero, err + case errors.As(err, &revertedErr): + return zero, revertedErr + default: + collector.ApiErrorOccurred() + log.Error().Err(err).Msg("api error") + return zero, errs.ErrInternal + } +} + +// encodeTxFromArgs will create a transaction from the given arguments. +// The resulting unsigned transaction is only supposed to be used through +// `EVM.dryRun` inside Cadence scripts, meaning that no state change +// will occur. +// This is only useful for `eth_estimateGas` and `eth_call` endpoints. +func encodeTxFromArgs(args TransactionArgs) (*types.LegacyTx, error) { + var data []byte + if args.Data != nil { + data = *args.Data + } else if args.Input != nil { + data = *args.Input + } + + // provide a high enough gas for the tx to be able to execute, + // capped by the gas set in transaction args. 
+ gasLimit := BlockGasLimit + if args.Gas != nil { + gasLimit = uint64(*args.Gas) + } + + value := big.NewInt(0) + if args.Value != nil { + value = args.Value.ToInt() + } + + return &types.LegacyTx{ + Nonce: 0, + To: args.To, + Value: value, + Gas: gasLimit, + GasPrice: big.NewInt(0), + Data: data, + }, nil +} diff --git a/bootstrap/bootstrap.go b/bootstrap/bootstrap.go index dbed6f484..34467fdf8 100644 --- a/bootstrap/bootstrap.go +++ b/bootstrap/bootstrap.go @@ -7,9 +7,14 @@ import ( "math" "time" + pebbleDB "github.com/cockroachdb/pebble" + "github.com/onflow/flow-go-sdk/access" "github.com/onflow/flow-go-sdk/access/grpc" "github.com/onflow/flow-go-sdk/crypto" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm" + flowGo "github.com/onflow/flow-go/model/flow" gethTypes "github.com/onflow/go-ethereum/core/types" "github.com/rs/zerolog" "github.com/sethvargo/go-limiter/memorystore" @@ -21,18 +26,18 @@ import ( "github.com/onflow/flow-evm-gateway/models" errs "github.com/onflow/flow-evm-gateway/models/errors" "github.com/onflow/flow-evm-gateway/services/ingestion" + "github.com/onflow/flow-evm-gateway/services/replayer" "github.com/onflow/flow-evm-gateway/services/requester" - "github.com/onflow/flow-evm-gateway/services/traces" "github.com/onflow/flow-evm-gateway/storage" "github.com/onflow/flow-evm-gateway/storage/pebble" ) type Storages struct { Storage *pebble.Storage + Registers *pebble.RegisterStorage Blocks storage.BlockIndexer Transactions storage.TransactionIndexer Receipts storage.ReceiptIndexer - Accounts storage.AccountIndexer Traces storage.TraceIndexer } @@ -52,7 +57,6 @@ type Bootstrap struct { server *api.Server metrics *metrics.Server events *ingestion.Engine - traces *traces.Engine profiler *api.ProfileServer } @@ -116,97 +120,53 @@ func (b *Bootstrap) StartEventIngestion(ctx context.Context) error { Uint64("missed-heights", latestCadenceBlock.Height-latestCadenceHeight). 
Msg("indexing cadence height information") + chainID := b.config.FlowNetworkID + // create event subscriber - subscriber := ingestion.NewRPCSubscriber( - b.client, - b.config.HeartbeatInterval, - b.config.FlowNetworkID, + subscriber := ingestion.NewRPCEventSubscriber( b.logger, + b.client, + chainID, + latestCadenceHeight, + ) + + callTracerCollector, err := replayer.NewCallTracerCollector(b.logger) + if err != nil { + return err + } + blocksProvider := replayer.NewBlocksProvider( + b.storages.Blocks, + chainID, + callTracerCollector.TxTracer(), ) + replayerConfig := replayer.Config{ + ChainID: chainID, + RootAddr: evm.StorageAccountAddress(chainID), + CallTracerCollector: callTracerCollector, + ValidateResults: true, + } // initialize event ingestion engine b.events = ingestion.NewEventIngestionEngine( subscriber, + blocksProvider, b.storages.Storage, + b.storages.Registers, b.storages.Blocks, b.storages.Receipts, b.storages.Transactions, - b.storages.Accounts, + b.storages.Traces, b.publishers.Block, b.publishers.Logs, b.logger, b.collector, + replayerConfig, ) StartEngine(ctx, b.events, l) return nil } -func (b *Bootstrap) StartTraceDownloader(ctx context.Context) error { - l := b.logger.With().Str("component", "bootstrap-traces").Logger() - l.Info().Msg("starting engine") - - // create gcp downloader - downloader, err := traces.NewGCPDownloader(b.config.TracesBucketName, b.logger) - if err != nil { - return err - } - - // initialize trace downloader engine - b.traces = traces.NewTracesIngestionEngine( - b.publishers.Block, - b.storages.Blocks, - b.storages.Traces, - downloader, - b.logger, - b.collector, - ) - - StartEngine(ctx, b.traces, l) - - if b.config.TracesBackfillStartHeight > 0 { - startHeight := b.config.TracesBackfillStartHeight - if _, err := b.storages.Blocks.GetByHeight(startHeight); err != nil { - return fmt.Errorf("failed to get provided start height %d in db: %w", startHeight, err) - } - - cadenceStartHeight, err := 
b.storages.Blocks.GetCadenceHeight(startHeight) - if err != nil { - return fmt.Errorf("failed to get cadence height for backfill start height %d: %w", startHeight, err) - } - - if cadenceStartHeight < b.config.InitCadenceHeight { - b.logger.Warn(). - Uint64("evm-start-height", startHeight). - Uint64("cadence-start-height", cadenceStartHeight). - Uint64("init-cadence-height", b.config.InitCadenceHeight). - Msg("backfill start height is before initial cadence height. data may be missing from configured traces bucket") - } - - endHeight := b.config.TracesBackfillEndHeight - if endHeight == 0 { - endHeight, err = b.storages.Blocks.LatestEVMHeight() - if err != nil { - return fmt.Errorf("failed to get latest EVM height: %w", err) - } - } else if _, err := b.storages.Blocks.GetByHeight(endHeight); err != nil { - return fmt.Errorf("failed to get provided end height %d in db: %w", endHeight, err) - } - - go b.traces.Backfill(startHeight, endHeight) - } - - return nil -} - -func (b *Bootstrap) StopTraceDownloader() { - if b.traces == nil { - return - } - b.logger.Warn().Msg("stopping trace downloader engine") - b.traces.Stop() -} - func (b *Bootstrap) StopEventIngestion() { if b.events == nil { return @@ -249,7 +209,15 @@ func (b *Bootstrap) StartAPIServer(ctx context.Context) error { b.logger, ) + blocksProvider := replayer.NewBlocksProvider( + b.storages.Blocks, + b.config.FlowNetworkID, + nil, + ) + evm, err := requester.NewEVM( + b.storages.Registers, + blocksProvider, b.client, b.config, signer, @@ -281,7 +249,6 @@ func (b *Bootstrap) StartAPIServer(ctx context.Context) error { b.storages.Blocks, b.storages.Transactions, b.storages.Receipts, - b.storages.Accounts, ratelimiter, b.collector, ) @@ -309,10 +276,17 @@ func (b *Bootstrap) StartAPIServer(ctx context.Context) error { ratelimiter, ) - var debugAPI *api.DebugAPI - if b.config.TracesEnabled { - debugAPI = api.NewDebugAPI(b.storages.Traces, b.storages.Blocks, b.logger, b.collector) - } + debugAPI := 
api.NewDebugAPI( + b.storages.Registers, + b.storages.Traces, + b.storages.Blocks, + b.storages.Transactions, + b.storages.Receipts, + b.client, + b.config, + b.logger, + b.collector, + ) var walletAPI *api.WalletAPI if b.config.WalletEnabled { @@ -411,6 +385,16 @@ func (b *Bootstrap) StopProfilerServer() { } } +func (b *Bootstrap) StopDB() { + if b.storages == nil || b.storages.Storage == nil { + return + } + err := b.storages.Storage.Close() + if err != nil { + b.logger.Err(err).Msg("PebbleDB graceful shutdown failed") + } +} + // StartEngine starts provided engine and panics if there are startup errors. func StartEngine( ctx context.Context, @@ -488,6 +472,8 @@ func setupStorage( } blocks := pebble.NewBlocks(store, config.FlowNetworkID) + storageAddress := evm.StorageAccountAddress(config.FlowNetworkID) + registerStore := pebble.NewRegisterStorage(store, storageAddress) // hard set the start cadence height, this is used when force reindexing if config.ForceStartCadenceHeight != 0 { @@ -499,13 +485,44 @@ func setupStorage( // if database is not initialized require init height if _, err := blocks.LatestCadenceHeight(); errors.Is(err, errs.ErrStorageNotInitialized) { + batch := store.NewBatch() + defer func(batch *pebbleDB.Batch) { + err := batch.Close() + if err != nil { + // we don't know what went wrong, so this is fatal + logger.Fatal().Err(err).Msg("failed to close batch") + } + }(batch) + cadenceHeight := config.InitCadenceHeight + evmBlokcHeight := uint64(0) cadenceBlock, err := client.GetBlockHeaderByHeight(context.Background(), cadenceHeight) if err != nil { return nil, fmt.Errorf("could not fetch provided cadence height, make sure it's correct: %w", err) } - if err := blocks.InitHeights(cadenceHeight, cadenceBlock.ID); err != nil { + snapshot, err := registerStore.GetSnapshotAt(evmBlokcHeight) + if err != nil { + return nil, fmt.Errorf("could not get register snapshot at block height %d: %w", 0, err) + } + + delta := storage.NewRegisterDelta(snapshot) + 
accountStatus := environment.NewAccountStatus() + err = delta.SetValue( + storageAddress[:], + []byte(flowGo.AccountStatusKey), + accountStatus.ToBytes(), + ) + if err != nil { + return nil, fmt.Errorf("could not set account status: %w", err) + } + + err = registerStore.Store(delta.GetUpdates(), evmBlokcHeight, batch) + if err != nil { + return nil, fmt.Errorf("could not store register updates: %w", err) + } + + if err := blocks.InitHeights(cadenceHeight, cadenceBlock.ID, batch); err != nil { return nil, fmt.Errorf( "failed to init the database for block height: %d and ID: %s, with : %w", cadenceHeight, @@ -513,15 +530,26 @@ func setupStorage( err, ) } - logger.Info().Msgf("database initialized with cadence height: %d", cadenceHeight) + + err = batch.Commit(pebbleDB.Sync) + if err != nil { + return nil, fmt.Errorf("could not commit register updates: %w", err) + } + + logger.Info(). + Stringer("fvm_address_for_evm_storage_account", storageAddress). + Msgf("database initialized with cadence height: %d", cadenceHeight) } + //else { + // // TODO(JanezP): verify storage account owner is correct + //} return &Storages{ Storage: store, Blocks: blocks, + Registers: registerStore, Transactions: pebble.NewTransactions(store), Receipts: pebble.NewReceipts(store), - Accounts: pebble.NewAccounts(store), Traces: pebble.NewTraces(store), }, nil } @@ -535,12 +563,6 @@ func Run(ctx context.Context, cfg *config.Config, ready chan struct{}) error { return err } - if cfg.TracesEnabled { - if err := boot.StartTraceDownloader(ctx); err != nil { - return fmt.Errorf("failed to start trace downloader engine: %w", err) - } - } - if err := boot.StartEventIngestion(ctx); err != nil { return fmt.Errorf("failed to start event ingestion engine: %w", err) } @@ -566,8 +588,8 @@ func Run(ctx context.Context, cfg *config.Config, ready chan struct{}) error { boot.StopEventIngestion() boot.StopMetricsServer() - boot.StopTraceDownloader() boot.StopAPIServer() + boot.StopDB() return nil } diff --git 
a/cmd/run/cmd.go b/cmd/run/cmd.go index aa90aed2a..3b93e45a1 100644 --- a/cmd/run/cmd.go +++ b/cmd/run/cmd.go @@ -72,7 +72,7 @@ func parseConfigFromFlags() error { if g, ok := new(big.Int).SetString(gas, 10); ok { cfg.GasPrice = g - } else if !ok { + } else { return fmt.Errorf("invalid gas price") } @@ -201,12 +201,6 @@ func parseConfigFromFlags() error { cfg.ForceStartCadenceHeight = forceStartHeight } - cfg.TracesEnabled = cfg.TracesBucketName != "" - - if cfg.TracesBackfillStartHeight > 0 && cfg.TracesBackfillEndHeight > 0 && cfg.TracesBackfillStartHeight > cfg.TracesBackfillEndHeight { - return fmt.Errorf("traces backfill start height must be less than the end height") - } - if walletKey != "" { k, err := gethCrypto.HexToECDSA(walletKey) if err != nil { @@ -268,14 +262,9 @@ func init() { Cmd.Flags().Float64Var(&cfg.StreamLimit, "stream-limit", 10, "Rate-limits the events sent to the client within one second") Cmd.Flags().Uint64Var(&cfg.RateLimit, "rate-limit", 50, "Rate-limit requests per second made by the client over any protocol (ws/http)") Cmd.Flags().StringVar(&cfg.AddressHeader, "address-header", "", "Address header that contains the client IP, this is useful when the server is behind a proxy that sets the source IP of the client. Leave empty if no proxy is used.") - Cmd.Flags().Uint64Var(&cfg.HeartbeatInterval, "heartbeat-interval", 100, "Heartbeat interval for AN event subscription") - Cmd.Flags().UintVar(&cfg.CacheSize, "script-cache-size", 10000, "Cache size used for script execution in items kept in cache") Cmd.Flags().IntVar(&streamTimeout, "stream-timeout", 3, "Defines the timeout in seconds the server waits for the event to be sent to the client") Cmd.Flags().Uint64Var(&forceStartHeight, "force-start-height", 0, "Force set starting Cadence height. 
WARNING: This should only be used locally or for testing, never in production.") Cmd.Flags().StringVar(&filterExpiry, "filter-expiry", "5m", "Filter defines the time it takes for an idle filter to expire") - Cmd.Flags().StringVar(&cfg.TracesBucketName, "traces-gcp-bucket", "", "GCP bucket name where transaction traces are stored") - Cmd.Flags().Uint64Var(&cfg.TracesBackfillStartHeight, "traces-backfill-start-height", 0, "evm block height from which to start backfilling missing traces.") - Cmd.Flags().Uint64Var(&cfg.TracesBackfillEndHeight, "traces-backfill-end-height", 0, "evm block height until which to backfill missing traces. If 0, backfill until the latest block") Cmd.Flags().StringVar(&cloudKMSProjectID, "coa-cloud-kms-project-id", "", "The project ID containing the KMS keys, e.g. 'flow-evm-gateway'") Cmd.Flags().StringVar(&cloudKMSLocationID, "coa-cloud-kms-location-id", "", "The location ID where the key ring is grouped into, e.g. 'global'") Cmd.Flags().StringVar(&cloudKMSKeyRingID, "coa-cloud-kms-key-ring-id", "", "The key ring ID where the KMS keys exist, e.g. 'tx-signing'") diff --git a/config/config.go b/config/config.go index 0c8b65590..ee3d37ac4 100644 --- a/config/config.go +++ b/config/config.go @@ -74,16 +74,6 @@ type Config struct { FilterExpiry time.Duration // ForceStartCadenceHeight will force set the starting Cadence height, this should be only used for testing or locally. ForceStartCadenceHeight uint64 - // HeartbeatInterval sets custom heartbeat interval for events - HeartbeatInterval uint64 - // TracesBucketName sets the GCP bucket name where transaction traces are being stored. - TracesBucketName string - // TracesEnabled sets whether the node is supporting transaction traces. - TracesEnabled bool - // TracesBackfillStartHeight sets the starting block height for backfilling missing traces. - TracesBackfillStartHeight uint64 - // TracesBackfillEndHeight sets the ending block height for backfilling missing traces. 
- TracesBackfillEndHeight uint64 // WalletEnabled sets whether wallet APIs are enabled WalletEnabled bool // WalletKey used for signing transactions @@ -92,8 +82,6 @@ type Config struct { MetricsPort int // IndexOnly configures the gateway to not accept any transactions but only queries of the state IndexOnly bool - // Cache size in units of items in cache, one unit in cache takes approximately 64 bytes - CacheSize uint // ProfilerEnabled sets whether the profiler server is enabled ProfilerEnabled bool // ProfilerHost is the host for the profiler server will listen to (e.g. localhost, 0.0.0.0) diff --git a/go.mod b/go.mod index 462b19566..335ec24ef 100644 --- a/go.mod +++ b/go.mod @@ -3,15 +3,12 @@ module github.com/onflow/flow-evm-gateway go 1.22 require ( - cloud.google.com/go/storage v1.36.0 github.com/cockroachdb/pebble v1.1.1 github.com/goccy/go-json v0.10.2 - github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/onflow/atree v0.8.0 github.com/onflow/cadence v1.2.1 github.com/onflow/flow-go v0.38.0-preview.0.0.20241022154145-6a254edbec23 github.com/onflow/flow-go-sdk v1.2.2 - github.com/onflow/flow/protobuf/go/flow v0.4.7 github.com/onflow/go-ethereum v1.14.7 github.com/prometheus/client_golang v1.18.0 github.com/rs/cors v1.8.0 @@ -23,7 +20,6 @@ require ( github.com/stretchr/testify v1.9.0 golang.org/x/exp v0.0.0-20240119083558-1b970713d09a golang.org/x/sync v0.8.0 - google.golang.org/api v0.162.0 google.golang.org/grpc v1.63.2 ) @@ -33,6 +29,7 @@ require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.6 // indirect cloud.google.com/go/kms v1.15.7 // indirect + cloud.google.com/go/storage v1.36.0 // indirect github.com/DataDog/zstd v1.5.2 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/SaveTheRbtz/mph v0.1.1-0.20240117162131-4166ec7869bc // indirect @@ -60,6 +57,8 @@ require ( github.com/dgraph-io/badger/v2 v2.2007.4 // indirect github.com/dgraph-io/ristretto v0.1.0 // indirect 
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect + github.com/dlclark/regexp2 v1.7.0 // indirect + github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/ef-ds/deque v1.0.4 // indirect github.com/ethereum/c-kzg-4844 v1.0.0 // indirect @@ -75,12 +74,14 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect @@ -90,11 +91,10 @@ require ( github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/uint256 v1.3.0 // indirect - github.com/huandu/go-clone v1.6.0 // indirect - github.com/huandu/go-clone/generic v1.7.2 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect @@ -142,6 +142,7 @@ require ( github.com/onflow/flow-ft/lib/go/templates v1.0.1 // indirect github.com/onflow/flow-nft/lib/go/contracts v1.2.2 // indirect github.com/onflow/flow-nft/lib/go/templates v1.2.1 // indirect + 
github.com/onflow/flow/protobuf/go/flow v0.4.7 // indirect github.com/onflow/sdks v0.6.0-preview.1 // indirect github.com/onsi/gomega v1.18.1 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect @@ -200,6 +201,7 @@ require ( golang.org/x/time v0.5.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gonum.org/v1/gonum v0.14.0 // indirect + google.golang.org/api v0.162.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect diff --git a/go.sum b/go.sum index 2160cd173..6fe1a45ae 100644 --- a/go.sum +++ b/go.sum @@ -91,8 +91,11 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -153,8 +156,16 @@ github.com/dgrijalva/jwt-go 
v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= +github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= +github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= +github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= @@ -219,6 +230,8 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= 
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= @@ -308,6 +321,9 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= +github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42 h1:dHLYa5D8/Ta0aLR2XcPsrkpAgGeFs6thhMcQK0oQ0n8= +github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= @@ -325,10 +341,7 @@ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod 
h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= @@ -353,8 +366,6 @@ github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iU github.com/holiman/uint256 v1.3.0 h1:4wdcm/tnd0xXdu7iS3ruNvxkWwrb4aeBQv19ayYn8F4= github.com/holiman/uint256 v1.3.0/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= -github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= github.com/huandu/go-clone v1.6.0 h1:HMo5uvg4wgfiy5FoGOqlFLQED/VGRm2D9Pi8g1FXPGc= github.com/huandu/go-clone v1.6.0/go.mod h1:ReGivhG6op3GYr+UY3lS6mxjKp7MIGTknuU5TbTVaXE= github.com/huandu/go-clone/generic v1.7.2 h1:47pQphxs1Xc9cVADjOHN+Bm5D0hNagwH9UXErbxgVKA= @@ -363,6 +374,7 @@ github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -417,6 +429,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -591,6 +605,7 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod 
h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= @@ -924,6 +939,7 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/models/block.go b/models/block.go index 8ed43ea26..3282578ff 100644 --- a/models/block.go +++ b/models/block.go @@ -28,8 +28,7 @@ func GenesisBlock(chainID flow.ChainID) *Block { func NewBlockFromBytes(data []byte) (*Block, error) { var b *Block - err := rlp.DecodeBytes(data, &b) - if err != nil { + if err := rlp.DecodeBytes(data, &b); err != nil { pastBlock, err := decodeBlockBreakingChanges(data) if err != nil { return nil, err @@ -37,10 +36,6 @@ func NewBlockFromBytes(data []byte) (*Block, error) { b = pastBlock } - // this is added because RLP decoding will decode into an empty string - if b.FixedHash != nil && *b.FixedHash == "" { - b.FixedHash = nil - } return b, nil } @@ -51,10 +46,8 @@ type Block struct { // will have more fields than before, so we make sure the hash we calculated // with the previous format is fixed by assigning it to this field and then // on hash calculation we check if this field is set we just return it. 
- // We must make the FixedHash exported so RLP encoding preserve it, and - // we must use string not common.Hash since RLP decoding has an issue - // with decoding into nil pointer slice. - FixedHash *string + // We must make the FixedHash exported so RLP encoding preserves it. + FixedHash gethCommon.Hash TransactionHashes []gethCommon.Hash } @@ -63,31 +56,30 @@ func (b *Block) ToBytes() ([]byte, error) { } func (b *Block) Hash() (gethCommon.Hash, error) { - if b.FixedHash != nil && *b.FixedHash != "" { - return gethCommon.HexToHash(*b.FixedHash), nil + if b.FixedHash != zeroGethHash { + return b.FixedHash, nil } return b.Block.Hash() } // decodeBlockEvent takes a cadence event that contains executed block payload and // decodes it into the Block type. -func decodeBlockEvent(event cadence.Event) (*Block, error) { +func decodeBlockEvent(event cadence.Event) (*Block, *events.BlockEventPayload, error) { payload, err := events.DecodeBlockEventPayload(event) if err != nil { - return nil, fmt.Errorf( + return nil, nil, fmt.Errorf( "failed to Cadence-decode EVM block event [%s]: %w", event.String(), err, ) } - var fixedHash *string + fixedHash := gethCommon.Hash{} // If the `PrevRandao` field is the zero hash, we know that // this is a block with the legacy format, and we need to // fix its hash, due to the hash calculation breaking change. if payload.PrevRandao == zeroGethHash { - hash := payload.Hash.String() - fixedHash = &hash + fixedHash = payload.Hash } return &Block{ @@ -102,7 +94,7 @@ func decodeBlockEvent(event cadence.Event) (*Block, error) { PrevRandao: payload.PrevRandao, }, FixedHash: fixedHash, - }, nil + }, payload, nil } // blockV0 is the block format, prior to adding the PrevRandao field. @@ -115,7 +107,10 @@ type blockV0 struct { // the fields from the blockV0Fields type. 
func (b *blockV0) Hash() (gethCommon.Hash, error) { data, err := b.Block.ToBytes() - return gethCrypto.Keccak256Hash(data), err + if err != nil { + return gethCommon.Hash{}, err + } + return gethCrypto.Keccak256Hash(data), nil } // blockV0Fields needed for decoding & computing the hash of blocks @@ -136,12 +131,11 @@ func (b *blockV0Fields) ToBytes() ([]byte, error) { } // decodeBlockBreakingChanges will try to decode the bytes into all -// previous versions of block type, if it succeeds it will return the +// previous versions of block type. If it succeeds it will return the // migrated block, otherwise it will return the decoding error. func decodeBlockBreakingChanges(encoded []byte) (*Block, error) { b0 := &blockV0{} - err := rlp.DecodeBytes(encoded, b0) - if err != nil { + if err := rlp.DecodeBytes(encoded, b0); err != nil { return nil, err } @@ -149,7 +143,6 @@ func decodeBlockBreakingChanges(encoded []byte) (*Block, error) { if err != nil { return nil, err } - h := blockHash.String() return &Block{ Block: &types.Block{ @@ -161,7 +154,7 @@ func decodeBlockBreakingChanges(encoded []byte) (*Block, error) { TransactionHashRoot: b0.Block.TransactionHashRoot, TotalGasUsed: b0.Block.TotalGasUsed, }, - FixedHash: &h, + FixedHash: blockHash, TransactionHashes: b0.TransactionHashes, }, nil } diff --git a/models/block_test.go b/models/block_test.go index f86167986..f91c5caf6 100644 --- a/models/block_test.go +++ b/models/block_test.go @@ -32,6 +32,14 @@ func Test_DecodePastBlockFormat(t *testing.T) { block, err := NewBlockFromBytes(blockBytes) require.NoError(t, err) + blockHash, err := block.Hash() + require.NoError(t, err) + + assert.Equal( + t, + gethCommon.HexToHash("0xcad79e3019da8014f623f351f01c88d1bcb4613352d4801548c6b07992fd1393"), + blockHash, + ) assert.Equal( t, gethCommon.HexToHash("0x05aa4a6edbcf6fa81178566596be1c7fff7b721615c8b3bbd14ff76d9c81ec9b"), @@ -66,12 +74,12 @@ func Test_DecodePastBlockFormat(t *testing.T) { } func Test_FixedHashBlock(t *testing.T) 
{ - fixed := gethCommon.HexToHash("0x2").String() + fixed := gethCommon.HexToHash("0x2") block := Block{ Block: &types.Block{ Height: 1, }, - FixedHash: &fixed, + FixedHash: fixed, TransactionHashes: []gethCommon.Hash{ gethCommon.HexToHash("0x3"), gethCommon.HexToHash("0x4"), @@ -80,7 +88,7 @@ func Test_FixedHashBlock(t *testing.T) { h, err := block.Hash() require.NoError(t, err) - assert.Equal(t, fixed, h.String()) + assert.Equal(t, fixed, h) data, err := block.ToBytes() require.NoError(t, err) @@ -91,7 +99,7 @@ func Test_FixedHashBlock(t *testing.T) { // make sure fixed hash and transaction hashes persists after decoding h, err = decoded.Hash() require.NoError(t, err) - require.Equal(t, fixed, h.String()) + require.Equal(t, fixed, h) require.Equal(t, block.TransactionHashes, decoded.TransactionHashes) } @@ -112,7 +120,7 @@ func Test_DecodeBlockExecutedEvent(t *testing.T) { encEv, err := ev.Payload.ToCadence(flowGo.Previewnet) require.NoError(t, err) - decBlock, err := decodeBlockEvent(encEv) + decBlock, _, err := decodeBlockEvent(encEv) require.NoError(t, err) assert.Equal(t, decBlock, block) @@ -150,7 +158,7 @@ func Test_DecodingLegacyBlockExecutedEvent(t *testing.T) { hashToCadenceArrayValue(block.TransactionHashRoot), }).WithType(eventType) - b, err := decodeBlockEvent(legacyEvent) + b, _, err := decodeBlockEvent(legacyEvent) require.NoError(t, err) require.Equal(t, block.ParentBlockHash, b.ParentBlockHash) diff --git a/models/events.go b/models/events.go index 53af03c6a..b8ad5a539 100644 --- a/models/events.go +++ b/models/events.go @@ -36,10 +36,12 @@ func isTransactionExecutedEvent(event cadence.Event) bool { // CadenceEvents contains Flow emitted events containing one or zero evm block executed event, // and multiple or zero evm transaction events. 
type CadenceEvents struct { - events flow.BlockEvents // Flow events for a specific flow block - block *Block // EVM block (at most one per Flow block) - transactions []Transaction // transactions in the EVM block - receipts []*Receipt // receipts for transactions + events flow.BlockEvents // Flow events for a specific flow block + block *Block // EVM block (at most one per Flow block) + blockEventPayload *events.BlockEventPayload // EVM.BlockExecuted event payload (at most one per Flow block) + transactions []Transaction // transactions in the EVM block + txEventPayloads []events.TransactionEventPayload // EVM.TransactionExecuted event payloads + receipts []*Receipt // receipts for transactions } // NewCadenceEvents decodes the events into evm types. @@ -111,22 +113,24 @@ func decodeCadenceEvents(events flow.BlockEvents) (*CadenceEvents, error) { return nil, fmt.Errorf("EVM block was already set for Flow block: %d", events.Height) } - block, err := decodeBlockEvent(val) + block, blockEventPayload, err := decodeBlockEvent(val) if err != nil { return nil, err } e.block = block + e.blockEventPayload = blockEventPayload continue } if isTransactionExecutedEvent(val) { - tx, receipt, err := decodeTransactionEvent(val) + tx, receipt, txEventPayload, err := decodeTransactionEvent(val) if err != nil { return nil, err } e.transactions = append(e.transactions, tx) + e.txEventPayloads = append(e.txEventPayloads, *txEventPayload) e.receipts = append(e.receipts, receipt) } } @@ -162,12 +166,25 @@ func (c *CadenceEvents) Block() *Block { return c.block } +// BlockEventPayload returns the EVM.BlockExecuted event payload. If the Flow block +// events do not contain an EVM block, the return value is nil. +func (c *CadenceEvents) BlockEventPayload() *events.BlockEventPayload { + return c.blockEventPayload +} + // Transactions included in the EVM block, if event doesn't // contain EVM transactions the return value is nil. 
func (c *CadenceEvents) Transactions() []Transaction { return c.transactions } +// TxEventPayloads returns the EVM.TransactionExecuted event payloads for the +// current EVM block. If the Flow block events do not contain any EVM transactions +// the return value is nil. +func (c *CadenceEvents) TxEventPayloads() []events.TransactionEventPayload { + return c.txEventPayloads +} + // Receipts included in the EVM block, if event doesn't // contain EVM transactions the return value is nil. func (c *CadenceEvents) Receipts() []*Receipt { diff --git a/models/events_test.go b/models/events_test.go index 241031501..e79f03d33 100644 --- a/models/events_test.go +++ b/models/events_test.go @@ -194,7 +194,7 @@ func TestCadenceEvents_Block(t *testing.T) { } // generate single block - _, blockEvent, err := newBlock(1, hashes) + block, blockEvent, err := newBlock(1, hashes) require.NoError(t, err) blockEvent.TransactionIndex = 4 blockEvent.EventIndex = 0 @@ -216,6 +216,12 @@ func TestCadenceEvents_Block(t *testing.T) { cdcEvents.events.Events, ) + // assert we have collected the EVM.BlockExecuted event payload + blockEventPayload := cdcEvents.BlockEventPayload() + blockHash, err := block.Hash() + require.NoError(t, err) + assert.Equal(t, blockHash, blockEventPayload.Hash) + // assert that EVM transactions & receipts are sorted by their // TransactionIndex field for i := 0; i < txCount; i++ { @@ -223,6 +229,12 @@ func TestCadenceEvents_Block(t *testing.T) { receipt := cdcEvents.receipts[i] assert.Equal(t, tx.Hash(), receipt.TxHash) assert.Equal(t, uint(i), receipt.TransactionIndex) + + // assert we have collected the EVM.TransactionExecuted event payloads + // in their correct order. 
+ txEventPayload := cdcEvents.TxEventPayloads()[i] + assert.Equal(t, tx.Hash(), txEventPayload.Hash) + assert.Equal(t, blockEventPayload.Height, txEventPayload.BlockHeight) } }) } diff --git a/models/receipt_test.go b/models/receipt_test.go index 4bc2e0abe..adae146f8 100644 --- a/models/receipt_test.go +++ b/models/receipt_test.go @@ -10,7 +10,7 @@ import ( func Test_DecodeReceipts(t *testing.T) { cdcEv, rec := createTestEvent(t, evmTxBinary) - _, receipt, err := decodeTransactionEvent(cdcEv) + _, receipt, _, err := decodeTransactionEvent(cdcEv) require.NoError(t, err) for i, l := range rec.Logs { diff --git a/models/transaction.go b/models/transaction.go index 8e19474ee..c4919497e 100644 --- a/models/transaction.go +++ b/models/transaction.go @@ -167,10 +167,19 @@ func (tc TransactionCall) MarshalBinary() ([]byte, error) { // decodeTransactionEvent takes a cadence event for transaction executed // and decodes its payload into a Transaction interface and a Receipt. // The concrete type will be either a TransactionCall or a DirectCall. 
-func decodeTransactionEvent(event cadence.Event) (Transaction, *Receipt, error) { +func decodeTransactionEvent(event cadence.Event) ( + Transaction, + *Receipt, + *events.TransactionEventPayload, + error, +) { txEvent, err := events.DecodeTransactionEventPayload(event) if err != nil { - return nil, nil, fmt.Errorf("failed to Cadence decode transaction event [%s]: %w", event.String(), err) + return nil, nil, nil, fmt.Errorf( + "failed to Cadence decode transaction event [%s]: %w", + event.String(), + err, + ) } gethReceipt := &gethTypes.Receipt{ @@ -186,7 +195,7 @@ func decodeTransactionEvent(event cadence.Event) (Transaction, *Receipt, error) if len(txEvent.Logs) > 0 { err = rlp.Decode(bytes.NewReader(txEvent.Logs), &gethReceipt.Logs) if err != nil { - return nil, nil, fmt.Errorf("failed to RLP-decode logs: %w", err) + return nil, nil, nil, fmt.Errorf("failed to RLP-decode logs: %w", err) } } @@ -211,19 +220,27 @@ func decodeTransactionEvent(event cadence.Event) (Transaction, *Receipt, error) if txEvent.TransactionType == types.DirectCallTxType { directCall, err := types.DirectCallFromEncoded(txEvent.Payload) if err != nil { - return nil, nil, fmt.Errorf("failed to RLP-decode direct call [%x]: %w", txEvent.Payload, err) + return nil, nil, nil, fmt.Errorf( + "failed to RLP-decode direct call [%x]: %w", + txEvent.Payload, + err, + ) } tx = DirectCall{DirectCall: directCall} } else { gethTx := &gethTypes.Transaction{} if err := gethTx.UnmarshalBinary(txEvent.Payload); err != nil { - return nil, nil, fmt.Errorf("failed to RLP-decode transaction [%x]: %w", txEvent.Payload, err) + return nil, nil, nil, fmt.Errorf( + "failed to RLP-decode transaction [%x]: %w", + txEvent.Payload, + err, + ) } receipt.EffectiveGasPrice = gethTx.EffectiveGasTipValue(nil) tx = TransactionCall{Transaction: gethTx} } - return tx, receipt, nil + return tx, receipt, txEvent, nil } func UnmarshalTransaction(value []byte) (Transaction, error) { diff --git a/models/transaction_test.go 
b/models/transaction_test.go index 3e11e1261..09e693f13 100644 --- a/models/transaction_test.go +++ b/models/transaction_test.go @@ -85,7 +85,7 @@ func createTestEvent(t *testing.T, txBinary string) (cadence.Event, *types.Resul func Test_DecodeEVMTransaction(t *testing.T) { cdcEv, _ := createTestEvent(t, evmTxBinary) - decTx, _, err := decodeTransactionEvent(cdcEv) + decTx, _, _, err := decodeTransactionEvent(cdcEv) require.NoError(t, err) require.IsType(t, TransactionCall{}, decTx) @@ -131,7 +131,7 @@ func Test_DecodeEVMTransaction(t *testing.T) { func Test_DecodeDirectCall(t *testing.T) { cdcEv, _ := createTestEvent(t, directCallBinary) - decTx, _, err := decodeTransactionEvent(cdcEv) + decTx, _, _, err := decodeTransactionEvent(cdcEv) require.NoError(t, err) require.IsType(t, DirectCall{}, decTx) @@ -179,7 +179,7 @@ func Test_UnmarshalTransaction(t *testing.T) { cdcEv, _ := createTestEvent(t, evmTxBinary) - tx, _, err := decodeTransactionEvent(cdcEv) + tx, _, _, err := decodeTransactionEvent(cdcEv) require.NoError(t, err) encodedTx, err := tx.MarshalBinary() @@ -233,7 +233,7 @@ func Test_UnmarshalTransaction(t *testing.T) { cdcEv, _ := createTestEvent(t, directCallBinary) - tx, _, err := decodeTransactionEvent(cdcEv) + tx, _, _, err := decodeTransactionEvent(cdcEv) require.NoError(t, err) encodedTx, err := tx.MarshalBinary() diff --git a/services/evm/executor.go b/services/evm/executor.go new file mode 100644 index 000000000..a4ce19dea --- /dev/null +++ b/services/evm/executor.go @@ -0,0 +1,160 @@ +package evm + +import ( + "fmt" + + "github.com/onflow/atree" + "github.com/onflow/flow-go/fvm/evm" + "github.com/onflow/flow-go/fvm/evm/emulator" + "github.com/onflow/flow-go/fvm/evm/precompiles" + "github.com/onflow/flow-go/fvm/evm/types" + flowGo "github.com/onflow/flow-go/model/flow" + "github.com/onflow/go-ethereum/common" + "github.com/onflow/go-ethereum/eth/tracers" + "github.com/rs/zerolog" + + "github.com/onflow/flow-evm-gateway/models" + 
"github.com/onflow/flow-evm-gateway/storage" +) + +type BlockExecutor struct { + emulator types.Emulator + chainID flowGo.ChainID + block *models.Block + blocks storage.BlockIndexer + logger zerolog.Logger + receipts storage.ReceiptIndexer + + // block dynamic data + txIndex uint + gasUsed uint64 +} + +func NewBlockExecutor( + block *models.Block, + ledger atree.Ledger, + chainID flowGo.ChainID, + blocks storage.BlockIndexer, + receipts storage.ReceiptIndexer, + logger zerolog.Logger, +) *BlockExecutor { + logger = logger.With().Str("component", "trace-generation").Logger() + storageAddress := evm.StorageAccountAddress(chainID) + + return &BlockExecutor{ + emulator: emulator.NewEmulator(ledger, storageAddress), + chainID: chainID, + block: block, + blocks: blocks, + receipts: receipts, + logger: logger, + } +} + +func (s *BlockExecutor) Run( + tx models.Transaction, + tracer *tracers.Tracer, +) error { + l := s.logger.With().Str("tx-hash", tx.Hash().String()).Logger() + l.Info().Msg("executing new transaction") + + receipt, err := s.receipts.GetByTransactionID(tx.Hash()) + if err != nil { + return err + } + + ctx, err := s.blockContext(receipt, tracer) + if err != nil { + return err + } + + bv, err := s.emulator.NewBlockView(ctx) + if err != nil { + return err + } + + var res *types.Result + + switch t := tx.(type) { + case models.DirectCall: + res, err = bv.DirectCall(t.DirectCall) + case models.TransactionCall: + res, err = bv.RunTransaction(t.Transaction) + default: + return fmt.Errorf("invalid transaction type") + } + + if err != nil { + return err + } + + // we should never produce invalid transaction, since if the transaction was emitted from the evm core + // it must have either been successful or failed, invalid transactions are not emitted + if res.Invalid() { + return fmt.Errorf("invalid transaction %s: %w", tx.Hash(), res.ValidationError) + } + + // increment values as part of a virtual block + s.gasUsed += res.GasConsumed + s.txIndex++ + + 
l.Debug().Msg("transaction executed successfully") + + return nil +} + +// blockContext produces a context that is used by the block view during the execution. +// It can be used for transaction execution and calls. Receipt is not required when +// producing the context for calls. +func (s *BlockExecutor) blockContext( + receipt *models.Receipt, + tracer *tracers.Tracer, +) (types.BlockContext, error) { + ctx := types.BlockContext{ + ChainID: types.EVMChainIDFromFlowChainID(s.chainID), + BlockNumber: s.block.Height, + BlockTimestamp: s.block.Timestamp, + DirectCallBaseGasUsage: types.DefaultDirectCallBaseGasUsage, + DirectCallGasPrice: types.DefaultDirectCallGasPrice, + GasFeeCollector: types.CoinbaseAddress, + GetHashFunc: func(n uint64) common.Hash { + // For block heights greater than or equal to the current, + // return an empty block hash. + if n >= s.block.Height { + return common.Hash{} + } + // If the given block height, is more than 256 blocks + // in the past, return an empty block hash. 
+ if s.block.Height-n > 256 { + return common.Hash{} + } + + block, err := s.blocks.GetByHeight(n) + if err != nil { + return common.Hash{} + } + blockHash, err := block.Hash() + if err != nil { + return common.Hash{} + } + + return blockHash + }, + Random: s.block.PrevRandao, + TxCountSoFar: s.txIndex, + TotalGasUsedSoFar: s.gasUsed, + Tracer: tracer, + } + + // only add precompile cadence arch contract if we have a receipt + if receipt != nil { + calls, err := types.AggregatedPrecompileCallsFromEncoded(receipt.PrecompiledCalls) + if err != nil { + return types.BlockContext{}, err + } + + ctx.ExtraPrecompiledContracts = precompiles.AggregatedPrecompiledCallsToPrecompiledContracts(calls) + } + + return ctx, nil +} diff --git a/services/ingestion/engine.go b/services/ingestion/engine.go index 37aeae0b7..479970395 100644 --- a/services/ingestion/engine.go +++ b/services/ingestion/engine.go @@ -4,6 +4,8 @@ import ( "context" "fmt" + flowGo "github.com/onflow/flow-go/model/flow" + pebbleDB "github.com/cockroachdb/pebble" "github.com/onflow/flow-go-sdk" gethTypes "github.com/onflow/go-ethereum/core/types" @@ -11,8 +13,11 @@ import ( "github.com/onflow/flow-evm-gateway/metrics" "github.com/onflow/flow-evm-gateway/models" + "github.com/onflow/flow-evm-gateway/services/replayer" "github.com/onflow/flow-evm-gateway/storage" "github.com/onflow/flow-evm-gateway/storage/pebble" + + "github.com/onflow/flow-go/fvm/evm/offchain/sync" ) var _ models.Engine = &Engine{} @@ -35,29 +40,35 @@ type Engine struct { *models.EngineStatus subscriber EventSubscriber + blocksProvider *replayer.BlocksProvider store *pebble.Storage + registerStore *pebble.RegisterStorage blocks storage.BlockIndexer receipts storage.ReceiptIndexer transactions storage.TransactionIndexer - accounts storage.AccountIndexer + traces storage.TraceIndexer log zerolog.Logger evmLastHeight *models.SequentialHeight blocksPublisher *models.Publisher[*models.Block] logsPublisher *models.Publisher[[]*gethTypes.Log] 
collector metrics.Collector + replayerConfig replayer.Config } func NewEventIngestionEngine( subscriber EventSubscriber, + blocksProvider *replayer.BlocksProvider, store *pebble.Storage, + registerStore *pebble.RegisterStorage, blocks storage.BlockIndexer, receipts storage.ReceiptIndexer, transactions storage.TransactionIndexer, - accounts storage.AccountIndexer, + traces storage.TraceIndexer, blocksPublisher *models.Publisher[*models.Block], logsPublisher *models.Publisher[[]*gethTypes.Log], log zerolog.Logger, collector metrics.Collector, + replayerConfig replayer.Config, ) *Engine { log = log.With().Str("component", "ingestion").Logger() @@ -65,21 +76,25 @@ func NewEventIngestionEngine( EngineStatus: models.NewEngineStatus(), subscriber: subscriber, + blocksProvider: blocksProvider, store: store, + registerStore: registerStore, blocks: blocks, receipts: receipts, transactions: transactions, - accounts: accounts, + traces: traces, log: log, blocksPublisher: blocksPublisher, logsPublisher: logsPublisher, collector: collector, + replayerConfig: replayerConfig, } } // Stop the engine. func (e *Engine) Stop() { - // todo + e.MarkDone() + <-e.Stopped() } // Run the Cadence event ingestion engine. @@ -98,32 +113,36 @@ func (e *Engine) Stop() { // drops. // All other errors are unexpected. 
func (e *Engine) Run(ctx context.Context) error { - latestCadence, err := e.blocks.LatestCadenceHeight() - if err != nil { - return fmt.Errorf("failed to get latest cadence height: %w", err) - } - - e.log.Info().Uint64("start-cadence-height", latestCadence).Msg("starting ingestion") + e.log.Info().Msg("starting ingestion") e.MarkReady() - - for events := range e.subscriber.Subscribe(ctx, latestCadence) { - if events.Err != nil { - return fmt.Errorf( - "failure in event subscription at height %d, with: %w", - latestCadence, - events.Err, - ) - } - - err = e.processEvents(events.Events) - if err != nil { - e.log.Error().Err(err).Msg("failed to process EVM events") - return err + defer e.MarkStopped() + + events := e.subscriber.Subscribe(ctx) + + for { + select { + case <-e.Done(): + // stop the engine + return nil + case events, ok := <-events: + if !ok { + return nil + } + if events.Err != nil { + return fmt.Errorf( + "failure in event subscription with: %w", + events.Err, + ) + } + + err := e.processEvents(events.Events) + if err != nil { + e.log.Error().Err(err).Msg("failed to process EVM events") + return err + } } } - - return nil } // processEvents converts the events to block and transactions and indexes them. 
@@ -158,10 +177,50 @@ func (e *Engine) processEvents(events *models.CadenceEvents) error { } batch := e.store.NewBatch() - defer batch.Close() + defer func(batch *pebbleDB.Batch) { + err := batch.Close() + if err != nil { + e.log.Fatal().Err(err).Msg("failed to close batch") + } + }(batch) + + // Step 1: Re-execute all transactions on the latest EVM block + + // Step 1.1: Notify the `BlocksProvider` of the newly received EVM block + if err := e.blocksProvider.OnBlockReceived(events.Block()); err != nil { + return err + } + + replayer := sync.NewReplayer( + e.replayerConfig.ChainID, + e.replayerConfig.RootAddr, + e.registerStore, + e.blocksProvider, + e.log, + e.replayerConfig.CallTracerCollector.TxTracer(), + e.replayerConfig.ValidateResults, + ) - // we first index the block - err := e.indexBlock( + // Step 1.2: Replay all block transactions + // If `ReplayBlock` returns any error, we abort the EVM events processing + blockEvents := events.BlockEventPayload() + res, err := replayer.ReplayBlock(events.TxEventPayloads(), blockEvents) + if err != nil { + return fmt.Errorf("failed to replay block on height: %d, with: %w", events.Block().Height, err) + } + + // Step 2: Write all the necessary changes to each storage + + // Step 2.1: Write all the EVM state changes to `StorageProvider` + err = e.registerStore.Store(registerEntriesFromKeyValue(res.StorageRegisterUpdates()), blockEvents.Height, batch) + if err != nil { + return fmt.Errorf("failed to store state changes on block: %d", events.Block().Height) + } + + // Step 2.2: Write the latest EVM block to `Blocks` storage + // This verifies the EVM height is sequential, and if not it will return an error + // TODO(janezp): can we do this before re-execution of the block? 
+ err = e.indexBlock( events.CadenceHeight(), events.CadenceBlockID(), events.Block(), @@ -171,6 +230,8 @@ func (e *Engine) processEvents(events *models.CadenceEvents) error { return fmt.Errorf("failed to index block %d event: %w", events.Block().Height, err) } + // Step 2.3: Write all EVM transactions of the current block, + // to `Transactions` storage for i, tx := range events.Transactions() { receipt := events.Receipts()[i] @@ -180,11 +241,27 @@ func (e *Engine) processEvents(events *models.CadenceEvents) error { } } + // Step 2.4: Write all EVM transaction receipts of the current block, + // to `Receipts` storage err = e.indexReceipts(events.Receipts(), batch) if err != nil { return fmt.Errorf("failed to index receipts for block %d event: %w", events.Block().Height, err) } + traceCollector := e.replayerConfig.CallTracerCollector + for _, tx := range events.Transactions() { + txHash := tx.Hash() + traceResult, err := traceCollector.Collect(txHash) + if err != nil { + return err + } + + err = e.traces.StoreTransaction(txHash, traceResult, batch) + if err != nil { + return err + } + } + if err := batch.Commit(pebbleDB.Sync); err != nil { return fmt.Errorf("failed to commit indexed data for Cadence block %d: %w", events.CadenceHeight(), err) } @@ -257,10 +334,6 @@ func (e *Engine) indexTransaction( return fmt.Errorf("failed to store tx: %s, with: %w", tx.Hash(), err) } - if err := e.accounts.Update(tx, receipt, batch); err != nil { - return fmt.Errorf("failed to update accounts for tx: %s, with: %w", tx.Hash(), err) - } - return nil } @@ -278,3 +351,14 @@ func (e *Engine) indexReceipts( return nil } + +func registerEntriesFromKeyValue(keyValue map[flowGo.RegisterID]flowGo.RegisterValue) []flowGo.RegisterEntry { + entries := make([]flowGo.RegisterEntry, 0, len(keyValue)) + for k, v := range keyValue { + entries = append(entries, flowGo.RegisterEntry{ + Key: k, + Value: v, + }) + } + return entries +} diff --git a/services/ingestion/engine_test.go 
b/services/ingestion/engine_test.go index c7f6a77bb..81ab88d68 100644 --- a/services/ingestion/engine_test.go +++ b/services/ingestion/engine_test.go @@ -3,15 +3,21 @@ package ingestion import ( "context" "encoding/hex" + "encoding/json" "math/big" "testing" + "github.com/onflow/flow-evm-gateway/storage" + pebbleDB "github.com/cockroachdb/pebble" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm" "github.com/onflow/flow-go/fvm/evm/events" flowGo "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-evm-gateway/metrics" "github.com/onflow/flow-evm-gateway/services/ingestion/mocks" + "github.com/onflow/flow-evm-gateway/services/replayer" "github.com/onflow/flow-evm-gateway/storage/pebble" "github.com/onflow/cadence" @@ -31,13 +37,13 @@ import ( ) func TestSerialBlockIngestion(t *testing.T) { + t.Run("successfully ingest serial blocks", func(t *testing.T) { receipts := &storageMock.ReceiptIndexer{} transactions := &storageMock.TransactionIndexer{} latestHeight := uint64(10) - store, err := pebble.New(t.TempDir(), zerolog.Nop()) - require.NoError(t, err) + store, registerStore := setupStore(t) blocks := &storageMock.BlockIndexer{} blocks. @@ -47,31 +53,31 @@ func TestSerialBlockIngestion(t *testing.T) { }). Once() // make sure this isn't called multiple times - accounts := &storageMock.AccountIndexer{} - accounts. - On("Update"). - Return(func() error { return nil }) + traces := &storageMock.TraceIndexer{} eventsChan := make(chan models.BlockEvents) subscriber := &mocks.EventSubscriber{} subscriber. - On("Subscribe", mock.Anything, mock.AnythingOfType("uint64")). - Return(func(ctx context.Context, latest uint64) <-chan models.BlockEvents { + On("Subscribe", mock.Anything). 
+ Return(func(ctx context.Context) <-chan models.BlockEvents { return eventsChan }) engine := NewEventIngestionEngine( subscriber, + replayer.NewBlocksProvider(blocks, flowGo.Emulator, nil), store, + registerStore, blocks, receipts, transactions, - accounts, + traces, models.NewPublisher[*models.Block](), models.NewPublisher[[]*gethTypes.Log](), zerolog.Nop(), metrics.NopCollector, + defaultReplayerConfig(), ) done := make(chan struct{}) @@ -117,8 +123,7 @@ func TestSerialBlockIngestion(t *testing.T) { transactions := &storageMock.TransactionIndexer{} latestHeight := uint64(10) - store, err := pebble.New(t.TempDir(), zerolog.Nop()) - require.NoError(t, err) + store, registerStore := setupStore(t) blocks := &storageMock.BlockIndexer{} blocks. @@ -128,30 +133,30 @@ func TestSerialBlockIngestion(t *testing.T) { }). Once() // make sure this isn't called multiple times - accounts := &storageMock.AccountIndexer{} - accounts. - On("Update", mock.Anything, mock.Anything). - Return(func(t models.TransactionCall, r *gethTypes.Receipt) error { return nil }) + traces := &storageMock.TraceIndexer{} eventsChan := make(chan models.BlockEvents) subscriber := &mocks.EventSubscriber{} subscriber. - On("Subscribe", mock.Anything, mock.AnythingOfType("uint64")). - Return(func(ctx context.Context, latest uint64) <-chan models.BlockEvents { + On("Subscribe", mock.Anything). 
+ Return(func(ctx context.Context) <-chan models.BlockEvents { return eventsChan }) engine := NewEventIngestionEngine( subscriber, + replayer.NewBlocksProvider(blocks, flowGo.Emulator, nil), store, + registerStore, blocks, receipts, transactions, - accounts, + traces, models.NewPublisher[*models.Block](), models.NewPublisher[[]*gethTypes.Log](), zerolog.Nop(), metrics.NopCollector, + defaultReplayerConfig(), ) waitErr := make(chan struct{}) @@ -159,7 +164,7 @@ func TestSerialBlockIngestion(t *testing.T) { go func() { err := engine.Run(context.Background()) assert.ErrorIs(t, err, models.ErrInvalidHeight) - assert.EqualError(t, err, "failed to index block 20 event: invalid block height, expected 11, got 20: invalid height") + assert.EqualError(t, err, "invalid height: received new block: 20, non-sequential of latest block: 11") close(waitErr) }() @@ -213,6 +218,7 @@ func TestSerialBlockIngestion(t *testing.T) { } func TestBlockAndTransactionIngestion(t *testing.T) { + t.Run("successfully ingest transaction and block", func(t *testing.T) { receipts := &storageMock.ReceiptIndexer{} transactions := &storageMock.TransactionIndexer{} @@ -220,8 +226,7 @@ func TestBlockAndTransactionIngestion(t *testing.T) { nextHeight := latestHeight + 1 blockID := flow.Identifier{0x01} - store, err := pebble.New(t.TempDir(), zerolog.Nop()) - require.NoError(t, err) + store, registerStore := setupStore(t) blocks := &storageMock.BlockIndexer{} blocks. @@ -238,16 +243,11 @@ func TestBlockAndTransactionIngestion(t *testing.T) { return nil }) - accounts := &storageMock.AccountIndexer{} - accounts. - On("Update", mock.AnythingOfType("models.TransactionCall"), mock.AnythingOfType("*models.Receipt"), mock.Anything). - Return(func(tx models.Transaction, receipt *models.Receipt, _ *pebbleDB.Batch) error { return nil }) - eventsChan := make(chan models.BlockEvents) subscriber := &mocks.EventSubscriber{} subscriber. - On("Subscribe", mock.Anything, mock.AnythingOfType("uint64")). 
- Return(func(ctx context.Context, latest uint64) <-chan models.BlockEvents { + On("Subscribe", mock.Anything). + Return(func(ctx context.Context) <-chan models.BlockEvents { return eventsChan }) @@ -256,17 +256,28 @@ func TestBlockAndTransactionIngestion(t *testing.T) { blockCdc, block, blockEvent, err := newBlock(nextHeight, []gethCommon.Hash{result.TxHash}) require.NoError(t, err) + traces := &storageMock.TraceIndexer{} + traces. + On("StoreTransaction", mock.AnythingOfType("common.Hash"), mock.AnythingOfType("json.RawMessage"), mock.Anything). + Return(func(txID gethCommon.Hash, trace json.RawMessage, batch *pebbleDB.Batch) error { + assert.Equal(t, transaction.Hash(), txID) + return nil + }) + engine := NewEventIngestionEngine( subscriber, + replayer.NewBlocksProvider(blocks, flowGo.Emulator, nil), store, + registerStore, blocks, receipts, transactions, - accounts, + traces, models.NewPublisher[*models.Block](), models.NewPublisher[[]*gethTypes.Log](), zerolog.Nop(), metrics.NopCollector, + defaultReplayerConfig(), ) done := make(chan struct{}) @@ -329,8 +340,7 @@ func TestBlockAndTransactionIngestion(t *testing.T) { latestHeight := uint64(10) nextHeight := latestHeight + 1 - store, err := pebble.New(t.TempDir(), zerolog.Nop()) - require.NoError(t, err) + store, registerStore := setupStore(t) blocks := &storageMock.BlockIndexer{} blocks. @@ -341,35 +351,41 @@ func TestBlockAndTransactionIngestion(t *testing.T) { On("SetLatestCadenceHeight", mock.AnythingOfType("uint64")). Return(func(h uint64) error { return nil }) - accounts := &storageMock.AccountIndexer{} - accounts. - On("Update", mock.AnythingOfType("models.TransactionCall"), mock.AnythingOfType("*models.Receipt"), mock.Anything). - Return(func(tx models.Transaction, receipt *models.Receipt, _ *pebbleDB.Batch) error { return nil }) - eventsChan := make(chan models.BlockEvents) subscriber := &mocks.EventSubscriber{} subscriber. - On("Subscribe", mock.Anything, mock.AnythingOfType("uint64")). 
- Return(func(ctx context.Context, latest uint64) <-chan models.BlockEvents { + On("Subscribe", mock.Anything). + Return(func(ctx context.Context) <-chan models.BlockEvents { return eventsChan }) - txCdc, txEvent, _, res, err := newTransaction(nextHeight) + txCdc, txEvent, transaction, res, err := newTransaction(nextHeight) require.NoError(t, err) blockCdc, _, blockEvent, err := newBlock(nextHeight, []gethCommon.Hash{res.TxHash}) require.NoError(t, err) + traces := &storageMock.TraceIndexer{} + traces. + On("StoreTransaction", mock.AnythingOfType("common.Hash"), mock.AnythingOfType("json.RawMessage"), mock.Anything). + Return(func(txID gethCommon.Hash, trace json.RawMessage, batch *pebbleDB.Batch) error { + assert.Equal(t, transaction.Hash(), txID) + return nil + }) + engine := NewEventIngestionEngine( subscriber, + replayer.NewBlocksProvider(blocks, flowGo.Emulator, nil), store, + registerStore, blocks, receipts, transactions, - accounts, + traces, models.NewPublisher[*models.Block](), models.NewPublisher[[]*gethTypes.Log](), zerolog.Nop(), metrics.NopCollector, + defaultReplayerConfig(), ) done := make(chan struct{}) @@ -429,8 +445,7 @@ func TestBlockAndTransactionIngestion(t *testing.T) { transactions := &storageMock.TransactionIndexer{} latestCadenceHeight := uint64(0) - store, err := pebble.New(t.TempDir(), zerolog.Nop()) - require.NoError(t, err) + store, registerStore := setupStore(t) blocks := &storageMock.BlockIndexer{} blocks. @@ -440,32 +455,31 @@ func TestBlockAndTransactionIngestion(t *testing.T) { }). Once() // make sure this isn't called multiple times - accounts := &storageMock.AccountIndexer{} - accounts. - On("Update", mock.Anything, mock.AnythingOfType("*models.Receipt"), mock.Anything). - Return(func(t models.Transaction, r *models.Receipt, _ *pebbleDB.Batch) error { return nil }) + traces := &storageMock.TraceIndexer{} eventsChan := make(chan models.BlockEvents) subscriber := &mocks.EventSubscriber{} subscriber. 
- On("Subscribe", mock.Anything, mock.AnythingOfType("uint64")). - Return(func(ctx context.Context, latest uint64) <-chan models.BlockEvents { - assert.Equal(t, latestCadenceHeight, latest) + On("Subscribe", mock.Anything). + Return(func(ctx context.Context) <-chan models.BlockEvents { return eventsChan }). Once() engine := NewEventIngestionEngine( subscriber, + replayer.NewBlocksProvider(blocks, flowGo.Emulator, nil), store, + registerStore, blocks, receipts, transactions, - accounts, + traces, models.NewPublisher[*models.Block](), models.NewPublisher[[]*gethTypes.Log](), zerolog.Nop(), metrics.NopCollector, + defaultReplayerConfig(), ) done := make(chan struct{}) @@ -503,6 +517,13 @@ func TestBlockAndTransactionIngestion(t *testing.T) { Return(func(receipts []*models.Receipt, _ *pebbleDB.Batch) error { return nil }). Once() + traces. + On("StoreTransaction", mock.AnythingOfType("common.Hash"), mock.AnythingOfType("json.RawMessage"), mock.Anything). + Return(func(txID gethCommon.Hash, trace json.RawMessage, batch *pebbleDB.Batch) error { + assert.Equal(t, transaction.Hash(), txID) + return nil + }) + events = append(events, flow.Event{ Type: string(txEvent.Etype), Value: txCdc, @@ -604,3 +625,41 @@ func newTransaction(height uint64) (cadence.Event, *events.Event, models.Transac cdcEv, err := ev.Payload.ToCadence(flowGo.Previewnet) return cdcEv, ev, models.TransactionCall{Transaction: tx}, res, err } + +func defaultReplayerConfig() replayer.Config { + return replayer.Config{ + ChainID: flowGo.Emulator, + RootAddr: evm.StorageAccountAddress(flowGo.Emulator), + CallTracerCollector: replayer.NopTracer, + ValidateResults: false, + } +} + +func setupStore(t *testing.T) (*pebble.Storage, *pebble.RegisterStorage) { + store, err := pebble.New(t.TempDir(), zerolog.Nop()) + require.NoError(t, err) + + storageAddress := evm.StorageAccountAddress(flowGo.Emulator) + registerStore := pebble.NewRegisterStorage(store, storageAddress) + snapshot, err := 
registerStore.GetSnapshotAt(0) + require.NoError(t, err) + delta := storage.NewRegisterDelta(snapshot) + accountStatus := environment.NewAccountStatus() + err = delta.SetValue( + storageAddress[:], + []byte(flowGo.AccountStatusKey), + accountStatus.ToBytes(), + ) + require.NoError(t, err) + + batch := store.NewBatch() + defer func() { + require.NoError(t, batch.Close()) + }() + err = registerStore.Store(delta.GetUpdates(), 0, batch) + require.NoError(t, err) + err = batch.Commit(pebbleDB.Sync) + require.NoError(t, err) + + return store, registerStore +} diff --git a/services/ingestion/subscriber.go b/services/ingestion/event_subscriber.go similarity index 81% rename from services/ingestion/subscriber.go rename to services/ingestion/event_subscriber.go index 6d347cd9f..47da89723 100644 --- a/services/ingestion/subscriber.go +++ b/services/ingestion/event_subscriber.go @@ -24,33 +24,35 @@ type EventSubscriber interface { // // The BlockEvents type will contain an optional error in case // the error happens, the consumer of the chanel should handle it. 
- Subscribe(ctx context.Context, height uint64) <-chan models.BlockEvents + Subscribe(ctx context.Context) <-chan models.BlockEvents } -var _ EventSubscriber = &RPCSubscriber{} +var _ EventSubscriber = &RPCEventSubscriber{} -type RPCSubscriber struct { - client *requester.CrossSporkClient - chain flowGo.ChainID - heartbeatInterval uint64 - logger zerolog.Logger +type RPCEventSubscriber struct { + logger zerolog.Logger + + client *requester.CrossSporkClient + chain flowGo.ChainID + height uint64 recovery bool recoveredEvents []flow.Event } -func NewRPCSubscriber( +func NewRPCEventSubscriber( + logger zerolog.Logger, client *requester.CrossSporkClient, - heartbeatInterval uint64, chainID flowGo.ChainID, - logger zerolog.Logger, -) *RPCSubscriber { + startHeight uint64, +) *RPCEventSubscriber { logger = logger.With().Str("component", "subscriber").Logger() - return &RPCSubscriber{ - client: client, - heartbeatInterval: heartbeatInterval, - chain: chainID, - logger: logger, + return &RPCEventSubscriber{ + logger: logger, + + client: client, + chain: chainID, + height: startHeight, } } @@ -59,23 +61,23 @@ func NewRPCSubscriber( // to listen all new events in the current spork. // // If error is encountered during backfill the subscription will end and the response chanel will be closed. -func (r *RPCSubscriber) Subscribe(ctx context.Context, height uint64) <-chan models.BlockEvents { - events := make(chan models.BlockEvents) +func (r *RPCEventSubscriber) Subscribe(ctx context.Context) <-chan models.BlockEvents { + eventsChan := make(chan models.BlockEvents) go func() { defer func() { - close(events) + close(eventsChan) }() - // if the height is from the previous spork, backfill all the events from previous sporks first - if r.client.IsPastSpork(height) { + // if the height is from the previous spork, backfill all the eventsChan from previous sporks first + if r.client.IsPastSpork(r.height) { r.logger.Info(). - Uint64("height", height). + Uint64("height", r.height). 
Msg("height found in previous spork, starting to backfill") // backfill all the missed events, handling of context cancellation is done by the producer - for ev := range r.backfill(ctx, height) { - events <- ev + for ev := range r.backfill(ctx, r.height) { + eventsChan <- ev if ev.Err != nil { return @@ -83,34 +85,34 @@ func (r *RPCSubscriber) Subscribe(ctx context.Context, height uint64) <-chan mod // keep updating height, so after we are done back-filling // it will be at the first height in the current spork - height = ev.Events.CadenceHeight() + r.height = ev.Events.CadenceHeight() } // after back-filling is done, increment height by one, // so we start with the height in the current spork - height = height + 1 + r.height = r.height + 1 } r.logger.Info(). - Uint64("next-height", height). + Uint64("next-height", r.height). Msg("backfilling done, subscribe for live data") // subscribe in the current spork, handling of context cancellation is done by the producer - for ev := range r.subscribe(ctx, height, access.WithHeartbeatInterval(r.heartbeatInterval)) { - events <- ev + for ev := range r.subscribe(ctx, r.height) { + eventsChan <- ev } r.logger.Warn().Msg("ended subscription for events") }() - return events + return eventsChan } // subscribe to events by the provided height and handle any errors. // // Subscribing to EVM specific events and handle any disconnection errors // as well as context cancellations. 
-func (r *RPCSubscriber) subscribe(ctx context.Context, height uint64, opts ...access.SubscribeOption) <-chan models.BlockEvents { +func (r *RPCEventSubscriber) subscribe(ctx context.Context, height uint64) <-chan models.BlockEvents { eventsChan := make(chan models.BlockEvents) _, err := r.client.GetBlockHeaderByHeight(ctx, height) @@ -120,7 +122,13 @@ func (r *RPCSubscriber) subscribe(ctx context.Context, height uint64, opts ...ac return eventsChan } - eventStream, errChan, err := r.client.SubscribeEventsByBlockHeight(ctx, height, r.blocksFilter(), opts...) + // we always use heartbeat interval of 1 to have the least amount of delay from the access node + eventStream, errChan, err := r.client.SubscribeEventsByBlockHeight( + ctx, + height, + blocksFilter(r.chain), + access.WithHeartbeatInterval(1), + ) if err != nil { eventsChan <- models.NewBlockEventsError( fmt.Errorf("failed to subscribe to events by block height: %d, with: %w", height, err), @@ -187,12 +195,12 @@ func (r *RPCSubscriber) subscribe(ctx context.Context, height uint64, opts ...ac // and check for each event it receives whether we reached the end, if we reach the end it will increase // the height by one (next height), and check if we are still in previous sporks, if so repeat everything, // otherwise return. 
-func (r *RPCSubscriber) backfill(ctx context.Context, height uint64) <-chan models.BlockEvents { - events := make(chan models.BlockEvents) +func (r *RPCEventSubscriber) backfill(ctx context.Context, height uint64) <-chan models.BlockEvents { + eventsChan := make(chan models.BlockEvents) go func() { defer func() { - close(events) + close(eventsChan) }() for { @@ -207,7 +215,7 @@ func (r *RPCSubscriber) backfill(ctx context.Context, height uint64) <-chan mode latestHeight, err := r.client.GetLatestHeightForSpork(ctx, height) if err != nil { - events <- models.NewBlockEventsError(err) + eventsChan <- models.NewBlockEventsError(err) return } @@ -216,8 +224,8 @@ func (r *RPCSubscriber) backfill(ctx context.Context, height uint64) <-chan mode Uint64("last-spork-height", latestHeight). Msg("backfilling spork") - for ev := range r.subscribe(ctx, height, access.WithHeartbeatInterval(1)) { - events <- ev + for ev := range r.subscribe(ctx, height) { + eventsChan <- ev if ev.Err != nil { return @@ -238,33 +246,7 @@ func (r *RPCSubscriber) backfill(ctx context.Context, height uint64) <-chan mode } }() - return events -} - -// blockFilter define events we subscribe to: -// A.{evm}.EVM.BlockExecuted and A.{evm}.EVM.TransactionExecuted, -// where {evm} is EVM deployed contract address, which depends on the chain ID we configure. 
-func (r *RPCSubscriber) blocksFilter() flow.EventFilter { - evmAddress := common.Address(systemcontracts.SystemContractsForChain(r.chain).EVMContract.Address) - - blockExecutedEvent := common.NewAddressLocation( - nil, - evmAddress, - string(events.EventTypeBlockExecuted), - ).ID() - - transactionExecutedEvent := common.NewAddressLocation( - nil, - evmAddress, - string(events.EventTypeTransactionExecuted), - ).ID() - - return flow.EventFilter{ - EventTypes: []string{ - blockExecutedEvent, - transactionExecutedEvent, - }, - } + return eventsChan } // fetchMissingData is used as a backup mechanism for fetching EVM-related @@ -272,14 +254,14 @@ func (r *RPCSubscriber) blocksFilter() flow.EventFilter { // An inconsistent response could be an EVM block that references EVM // transactions which are not present in the response. It falls back // to using grpc requests instead of streaming. -func (r *RPCSubscriber) fetchMissingData( +func (r *RPCEventSubscriber) fetchMissingData( ctx context.Context, blockEvents flow.BlockEvents, ) models.BlockEvents { // remove existing events blockEvents.Events = nil - for _, eventType := range r.blocksFilter().EventTypes { + for _, eventType := range blocksFilter(r.chain).EventTypes { recoveredEvents, err := r.client.GetEventsForHeightRange( ctx, eventType, @@ -309,7 +291,7 @@ func (r *RPCSubscriber) fetchMissingData( // accumulateEventsMissingBlock will keep receiving transaction events until it can produce a valid // EVM block event containing a block and transactions. At that point it will reset the recovery mode // and return the valid block events. -func (r *RPCSubscriber) accumulateEventsMissingBlock(events flow.BlockEvents) models.BlockEvents { +func (r *RPCEventSubscriber) accumulateEventsMissingBlock(events flow.BlockEvents) models.BlockEvents { r.recoveredEvents = append(r.recoveredEvents, events.Events...) 
events.Events = r.recoveredEvents @@ -329,7 +311,7 @@ func (r *RPCSubscriber) accumulateEventsMissingBlock(events flow.BlockEvents) mo // in which case we might miss one of the events (missing transaction), or it can be // due to a failure from the system transaction which commits an EVM block, which results // in missing EVM block event but present transactions. -func (r *RPCSubscriber) recover( +func (r *RPCEventSubscriber) recover( ctx context.Context, events flow.BlockEvents, err error, @@ -349,3 +331,29 @@ func (r *RPCSubscriber) recover( return models.NewBlockEventsError(err) } + +// blockFilter define events we subscribe to: +// A.{evm}.EVM.BlockExecuted and A.{evm}.EVM.TransactionExecuted, +// where {evm} is EVM deployed contract address, which depends on the chain ID we configure. +func blocksFilter(chainId flowGo.ChainID) flow.EventFilter { + evmAddress := common.Address(systemcontracts.SystemContractsForChain(chainId).EVMContract.Address) + + blockExecutedEvent := common.NewAddressLocation( + nil, + evmAddress, + string(events.EventTypeBlockExecuted), + ).ID() + + transactionExecutedEvent := common.NewAddressLocation( + nil, + evmAddress, + string(events.EventTypeTransactionExecuted), + ).ID() + + return flow.EventFilter{ + EventTypes: []string{ + blockExecutedEvent, + transactionExecutedEvent, + }, + } +} diff --git a/services/ingestion/subscriber_test.go b/services/ingestion/event_subscriber_test.go similarity index 92% rename from services/ingestion/subscriber_test.go rename to services/ingestion/event_subscriber_test.go index 22ac61e16..04626af21 100644 --- a/services/ingestion/subscriber_test.go +++ b/services/ingestion/event_subscriber_test.go @@ -43,9 +43,9 @@ func Test_Subscribing(t *testing.T) { ) require.NoError(t, err) - subscriber := NewRPCSubscriber(client, 100, flowGo.Previewnet, zerolog.Nop()) + subscriber := NewRPCEventSubscriber(zerolog.Nop(), client, flowGo.Previewnet, 1) - events := subscriber.Subscribe(context.Background(), 1) + 
events := subscriber.Subscribe(context.Background()) var prevHeight uint64 @@ -83,9 +83,9 @@ func Test_MissingBlockEvent(t *testing.T) { ) require.NoError(t, err) - subscriber := NewRPCSubscriber(client, 100, flowGo.Previewnet, zerolog.Nop()) + subscriber := NewRPCEventSubscriber(zerolog.Nop(), client, flowGo.Previewnet, 1) - events := subscriber.Subscribe(context.Background(), 1) + events := subscriber.Subscribe(context.Background()) missingHashes := make([]gethCommon.Hash, 0) @@ -160,7 +160,7 @@ func Test_MissingBlockEvent(t *testing.T) { // EVM events through the gRPC API, returns the correct data. func Test_SubscribingWithRetryOnError(t *testing.T) { endHeight := uint64(10) - sporkClients := []access.Client{} + var sporkClients []access.Client currentClient := testutils.SetupClientForRange(1, endHeight) cadenceHeight := uint64(5) @@ -185,9 +185,9 @@ func Test_SubscribingWithRetryOnError(t *testing.T) { ) require.NoError(t, err) - subscriber := NewRPCSubscriber(client, 100, flowGo.Previewnet, zerolog.Nop()) + subscriber := NewRPCEventSubscriber(zerolog.Nop(), client, flowGo.Previewnet, 1) - events := subscriber.Subscribe(context.Background(), 1) + events := subscriber.Subscribe(context.Background()) var prevHeight uint64 @@ -214,7 +214,7 @@ func Test_SubscribingWithRetryOnError(t *testing.T) { } // this makes sure we indexed all the events - require.Equal(t, uint64(endHeight), prevHeight) + require.Equal(t, endHeight, prevHeight) } // Test that back-up fetching of EVM events is triggered when the @@ -223,7 +223,7 @@ func Test_SubscribingWithRetryOnError(t *testing.T) { // of EVM events through the gRPC API, returns duplicate EVM blocks. 
func Test_SubscribingWithRetryOnErrorMultipleBlocks(t *testing.T) { endHeight := uint64(10) - sporkClients := []access.Client{} + var sporkClients []access.Client currentClient := testutils.SetupClientForRange(1, endHeight) cadenceHeight := uint64(5) @@ -248,9 +248,9 @@ func Test_SubscribingWithRetryOnErrorMultipleBlocks(t *testing.T) { ) require.NoError(t, err) - subscriber := NewRPCSubscriber(client, 100, flowGo.Previewnet, zerolog.Nop()) + subscriber := NewRPCEventSubscriber(zerolog.Nop(), client, flowGo.Previewnet, 1) - events := subscriber.Subscribe(context.Background(), 1) + events := subscriber.Subscribe(context.Background()) var prevHeight uint64 @@ -286,7 +286,7 @@ func Test_SubscribingWithRetryOnErrorMultipleBlocks(t *testing.T) { // of EVM events through the gRPC API, returns no EVM blocks. func Test_SubscribingWithRetryOnErrorEmptyBlocks(t *testing.T) { endHeight := uint64(10) - sporkClients := []access.Client{} + var sporkClients []access.Client currentClient := testutils.SetupClientForRange(1, endHeight) cadenceHeight := uint64(5) @@ -310,9 +310,9 @@ func Test_SubscribingWithRetryOnErrorEmptyBlocks(t *testing.T) { ) require.NoError(t, err) - subscriber := NewRPCSubscriber(client, 100, flowGo.Previewnet, zerolog.Nop()) + subscriber := NewRPCEventSubscriber(zerolog.Nop(), client, flowGo.Previewnet, 1) - events := subscriber.Subscribe(context.Background(), 1) + events := subscriber.Subscribe(context.Background()) var prevHeight uint64 @@ -405,16 +405,16 @@ func setupClientForBackupEventFetching( "GetEventsForHeightRange", mock.AnythingOfType("context.backgroundCtx"), "A.b6763b4399a888c8.EVM.BlockExecuted", - uint64(cadenceHeight), - uint64(cadenceHeight), + cadenceHeight, + cadenceHeight, ).Return(evmBlockEvents, nil).Once() client.On( "GetEventsForHeightRange", mock.AnythingOfType("context.backgroundCtx"), "A.b6763b4399a888c8.EVM.TransactionExecuted", - uint64(cadenceHeight), - uint64(cadenceHeight), + cadenceHeight, + cadenceHeight, 
).Return([]flow.BlockEvents{evmTxEvents}, nil).Once() client.SubscribeEventsByBlockHeightFunc = func( diff --git a/services/ingestion/mocks/EventSubscriber.go b/services/ingestion/mocks/EventSubscriber.go index 021c708d6..11b05e897 100644 --- a/services/ingestion/mocks/EventSubscriber.go +++ b/services/ingestion/mocks/EventSubscriber.go @@ -15,17 +15,17 @@ type EventSubscriber struct { mock.Mock } -// Subscribe provides a mock function with given fields: ctx, height -func (_m *EventSubscriber) Subscribe(ctx context.Context, height uint64) <-chan models.BlockEvents { - ret := _m.Called(ctx, height) +// Subscribe provides a mock function with given fields: ctx +func (_m *EventSubscriber) Subscribe(ctx context.Context) <-chan models.BlockEvents { + ret := _m.Called(ctx) if len(ret) == 0 { panic("no return value specified for Subscribe") } var r0 <-chan models.BlockEvents - if rf, ok := ret.Get(0).(func(context.Context, uint64) <-chan models.BlockEvents); ok { - r0 = rf(ctx, height) + if rf, ok := ret.Get(0).(func(context.Context) <-chan models.BlockEvents); ok { + r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(<-chan models.BlockEvents) diff --git a/services/replayer/blocks_provider.go b/services/replayer/blocks_provider.go new file mode 100644 index 000000000..d43749a0c --- /dev/null +++ b/services/replayer/blocks_provider.go @@ -0,0 +1,113 @@ +package replayer + +import ( + "fmt" + + "github.com/onflow/flow-evm-gateway/models" + "github.com/onflow/flow-evm-gateway/storage" + evmTypes "github.com/onflow/flow-go/fvm/evm/types" + flowGo "github.com/onflow/flow-go/model/flow" + gethCommon "github.com/onflow/go-ethereum/common" + "github.com/onflow/go-ethereum/eth/tracers" +) + +type blockSnapshot struct { + *BlocksProvider + block models.Block +} + +var _ evmTypes.BlockSnapshot = (*blockSnapshot)(nil) + +func (bs *blockSnapshot) BlockContext() (evmTypes.BlockContext, error) { + return evmTypes.BlockContext{ + ChainID: 
evmTypes.EVMChainIDFromFlowChainID(bs.chainID), + BlockNumber: bs.block.Height, + BlockTimestamp: bs.block.Timestamp, + DirectCallBaseGasUsage: evmTypes.DefaultDirectCallBaseGasUsage, + DirectCallGasPrice: evmTypes.DefaultDirectCallGasPrice, + GasFeeCollector: evmTypes.CoinbaseAddress, + GetHashFunc: func(n uint64) gethCommon.Hash { + // For block heights greater than or equal to the current, + // return an empty block hash. + if n >= bs.block.Height { + return gethCommon.Hash{} + } + // If the given block height, is more than 256 blocks + // in the past, return an empty block hash. + if bs.block.Height-n > 256 { + return gethCommon.Hash{} + } + + block, err := bs.blocks.GetByHeight(n) + if err != nil { + return gethCommon.Hash{} + } + blockHash, err := block.Hash() + if err != nil { + return gethCommon.Hash{} + } + + return blockHash + }, + Random: bs.block.PrevRandao, + Tracer: bs.tracer, + }, nil +} + +type BlocksProvider struct { + blocks storage.BlockIndexer + chainID flowGo.ChainID + tracer *tracers.Tracer + latestBlock *models.Block +} + +var _ evmTypes.BlockSnapshotProvider = (*BlocksProvider)(nil) + +func NewBlocksProvider( + blocks storage.BlockIndexer, + chainID flowGo.ChainID, + tracer *tracers.Tracer, +) *BlocksProvider { + return &BlocksProvider{ + blocks: blocks, + chainID: chainID, + tracer: tracer, + } +} + +func (bp *BlocksProvider) OnBlockReceived(block *models.Block) error { + if bp.latestBlock != nil && bp.latestBlock.Height != (block.Height-1) { + return fmt.Errorf( + "%w: received new block: %d, non-sequential of latest block: %d", + models.ErrInvalidHeight, + block.Height, + bp.latestBlock.Height, + ) + } + + bp.latestBlock = block + + return nil +} + +func (bp *BlocksProvider) GetSnapshotAt(height uint64) ( + evmTypes.BlockSnapshot, + error, +) { + if bp.latestBlock != nil && bp.latestBlock.Height == height { + return &blockSnapshot{ + BlocksProvider: bp, + block: *bp.latestBlock, + }, nil + } + + block, err := bp.blocks.GetByHeight(height) 
+ if err != nil { + return nil, err + } + + return &blockSnapshot{ + BlocksProvider: bp, + block: *block, + }, nil +} diff --git a/services/replayer/blocks_provider_test.go b/services/replayer/blocks_provider_test.go new file mode 100644 index 000000000..d318ae3d6 --- /dev/null +++ b/services/replayer/blocks_provider_test.go @@ -0,0 +1,307 @@ +package replayer + +import ( + "testing" + + pebble2 "github.com/cockroachdb/pebble" + + "github.com/goccy/go-json" + "github.com/onflow/flow-evm-gateway/config" + "github.com/onflow/flow-evm-gateway/models" + "github.com/onflow/flow-evm-gateway/storage" + "github.com/onflow/flow-evm-gateway/storage/mocks" + "github.com/onflow/flow-evm-gateway/storage/pebble" + "github.com/onflow/flow-go-sdk" + evmTypes "github.com/onflow/flow-go/fvm/evm/types" + flowGo "github.com/onflow/flow-go/model/flow" + "github.com/onflow/go-ethereum/common" + "github.com/onflow/go-ethereum/eth/tracers" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + // this import is needed for side-effects, because the + // tracers.DefaultDirectory is relying on the init function + _ "github.com/onflow/go-ethereum/eth/tracers/native" +) + +func TestOnBlockReceived(t *testing.T) { + + t.Run("without latest block", func(t *testing.T) { + _, blocks := setupBlocksDB(t) + + blocksProvider := NewBlocksProvider(blocks, flowGo.Emulator, nil) + + block := mocks.NewBlock(1) + err := blocksProvider.OnBlockReceived(block) + require.NoError(t, err) + }) + + t.Run("with new block non-sequential to latest block", func(t *testing.T) { + _, blocks := setupBlocksDB(t) + blocksProvider := NewBlocksProvider(blocks, flowGo.Emulator, nil) + + block1 := mocks.NewBlock(1) + err := blocksProvider.OnBlockReceived(block1) + require.NoError(t, err) + + block2 := mocks.NewBlock(3) + err = blocksProvider.OnBlockReceived(block2) + require.Error(t, err) + assert.ErrorContains( + t, + err, + "received new block: 3, non-sequential of latest 
block: 1", + ) + }) + + t.Run("with new block non-sequential to latest block", func(t *testing.T) { + _, blocks := setupBlocksDB(t) + blocksProvider := NewBlocksProvider(blocks, flowGo.Emulator, nil) + + block1 := mocks.NewBlock(10) + err := blocksProvider.OnBlockReceived(block1) + require.NoError(t, err) + + block2 := mocks.NewBlock(11) + err = blocksProvider.OnBlockReceived(block2) + require.NoError(t, err) + }) +} + +func TestBlockContext(t *testing.T) { + + t.Run("for latest block", func(t *testing.T) { + _, blocks := setupBlocksDB(t) + tracer := newCallTracer(t) + blocksProvider := NewBlocksProvider(blocks, flowGo.Emulator, tracer) + + block := mocks.NewBlock(1) + err := blocksProvider.OnBlockReceived(block) + require.NoError(t, err) + + blockSnapshopt, err := blocksProvider.GetSnapshotAt(block.Height) + require.NoError(t, err) + + blockContext, err := blockSnapshopt.BlockContext() + require.NoError(t, err) + + assert.Equal(t, evmTypes.FlowEVMPreviewNetChainID, blockContext.ChainID) + assert.Equal(t, block.Height, blockContext.BlockNumber) + assert.Equal(t, block.Timestamp, blockContext.BlockTimestamp) + assert.Equal(t, evmTypes.DefaultDirectCallBaseGasUsage, blockContext.DirectCallBaseGasUsage) + assert.Equal(t, evmTypes.DefaultDirectCallGasPrice, blockContext.DirectCallGasPrice) + assert.Equal(t, evmTypes.CoinbaseAddress, blockContext.GasFeeCollector) + blockHash := blockContext.GetHashFunc(block.Height) + assert.Equal(t, common.Hash{}, blockHash) + assert.Equal(t, block.PrevRandao, blockContext.Random) + assert.Equal(t, tracer, blockContext.Tracer) + }) +} + +func TestGetHashFunc(t *testing.T) { + db, blocks := setupBlocksDB(t) + missingHeight := uint64(100) + + blockMapping := make(map[uint64]*models.Block, 0) + for i := uint64(1); i <= 300; i++ { + // simulate a missing block + if i == missingHeight { + continue + } + + block := mocks.NewBlock(i) + batch := db.NewBatch() + err := blocks.Store(i, flow.Identifier{0x1}, block, batch) + require.NoError(t, 
err) + + err = batch.Commit(pebble2.Sync) + require.NoError(t, err) + + blockMapping[i] = block + } + + t.Run("with requested height >= latest block height", func(t *testing.T) { + tracer := newCallTracer(t) + blocksProvider := NewBlocksProvider(blocks, flowGo.Emulator, tracer) + + latestBlock := blockMapping[200] + err := blocksProvider.OnBlockReceived(latestBlock) + require.NoError(t, err) + + blockSnapshopt, err := blocksProvider.GetSnapshotAt(latestBlock.Height) + require.NoError(t, err) + + blockContext, err := blockSnapshopt.BlockContext() + require.NoError(t, err) + require.Equal(t, latestBlock.Height, blockContext.BlockNumber) + + // GetHashFunc should return empty block hash for block heights >= latest + blockHash := blockContext.GetHashFunc(latestBlock.Height) + assert.Equal(t, common.Hash{}, blockHash) + + blockHash = blockContext.GetHashFunc(latestBlock.Height + 1) + assert.Equal(t, common.Hash{}, blockHash) + }) + + t.Run("with requested height within 256 block height range", func(t *testing.T) { + tracer := newCallTracer(t) + blocksProvider := NewBlocksProvider(blocks, flowGo.Emulator, tracer) + + latestBlock := blockMapping[257] + err := blocksProvider.OnBlockReceived(latestBlock) + require.NoError(t, err) + + blockSnapshopt, err := blocksProvider.GetSnapshotAt(latestBlock.Height) + require.NoError(t, err) + + blockContext, err := blockSnapshopt.BlockContext() + require.NoError(t, err) + require.Equal(t, latestBlock.Height, blockContext.BlockNumber) + + blockHash := blockContext.GetHashFunc(latestBlock.Height - 256) + expectedBlock := blockMapping[latestBlock.Height-256] + expectedHash, err := expectedBlock.Hash() + require.NoError(t, err) + assert.Equal(t, expectedHash, blockHash) + }) + + t.Run("with requested height outside the 256 block height range", func(t *testing.T) { + tracer := newCallTracer(t) + blocksProvider := NewBlocksProvider(blocks, flowGo.Emulator, tracer) + + latestBlock := blockMapping[260] + err := 
blocksProvider.OnBlockReceived(latestBlock) + require.NoError(t, err) + + blockSnapshopt, err := blocksProvider.GetSnapshotAt(latestBlock.Height) + require.NoError(t, err) + + blockContext, err := blockSnapshopt.BlockContext() + require.NoError(t, err) + require.Equal(t, latestBlock.Height, blockContext.BlockNumber) + + blockHash := blockContext.GetHashFunc(latestBlock.Height - 259) + assert.Equal(t, common.Hash{}, blockHash) + }) + + t.Run("with requested height missing from Blocks DB", func(t *testing.T) { + tracer := newCallTracer(t) + blocksProvider := NewBlocksProvider(blocks, flowGo.Emulator, tracer) + + latestBlock := blockMapping[260] + err := blocksProvider.OnBlockReceived(latestBlock) + require.NoError(t, err) + + blockSnapshopt, err := blocksProvider.GetSnapshotAt(latestBlock.Height) + require.NoError(t, err) + + blockContext, err := blockSnapshopt.BlockContext() + require.NoError(t, err) + require.Equal(t, latestBlock.Height, blockContext.BlockNumber) + + blockHash := blockContext.GetHashFunc(missingHeight) + assert.Equal(t, common.Hash{}, blockHash) + }) +} + +func TestGetSnapshotAt(t *testing.T) { + + t.Run("for latest block", func(t *testing.T) { + _, blocks := setupBlocksDB(t) + tracer := newCallTracer(t) + blocksProvider := NewBlocksProvider(blocks, flowGo.Emulator, tracer) + + block := mocks.NewBlock(1) + err := blocksProvider.OnBlockReceived(block) + require.NoError(t, err) + + blockSnapshot, err := blocksProvider.GetSnapshotAt(block.Height) + require.NoError(t, err) + + blockContext, err := blockSnapshot.BlockContext() + require.NoError(t, err) + assert.Equal(t, block.Height, blockContext.BlockNumber) + assert.Equal(t, block.Timestamp, blockContext.BlockTimestamp) + assert.Equal(t, block.PrevRandao, blockContext.Random) + assert.Equal(t, tracer, blockContext.Tracer) + }) + + t.Run("for historic block", func(t *testing.T) { + db, blocks := setupBlocksDB(t) + tracer := newCallTracer(t) + blocksProvider := NewBlocksProvider(blocks, flowGo.Emulator, 
tracer) + + block1 := mocks.NewBlock(1) + batch := db.NewBatch() + err := blocks.Store(1, flow.Identifier{0x1}, block1, batch) + require.NoError(t, err) + + err = batch.Commit(pebble2.Sync) + require.NoError(t, err) + + block2 := mocks.NewBlock(2) + err = blocksProvider.OnBlockReceived(block2) + require.NoError(t, err) + + blockSnapshot, err := blocksProvider.GetSnapshotAt(block1.Height) + require.NoError(t, err) + + blockContext, err := blockSnapshot.BlockContext() + require.NoError(t, err) + assert.Equal(t, block1.Height, blockContext.BlockNumber) + assert.Equal(t, block1.Timestamp, blockContext.BlockTimestamp) + assert.Equal(t, block1.PrevRandao, blockContext.Random) + assert.Equal(t, tracer, blockContext.Tracer) + }) + + t.Run("for missing historic block", func(t *testing.T) { + _, blocks := setupBlocksDB(t) + tracer := newCallTracer(t) + blocksProvider := NewBlocksProvider(blocks, flowGo.Emulator, tracer) + + // `block1` is not stored on Blocks DB + block1 := mocks.NewBlock(1) + + block2 := mocks.NewBlock(2) + err := blocksProvider.OnBlockReceived(block2) + require.NoError(t, err) + + _, err = blocksProvider.GetSnapshotAt(block1.Height) + require.Error(t, err) + assert.ErrorContains( + t, + err, + "entity not found", + ) + }) +} + +func setupBlocksDB(t *testing.T) (*pebble.Storage, storage.BlockIndexer) { + dir := t.TempDir() + db, err := pebble.New(dir, zerolog.Nop()) + require.NoError(t, err) + batch := db.NewBatch() + + chainID := flowGo.Emulator + blocks := pebble.NewBlocks(db, chainID) + + err = blocks.InitHeights(config.EmulatorInitCadenceHeight, flow.Identifier{0x1}, batch) + require.NoError(t, err) + + err = batch.Commit(pebble2.Sync) + require.NoError(t, err) + + return db, blocks +} + +func newCallTracer(t *testing.T) *tracers.Tracer { + tracer, err := tracers.DefaultDirectory.New( + "callTracer", + &tracers.Context{}, + json.RawMessage(`{ "onlyTopCall": true }`), + ) + require.NoError(t, err) + + return tracer +} diff --git 
a/services/replayer/call_tracer_collector.go b/services/replayer/call_tracer_collector.go new file mode 100644 index 000000000..dcb3fee47 --- /dev/null +++ b/services/replayer/call_tracer_collector.go @@ -0,0 +1,214 @@ +package replayer + +import ( + "encoding/json" + "fmt" + "math/big" + + "github.com/onflow/go-ethereum/common" + "github.com/onflow/go-ethereum/core/tracing" + "github.com/onflow/go-ethereum/core/types" + "github.com/onflow/go-ethereum/eth/tracers" + "github.com/rs/zerolog" +) + +const ( + TracerConfig = `{"onlyTopCall":true}` + TracerName = "callTracer" +) + +func DefaultCallTracer() (*tracers.Tracer, error) { + tracer, err := tracers.DefaultDirectory.New( + TracerName, + &tracers.Context{}, + json.RawMessage(TracerConfig), + ) + if err != nil { + return nil, err + } + + return tracer, nil +} + +type EVMTracer interface { + TxTracer() *tracers.Tracer + ResetTracer() error + Collect(txID common.Hash) (json.RawMessage, error) +} + +type CallTracerCollector struct { + tracer *tracers.Tracer + resultsByTxID map[common.Hash]json.RawMessage + logger zerolog.Logger +} + +var _ EVMTracer = (*CallTracerCollector)(nil) + +func NewCallTracerCollector(logger zerolog.Logger) ( + *CallTracerCollector, + error, +) { + tracer, err := DefaultCallTracer() + if err != nil { + return nil, err + } + + return &CallTracerCollector{ + tracer: tracer, + resultsByTxID: make(map[common.Hash]json.RawMessage), + logger: logger.With().Str("component", "evm-tracer").Logger(), + }, nil +} + +func (t *CallTracerCollector) TxTracer() *tracers.Tracer { + return NewSafeTxTracer(t) +} + +func (t *CallTracerCollector) ResetTracer() error { + var err error + t.tracer, err = DefaultCallTracer() + return err +} + +func (ct *CallTracerCollector) Collect(txID common.Hash) (json.RawMessage, error) { + // collect the trace result + result, found := ct.resultsByTxID[txID] + if !found { + return nil, fmt.Errorf("trace result for tx: %s, not found", txID.String()) + } + + // remove the result + 
delete(ct.resultsByTxID, txID) + + return result, nil +} + +func NewSafeTxTracer(ct *CallTracerCollector) *tracers.Tracer { + wrapped := &tracers.Tracer{ + Hooks: &tracing.Hooks{}, + GetResult: ct.tracer.GetResult, + Stop: ct.tracer.Stop, + } + + l := ct.logger + + wrapped.OnTxStart = func( + vm *tracing.VMContext, + tx *types.Transaction, + from common.Address, + ) { + defer func() { + if r := recover(); r != nil { + err, ok := r.(error) + if !ok { + err = fmt.Errorf("panic: %v", r) + } + l.Err(err).Stack().Msg("OnTxStart trace collection failed") + } + }() + if ct.tracer.OnTxStart != nil { + ct.tracer.OnTxStart(vm, tx, from) + } + } + + wrapped.OnTxEnd = func(receipt *types.Receipt, err error) { + defer func() { + if r := recover(); r != nil { + err, ok := r.(error) + if !ok { + err = fmt.Errorf("panic: %v", r) + } + l.Err(err).Stack().Msg("OnTxEnd trace collection failed") + } + }() + if ct.tracer.OnTxEnd != nil { + ct.tracer.OnTxEnd(receipt, err) + } + + // collect results for the tracer + res, err := ct.tracer.GetResult() + if err != nil { + l.Error().Err(err).Msg("failed to produce trace results") + return + } + ct.resultsByTxID[receipt.TxHash] = res + + // reset tracing to have fresh state + if err := ct.ResetTracer(); err != nil { + l.Error().Err(err).Msg("failed to reset tracer") + return + } + } + + wrapped.OnEnter = func( + depth int, + typ byte, + from, to common.Address, + input []byte, + gas uint64, + value *big.Int, + ) { + defer func() { + if r := recover(); r != nil { + err, ok := r.(error) + if !ok { + err = fmt.Errorf("panic: %v", r) + } + l.Err(err).Stack().Msg("OnEnter trace collection failed") + } + }() + if ct.tracer.OnEnter != nil { + ct.tracer.OnEnter(depth, typ, from, to, input, gas, value) + } + } + + wrapped.OnExit = func(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + defer func() { + if r := recover(); r != nil { + err, ok := r.(error) + if !ok { + err = fmt.Errorf("panic: %v", r) + } + 
l.Err(err).Stack().Msg("OnExit trace collection failed") + } + }() + if ct.tracer.OnExit != nil { + ct.tracer.OnExit(depth, output, gasUsed, err, reverted) + } + } + + wrapped.OnLog = func(log *types.Log) { + defer func() { + if r := recover(); r != nil { + err, ok := r.(error) + if !ok { + err = fmt.Errorf("panic: %v", r) + } + l.Err(err).Stack().Msg("OnLog trace collection failed") + } + }() + if ct.tracer.OnLog != nil { + ct.tracer.OnLog(log) + } + } + + return wrapped +} + +var NopTracer = &nopTracer{} + +var _ EVMTracer = (*nopTracer)(nil) + +type nopTracer struct{} + +func (n nopTracer) TxTracer() *tracers.Tracer { + return nil +} + +func (n nopTracer) Collect(_ common.Hash) (json.RawMessage, error) { + return nil, nil +} + +func (n nopTracer) ResetTracer() error { + return nil +} diff --git a/services/replayer/config.go b/services/replayer/config.go new file mode 100644 index 000000000..72fb0a88d --- /dev/null +++ b/services/replayer/config.go @@ -0,0 +1,12 @@ +package replayer + +import ( + "github.com/onflow/flow-go/model/flow" +) + +type Config struct { + ChainID flow.ChainID + RootAddr flow.Address + CallTracerCollector EVMTracer + ValidateResults bool +} diff --git a/services/requester/cadence/get_balance.cdc b/services/requester/cadence/get_balance.cdc deleted file mode 100644 index b8c954133..000000000 --- a/services/requester/cadence/get_balance.cdc +++ /dev/null @@ -1,8 +0,0 @@ -import EVM - -access(all) -fun main(hexEncodedAddress: String): UInt { - let address = EVM.addressFromString(hexEncodedAddress) - - return address.balance().inAttoFLOW() -} diff --git a/services/requester/cadence/get_code.cdc b/services/requester/cadence/get_code.cdc deleted file mode 100644 index 419f8395a..000000000 --- a/services/requester/cadence/get_code.cdc +++ /dev/null @@ -1,8 +0,0 @@ -import EVM - -access(all) -fun main(hexEncodedAddress: String): String { - let address = EVM.addressFromString(hexEncodedAddress) - - return String.encodeHex(address.code()) -} diff 
--git a/services/requester/cadence/get_nonce.cdc b/services/requester/cadence/get_nonce.cdc deleted file mode 100644 index 39e2efe2a..000000000 --- a/services/requester/cadence/get_nonce.cdc +++ /dev/null @@ -1,8 +0,0 @@ -import EVM - -access(all) -fun main(hexEncodedAddress: String): UInt64 { - let address = EVM.addressFromString(hexEncodedAddress) - - return address.nonce() -} diff --git a/services/requester/remote_cadence_arch.go b/services/requester/remote_cadence_arch.go new file mode 100644 index 000000000..04a530af7 --- /dev/null +++ b/services/requester/remote_cadence_arch.go @@ -0,0 +1,138 @@ +package requester + +import ( + "context" + _ "embed" + "encoding/hex" + "fmt" + "math/big" + + "github.com/onflow/cadence" + errs "github.com/onflow/flow-evm-gateway/models/errors" + evmImpl "github.com/onflow/flow-go/fvm/evm/impl" + evmTypes "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" + gethCommon "github.com/onflow/go-ethereum/common" + "github.com/onflow/go-ethereum/core/types" + "github.com/onflow/go-ethereum/crypto" +) + +const cadenceArchCallGasLimit = 155_000 + +var ( + //go:embed cadence/dry_run.cdc + dryRunScript []byte +) + +var cadenceArchAddress = gethCommon.HexToAddress("0x0000000000000000000000010000000000000001") + +type RemoteCadenceArch struct { + blockHeight uint64 + client *CrossSporkClient + chainID flow.ChainID + cachedCalls map[string]evmTypes.Data +} + +var _ evmTypes.PrecompiledContract = (*RemoteCadenceArch)(nil) + +func NewRemoteCadenceArch( + blockHeight uint64, + client *CrossSporkClient, + chainID flow.ChainID, +) *RemoteCadenceArch { + return &RemoteCadenceArch{ + blockHeight: blockHeight, + client: client, + chainID: chainID, + cachedCalls: map[string]evmTypes.Data{}, + } +} + +func (rca *RemoteCadenceArch) Address() evmTypes.Address { + return evmTypes.NewAddress(cadenceArchAddress) +} + +func (rca *RemoteCadenceArch) RequiredGas(input []byte) uint64 { + evmResult, err := rca.runCall(input) + if 
err != nil { + return 0 + } + + return evmResult.GasConsumed +} + +func (rca *RemoteCadenceArch) Run(input []byte) ([]byte, error) { + key := hex.EncodeToString(crypto.Keccak256(input)) + + if result, ok := rca.cachedCalls[key]; ok { + return result, nil + } + + evmResult, err := rca.runCall(input) + if err != nil { + return nil, err + } + return evmResult.ReturnedData, nil +} + +func (rca *RemoteCadenceArch) runCall(input []byte) (*evmTypes.ResultSummary, error) { + tx := types.NewTx( + &types.LegacyTx{ + Nonce: 0, + To: &cadenceArchAddress, + Value: big.NewInt(0), + Gas: cadenceArchCallGasLimit, + GasPrice: big.NewInt(0), + Data: input, + }, + ) + encodedTx, err := tx.MarshalBinary() + if err != nil { + return nil, err + } + hexEncodedTx, err := cadence.NewString(hex.EncodeToString(encodedTx)) + if err != nil { + return nil, err + } + + hexEncodedAddress, err := cadence.NewString(evmTypes.CoinbaseAddress.String()) + if err != nil { + return nil, err + } + + scriptResult, err := rca.client.ExecuteScriptAtBlockHeight( + context.Background(), + rca.blockHeight, + replaceAddresses(dryRunScript, rca.chainID), + []cadence.Value{hexEncodedTx, hexEncodedAddress}, + ) + if err != nil { + return nil, err + } + + evmResult, err := parseResult(scriptResult) + if err != nil { + return nil, err + } + + key := hex.EncodeToString(crypto.Keccak256(input)) + rca.cachedCalls[key] = evmResult.ReturnedData + + return evmResult, nil +} + +func parseResult(res cadence.Value) (*evmTypes.ResultSummary, error) { + result, err := evmImpl.ResultSummaryFromEVMResultValue(res) + if err != nil { + return nil, fmt.Errorf("failed to decode EVM result of type: %s, with: %w", res.Type().ID(), err) + } + + if result.ErrorCode != 0 { + if result.ErrorCode == evmTypes.ExecutionErrCodeExecutionReverted { + return nil, errs.NewRevertError(result.ReturnedData) + } + return nil, errs.NewFailedTransactionError(result.ErrorMessage) + } + + return result, nil +} diff --git 
a/services/requester/remote_state.go b/services/requester/remote_state.go deleted file mode 100644 index 2e0d79a6c..000000000 --- a/services/requester/remote_state.go +++ /dev/null @@ -1,75 +0,0 @@ -package requester - -import ( - "context" - - "github.com/onflow/atree" - "github.com/onflow/flow-go/engine/common/rpc/convert" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow/protobuf/go/flow/entities" - "github.com/onflow/flow/protobuf/go/flow/executiondata" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var _ atree.Ledger = &remoteLedger{} - -func newRemoteLedger( - client executiondata.ExecutionDataAPIClient, - cadenceHeight uint64, -) (*remoteLedger, error) { - return &remoteLedger{ - execution: client, - height: cadenceHeight, - }, nil -} - -// remoteLedger is a ledger that uses execution data APIs to fetch register values, -// thus simulating execution against the host network. -// -// The ledger implements atree.Ledger interface which is used by the type.stateDB -// to inspect the state. 
-type remoteLedger struct { - execution executiondata.ExecutionDataAPIClient - height uint64 -} - -func (l *remoteLedger) GetValue(owner, key []byte) ([]byte, error) { - id := flow.RegisterID{ - Key: string(key), - Owner: string(owner), - } - registerID := convert.RegisterIDToMessage(id) - - response, err := l.execution.GetRegisterValues( - context.Background(), - &executiondata.GetRegisterValuesRequest{ - BlockHeight: l.height, - RegisterIds: []*entities.RegisterID{registerID}, - }, - ) - errorCode := status.Code(err) - if err != nil && errorCode != codes.NotFound && errorCode != codes.OutOfRange { - return nil, err - } - - if response != nil && len(response.Values) > 0 { - // we only request one register so 0 index - return response.Values[0], nil - } - - return nil, nil -} - -func (l *remoteLedger) ValueExists(owner, key []byte) (exists bool, err error) { - val, err := l.GetValue(owner, key) - return val != nil, err -} - -func (l *remoteLedger) SetValue(owner, key, value []byte) (err error) { - panic("read only") -} - -func (l *remoteLedger) AllocateSlabIndex(owner []byte) (atree.SlabIndex, error) { - panic("read only") -} diff --git a/services/requester/remote_state_test.go b/services/requester/remote_state_test.go deleted file mode 100644 index 69e494b8c..000000000 --- a/services/requester/remote_state_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package requester - -import ( - "context" - "encoding/hex" - "os" - "testing" - - grpcClient "github.com/onflow/flow-go-sdk/access/grpc" - "github.com/onflow/flow-go/fvm/evm" - "github.com/onflow/flow-go/fvm/evm/emulator/state" - "github.com/onflow/flow-go/fvm/evm/types" - flowGo "github.com/onflow/flow-go/model/flow" - gethCommon "github.com/onflow/go-ethereum/common" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" -) - -var previewnetStorageAddress = evm.StorageAccountAddress(flowGo.Previewnet) - -func Test_E2E_Previewnet_RemoteLedger(t *testing.T) { - 
executionAPI := os.Getenv("E2E_EXECUTION_API") // "access-001.previewnet1.nodes.onflow.org:9000" - if executionAPI == "" { - t.Skip() - } - - ledger, err := newPreviewnetLedger(executionAPI) - require.NoError(t, err) - - // this is a pre-established test account on previewnet - addrBytes, err := hex.DecodeString("BC9985a24c0846cbEdd6249868020A84Df83Ea85") - require.NoError(t, err) - testAddress := types.NewAddressFromBytes(addrBytes).ToCommon() - - stateDB, err := state.NewStateDB(ledger, previewnetStorageAddress) - require.NoError(t, err) - - assert.NotEmpty(t, stateDB.GetCode(testAddress)) - assert.NotEmpty(t, stateDB.GetNonce(testAddress)) - assert.Empty(t, stateDB.GetBalance(testAddress)) - assert.NotEmpty(t, stateDB.GetCodeSize(testAddress)) - assert.NotEmpty(t, stateDB.GetState(testAddress, gethCommon.Hash{})) -} - -/* -Testing from local machine (bottleneck is network delay to previewnet AN) - -Benchmark_RemoteLedger_GetBalance-8 9 1144204361 ns/op -*/ -func Benchmark_RemoteLedger_GetBalance(b *testing.B) { - executionAPI := os.Getenv("E2E_EXECUTION_API") // "access-001.previewnet1.nodes.onflow.org:9000" - if executionAPI == "" { - b.Skip() - } - - client, err := grpcClient.NewClient(executionAPI, - grpcClient.WithGRPCDialOptions(grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(1024*1024*1024))), - ) - require.NoError(b, err) - execClient := client.ExecutionDataRPCClient() - - latest, err := client.GetLatestBlockHeader(context.Background(), true) - require.NoError(b, err) - - // we have to include ledger creation since the loading of the collection - // will be done only once per height, all the subsequent requests for - // getting the balance will work on already loaded state and thus be fast - for i := 0; i < b.N; i++ { - ledger, err := newRemoteLedger(execClient, latest.Height) - require.NoError(b, err) - - stateDB, err := state.NewStateDB(ledger, previewnetStorageAddress) - require.NoError(b, err) - - addrBytes, err := 
hex.DecodeString("BC9985a24c0846cbEdd6249868020A84Df83Ea85") - require.NoError(b, err) - testAddress := types.NewAddressFromBytes(addrBytes).ToCommon() - - assert.Empty(b, stateDB.GetBalance(testAddress)) - } -} - -func newPreviewnetLedger(host string) (*remoteLedger, error) { - client, err := grpcClient.NewClient(host, - grpcClient.WithGRPCDialOptions(grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(1024*1024*1024))), - ) - if err != nil { - return nil, err - } - execClient := client.ExecutionDataRPCClient() - - latest, err := client.GetLatestBlockHeader(context.Background(), true) - if err != nil { - return nil, err - } - - return newRemoteLedger(execClient, latest.Height) -} diff --git a/services/requester/requester.go b/services/requester/requester.go index b0431bf33..d6d1d80dd 100644 --- a/services/requester/requester.go +++ b/services/requester/requester.go @@ -4,25 +4,18 @@ import ( "context" _ "embed" "encoding/hex" - "errors" "fmt" - "math" "math/big" - "strings" "sync" "time" - "github.com/hashicorp/golang-lru/v2/expirable" "github.com/onflow/cadence" "github.com/onflow/flow-go-sdk" - "github.com/onflow/flow-go-sdk/access/grpc" "github.com/onflow/flow-go-sdk/crypto" "github.com/onflow/flow-go/fvm/evm" "github.com/onflow/flow-go/fvm/evm/emulator" - "github.com/onflow/flow-go/fvm/evm/emulator/state" - evmImpl "github.com/onflow/flow-go/fvm/evm/impl" + "github.com/onflow/flow-go/fvm/evm/offchain/query" evmTypes "github.com/onflow/flow-go/fvm/evm/types" - "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/go-ethereum/common" "github.com/onflow/go-ethereum/core/txpool" "github.com/onflow/go-ethereum/core/types" @@ -33,54 +26,27 @@ import ( "github.com/onflow/flow-evm-gateway/metrics" "github.com/onflow/flow-evm-gateway/models" errs "github.com/onflow/flow-evm-gateway/models/errors" + "github.com/onflow/flow-evm-gateway/services/replayer" "github.com/onflow/flow-evm-gateway/storage" + "github.com/onflow/flow-evm-gateway/storage/pebble" + + 
gethParams "github.com/onflow/go-ethereum/params" ) var ( - //go:embed cadence/dry_run.cdc - dryRunScript []byte - //go:embed cadence/run.cdc runTxScript []byte - //go:embed cadence/get_balance.cdc - getBalanceScript []byte - //go:embed cadence/create_coa.cdc createCOAScript []byte - //go:embed cadence/get_nonce.cdc - getNonceScript []byte - - //go:embed cadence/get_code.cdc - getCodeScript []byte - //go:embed cadence/get_latest_evm_height.cdc getLatestEVMHeight []byte ) -type scriptType int - -const ( - dryRun scriptType = iota - getBalance - getNonce - getCode - getLatest -) - -var scripts = map[scriptType][]byte{ - dryRun: dryRunScript, - getBalance: getBalanceScript, - getNonce: getNonceScript, - getCode: getCodeScript, - getLatest: getLatestEVMHeight, -} - const minFlowBalance = 2 const coaFundingBalance = minFlowBalance - 1 - -const LatestBlockHeight uint64 = math.MaxUint64 - 1 +const blockGasLimit = 120_000_000 type Requester interface { // SendRawTransaction will submit signed transaction data to the network. @@ -89,43 +55,44 @@ type Requester interface { // GetBalance returns the amount of wei for the given address in the state of the // given EVM block height. - GetBalance(ctx context.Context, address common.Address, evmHeight int64) (*big.Int, error) + GetBalance(address common.Address, height uint64) (*big.Int, error) // Call executes the given signed transaction data on the state for the given EVM block height. // Note, this function doesn't make and changes in the state/blockchain and is // useful to execute and retrieve values. - Call(ctx context.Context, data []byte, from common.Address, evmHeight int64) ([]byte, error) + Call(tx *types.LegacyTx, from common.Address, height uint64) ([]byte, error) // EstimateGas executes the given signed transaction data on the state for the given EVM block height. 
// Note, this function doesn't make any changes in the state/blockchain and is // useful to executed and retrieve the gas consumption and possible failures. - EstimateGas(ctx context.Context, data []byte, from common.Address, evmHeight int64) (uint64, error) + EstimateGas(tx *types.LegacyTx, from common.Address, height uint64) (uint64, error) // GetNonce gets nonce from the network at the given EVM block height. - GetNonce(ctx context.Context, address common.Address, evmHeight int64) (uint64, error) + GetNonce(address common.Address, height uint64) (uint64, error) // GetCode returns the code stored at the given address in // the state for the given EVM block height. - GetCode(ctx context.Context, address common.Address, evmHeight int64) ([]byte, error) + GetCode(address common.Address, height uint64) ([]byte, error) + + // GetStorageAt returns the storage from the state at the given address, key and block number. + GetStorageAt(address common.Address, hash common.Hash, height uint64) (common.Hash, error) // GetLatestEVMHeight returns the latest EVM height of the network. GetLatestEVMHeight(ctx context.Context) (uint64, error) - - // GetStorageAt returns the storage from the state at the given address, key and block number. 
- GetStorageAt(ctx context.Context, address common.Address, hash common.Hash, evmHeight int64) (common.Hash, error) } var _ Requester = &EVM{} type EVM struct { - client *CrossSporkClient - config *config.Config - signer crypto.Signer - txPool *TxPool - logger zerolog.Logger - blocks storage.BlockIndexer - mux sync.Mutex - scriptCache *expirable.LRU[string, cadence.Value] + registerStore *pebble.RegisterStorage + blocksProvider *replayer.BlocksProvider + client *CrossSporkClient + config *config.Config + signer crypto.Signer + txPool *TxPool + logger zerolog.Logger + blocks storage.BlockIndexer + mux sync.Mutex head *types.Header evmSigner types.Signer @@ -135,6 +102,8 @@ type EVM struct { } func NewEVM( + registerStore *pebble.RegisterStorage, + blocksProvider *replayer.BlocksProvider, client *CrossSporkClient, config *config.Config, signer crypto.Signer, @@ -168,7 +137,7 @@ func NewEVM( head := &types.Header{ Number: big.NewInt(20_182_324), Time: uint64(time.Now().Unix()), - GasLimit: 30_000_000, + GasLimit: blockGasLimit, } emulatorConfig := emulator.NewConfig( emulator.WithChainID(config.EVMNetworkID), @@ -187,12 +156,9 @@ func NewEVM( MinTip: new(big.Int), } - var cache *expirable.LRU[string, cadence.Value] - if config.CacheSize != 0 { - cache = expirable.NewLRU[string, cadence.Value](int(config.CacheSize), nil, time.Second) - } - evm := &EVM{ + registerStore: registerStore, + blocksProvider: blocksProvider, client: client, config: config, signer: signer, @@ -203,14 +169,13 @@ func NewEVM( evmSigner: evmSigner, validationOptions: validationOptions, collector: collector, - scriptCache: cache, } // create COA on the account if config.CreateCOAResource { tx, err := evm.buildTransaction( context.Background(), - evm.replaceAddresses(createCOAScript), + replaceAddresses(createCOAScript, config.FlowNetworkID), cadence.UFix64(coaFundingBalance), ) if err != nil { @@ -255,7 +220,7 @@ func (e *EVM) SendRawTransaction(ctx context.Context, data []byte) (common.Hash, 
return common.Hash{}, err } - script := e.replaceAddresses(runTxScript) + script := replaceAddresses(runTxScript, e.config.FlowNetworkID) flowTx, err := e.buildTransaction(ctx, script, hexEncodedTx, coinbaseAddress) if err != nil { e.logger.Error().Err(err).Str("data", txData).Msg("failed to build transaction") @@ -336,317 +301,160 @@ func (e *EVM) buildTransaction(ctx context.Context, script []byte, args ...caden } func (e *EVM) GetBalance( - ctx context.Context, address common.Address, - evmHeight int64, + height uint64, ) (*big.Int, error) { - hexEncodedAddress, err := addressToCadenceString(address) - if err != nil { - return nil, err - } - - height, err := e.evmToCadenceHeight(evmHeight) + view, err := e.getBlockView(height) if err != nil { return nil, err } - val, err := e.executeScriptAtHeight( - ctx, - getBalance, - height, - []cadence.Value{hexEncodedAddress}, - ) - if err != nil { - if !errors.Is(err, errs.ErrHeightOutOfRange) { - e.logger.Error(). - Err(err). - Str("address", address.String()). - Int64("evm-height", evmHeight). - Uint64("cadence-height", height). 
- Msg("failed to get get balance") - } - return nil, fmt.Errorf( - "failed to get balance of address: %s at height: %d, with: %w", - address, - evmHeight, - err, - ) - } - - // sanity check, should never occur - if _, ok := val.(cadence.UInt); !ok { - return nil, fmt.Errorf("failed to convert balance %v to UInt, got type: %T", val, val) - } - - return val.(cadence.UInt).Big(), nil + return view.GetBalance(address) } func (e *EVM) GetNonce( - ctx context.Context, address common.Address, - evmHeight int64, + height uint64, ) (uint64, error) { - hexEncodedAddress, err := addressToCadenceString(address) + view, err := e.getBlockView(height) if err != nil { return 0, err } - height, err := e.evmToCadenceHeight(evmHeight) - if err != nil { - return 0, err - } - - val, err := e.executeScriptAtHeight( - ctx, - getNonce, - height, - []cadence.Value{hexEncodedAddress}, - ) - if err != nil { - if !errors.Is(err, errs.ErrHeightOutOfRange) { - e.logger.Error().Err(err). - Str("address", address.String()). - Int64("evm-height", evmHeight). - Uint64("cadence-height", height). - Msg("failed to get nonce") - } - return 0, fmt.Errorf( - "failed to get nonce of address: %s at height: %d, with: %w", - address, - evmHeight, - err, - ) - } - - // sanity check, should never occur - if _, ok := val.(cadence.UInt64); !ok { - return 0, fmt.Errorf("failed to convert nonce %v to UInt64, got type: %T", val, val) - } - - nonce := uint64(val.(cadence.UInt64)) - - e.logger.Debug(). - Uint64("nonce", nonce). - Int64("evm-height", evmHeight). - Uint64("cadence-height", height). 
- Msg("get nonce executed") - - return nonce, nil -} - -func (e *EVM) stateAt(evmHeight int64) (*state.StateDB, error) { - cadenceHeight, err := e.evmToCadenceHeight(evmHeight) - if err != nil { - return nil, err - } - - if cadenceHeight == LatestBlockHeight { - h, err := e.client.GetLatestBlockHeader(context.Background(), true) - if err != nil { - return nil, err - } - cadenceHeight = h.Height - } - - exeClient, ok := e.client.Client.(*grpc.Client) - if !ok { - return nil, fmt.Errorf("could not convert to execution client") - } - ledger, err := newRemoteLedger(exeClient.ExecutionDataRPCClient(), cadenceHeight) - if err != nil { - return nil, fmt.Errorf("could not create remote ledger for height: %d, with: %w", cadenceHeight, err) - } - - storageAddress := evm.StorageAccountAddress(e.config.FlowNetworkID) - return state.NewStateDB(ledger, storageAddress) + return view.GetNonce(address) } func (e *EVM) GetStorageAt( - ctx context.Context, address common.Address, hash common.Hash, - evmHeight int64, + height uint64, ) (common.Hash, error) { - stateDB, err := e.stateAt(evmHeight) + view, err := e.getBlockView(height) if err != nil { return common.Hash{}, err } - result := stateDB.GetState(address, hash) - return result, stateDB.Error() + return view.GetSlab(address, hash) } func (e *EVM) Call( - ctx context.Context, - data []byte, + tx *types.LegacyTx, from common.Address, - evmHeight int64, + height uint64, ) ([]byte, error) { - hexEncodedTx, err := cadence.NewString(hex.EncodeToString(data)) + view, err := e.getBlockView(height) if err != nil { return nil, err } - hexEncodedAddress, err := addressToCadenceString(from) - if err != nil { - return nil, err + to := common.Address{} + if tx.To != nil { + to = *tx.To } - - height, err := e.evmToCadenceHeight(evmHeight) + cdcHeight, err := e.evmToCadenceHeight(height) if err != nil { return nil, err } - - scriptResult, err := e.executeScriptAtHeight( - ctx, - dryRun, - height, - []cadence.Value{hexEncodedTx, 
hexEncodedAddress}, + rca := NewRemoteCadenceArch(cdcHeight, e.client, e.config.FlowNetworkID) + result, err := view.DryCall( + from, + to, + tx.Data, + tx.Value, + tx.Gas, + query.WithExtraPrecompiledContracts([]evmTypes.PrecompiledContract{rca}), ) - if err != nil { - if !errors.Is(err, errs.ErrHeightOutOfRange) { - e.logger.Error(). - Err(err). - Uint64("cadence-height", height). - Int64("evm-height", evmHeight). - Str("from", from.String()). - Str("data", hex.EncodeToString(data)). - Msg("failed to execute call") - } - return nil, fmt.Errorf("failed to execute script at height: %d, with: %w", height, err) - } - evmResult, err := parseResult(scriptResult) - if err != nil { - return nil, err + resultSummary := result.ResultSummary() + if resultSummary.ErrorCode != 0 { + if resultSummary.ErrorCode == evmTypes.ExecutionErrCodeExecutionReverted { + return nil, errs.NewRevertError(resultSummary.ReturnedData) + } + return nil, errs.NewFailedTransactionError(resultSummary.ErrorMessage) } - result := evmResult.ReturnedData - - e.logger.Debug(). - Str("result", hex.EncodeToString(result)). - Int64("evm-height", evmHeight). - Uint64("cadence-height", height). 
- Msg("call executed") - - return result, nil + return result.ReturnedData, err } func (e *EVM) EstimateGas( - ctx context.Context, - data []byte, + tx *types.LegacyTx, from common.Address, - evmHeight int64, + height uint64, ) (uint64, error) { - hexEncodedTx, err := cadence.NewString(hex.EncodeToString(data)) + view, err := e.getBlockView(height) if err != nil { return 0, err } - hexEncodedAddress, err := addressToCadenceString(from) + to := common.Address{} + if tx.To != nil { + to = *tx.To + } + cdcHeight, err := e.evmToCadenceHeight(height) if err != nil { return 0, err } - - height, err := e.evmToCadenceHeight(evmHeight) + rca := NewRemoteCadenceArch(cdcHeight, e.client, e.config.FlowNetworkID) + result, err := view.DryCall( + from, + to, + tx.Data, + tx.Value, + tx.Gas, + query.WithExtraPrecompiledContracts([]evmTypes.PrecompiledContract{rca}), + ) if err != nil { return 0, err } - scriptResult, err := e.executeScriptAtHeight( - ctx, - dryRun, - height, - []cadence.Value{hexEncodedTx, hexEncodedAddress}, - ) - if err != nil { - if !errors.Is(err, errs.ErrHeightOutOfRange) { - e.logger.Error(). - Err(err). - Uint64("cadence-height", height). - Int64("evm-height", evmHeight). - Str("from", from.String()). - Str("data", hex.EncodeToString(data)). - Msg("failed to execute estimateGas") + resultSummary := result.ResultSummary() + if resultSummary.ErrorCode != 0 { + if resultSummary.ErrorCode == evmTypes.ExecutionErrCodeExecutionReverted { + return 0, errs.NewRevertError(resultSummary.ReturnedData) } - return 0, fmt.Errorf("failed to execute script at height: %d, with: %w", height, err) + return 0, errs.NewFailedTransactionError(resultSummary.ErrorMessage) } - evmResult, err := parseResult(scriptResult) - if err != nil { - return 0, err - } + if result.Successful() { + // As mentioned in https://github.com/ethereum/EIPs/blob/master/EIPS/eip-150.md#specification + // Define "all but one 64th" of N as N - floor(N / 64). 
+ // If a call asks for more gas than the maximum allowed amount + // (i.e. the total amount of gas remaining in the parent after subtracting + // the gas cost of the call and memory expansion), do not return an OOG error; + // instead, if a call asks for more gas than all but one 64th of the maximum + // allowed amount, call with all but one 64th of the maximum allowed amount of + // gas (this is equivalent to a version of EIP-90 plus EIP-114). + // CREATE only provides all but one 64th of the parent gas to the child call. + result.GasConsumed = AddOne64th(result.GasConsumed) - gasConsumed := evmResult.GasConsumed + // Adding `gethParams.SstoreSentryGasEIP2200` is needed for this condition: + // https://github.com/onflow/go-ethereum/blob/master/core/vm/operations_acl.go#L29-L32 + result.GasConsumed += gethParams.SstoreSentryGasEIP2200 - e.logger.Debug(). - Uint64("gas", gasConsumed). - Int64("evm-height", evmHeight). - Uint64("cadence-height", height). - Msg("estimateGas executed") + // Take into account any gas refunds, which are calculated only after + // transaction execution. + result.GasConsumed += result.GasRefund + } - return gasConsumed, nil + return result.GasConsumed, err } func (e *EVM) GetCode( - ctx context.Context, address common.Address, - evmHeight int64, + height uint64, ) ([]byte, error) { - hexEncodedAddress, err := addressToCadenceString(address) - if err != nil { - return nil, err - } - - height, err := e.evmToCadenceHeight(evmHeight) - if err != nil { - return nil, err - } - - value, err := e.executeScriptAtHeight( - ctx, - getCode, - height, - []cadence.Value{hexEncodedAddress}, - ) - if err != nil { - if !errors.Is(err, errs.ErrHeightOutOfRange) { - e.logger.Error(). - Err(err). - Uint64("cadence-height", height). - Int64("evm-height", evmHeight). - Str("address", address.String()). 
- Msg("failed to get code") - } - - return nil, fmt.Errorf( - "failed to execute script for get code of address: %s at height: %d, with: %w", - address, - height, - err, - ) - } - - code, err := cadenceStringToBytes(value) + view, err := e.getBlockView(height) if err != nil { return nil, err } - e.logger.Debug(). - Str("address", address.Hex()). - Int64("evm-height", evmHeight). - Uint64("cadence-height", height). - Str("code size", fmt.Sprintf("%d", len(code))). - Msg("get code executed") - - return code, nil + return view.GetCode(address) } func (e *EVM) GetLatestEVMHeight(ctx context.Context) (uint64, error) { - val, err := e.executeScriptAtHeight( + val, err := e.client.ExecuteScriptAtLatestBlock( ctx, - getLatest, - LatestBlockHeight, + replaceAddresses(getLatestEVMHeight, e.config.FlowNetworkID), nil, ) if err != nil { @@ -694,181 +502,32 @@ func (e *EVM) getSignerNetworkInfo(ctx context.Context) (uint32, uint64, error) ) } -// replaceAddresses replace the addresses based on the network -func (e *EVM) replaceAddresses(script []byte) []byte { - // make the list of all contracts we should replace address for - sc := systemcontracts.SystemContractsForChain(e.config.FlowNetworkID) - contracts := []systemcontracts.SystemContract{sc.EVMContract, sc.FungibleToken, sc.FlowToken} - - s := string(script) - // iterate over all the import name and address pairs and replace them in script - for _, contract := range contracts { - s = strings.ReplaceAll(s, - fmt.Sprintf("import %s", contract.Name), - fmt.Sprintf("import %s from %s", contract.Name, contract.Address.HexWithPrefix()), - ) - } - - // also replace COA address if used (in scripts) - s = strings.ReplaceAll(s, "0xCOA", e.config.COAAddress.HexWithPrefix()) +func (e *EVM) getBlockView(height uint64) (*query.View, error) { + viewProvider := query.NewViewProvider( + e.config.FlowNetworkID, + evm.StorageAccountAddress(e.config.FlowNetworkID), + e.registerStore, + e.blocksProvider, + blockGasLimit, + ) - return []byte(s) 
+ return viewProvider.GetBlockView(height) } -func (e *EVM) evmToCadenceHeight(height int64) (uint64, error) { - if height < 0 { - return LatestBlockHeight, nil - } - - evmHeight := uint64(height) - evmLatest, err := e.blocks.LatestEVMHeight() +func (e *EVM) evmToCadenceHeight(height uint64) (uint64, error) { + cadenceHeight, err := e.blocks.GetCadenceHeight(height) if err != nil { return 0, fmt.Errorf( - "failed to map evm height: %d to cadence height, getting latest evm height: %w", - evmHeight, + "failed to map evm height: %d to cadence height: %w", + height, err, ) } - // if provided evm height equals to latest evm height indexed we - // return latest height special value to signal requester to execute - // script at the latest block, not at the cadence height we get from the - // index, that is because at that point the height might already be pruned - if evmHeight == evmLatest { - return LatestBlockHeight, nil - } - - cadenceHeight, err := e.blocks.GetCadenceHeight(uint64(evmHeight)) - if err != nil { - return 0, fmt.Errorf("failed to map evm height: %d to cadence height: %w", evmHeight, err) - } - return cadenceHeight, nil } -// executeScriptAtHeight will execute the given script, at the given -// block height, with the given arguments. A height of `LatestBlockHeight` -// (math.MaxUint64 - 1) is a special value, which means the script will be -// executed at the latest sealed block. -func (e *EVM) executeScriptAtHeight( - ctx context.Context, - scriptType scriptType, - height uint64, - arguments []cadence.Value, -) (cadence.Value, error) { - script, ok := scripts[scriptType] - if !ok { - return nil, fmt.Errorf("unknown script type") - } - - // try and get the value from the cache if key is supported - key := cacheKey(scriptType, height, arguments) - if key != "" && e.scriptCache != nil { - val, ok := e.scriptCache.Get(key) - if ok { - e.logger.Info(). - Uint64("evm-height", height). - Int("script", int(scriptType)). - Str("result", val.String()). 
- Msg("cache hit") - return val, nil - } - } - - var res cadence.Value - var err error - - if height == LatestBlockHeight { - res, err = e.client.ExecuteScriptAtLatestBlock( - ctx, - e.replaceAddresses(script), - arguments, - ) - } else { - res, err = e.client.ExecuteScriptAtBlockHeight( - ctx, - height, - e.replaceAddresses(script), - arguments, - ) - } - if err != nil { - // if snapshot doesn't exist on EN, the height at which script was executed is out - // of the boundaries the EN keeps state, so return out of range - const storageError = "failed to create storage snapshot" - if strings.Contains(err.Error(), storageError) { - return nil, errs.NewHeightOutOfRangeError(height) - } - } else if key != "" && e.scriptCache != nil { // if error is nil and key is supported add to cache - e.scriptCache.Add(key, res) - } - - return res, err -} - -func addressToCadenceString(address common.Address) (cadence.String, error) { - return cadence.NewString( - strings.TrimPrefix(address.Hex(), "0x"), - ) -} - -func cadenceStringToBytes(value cadence.Value) ([]byte, error) { - cdcString, ok := value.(cadence.String) - if !ok { - return nil, fmt.Errorf( - "failed to convert cadence value of type: %T to string: %v", - value, - value, - ) - } - - code, err := hex.DecodeString(string(cdcString)) - if err != nil { - return nil, fmt.Errorf("failed to hex-decode string to byte array [%s]: %w", cdcString, err) - } - - return code, nil -} - -// parseResult -func parseResult(res cadence.Value) (*evmTypes.ResultSummary, error) { - result, err := evmImpl.ResultSummaryFromEVMResultValue(res) - if err != nil { - return nil, fmt.Errorf("failed to decode EVM result of type: %s, with: %w", res.Type().ID(), err) - } - - if result.ErrorCode != 0 { - if result.ErrorCode == evmTypes.ExecutionErrCodeExecutionReverted { - return nil, errs.NewRevertError(result.ReturnedData) - } - return nil, errs.NewFailedTransactionError(result.ErrorMessage) - } - - return result, err -} - -// cacheKey builds the 
cache key from the script type, height and arguments. -func cacheKey(scriptType scriptType, height uint64, args []cadence.Value) string { - key := fmt.Sprintf("%d%d", scriptType, height) - - switch scriptType { - case getBalance: - if len(args) != 1 { - return "" - } - v := args[0].(cadence.String) - key = fmt.Sprintf("%s%s", key, string(v)) - case getNonce: - if len(args) != 1 { - return "" - } - v := args[0].(cadence.String) - key = fmt.Sprintf("%s%s", key, string(v)) - case getLatest: - // no additional arguments - default: - return "" - } - - return key +func AddOne64th(n uint64) uint64 { + // NOTE: Go's integer division floors, but that is desirable here + return n + (n / 64) } diff --git a/services/requester/requester_test.go b/services/requester/requester_test.go deleted file mode 100644 index f40ab8641..000000000 --- a/services/requester/requester_test.go +++ /dev/null @@ -1,228 +0,0 @@ -package requester - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/hashicorp/golang-lru/v2/expirable" - "github.com/onflow/cadence" - "github.com/onflow/flow-go-sdk/access/mocks" - flowGo "github.com/onflow/flow-go/model/flow" - "github.com/rs/zerolog" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-evm-gateway/config" -) - -func Test_Caching(t *testing.T) { - t.Run("Get balance at height cached", func(t *testing.T) { - mockClient := &mocks.Client{} - - cache := expirable.NewLRU[string, cadence.Value](1000, nil, time.Second) - e := createEVM(t, cache, mockClient) - - height := uint64(100) - address, _ := cadence.NewString("123") - balance := cadence.NewInt(1) - - mockClient. - On("ExecuteScriptAtBlockHeight", mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(balance, nil). 
- Once() - - require.Equal(t, 0, cache.Len()) // empty cache - - // first request goes through the above mock client, - // additional requests should be processed with cache, note the above mock client - // is only set to once, so if cache is a miss it would fail to call the client again - for i := 0; i < 5; i++ { - val, err := e.executeScriptAtHeight(context.Background(), getBalance, height, []cadence.Value{address}) - require.NoError(t, err) - require.Equal(t, balance, val) - // cache should be filled - require.Equal(t, 1, cache.Len()) - } - }) - - t.Run("Get balance at latest height cached", func(t *testing.T) { - mockClient := &mocks.Client{} - - cache := expirable.NewLRU[string, cadence.Value](1000, nil, time.Second) - e := createEVM(t, cache, mockClient) - - height := LatestBlockHeight - address, _ := cadence.NewString("123") - balance := cadence.NewInt(1) - - mockClient. - On("ExecuteScriptAtLatestBlock", mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(balance, nil). - Once() - - require.Equal(t, 0, cache.Len()) // empty cache - - // first request goes through the above mock client, - // additional requests should be processed with cache, note the above mock client - // is only set to once, so if cache is a miss it would fail to call the client again - for i := 0; i < 5; i++ { - val, err := e.executeScriptAtHeight(context.Background(), getBalance, height, []cadence.Value{address}) - require.NoError(t, err) - require.Equal(t, balance, val) - // cache should be filled - require.Equal(t, 1, cache.Len()) - } - }) - - t.Run("Get balance cache expires and is added again", func(t *testing.T) { - mockClient := &mocks.Client{} - - cacheExpiry := time.Millisecond * 100 - cache := expirable.NewLRU[string, cadence.Value](1000, nil, cacheExpiry) - e := createEVM(t, cache, mockClient) - - height := LatestBlockHeight - address, _ := cadence.NewString("123") - balance := cadence.NewInt(1) - - mockClient. 
- On("ExecuteScriptAtLatestBlock", mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(balance, nil). - Once() - - require.Equal(t, 0, cache.Len()) // empty cache - - // first request goes through the above mock client, - // additional requests should be processed with cache, note the above mock client - // is only set to once, so if cache is a miss it would fail to call the client again - for i := 0; i < 5; i++ { - val, err := e.executeScriptAtHeight(context.Background(), getBalance, height, []cadence.Value{address}) - require.NoError(t, err) - require.Equal(t, balance, val) - // cache should be filled - require.Equal(t, 1, cache.Len()) - } - - // wait for cache expiry - time.Sleep(cacheExpiry + 100*time.Millisecond) - - require.Equal(t, 0, cache.Len()) // make sure cache is empty - - // re-set the mock - mockClient. - On("ExecuteScriptAtLatestBlock", mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(balance, nil). - Once() - val, err := e.executeScriptAtHeight(context.Background(), getBalance, height, []cadence.Value{address}) - require.NoError(t, err) - require.Equal(t, balance, val) - require.Equal(t, 1, cache.Len()) - }) - - t.Run("Get balance multiple addresses and heights", func(t *testing.T) { - mockClient := &mocks.Client{} - - cache := expirable.NewLRU[string, cadence.Value](1000, nil, time.Second) - e := createEVM(t, cache, mockClient) - - type acc struct { - height uint64 - address cadence.String - balance cadence.Int - } - - tests := []acc{{ - height: 1002233, - address: cadence.String("1AC87F33D10b76E8BDd4fb501445A5ec413eb121"), - balance: cadence.NewInt(23958395), - }, { - height: 2002233, - address: cadence.String("A3014d9F6162a162BAD9Ff15346A4B82A56F841f"), - balance: cadence.NewInt(1), - }, { - height: 3002233, - address: cadence.String("53e6A4b36a56CB68fe54661416Be2c5b3Ee193c9"), - balance: cadence.NewInt(4), - }, { - height: 4002233, - address: cadence.String("839fEfa0750798B3A0BD9c925871e3f5027a5d44"), - 
balance: cadence.NewInt(3), - }, { - height: 7002233, - address: cadence.String("243a064089cF765E1F270B90913Db31cdDf299F5"), - balance: cadence.NewInt(5), - }} - - for i, test := range tests { - mockClient. - On("ExecuteScriptAtBlockHeight", mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(test.balance, nil). - Once() - - val, err := e.executeScriptAtHeight(context.Background(), getBalance, test.height, []cadence.Value{test.address}) - require.NoError(t, err) - require.Equal(t, test.balance, val) - // cache should be filled - require.Equal(t, i+1, cache.Len()) - } - - require.Equal(t, len(tests), cache.Len()) - - // first request goes through the above mock client, - // additional requests should be processed with cache, note the above mock client - // is only set to once, so if cache is a miss it would fail to call the client again - for _, test := range tests { - val, err := e.executeScriptAtHeight(context.Background(), getBalance, test.height, []cadence.Value{test.address}) - require.NoError(t, err) - require.Equal(t, test.balance, val) - // cache should be filled - require.Equal(t, len(tests), cache.Len()) - } - }) -} - -func Test_CacheKey(t *testing.T) { - addr, _ := cadence.NewString("0x1") - h := uint64(100) - - key := cacheKey(getBalance, h, []cadence.Value{addr}) - require.Equal(t, fmt.Sprintf("%d%d%s", getBalance, h, string(addr)), key) - - key = cacheKey(getBalance, LatestBlockHeight, []cadence.Value{addr}) - require.Equal(t, fmt.Sprintf("%d%d%s", getBalance, LatestBlockHeight, string(addr)), key) - - key = cacheKey(getNonce, LatestBlockHeight, []cadence.Value{addr}) - require.Equal(t, fmt.Sprintf("%d%d%s", getNonce, LatestBlockHeight, string(addr)), key) - - key = cacheKey(getNonce, h, []cadence.Value{addr}) - require.Equal(t, fmt.Sprintf("%d%d%s", getNonce, h, string(addr)), key) - - key = cacheKey(getLatest, LatestBlockHeight, nil) - require.Equal(t, fmt.Sprintf("%d%d", getLatest, LatestBlockHeight), key) - - key = 
cacheKey(getCode, LatestBlockHeight, nil) - require.Equal(t, "", key) - - key = cacheKey(getBalance, LatestBlockHeight, []cadence.Value{addr, addr}) - require.Equal(t, "", key) - -} - -func createEVM(t *testing.T, cache *expirable.LRU[string, cadence.Value], mockClient *mocks.Client) *EVM { - networkID := flowGo.Emulator - log := zerolog.New(zerolog.NewTestWriter(t)) - - client, err := NewCrossSporkClient(mockClient, nil, log, networkID) - require.NoError(t, err) - - return &EVM{ - client: client, - logger: log, - scriptCache: cache, - config: &config.Config{ - FlowNetworkID: networkID, - }, - } -} diff --git a/services/requester/utils.go b/services/requester/utils.go new file mode 100644 index 000000000..d3ee4d7fc --- /dev/null +++ b/services/requester/utils.go @@ -0,0 +1,31 @@ +package requester + +import ( + "fmt" + "strings" + + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/flow" +) + +// replaceAddresses replace the addresses based on the network +func replaceAddresses(script []byte, chainID flow.ChainID) []byte { + // make the list of all contracts we should replace address for + sc := systemcontracts.SystemContractsForChain(chainID) + contracts := []systemcontracts.SystemContract{ + sc.EVMContract, + sc.FungibleToken, + sc.FlowToken, + } + + s := string(script) + // iterate over all the import name and address pairs and replace them in script + for _, contract := range contracts { + s = strings.ReplaceAll(s, + fmt.Sprintf("import %s", contract.Name), + fmt.Sprintf("import %s from %s", contract.Name, contract.Address.HexWithPrefix()), + ) + } + + return []byte(s) +} diff --git a/services/traces/downloader.go b/services/traces/downloader.go deleted file mode 100644 index 4647ee594..000000000 --- a/services/traces/downloader.go +++ /dev/null @@ -1,78 +0,0 @@ -package traces - -import ( - "context" - "encoding/json" - "fmt" - "io" - "time" - - "cloud.google.com/go/storage" - "github.com/onflow/flow-go-sdk" - 
"github.com/onflow/go-ethereum/common" - "github.com/rs/zerolog" - "google.golang.org/api/option" -) - -const downloadTimeout = 60 * time.Minute - -type Downloader interface { - // Download traces or returning an error with the failure - Download(txID common.Hash, blockIO flow.Identifier) (json.RawMessage, error) -} - -var _ Downloader = &GCPDownloader{} - -type GCPDownloader struct { - client *storage.Client - logger zerolog.Logger - bucket *storage.BucketHandle -} - -func NewGCPDownloader(bucketName string, logger zerolog.Logger) (*GCPDownloader, error) { - if bucketName == "" { - return nil, fmt.Errorf("must provide bucket name") - } - - ctx := context.Background() - // we don't require authentication for public bucket - client, err := storage.NewClient(ctx, option.WithoutAuthentication()) - if err != nil { - return nil, fmt.Errorf("failed to create Google Cloud Storage client: %w", err) - } - - return &GCPDownloader{ - client: client, - logger: logger, - bucket: client.Bucket(bucketName), - }, nil -} - -func (g *GCPDownloader) Download(txID common.Hash, blockID flow.Identifier) (json.RawMessage, error) { - l := g.logger.With(). - Str("tx-id", txID.String()). - Str("cadence-block-id", blockID.String()). 
- Logger() - - l.Debug().Msg("downloading transaction trace") - - ctx, cancel := context.WithTimeout(context.Background(), downloadTimeout) - defer cancel() - - id := fmt.Sprintf("%s-%s", blockID.String(), txID.String()) - - rc, err := g.bucket.Object(id).NewReader(ctx) - if err != nil { - return nil, fmt.Errorf("failed to download object id %s: %w", id, err) - } - defer rc.Close() - - trace, err := io.ReadAll(rc) - if err != nil { - return nil, fmt.Errorf("failed to read trace id %s: %w", id, err) - } - - l.Info().Int("trace-size", len(trace)).Msg("transaction trace downloaded") - - return trace, nil -} diff --git a/services/traces/engine.go b/services/traces/engine.go deleted file mode 100644 index a76830c01..000000000 --- a/services/traces/engine.go +++ /dev/null @@ -1,193 +0,0 @@ -package traces - -import ( - "context" - "sync" - "time" - - "github.com/onflow/flow-go-sdk" - gethCommon "github.com/onflow/go-ethereum/common" - "github.com/rs/zerolog" - "github.com/sethvargo/go-retry" - - "github.com/onflow/flow-evm-gateway/metrics" - "github.com/onflow/flow-evm-gateway/models" - "github.com/onflow/flow-evm-gateway/storage" -) - -var _ models.Engine = &Engine{} - -// Engine is an implementation of the trace downloader engine. -// -// Traces are ethereum transaction execution traces: https://geth.ethereum.org/docs/developers/evm-tracing -// Currently EVM gateway doesn't produce the traces since it doesn't -// execute the transactions and is thus relying on the execution node -// to produce and upload the traces during execution. This engine -// listens for new transaction events and then downloads and index the -// traces from the transaction execution. -type Engine struct { - *models.EngineStatus - - logger zerolog.Logger - blocksPublisher *models.Publisher[*models.Block] - blocks storage.BlockIndexer - traces storage.TraceIndexer - downloader Downloader - collector metrics.Collector -} - -// NewTracesIngestionEngine creates a new instance of the engine. 
-func NewTracesIngestionEngine( - blocksPublisher *models.Publisher[*models.Block], - blocks storage.BlockIndexer, - traces storage.TraceIndexer, - downloader Downloader, - logger zerolog.Logger, - collector metrics.Collector, -) *Engine { - return &Engine{ - EngineStatus: models.NewEngineStatus(), - - logger: logger.With().Str("component", "trace-ingestion").Logger(), - blocksPublisher: blocksPublisher, - blocks: blocks, - traces: traces, - downloader: downloader, - collector: collector, - } -} - -// Run the engine. -// TODO: use the context to stop the engine. -func (e *Engine) Run(ctx context.Context) error { - // subscribe to new blocks - e.blocksPublisher.Subscribe(e) - - e.MarkReady() - return nil -} - -// Notify is a handler that is being used to subscribe for new EVM block notifications. -// This method should be non-blocking. -func (e *Engine) Notify(block *models.Block) { - // If the block has no transactions, we simply return early - // as there are no transaction traces to index. 
- if len(block.TransactionHashes) == 0 { - return - } - - l := e.logger.With().Uint64("evm-height", block.Height).Logger() - - cadenceID, err := e.blocks.GetCadenceID(block.Height) - if err != nil { - l.Error().Err(err).Msg("failed to get cadence block ID") - return - } - - go e.indexBlockTraces(block, cadenceID, false) -} - -// indexBlockTraces iterates the block transaction hashes and tries to download the traces -func (e *Engine) indexBlockTraces(evmBlock *models.Block, cadenceBlockID flow.Identifier, skipExisting bool) { - ctx, cancel := context.WithTimeout(context.Background(), downloadTimeout) - defer cancel() - - const maxConcurrentDownloads = 5 // limit number of concurrent downloads - limiter := make(chan struct{}, maxConcurrentDownloads) - - wg := sync.WaitGroup{} - - for _, h := range evmBlock.TransactionHashes { - wg.Add(1) - limiter <- struct{}{} // acquire a slot - - go func(h gethCommon.Hash) { - defer wg.Done() - defer func() { <-limiter }() // release a slot after done - - l := e.logger.With(). - Str("tx-id", h.String()). - Uint64("evm-height", evmBlock.Height). - Str("cadence-block-id", cadenceBlockID.String()). 
- Logger() - - if skipExisting { - if _, err := e.traces.GetTransaction(h); err == nil { - l.Debug().Msg("trace already downloaded") - return - } - } - - err := retry.Fibonacci(ctx, time.Second*1, func(ctx context.Context) error { - trace, err := e.downloader.Download(h, cadenceBlockID) - if err != nil { - l.Warn().Err(err).Msg("retrying failed download") - return retry.RetryableError(err) - } - - return e.traces.StoreTransaction(h, trace, nil) - }) - if err != nil { - e.collector.TraceDownloadFailed() - l.Error().Err(err).Msg("failed to download trace") - return - } - l.Info().Msg("trace downloaded successfully") - }(h) - } - - wg.Wait() -} - -// Error is required by the publisher, and we just return a nil, -// since the errors are handled gracefully in the indexBlockTraces -func (e *Engine) Error() <-chan error { - return nil -} - -func (e *Engine) Stop() { - e.MarkStopped() -} - -// Backfill redownloads traces for blocks from EVM start to end height. -func (e *Engine) Backfill(start uint64, end uint64) { - select { - case <-e.Ready(): - case <-e.Done(): - return - } - - lg := e.logger.With().Uint64("start", start).Uint64("end", end).Logger() - - lg.Info().Msg("backfilling traces") - for height := start; height <= end; height++ { - select { - case <-e.Done(): - return - case <-e.Stopped(): - return - default: - } - - l := lg.With().Uint64("evm-height", height).Logger() - - block, err := e.blocks.GetByHeight(height) - if err != nil { - l.Error().Err(err).Msg("failed to get block by height") - return - } - - if len(block.TransactionHashes) == 0 { - continue - } - - cadenceID, err := e.blocks.GetCadenceID(block.Height) - if err != nil { - l.Error().Err(err).Msg("failed to get cadence block ID") - return - } - - e.indexBlockTraces(block, cadenceID, true) - } - lg.Info().Msg("done backfilling traces") -} diff --git a/services/traces/engine_test.go b/services/traces/engine_test.go deleted file mode 100644 index 028309918..000000000 --- a/services/traces/engine_test.go 
+++ /dev/null @@ -1,288 +0,0 @@ -package traces - -import ( - "context" - "encoding/json" - "fmt" - "slices" - "strings" - "testing" - "time" - - pebbleDB "github.com/cockroachdb/pebble" - "github.com/onflow/flow-go-sdk" - gethCommon "github.com/onflow/go-ethereum/common" - "github.com/rs/zerolog" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-evm-gateway/metrics" - "github.com/onflow/flow-evm-gateway/models" - "github.com/onflow/flow-evm-gateway/services/traces/mocks" - storageMock "github.com/onflow/flow-evm-gateway/storage/mocks" -) - -// this test makes sure once a notification for a new block is triggered -// the block transaction hashes are iterated, and for each a trace is -// downloaded and stored. -func TestTraceIngestion(t *testing.T) { - t.Run("successful single block ingestion", func(t *testing.T) { - blockPublisher := models.NewPublisher[*models.Block]() - blocks := &storageMock.BlockIndexer{} - trace := &storageMock.TraceIndexer{} - downloader := &mocks.Downloader{} - - txTrace := func(id gethCommon.Hash) json.RawMessage { - return json.RawMessage(fmt.Sprintf(`{ - "id": "%s", - "from":"0x42fdd562221741a1db62a0f69a5a680367f07e33", - "gas":"0x15f900", - "gasUsed":"0x387dc", - "to":"0xca11bde05977b3631167028862be2a173976ca11" - }`, id.String())) - } - - latestHeight := uint64(0) - blockID := flow.Identifier{0x09} - hashes := []gethCommon.Hash{{0x1}, {0x2}, {0x3}} - block := storageMock.NewBlock(latestHeight + 1) - block.TransactionHashes = hashes - - blocks. - On("GetByHeight", mock.Anything). - Return(func(height uint64) (*models.Block, error) { - require.Equal(t, latestHeight+1, height) // make sure it gets next block - return block, nil - }) - - blocks. - On("GetCadenceID", mock.Anything). - Return(func(height uint64) (flow.Identifier, error) { - require.Equal(t, latestHeight+1, height) - return blockID, nil - }) - - downloader. - On("Download", mock.Anything, mock.Anything). 
- Return(func(txID gethCommon.Hash, blkID flow.Identifier) (json.RawMessage, error) { - require.Equal(t, blockID, blkID) - time.Sleep(time.Millisecond * 200) // simulate download delay - return txTrace(txID), nil - }) - - stored := make(chan gethCommon.Hash, len(hashes)) - trace. - On("StoreTransaction", mock.Anything, mock.Anything, mock.Anything). - Return(func(ID gethCommon.Hash, trace json.RawMessage, _ *pebbleDB.Batch) error { - require.Equal(t, txTrace(ID), trace) - stored <- ID - return nil - }) - - engine := NewTracesIngestionEngine( - blockPublisher, - blocks, - trace, - downloader, - zerolog.Nop(), - metrics.NopCollector, - ) - - err := engine.Run(context.Background()) - require.NoError(t, err) - - blockPublisher.Publish(block) - - // make sure stored was called as many times as block contained hashes - require.Eventuallyf(t, func() bool { - return len(stored) == len(hashes) - }, time.Second, time.Millisecond*50, "index not run") - - close(stored) - storedHashes := make([]string, 0) - for h := range stored { - storedHashes = append(storedHashes, h.String()) - } - - // make sure we stored all the hashes in the block - for _, h := range hashes { - require.True(t, slices.Contains(storedHashes, h.String())) - } - }) - - t.Run("successful multiple blocks ingestion", func(t *testing.T) { - blocksPublisher := models.NewPublisher[*models.Block]() - blocks := &storageMock.BlockIndexer{} - trace := &storageMock.TraceIndexer{} - downloader := &mocks.Downloader{} - - txTrace := func(id gethCommon.Hash) json.RawMessage { - return json.RawMessage(fmt.Sprintf(`{ - "id": "%s", - "from":"0x42fdd562221741a1db62a0f69a5a680367f07e33", - "gas":"0x15f900", - "gasUsed":"0x387dc", - "to":"0xca11bde05977b3631167028862be2a173976ca11" - }`, id.String())) - } - - latestHeight := uint64(0) - - const blockCount = 10 - const txCount = 50 - - // generate mock blocks, each with mock transactions - mockBlocks := make([]*models.Block, blockCount+1) - mockCadenceIDs := 
make([]flow.Identifier, blockCount+1) - - for i := range mockBlocks { - b := storageMock.NewBlock(uint64(i)) - cid := flow.Identifier{byte(i + 10)} - - h := make([]gethCommon.Hash, txCount) - for j := range h { - h[j] = gethCommon.Hash{byte(j), byte(i)} - } - - b.TransactionHashes = h - mockBlocks[i] = b - mockCadenceIDs[i] = cid - } - - blocks. - On("GetCadenceID", mock.Anything). - Return(func(height uint64) (flow.Identifier, error) { - latestHeight++ - require.Equal(t, latestHeight, height) - require.Less(t, int(height), len(mockCadenceIDs)) - return mockCadenceIDs[height], nil - }) - - downloadedIDs := make(chan string, blockCount*txCount) - downloader. - On("Download", mock.Anything, mock.Anything). - Return(func(txID gethCommon.Hash, blkID flow.Identifier) (json.RawMessage, error) { - id := fmt.Sprintf("%s-%s", blkID.String(), txID.String()) - downloadedIDs <- id - time.Sleep(time.Millisecond * 200) // simulate download delay - return txTrace(txID), nil - }) - - stored := make(chan gethCommon.Hash, blockCount*txCount) - trace. - On("StoreTransaction", mock.Anything, mock.Anything, mock.Anything). 
- Return(func(ID gethCommon.Hash, trace json.RawMessage, _ *pebbleDB.Batch) error { - require.Equal(t, txTrace(ID), trace) - stored <- ID - return nil - }) - - engine := NewTracesIngestionEngine( - blocksPublisher, - blocks, - trace, - downloader, - zerolog.Nop(), - metrics.NopCollector, - ) - - err := engine.Run(context.Background()) - require.NoError(t, err) - - for i := 0; i < blockCount; i++ { - blocksPublisher.Publish(mockBlocks[i+1]) - time.Sleep(time.Millisecond * 100) // simulate block delay - } - - // make sure download was called as many times as all blocks times the hashes it contained - require.Eventuallyf(t, func() bool { - return len(downloadedIDs) == blockCount*txCount - }, time.Second*10, time.Millisecond*100, "traces not downloaded") - - close(downloadedIDs) - - // make sure stored was called as many times as all blocks times the hashes it contained - require.Eventuallyf(t, func() bool { - return len(stored) == blockCount*txCount - }, time.Second*10, time.Millisecond*100, "traces not indexed") - - close(stored) - - // make sure we downloaded and indexed all the hashes in the block - for id := range downloadedIDs { - found := false - for _, b := range mockBlocks { - for _, h := range b.TransactionHashes { - txID := strings.Split(id, "-")[1] - if txID == h.String() { - found = true - break - } - } - if found { - break - } - } - require.True(t, found, fmt.Sprintf("id %s not found", id)) - } - }) - - t.Run("failed download retries", func(t *testing.T) { - blockBroadcaster := models.NewPublisher[*models.Block]() - blocks := &storageMock.BlockIndexer{} - downloader := &mocks.Downloader{} - trace := &storageMock.TraceIndexer{} - logger := zerolog.New(zerolog.NewTestWriter(t)) - collector := metrics.NopCollector - - latestHeight := uint64(0) - blockID := flow.Identifier{0x09} - hashes := []gethCommon.Hash{{0x1}} - block := storageMock.NewBlock(latestHeight + 1) - block.TransactionHashes = hashes - - blocks. - On("GetByHeight", mock.Anything). 
- Return(func(height uint64) (*models.Block, error) { - require.Equal(t, latestHeight+1, height) // make sure it gets next block - return block, nil - }) - - blocks. - On("GetCadenceID", mock.Anything). - Return(func(height uint64) (flow.Identifier, error) { - require.Equal(t, latestHeight+1, height) - return blockID, nil - }) - - const retriesNum = 3 - downloads := make(chan struct{}, retriesNum) - downloader. - On("Download", mock.Anything, mock.Anything). - Return(func(txID gethCommon.Hash, blkID flow.Identifier) (json.RawMessage, error) { - downloads <- struct{}{} - return nil, fmt.Errorf("failed download") - }) - - engine := NewTracesIngestionEngine( - blockBroadcaster, - blocks, - trace, - downloader, - logger, - collector, - ) - - err := engine.Run(context.Background()) - require.NoError(t, err) - - blockBroadcaster.Publish(block) - - // make sure stored was called as many times as block contained hashes - require.Eventuallyf(t, func() bool { - return len(downloads) == retriesNum - }, time.Second*10, time.Millisecond*200, "download not retried") - - close(downloads) - }) -} diff --git a/services/traces/mocks/Downloader.go b/services/traces/mocks/Downloader.go deleted file mode 100644 index 83353acb0..000000000 --- a/services/traces/mocks/Downloader.go +++ /dev/null @@ -1,61 +0,0 @@ -// Code generated by mockery v2.43.2. DO NOT EDIT. 
- -package mocks - -import ( - flow "github.com/onflow/flow-go-sdk" - common "github.com/onflow/go-ethereum/common" - - json "encoding/json" - - mock "github.com/stretchr/testify/mock" -) - -// Downloader is an autogenerated mock type for the Downloader type -type Downloader struct { - mock.Mock -} - -// Download provides a mock function with given fields: txID, blockIO -func (_m *Downloader) Download(txID common.Hash, blockIO flow.Identifier) (json.RawMessage, error) { - ret := _m.Called(txID, blockIO) - - if len(ret) == 0 { - panic("no return value specified for Download") - } - - var r0 json.RawMessage - var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, flow.Identifier) (json.RawMessage, error)); ok { - return rf(txID, blockIO) - } - if rf, ok := ret.Get(0).(func(common.Hash, flow.Identifier) json.RawMessage); ok { - r0 = rf(txID, blockIO) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(json.RawMessage) - } - } - - if rf, ok := ret.Get(1).(func(common.Hash, flow.Identifier) error); ok { - r1 = rf(txID, blockIO) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// NewDownloader creates a new instance of Downloader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewDownloader(t interface { - mock.TestingT - Cleanup(func()) -}) *Downloader { - mock := &Downloader{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/storage/index.go b/storage/index.go index 38cebeb73..b83e80b82 100644 --- a/storage/index.go +++ b/storage/index.go @@ -1,8 +1,6 @@ package storage import ( - "math/big" - "github.com/cockroachdb/pebble" "github.com/goccy/go-json" "github.com/onflow/flow-go-sdk" @@ -97,19 +95,6 @@ type TransactionIndexer interface { Get(ID common.Hash) (models.Transaction, error) } -type AccountIndexer interface { - // Update account with executed transactions. 
- // Batch is required to batch multiple indexer operations, skipped if nil. - Update(tx models.Transaction, receipt *models.Receipt, batch *pebble.Batch) error - - // GetNonce gets an account nonce. If no nonce was indexed it returns 0. - // todo add getting nonce at provided block height / hash - GetNonce(address common.Address) (uint64, error) - - // GetBalance gets an account balance. If no balance was indexed it returns 0. - GetBalance(address common.Address) (*big.Int, error) -} - type TraceIndexer interface { // StoreTransaction will index transaction trace by the transaction ID. // Batch is required to batch multiple indexer operations, skipped if nil. diff --git a/storage/index_testsuite.go b/storage/index_test.go similarity index 71% rename from storage/index_testsuite.go rename to storage/index_test.go index bdd608f27..207873d9a 100644 --- a/storage/index_testsuite.go +++ b/storage/index_test.go @@ -1,14 +1,20 @@ -package storage +package storage_test import ( "fmt" + "testing" + + pebble2 "github.com/cockroachdb/pebble" + "github.com/onflow/flow-evm-gateway/config" + "github.com/onflow/flow-evm-gateway/storage" + "github.com/onflow/flow-evm-gateway/storage/pebble" + flowGo "github.com/onflow/flow-go/model/flow" + "github.com/stretchr/testify/require" "github.com/goccy/go-json" "github.com/onflow/flow-go-sdk" - evmEmulator "github.com/onflow/flow-go/fvm/evm/emulator" "github.com/onflow/go-ethereum/common" "github.com/onflow/go-ethereum/core/types" - "github.com/onflow/go-ethereum/crypto" "github.com/stretchr/testify/suite" "github.com/onflow/flow-evm-gateway/models" @@ -16,9 +22,70 @@ import ( "github.com/onflow/flow-evm-gateway/storage/mocks" ) +// tests that make sure the implementation conform to the interface expected behaviour +func TestBlocks(t *testing.T) { + runDB("blocks", t, func(t *testing.T, db *pebble.Storage) { + bl := pebble.NewBlocks(db, flowGo.Emulator) + batch := db.NewBatch() + + err := bl.InitHeights(config.EmulatorInitCadenceHeight, 
flow.Identifier{0x1}, batch) + require.NoError(t, err) + + err = batch.Commit(pebble2.Sync) + require.NoError(t, err) + + suite.Run(t, &BlockTestSuite{ + Blocks: bl, + DB: db, + }) + }) +} + +func TestReceipts(t *testing.T) { + runDB("receipts", t, func(t *testing.T, db *pebble.Storage) { + // prepare the blocks database since they track heights which are used in receipts as well + bl := pebble.NewBlocks(db, flowGo.Emulator) + batch := db.NewBatch() + + err := bl.InitHeights(config.EmulatorInitCadenceHeight, flow.Identifier{0x1}, batch) + require.NoError(t, err) + err = bl.Store(30, flow.Identifier{0x1}, mocks.NewBlock(10), batch) // update first and latest height + require.NoError(t, err) + err = bl.Store(30, flow.Identifier{0x1}, mocks.NewBlock(300), batch) // update latest + require.NoError(t, err) + + err = batch.Commit(pebble2.Sync) + require.NoError(t, err) + + suite.Run(t, &ReceiptTestSuite{ + ReceiptIndexer: pebble.NewReceipts(db), + DB: db, + }) + }) +} + +func TestTransactions(t *testing.T) { + runDB("transactions", t, func(t *testing.T, db *pebble.Storage) { + suite.Run(t, &TransactionTestSuite{ + TransactionIndexer: pebble.NewTransactions(db), + DB: db, + }) + }) +} + +func TestTraces(t *testing.T) { + runDB("traces", t, func(t *testing.T, db *pebble.Storage) { + suite.Run(t, &TraceTestSuite{ + TraceIndexer: pebble.NewTraces(db), + DB: db, + }) + }) +} + type BlockTestSuite struct { suite.Suite - Blocks BlockIndexer + Blocks storage.BlockIndexer + DB *pebble.Storage } func (b *BlockTestSuite) TestGet() { @@ -26,7 +93,12 @@ func (b *BlockTestSuite) TestGet() { height := uint64(1) flowID := flow.Identifier{0x01} block := mocks.NewBlock(height) - err := b.Blocks.Store(height+1, flowID, block, nil) + batch := b.DB.NewBatch() + + err := b.Blocks.Store(height+1, flowID, block, batch) + b.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) b.Require().NoError(err) ID, err := block.Hash() @@ -59,17 +131,33 @@ func (b *BlockTestSuite) TestStore() { 
b.Run("success", func() { flowID := flow.Identifier{0x01} - err := b.Blocks.Store(2, flowID, block, nil) + batch := b.DB.NewBatch() + + err := b.Blocks.Store(2, flowID, block, batch) b.Require().NoError(err) + err = batch.Commit(pebble2.Sync) + b.Require().NoError(err) + + batch = b.DB.NewBatch() + // we allow overwriting blocks to make the actions idempotent - err = b.Blocks.Store(2, flowID, block, nil) + err = b.Blocks.Store(2, flowID, block, batch) + b.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) b.Require().NoError(err) }) b.Run("store multiple blocks, and get one", func() { + for i := 0; i < 10; i++ { - err := b.Blocks.Store(uint64(i+5), flow.Identifier{byte(i)}, mocks.NewBlock(uint64(10+i)), nil) + batch := b.DB.NewBatch() + + err := b.Blocks.Store(uint64(i+5), flow.Identifier{byte(i)}, mocks.NewBlock(uint64(10+i)), batch) + b.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) b.Require().NoError(err) } @@ -89,7 +177,12 @@ func (b *BlockTestSuite) TestHeights() { b.Run("last EVM height", func() { for i := 0; i < 5; i++ { lastHeight := uint64(100 + i) - err := b.Blocks.Store(lastHeight+10, flow.Identifier{byte(i)}, mocks.NewBlock(lastHeight), nil) + batch := b.DB.NewBatch() + + err := b.Blocks.Store(lastHeight+10, flow.Identifier{byte(i)}, mocks.NewBlock(lastHeight), batch) + b.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) b.Require().NoError(err) last, err := b.Blocks.LatestEVMHeight() @@ -109,7 +202,12 @@ func (b *BlockTestSuite) TestHeights() { for i, evmHeight := range evmHeights { blocks[i] = mocks.NewBlock(evmHeight) - err := b.Blocks.Store(uint64(i), cadenceIDs[i], blocks[i], nil) + batch := b.DB.NewBatch() + + err := b.Blocks.Store(uint64(i), cadenceIDs[i], blocks[i], batch) + b.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) b.Require().NoError(err) } @@ -125,7 +223,11 @@ func (b *BlockTestSuite) TestHeights() { b.Run("last Cadence height", func() { for i := 0; i < 5; i++ { lastHeight := uint64(100 + 
i) - err := b.Blocks.Store(lastHeight, flow.Identifier{byte(i)}, mocks.NewBlock(lastHeight-10), nil) + batch := b.DB.NewBatch() + err := b.Blocks.Store(lastHeight, flow.Identifier{byte(i)}, mocks.NewBlock(lastHeight-10), batch) + b.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) b.Require().NoError(err) last, err := b.Blocks.LatestCadenceHeight() @@ -138,7 +240,11 @@ func (b *BlockTestSuite) TestHeights() { evmHeights := []uint64{10, 11, 12, 13} cadenceHeights := []uint64{20, 24, 26, 27} for i, evmHeight := range evmHeights { - err := b.Blocks.Store(cadenceHeights[i], flow.Identifier{byte(i)}, mocks.NewBlock(evmHeight), nil) + batch := b.DB.NewBatch() + err := b.Blocks.Store(cadenceHeights[i], flow.Identifier{byte(i)}, mocks.NewBlock(evmHeight), batch) + b.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) b.Require().NoError(err) } @@ -153,7 +259,11 @@ func (b *BlockTestSuite) TestHeights() { evmHeights := []uint64{10, 11, 12, 13} cadenceIDs := []flow.Identifier{{0x01}, {0x02}, {0x03}, {0x04}} for i, evmHeight := range evmHeights { - err := b.Blocks.Store(uint64(i), cadenceIDs[i], mocks.NewBlock(evmHeight), nil) + batch := b.DB.NewBatch() + err := b.Blocks.Store(uint64(i), cadenceIDs[i], mocks.NewBlock(evmHeight), batch) + b.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) b.Require().NoError(err) } @@ -167,14 +277,19 @@ func (b *BlockTestSuite) TestHeights() { type ReceiptTestSuite struct { suite.Suite - ReceiptIndexer ReceiptIndexer + ReceiptIndexer storage.ReceiptIndexer + DB *pebble.Storage } func (s *ReceiptTestSuite) TestStoreReceipt() { s.Run("store receipt successfully", func() { receipt := mocks.NewReceipt(1, common.HexToHash("0xf1")) - err := s.ReceiptIndexer.Store([]*models.Receipt{receipt}, nil) + batch := s.DB.NewBatch() + err := s.ReceiptIndexer.Store([]*models.Receipt{receipt}, batch) + s.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) s.Require().NoError(err) }) @@ -194,7 +309,11 @@ func (s 
*ReceiptTestSuite) TestStoreReceipt() { } } - err := s.ReceiptIndexer.Store(receipts, nil) + batch := s.DB.NewBatch() + err := s.ReceiptIndexer.Store(receipts, batch) + s.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) s.Require().NoError(err) storeReceipts, err := s.ReceiptIndexer.GetByBlockHeight(height) @@ -211,7 +330,8 @@ func (s *ReceiptTestSuite) TestStoreReceipt() { mocks.NewReceipt(2, common.HexToHash("0x2")), } - err := s.ReceiptIndexer.Store(receipts, nil) + batch := s.DB.NewBatch() + err := s.ReceiptIndexer.Store(receipts, batch) s.Require().EqualError(err, "can't store receipts for multiple heights") }) } @@ -219,7 +339,11 @@ func (s *ReceiptTestSuite) TestStoreReceipt() { func (s *ReceiptTestSuite) TestGetReceiptByTransactionID() { s.Run("existing transaction ID", func() { receipt := mocks.NewReceipt(2, common.HexToHash("0xf2")) - err := s.ReceiptIndexer.Store([]*models.Receipt{receipt}, nil) + batch := s.DB.NewBatch() + err := s.ReceiptIndexer.Store([]*models.Receipt{receipt}, batch) + s.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) s.Require().NoError(err) retReceipt, err := s.ReceiptIndexer.GetByTransactionID(receipt.TxHash) @@ -238,11 +362,21 @@ func (s *ReceiptTestSuite) TestGetReceiptByTransactionID() { func (s *ReceiptTestSuite) TestGetReceiptByBlockHeight() { s.Run("existing block height", func() { receipt := mocks.NewReceipt(3, common.HexToHash("0x1")) - err := s.ReceiptIndexer.Store([]*models.Receipt{receipt}, nil) + batch := s.DB.NewBatch() + err := s.ReceiptIndexer.Store([]*models.Receipt{receipt}, batch) + s.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) s.Require().NoError(err) + + batch = s.DB.NewBatch() + // add one more receipt that shouldn't be retrieved r := mocks.NewReceipt(4, common.HexToHash("0x2")) - s.Require().NoError(s.ReceiptIndexer.Store([]*models.Receipt{r}, nil)) + s.Require().NoError(s.ReceiptIndexer.Store([]*models.Receipt{r}, batch)) + + err = batch.Commit(pebble2.Sync) + 
s.Require().NoError(err) retReceipts, err := s.ReceiptIndexer.GetByBlockHeight(receipt.BlockNumber.Uint64()) s.Require().NoError(err) @@ -268,7 +402,11 @@ func (s *ReceiptTestSuite) TestBloomsForBlockRange() { r := mocks.NewReceipt(i, common.HexToHash(fmt.Sprintf("0xf1%d", i))) testBlooms = append(testBlooms, &r.Bloom) testHeights = append(testHeights, i) - err := s.ReceiptIndexer.Store([]*models.Receipt{r}, nil) + batch := s.DB.NewBatch() + err := s.ReceiptIndexer.Store([]*models.Receipt{r}, batch) + s.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) s.Require().NoError(err) } @@ -307,7 +445,10 @@ func (s *ReceiptTestSuite) TestBloomsForBlockRange() { r2 := mocks.NewReceipt(i, common.HexToHash(fmt.Sprintf("0x%d", i))) receipts := []*models.Receipt{r1, r2} - s.Require().NoError(s.ReceiptIndexer.Store(receipts, nil)) + batch := s.DB.NewBatch() + s.Require().NoError(s.ReceiptIndexer.Store(receipts, batch)) + err := batch.Commit(pebble2.Sync) + s.Require().NoError(err) testBlooms = append(testBlooms, &r1.Bloom, &r2.Bloom) testHeights = append(testHeights, i) @@ -355,7 +496,12 @@ func (s *ReceiptTestSuite) TestBloomsForBlockRange() { for i := start; i < end; i++ { r1 := mocks.NewReceipt(i, common.HexToHash(fmt.Sprintf("0x%d", i))) receipts := []*models.Receipt{r1} - s.Require().NoError(s.ReceiptIndexer.Store(receipts, nil)) + + batch := s.DB.NewBatch() + s.Require().NoError(s.ReceiptIndexer.Store(receipts, batch)) + + err := batch.Commit(pebble2.Sync) + s.Require().NoError(err) if i == specific { expectedBloom = &r1.Bloom @@ -430,14 +576,20 @@ func (s *ReceiptTestSuite) compareReceipts(expected *models.Receipt, actual *mod type TransactionTestSuite struct { suite.Suite - TransactionIndexer TransactionIndexer + TransactionIndexer storage.TransactionIndexer + DB *pebble.Storage } func (s *TransactionTestSuite) TestStoreTransaction() { tx := mocks.NewTransaction(0) s.Run("store transaction successfully", func() { - err := s.TransactionIndexer.Store(tx, nil) + 
batch := s.DB.NewBatch() + + err := s.TransactionIndexer.Store(tx, batch) + s.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) s.Require().NoError(err) }) } @@ -445,7 +597,11 @@ func (s *TransactionTestSuite) TestStoreTransaction() { func (s *TransactionTestSuite) TestGetTransaction() { s.Run("existing transaction", func() { tx := mocks.NewTransaction(1) - err := s.TransactionIndexer.Store(tx, nil) + batch := s.DB.NewBatch() + err := s.TransactionIndexer.Store(tx, batch) + s.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) s.Require().NoError(err) txHash := tx.Hash() @@ -456,15 +612,23 @@ func (s *TransactionTestSuite) TestGetTransaction() { retTxHash := retTx.Hash() s.Require().Equal(txHash, retTxHash) // if hashes are equal the data must be equal + batch = s.DB.NewBatch() // allow same transaction overwrites - s.Require().NoError(s.TransactionIndexer.Store(retTx, nil)) + s.Require().NoError(s.TransactionIndexer.Store(retTx, batch)) + + err = batch.Commit(pebble2.Sync) + s.Require().NoError(err) }) s.Run("store multiple transactions and get single", func() { var tx models.Transaction for i := 0; i < 10; i++ { tx = mocks.NewTransaction(uint64(10 + i)) - err := s.TransactionIndexer.Store(tx, nil) + batch := s.DB.NewBatch() + err := s.TransactionIndexer.Store(tx, batch) + s.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) s.Require().NoError(err) } @@ -485,82 +649,23 @@ func (s *TransactionTestSuite) TestGetTransaction() { }) } -type AccountTestSuite struct { - suite.Suite - AccountIndexer AccountIndexer -} - -func (a *AccountTestSuite) TestNonce() { - - a.Run("update account and increase nonce", func() { - // todo add multiple accounts test - from := common.HexToAddress("FACF71692421039876a5BB4F10EF7A439D8ef61E") - rawKey := "f6d5333177711e562cabf1f311916196ee6ffc2a07966d9d4628094073bd5442" - key, err := crypto.HexToECDSA(rawKey) - a.Require().NoError(err) - - nonce, err := a.AccountIndexer.GetNonce(from) - a.Require().NoError(err) - 
a.Require().Equal(uint64(0), nonce) - - for i := 1; i < 5; i++ { - tx := mocks.NewTransaction(0) - - txCall, ok := tx.(models.TransactionCall) - a.Require().True(ok) - - txHash := tx.Hash() - - rcp := mocks.NewReceipt(uint64(i+5), txHash) - gethTx, err := types.SignTx(txCall.Transaction, evmEmulator.GetDefaultSigner(), key) - a.Require().NoError(err) - - tx = models.TransactionCall{Transaction: gethTx} - - err = a.AccountIndexer.Update(tx, rcp, nil) - a.Require().NoError(err) - - nonce, err = a.AccountIndexer.GetNonce(from) - a.Require().NoError(err) - a.Require().Equal(uint64(i), nonce) - } - - // if run second time we should still see same nonce values, since they won't be incremented - // because we track nonce with evm height, and if same height is used twice we don't update - for i := 1; i < 5; i++ { - tx := mocks.NewTransaction(0) - - txCall, ok := tx.(models.TransactionCall) - a.Require().True(ok) - - txHash := tx.Hash() - - rcp := mocks.NewReceipt(uint64(i+5), txHash) - gethTx, err := types.SignTx(txCall.Transaction, evmEmulator.GetDefaultSigner(), key) - a.Require().NoError(err) - - tx = models.TransactionCall{Transaction: gethTx} - - err = a.AccountIndexer.Update(tx, rcp, nil) - a.Require().NoError(err) - - nonce, err = a.AccountIndexer.GetNonce(from) - a.Require().NoError(err) - a.Require().Equal(uint64(4), nonce) // always equal to latest nonce - } - }) -} - type TraceTestSuite struct { suite.Suite - TraceIndexer TraceIndexer + TraceIndexer storage.TraceIndexer + DB *pebble.Storage } func (s *TraceTestSuite) TestStore() { s.Run("store new trace", func() { id := common.Hash{0x01} trace := json.RawMessage(`{ "test": "foo" }`) - err := s.TraceIndexer.StoreTransaction(id, trace, nil) + + batch := s.DB.NewBatch() + + err := s.TraceIndexer.StoreTransaction(id, trace, batch) + s.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) s.Require().NoError(err) }) @@ -568,7 +673,13 @@ func (s *TraceTestSuite) TestStore() { for i := 0; i < 2; i++ { id := 
common.Hash{0x01} trace := json.RawMessage(`{ "test": "foo" }`) - err := s.TraceIndexer.StoreTransaction(id, trace, nil) + + batch := s.DB.NewBatch() + + err := s.TraceIndexer.StoreTransaction(id, trace, batch) + s.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) s.Require().NoError(err) } }) @@ -579,7 +690,12 @@ func (s *TraceTestSuite) TestGet() { id := common.Hash{0x01} trace := json.RawMessage(`{ "test": "foo" }`) - err := s.TraceIndexer.StoreTransaction(id, trace, nil) + batch := s.DB.NewBatch() + + err := s.TraceIndexer.StoreTransaction(id, trace, batch) + s.Require().NoError(err) + + err = batch.Commit(pebble2.Sync) s.Require().NoError(err) val, err := s.TraceIndexer.GetTransaction(id) diff --git a/storage/mocks/AccountIndexer.go b/storage/mocks/AccountIndexer.go deleted file mode 100644 index a010784fc..000000000 --- a/storage/mocks/AccountIndexer.go +++ /dev/null @@ -1,109 +0,0 @@ -// Code generated by mockery v2.43.2. DO NOT EDIT. - -package mocks - -import ( - big "math/big" - - common "github.com/onflow/go-ethereum/common" - mock "github.com/stretchr/testify/mock" - - models "github.com/onflow/flow-evm-gateway/models" - - pebble "github.com/cockroachdb/pebble" -) - -// AccountIndexer is an autogenerated mock type for the AccountIndexer type -type AccountIndexer struct { - mock.Mock -} - -// GetBalance provides a mock function with given fields: address -func (_m *AccountIndexer) GetBalance(address common.Address) (*big.Int, error) { - ret := _m.Called(address) - - if len(ret) == 0 { - panic("no return value specified for GetBalance") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(common.Address) (*big.Int, error)); ok { - return rf(address) - } - if rf, ok := ret.Get(0).(func(common.Address) *big.Int); ok { - r0 = rf(address) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(common.Address) error); ok { - r1 = rf(address) - } else { - r1 = ret.Error(1) - } - - 
return r0, r1 -} - -// GetNonce provides a mock function with given fields: address -func (_m *AccountIndexer) GetNonce(address common.Address) (uint64, error) { - ret := _m.Called(address) - - if len(ret) == 0 { - panic("no return value specified for GetNonce") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(common.Address) (uint64, error)); ok { - return rf(address) - } - if rf, ok := ret.Get(0).(func(common.Address) uint64); ok { - r0 = rf(address) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(common.Address) error); ok { - r1 = rf(address) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Update provides a mock function with given fields: tx, receipt, batch -func (_m *AccountIndexer) Update(tx models.Transaction, receipt *models.Receipt, batch *pebble.Batch) error { - ret := _m.Called(tx, receipt, batch) - - if len(ret) == 0 { - panic("no return value specified for Update") - } - - var r0 error - if rf, ok := ret.Get(0).(func(models.Transaction, *models.Receipt, *pebble.Batch) error); ok { - r0 = rf(tx, receipt, batch) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// NewAccountIndexer creates a new instance of AccountIndexer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewAccountIndexer(t interface { - mock.TestingT - Cleanup(func()) -}) *AccountIndexer { - mock := &AccountIndexer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/storage/mocks/mocks.go b/storage/mocks/mocks.go index 706e20f6b..c7be2a73a 100644 --- a/storage/mocks/mocks.go +++ b/storage/mocks/mocks.go @@ -3,6 +3,7 @@ package mocks import ( "fmt" "math/big" + "time" "github.com/onflow/flow-go/fvm/evm/types" "github.com/onflow/go-ethereum/common" @@ -20,10 +21,14 @@ func NewBlock(height uint64) *models.Block { return &models.Block{ Block: &types.Block{ - ParentBlockHash: parent, - Height: height, - TotalSupply: big.NewInt(1000), - ReceiptRoot: common.HexToHash(fmt.Sprintf("0x1337%d", height)), + ParentBlockHash: parent, + Height: height, + Timestamp: uint64(time.Now().Second()), + TotalSupply: big.NewInt(1000), + ReceiptRoot: common.HexToHash(fmt.Sprintf("0x100%d", height)), + TransactionHashRoot: common.HexToHash(fmt.Sprintf("0x200%d", height)), + TotalGasUsed: uint64(30_000), + PrevRandao: common.HexToHash(fmt.Sprintf("0x300%d", height)), }, TransactionHashes: make([]common.Hash, 0), } diff --git a/storage/pebble/accounts.go b/storage/pebble/accounts.go deleted file mode 100644 index 3b64862b9..000000000 --- a/storage/pebble/accounts.go +++ /dev/null @@ -1,125 +0,0 @@ -package pebble - -import ( - "encoding/binary" - "errors" - "fmt" - "math/big" - "sync" - - "github.com/cockroachdb/pebble" - "github.com/onflow/go-ethereum/common" - - "github.com/onflow/flow-evm-gateway/models" - errs "github.com/onflow/flow-evm-gateway/models/errors" - "github.com/onflow/flow-evm-gateway/storage" -) - -var _ storage.AccountIndexer = &Accounts{} - -type Accounts struct { - store *Storage - mux sync.RWMutex -} - -func NewAccounts(db *Storage) *Accounts { - return &Accounts{ - store: db, - mux: sync.RWMutex{}, - } -} - -func (a *Accounts) Update( - tx models.Transaction, - receipt *models.Receipt, - batch *pebble.Batch, 
-) error { - a.mux.Lock() - defer a.mux.Unlock() - - from, err := tx.From() - if err != nil { - return err - } - - nonce, height, err := a.getNonce(from, batch) - if err != nil { - return err - } - - // make sure the transaction height is bigger than the height we already - // recorded for the nonce. this makes the operation idempotent and safer. - txHeight := receipt.BlockNumber.Uint64() - if txHeight <= height { - return nil - } - - nonce += 1 - - data := encodeNonce(nonce, txHeight) - return a.store.set(accountNonceKey, from.Bytes(), data, batch) -} - -func (a *Accounts) getNonce(address common.Address, batch *pebble.Batch) (uint64, uint64, error) { - var val []byte - var err error - if batch != nil { - val, err = a.store.batchGet(batch, accountNonceKey, address.Bytes()) - } else { - val, err = a.store.get(accountNonceKey, address.Bytes()) - } - if err != nil { - // if no nonce was yet saved for the account the nonce is 0 - if errors.Is(err, errs.ErrEntityNotFound) { - return 0, 0, nil - } - - return 0, 0, err - } - - nonce, height, err := decodeNonce(val) - if err != nil { - return 0, 0, err - } - - return nonce, height, nil -} - -func (a *Accounts) GetNonce(address common.Address) (uint64, error) { - a.mux.RLock() - defer a.mux.RUnlock() - nonce, _, err := a.getNonce(address, nil) - if err != nil { - return 0, fmt.Errorf("failed to get nonce of address: %s, with: %w", address, err) - } - - return nonce, nil -} - -func (a *Accounts) GetBalance(address common.Address) (*big.Int, error) { - panic("not supported") -} - -// decodeNonce converts nonce data into nonce and height -func decodeNonce(data []byte) (uint64, uint64, error) { - if len(data) != 16 { - return 0, 0, fmt.Errorf("invalid nonce data, expected length: %d, got: %d", 16, len(data)) - } - nonce := binary.BigEndian.Uint64(data[:8]) - height := binary.BigEndian.Uint64(data[8:]) - - return nonce, height, nil -} - -// encodeNonce converts nonce and height into nonce data -func encodeNonce(nonce uint64, 
height uint64) []byte { - payload := make([]byte, 16) - for i, b := range uint64Bytes(nonce) { - payload[i] = b - } - for i, b := range uint64Bytes(height) { - payload[i+8] = b - } - - return payload -} diff --git a/storage/pebble/blocks.go b/storage/pebble/blocks.go index e7286596e..42099b1d4 100644 --- a/storage/pebble/blocks.go +++ b/storage/pebble/blocks.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "slices" - "sync" "github.com/cockroachdb/pebble" "github.com/onflow/flow-go-sdk" @@ -27,7 +26,6 @@ var _ storage.BlockIndexer = &Blocks{} type Blocks struct { store *Storage - mux sync.RWMutex chainID flowGo.ChainID } @@ -35,7 +33,6 @@ func NewBlocks(store *Storage, chainID flowGo.ChainID) *Blocks { return &Blocks{ store: store, chainID: chainID, - mux: sync.RWMutex{}, } } @@ -45,8 +42,6 @@ func (b *Blocks) Store( block *models.Block, batch *pebble.Batch, ) error { - b.mux.Lock() - defer b.mux.Unlock() // dev note: please be careful if any store reads are added here, // store.batchGet must be used instead and batch must be used @@ -122,9 +117,6 @@ func (b *Blocks) Store( } func (b *Blocks) GetByHeight(height uint64) (*models.Block, error) { - b.mux.RLock() - defer b.mux.RUnlock() - last, err := b.latestEVMHeight() if err != nil { return nil, err @@ -144,9 +136,6 @@ func (b *Blocks) GetByHeight(height uint64) (*models.Block, error) { } func (b *Blocks) GetByID(ID common.Hash) (*models.Block, error) { - b.mux.RLock() - defer b.mux.RUnlock() - height, err := b.store.get(blockIDToHeightKey, ID.Bytes()) if err != nil { return nil, fmt.Errorf("failed to get EVM block by ID: %s, with: %w", ID, err) @@ -165,9 +154,6 @@ func (b *Blocks) GetByID(ID common.Hash) (*models.Block, error) { } func (b *Blocks) GetHeightByID(ID common.Hash) (uint64, error) { - b.mux.RLock() - defer b.mux.RUnlock() - height, err := b.store.get(blockIDToHeightKey, ID.Bytes()) if err != nil { return 0, fmt.Errorf("failed to get EVM block by ID: %s, with: %w", ID, err) @@ -177,9 +163,6 @@ func (b *Blocks) 
GetHeightByID(ID common.Hash) (uint64, error) { } func (b *Blocks) LatestEVMHeight() (uint64, error) { - b.mux.RLock() - defer b.mux.RUnlock() - return b.latestEVMHeight() } @@ -196,9 +179,6 @@ func (b *Blocks) latestEVMHeight() (uint64, error) { } func (b *Blocks) LatestCadenceHeight() (uint64, error) { - b.mux.RLock() - defer b.mux.RUnlock() - val, err := b.store.get(latestCadenceHeightKey) if err != nil { if errors.Is(err, errs.ErrEntityNotFound) { @@ -211,9 +191,6 @@ func (b *Blocks) LatestCadenceHeight() (uint64, error) { } func (b *Blocks) SetLatestCadenceHeight(height uint64, batch *pebble.Batch) error { - b.mux.Lock() - defer b.mux.Unlock() - if err := b.store.set(latestCadenceHeightKey, nil, uint64Bytes(height), batch); err != nil { return fmt.Errorf("failed to store latest Cadence height: %d, with: %w", height, err) } @@ -222,24 +199,24 @@ func (b *Blocks) SetLatestCadenceHeight(height uint64, batch *pebble.Batch) erro } // InitHeights sets the Cadence height to zero as well as EVM heights. Used for empty database init. 
-func (b *Blocks) InitHeights(cadenceHeight uint64, cadenceID flow.Identifier) error { +func (b *Blocks) InitHeights(cadenceHeight uint64, cadenceID flow.Identifier, batch *pebble.Batch) error { // sanity check, make sure we don't have any heights stored, disable overwriting the database _, err := b.LatestEVMHeight() if !errors.Is(err, errs.ErrStorageNotInitialized) { return fmt.Errorf("can't init the database that already has data stored") } - if err := b.store.set(latestCadenceHeightKey, nil, uint64Bytes(cadenceHeight), nil); err != nil { + if err := b.store.set(latestCadenceHeightKey, nil, uint64Bytes(cadenceHeight), batch); err != nil { return fmt.Errorf("failed to init latest Cadence height at: %d, with: %w", cadenceHeight, err) } - if err := b.store.set(latestEVMHeightKey, nil, uint64Bytes(0), nil); err != nil { + if err := b.store.set(latestEVMHeightKey, nil, uint64Bytes(0), batch); err != nil { return fmt.Errorf("failed to init latest EVM height at: %d, with: %w", 0, err) } // we store genesis block because it isn't emitted over the network genesisBlock := models.GenesisBlock(b.chainID) - if err := b.Store(cadenceHeight, cadenceID, genesisBlock, nil); err != nil { + if err := b.Store(cadenceHeight, cadenceID, genesisBlock, batch); err != nil { return fmt.Errorf("failed to store genesis block at Cadence height: %d, with: %w", cadenceHeight, err) } @@ -247,9 +224,6 @@ func (b *Blocks) InitHeights(cadenceHeight uint64, cadenceID flow.Identifier) er } func (b *Blocks) GetCadenceHeight(evmHeight uint64) (uint64, error) { - b.mux.RLock() - defer b.mux.RUnlock() - val, err := b.store.get(evmHeightToCadenceHeightKey, uint64Bytes(evmHeight)) if err != nil { return 0, err @@ -259,9 +233,6 @@ func (b *Blocks) GetCadenceHeight(evmHeight uint64) (uint64, error) { } func (b *Blocks) GetCadenceID(evmHeight uint64) (flow.Identifier, error) { - b.mux.RLock() - defer b.mux.RUnlock() - val, err := b.store.get(evmHeightToCadenceIDKey, uint64Bytes(evmHeight)) if err != nil { 
return flow.Identifier{}, err diff --git a/storage/pebble/keys.go b/storage/pebble/keys.go index 77411c7a4..aa46b61a3 100644 --- a/storage/pebble/keys.go +++ b/storage/pebble/keys.go @@ -1,6 +1,10 @@ package pebble -import "encoding/binary" +import ( + "encoding/binary" + + "github.com/cockroachdb/pebble" +) const ( // block keys @@ -17,16 +21,11 @@ const ( receiptHeightKey = byte(21) bloomHeightKey = byte(22) - // account keys - accountNonceKey = byte(30) - accountBalanceKey = byte(31) - // traces keys traceTxIDKey = byte(40) - // ledger value - ledgerValue = byte(50) - ledgerSlabIndex = byte(51) + // registers + registerKeyMarker = byte(50) // special keys latestEVMHeightKey = byte(100) @@ -60,3 +59,22 @@ func uint64Bytes(height uint64) []byte { binary.BigEndian.PutUint64(b, height) return b } + +func NewMVCCComparer() *pebble.Comparer { + comparer := *pebble.DefaultComparer + comparer.Split = func(a []byte) int { + if len(a) == 0 { + // edge case. Not sure if this is possible, but just in case + return 0 + } + if a[0] == registerKeyMarker { + // special case for registers + return len(a) - 8 + } + // default comparer + return len(a) + } + comparer.Name = "flow.MVCCComparer" + + return &comparer +} diff --git a/storage/pebble/ledger.go b/storage/pebble/ledger.go deleted file mode 100644 index 6669570ec..000000000 --- a/storage/pebble/ledger.go +++ /dev/null @@ -1,114 +0,0 @@ -package pebble - -import ( - "errors" - "fmt" - "sync" - - "github.com/onflow/atree" - - errs "github.com/onflow/flow-evm-gateway/models/errors" -) - -var _ atree.Ledger = &Ledger{} - -// todo we need to support historic data, -// we likely need to create ledger with the context of block height -// and then prepend all keys with that height - -type Ledger struct { - store *Storage - mux sync.RWMutex -} - -func NewLedger(store *Storage) *Ledger { - return &Ledger{ - store: store, - mux: sync.RWMutex{}, - } -} - -func (l *Ledger) GetValue(owner, key []byte) ([]byte, error) { - l.mux.RLock() - 
defer l.mux.RUnlock() - - id := append(owner, key...) - val, err := l.store.get(ledgerValue, id) - if err != nil { - // as per interface expectation we need to remove nil if not found - if errors.Is(err, errs.ErrEntityNotFound) { - return nil, nil - } - - return nil, fmt.Errorf( - "failed to get ledger value at owner %x and key %x: %w", - owner, - key, - err, - ) - } - - return val, nil -} - -func (l *Ledger) SetValue(owner, key, value []byte) error { - l.mux.Lock() - defer l.mux.Unlock() - - id := append(owner, key...) - if err := l.store.set(ledgerValue, id, value, nil); err != nil { - return fmt.Errorf( - "failed to store ledger value for owner %x and key %x: %w", - owner, - key, - err, - ) - } - - return nil -} - -func (l *Ledger) ValueExists(owner, key []byte) (bool, error) { - val, err := l.GetValue(owner, key) - if err != nil { - return false, err - } - - return val != nil, nil -} - -func (l *Ledger) AllocateSlabIndex(owner []byte) (atree.SlabIndex, error) { - l.mux.Lock() - defer l.mux.Unlock() - - var index atree.SlabIndex - - val, err := l.store.get(ledgerSlabIndex, owner) - if err != nil { - if !errors.Is(err, errs.ErrEntityNotFound) { - return atree.SlabIndexUndefined, err - } - } - - if val != nil { - if len(val) != len(index) { - return atree.SlabIndexUndefined, fmt.Errorf( - "slab index was not stored in correct format for owner %x", - owner, - ) - } - - copy(index[:], val) - } - - index.Next() - if err := l.store.set(ledgerSlabIndex, owner, index[:], nil); err != nil { - return atree.SlabIndexUndefined, fmt.Errorf( - "slab index failed to set for owner %x: %w", - owner, - err, - ) - } - - return index, nil -} diff --git a/storage/pebble/receipts.go b/storage/pebble/receipts.go index be45a32e1..5a3ffec68 100644 --- a/storage/pebble/receipts.go +++ b/storage/pebble/receipts.go @@ -3,7 +3,6 @@ package pebble import ( "encoding/binary" "fmt" - "sync" "github.com/cockroachdb/pebble" "github.com/onflow/go-ethereum/common" @@ -19,13 +18,11 @@ var _ 
storage.ReceiptIndexer = &Receipts{} type Receipts struct { store *Storage - mux sync.RWMutex } func NewReceipts(store *Storage) *Receipts { return &Receipts{ store: store, - mux: sync.RWMutex{}, } } @@ -37,9 +34,6 @@ func NewReceipts(store *Storage) *Receipts { // - receipt block height => list of encoded receipts (1+ per block) // - receipt block height => list of bloom filters (1+ per block) func (r *Receipts) Store(receipts []*models.Receipt, batch *pebble.Batch) error { - r.mux.Lock() - defer r.mux.Unlock() - var blooms []*gethTypes.Bloom var height uint64 @@ -91,15 +85,12 @@ func (r *Receipts) Store(receipts []*models.Receipt, batch *pebble.Batch) error } func (r *Receipts) GetByTransactionID(ID common.Hash) (*models.Receipt, error) { - r.mux.RLock() - defer r.mux.RUnlock() - height, err := r.store.get(receiptTxIDToHeightKey, ID.Bytes()) if err != nil { return nil, fmt.Errorf("failed to get receipt by tx ID: %s, with: %w", ID, err) } - receipts, err := r.getByBlockHeight(height, nil) + receipts, err := r.getByBlockHeight(height) if err != nil { return nil, fmt.Errorf( "failed to get receipt by height: %d, with: %w", @@ -118,21 +109,12 @@ func (r *Receipts) GetByTransactionID(ID common.Hash) (*models.Receipt, error) { } func (r *Receipts) GetByBlockHeight(height uint64) ([]*models.Receipt, error) { - r.mux.RLock() - defer r.mux.RUnlock() - - return r.getByBlockHeight(uint64Bytes(height), nil) + return r.getByBlockHeight(uint64Bytes(height)) } -func (r *Receipts) getByBlockHeight(height []byte, batch *pebble.Batch) ([]*models.Receipt, error) { - var val []byte - var err error +func (r *Receipts) getByBlockHeight(height []byte) ([]*models.Receipt, error) { - if batch != nil { - val, err = r.store.batchGet(batch, receiptHeightKey, height) - } else { - val, err = r.store.get(receiptHeightKey, height) - } + val, err := r.store.get(receiptHeightKey, height) if err != nil { return nil, err } @@ -161,9 +143,6 @@ func (r *Receipts) getByBlockHeight(height []byte, batch 
*pebble.Batch) ([]*mode } func (r *Receipts) BloomsForBlockRange(start, end uint64) ([]*models.BloomsHeight, error) { - r.mux.RLock() - defer r.mux.RUnlock() - if start > end { return nil, fmt.Errorf( "%w: start value %d is bigger than end value %d", diff --git a/storage/pebble/register_storage.go b/storage/pebble/register_storage.go new file mode 100644 index 000000000..c3dc2b4cb --- /dev/null +++ b/storage/pebble/register_storage.go @@ -0,0 +1,231 @@ +package pebble + +import ( + "encoding/binary" + "fmt" + + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage/pebble/registers" + + "github.com/cockroachdb/pebble" +) + +var ( + // MinLookupKeyLen defines the minimum length for a valid lookup key + // + // Lookup keys use the following format: + // [marker] [key] / [height] + // Where: + // - marker: 1 byte marking that this is a register key + // - key: optional variable length field + // - height: 8 bytes representing the block height (uint64) + // - separator: '/' is used to separate variable length field + // + // Therefore the minimum key would be 2 bytes + # of bytes for height + // [marker] / [height] + MinLookupKeyLen = 2 + registers.HeightSuffixLen +) + +type RegisterStorage struct { + store *Storage + owner flow.Address +} + +var _ types.StorageProvider = &RegisterStorage{} + +// NewRegisterStorage creates a new index instance at the provided height, all reads and +// writes of the registers will happen at that height. +// this is not concurrency safe. +// +// The register store does verify that the owner supplied is the one that was used before, +// or that the heights are sequential. +// This should be done by the caller. +// +// The RegisterStorage is modeled after `pebble.Registers` from `flow-go` but there are a few differences: +// 1. The `flow-go` implementation creates its own independent batch when saving registers. 
+// The gateway needs to save the registers together with blocks and transaction so the batch +// is shared with that. +// 2. The gateway does not need to store the owner address as all the registers are for the same owner. +// 3. The gateway does not need pruning (yet) as the db is supposed to be much smaller. +// 4. The owner and height checks are expected to be performed by the caller. +func NewRegisterStorage( + store *Storage, + owner flow.Address, +) *RegisterStorage { + return &RegisterStorage{ + store: store, + owner: owner, + } +} + +// Get returns the register value for the given register ID at the given height. +// Get will check that the owner is the same as the one used to create the index. +func (r *RegisterStorage) Get(id flow.RegisterID, height uint64) (value flow.RegisterValue, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("panic: %v", r) + } + }() + + owner := flow.BytesToAddress([]byte(id.Owner)) + if r.owner != flow.BytesToAddress([]byte(id.Owner)) { + return nil, registerOwnerMismatch(r.owner, owner) + } + + lookupKey := newLookupKey(height, []byte(id.Key)) + return r.lookupRegister(lookupKey.Bytes()) +} + +// Store stores the register entries for the given height to the given batch. +// The batch does need to be indexed. +// +// Store will check that all the register entries are for the same owner. 
+func (r *RegisterStorage) Store(entries flow.RegisterEntries, height uint64, batch *pebble.Batch) error { + for _, entry := range entries { + owner := flow.BytesToAddress([]byte(entry.Key.Owner)) + if r.owner != owner { + return registerOwnerMismatch(r.owner, owner) + } + + encoded := newLookupKey(height, []byte(entry.Key.Key)).Bytes() + + err := batch.Set(encoded, entry.Value, nil) + if err != nil { + return fmt.Errorf("failed to set key: %w", err) + } + } + + return nil +} + +func (r *RegisterStorage) lookupRegister(key []byte) (flow.RegisterValue, error) { + db := r.store.db + + iter, err := db.NewIter(&pebble.IterOptions{ + UseL6Filters: true, + }) + if err != nil { + return nil, err + } + + defer func() { + if err := iter.Close(); err != nil { + r.store.log.Error().Err(err).Msg("failed to close register iterator") + } + }() + + ok := iter.SeekPrefixGE(key) + if !ok { + // no such register found (which is equivalent to the register being nil) + return nil, nil + } + + binaryValue, err := iter.ValueAndErr() + if err != nil { + return nil, fmt.Errorf("failed to get value: %w", err) + } + // preventing caller from modifying the iterator's value slices + valueCopy := make([]byte, len(binaryValue)) + copy(valueCopy, binaryValue) + + return valueCopy, nil +} + +// lookupKey is the encoded format of the storage key for looking up register value +type lookupKey struct { + encoded []byte +} + +// Bytes returns the encoded lookup key. +func (h lookupKey) Bytes() []byte { + return h.encoded +} + +// String returns the encoded lookup key as a string. 
+func (h lookupKey) String() string { + return string(h.encoded) +} + +// newLookupKey takes a height and registerID, returns the key for storing the register value in storage +func newLookupKey(height uint64, key []byte) *lookupKey { + lookupKey := lookupKey{ + // 1 byte gaps for db prefix and '/' separators + encoded: make([]byte, 0, MinLookupKeyLen+len(key)), + } + + // The lookup lookupKey used to find most recent value for a register. + // + // The "" part is the register lookupKey, which is used as a prefix to filter and iterate + // through updated values at different heights, and find the most recent updated value at or below + // a certain height. + lookupKey.encoded = append(lookupKey.encoded, registerKeyMarker) + lookupKey.encoded = append(lookupKey.encoded, key...) + lookupKey.encoded = append(lookupKey.encoded, '/') + + // Encode the height getting it to 1s compliment (all bits flipped) and big-endian byte order. + // + // RegisterStorage are a sparse dataset stored with a single entry per update. To find the value at a particular + // height, we need to do a scan across the entries to find the highest height that is less than or equal + // to the target height. + // + // Pebble does not support reverse iteration, so we use the height's one's complement to effectively + // reverse sort on the height. This allows us to use a bitwise forward scan for the next most recent + // entry. + onesCompliment := ^height + lookupKey.encoded = binary.BigEndian.AppendUint64(lookupKey.encoded, onesCompliment) + + return &lookupKey +} + +// GetSnapshotAt returns a snapshot of the register index at the given block height. +// the snapshot has a cache. Nil values are cached. +func (r *RegisterStorage) GetSnapshotAt(evmBlockHeight uint64) (types.BackendStorageSnapshot, error) { + return NewStorageSnapshot(r.Get, evmBlockHeight), nil +} + +func registerOwnerMismatch(expected flow.Address, owner flow.Address) error { + return fmt.Errorf("owner mismatch. 
Storage expects a single owner %s, given %s", expected.Hex(), owner.Hex()) +} + +type GetAtHeightFunc func(id flow.RegisterID, height uint64) (flow.RegisterValue, error) + +type StorageSnapshot struct { + cache map[flow.RegisterID]flow.RegisterValue + + evmBlockHeight uint64 + storageGet GetAtHeightFunc +} + +// NewStorageSnapshot creates a new snapshot of the register index at the given block height. +// the snapshot has a cache. Nil values are cached. +// The snapshot is not concurrency-safe. +func NewStorageSnapshot(get GetAtHeightFunc, evmBlockHeight uint64) *StorageSnapshot { + return &StorageSnapshot{ + cache: make(map[flow.RegisterID]flow.RegisterValue), + storageGet: get, + evmBlockHeight: evmBlockHeight, + } +} + +// GetValue returns the value for the given register ID at the snapshot block height. +// If the value is not found in the cache, it is fetched from the register index. +func (s StorageSnapshot) GetValue(owner []byte, key []byte) ([]byte, error) { + id := flow.CadenceRegisterID(owner, key) + value, ok := s.cache[id] + if ok { + return value, nil + } + + // get from index + val, err := s.storageGet(id, s.evmBlockHeight) + if err != nil { + return nil, err + } + + // non-existing key will also be cached with `nil` value. 
+ s.cache[id] = val + return val, nil +} + +var _ types.BackendStorageSnapshot = &StorageSnapshot{} diff --git a/storage/pebble/register_storage_test.go b/storage/pebble/register_storage_test.go new file mode 100644 index 000000000..8d500ca2f --- /dev/null +++ b/storage/pebble/register_storage_test.go @@ -0,0 +1,160 @@ +package pebble + +import ( + "testing" + + "github.com/cockroachdb/pebble" + "github.com/onflow/flow-go/model/flow" + "github.com/stretchr/testify/require" +) + +func Test_RegisterIndex(t *testing.T) { + t.Parallel() + owner := "0x1" + ownerAddress := flow.BytesToAddress([]byte(owner)) + owner2 := "0x2" + key := "0x3" + value := []byte{0x4} + + runDB("get register", t, func(t *testing.T, db *Storage) { + t.Parallel() + + r := NewRegisterStorage(db, ownerAddress) + + v, err := r.Get(flow.RegisterID{Owner: owner, Key: key}, 0) + require.NoError(t, err) + require.Empty(t, v) + }) + + runDB("get register - owner2", t, func(t *testing.T, db *Storage) { + t.Parallel() + + r := NewRegisterStorage(db, ownerAddress) + + _, err := r.Get(flow.RegisterID{Owner: owner2, Key: key}, 0) + require.Error(t, err) + }) + + runDB("store registers", t, func(t *testing.T, db *Storage) { + t.Parallel() + + r := NewRegisterStorage(db, ownerAddress) + + batch := db.NewBatch() + + err := r.Store( + flow.RegisterEntries{ + flow.RegisterEntry{ + Key: flow.RegisterID{Owner: owner, Key: key}, + Value: value, + }, + }, + 0, + batch, + ) + require.NoError(t, err) + + v, err := r.Get(flow.RegisterID{Owner: owner, Key: key}, 0) + require.NoError(t, err) + // not committed, so value is still empty + require.Empty(t, v) + + err = batch.Commit(pebble.Sync) + require.NoError(t, err) + + v, err = r.Get(flow.RegisterID{Owner: owner, Key: key}, 0) + require.NoError(t, err) + require.Equal(t, value, v) + + require.NoError(t, err) + }) + + runDB("store registers - owner2", t, func(t *testing.T, db *Storage) { + t.Parallel() + + r := NewRegisterStorage(db, ownerAddress) + + batch =
db.NewBatch() + + err := r.Store( + flow.RegisterEntries{ + flow.RegisterEntry{ + Key: flow.RegisterID{Owner: owner2, Key: key}, + Value: value, + }, + }, + 0, + batch, + ) + require.Error(t, err) + }) +} + +func Test_StorageSnapshot(t *testing.T) { + + t.Parallel() + owner := []byte("0x1") + ownerAddress := flow.BytesToAddress(owner) + key := []byte("0x3") + value := []byte{0x4} + + runDB("get register", t, func(t *testing.T, db *Storage) { + t.Parallel() + + r := NewRegisterStorage(db, ownerAddress) + s, err := r.GetSnapshotAt(0) + require.NoError(t, err) + + v, err := s.GetValue(owner, key) + require.NoError(t, err) + require.Empty(t, v) + }) + + runDB("get register", t, func(t *testing.T, db *Storage) { + t.Parallel() + + count := uint64(0) + + storageGet := func(id flow.RegisterID, height uint64) (flow.RegisterValue, error) { + count++ + return value, nil + } + + s := NewStorageSnapshot(storageGet, 0) + + v, err := s.GetValue(owner, key) + require.NoError(t, err) + require.Equal(t, value, v) + + v, err = s.GetValue(owner, key) + require.NoError(t, err) + require.Equal(t, value, v) + + // value should be cached + require.Equal(t, uint64(1), count) + }) + + runDB("get register - cache nil", t, func(t *testing.T, db *Storage) { + t.Parallel() + + count := uint64(0) + + storageGet := func(id flow.RegisterID, height uint64) (flow.RegisterValue, error) { + count++ + return nil, nil + } + + s := NewStorageSnapshot(storageGet, 0) + + v, err := s.GetValue(owner, key) + require.NoError(t, err) + require.Empty(t, v) + + v, err = s.GetValue(owner, key) + require.NoError(t, err) + require.Empty(t, v) + + // value should be cached + require.Equal(t, uint64(1), count) + }) +} diff --git a/storage/pebble/storage.go b/storage/pebble/storage.go index a8624cfb3..eb33ad97e 100644 --- a/storage/pebble/storage.go +++ b/storage/pebble/storage.go @@ -5,6 +5,8 @@ import ( "fmt" "io" + "github.com/cockroachdb/pebble/bloom" + "github.com/cockroachdb/pebble" "github.com/rs/zerolog" @@ 
-26,6 +28,7 @@ func New(dir string, log zerolog.Logger) (*Storage, error) { // currently pebble is only used for registers opts := &pebble.Options{ Cache: cache, + Comparer: NewMVCCComparer(), FormatMajorVersion: pebble.FormatNewest, L0CompactionThreshold: 2, L0StopWritesThreshold: 1000, @@ -46,6 +49,12 @@ func New(dir string, log zerolog.Logger) (*Storage, error) { // for good performance (esp. on stripped storage). l.BlockSize = 32 << 10 // 32 KB l.IndexBlockSize = 256 << 10 // 256 KB + + // The bloom filter speeds up our SeekPrefixGE by skipping + // sstables that do not contain the prefix + l.FilterPolicy = bloom.FilterPolicy(MinLookupKeyLen) + l.FilterType = pebble.TableFilter + if i > 0 { // L0 starts at 2MiB, each level is 2x the previous. l.TargetFileSize = opts.Levels[i-1].TargetFileSize * 2 @@ -76,12 +85,7 @@ func New(dir string, log zerolog.Logger) (*Storage, error) { func (s *Storage) set(keyCode byte, key []byte, value []byte, batch *pebble.Batch) error { prefixedKey := makePrefix(keyCode, key) - if batch != nil { - // set the value on batch and return - return batch.Set(prefixedKey, value, nil) - } - - return s.db.Set(prefixedKey, value, nil) + return batch.Set(prefixedKey, value, nil) } func (s *Storage) get(keyCode byte, key ...[]byte) ([]byte, error) { @@ -109,21 +113,10 @@ func (s *Storage) get(keyCode byte, key ...[]byte) ([]byte, error) { return cp, nil } -// batchGet loads the value from an indexed batch if data is found, else it loads the value from the storage. -func (s *Storage) batchGet(batch *pebble.Batch, keyCode byte, key ...[]byte) ([]byte, error) { - if batch == nil || !batch.Indexed() { - return nil, fmt.Errorf("batch must not be nil and it must be indexed") - } - - data, closer, err := batch.Get(makePrefix(keyCode, key...)) - if err == nil { - _ = closer.Close() - return data, nil - } - - return s.get(keyCode, key...)
+func (s *Storage) NewBatch() *pebble.Batch { + return s.db.NewBatch() } -func (s *Storage) NewBatch() *pebble.Batch { - return s.db.NewIndexedBatch() +func (s *Storage) Close() error { + return s.db.Close() } diff --git a/storage/pebble/storage_test.go b/storage/pebble/storage_test.go index 4a031b3c9..5e80dd90c 100644 --- a/storage/pebble/storage_test.go +++ b/storage/pebble/storage_test.go @@ -5,72 +5,31 @@ import ( "github.com/cockroachdb/pebble" "github.com/goccy/go-json" + "github.com/onflow/flow-evm-gateway/config" + "github.com/onflow/flow-evm-gateway/models/errors" + "github.com/onflow/flow-evm-gateway/storage/mocks" "github.com/onflow/flow-go-sdk" flowGo "github.com/onflow/flow-go/model/flow" "github.com/onflow/go-ethereum/common" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/onflow/flow-evm-gateway/config" - "github.com/onflow/flow-evm-gateway/models/errors" - "github.com/onflow/flow-evm-gateway/storage" - "github.com/onflow/flow-evm-gateway/storage/mocks" ) -// tests that make sure the implementation conform to the interface expected behaviour -func TestBlocks(t *testing.T) { - runDB("blocks", t, func(t *testing.T, db *Storage) { - bl := NewBlocks(db, flowGo.Emulator) - err := bl.InitHeights(config.EmulatorInitCadenceHeight, flow.Identifier{0x1}) - require.NoError(t, err) - suite.Run(t, &storage.BlockTestSuite{Blocks: bl}) - }) -} - -func TestReceipts(t *testing.T) { - runDB("receipts", t, func(t *testing.T, db *Storage) { - // prepare the blocks database since they track heights which are used in receipts as well - bl := NewBlocks(db, flowGo.Emulator) - err := bl.InitHeights(config.EmulatorInitCadenceHeight, flow.Identifier{0x1}) - require.NoError(t, err) - err = bl.Store(30, flow.Identifier{0x1}, mocks.NewBlock(10), nil) // update first and latest height - require.NoError(t, err) - err = bl.Store(30, flow.Identifier{0x1}, mocks.NewBlock(300), nil) 
// update latest - require.NoError(t, err) - - suite.Run(t, &storage.ReceiptTestSuite{ReceiptIndexer: NewReceipts(db)}) - }) -} - -func TestTransactions(t *testing.T) { - runDB("transactions", t, func(t *testing.T, db *Storage) { - suite.Run(t, &storage.TransactionTestSuite{TransactionIndexer: NewTransactions(db)}) - }) -} - -func TestAccounts(t *testing.T) { - runDB("accounts", t, func(t *testing.T, db *Storage) { - suite.Run(t, &storage.AccountTestSuite{AccountIndexer: NewAccounts(db)}) - }) -} - -func TestTraces(t *testing.T) { - runDB("traces", t, func(t *testing.T, db *Storage) { - suite.Run(t, &storage.TraceTestSuite{TraceIndexer: NewTraces(db)}) - }) -} - func TestBlock(t *testing.T) { runDB("store block", t, func(t *testing.T, db *Storage) { bl := mocks.NewBlock(10) blocks := NewBlocks(db, flowGo.Emulator) - err := blocks.InitHeights(config.EmulatorInitCadenceHeight, flow.Identifier{0x1}) + batch := db.NewBatch() + + err := blocks.InitHeights(config.EmulatorInitCadenceHeight, flow.Identifier{0x1}, batch) + require.NoError(t, err) + + err = blocks.Store(20, flow.Identifier{0x1}, bl, batch) require.NoError(t, err) - err = blocks.Store(20, flow.Identifier{0x1}, bl, nil) + err = batch.Commit(pebble.Sync) require.NoError(t, err) }) @@ -81,10 +40,14 @@ func TestBlock(t *testing.T) { bl := mocks.NewBlock(height) blocks := NewBlocks(db, flowGo.Emulator) - err := blocks.InitHeights(config.EmulatorInitCadenceHeight, flow.Identifier{0x1}) + batch := db.NewBatch() + err := blocks.InitHeights(config.EmulatorInitCadenceHeight, flow.Identifier{0x1}, batch) require.NoError(t, err) - err = blocks.Store(cadenceHeight, cadenceID, bl, nil) + err = blocks.Store(cadenceHeight, cadenceID, bl, batch) + require.NoError(t, err) + + err = batch.Commit(pebble.Sync) require.NoError(t, err) block, err := blocks.GetByHeight(height) @@ -109,9 +72,15 @@ func TestBlock(t *testing.T) { runDB("get not found block error", t, func(t *testing.T, db *Storage) { blocks := NewBlocks(db, 
flowGo.Emulator) - err := blocks.InitHeights(config.EmulatorInitCadenceHeight, flow.Identifier{0x1}) + + batch := db.NewBatch() + err := blocks.InitHeights(config.EmulatorInitCadenceHeight, flow.Identifier{0x1}, batch) + require.NoError(t, err) + err = blocks.Store(2, flow.Identifier{0x1}, mocks.NewBlock(1), batch) // init + require.NoError(t, err) + + err = batch.Commit(pebble.Sync) require.NoError(t, err) - _ = blocks.Store(2, flow.Identifier{0x1}, mocks.NewBlock(1), nil) // init bl, err := blocks.GetByHeight(11) require.ErrorIs(t, err, errors.ErrEntityNotFound) @@ -123,18 +92,6 @@ func TestBlock(t *testing.T) { }) } -func TestAccount(t *testing.T) { - t.Run("encoding decoding nonce data", func(t *testing.T) { - nonce := uint64(10) - height := uint64(20) - raw := encodeNonce(10, 20) - decNonce, decHeight, err := decodeNonce(raw) - require.NoError(t, err) - assert.Equal(t, nonce, decNonce) - assert.Equal(t, height, decHeight) - }) -} - func TestBatch(t *testing.T) { runDB("batch successfully stores", t, func(t *testing.T, db *Storage) { blocks := NewBlocks(db, flowGo.Emulator) diff --git a/storage/pebble/traces.go b/storage/pebble/traces.go index b3174d04c..7de1784d8 100644 --- a/storage/pebble/traces.go +++ b/storage/pebble/traces.go @@ -2,7 +2,6 @@ package pebble import ( "fmt" - "sync" "github.com/cockroachdb/pebble" "github.com/goccy/go-json" @@ -15,20 +14,15 @@ var _ storage.TraceIndexer = &Traces{} type Traces struct { store *Storage - mux sync.RWMutex } func NewTraces(store *Storage) *Traces { return &Traces{ store: store, - mux: sync.RWMutex{}, } } func (t *Traces) StoreTransaction(ID common.Hash, trace json.RawMessage, batch *pebble.Batch) error { - t.mux.Lock() - defer t.mux.Unlock() - if err := t.store.set(traceTxIDKey, ID.Bytes(), trace, batch); err != nil { return fmt.Errorf("failed to store trace for transaction ID %s: %w", ID.String(), err) } @@ -37,9 +31,6 @@ func (t *Traces) StoreTransaction(ID common.Hash, trace json.RawMessage, batch * } func (t 
*Traces) GetTransaction(ID common.Hash) (json.RawMessage, error) { - t.mux.RLock() - defer t.mux.RUnlock() - val, err := t.store.get(traceTxIDKey, ID.Bytes()) if err != nil { return nil, fmt.Errorf("failed to get trace for transaction ID %s: %w", ID.String(), err) diff --git a/storage/register_delta.go b/storage/register_delta.go new file mode 100644 index 000000000..e7c5b85ed --- /dev/null +++ b/storage/register_delta.go @@ -0,0 +1,129 @@ +package storage + +import ( + "fmt" + + "github.com/onflow/atree" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +var _ types.BackendStorage = &RegisterDelta{} +var _ types.BackendStorageSnapshot = &RegisterDelta{} + +type RegisterDelta struct { + deltas map[flow.RegisterID]flow.RegisterEntry + registers types.BackendStorageSnapshot +} + +// NewRegisterDelta creates a new instance of RegisterDelta. It is not concurrency safe. +// This allows for the caller to build new state on top of the provided snapshot. +// The new state is not persisted. The caller is responsible for persisting the state using +// the `GetUpdates` method. +func NewRegisterDelta( + registers types.BackendStorageSnapshot, +) *RegisterDelta { + return &RegisterDelta{ + deltas: make(map[flow.RegisterID]flow.RegisterEntry), + registers: registers, + } +} + +// GetValue gets the value for the given register ID. If the value was set, it returns that value. +// If the value was not set, it reads the value from the snapshot. +func (r *RegisterDelta) GetValue(owner []byte, key []byte) ([]byte, error) { + id := flow.CadenceRegisterID(owner, key) + + // get from delta first + if delta, ok := r.deltas[id]; ok { + return delta.Value, nil + } + + // get from storage + return r.registers.GetValue(owner, key) +} + +// SetValue sets the value for the given register ID. It sets it in the delta, not in the storage. 
+func (r *RegisterDelta) SetValue(owner, key, value []byte) error { + id := flow.CadenceRegisterID(owner, key) + + r.deltas[id] = flow.RegisterEntry{Key: id, Value: value} + + return nil +} + +func (r *RegisterDelta) ValueExists(owner []byte, key []byte) (bool, error) { + value, err := r.GetValue(owner, key) + if err != nil { + return false, err + } + return len(value) > 0, nil +} + +// GetUpdates returns the register updates from the delta to be applied to storage. +func (r *RegisterDelta) GetUpdates() flow.RegisterEntries { + entries := make(flow.RegisterEntries, 0, len(r.deltas)) + for id, delta := range r.deltas { + entries = append(entries, flow.RegisterEntry{Key: id, Value: delta.Value}) + } + + return entries +} + +func (r *RegisterDelta) AllocateSlabIndex(owner []byte) (atree.SlabIndex, error) { + return allocateSlabIndex(owner, r) + +} + +// allocateSlabIndex allocates a new slab index for the given owner and key. +// this method only uses the storage get/set methods. +func allocateSlabIndex(owner []byte, storage types.BackendStorage) (atree.SlabIndex, error) { + // get status + address := flow.BytesToAddress(owner) + id := flow.AccountStatusRegisterID(address) + statusBytes, err := storage.GetValue(owner, []byte(id.Key)) + if err != nil { + return atree.SlabIndex{}, fmt.Errorf( + "failed to load account status for the account (%s): %w", + address.String(), + err) + } + if len(statusBytes) == 0 { + return atree.SlabIndex{}, errors.NewAccountNotFoundError(address) + } + status, err := environment.AccountStatusFromBytes(statusBytes) + if err != nil { + return atree.SlabIndex{}, err + } + + // get and increment the index + index := status.SlabIndex() + newIndexBytes := index.Next() + + // store nil so that the setValue for new allocated slabs would be faster + // and won't do ledger getValue for every new slabs (currently happening to + // compute storage size changes) + // this way the getValue would load this value from deltas + key := 
atree.SlabIndexToLedgerKey(index) + err = storage.SetValue(owner, key, []byte{}) + if err != nil { + return atree.SlabIndex{}, fmt.Errorf( + "failed to allocate an storage index: %w", + err) + } + + // update the storageIndex bytes + status.SetStorageIndex(newIndexBytes) + + err = storage.SetValue(owner, []byte(id.Key), status.ToBytes()) + if err != nil { + return atree.SlabIndex{}, fmt.Errorf( + "failed to store the account status for account (%s): %w", + address.String(), + err) + } + return index, nil + +} diff --git a/storage/register_delta_test.go b/storage/register_delta_test.go new file mode 100644 index 000000000..10c2e501e --- /dev/null +++ b/storage/register_delta_test.go @@ -0,0 +1,244 @@ +package storage_test + +import ( + "testing" + + "github.com/cockroachdb/pebble" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + flowGo "github.com/onflow/flow-go/model/flow" + + "github.com/onflow/flow-evm-gateway/storage" + pebbleStorage "github.com/onflow/flow-evm-gateway/storage/pebble" +) + +func Test_RegisterDeltaWithStorage(t *testing.T) { + owner := []byte{0x01} + ownerAddress := flowGo.BytesToAddress(owner) + owner2 := []byte{0x02} + key := []byte{0x03} + value1 := []byte{0x05} + value2 := []byte{0x06} + + // helper to create a new register delta + delta := func(t *testing.T, r *pebbleStorage.RegisterStorage, evmBlockHeight uint64) *storage.RegisterDelta { + ss, err := r.GetSnapshotAt(0) + require.NoError(t, err) + return storage.NewRegisterDelta(ss) + } + + runDB("get register", t, func(t *testing.T, db *pebbleStorage.Storage) { + t.Parallel() + + r := pebbleStorage.NewRegisterStorage(db, ownerAddress) + d := delta(t, r, 0) + + v, err := d.GetValue(owner, key) + require.NoError(t, err) + require.Len(t, v, 0) + }) + + runDB("set register", t, func(t *testing.T, db *pebbleStorage.Storage) { + t.Parallel() + + r := pebbleStorage.NewRegisterStorage(db, ownerAddress) + d := delta(t, r, 0) + + err := d.SetValue(owner, key, value1) + 
require.NoError(t, err) + }) + + runDB("set-get register", t, func(t *testing.T, db *pebbleStorage.Storage) { + t.Parallel() + + r := pebbleStorage.NewRegisterStorage(db, ownerAddress) + d := delta(t, r, 0) + + err := d.SetValue(owner, key, value1) + require.NoError(t, err) + + v, err := d.GetValue(owner, key) + require.NoError(t, err) + require.Equal(t, value1, v) + }) + + runDB("set-set-get register", t, func(t *testing.T, db *pebbleStorage.Storage) { + t.Parallel() + + r := pebbleStorage.NewRegisterStorage(db, ownerAddress) + d := delta(t, r, 0) + + err := d.SetValue(owner, key, value1) + require.NoError(t, err) + + err = d.SetValue(owner, key, value2) + require.NoError(t, err) + + v, err := d.GetValue(owner, key) + require.NoError(t, err) + require.Equal(t, value2, v) + }) + + runDB("set-unset-get register", t, func(t *testing.T, db *pebbleStorage.Storage) { + t.Parallel() + + r := pebbleStorage.NewRegisterStorage(db, ownerAddress) + d := delta(t, r, 0) + + err := d.SetValue(owner, key, value1) + require.NoError(t, err) + + err = d.SetValue(owner, key, nil) + require.NoError(t, err) + + v, err := d.GetValue(owner, key) + require.NoError(t, err) + // not actually nil, but empty + require.Len(t, v, 0) + }) + + runDB("set-next-get register", t, func(t *testing.T, db *pebbleStorage.Storage) { + t.Parallel() + + r := pebbleStorage.NewRegisterStorage(db, ownerAddress) + d := delta(t, r, 0) + + err := d.SetValue(owner, key, value1) + require.NoError(t, err) + + err = commit(t, db, d, r) + require.NoError(t, err) + + d = delta(t, r, 1) + + v, err := d.GetValue(owner, key) + require.NoError(t, err) + require.Equal(t, value1, v) + }) + + runDB("set-dont-commit-get register", t, func(t *testing.T, db *pebbleStorage.Storage) { + t.Parallel() + + r := pebbleStorage.NewRegisterStorage(db, ownerAddress) + d := delta(t, r, 0) + + err := d.SetValue(owner, key, value1) + require.NoError(t, err) + + d = delta(t, r, 1) + + v, err := d.GetValue(owner, key) + require.NoError(t, err) 
+ require.Empty(t, v) + }) + + runDB("set-next-set-next-get register", t, func(t *testing.T, db *pebbleStorage.Storage) { + t.Parallel() + + r := pebbleStorage.NewRegisterStorage(db, ownerAddress) + d := delta(t, r, 0) + + err := d.SetValue(owner, key, value1) + require.NoError(t, err) + + err = commit(t, db, d, r) + require.NoError(t, err) + + d = delta(t, r, 1) + + err = d.SetValue(owner, key, value2) + require.NoError(t, err) + + err = commit(t, db, d, r) + require.NoError(t, err) + + d = delta(t, r, 2) + + v, err := d.GetValue(owner, key) + require.NoError(t, err) + require.Equal(t, value2, v) + }) + + runDB("set-next-unset-next-get register", t, func(t *testing.T, db *pebbleStorage.Storage) { + t.Parallel() + + r := pebbleStorage.NewRegisterStorage(db, ownerAddress) + d := delta(t, r, 0) + + err := d.SetValue(owner, key, value1) + require.NoError(t, err) + + err = commit(t, db, d, r) + require.NoError(t, err) + + d = delta(t, r, 1) + + err = d.SetValue(owner, key, nil) + require.NoError(t, err) + + err = commit(t, db, d, r) + require.NoError(t, err) + + d = delta(t, r, 2) + + v, err := d.GetValue(owner, key) + require.NoError(t, err) + // not actually nil, but empty + require.Len(t, v, 0) + }) + + runDB("get with wrong owner", t, func(t *testing.T, db *pebbleStorage.Storage) { + t.Parallel() + + r := pebbleStorage.NewRegisterStorage(db, ownerAddress) + d := delta(t, r, 0) + + _, err := d.GetValue(owner2, key) + require.Error(t, err) + }) + + runDB("commit with wrong owner", t, func(t *testing.T, db *pebbleStorage.Storage) { + t.Parallel() + + s := pebbleStorage.NewRegisterStorage(db, ownerAddress) + d := delta(t, s, 0) + + err := d.SetValue(owner2, key, value1) + require.NoError(t, err) + + err = commit(t, db, d, s) + require.Error(t, err) + }) +} + +func runDB(name string, t *testing.T, f func(t *testing.T, db *pebbleStorage.Storage)) { + dir := t.TempDir() + + db, err := pebbleStorage.New(dir, zerolog.New(zerolog.NewTestWriter(t))) + require.NoError(t, err) 
+ + t.Run(name, func(t *testing.T) { + f(t, db) + }) +} + +// commit is an example on how to commit the delta to storage. +func commit( + t *testing.T, + db *pebbleStorage.Storage, + d *storage.RegisterDelta, + r *pebbleStorage.RegisterStorage, +) error { + batch := db.NewBatch() + + err := r.Store(d.GetUpdates(), 0, batch) + + if err != nil { + return err + } + + err = batch.Commit(pebble.Sync) + require.NoError(t, err) + return nil +} diff --git a/tests/e2e_web3js_test.go b/tests/e2e_web3js_test.go index 70276762f..03c8b3860 100644 --- a/tests/e2e_web3js_test.go +++ b/tests/e2e_web3js_test.go @@ -28,6 +28,14 @@ func TestWeb3_E2E(t *testing.T) { runWeb3Test(t, "build_evm_state_test") }) + t.Run("verify Cadence arch calls", func(t *testing.T) { + runWeb3Test(t, "verify_cadence_arch_calls_test") + }) + + t.Run("test transaction traces", func(t *testing.T) { + runWeb3Test(t, "debug_traces_test") + }) + t.Run("test setup sanity check", func(t *testing.T) { runWeb3Test(t, "setup_test") }) @@ -56,6 +64,10 @@ func TestWeb3_E2E(t *testing.T) { runWeb3Test(t, "eth_deploy_contract_and_interact_test") }) + t.Run("test retrieval of contract storage slots", func(t *testing.T) { + runWeb3Test(t, "eth_get_storage_at_test") + }) + t.Run("deploy multicall3 contract and call methods", func(t *testing.T) { runWeb3Test(t, "eth_multicall3_contract_test") }) diff --git a/tests/go.mod b/tests/go.mod index ac19862fe..c61c7ed2d 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -55,7 +55,9 @@ require ( github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/dlclark/regexp2 v1.7.0 // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/ef-ds/deque v1.0.4 // indirect github.com/ethereum/c-kzg-4844 v1.0.0 
// indirect @@ -75,6 +77,7 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-redis/redis/v8 v8.11.5 // indirect + github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.0 // indirect @@ -83,6 +86,7 @@ require ( github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-dap v0.11.0 // indirect + github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect diff --git a/tests/go.sum b/tests/go.sum index e7b099561..1311f1f08 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -141,8 +141,11 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -222,8 +225,16 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= +github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= +github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= +github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= @@ -330,16 +341,21 @@ github.com/go-playground/validator/v10 v10.14.1 
h1:9c50NUPC30zyuKprjL3vNZ0m5oG+j github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= 
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -435,6 +451,7 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42 h1:dHLYa5D8/Ta0aLR2XcPsrkpAgGeFs6thhMcQK0oQ0n8= github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= @@ -524,6 +541,7 @@ github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -607,6 +625,8 @@ 
github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoK github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -908,6 +928,7 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= @@ -1306,6 +1327,7 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/tests/helpers.go b/tests/helpers.go index a62bb2e11..600922f8d 100644 --- a/tests/helpers.go +++ b/tests/helpers.go @@ -153,7 +153,7 @@ func servicesSetup(t *testing.T) (emulator.Emulator, func()) { LogWriter: testLogWriter(), StreamTimeout: time.Second * 30, StreamLimit: 10, - RateLimit: 50, + RateLimit: 500, WSEnabled: true, MetricsPort: 8443, FilterExpiry: time.Second * 5, @@ -177,7 +177,7 @@ func servicesSetup(t *testing.T) (emulator.Emulator, func()) { // and will report failure or success of the test. func executeTest(t *testing.T, testFile string) { command := fmt.Sprintf( - "./web3js/node_modules/.bin/mocha ./web3js/%s.js --timeout 120s", + "./web3js/node_modules/.bin/mocha ./web3js/%s.js --timeout 150s", testFile, ) parts := strings.Fields(command) diff --git a/tests/web3js/build_evm_state_test.js b/tests/web3js/build_evm_state_test.js index d52eb715c..a9490492c 100644 --- a/tests/web3js/build_evm_state_test.js +++ b/tests/web3js/build_evm_state_test.js @@ -237,85 +237,6 @@ it('should handle a large number of EVM interactions', async () => { gasPrice: conf.minGasPrice, }) assert.equal(res.receipt.status, conf.successStatus) - - // submit a transaction that calls verifyArchCallToRandomSource(uint64 height) - let getRandomSourceData = deployed.contract.methods.verifyArchCallToRandomSource(120).encodeABI() - res = await helpers.signAndSend({ - from: conf.eoa.address, - to: contractAddress, - data: getRandomSourceData, - value: '0', - gasPrice: conf.minGasPrice, - }) - assert.equal(res.receipt.status, conf.successStatus) - - // make a contract 
call for verifyArchCallToRandomSource(uint64 height) - res = await web3.eth.call({ to: contractAddress, data: getRandomSourceData }, latest) - assert.notEqual( - res, - '0x0000000000000000000000000000000000000000000000000000000000000000' - ) - assert.lengthOf(res, 66) - - // submit a transaction that calls verifyArchCallToRevertibleRandom() - let revertibleRandomData = deployed.contract.methods.verifyArchCallToRevertibleRandom().encodeABI() - res = await helpers.signAndSend({ - from: conf.eoa.address, - to: contractAddress, - data: revertibleRandomData, - value: '0', - gasPrice: conf.minGasPrice, - }) - assert.equal(res.receipt.status, conf.successStatus) - - // make a contract call for verifyArchCallToRevertibleRandom() - res = await web3.eth.call({ to: contractAddress, data: revertibleRandomData }, latest) - assert.notEqual( - res, - '0x0000000000000000000000000000000000000000000000000000000000000000' - ) - assert.lengthOf(res, 66) - - // submit a transaction that calls verifyArchCallToFlowBlockHeight() - let flowBlockHeightData = deployed.contract.methods.verifyArchCallToFlowBlockHeight().encodeABI() - res = await helpers.signAndSend({ - from: conf.eoa.address, - to: contractAddress, - data: flowBlockHeightData, - value: '0', - gasPrice: conf.minGasPrice, - }) - assert.equal(res.receipt.status, conf.successStatus) - - // make a contract call for verifyArchCallToFlowBlockHeight() - res = await web3.eth.call({ to: contractAddress, data: flowBlockHeightData }, latest) - assert.equal( - web3.eth.abi.decodeParameter('uint64', res), - latest, - ) - - // submit a transaction that calls verifyArchCallToVerifyCOAOwnershipProof(address,bytes32,bytes) - let tx = await web3.eth.getTransactionFromBlock(conf.startBlockHeight, 1) - let verifyCOAOwnershipProofData = deployed.contract.methods.verifyArchCallToVerifyCOAOwnershipProof( - tx.to, - '0x1bacdb569847f31ade07e83d6bb7cefba2b9290b35d5c2964663215e73519cff', - 
web3.utils.hexToBytes('f853c18088f8d6e0586b0a20c78365766df842b840b90448f4591df2639873be2914c5560149318b7e2fcf160f7bb8ed13cfd97be2f54e6889606f18e50b2c37308386f840e03a9fff915f57b2164cba27f0206a95') - ).encodeABI() - res = await helpers.signAndSend({ - from: conf.eoa.address, - to: contractAddress, - data: verifyCOAOwnershipProofData, - value: '0', - gasPrice: conf.minGasPrice, - }) - assert.equal(res.receipt.status, conf.successStatus) - - // make a contract call for verifyArchCallToVerifyCOAOwnershipProof(address,bytes32,bytes) - res = await web3.eth.call({ to: contractAddress, data: verifyCOAOwnershipProofData }, latest) - assert.equal( - web3.eth.abi.decodeParameter('bool', res), - false, - ) }) function randomItem(items) { diff --git a/tests/web3js/debug_traces_test.js b/tests/web3js/debug_traces_test.js new file mode 100644 index 000000000..da20c5a71 --- /dev/null +++ b/tests/web3js/debug_traces_test.js @@ -0,0 +1,564 @@ +const { assert } = require('chai') +const conf = require('./config') +const helpers = require('./helpers') +const web3 = conf.web3 + +let deployed = null +let contractAddress = null + +before(async () => { + deployed = await helpers.deployContract('storage') + contractAddress = deployed.receipt.contractAddress + + assert.equal(deployed.receipt.status, conf.successStatus) +}) + +it('should retrieve transaction traces', async () => { + assert.equal(deployed.receipt.status, conf.successStatus) + + let receipt = await web3.eth.getTransactionReceipt(deployed.receipt.transactionHash) + assert.equal(receipt.contractAddress, contractAddress) + + let callTracer = { + tracer: 'callTracer', + tracerConfig: { + onlyTopCall: true + } + } + response = await helpers.callRPCMethod( + 'debug_traceTransaction', + [receipt.transactionHash, callTracer] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body.result) + + // Assert proper response for `callTracer` + let txTrace = response.body.result + assert.equal(txTrace.from, 
'0xfacf71692421039876a5bb4f10ef7a439d8ef61e') + assert.equal(txTrace.gas, '0x118e0c') + assert.equal(txTrace.gasUsed, '0x114010') + assert.equal(txTrace.to, '0x99a64c993965f8d69f985b5171bc20065cc32fab') + assert.lengthOf(txTrace.input, 9856n) + assert.lengthOf(txTrace.output, 9806n) + assert.equal(txTrace.value, '0x0') + assert.equal(txTrace.type, 'CREATE') + + let jsTracer = '{hist: {}, nops: 0, step: function(log, db) { var op = log.op.toString(); if (this.hist[op]){ this.hist[op]++; } else { this.hist[op] = 1; } this.nops++; }, fault: function(log, db) {}, result: function(ctx) { return this.hist; }}' + response = await helpers.callRPCMethod( + 'debug_traceTransaction', + [receipt.transactionHash, { tracer: jsTracer }] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body.result) + + // Assert proper response for custom JavaScript tracer + txTrace = response.body.result + assert.deepEqual( + txTrace, + { + PUSH1: 2, + MSTORE: 1, + PUSH2: 3, + PUSH0: 3, + DUP2: 1, + SWAP1: 1, + SSTORE: 1, + POP: 1, + DUP1: 1, + CODECOPY: 1, + RETURN: 1 + } + ) + + let updateData = deployed.contract.methods.store(100n).encodeABI() + let res = await helpers.signAndSend({ + from: conf.eoa.address, + to: contractAddress, + data: updateData, + value: '0', + gasPrice: conf.minGasPrice, + }) + assert.equal(res.receipt.status, conf.successStatus) + + receipt = await web3.eth.getTransactionReceipt(res.receipt.transactionHash) + + response = await helpers.callRPCMethod( + 'debug_traceTransaction', + [receipt.transactionHash, callTracer] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body.result) + + // Assert proper response for `callTracer` + txTrace = response.body.result + assert.equal(txTrace.from, '0xfacf71692421039876a5bb4f10ef7a439d8ef61e') + assert.equal(txTrace.gas, '0x72c3') + assert.equal(txTrace.gasUsed, '0x6827') + assert.equal(txTrace.to, '0x99a64c993965f8d69f985b5171bc20065cc32fab') + assert.equal( + txTrace.input, + updateData + ) + 
assert.equal(txTrace.value, '0x0') + assert.equal(txTrace.type, 'CALL') + + let prestateTracer = { + tracer: 'prestateTracer', + tracerConfig: { + diffMode: true + } + } + response = await helpers.callRPCMethod( + 'debug_traceTransaction', + [receipt.transactionHash, prestateTracer] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body.result) + + // Assert proper response for `prestateTracer` + txTrace = response.body.result + assert.deepEqual( + txTrace.pre['0x0000000000000000000000030000000000000000'], + { balance: '0x0', nonce: 1 } + ) + assert.deepEqual( + txTrace.pre['0xfacf71692421039876a5bb4f10ef7a439d8ef61e'], + { balance: '0x456391823ad876a0', nonce: 1 } + ) + assert.deepEqual( + txTrace.post['0x0000000000000000000000030000000000000000'], + { balance: '0x3d06da' } + ) + assert.deepEqual( + txTrace.post['0xfacf71692421039876a5bb4f10ef7a439d8ef61e'], + { balance: '0x456391823a9b6fc6', nonce: 2 } + ) + + response = await helpers.callRPCMethod( + 'debug_traceTransaction', + [receipt.transactionHash, { tracer: '4byteTracer' }] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body.result) + + // Assert proper response for `4byteTracer` + txTrace = response.body.result + assert.deepEqual( + txTrace, + { '0x6057361d-32': 1 } + ) + + response = await helpers.callRPCMethod( + 'debug_traceBlockByNumber', + [web3.utils.toHex(receipt.blockNumber), callTracer] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body.result) + + let txTraces = response.body.result + assert.lengthOf(txTraces, 2) // the 2nd tx trace is from the transfer of fees to coinbase + assert.deepEqual( + txTraces, + [ + { + txHash: '0x87449feedc004c75c0e8b12d01656f2e28366c7d73b1b5336beae20aaa5033dd', + result: { + from: '0xfacf71692421039876a5bb4f10ef7a439d8ef61e', + gas: '0x72c3', + gasUsed: '0x6827', + to: '0x99a64c993965f8d69f985b5171bc20065cc32fab', + input: '0x6057361d0000000000000000000000000000000000000000000000000000000000000064', + 
value: '0x0', + type: 'CALL' + } + }, + { + txHash: '0x6039ef1f7dc8d40b74f58e502f5b0b535a46c1b4ddd780c23cb97cf4d681bb47', + result: { + from: '0x0000000000000000000000030000000000000000', + gas: '0x5b04', + gasUsed: '0x5208', + to: '0x658bdf435d810c91414ec09147daa6db62406379', + input: '0x', + value: '0x3d06da', + type: 'CALL' + } + } + ] + ) + + response = await helpers.callRPCMethod( + 'debug_traceBlockByHash', + [web3.utils.toHex(receipt.blockHash), callTracer] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body.result) + + txTraces = response.body.result + assert.lengthOf(txTraces, 2) // the 2nd tx trace is from the transfer of fees to coinbase + assert.deepEqual( + txTraces, + [ + { + txHash: '0x87449feedc004c75c0e8b12d01656f2e28366c7d73b1b5336beae20aaa5033dd', + result: { + from: '0xfacf71692421039876a5bb4f10ef7a439d8ef61e', + gas: '0x72c3', + gasUsed: '0x6827', + to: '0x99a64c993965f8d69f985b5171bc20065cc32fab', + input: '0x6057361d0000000000000000000000000000000000000000000000000000000000000064', + value: '0x0', + type: 'CALL' + } + }, + { + txHash: '0x6039ef1f7dc8d40b74f58e502f5b0b535a46c1b4ddd780c23cb97cf4d681bb47', + result: { + from: '0x0000000000000000000000030000000000000000', + gas: '0x5b04', + gasUsed: '0x5208', + to: '0x658bdf435d810c91414ec09147daa6db62406379', + input: '0x', + value: '0x3d06da', + type: 'CALL' + } + } + ] + ) + + callTracer = { + tracer: 'callTracer', + tracerConfig: { + onlyTopCall: false + } + } + + // submit a transaction that calls verifyArchCallToFlowBlockHeight() + let flowBlockHeightData = deployed.contract.methods.verifyArchCallToFlowBlockHeight().encodeABI() + res = await helpers.signAndSend({ + from: conf.eoa.address, + to: contractAddress, + data: flowBlockHeightData, + value: '0', + gasPrice: conf.minGasPrice, + }) + assert.equal(res.receipt.status, conf.successStatus) + + response = await helpers.callRPCMethod( + 'debug_traceTransaction', + [web3.utils.toHex(res.receipt.transactionHash), 
callTracer] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body.result) + + txTrace = response.body.result + + assert.deepEqual( + txTrace, + { + from: conf.eoa.address.toLowerCase(), + gas: '0xc9c7', + gasUsed: '0x6147', + to: contractAddress.toLowerCase(), + input: '0xc550f90f', + output: '0x0000000000000000000000000000000000000000000000000000000000000006', + calls: [ + { + from: contractAddress.toLowerCase(), + gas: '0x6948', + gasUsed: '0x2', + to: '0x0000000000000000000000010000000000000001', + input: '0x53e87d66', + output: '0x0000000000000000000000000000000000000000000000000000000000000006', + type: 'STATICCALL' + } + ], + value: '0x0', + type: 'CALL' + } + ) +}) + +it('should retrieve call traces', async () => { + let receipt = await web3.eth.getTransactionReceipt(deployed.receipt.transactionHash) + assert.equal(receipt.contractAddress, contractAddress) + + let callTracer = { + tracer: 'callTracer', + tracerConfig: { + onlyTopCall: true + } + } + + let callData = deployed.contract.methods.store(500).encodeABI() + let traceCall = { + from: conf.eoa.address, + to: contractAddress, + data: callData, + value: '0x0', + gasPrice: web3.utils.toHex(conf.minGasPrice), + gas: '0x95ab' + } + response = await helpers.callRPCMethod( + 'debug_traceCall', + [traceCall, 'latest', callTracer] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body) + + let updateTrace = response.body.result + assert.equal(updateTrace.from, '0xfacf71692421039876a5bb4f10ef7a439d8ef61e') + assert.equal(updateTrace.gas, '0x95ab') + assert.equal(updateTrace.gasUsed, '0x6833') + assert.equal(updateTrace.to, '0x99a64c993965f8d69f985b5171bc20065cc32fab') + assert.equal( + updateTrace.input, + '0x6057361d00000000000000000000000000000000000000000000000000000000000001f4' + ) + assert.equal(updateTrace.value, '0x0') + assert.equal(updateTrace.type, 'CALL') + + callData = deployed.contract.methods.retrieve().encodeABI() + traceCall = { + from: conf.eoa.address, + 
to: contractAddress, + gas: '0x75ab', + gasPrice: web3.utils.toHex(conf.minGasPrice), + value: '0x0', + data: callData, + } + response = await helpers.callRPCMethod( + 'debug_traceCall', + [traceCall, 'latest', callTracer] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body) + + let callTrace = response.body.result + assert.equal(callTrace.from, '0xfacf71692421039876a5bb4f10ef7a439d8ef61e') + assert.equal(callTrace.gas, '0x75ab') + assert.equal(callTrace.gasUsed, '0x5be0') + assert.equal(callTrace.to, '0x99a64c993965f8d69f985b5171bc20065cc32fab') + assert.equal(callTrace.input, '0x2e64cec1') + assert.equal( + callTrace.output, + '0x0000000000000000000000000000000000000000000000000000000000000064' + ) + assert.equal(callTrace.value, '0x0') + assert.equal(callTrace.type, 'CALL') + + let prestateTracer = { + tracer: 'prestateTracer', + tracerConfig: { + diffMode: true + } + } + response = await helpers.callRPCMethod( + 'debug_traceCall', + [traceCall, 'latest', prestateTracer] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body) + + // Assert proper response for `prestateTracer` + txTrace = response.body.result + assert.deepEqual( + txTrace, + { + post: { + '0xfacf71692421039876a5bb4f10ef7a439d8ef61e': { + nonce: 4 + } + }, + pre: { + '0xfacf71692421039876a5bb4f10ef7a439d8ef61e': { + balance: '0x456391823a62702c', + nonce: 3 + } + } + } + ) + + response = await helpers.callRPCMethod( + 'debug_traceCall', + [traceCall, 'latest', { tracer: '4byteTracer' }] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body) + + // Assert proper response for `4byteTracer` + txTrace = response.body.result + assert.deepEqual( + txTrace, + { '0x2e64cec1-0': 1 } + ) + + let jsTracer = '{hist: {}, nops: 0, step: function(log, db) { var op = log.op.toString(); if (this.hist[op]){ this.hist[op]++; } else { this.hist[op] = 1; } this.nops++; }, fault: function(log, db) {}, result: function(ctx) { return this.hist; }}' + response = 
await helpers.callRPCMethod( + 'debug_traceCall', + [traceCall, 'latest', { tracer: jsTracer }] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body) + + // Assert proper response for custom JavaScript tracer + txTrace = response.body.result + assert.deepEqual( + txTrace, + { + PUSH1: 7, + MSTORE: 2, + CALLVALUE: 1, + DUP1: 6, + ISZERO: 1, + PUSH2: 13, + JUMPI: 5, + JUMPDEST: 12, + POP: 9, + CALLDATASIZE: 1, + LT: 1, + PUSH0: 5, + CALLDATALOAD: 1, + SHR: 1, + PUSH4: 3, + GT: 2, + EQ: 1, + JUMP: 8, + SLOAD: 1, + SWAP1: 7, + MLOAD: 2, + SWAP2: 4, + DUP3: 2, + ADD: 2, + DUP4: 1, + DUP5: 1, + DUP2: 2, + SWAP3: 1, + SUB: 1, + RETURN: 1 + } + ) + + let callTracerWithStateOverrides = { + tracer: 'callTracer', + tracerConfig: { + onlyTopCall: true + }, + stateOverrides: { + [contractAddress]: { + stateDiff: { + '0x0000000000000000000000000000000000000000000000000000000000000000': '0x00000000000000000000000000000000000000000000000000000000000003e8' + } + } + } + } + response = await helpers.callRPCMethod( + 'debug_traceCall', + [traceCall, 'latest', callTracerWithStateOverrides] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body) + + callTrace = response.body.result + assert.equal(callTrace.from, '0xfacf71692421039876a5bb4f10ef7a439d8ef61e') + assert.equal(callTrace.gas, '0x75ab') + assert.equal(callTrace.gasUsed, '0x5be0') + assert.equal(callTrace.to, '0x99a64c993965f8d69f985b5171bc20065cc32fab') + assert.equal(callTrace.input, '0x2e64cec1') + assert.equal( + callTrace.output, + '0x00000000000000000000000000000000000000000000000000000000000003e8' + ) + assert.equal(callTrace.value, '0x0') + assert.equal(callTrace.type, 'CALL') + + let updateData = deployed.contract.methods.store(1500).encodeABI() + let res = await helpers.signAndSend({ + from: conf.eoa.address, + to: contractAddress, + data: updateData, + value: '0', + gasPrice: conf.minGasPrice, + }) + assert.equal(res.receipt.status, conf.successStatus) + + let latestHeight = 
await web3.eth.getBlockNumber() + + // Assert value on previous block + response = await helpers.callRPCMethod( + 'debug_traceCall', + [traceCall, web3.utils.toHex(latestHeight - 1n), callTracer] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body) + + callTrace = response.body.result + assert.equal( + callTrace.output, + '0x0000000000000000000000000000000000000000000000000000000000000064' + ) + + // Assert value on latest block + response = await helpers.callRPCMethod( + 'debug_traceCall', + [traceCall, web3.utils.toHex(latestHeight), callTracer] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body) + + callTrace = response.body.result + assert.equal( + callTrace.output, + '0x00000000000000000000000000000000000000000000000000000000000005dc' + ) + + let flowBlockHeightData = deployed.contract.methods.verifyArchCallToFlowBlockHeight().encodeABI() + traceCall = { + from: conf.eoa.address, + to: contractAddress, + gas: '0xcdd4', + data: flowBlockHeightData, + value: '0x0', + gasPrice: web3.utils.toHex(conf.minGasPrice), + } + + callTracer = { + tracer: 'callTracer', + tracerConfig: { + onlyTopCall: false + } + } + + response = await helpers.callRPCMethod( + 'debug_traceCall', + [traceCall, web3.utils.toHex(latestHeight), callTracer] + ) + assert.equal(response.status, 200) + assert.isDefined(response.body) + + callTrace = response.body.result + assert.deepEqual( + callTrace, + { + from: conf.eoa.address.toLowerCase(), + gas: '0xcdd4', + gasUsed: '0xbdd4', + to: contractAddress.toLowerCase(), + input: '0xc550f90f', + output: '0x0000000000000000000000000000000000000000000000000000000000000007', + calls: [ + { + from: contractAddress.toLowerCase(), + gas: '0x6d44', + gasUsed: '0x5c8f', + to: '0x0000000000000000000000010000000000000001', + input: '0x53e87d66', + output: '0x0000000000000000000000000000000000000000000000000000000000000007', + type: 'STATICCALL' + } + ], + value: '0x0', + type: 'CALL' + } + ) +}) diff --git 
a/tests/web3js/eth_get_storage_at_test.js b/tests/web3js/eth_get_storage_at_test.js new file mode 100644 index 000000000..b1a5237cd --- /dev/null +++ b/tests/web3js/eth_get_storage_at_test.js @@ -0,0 +1,72 @@ +const { assert } = require('chai') +const conf = require('./config') +const helpers = require('./helpers') +const web3 = conf.web3 + +it('should retrieve storage slots of contracts', async () => { + let deployed = await helpers.deployContract('storage') + let contractAddress = deployed.receipt.contractAddress + + // make sure deploy results are correct + assert.equal(deployed.receipt.status, conf.successStatus) + + // get the default deployed value on contract + let callRetrieve = await deployed.contract.methods.retrieve().encodeABI() + let result = await web3.eth.call({ to: contractAddress, data: callRetrieve }, 'latest') + + let slot = 0 // The slot for the 'number' variable + let stored = await web3.eth.getStorageAt(contractAddress, slot, 'latest') + assert.equal(stored, result) + + // set the value on the contract, to its current value + let initValue = 1337 + let updateData = deployed.contract.methods.store(initValue).encodeABI() + // store a value in the contract + let res = await helpers.signAndSend({ + from: conf.eoa.address, + to: contractAddress, + data: updateData, + value: '0', + gasPrice: conf.minGasPrice, + }) + assert.equal(res.receipt.status, conf.successStatus) + + // check the new value on contract + result = await web3.eth.call({ to: contractAddress, data: callRetrieve }, 'latest') + assert.equal(result, initValue) + + // update the value on the contract + newValue = 100 + updateData = deployed.contract.methods.store(newValue).encodeABI() + // store a value in the contract + res = await helpers.signAndSend({ + from: conf.eoa.address, + to: contractAddress, + data: updateData, + value: '0', + gasPrice: conf.minGasPrice, + }) + assert.equal(res.receipt.status, conf.successStatus) + + let latestHeight = await web3.eth.getBlockNumber() + + // 
assert the storage slot on latest block + stored = await web3.eth.getStorageAt(contractAddress, slot, latestHeight) + value = web3.eth.abi.decodeParameter('uint256', stored) + assert.equal(value, 100n) + + // // assert the storage slot on previous block + stored = await web3.eth.getStorageAt(contractAddress, slot, latestHeight - 1n) + value = web3.eth.abi.decodeParameter('uint256', stored) + assert.equal(value, 1337n) + + // assert the storage slot on block of contract deployment + stored = await web3.eth.getStorageAt(contractAddress, slot, deployed.receipt.blockNumber) + value = web3.eth.abi.decodeParameter('uint256', stored) + assert.equal(value, 1337n) + + // assert the storage slot on block prior to contract deployment + stored = await web3.eth.getStorageAt(contractAddress, slot, deployed.receipt.blockNumber - 1n) + value = web3.eth.abi.decodeParameter('uint256', stored) + assert.equal(value, 0n) +}) diff --git a/tests/web3js/eth_non_interactive_test.js b/tests/web3js/eth_non_interactive_test.js index 2cff8052e..a260bc83c 100644 --- a/tests/web3js/eth_non_interactive_test.js +++ b/tests/web3js/eth_non_interactive_test.js @@ -27,7 +27,7 @@ it('get block', async () => { block.transactionsRoot, '0x0000000000000000000000000000000000000000000000000000000000000000' ) - assert.equal(block.size, 3995n) + assert.equal(block.size, 4028n) assert.equal(block.gasLimit, 120000000n) assert.equal(block.miner, '0x0000000000000000000000030000000000000000') assert.equal( @@ -379,7 +379,7 @@ it('get fee history', async () => { { oldestBlock: 1n, reward: [['0x96'], ['0x96'], ['0x96']], // gas price is 150 during testing - baseFeePerGas: [0n, 0n, 0n], + baseFeePerGas: [1n, 1n, 1n], gasUsedRatio: [0, 0, 0.006205458333333334] } ) diff --git a/tests/web3js/eth_rate_limit_test.js b/tests/web3js/eth_rate_limit_test.js index 4bcdce3f5..5f55c0e3c 100644 --- a/tests/web3js/eth_rate_limit_test.js +++ b/tests/web3js/eth_rate_limit_test.js @@ -1,32 +1,32 @@ const { assert } = require('chai') 
-const {Web3} = require("web3") +const { Web3 } = require('web3') it('rate limit after X requests', async function () { this.timeout(0) setTimeout(() => process.exit(0), 5000) // make sure the process exits - let ws = new Web3("ws://127.0.0.1:8545") + let ws = new Web3('ws://127.0.0.1:8545') // wait for ws connection to establish and reset rate-limit timer await new Promise(res => setTimeout(res, 1500)) // this should be synced with the value on server config - let requestLimit = 50 + let requestLimit = 500 let requestsMade = 0 let requestsFailed = 0 - let requests = 60 + let requests = 1000 for (let i = 0; i < requests; i++) { try { await ws.eth.getBlockNumber() requestsMade++ - } catch(e) { + } catch (e) { assert.equal(e.innerError.message, 'limit of requests per second reached') requestsFailed++ } } - assert.equal(requestsMade, requestLimit, "more requests made than the limit") - assert.equal(requestsFailed, requests-requestLimit, "failed requests don't match expected value") + assert.equal(requestsMade, requestLimit, 'more requests made than the limit') + assert.equal(requestsFailed, requests - requestLimit, 'failed requests don\'t match expected value') await new Promise(res => setTimeout(res, 1000)) @@ -38,14 +38,14 @@ it('rate limit after X requests', async function () { try { await ws.eth.getBlockNumber() requestsMade++ - } catch(e) { + } catch (e) { assert.equal(e.innerError.message, 'limit of requests per second reached') requestsFailed++ } } - assert.equal(requestsMade, requestLimit, "more requests made than the limit") - assert.equal(requestsFailed, requests-requestLimit, "failed requests don't match expected value") + assert.equal(requestsMade, requestLimit, 'more requests made than the limit') + assert.equal(requestsFailed, requests - requestLimit, 'failed requests don\'t match expected value') await ws.currentProvider.disconnect() }) diff --git a/tests/web3js/eth_revert_reason_test.js b/tests/web3js/eth_revert_reason_test.js index 
4578abfad..fa6617885 100644 --- a/tests/web3js/eth_revert_reason_test.js +++ b/tests/web3js/eth_revert_reason_test.js @@ -40,18 +40,21 @@ it('store revertReason field in transaction receipts', async () => { [signedTx.rawTransaction] ) assert.equal(200, response.status) + let txHash = response.body.result - let latestHeight = await web3.eth.getBlockNumber() - let block = await web3.eth.getBlock(latestHeight) - assert.equal(block.number, conf.startBlockHeight + 2n) + let rcp = null + // wait until the transaction is executed & indexed, and its + // receipt becomes available. + while (rcp == null) { + rcp = await helpers.callRPCMethod( + 'eth_getTransactionReceipt', + [txHash] + ) + if (rcp.body.result == null) { + rcp = null + } + } - let revertedTx = await web3.eth.getTransactionFromBlock(latestHeight, 0) - // Give some time to the engine to ingest the latest transaction - await new Promise(res => setTimeout(res, 1500)) - rcp = await helpers.callRPCMethod( - 'eth_getTransactionReceipt', - [revertedTx.hash] - ) // make sure the `revertReason` field is included in the response assert.equal( rcp.body['result'].revertReason, @@ -74,22 +77,24 @@ it('store revertReason field in transaction receipts', async () => { [signedTx.rawTransaction] ) assert.equal(200, response.status) + txHash = response.body.result - latestHeight = await web3.eth.getBlockNumber() - block = await web3.eth.getBlock(latestHeight) - assert.equal(block.number, conf.startBlockHeight + 3n) + rcp = null + // wait until the transaction is executed & indexed, and its + // receipt becomes available. 
+ while (rcp == null) { + rcp = await helpers.callRPCMethod( + 'eth_getTransactionReceipt', + [txHash] + ) + if (rcp.body.result == null) { + rcp = null + } + } - revertedTx = await web3.eth.getTransactionFromBlock(latestHeight, 0) - // Give some time to the engine to ingest the latest transaction - await new Promise(res => setTimeout(res, 1500)) - rcp = await helpers.callRPCMethod( - 'eth_getTransactionReceipt', - [revertedTx.hash] - ) // make sure the `revertReason` field is included in the response assert.equal( rcp.body['result'].revertReason, '0x9195785a00000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000001056616c756520697320746f6f206c6f7700000000000000000000000000000000' ) - }) diff --git a/tests/web3js/verify_cadence_arch_calls_test.js b/tests/web3js/verify_cadence_arch_calls_test.js new file mode 100644 index 000000000..914dc352f --- /dev/null +++ b/tests/web3js/verify_cadence_arch_calls_test.js @@ -0,0 +1,92 @@ +const { assert } = require('chai') +const conf = require('./config') +const helpers = require('./helpers') +const web3 = conf.web3 + +it('should be able to use Cadence Arch calls', async () => { + let latest = await web3.eth.getBlockNumber() + let expectedBlockHeight = conf.startBlockHeight + assert.equal(latest, expectedBlockHeight) + + let deployed = await helpers.deployContract('storage') + let contractAddress = deployed.receipt.contractAddress + + // submit a transaction that calls verifyArchCallToRandomSource(uint64 height) + let getRandomSourceData = deployed.contract.methods.verifyArchCallToRandomSource(2).encodeABI() + res = await helpers.signAndSend({ + from: conf.eoa.address, + to: contractAddress, + data: getRandomSourceData, + value: '0', + gasPrice: conf.minGasPrice, + }) + assert.equal(res.receipt.status, conf.successStatus) + + // make a contract call for verifyArchCallToRandomSource(uint64 height) 
+ res = await web3.eth.call({ to: contractAddress, data: getRandomSourceData }, 'latest') + assert.notEqual( + res, + '0x0000000000000000000000000000000000000000000000000000000000000000' + ) + assert.lengthOf(res, 66) + + // submit a transaction that calls verifyArchCallToRevertibleRandom() + let revertibleRandomData = deployed.contract.methods.verifyArchCallToRevertibleRandom().encodeABI() + res = await helpers.signAndSend({ + from: conf.eoa.address, + to: contractAddress, + data: revertibleRandomData, + value: '0', + gasPrice: conf.minGasPrice, + }) + assert.equal(res.receipt.status, conf.successStatus) + + // make a contract call for verifyArchCallToRevertibleRandom() + res = await web3.eth.call({ to: contractAddress, data: revertibleRandomData }, 'latest') + assert.notEqual( + res, + '0x0000000000000000000000000000000000000000000000000000000000000000' + ) + assert.lengthOf(res, 66) + + // submit a transaction that calls verifyArchCallToFlowBlockHeight() + let flowBlockHeightData = deployed.contract.methods.verifyArchCallToFlowBlockHeight().encodeABI() + res = await helpers.signAndSend({ + from: conf.eoa.address, + to: contractAddress, + data: flowBlockHeightData, + value: '0', + gasPrice: conf.minGasPrice, + }) + assert.equal(res.receipt.status, conf.successStatus) + + // make a contract call for verifyArchCallToFlowBlockHeight() + res = await web3.eth.call({ to: contractAddress, data: flowBlockHeightData }, 'latest') + assert.equal( + web3.eth.abi.decodeParameter('uint64', res), + 7n, + ) + + // submit a transaction that calls verifyArchCallToVerifyCOAOwnershipProof(address,bytes32,bytes) + let tx = await web3.eth.getTransactionFromBlock(conf.startBlockHeight, 1) + let verifyCOAOwnershipProofData = deployed.contract.methods.verifyArchCallToVerifyCOAOwnershipProof( + tx.to, + '0x1bacdb569847f31ade07e83d6bb7cefba2b9290b35d5c2964663215e73519cff', + 
web3.utils.hexToBytes('f853c18088f8d6e0586b0a20c78365766df842b840b90448f4591df2639873be2914c5560149318b7e2fcf160f7bb8ed13cfd97be2f54e6889606f18e50b2c37308386f840e03a9fff915f57b2164cba27f0206a95') + ).encodeABI() + res = await helpers.signAndSend({ + from: conf.eoa.address, + to: contractAddress, + data: verifyCOAOwnershipProofData, + value: '0', + gasPrice: conf.minGasPrice, + }) + assert.equal(res.receipt.status, conf.successStatus) + + // make a contract call for verifyArchCallToVerifyCOAOwnershipProof(address,bytes32,bytes) + res = await web3.eth.call({ to: contractAddress, data: verifyCOAOwnershipProofData }, 'latest') + assert.equal( + web3.eth.abi.decodeParameter('bool', res), + false, + ) +})