From 300df874d789eba38fe652d270c8998c8759937c Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Wed, 25 Oct 2023 14:08:52 +0200 Subject: [PATCH 01/41] cmd/evm: make t8ntool handle transaction decoding errors better (#28397) This change closes https://github.com/ethereum/go-ethereum/issues/27730 . By using an iterator instead of a slice of transactions, we can better handle the case when an individual transaction (within an otherwise well-formed RLP-list) cannot be decoded. --- cmd/evm/internal/t8ntool/execution.go | 37 +++-- cmd/evm/internal/t8ntool/transition.go | 130 +--------------- cmd/evm/internal/t8ntool/tx_iterator.go | 194 ++++++++++++++++++++++++ cmd/evm/t8n_test.go | 8 + cmd/evm/testdata/30/README.txt | 77 ++++++++++ cmd/evm/testdata/30/alloc.json | 23 +++ cmd/evm/testdata/30/env.json | 23 +++ cmd/evm/testdata/30/exp.json | 64 ++++++++ cmd/evm/testdata/30/txs.rlp | 1 + cmd/evm/testdata/30/txs_more.rlp | 1 + 10 files changed, 420 insertions(+), 138 deletions(-) create mode 100644 cmd/evm/internal/t8ntool/tx_iterator.go create mode 100644 cmd/evm/testdata/30/README.txt create mode 100644 cmd/evm/testdata/30/alloc.json create mode 100644 cmd/evm/testdata/30/env.json create mode 100644 cmd/evm/testdata/30/exp.json create mode 100644 cmd/evm/testdata/30/txs.rlp create mode 100644 cmd/evm/testdata/30/txs_more.rlp diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 312f427d4c6b..5cac5f07f886 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -116,8 +116,8 @@ type rejectedTx struct { // Apply applies a set of transactions to a pre-state func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, - txs types.Transactions, miningReward int64, - getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error)) (*state.StateDB, *ExecutionResult, error) { + txIt txIterator, miningReward int64, + getTracerFn func(txIndex int, txHash 
common.Hash) (tracer vm.EVMLogger, err error)) (*state.StateDB, *ExecutionResult, []byte, error) { // Capture errors for BLOCKHASH operation, if we haven't been supplied the // required blockhashes var hashError error @@ -190,25 +190,39 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, core.ProcessBeaconBlockRoot(*beaconRoot, evm, statedb) } var blobGasUsed uint64 - for i, tx := range txs { + + for i := 0; txIt.Next(); i++ { + tx, err := txIt.Tx() + if err != nil { + log.Warn("rejected tx", "index", i, "error", err) + rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) + continue + } if tx.Type() == types.BlobTxType && vmContext.BlobBaseFee == nil { errMsg := "blob tx used but field env.ExcessBlobGas missing" log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", errMsg) rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg}) continue } - if tx.Type() == types.BlobTxType { - blobGasUsed += uint64(params.BlobTxBlobGasPerBlob * len(tx.BlobHashes())) - } msg, err := core.TransactionToMessage(tx, signer, pre.Env.BaseFee) if err != nil { log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", err) rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) continue } + if tx.Type() == types.BlobTxType { + txBlobGas := uint64(params.BlobTxBlobGasPerBlob * len(tx.BlobHashes())) + if used, max := blobGasUsed+txBlobGas, uint64(params.MaxBlobGasPerBlock); used > max { + err := fmt.Errorf("blob gas (%d) would exceed maximum allowance %d", used, max) + log.Warn("rejected tx", "index", i, "err", err) + rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) + continue + } + blobGasUsed += txBlobGas + } tracer, err := getTracerFn(txIndex, tx.Hash()) if err != nil { - return nil, nil, err + return nil, nil, nil, err } vmConfig.Tracer = tracer statedb.SetTxContext(tx.Hash(), txIndex) @@ -231,7 +245,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, } includedTxs = 
append(includedTxs, tx) if hashError != nil { - return nil, nil, NewError(ErrorMissingBlockhash, hashError) + return nil, nil, nil, NewError(ErrorMissingBlockhash, hashError) } gasUsed += msgResult.UsedGas @@ -306,7 +320,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, // Commit block root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber)) if err != nil { - return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err)) + return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err)) } execRs := &ExecutionResult{ StateRoot: root, @@ -332,9 +346,10 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, // for accessing latest states. statedb, err = state.New(root, statedb.Database(), nil) if err != nil { - return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err)) + return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err)) } - return statedb, execRs, nil + body, _ := rlp.EncodeToBytes(includedTxs) + return statedb, execRs, body, nil } func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB { diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 600bc460f726..d517592e5cfb 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -17,14 +17,12 @@ package t8ntool import ( - "crypto/ecdsa" "encoding/json" "errors" "fmt" "math/big" "os" "path" - "strings" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -33,11 +31,9 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" 
"github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/tests" "github.com/urfave/cli/v2" ) @@ -147,7 +143,7 @@ func Transition(ctx *cli.Context) error { // Check if anything needs to be read from stdin var ( prestate Prestate - txs types.Transactions // txs to apply + txIt txIterator // txs to apply allocStr = ctx.String(InputAllocFlag.Name) envStr = ctx.String(InputEnvFlag.Name) @@ -192,7 +188,7 @@ func Transition(ctx *cli.Context) error { // Set the chain id chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name)) - if txs, err = loadTransactions(txStr, inputData, prestate.Env, chainConfig); err != nil { + if txIt, err = loadTransactions(txStr, inputData, prestate.Env, chainConfig); err != nil { return err } if err := applyLondonChecks(&prestate.Env, chainConfig); err != nil { @@ -208,136 +204,16 @@ func Transition(ctx *cli.Context) error { return err } // Run the test and aggregate the result - s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer) + s, result, body, err := prestate.Apply(vmConfig, chainConfig, txIt, ctx.Int64(RewardFlag.Name), getTracer) if err != nil { return err } - body, _ := rlp.EncodeToBytes(txs) // Dump the excution result collector := make(Alloc) s.DumpToCollector(collector, nil) return dispatchOutput(ctx, baseDir, result, collector, body) } -// txWithKey is a helper-struct, to allow us to use the types.Transaction along with -// a `secretKey`-field, for input -type txWithKey struct { - key *ecdsa.PrivateKey - tx *types.Transaction - protected bool -} - -func (t *txWithKey) UnmarshalJSON(input []byte) error { - // Read the metadata, if present - type txMetadata struct { - Key *common.Hash `json:"secretKey"` - Protected *bool `json:"protected"` - } - var data txMetadata - if err := json.Unmarshal(input, &data); err != nil { - return err - } - if data.Key != nil { - k := data.Key.Hex()[2:] - if ecdsaKey, err := crypto.HexToECDSA(k); 
err != nil { - return err - } else { - t.key = ecdsaKey - } - } - if data.Protected != nil { - t.protected = *data.Protected - } else { - t.protected = true - } - // Now, read the transaction itself - var tx types.Transaction - if err := json.Unmarshal(input, &tx); err != nil { - return err - } - t.tx = &tx - return nil -} - -// signUnsignedTransactions converts the input txs to canonical transactions. -// -// The transactions can have two forms, either -// 1. unsigned or -// 2. signed -// -// For (1), r, s, v, need so be zero, and the `secretKey` needs to be set. -// If so, we sign it here and now, with the given `secretKey` -// If the condition above is not met, then it's considered a signed transaction. -// -// To manage this, we read the transactions twice, first trying to read the secretKeys, -// and secondly to read them with the standard tx json format -func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Transactions, error) { - var signedTxs []*types.Transaction - for i, tx := range txs { - var ( - v, r, s = tx.tx.RawSignatureValues() - signed *types.Transaction - err error - ) - if tx.key == nil || v.BitLen()+r.BitLen()+s.BitLen() != 0 { - // Already signed - signedTxs = append(signedTxs, tx.tx) - continue - } - // This transaction needs to be signed - if tx.protected { - signed, err = types.SignTx(tx.tx, signer, tx.key) - } else { - signed, err = types.SignTx(tx.tx, types.FrontierSigner{}, tx.key) - } - if err != nil { - return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err)) - } - signedTxs = append(signedTxs, signed) - } - return signedTxs, nil -} - -func loadTransactions(txStr string, inputData *input, env stEnv, chainConfig *params.ChainConfig) (types.Transactions, error) { - var txsWithKeys []*txWithKey - var signed types.Transactions - if txStr != stdinSelector { - data, err := os.ReadFile(txStr) - if err != nil { - return nil, NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err)) - } - 
if strings.HasSuffix(txStr, ".rlp") { // A file containing an rlp list - var body hexutil.Bytes - if err := json.Unmarshal(data, &body); err != nil { - return nil, err - } - // Already signed transactions - if err := rlp.DecodeBytes(body, &signed); err != nil { - return nil, err - } - return signed, nil - } - if err := json.Unmarshal(data, &txsWithKeys); err != nil { - return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err)) - } - } else { - if len(inputData.TxRlp) > 0 { - // Decode the body of already signed transactions - body := common.FromHex(inputData.TxRlp) - // Already signed transactions - if err := rlp.DecodeBytes(body, &signed); err != nil { - return nil, err - } - return signed, nil - } - // JSON encoded transactions - txsWithKeys = inputData.Txs - } - // We may have to sign the transactions. - signer := types.LatestSignerForChainID(chainConfig.ChainID) - return signUnsignedTransactions(txsWithKeys, signer) -} - func applyLondonChecks(env *stEnv, chainConfig *params.ChainConfig) error { if !chainConfig.IsLondon(big.NewInt(int64(env.Number))) { return nil diff --git a/cmd/evm/internal/t8ntool/tx_iterator.go b/cmd/evm/internal/t8ntool/tx_iterator.go new file mode 100644 index 000000000000..8f28dc70223b --- /dev/null +++ b/cmd/evm/internal/t8ntool/tx_iterator.go @@ -0,0 +1,194 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package t8ntool + +import ( + "bytes" + "crypto/ecdsa" + "encoding/json" + "fmt" + "io" + "os" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" +) + +// txWithKey is a helper-struct, to allow us to use the types.Transaction along with +// a `secretKey`-field, for input +type txWithKey struct { + key *ecdsa.PrivateKey + tx *types.Transaction + protected bool +} + +func (t *txWithKey) UnmarshalJSON(input []byte) error { + // Read the metadata, if present + type txMetadata struct { + Key *common.Hash `json:"secretKey"` + Protected *bool `json:"protected"` + } + var data txMetadata + if err := json.Unmarshal(input, &data); err != nil { + return err + } + if data.Key != nil { + k := data.Key.Hex()[2:] + if ecdsaKey, err := crypto.HexToECDSA(k); err != nil { + return err + } else { + t.key = ecdsaKey + } + } + if data.Protected != nil { + t.protected = *data.Protected + } else { + t.protected = true + } + // Now, read the transaction itself + var tx types.Transaction + if err := json.Unmarshal(input, &tx); err != nil { + return err + } + t.tx = &tx + return nil +} + +// signUnsignedTransactions converts the input txs to canonical transactions. +// +// The transactions can have two forms, either +// 1. unsigned or +// 2. signed +// +// For (1), r, s, v, need so be zero, and the `secretKey` needs to be set. +// If so, we sign it here and now, with the given `secretKey` +// If the condition above is not met, then it's considered a signed transaction. 
+// +// To manage this, we read the transactions twice, first trying to read the secretKeys, +// and secondly to read them with the standard tx json format +func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Transactions, error) { + var signedTxs []*types.Transaction + for i, tx := range txs { + var ( + v, r, s = tx.tx.RawSignatureValues() + signed *types.Transaction + err error + ) + if tx.key == nil || v.BitLen()+r.BitLen()+s.BitLen() != 0 { + // Already signed + signedTxs = append(signedTxs, tx.tx) + continue + } + // This transaction needs to be signed + if tx.protected { + signed, err = types.SignTx(tx.tx, signer, tx.key) + } else { + signed, err = types.SignTx(tx.tx, types.FrontierSigner{}, tx.key) + } + if err != nil { + return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err)) + } + signedTxs = append(signedTxs, signed) + } + return signedTxs, nil +} + +func loadTransactions(txStr string, inputData *input, env stEnv, chainConfig *params.ChainConfig) (txIterator, error) { + var txsWithKeys []*txWithKey + if txStr != stdinSelector { + data, err := os.ReadFile(txStr) + if err != nil { + return nil, NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err)) + } + if strings.HasSuffix(txStr, ".rlp") { // A file containing an rlp list + var body hexutil.Bytes + if err := json.Unmarshal(data, &body); err != nil { + return nil, err + } + return newRlpTxIterator(body), nil + } + if err := json.Unmarshal(data, &txsWithKeys); err != nil { + return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err)) + } + } else { + if len(inputData.TxRlp) > 0 { + // Decode the body of already signed transactions + return newRlpTxIterator(common.FromHex(inputData.TxRlp)), nil + } + // JSON encoded transactions + txsWithKeys = inputData.Txs + } + // We may have to sign the transactions. 
+ signer := types.LatestSignerForChainID(chainConfig.ChainID) + txs, err := signUnsignedTransactions(txsWithKeys, signer) + return newSliceTxIterator(txs), err +} + +type txIterator interface { + // Next returns true until EOF + Next() bool + // Tx returns the next transaction, OR an error. + Tx() (*types.Transaction, error) +} + +type sliceTxIterator struct { + idx int + txs []*types.Transaction +} + +func newSliceTxIterator(transactions types.Transactions) txIterator { + return &sliceTxIterator{0, transactions} +} + +func (ait *sliceTxIterator) Next() bool { + return ait.idx < len(ait.txs) +} + +func (ait *sliceTxIterator) Tx() (*types.Transaction, error) { + if ait.idx < len(ait.txs) { + ait.idx++ + return ait.txs[ait.idx-1], nil + } + return nil, io.EOF +} + +type rlpTxIterator struct { + in *rlp.Stream +} + +func newRlpTxIterator(rlpData []byte) txIterator { + in := rlp.NewStream(bytes.NewBuffer(rlpData), 1024*1024) + in.List() + return &rlpTxIterator{in} +} + +func (it *rlpTxIterator) Next() bool { + return it.in.MoreDataInList() +} + +func (it *rlpTxIterator) Tx() (*types.Transaction, error) { + var a types.Transaction + if err := it.in.Decode(&a); err != nil { + return nil, err + } + return &a, nil +} diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go index ad09a6b4d61f..efacbe95ae8d 100644 --- a/cmd/evm/t8n_test.go +++ b/cmd/evm/t8n_test.go @@ -275,6 +275,14 @@ func TestT8n(t *testing.T) { output: t8nOutput{alloc: true, result: true}, expOut: "exp.json", }, + { // More cancun test, plus example of rlp-transaction that cannot be decoded properly + base: "./testdata/30", + input: t8nInput{ + "alloc.json", "txs_more.rlp", "env.json", "Cancun", "", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, } { args := []string{"t8n"} args = append(args, tc.output.get()...) 
diff --git a/cmd/evm/testdata/30/README.txt b/cmd/evm/testdata/30/README.txt new file mode 100644 index 000000000000..84c92de8530c --- /dev/null +++ b/cmd/evm/testdata/30/README.txt @@ -0,0 +1,77 @@ +This example comes from https://github.com/ethereum/go-ethereum/issues/27730. +The input transactions contain three transactions, number `0` and `2` are taken from +`testdata/13`, whereas number `1` is taken from #27730. + +The problematic second transaction cannot be RLP-decoded, and the expectation is +that that particular transaction should be rejected, but number `0` and `1` should +still be accepted. + +``` +$ go run . t8n --input.alloc=./testdata/30/alloc.json --input.txs=./testdata/30/txs_more.rlp --input.env=./testdata/30/env.json --output.result=stdout --output.alloc=stdout --state.fork=Cancun +WARN [10-22|15:38:03.283] rejected tx index=1 error="rlp: input string too short for common.Address, decoding into (types.Transaction)(types.BlobTx).To" +INFO [10-22|15:38:03.284] Trie dumping started root=348312..915c93 +INFO [10-22|15:38:03.284] Trie dumping complete accounts=3 elapsed="160.831ยตs" +{ + "alloc": { + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87": { + "code": "0x60004960005500", + "balance": "0xde0b6b3a7640000" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xde0b6b3a7640000" + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363": { + "balance": "0xfffffffb8390", + "nonce": "0x3" + } + }, + "result": { + "stateRoot": "0x3483124b6710486c9fb3e07975669c66924697c88cccdcc166af5e1218915c93", + "txRoot": "0x013509c8563d41c0ae4bf38f2d6d19fc6512a1d0d6be045079c8c9f68bf45f9d", + "receiptsRoot": "0x75308898d571eafb5cd8cde8278bf5b3d13c5f6ec074926de3bb895b519264e1", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x2", + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x5208", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x5208", + "effectiveGasPrice": null, + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0xa410", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x5208", + "effectiveGasPrice": null, + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x1" + } + ], + "rejected": [ + { + "index": 1, + "error": "rlp: input string too short for common.Address, decoding into (types.Transaction)(types.BlobTx).To" + } + ], + "currentDifficulty": null, + "gasUsed": "0xa410", + "currentBaseFee": "0x7", + "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" + } +} + +``` \ No newline at end of file diff --git a/cmd/evm/testdata/30/alloc.json b/cmd/evm/testdata/30/alloc.json new file mode 100644 index 000000000000..6bc93d255210 --- /dev/null +++ b/cmd/evm/testdata/30/alloc.json @@ -0,0 +1,23 @@ +{ + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x60004960005500", + "nonce" : "0x00", + "storage" : { + } + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363" : { + "balance": "0x01000000000000", + "code": "0x", + "nonce": "0x01", + "storage": { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/30/env.json b/cmd/evm/testdata/30/env.json new 
file mode 100644 index 000000000000..4acd9794be8f --- /dev/null +++ b/cmd/evm/testdata/30/env.json @@ -0,0 +1,23 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentNumber" : "0x01", + "currentTimestamp" : "0x03e8", + "currentGasLimit" : "0x1000000000", + "previousHash" : "0xe4e2a30b340bec696242b67584264f878600dce98354ae0b6328740fd4ff18da", + "currentDataGasUsed" : "0x2000", + "parentTimestamp" : "0x00", + "parentDifficulty" : "0x00", + "parentUncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "parentBeaconBlockRoot" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "currentRandom" : "0x0000000000000000000000000000000000000000000000000000000000020000", + "withdrawals" : [ + ], + "parentBaseFee" : "0x08", + "parentGasUsed" : "0x00", + "parentGasLimit" : "0x1000000000", + "parentExcessBlobGas" : "0x1000", + "parentBlobGasUsed" : "0x2000", + "blockHashes" : { + "0" : "0xe4e2a30b340bec696242b67584264f878600dce98354ae0b6328740fd4ff18da" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/30/exp.json b/cmd/evm/testdata/30/exp.json new file mode 100644 index 000000000000..f0b19c6b3d3a --- /dev/null +++ b/cmd/evm/testdata/30/exp.json @@ -0,0 +1,64 @@ +{ + "alloc": { + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87": { + "code": "0x60004960005500", + "balance": "0xde0b6b3a7640000" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xde0b6b3a7640000" + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363": { + "balance": "0xfffffffb8390", + "nonce": "0x3" + } + }, + "result": { + "stateRoot": "0x3483124b6710486c9fb3e07975669c66924697c88cccdcc166af5e1218915c93", + "txRoot": "0x013509c8563d41c0ae4bf38f2d6d19fc6512a1d0d6be045079c8c9f68bf45f9d", + "receiptsRoot": "0x75308898d571eafb5cd8cde8278bf5b3d13c5f6ec074926de3bb895b519264e1", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x2", + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x5208", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x5208", + "effectiveGasPrice": null, + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0xa410", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x5208", + "effectiveGasPrice": null, + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x1" + } + ], + "rejected": [ + { + "index": 1, + "error": "rlp: input string too short for common.Address, decoding into (types.Transaction)(types.BlobTx).To" + } + ], + "currentDifficulty": null, + "gasUsed": "0xa410", + "currentBaseFee": "0x7", + "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "currentExcessBlobGas": "0x0", + "blobGasUsed": "0x0" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/30/txs.rlp b/cmd/evm/testdata/30/txs.rlp new file mode 100644 index 000000000000..620c1a13ac70 --- /dev/null +++ b/cmd/evm/testdata/30/txs.rlp @@ -0,0 +1 @@ +"0xf8dbb8d903f8d601800285012a05f200833d090080830186a000f85bf85994095e7baea6a6c7c4c2dfeb977efac326af552d87f842a00000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000010ae1a001a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d880a0fc12b67159a3567f8bdbc49e0be369a2e20e09d57a51c41310543a4128409464a02de0cfe5495c4f58ff60645ceda0afd67a4c90a70bc89fe207269435b35e5b67" \ No newline at end of file diff --git a/cmd/evm/testdata/30/txs_more.rlp 
b/cmd/evm/testdata/30/txs_more.rlp new file mode 100644 index 000000000000..35af8d1f2300 --- /dev/null +++ b/cmd/evm/testdata/30/txs_more.rlp @@ -0,0 +1 @@ +"0xf901adb86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b8d903f8d601800285012a05f200833d090080830186a000f85bf85994095e7baea6a6c7c4c2dfeb977efac326af552d87f842a00000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000010ae1a001a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d880a0fc12b67159a3567f8bdbc49e0be369a2e20e09d57a51c41310543a4128409464a02de0cfe5495c4f58ff60645ceda0afd67a4c90a70bc89fe207269435b35e5b67b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9" \ No newline at end of file From 96b75033c08c4edbfd3b3aa356f3e7d4cce4f2b8 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Wed, 25 Oct 2023 14:53:50 +0200 Subject: [PATCH 02/41] trie: use explicit errors in stacktrie (instead of panic) (#28361) This PR removes panics from stacktrie (mostly), and makes the Update return errors instead. While adding tests for this, I also found that one case of possible corruption was not caught, which is now fixed. 
--- core/state/snapshot/generate.go | 4 +++- trie/stacktrie.go | 9 ++++++--- trie/stacktrie_test.go | 22 ++++++++++++++++++++++ 3 files changed, 31 insertions(+), 4 deletions(-) diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go index 204584c956ea..adeaa1daa01e 100644 --- a/core/state/snapshot/generate.go +++ b/core/state/snapshot/generate.go @@ -230,7 +230,9 @@ func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix [ if origin == nil && !diskMore { stackTr := trie.NewStackTrie(nil) for i, key := range keys { - stackTr.Update(key, vals[i]) + if err := stackTr.Update(key, vals[i]); err != nil { + return nil, err + } } if gotRoot := stackTr.Hash(); gotRoot != root { return &proofResult{ diff --git a/trie/stacktrie.go b/trie/stacktrie.go index 423afdec8816..f2f5355c49e8 100644 --- a/trie/stacktrie.go +++ b/trie/stacktrie.go @@ -18,6 +18,7 @@ package trie import ( "bytes" + "errors" "sync" "github.com/ethereum/go-ethereum/common" @@ -92,12 +93,14 @@ func NewStackTrie(options *StackTrieOptions) *StackTrie { // Update inserts a (key, value) pair into the stack trie. func (t *StackTrie) Update(key, value []byte) error { - k := keybytesToHex(key) if len(value) == 0 { - panic("deletion not supported") + return errors.New("trying to insert empty (deletion)") } + k := keybytesToHex(key) k = k[:len(k)-1] // chop the termination flag - + if bytes.Compare(t.last, k) >= 0 { + return errors.New("non-ascending key order") + } // track the first and last inserted entries. if t.first == nil { t.first = append([]byte{}, k...) 
diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go index 629586e2b1bf..909a77062aab 100644 --- a/trie/stacktrie_test.go +++ b/trie/stacktrie_test.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/trie/testutil" + "github.com/stretchr/testify/assert" "golang.org/x/exp/slices" ) @@ -463,3 +464,24 @@ func TestPartialStackTrie(t *testing.T) { } } } + +func TestStackTrieErrors(t *testing.T) { + s := NewStackTrie(nil) + // Deletion + if err := s.Update(nil, nil); err == nil { + t.Fatal("expected error") + } + if err := s.Update(nil, []byte{}); err == nil { + t.Fatal("expected error") + } + if err := s.Update([]byte{0xa}, []byte{}); err == nil { + t.Fatal("expected error") + } + // Non-ascending keys (going backwards or repeating) + assert.Nil(t, s.Update([]byte{0xaa}, []byte{0xa})) + assert.NotNil(t, s.Update([]byte{0xaa}, []byte{0xa}), "repeat insert same key") + assert.NotNil(t, s.Update([]byte{0xaa}, []byte{0xb}), "repeat insert same key") + assert.Nil(t, s.Update([]byte{0xab}, []byte{0xa})) + assert.NotNil(t, s.Update([]byte{0x10}, []byte{0xb}), "out of order insert") + assert.NotNil(t, s.Update([]byte{0xaa}, []byte{0xb}), "repeat insert same key") +} From f7b62e550696c3a7379d17228744c65e2dd29e7b Mon Sep 17 00:00:00 2001 From: Delweng Date: Wed, 25 Oct 2023 21:20:23 +0800 Subject: [PATCH 03/41] graphql: logs return error if from block > to (#28412) As per discussion in ethereum/execution-apis#475 Signed-off-by: jsvisa --- graphql/graphql.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/graphql/graphql.go b/graphql/graphql.go index ec7382f80adc..50f0c6ca4715 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -1417,6 +1417,9 @@ func (r *Resolver) Logs(ctx context.Context, args struct{ Filter FilterCriteria if args.Filter.ToBlock != nil { end = int64(*args.Filter.ToBlock) } + if begin > 0 && end > 0 && begin > end { + return nil, 
errInvalidBlockRange + } var addresses []common.Address if args.Filter.Addresses != nil { addresses = *args.Filter.Addresses From d8c6ae054c8e120c72d20eeac805f1b95e5802a2 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Wed, 25 Oct 2023 23:52:42 +1000 Subject: [PATCH 04/41] rpc: use correct stringer-method for serializing BlockNumberOrHash (#28358) The String() version of BlockNumberOrHash uses decimal for all block numbers, including negative ones used to indicate labels. Switch to using BlockNumber.String() which encodes it correctly for use in the JSON-RPC API. --- rpc/types.go | 3 +-- rpc/types_test.go | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/rpc/types.go b/rpc/types.go index 34a1451deaa7..f88c37c59dad 100644 --- a/rpc/types.go +++ b/rpc/types.go @@ -21,7 +21,6 @@ import ( "encoding/json" "fmt" "math" - "strconv" "strings" "github.com/ethereum/go-ethereum/common" @@ -221,7 +220,7 @@ func (bnh *BlockNumberOrHash) Number() (BlockNumber, bool) { func (bnh *BlockNumberOrHash) String() string { if bnh.BlockNumber != nil { - return strconv.Itoa(int(*bnh.BlockNumber)) + return bnh.BlockNumber.String() } if bnh.BlockHash != nil { return bnh.BlockHash.String() diff --git a/rpc/types_test.go b/rpc/types_test.go index f110dee7c6ff..617f441d9166 100644 --- a/rpc/types_test.go +++ b/rpc/types_test.go @@ -153,3 +153,24 @@ func TestBlockNumberOrHash_WithNumber_MarshalAndUnmarshal(t *testing.T) { }) } } + +func TestBlockNumberOrHash_StringAndUnmarshal(t *testing.T) { + tests := []BlockNumberOrHash{ + BlockNumberOrHashWithNumber(math.MaxInt64), + BlockNumberOrHashWithNumber(PendingBlockNumber), + BlockNumberOrHashWithNumber(LatestBlockNumber), + BlockNumberOrHashWithNumber(EarliestBlockNumber), + BlockNumberOrHashWithNumber(32), + BlockNumberOrHashWithHash(common.Hash{0xaa}, false), + } + for _, want := range tests { + marshalled, _ := json.Marshal(want.String()) + var have BlockNumberOrHash + if err := 
json.Unmarshal(marshalled, &have); err != nil { + t.Fatalf("cannot unmarshal (%v): %v", string(marshalled), err) + } + if !reflect.DeepEqual(want, have) { + t.Fatalf("wrong result: have %v, want %v", have, want) + } + } +} From 58ae1df6840e512b263a4fc2e021e1ec5637ca21 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Wed, 25 Oct 2023 17:57:12 +0200 Subject: [PATCH 05/41] cmd/geth: test for logging-output (#28373) This PR is a bit in preparation for the slog work in #28187 . Our current test re logging mostly test the internals, but we have no real end-to-end test of the logging output. This PR introduces a simple reexec-based log tester. This also relies upon a special mode in geth, which can be made to eject a set of predefined log messages (only available if the build-tag `integrationtests` is used e.g. go run --tags=integrationtests ./cmd/geth --log.format terminal logtest While working on this, I also noticed a quirk in the setup: when geth was configured to use a file output, then two separate handlers were used (one handler for the file, one handler for the console). Using two separate handlers means that two formatters are used, thus the formatting of any/all records happened twice. This PR changes the mechanism to use two separate io.Writers instead, which is both more optimal and fixes a bug which occurs due to a global statefulness in the formatter. 
--- build/ci.go | 3 + cmd/geth/logging_test.go | 185 ++++++++++++++++++ cmd/geth/logtestcmd_active.go | 134 +++++++++++++ cmd/geth/logtestcmd_inactive.go | 23 +++ cmd/geth/main.go | 3 + cmd/geth/testdata/logging/logtest-logfmt.txt | 39 ++++ .../testdata/logging/logtest-terminal.txt | 40 ++++ internal/debug/flags.go | 20 +- log/format.go | 8 + log/format_test.go | 139 ------------- 10 files changed, 445 insertions(+), 149 deletions(-) create mode 100644 cmd/geth/logging_test.go create mode 100644 cmd/geth/logtestcmd_active.go create mode 100644 cmd/geth/logtestcmd_inactive.go create mode 100644 cmd/geth/testdata/logging/logtest-logfmt.txt create mode 100644 cmd/geth/testdata/logging/logtest-terminal.txt diff --git a/build/ci.go b/build/ci.go index 46f1ac28121a..afe1c332b8cb 100644 --- a/build/ci.go +++ b/build/ci.go @@ -307,6 +307,9 @@ func doTest(cmdline []string) { // Enable CKZG backend in CI. gotest.Args = append(gotest.Args, "-tags=ckzg") + // Enable integration-tests + gotest.Args = append(gotest.Args, "-tags=integrationtests") + // Test a single package at a time. CI builders are slow // and some tests run into timeouts under load. gotest.Args = append(gotest.Args, "-p", "1") diff --git a/cmd/geth/logging_test.go b/cmd/geth/logging_test.go new file mode 100644 index 000000000000..cc951d6e97fa --- /dev/null +++ b/cmd/geth/logging_test.go @@ -0,0 +1,185 @@ +//go:build integrationtests + +// Copyright 2023 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math/rand" + "os" + "os/exec" + "strings" + "testing" + + "github.com/docker/docker/pkg/reexec" +) + +func runSelf(args ...string) ([]byte, error) { + cmd := &exec.Cmd{ + Path: reexec.Self(), + Args: append([]string{"geth-test"}, args...), + } + return cmd.CombinedOutput() +} + +func split(input io.Reader) []string { + var output []string + scanner := bufio.NewScanner(input) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + output = append(output, strings.TrimSpace(scanner.Text())) + } + return output +} + +func censor(input string, start, end int) string { + if len(input) < end { + return input + } + return input[:start] + strings.Repeat("X", end-start) + input[end:] +} + +func TestLogging(t *testing.T) { + testConsoleLogging(t, "terminal", 6, 24) + testConsoleLogging(t, "logfmt", 2, 26) +} + +func testConsoleLogging(t *testing.T, format string, tStart, tEnd int) { + haveB, err := runSelf("--log.format", format, "logtest") + if err != nil { + t.Fatal(err) + } + readFile, err := os.Open(fmt.Sprintf("testdata/logging/logtest-%v.txt", format)) + if err != nil { + t.Fatal(err) + } + wantLines := split(readFile) + haveLines := split(bytes.NewBuffer(haveB)) + for i, want := range wantLines { + if i > len(haveLines)-1 { + t.Fatalf("format %v, line %d missing, want:%v", format, i, want) + } + have := haveLines[i] + for strings.Contains(have, "Unknown config environment variable") { + // This can happen on CI runs. Drop it. + haveLines = append(haveLines[:i], haveLines[i+1:]...) 
+ have = haveLines[i] + } + + // Black out the timestamp + have = censor(have, tStart, tEnd) + want = censor(want, tStart, tEnd) + if have != want { + t.Logf(nicediff([]byte(have), []byte(want))) + t.Fatalf("format %v, line %d\nhave %v\nwant %v", format, i, have, want) + } + } + if len(haveLines) != len(wantLines) { + t.Errorf("format %v, want %d lines, have %d", format, len(haveLines), len(wantLines)) + } +} + +func TestVmodule(t *testing.T) { + checkOutput := func(level int, want, wantNot string) { + t.Helper() + output, err := runSelf("--log.format", "terminal", "--verbosity=0", "--log.vmodule", fmt.Sprintf("logtestcmd_active.go=%d", level), "logtest") + if err != nil { + t.Fatal(err) + } + if len(want) > 0 && !strings.Contains(string(output), want) { // trace should be present at 5 + t.Errorf("failed to find expected string ('%s') in output", want) + } + if len(wantNot) > 0 && strings.Contains(string(output), wantNot) { // trace should be present at 5 + t.Errorf("string ('%s') should not be present in output", wantNot) + } + } + checkOutput(5, "log at level trace", "") // trace should be present at 5 + checkOutput(4, "log at level debug", "log at level trace") // debug should be present at 4, but trace should be missing + checkOutput(3, "log at level info", "log at level debug") // info should be present at 3, but debug should be missing + checkOutput(2, "log at level warn", "log at level info") // warn should be present at 2, but info should be missing + checkOutput(1, "log at level error", "log at level warn") // error should be present at 1, but warn should be missing +} + +func nicediff(have, want []byte) string { + var i = 0 + for ; i < len(have) && i < len(want); i++ { + if want[i] != have[i] { + break + } + } + var end = i + 40 + var start = i - 50 + if start < 0 { + start = 0 + } + var h, w string + if end < len(have) { + h = string(have[start:end]) + } else { + h = string(have[start:]) + } + if end < len(want) { + w = string(want[start:end]) + } else { 
+ w = string(want[start:]) + } + return fmt.Sprintf("have vs want:\n%q\n%q\n", h, w) +} + +func TestFileOut(t *testing.T) { + var ( + have, want []byte + err error + path = fmt.Sprintf("%s/test_file_out-%d", os.TempDir(), rand.Int63()) + ) + t.Cleanup(func() { os.Remove(path) }) + if want, err = runSelf(fmt.Sprintf("--log.file=%s", path), "logtest"); err != nil { + t.Fatal(err) + } + if have, err = os.ReadFile(path); err != nil { + t.Fatal(err) + } + if !bytes.Equal(have, want) { + // show an intelligent diff + t.Logf(nicediff(have, want)) + t.Errorf("file content wrong") + } +} + +func TestRotatingFileOut(t *testing.T) { + var ( + have, want []byte + err error + path = fmt.Sprintf("%s/test_file_out-%d", os.TempDir(), rand.Int63()) + ) + t.Cleanup(func() { os.Remove(path) }) + if want, err = runSelf(fmt.Sprintf("--log.file=%s", path), "--log.rotate", "logtest"); err != nil { + t.Fatal(err) + } + if have, err = os.ReadFile(path); err != nil { + t.Fatal(err) + } + if !bytes.Equal(have, want) { + // show an intelligent diff + t.Logf(nicediff(have, want)) + t.Errorf("file content wrong") + } +} diff --git a/cmd/geth/logtestcmd_active.go b/cmd/geth/logtestcmd_active.go new file mode 100644 index 000000000000..c66013517aa4 --- /dev/null +++ b/cmd/geth/logtestcmd_active.go @@ -0,0 +1,134 @@ +//go:build integrationtests + +// Copyright 2023 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "fmt" + "math" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/holiman/uint256" + "github.com/urfave/cli/v2" +) + +var logTestCommand = &cli.Command{ + Action: logTest, + Name: "logtest", + Usage: "Print some log messages", + ArgsUsage: " ", + Description: ` +This command is only meant for testing. +`} + +// logTest is an entry point which spits out some logs. This is used by testing +// to verify expected outputs +func logTest(ctx *cli.Context) error { + log.ResetGlobalState() + { // big.Int + ba, _ := new(big.Int).SetString("111222333444555678999", 10) // "111,222,333,444,555,678,999" + bb, _ := new(big.Int).SetString("-111222333444555678999", 10) // "-111,222,333,444,555,678,999" + bc, _ := new(big.Int).SetString("11122233344455567899900", 10) // "11,122,233,344,455,567,899,900" + bd, _ := new(big.Int).SetString("-11122233344455567899900", 10) // "-11,122,233,344,455,567,899,900" + log.Info("big.Int", "111,222,333,444,555,678,999", ba) + log.Info("-big.Int", "-111,222,333,444,555,678,999", bb) + log.Info("big.Int", "11,122,233,344,455,567,899,900", bc) + log.Info("-big.Int", "-11,122,233,344,455,567,899,900", bd) + } + { //uint256 + ua, _ := uint256.FromDecimal("111222333444555678999") + ub, _ := uint256.FromDecimal("11122233344455567899900") + log.Info("uint256", "111,222,333,444,555,678,999", ua) + log.Info("uint256", "11,122,233,344,455,567,899,900", ub) + } + { // int64 + log.Info("int64", "1,000,000", int64(1000000)) + log.Info("int64", "-1,000,000", int64(-1000000)) + log.Info("int64", "9,223,372,036,854,775,807", int64(math.MaxInt64)) + log.Info("int64", "-9,223,372,036,854,775,808", int64(math.MinInt64)) + } + { // uint64 + log.Info("uint64", "1,000,000", uint64(1000000)) + log.Info("uint64", "18,446,744,073,709,551,615", 
uint64(math.MaxUint64)) + } + { // Special characters + log.Info("Special chars in value", "key", "special \r\n\t chars") + log.Info("Special chars in key", "special \n\t chars", "value") + + log.Info("nospace", "nospace", "nospace") + log.Info("with space", "with nospace", "with nospace") + + log.Info("Bash escapes in value", "key", "\u001b[1G\u001b[K\u001b[1A") + log.Info("Bash escapes in key", "\u001b[1G\u001b[K\u001b[1A", "value") + + log.Info("Bash escapes in message \u001b[1G\u001b[K\u001b[1A end", "key", "value") + + colored := fmt.Sprintf("\u001B[%dmColored\u001B[0m[", 35) + log.Info(colored, colored, colored) + } + { // Custom Stringer() - type + log.Info("Custom Stringer value", "2562047h47m16.854s", common.PrettyDuration(time.Duration(9223372036854775807))) + } + { // Lazy eval + log.Info("Lazy evaluation of value", "key", log.Lazy{Fn: func() interface{} { return "lazy value" }}) + } + { // Multi-line message + log.Info("A message with wonky \U0001F4A9 characters") + log.Info("A multiline message \nINFO [10-18|14:11:31.106] with wonky characters \U0001F4A9") + log.Info("A multiline message \nLALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above") + } + { // Miscellaneous json-quirks + // This will check if the json output uses strings or json-booleans to represent bool values + log.Info("boolean", "true", true, "false", false) + // Handling of duplicate keys. + // This is actually ill-handled by the current handler: the format.go + // uses a global 'fieldPadding' map and mixes up the two keys. If 'alpha' + // is shorter than beta, it sometimes causes erroneous padding -- and what's more + // it causes _different_ padding in multi-handler context, e.g. both file- + // and console output, making the two mismatch. 
+ log.Info("repeated-key 1", "foo", "alpha", "foo", "beta") + log.Info("repeated-key 2", "xx", "short", "xx", "longer") + } + { // loglevels + log.Debug("log at level debug") + log.Trace("log at level trace") + log.Info("log at level info") + log.Warn("log at level warn") + log.Error("log at level error") + } + { + // The current log formatter has a global map of paddings, storing the + // longest seen padding per key in a map. This results in a statefulness + // which has some odd side-effects. Demonstrated here: + log.Info("test", "bar", "short", "a", "aligned left") + log.Info("test", "bar", "a long message", "a", 1) + log.Info("test", "bar", "short", "a", "aligned right") + } + { + // This sequence of logs should be output with alignment, so each field becoems a column. + log.Info("The following logs should align so that the key-fields make 5 columns") + log.Info("Inserted known block", "number", 1_012, "hash", common.HexToHash("0x1234"), "txs", 200, "gas", 1_123_123, "other", "first") + log.Info("Inserted new block", "number", 1, "hash", common.HexToHash("0x1235"), "txs", 2, "gas", 1_123, "other", "second") + log.Info("Inserted known block", "number", 99, "hash", common.HexToHash("0x12322"), "txs", 10, "gas", 1, "other", "third") + log.Warn("Inserted known block", "number", 1_012, "hash", common.HexToHash("0x1234"), "txs", 200, "gas", 99, "other", "fourth") + } + return nil +} diff --git a/cmd/geth/logtestcmd_inactive.go b/cmd/geth/logtestcmd_inactive.go new file mode 100644 index 000000000000..691ab5bcd8ed --- /dev/null +++ b/cmd/geth/logtestcmd_inactive.go @@ -0,0 +1,23 @@ +//go:build !integrationtests + +// Copyright 2023 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import "github.com/urfave/cli/v2" + +var logTestCommand *cli.Command diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 4b26de05a903..2d4fe3dc060c 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -234,6 +234,9 @@ func init() { // See verkle.go verkleCommand, } + if logTestCommand != nil { + app.Commands = append(app.Commands, logTestCommand) + } sort.Sort(cli.CommandsByName(app.Commands)) app.Flags = flags.Merge( diff --git a/cmd/geth/testdata/logging/logtest-logfmt.txt b/cmd/geth/testdata/logging/logtest-logfmt.txt new file mode 100644 index 000000000000..79f29e1fae22 --- /dev/null +++ b/cmd/geth/testdata/logging/logtest-logfmt.txt @@ -0,0 +1,39 @@ +t=2023-10-20T12:56:08+0200 lvl=info msg=big.Int 111,222,333,444,555,678,999=111,222,333,444,555,678,999 +t=2023-10-20T12:56:08+0200 lvl=info msg=-big.Int -111,222,333,444,555,678,999=-111,222,333,444,555,678,999 +t=2023-10-20T12:56:08+0200 lvl=info msg=big.Int 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900 +t=2023-10-20T12:56:08+0200 lvl=info msg=-big.Int -11,122,233,344,455,567,899,900=-11,122,233,344,455,567,899,900 +t=2023-10-20T12:56:08+0200 lvl=info msg=uint256 111,222,333,444,555,678,999=111,222,333,444,555,678,999 +t=2023-10-20T12:56:08+0200 lvl=info msg=uint256 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900 +t=2023-10-20T12:56:08+0200 lvl=info msg=int64 1,000,000=1,000,000 +t=2023-10-20T12:56:08+0200 lvl=info msg=int64 -1,000,000=-1,000,000 +t=2023-10-20T12:56:08+0200 lvl=info msg=int64 9,223,372,036,854,775,807=9,223,372,036,854,775,807 +t=2023-10-20T12:56:08+0200 lvl=info msg=int64 
-9,223,372,036,854,775,808=-9,223,372,036,854,775,808 +t=2023-10-20T12:56:08+0200 lvl=info msg=uint64 1,000,000=1,000,000 +t=2023-10-20T12:56:08+0200 lvl=info msg=uint64 18,446,744,073,709,551,615=18,446,744,073,709,551,615 +t=2023-10-20T12:56:08+0200 lvl=info msg="Special chars in value" key="special \r\n\t chars" +t=2023-10-20T12:56:08+0200 lvl=info msg="Special chars in key" "special \n\t chars"=value +t=2023-10-20T12:56:08+0200 lvl=info msg=nospace nospace=nospace +t=2023-10-20T12:56:08+0200 lvl=info msg="with space" "with nospace"="with nospace" +t=2023-10-20T12:56:08+0200 lvl=info msg="Bash escapes in value" key="\x1b[1G\x1b[K\x1b[1A" +t=2023-10-20T12:56:08+0200 lvl=info msg="Bash escapes in key" "\x1b[1G\x1b[K\x1b[1A"=value +t=2023-10-20T12:56:08+0200 lvl=info msg="Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value +t=2023-10-20T12:56:08+0200 lvl=info msg="\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m[" +t=2023-10-20T12:56:08+0200 lvl=info msg="Custom Stringer value" 2562047h47m16.854s=2562047h47m16.854s +t=2023-10-20T12:56:08+0200 lvl=info msg="Lazy evaluation of value" key="lazy value" +t=2023-10-20T12:56:08+0200 lvl=info msg="A message with wonky ๐Ÿ’ฉ characters" +t=2023-10-20T12:56:08+0200 lvl=info msg="A multiline message \nINFO [10-18|14:11:31.106] with wonky characters ๐Ÿ’ฉ" +t=2023-10-20T12:56:08+0200 lvl=info msg="A multiline message \nLALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above" +t=2023-10-20T12:56:08+0200 lvl=info msg=boolean true=true false=false +t=2023-10-20T12:56:08+0200 lvl=info msg="repeated-key 1" foo=alpha foo=beta +t=2023-10-20T12:56:08+0200 lvl=info msg="repeated-key 2" xx=short xx=longer +t=2023-10-20T12:56:08+0200 lvl=info msg="log at level info" +t=2023-10-20T12:56:08+0200 lvl=warn msg="log at level warn" +t=2023-10-20T12:56:08+0200 lvl=eror msg="log at level error" +t=2023-10-20T12:56:08+0200 lvl=info msg=test bar=short a="aligned left" +t=2023-10-20T12:56:08+0200 lvl=info msg=test 
bar="a long message" a=1 +t=2023-10-20T12:56:08+0200 lvl=info msg=test bar=short a="aligned right" +t=2023-10-20T12:56:08+0200 lvl=info msg="The following logs should align so that the key-fields make 5 columns" +t=2023-10-20T12:56:08+0200 lvl=info msg="Inserted known block" number=1012 hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=1,123,123 other=first +t=2023-10-20T12:56:08+0200 lvl=info msg="Inserted new block" number=1 hash=0x0000000000000000000000000000000000000000000000000000000000001235 txs=2 gas=1123 other=second +t=2023-10-20T12:56:08+0200 lvl=info msg="Inserted known block" number=99 hash=0x0000000000000000000000000000000000000000000000000000000000012322 txs=10 gas=1 other=third +t=2023-10-20T12:56:08+0200 lvl=warn msg="Inserted known block" number=1012 hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=99 other=fourth diff --git a/cmd/geth/testdata/logging/logtest-terminal.txt b/cmd/geth/testdata/logging/logtest-terminal.txt new file mode 100644 index 000000000000..ff68b6047a78 --- /dev/null +++ b/cmd/geth/testdata/logging/logtest-terminal.txt @@ -0,0 +1,40 @@ +INFO [10-20|12:56:42.532] big.Int 111,222,333,444,555,678,999=111,222,333,444,555,678,999 +INFO [10-20|12:56:42.532] -big.Int -111,222,333,444,555,678,999=-111,222,333,444,555,678,999 +INFO [10-20|12:56:42.532] big.Int 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900 +INFO [10-20|12:56:42.532] -big.Int -11,122,233,344,455,567,899,900=-11,122,233,344,455,567,899,900 +INFO [10-20|12:56:42.532] uint256 111,222,333,444,555,678,999=111,222,333,444,555,678,999 +INFO [10-20|12:56:42.532] uint256 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900 +INFO [10-20|12:56:42.532] int64 1,000,000=1,000,000 +INFO [10-20|12:56:42.532] int64 -1,000,000=-1,000,000 +INFO [10-20|12:56:42.532] int64 9,223,372,036,854,775,807=9,223,372,036,854,775,807 +INFO [10-20|12:56:42.532] int64 
-9,223,372,036,854,775,808=-9,223,372,036,854,775,808 +INFO [10-20|12:56:42.532] uint64 1,000,000=1,000,000 +INFO [10-20|12:56:42.532] uint64 18,446,744,073,709,551,615=18,446,744,073,709,551,615 +INFO [10-20|12:56:42.532] Special chars in value key="special \r\n\t chars" +INFO [10-20|12:56:42.532] Special chars in key "special \n\t chars"=value +INFO [10-20|12:56:42.532] nospace nospace=nospace +INFO [10-20|12:56:42.532] with space "with nospace"="with nospace" +INFO [10-20|12:56:42.532] Bash escapes in value key="\x1b[1G\x1b[K\x1b[1A" +INFO [10-20|12:56:42.532] Bash escapes in key "\x1b[1G\x1b[K\x1b[1A"=value +INFO [10-20|12:56:42.532] "Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value +INFO [10-20|12:56:42.532] "\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m[" +INFO [10-20|12:56:42.532] Custom Stringer value 2562047h47m16.854s=2562047h47m16.854s +INFO [10-20|12:56:42.532] Lazy evaluation of value key="lazy value" +INFO [10-20|12:56:42.532] "A message with wonky ๐Ÿ’ฉ characters" +INFO [10-20|12:56:42.532] "A multiline message \nINFO [10-18|14:11:31.106] with wonky characters ๐Ÿ’ฉ" +INFO [10-20|12:56:42.532] A multiline message +LALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above +INFO [10-20|12:56:42.532] boolean true=true false=false +INFO [10-20|12:56:42.532] repeated-key 1 foo=alpha foo=beta +INFO [10-20|12:56:42.532] repeated-key 2 xx=short xx=longer +INFO [10-20|12:56:42.532] log at level info +WARN [10-20|12:56:42.532] log at level warn +ERROR[10-20|12:56:42.532] log at level error +INFO [10-20|12:56:42.532] test bar=short a="aligned left" +INFO [10-20|12:56:42.532] test bar="a long message" a=1 +INFO [10-20|12:56:42.532] test bar=short a="aligned right" +INFO [10-20|12:56:42.532] The following logs should align so that the key-fields make 5 columns +INFO [10-20|12:56:42.532] Inserted known block number=1012 hash=000000..001234 txs=200 gas=1,123,123 other=first +INFO [10-20|12:56:42.532] Inserted new block number=1 
hash=000000..001235 txs=2 gas=1123 other=second +INFO [10-20|12:56:42.532] Inserted known block number=99 hash=000000..012322 txs=10 gas=1 other=third +WARN [10-20|12:56:42.532] Inserted known block number=1012 hash=000000..001234 txs=200 gas=99 other=fourth diff --git a/internal/debug/flags.go b/internal/debug/flags.go index 736fede94334..4f0f5fe86074 100644 --- a/internal/debug/flags.go +++ b/internal/debug/flags.go @@ -218,10 +218,9 @@ func Setup(ctx *cli.Context) error { return fmt.Errorf("unknown log format: %v", ctx.String(logFormatFlag.Name)) } var ( - stdHandler = log.StreamHandler(output, logfmt) - ostream = stdHandler - logFile = ctx.String(logFileFlag.Name) - rotation = ctx.Bool(logRotateFlag.Name) + ostream = log.StreamHandler(output, logfmt) + logFile = ctx.String(logFileFlag.Name) + rotation = ctx.Bool(logRotateFlag.Name) ) if len(logFile) > 0 { if err := validateLogLocation(filepath.Dir(logFile)); err != nil { @@ -242,20 +241,21 @@ func Setup(ctx *cli.Context) error { } else { context = append(context, "location", filepath.Join(os.TempDir(), "geth-lumberjack.log")) } - ostream = log.MultiHandler(log.StreamHandler(&lumberjack.Logger{ + lumberWriter := &lumberjack.Logger{ Filename: logFile, MaxSize: ctx.Int(logMaxSizeMBsFlag.Name), MaxBackups: ctx.Int(logMaxBackupsFlag.Name), MaxAge: ctx.Int(logMaxAgeFlag.Name), Compress: ctx.Bool(logCompressFlag.Name), - }, logfmt), stdHandler) + } + ostream = log.StreamHandler(io.MultiWriter(output, lumberWriter), logfmt) } else if logFile != "" { - if logOutputStream, err := log.FileHandler(logFile, logfmt); err != nil { + f, err := os.OpenFile(logFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { return err - } else { - ostream = log.MultiHandler(logOutputStream, stdHandler) - context = append(context, "location", logFile) } + ostream = log.StreamHandler(io.MultiWriter(output, f), logfmt) + context = append(context, "location", logFile) } glogger.SetHandler(ostream) diff --git a/log/format.go 
b/log/format.go index 1adf79c17e68..2fd1f2855815 100644 --- a/log/format.go +++ b/log/format.go @@ -24,6 +24,14 @@ const ( termCtxMaxPadding = 40 ) +// ResetGlobalState resets the fieldPadding, which is useful for producing +// predictable output. +func ResetGlobalState() { + fieldPaddingLock.Lock() + fieldPadding = make(map[string]int) + fieldPaddingLock.Unlock() +} + // locationTrims are trimmed for display to avoid unwieldy log lines. var locationTrims = []string{ "github.com/ethereum/go-ethereum/", diff --git a/log/format_test.go b/log/format_test.go index e08c1d1a4a9c..41e1809c38cd 100644 --- a/log/format_test.go +++ b/log/format_test.go @@ -1,105 +1,10 @@ package log import ( - "fmt" - "math" - "math/big" "math/rand" - "strings" "testing" - - "github.com/holiman/uint256" ) -func TestPrettyInt64(t *testing.T) { - tests := []struct { - n int64 - s string - }{ - {0, "0"}, - {10, "10"}, - {-10, "-10"}, - {100, "100"}, - {-100, "-100"}, - {1000, "1000"}, - {-1000, "-1000"}, - {10000, "10000"}, - {-10000, "-10000"}, - {99999, "99999"}, - {-99999, "-99999"}, - {100000, "100,000"}, - {-100000, "-100,000"}, - {1000000, "1,000,000"}, - {-1000000, "-1,000,000"}, - {math.MaxInt64, "9,223,372,036,854,775,807"}, - {math.MinInt64, "-9,223,372,036,854,775,808"}, - } - for i, tt := range tests { - if have := FormatLogfmtInt64(tt.n); have != tt.s { - t.Errorf("test %d: format mismatch: have %s, want %s", i, have, tt.s) - } - } -} - -func TestPrettyUint64(t *testing.T) { - tests := []struct { - n uint64 - s string - }{ - {0, "0"}, - {10, "10"}, - {100, "100"}, - {1000, "1000"}, - {10000, "10000"}, - {99999, "99999"}, - {100000, "100,000"}, - {1000000, "1,000,000"}, - {math.MaxUint64, "18,446,744,073,709,551,615"}, - } - for i, tt := range tests { - if have := FormatLogfmtUint64(tt.n); have != tt.s { - t.Errorf("test %d: format mismatch: have %s, want %s", i, have, tt.s) - } - } -} - -func TestPrettyBigInt(t *testing.T) { - tests := []struct { - int string - s string - }{ - 
{"111222333444555678999", "111,222,333,444,555,678,999"}, - {"-111222333444555678999", "-111,222,333,444,555,678,999"}, - {"11122233344455567899900", "11,122,233,344,455,567,899,900"}, - {"-11122233344455567899900", "-11,122,233,344,455,567,899,900"}, - } - - for _, tt := range tests { - v, _ := new(big.Int).SetString(tt.int, 10) - if have := formatLogfmtBigInt(v); have != tt.s { - t.Errorf("invalid output %s, want %s", have, tt.s) - } - } -} - -func TestPrettyUint256(t *testing.T) { - tests := []struct { - int string - s string - }{ - {"111222333444555678999", "111,222,333,444,555,678,999"}, - {"11122233344455567899900", "11,122,233,344,455,567,899,900"}, - } - - for _, tt := range tests { - v := new(uint256.Int) - v.SetFromDecimal(tt.int) - if have := formatLogfmtUint256(v); have != tt.s { - t.Errorf("invalid output %s, want %s", have, tt.s) - } - } -} - var sink string func BenchmarkPrettyInt64Logfmt(b *testing.B) { @@ -115,47 +20,3 @@ func BenchmarkPrettyUint64Logfmt(b *testing.B) { sink = FormatLogfmtUint64(rand.Uint64()) } } - -func TestSanitation(t *testing.T) { - msg := "\u001b[1G\u001b[K\u001b[1A" - msg2 := "\u001b \u0000" - msg3 := "NiceMessage" - msg4 := "Space Message" - msg5 := "Enter\nMessage" - - for i, tt := range []struct { - msg string - want string - }{ - { - msg: msg, - want: fmt.Sprintf("] %q %q=%q\n", msg, msg, msg), - }, - { - msg: msg2, - want: fmt.Sprintf("] %q %q=%q\n", msg2, msg2, msg2), - }, - { - msg: msg3, - want: fmt.Sprintf("] %s %s=%s\n", msg3, msg3, msg3), - }, - { - msg: msg4, - want: fmt.Sprintf("] %s %q=%q\n", msg4, msg4, msg4), - }, - { - msg: msg5, - want: fmt.Sprintf("] %s %q=%q\n", msg5, msg5, msg5), - }, - } { - var ( - logger = New() - out = new(strings.Builder) - ) - logger.SetHandler(LvlFilterHandler(LvlInfo, StreamHandler(out, TerminalFormat(false)))) - logger.Info(tt.msg, tt.msg, tt.msg) - if have := out.String()[24:]; tt.want != have { - t.Fatalf("test %d: want / have: \n%v\n%v", i, tt.want, have) - } - } -} From 
abe3fca1deb0eca9245cccef3a9c637c57b79f7e Mon Sep 17 00:00:00 2001 From: Delweng Date: Thu, 26 Oct 2023 19:48:51 +0800 Subject: [PATCH 06/41] graphql: fix an issue of nil pointer panic (#28416) Signed-off-by: jsvisa --- graphql/graphql.go | 3 +++ graphql/graphql_test.go | 5 +++++ 2 files changed, 8 insertions(+) diff --git a/graphql/graphql.go b/graphql/graphql.go index 50f0c6ca4715..93313d743a9a 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -1325,6 +1325,9 @@ func (r *Resolver) Blocks(ctx context.Context, args struct { From *Long To *Long }) ([]*Block, error) { + if args.From == nil { + return nil, errors.New("from block number must be specified") + } from := rpc.BlockNumber(*args.From) var to rpc.BlockNumber diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go index 4bbfb7251d0c..540a56778ca8 100644 --- a/graphql/graphql_test.go +++ b/graphql/graphql_test.go @@ -148,6 +148,11 @@ func TestGraphQLBlockSerialization(t *testing.T) { want: `{"data":{"block":{"number":"0xa","call":{"data":"0x","status":"0x1"}}}}`, code: 200, }, + { + body: `{"query": "{blocks {number}}"}`, + want: `{"errors":[{"message":"from block number must be specified","path":["blocks"]}],"data":null}`, + code: 400, + }, } { resp, err := http.Post(fmt.Sprintf("%s/graphql", stack.HTTPEndpoint()), "application/json", strings.NewReader(tt.body)) if err != nil { From 4cbca5178a2e5f575f1700c0d49a4324b592f09f Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 27 Oct 2023 04:15:14 +0200 Subject: [PATCH 07/41] core, cmd/geth: add --override.* flags to geth init (#28407) * core, cmd/geth: add --override.* flags to geth init * also apply overrides before genesis commit with new block * review feedback --- cmd/geth/chaincmd.go | 13 ++++++++++++- core/genesis.go | 5 +++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index a6bb2c2d2c41..5663963e3cf4 100644 --- 
a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -50,6 +50,8 @@ var ( ArgsUsage: "", Flags: flags.Merge([]cli.Flag{ utils.CachePreimagesFlag, + utils.OverrideCancun, + utils.OverrideVerkle, }, utils.DatabaseFlags), Description: ` The init command initializes a new genesis block and definition for the network. @@ -193,6 +195,15 @@ func initGenesis(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() + var overrides core.ChainOverrides + if ctx.IsSet(utils.OverrideCancun.Name) { + v := ctx.Uint64(utils.OverrideCancun.Name) + overrides.OverrideCancun = &v + } + if ctx.IsSet(utils.OverrideVerkle.Name) { + v := ctx.Uint64(utils.OverrideVerkle.Name) + overrides.OverrideVerkle = &v + } for _, name := range []string{"chaindata", "lightchaindata"} { chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false) if err != nil { @@ -203,7 +214,7 @@ func initGenesis(ctx *cli.Context) error { triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false) defer triedb.Close() - _, hash, err := core.SetupGenesisBlock(chaindb, triedb, genesis) + _, hash, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides) if err != nil { utils.Fatalf("Failed to write genesis block: %v", err) } diff --git a/core/genesis.go b/core/genesis.go index 0f1e8baf43cd..1045815fab90 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -287,11 +287,11 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen } else { log.Info("Writing custom genesis block") } + applyOverrides(genesis.Config) block, err := genesis.Commit(db, triedb) if err != nil { return genesis.Config, common.Hash{}, err } - applyOverrides(genesis.Config) return genesis.Config, block.Hash(), nil } // The genesis block is present(perhaps in ancient database) while the @@ -303,6 +303,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen if genesis == nil { 
genesis = DefaultGenesisBlock() } + applyOverrides(genesis.Config) // Ensure the stored genesis matches with the given one. hash := genesis.ToBlock().Hash() if hash != stored { @@ -312,11 +313,11 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen if err != nil { return genesis.Config, hash, err } - applyOverrides(genesis.Config) return genesis.Config, block.Hash(), nil } // Check whether the genesis block is already written. if genesis != nil { + applyOverrides(genesis.Config) hash := genesis.ToBlock().Hash() if hash != stored { return genesis.Config, hash, &GenesisMismatchError{stored, hash} From 2d7dba024d76603398907a595da98ad4df81b858 Mon Sep 17 00:00:00 2001 From: Delweng Date: Fri, 27 Oct 2023 22:50:59 +0800 Subject: [PATCH 08/41] graphql: always set content-type to application/json (#28417) --------- Signed-off-by: jsvisa --- graphql/graphql_test.go | 3 +++ graphql/service.go | 8 ++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go index 540a56778ca8..a83d6bbd467e 100644 --- a/graphql/graphql_test.go +++ b/graphql/graphql_test.go @@ -169,6 +169,9 @@ func TestGraphQLBlockSerialization(t *testing.T) { if tt.code != resp.StatusCode { t.Errorf("testcase %d %s,\nwrong statuscode, have: %v, want: %v", i, tt.body, resp.StatusCode, tt.code) } + if ctype := resp.Header.Get("Content-Type"); ctype != "application/json" { + t.Errorf("testcase %d \nwrong Content-Type, have: %v, want: %v", i, ctype, "application/json") + } } } diff --git a/graphql/service.go b/graphql/service.go index f33e763058e3..584165bdb802 100644 --- a/graphql/service.go +++ b/graphql/service.go @@ -73,12 +73,12 @@ func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } // Setting this disables gzip compression in package node. - w.Header().Set("transfer-encoding", "identity") + w.Header().Set("Transfer-Encoding", "identity") // Flush the response. 
Since we are writing close to the response timeout, // chunked transfer encoding must be disabled by setting content-length. - w.Header().Set("content-type", "application/json") - w.Header().Set("content-length", strconv.Itoa(len(responseJSON))) + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Length", strconv.Itoa(len(responseJSON))) w.Write(responseJSON) if flush, ok := w.(http.Flusher); ok { flush.Flush() @@ -97,10 +97,10 @@ func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } + w.Header().Set("Content-Type", "application/json") if len(response.Errors) > 0 { w.WriteHeader(http.StatusBadRequest) } - w.Header().Set("Content-Type", "application/json") w.Write(responseJSON) }) } From 233db64cc1d083e6251abe768c97e0454e2ca898 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Sat, 28 Oct 2023 00:14:43 +0200 Subject: [PATCH 09/41] all: make vendored copy of reexec (#28382) a little copying is better than a little dependency -- go proverb We have this dependency on docker, a.k.a moby: a gigantic library, and we only need ~70 LOC, so here I tried moving it inline instead. 
Co-authored-by: Felix Lange --- build/update-license.go | 3 +-- cmd/clef/run_test.go | 2 +- cmd/ethkey/run_test.go | 2 +- cmd/evm/t8n_test.go | 2 +- cmd/geth/logging_test.go | 2 +- cmd/geth/run_test.go | 2 +- go.mod | 2 -- go.sum | 4 ---- internal/cmdtest/test_cmd.go | 2 +- internal/reexec/reexec.go | 35 +++++++++++++++++++++++++++++++ internal/reexec/self_linux.go | 14 +++++++++++++ internal/reexec/self_others.go | 32 ++++++++++++++++++++++++++++ p2p/simulations/adapters/exec.go | 2 +- p2p/simulations/adapters/types.go | 2 +- 14 files changed, 90 insertions(+), 16 deletions(-) create mode 100644 internal/reexec/reexec.go create mode 100644 internal/reexec/self_linux.go create mode 100644 internal/reexec/self_others.go diff --git a/build/update-license.go b/build/update-license.go index 52a54bf66ab0..70e2de06c776 100644 --- a/build/update-license.go +++ b/build/update-license.go @@ -65,10 +65,8 @@ var ( "vendor/", "tests/testdata/", "build/", // don't relicense vendored sources - "cmd/internal/browser", "common/bitutil/bitutil", "common/prque/", - "consensus/ethash/xor.go", "crypto/blake2b/", "crypto/bn256/", "crypto/bls12381/", @@ -78,6 +76,7 @@ var ( "log/", "metrics/", "signer/rules/deps", + "internal/reexec", // skip special licenses "crypto/secp256k1", // Relicensed to BSD-3 via https://github.com/ethereum/go-ethereum/pull/17225 diff --git a/cmd/clef/run_test.go b/cmd/clef/run_test.go index fc3145b1e0cd..5fa6e02e1471 100644 --- a/cmd/clef/run_test.go +++ b/cmd/clef/run_test.go @@ -21,8 +21,8 @@ import ( "os" "testing" - "github.com/docker/docker/pkg/reexec" "github.com/ethereum/go-ethereum/internal/cmdtest" + "github.com/ethereum/go-ethereum/internal/reexec" ) const registeredName = "clef-test" diff --git a/cmd/ethkey/run_test.go b/cmd/ethkey/run_test.go index 6006f6b5bb70..73506e5da147 100644 --- a/cmd/ethkey/run_test.go +++ b/cmd/ethkey/run_test.go @@ -21,8 +21,8 @@ import ( "os" "testing" - "github.com/docker/docker/pkg/reexec" 
"github.com/ethereum/go-ethereum/internal/cmdtest" + "github.com/ethereum/go-ethereum/internal/reexec" ) type testEthkey struct { diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go index efacbe95ae8d..03503d11c3b1 100644 --- a/cmd/evm/t8n_test.go +++ b/cmd/evm/t8n_test.go @@ -24,9 +24,9 @@ import ( "strings" "testing" - "github.com/docker/docker/pkg/reexec" "github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool" "github.com/ethereum/go-ethereum/internal/cmdtest" + "github.com/ethereum/go-ethereum/internal/reexec" ) func TestMain(m *testing.M) { diff --git a/cmd/geth/logging_test.go b/cmd/geth/logging_test.go index cc951d6e97fa..af50e93f940f 100644 --- a/cmd/geth/logging_test.go +++ b/cmd/geth/logging_test.go @@ -29,7 +29,7 @@ import ( "strings" "testing" - "github.com/docker/docker/pkg/reexec" + "github.com/ethereum/go-ethereum/internal/reexec" ) func runSelf(args ...string) ([]byte, error) { diff --git a/cmd/geth/run_test.go b/cmd/geth/run_test.go index 0588623acb00..2e03dc5eaae5 100644 --- a/cmd/geth/run_test.go +++ b/cmd/geth/run_test.go @@ -23,8 +23,8 @@ import ( "testing" "time" - "github.com/docker/docker/pkg/reexec" "github.com/ethereum/go-ethereum/internal/cmdtest" + "github.com/ethereum/go-ethereum/internal/reexec" "github.com/ethereum/go-ethereum/rpc" ) diff --git a/go.mod b/go.mod index 490103031110..385d5afdc800 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,6 @@ require ( github.com/crate-crypto/go-kzg-4844 v0.7.0 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set/v2 v2.1.0 - github.com/docker/docker v24.0.5+incompatible github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 github.com/ethereum/c-kzg-4844 v0.4.0 github.com/fatih/color v1.13.0 @@ -141,6 +140,5 @@ require ( golang.org/x/net v0.17.0 // indirect google.golang.org/protobuf v1.27.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gotest.tools/v3 v3.5.1 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/go.sum b/go.sum index 6017c9f77e83..cc38e7975f17 100644 
--- a/go.sum +++ b/go.sum @@ -169,8 +169,6 @@ github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwu github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY= -github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo= github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= @@ -963,8 +961,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/cmdtest/test_cmd.go b/internal/cmdtest/test_cmd.go index 43137053c13e..4890d0b7c617 100644 --- a/internal/cmdtest/test_cmd.go +++ b/internal/cmdtest/test_cmd.go @@ -32,7 +32,7 @@ import ( "text/template" "time" - 
"github.com/docker/docker/pkg/reexec" + "github.com/ethereum/go-ethereum/internal/reexec" ) func NewTestCmd(t *testing.T, data interface{}) *TestCmd { diff --git a/internal/reexec/reexec.go b/internal/reexec/reexec.go new file mode 100644 index 000000000000..af8d347986b5 --- /dev/null +++ b/internal/reexec/reexec.go @@ -0,0 +1,35 @@ +// This file originates from Docker/Moby, +// https://github.com/moby/moby/blob/master/pkg/reexec/reexec.go +// Licensed under Apache License 2.0: https://github.com/moby/moby/blob/master/LICENSE +// Copyright 2013-2018 Docker, Inc. +// +// Package reexec facilitates the busybox style reexec of the docker binary that +// we require because of the forking limitations of using Go. Handlers can be +// registered with a name and the argv 0 of the exec of the binary will be used +// to find and execute custom init paths. +package reexec + +import ( + "fmt" + "os" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. +func Init() bool { + if initializer, ok := registeredInitializers[os.Args[0]]; ok { + initializer() + return true + } + return false +} diff --git a/internal/reexec/self_linux.go b/internal/reexec/self_linux.go new file mode 100644 index 000000000000..956d09326a2b --- /dev/null +++ b/internal/reexec/self_linux.go @@ -0,0 +1,14 @@ +// This file originates from Docker/Moby, +// https://github.com/moby/moby/blob/master/pkg/reexec/ +// Licensed under Apache License 2.0: https://github.com/moby/moby/blob/master/LICENSE +// Copyright 2013-2018 Docker, Inc. 
+ +//go:build linux + +package reexec + +// Self returns the path to the current process's binary. +// Returns "/proc/self/exe". +func Self() string { + return "/proc/self/exe" +} diff --git a/internal/reexec/self_others.go b/internal/reexec/self_others.go new file mode 100644 index 000000000000..a9f502ca87e7 --- /dev/null +++ b/internal/reexec/self_others.go @@ -0,0 +1,32 @@ +// This file originates from Docker/Moby, +// https://github.com/moby/moby/blob/master/pkg/reexec/ +// Licensed under Apache License 2.0: https://github.com/moby/moby/blob/master/LICENSE +// Copyright 2013-2018 Docker, Inc. + +//go:build !linux + +package reexec + +import ( + "os" + "os/exec" + "path/filepath" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/p2p/simulations/adapters/exec.go b/p2p/simulations/adapters/exec.go index 1d812514dee7..5ac3379393ed 100644 --- a/p2p/simulations/adapters/exec.go +++ b/p2p/simulations/adapters/exec.go @@ -34,7 +34,7 @@ import ( "syscall" "time" - "github.com/docker/docker/pkg/reexec" + "github.com/ethereum/go-ethereum/internal/reexec" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" diff --git a/p2p/simulations/adapters/types.go b/p2p/simulations/adapters/types.go index 3b4e05a90147..098759599c70 100644 --- a/p2p/simulations/adapters/types.go +++ b/p2p/simulations/adapters/types.go @@ -25,8 +25,8 @@ import ( "os" "strconv" - "github.com/docker/docker/pkg/reexec" "github.com/ethereum/go-ethereum/crypto" + 
"github.com/ethereum/go-ethereum/internal/reexec" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" From ea2e66a58e48ef63566d5274c4a875e817a1cd39 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 31 Oct 2023 17:39:55 +0800 Subject: [PATCH 10/41] trie/triedb/pathdb: improve dirty node flushing trigger (#28426) * trie/triedb/pathdb: improve dirty node flushing trigger * trie/triedb/pathdb: add tests * trie/triedb/pathdb: address comment --- trie/triedb/pathdb/database_test.go | 51 ++++++++++++++++++++----- trie/triedb/pathdb/disklayer.go | 58 +++++++++++++++++++++-------- trie/triedb/pathdb/history.go | 26 ++++--------- 3 files changed, 93 insertions(+), 42 deletions(-) diff --git a/trie/triedb/pathdb/database_test.go b/trie/triedb/pathdb/database_test.go index 912364f7f44a..5509682c3926 100644 --- a/trie/triedb/pathdb/database_test.go +++ b/trie/triedb/pathdb/database_test.go @@ -96,11 +96,15 @@ type tester struct { snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte } -func newTester(t *testing.T) *tester { +func newTester(t *testing.T, historyLimit uint64) *tester { var ( disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) - db = New(disk, &Config{CleanCacheSize: 256 * 1024, DirtyCacheSize: 256 * 1024}) - obj = &tester{ + db = New(disk, &Config{ + StateHistory: historyLimit, + CleanCacheSize: 256 * 1024, + DirtyCacheSize: 256 * 1024, + }) + obj = &tester{ db: db, preimages: make(map[common.Hash]common.Address), accounts: make(map[common.Hash][]byte), @@ -376,7 +380,7 @@ func (t *tester) bottomIndex() int { func TestDatabaseRollback(t *testing.T) { // Verify state histories - tester := newTester(t) + tester := newTester(t, 0) defer tester.release() if err := tester.verifyHistory(); err != nil { @@ -402,7 +406,7 @@ func TestDatabaseRollback(t *testing.T) { func TestDatabaseRecoverable(t *testing.T) { var ( - tester = newTester(t) + tester = 
newTester(t, 0) index = tester.bottomIndex() ) defer tester.release() @@ -440,7 +444,7 @@ func TestDatabaseRecoverable(t *testing.T) { } func TestDisable(t *testing.T) { - tester := newTester(t) + tester := newTester(t, 0) defer tester.release() _, stored := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil) @@ -476,7 +480,7 @@ func TestDisable(t *testing.T) { } func TestCommit(t *testing.T) { - tester := newTester(t) + tester := newTester(t, 0) defer tester.release() if err := tester.db.Commit(tester.lastHash(), false); err != nil { @@ -500,7 +504,7 @@ func TestCommit(t *testing.T) { } func TestJournal(t *testing.T) { - tester := newTester(t) + tester := newTester(t, 0) defer tester.release() if err := tester.db.Journal(tester.lastHash()); err != nil { @@ -524,7 +528,7 @@ func TestJournal(t *testing.T) { } func TestCorruptedJournal(t *testing.T) { - tester := newTester(t) + tester := newTester(t, 0) defer tester.release() if err := tester.db.Journal(tester.lastHash()); err != nil { @@ -553,6 +557,35 @@ func TestCorruptedJournal(t *testing.T) { } } +// TestTailTruncateHistory function is designed to test a specific edge case where, +// when history objects are removed from the end, it should trigger a state flush +// if the ID of the new tail object is even higher than the persisted state ID. +// +// For example, let's say the ID of the persistent state is 10, and the current +// history objects range from ID(5) to ID(15). As we accumulate six more objects, +// the history will expand to cover ID(11) to ID(21). ID(11) then becomes the +// oldest history object, and its ID is even higher than the stored state. +// +// In this scenario, it is mandatory to update the persistent state before +// truncating the tail histories. This ensures that the ID of the persistent state +// always falls within the range of [oldest-history-id, latest-history-id]. 
+func TestTailTruncateHistory(t *testing.T) { + tester := newTester(t, 10) + defer tester.release() + + tester.db.Close() + tester.db = New(tester.db.diskdb, &Config{StateHistory: 10}) + + head, err := tester.db.freezer.Ancients() + if err != nil { + t.Fatalf("Failed to obtain freezer head") + } + stored := rawdb.ReadPersistentStateID(tester.db.diskdb) + if head != stored { + t.Fatalf("Failed to truncate excess history object above, stored: %d, head: %d", stored, head) + } +} + // copyAccounts returns a deep-copied account set of the provided one. func copyAccounts(set map[common.Hash][]byte) map[common.Hash][]byte { copied := make(map[common.Hash][]byte, len(set)) diff --git a/trie/triedb/pathdb/disklayer.go b/trie/triedb/pathdb/disklayer.go index d3b6419cc594..ef697cbce8ce 100644 --- a/trie/triedb/pathdb/disklayer.go +++ b/trie/triedb/pathdb/disklayer.go @@ -172,37 +172,65 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) { dl.lock.Lock() defer dl.lock.Unlock() - // Construct and store the state history first. If crash happens - // after storing the state history but without flushing the - // corresponding states(journal), the stored state history will - // be truncated in the next restart. + // Construct and store the state history first. If crash happens after storing + // the state history but without flushing the corresponding states(journal), + // the stored state history will be truncated from head in the next restart. + var ( + overflow bool + oldest uint64 + ) if dl.db.freezer != nil { - err := writeHistory(dl.db.diskdb, dl.db.freezer, bottom, dl.db.config.StateHistory) + err := writeHistory(dl.db.freezer, bottom) if err != nil { return nil, err } + // Determine if the persisted history object has exceeded the configured + // limitation, set the overflow as true if so. 
+ tail, err := dl.db.freezer.Tail() + if err != nil { + return nil, err + } + limit := dl.db.config.StateHistory + if limit != 0 && bottom.stateID()-tail > limit { + overflow = true + oldest = bottom.stateID() - limit + 1 // track the id of history **after truncation** + } } // Mark the diskLayer as stale before applying any mutations on top. dl.stale = true - // Store the root->id lookup afterwards. All stored lookups are - // identified by the **unique** state root. It's impossible that - // in the same chain blocks are not adjacent but have the same - // root. + // Store the root->id lookup afterwards. All stored lookups are identified + // by the **unique** state root. It's impossible that in the same chain + // blocks are not adjacent but have the same root. if dl.id == 0 { rawdb.WriteStateID(dl.db.diskdb, dl.root, 0) } rawdb.WriteStateID(dl.db.diskdb, bottom.rootHash(), bottom.stateID()) - // Construct a new disk layer by merging the nodes from the provided - // diff layer, and flush the content in disk layer if there are too - // many nodes cached. The clean cache is inherited from the original - // disk layer for reusing. + // Construct a new disk layer by merging the nodes from the provided diff + // layer, and flush the content in disk layer if there are too many nodes + // cached. The clean cache is inherited from the original disk layer. ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.cleans, dl.buffer.commit(bottom.nodes)) - err := ndl.buffer.flush(ndl.db.diskdb, ndl.cleans, ndl.id, force) - if err != nil { + + // In a unique scenario where the ID of the oldest history object (after tail + // truncation) surpasses the persisted state ID, we take the necessary action + // of forcibly committing the cached dirty nodes to ensure that the persisted + // state ID remains higher. 
+ if !force && rawdb.ReadPersistentStateID(dl.db.diskdb) < oldest { + force = true + } + if err := ndl.buffer.flush(ndl.db.diskdb, ndl.cleans, ndl.id, force); err != nil { return nil, err } + // To remove outdated history objects from the end, we set the 'tail' parameter + // to 'oldest-1' due to the offset between the freezer index and the history ID. + if overflow { + pruned, err := truncateFromTail(ndl.db.diskdb, ndl.db.freezer, oldest-1) + if err != nil { + return nil, err + } + log.Debug("Pruned state history", "items", pruned, "tailid", oldest) + } return ndl, nil } diff --git a/trie/triedb/pathdb/history.go b/trie/triedb/pathdb/history.go index 6f33b61859a4..6e3f3faaedce 100644 --- a/trie/triedb/pathdb/history.go +++ b/trie/triedb/pathdb/history.go @@ -512,38 +512,28 @@ func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) return &dec, nil } -// writeHistory writes the state history with provided state set. After -// storing the corresponding state history, it will also prune the stale -// histories from the disk with the given threshold. -func writeHistory(db ethdb.KeyValueStore, freezer *rawdb.ResettableFreezer, dl *diffLayer, limit uint64) error { +// writeHistory persists the state history with the provided state set. +func writeHistory(freezer *rawdb.ResettableFreezer, dl *diffLayer) error { // Short circuit if state set is not available. 
if dl.states == nil { return errors.New("state change set is not available") } var ( - err error - n int - start = time.Now() - h = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states) + start = time.Now() + history = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states) ) - accountData, storageData, accountIndex, storageIndex := h.encode() + accountData, storageData, accountIndex, storageIndex := history.encode() dataSize := common.StorageSize(len(accountData) + len(storageData)) indexSize := common.StorageSize(len(accountIndex) + len(storageIndex)) // Write history data into five freezer table respectively. - rawdb.WriteStateHistory(freezer, dl.stateID(), h.meta.encode(), accountIndex, storageIndex, accountData, storageData) + rawdb.WriteStateHistory(freezer, dl.stateID(), history.meta.encode(), accountIndex, storageIndex, accountData, storageData) - // Prune stale state histories based on the config. - if limit != 0 && dl.stateID() > limit { - n, err = truncateFromTail(db, freezer, dl.stateID()-limit) - if err != nil { - return err - } - } historyDataBytesMeter.Mark(int64(dataSize)) historyIndexBytesMeter.Mark(int64(indexSize)) historyBuildTimeMeter.UpdateSince(start) - log.Debug("Stored state history", "id", dl.stateID(), "block", dl.block, "data", dataSize, "index", indexSize, "pruned", n, "elapsed", common.PrettyDuration(time.Since(start))) + log.Debug("Stored state history", "id", dl.stateID(), "block", dl.block, "data", dataSize, "index", indexSize, "elapsed", common.PrettyDuration(time.Since(start))) + return nil } From 447945e43834fdb24a71885eebc72996ca451f8b Mon Sep 17 00:00:00 2001 From: Jakub Freebit <49676311+jakub-freebit@users.noreply.github.com> Date: Tue, 31 Oct 2023 20:04:45 +0900 Subject: [PATCH 11/41] core/rawdb: add logging and fix comments around AncientRange function. (#28379) This adds warning logs when the read does not match the expected count. 
We can also remove the size limit since the function documentation explicitly states that callers should limit the count. --- core/rawdb/accessors_chain.go | 19 ++++++++++++------- core/rawdb/freezer_resettable.go | 7 ++++--- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 97401d283caa..d9a89fe90c99 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -334,13 +334,18 @@ func ReadHeaderRange(db ethdb.Reader, number uint64, count uint64) []rlp.RawValu return rlpHeaders } // read remaining from ancients - max := count * 700 - data, err := db.AncientRange(ChainFreezerHeaderTable, i+1-count, count, max) - if err == nil && uint64(len(data)) == count { - // the data is on the order [h, h+1, .., n] -- reordering needed - for i := range data { - rlpHeaders = append(rlpHeaders, data[len(data)-1-i]) - } + data, err := db.AncientRange(ChainFreezerHeaderTable, i+1-count, count, 0) + if err != nil { + log.Error("Failed to read headers from freezer", "err", err) + return rlpHeaders + } + if uint64(len(data)) != count { + log.Warn("Incomplete read of headers from freezer", "wanted", count, "read", len(data)) + return rlpHeaders + } + // The data is on the order [h, h+1, .., n] -- reordering needed + for i := range data { + rlpHeaders = append(rlpHeaders, data[len(data)-1-i]) } return rlpHeaders } diff --git a/core/rawdb/freezer_resettable.go b/core/rawdb/freezer_resettable.go index 1df6411a393d..7a8548973819 100644 --- a/core/rawdb/freezer_resettable.go +++ b/core/rawdb/freezer_resettable.go @@ -119,9 +119,10 @@ func (f *ResettableFreezer) Ancient(kind string, number uint64) ([]byte, error) // AncientRange retrieves multiple items in sequence, starting from the index 'start'. 
// It will return -// - at most 'max' items, -// - at least 1 item (even if exceeding the maxByteSize), but will otherwise -// return as many items as fit into maxByteSize +// - at most 'count' items, +// - if maxBytes is specified: at least 1 item (even if exceeding the maxByteSize), +// but will otherwise return as many items as fit into maxByteSize. +// - if maxBytes is not specified, 'count' items will be returned if they are present. func (f *ResettableFreezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { f.lock.RLock() defer f.lock.RUnlock() From bc42e88415d36861a173b6f86fe4f0d94a81683b Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 31 Oct 2023 12:39:25 +0100 Subject: [PATCH 12/41] core: add basic chain history support in GenerateChain (#28428) This change improves GenerateChain to support internal chain history access (ChainReader) for the consensus engine and EVM. GenerateChain takes a `parent` block and the number of blocks to create. With my changes, the consensus engine and EVM can now access blocks from `parent` up to the block currently being generated. This is required to make the BLOCKHASH instruction work, and also needed to create real clique chains. Clique uses chain history to figure out if the current signer is in-turn, for example. I've also added some more accessors to BlockGen. These are helpful when creating transactions: - g.Signer returns a signer instance for the current block - g.Difficulty returns the current block difficulty - g.Gas returns the remaining gas amount Another fix in this commit concerns the receipts returned by GenerateChain. The receipts now have properly derived fields (BlockHash, etc.) and should generally match what would be returned by the RPC API. 
--- core/bench_test.go | 4 +- core/chain_makers.go | 271 +++++++++++++++++++++++------------ core/chain_makers_test.go | 56 ++++++-- core/state_processor.go | 3 +- core/state_processor_test.go | 3 +- eth/filters/filter_test.go | 108 ++++++++------ 6 files changed, 295 insertions(+), 150 deletions(-) diff --git a/core/bench_test.go b/core/bench_test.go index 55fa980a8566..c5991f10e82e 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -84,7 +84,7 @@ func genValueTx(nbytes int) func(int, *BlockGen) { toaddr := common.Address{} data := make([]byte, nbytes) gas, _ := IntrinsicGas(data, nil, false, false, false, false) - signer := types.MakeSigner(gen.config, big.NewInt(int64(i)), gen.header.Time) + signer := gen.Signer() gasPrice := big.NewInt(0) if gen.header.BaseFee != nil { gasPrice = gen.header.BaseFee @@ -128,7 +128,7 @@ func genTxRing(naccounts int) func(int, *BlockGen) { if gen.header.BaseFee != nil { gasPrice = gen.header.BaseFee } - signer := types.MakeSigner(gen.config, big.NewInt(int64(i)), gen.header.Time) + signer := gen.Signer() for { gas -= params.TxGas if gas < params.TxGas { diff --git a/core/chain_makers.go b/core/chain_makers.go index 0f445ab3f046..31c111b73e06 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -38,8 +38,8 @@ import ( // See GenerateChain for a detailed explanation. type BlockGen struct { i int + cm *chainMaker parent *types.Block - chain []*types.Block header *types.Header statedb *state.StateDB @@ -49,7 +49,6 @@ type BlockGen struct { uncles []*types.Header withdrawals []*types.Withdrawal - config *params.ChainConfig engine consensus.Engine } @@ -88,13 +87,18 @@ func (b *BlockGen) SetPoS() { b.header.Difficulty = new(big.Int) } +// Difficulty returns the currently calculated difficulty of the block. +func (b *BlockGen) Difficulty() *big.Int { + return new(big.Int).Set(b.header.Difficulty) +} + // SetParentBeaconRoot sets the parent beacon root field of the generated // block. 
func (b *BlockGen) SetParentBeaconRoot(root common.Hash) { b.header.ParentBeaconRoot = &root var ( - blockContext = NewEVMBlockContext(b.header, nil, &b.header.Coinbase) - vmenv = vm.NewEVM(blockContext, vm.TxContext{}, b.statedb, b.config, vm.Config{}) + blockContext = NewEVMBlockContext(b.header, b.cm, &b.header.Coinbase) + vmenv = vm.NewEVM(blockContext, vm.TxContext{}, b.statedb, b.cm.config, vm.Config{}) ) ProcessBeaconBlockRoot(root, vmenv, b.statedb) } @@ -111,7 +115,7 @@ func (b *BlockGen) addTx(bc *BlockChain, vmConfig vm.Config, tx *types.Transacti b.SetCoinbase(common.Address{}) } b.statedb.SetTxContext(tx.Hash(), len(b.txs)) - receipt, err := ApplyTransaction(b.config, bc, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vmConfig) + receipt, err := ApplyTransaction(b.cm.config, bc, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vmConfig) if err != nil { panic(err) } @@ -125,11 +129,11 @@ func (b *BlockGen) addTx(bc *BlockChain, vmConfig vm.Config, tx *types.Transacti // AddTx adds a transaction to the generated block. If no coinbase has // been set, the block's coinbase is set to the zero address. // -// AddTx panics if the transaction cannot be executed. In addition to -// the protocol-imposed limitations (gas limit, etc.), there are some -// further limitations on the content of transactions that can be -// added. Notably, contract code relying on the BLOCKHASH instruction -// will panic during execution. +// AddTx panics if the transaction cannot be executed. In addition to the protocol-imposed +// limitations (gas limit, etc.), there are some further limitations on the content of +// transactions that can be added. Notably, contract code relying on the BLOCKHASH +// instruction will panic during execution if it attempts to access a block number outside +// of the range created by GenerateChain. 
func (b *BlockGen) AddTx(tx *types.Transaction) { b.addTx(nil, vm.Config{}, tx) } @@ -137,11 +141,10 @@ func (b *BlockGen) AddTx(tx *types.Transaction) { // AddTxWithChain adds a transaction to the generated block. If no coinbase has // been set, the block's coinbase is set to the zero address. // -// AddTxWithChain panics if the transaction cannot be executed. In addition to -// the protocol-imposed limitations (gas limit, etc.), there are some -// further limitations on the content of transactions that can be -// added. If contract code relies on the BLOCKHASH instruction, -// the block in chain will be returned. +// AddTxWithChain panics if the transaction cannot be executed. In addition to the +// protocol-imposed limitations (gas limit, etc.), there are some further limitations on +// the content of transactions that can be added. If contract code relies on the BLOCKHASH +// instruction, the block in chain will be returned. func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) { b.addTx(bc, vm.Config{}, tx) } @@ -158,8 +161,7 @@ func (b *BlockGen) GetBalance(addr common.Address) *big.Int { return b.statedb.GetBalance(addr) } -// AddUncheckedTx forcefully adds a transaction to the block without any -// validation. +// AddUncheckedTx forcefully adds a transaction to the block without any validation. // // AddUncheckedTx will cause consensus failures when used during real // chain processing. This is best used in conjunction with raw block insertion. @@ -182,6 +184,16 @@ func (b *BlockGen) BaseFee() *big.Int { return new(big.Int).Set(b.header.BaseFee) } +// Gas returns the amount of gas left in the current block. +func (b *BlockGen) Gas() uint64 { + return b.header.GasLimit - b.header.GasUsed +} + +// Signer returns a valid signer instance for the current block. 
+func (b *BlockGen) Signer() types.Signer { + return types.MakeSigner(b.cm.config, b.header.Number, b.header.Time) +} + // AddUncheckedReceipt forcefully adds a receipts to the block without a // backing transaction. // @@ -207,20 +219,19 @@ func (b *BlockGen) AddUncle(h *types.Header) { var parent *types.Header for i := b.i - 1; i >= 0; i-- { - if b.chain[i].Hash() == h.ParentHash { - parent = b.chain[i].Header() + if b.cm.chain[i].Hash() == h.ParentHash { + parent = b.cm.chain[i].Header() break } } - chainreader := &fakeChainReader{config: b.config} - h.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time, parent) + h.Difficulty = b.engine.CalcDifficulty(b.cm, b.header.Time, parent) // The gas limit and price should be derived from the parent h.GasLimit = parent.GasLimit - if b.config.IsLondon(h.Number) { - h.BaseFee = eip1559.CalcBaseFee(b.config, parent) - if !b.config.IsLondon(parent.Number) { - parentGasLimit := parent.GasLimit * b.config.ElasticityMultiplier() + if b.cm.config.IsLondon(h.Number) { + h.BaseFee = eip1559.CalcBaseFee(b.cm.config, parent) + if !b.cm.config.IsLondon(parent.Number) { + parentGasLimit := parent.GasLimit * b.cm.config.ElasticityMultiplier() h.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit) } } @@ -242,12 +253,12 @@ func (b *BlockGen) nextWithdrawalIndex() uint64 { return b.withdrawals[len(b.withdrawals)-1].Index + 1 } for i := b.i - 1; i >= 0; i-- { - if wd := b.chain[i].Withdrawals(); len(wd) != 0 { + if wd := b.cm.chain[i].Withdrawals(); len(wd) != 0 { return wd[len(wd)-1].Index + 1 } if i == 0 { // Correctly set the index if no parent had withdrawals. 
- if wd := b.parent.Withdrawals(); len(wd) != 0 { + if wd := b.cm.bottom.Withdrawals(); len(wd) != 0 { return wd[len(wd)-1].Index + 1 } } @@ -263,9 +274,9 @@ func (b *BlockGen) PrevBlock(index int) *types.Block { panic(fmt.Errorf("block index %d out of range (%d,%d)", index, -1, b.i)) } if index == -1 { - return b.parent + return b.cm.bottom } - return b.chain[index] + return b.cm.chain[index] } // OffsetTime modifies the time instance of a block, implicitly changing its @@ -273,11 +284,10 @@ func (b *BlockGen) PrevBlock(index int) *types.Block { // tied to chain length directly. func (b *BlockGen) OffsetTime(seconds int64) { b.header.Time += uint64(seconds) - if b.header.Time <= b.parent.Header().Time { + if b.header.Time <= b.cm.bottom.Header().Time { panic("block time out of range") } - chainreader := &fakeChainReader{config: b.config} - b.header.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time, b.parent.Header()) + b.header.Difficulty = b.engine.CalcDifficulty(b.cm, b.header.Time, b.parent.Header()) } // GenerateChain creates a chain of n blocks. The first block's @@ -296,11 +306,14 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse if config == nil { config = params.TestChainConfig } - blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n) - chainreader := &fakeChainReader{config: config} + if engine == nil { + panic("nil consensus engine") + } + cm := newChainMaker(parent, config, engine) + genblock := func(i int, parent *types.Block, triedb *trie.Database, statedb *state.StateDB) (*types.Block, types.Receipts) { - b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} - b.header = makeHeader(chainreader, parent, statedb, b.engine) + b := &BlockGen{i: i, cm: cm, parent: parent, statedb: statedb, engine: engine} + b.header = cm.makeHeader(parent, statedb, b.engine) // Set the difficulty for clique block. 
The chain maker doesn't have access // to a chain, so the difficulty will be left unset (nil). Set it here to the @@ -330,24 +343,23 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse if gen != nil { gen(i, b) } - if b.engine != nil { - block, err := b.engine.FinalizeAndAssemble(chainreader, b.header, statedb, b.txs, b.uncles, b.receipts, b.withdrawals) - if err != nil { - panic(err) - } - // Write state changes to db - root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number)) - if err != nil { - panic(fmt.Sprintf("state write error: %v", err)) - } - if err = triedb.Commit(root, false); err != nil { - panic(fmt.Sprintf("trie write error: %v", err)) - } - return block, b.receipts + block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, b.txs, b.uncles, b.receipts, b.withdrawals) + if err != nil { + panic(err) } - return nil, nil + + // Write state changes to db + root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number)) + if err != nil { + panic(fmt.Sprintf("state write error: %v", err)) + } + if err = triedb.Commit(root, false); err != nil { + panic(fmt.Sprintf("trie write error: %v", err)) + } + return block, b.receipts } + // Forcibly use hash-based state scheme for retaining all nodes in disk. triedb := trie.NewDatabase(db, trie.HashDefaults) defer triedb.Close() @@ -357,12 +369,36 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse if err != nil { panic(err) } - block, receipt := genblock(i, parent, triedb, statedb) - blocks[i] = block - receipts[i] = receipt + block, receipts := genblock(i, parent, triedb, statedb) + + // Post-process the receipts. + // Here we assign the final block hash and other info into the receipt. + // In order for DeriveFields to work, the transaction and receipt lists need to be + // of equal length. 
If AddUncheckedTx or AddUncheckedReceipt are used, there will be + // extra ones, so we just trim the lists here. + receiptsCount := len(receipts) + txs := block.Transactions() + if len(receipts) > len(txs) { + receipts = receipts[:len(txs)] + } else if len(receipts) < len(txs) { + txs = txs[:len(receipts)] + } + var blobGasPrice *big.Int + if block.ExcessBlobGas() != nil { + blobGasPrice = eip4844.CalcBlobFee(*block.ExcessBlobGas()) + } + if err := receipts.DeriveFields(config, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), blobGasPrice, txs); err != nil { + panic(err) + } + + // Re-expand to ensure all receipts are returned. + receipts = receipts[:receiptsCount] + + // Advance the chain. + cm.add(block, receipts) parent = block } - return blocks, receipts + return cm.chain, cm.receipts } // GenerateChainWithGenesis is a wrapper of GenerateChain which will initialize @@ -380,35 +416,26 @@ func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, return db, blocks, receipts } -func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header { - var time uint64 - if parent.Time() == 0 { - time = 10 - } else { - time = parent.Time() + 10 // block time is fixed at 10 seconds - } +func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header { + time := parent.Time() + 10 // block time is fixed at 10 seconds header := &types.Header{ - Root: state.IntermediateRoot(chain.Config().IsEIP158(parent.Number())), + Root: state.IntermediateRoot(cm.config.IsEIP158(parent.Number())), ParentHash: parent.Hash(), Coinbase: parent.Coinbase(), - Difficulty: engine.CalcDifficulty(chain, time, &types.Header{ - Number: parent.Number(), - Time: time - 10, - Difficulty: parent.Difficulty(), - UncleHash: parent.UncleHash(), - }), - GasLimit: parent.GasLimit(), - Number: new(big.Int).Add(parent.Number(), common.Big1), - Time: time, + 
Difficulty: engine.CalcDifficulty(cm, time, parent.Header()), + GasLimit: parent.GasLimit(), + Number: new(big.Int).Add(parent.Number(), common.Big1), + Time: time, } - if chain.Config().IsLondon(header.Number) { - header.BaseFee = eip1559.CalcBaseFee(chain.Config(), parent.Header()) - if !chain.Config().IsLondon(parent.Number()) { - parentGasLimit := parent.GasLimit() * chain.Config().ElasticityMultiplier() + + if cm.config.IsLondon(header.Number) { + header.BaseFee = eip1559.CalcBaseFee(cm.config, parent.Header()) + if !cm.config.IsLondon(parent.Number()) { + parentGasLimit := parent.GasLimit() * cm.config.ElasticityMultiplier() header.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit) } } - if chain.Config().IsCancun(header.Number, header.Time) { + if cm.config.IsCancun(header.Number, header.Time) { var ( parentExcessBlobGas uint64 parentBlobGasUsed uint64 @@ -461,18 +488,86 @@ func makeBlockChainWithGenesis(genesis *Genesis, n int, engine consensus.Engine, return db, blocks } -type fakeChainReader struct { - config *params.ChainConfig +// chainMaker contains the state of chain generation. +type chainMaker struct { + bottom *types.Block + engine consensus.Engine + config *params.ChainConfig + chain []*types.Block + chainByHash map[common.Hash]*types.Block + receipts []types.Receipts } -// Config returns the chain configuration. 
-func (cr *fakeChainReader) Config() *params.ChainConfig { - return cr.config +func newChainMaker(bottom *types.Block, config *params.ChainConfig, engine consensus.Engine) *chainMaker { + return &chainMaker{ + bottom: bottom, + config: config, + engine: engine, + chainByHash: make(map[common.Hash]*types.Block), + } +} + +func (cm *chainMaker) add(b *types.Block, r []*types.Receipt) { + cm.chain = append(cm.chain, b) + cm.chainByHash[b.Hash()] = b + cm.receipts = append(cm.receipts, r) +} + +func (cm *chainMaker) blockByNumber(number uint64) *types.Block { + if number == cm.bottom.NumberU64() { + return cm.bottom + } + cur := cm.CurrentHeader().Number.Uint64() + lowest := cm.bottom.NumberU64() + 1 + if number < lowest || number > cur { + return nil + } + return cm.chain[number-lowest] +} + +// ChainReader/ChainContext implementation + +// Config returns the chain configuration (for consensus.ChainReader). +func (cm *chainMaker) Config() *params.ChainConfig { + return cm.config +} + +// Engine returns the consensus engine (for ChainContext). 
+func (cm *chainMaker) Engine() consensus.Engine { + return cm.engine +} + +func (cm *chainMaker) CurrentHeader() *types.Header { + if len(cm.chain) == 0 { + return cm.bottom.Header() + } + return cm.chain[len(cm.chain)-1].Header() } -func (cr *fakeChainReader) CurrentHeader() *types.Header { return nil } -func (cr *fakeChainReader) GetHeaderByNumber(number uint64) *types.Header { return nil } -func (cr *fakeChainReader) GetHeaderByHash(hash common.Hash) *types.Header { return nil } -func (cr *fakeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { return nil } -func (cr *fakeChainReader) GetBlock(hash common.Hash, number uint64) *types.Block { return nil } -func (cr *fakeChainReader) GetTd(hash common.Hash, number uint64) *big.Int { return nil } +func (cm *chainMaker) GetHeaderByNumber(number uint64) *types.Header { + b := cm.blockByNumber(number) + if b == nil { + return nil + } + return b.Header() +} + +func (cm *chainMaker) GetHeaderByHash(hash common.Hash) *types.Header { + b := cm.chainByHash[hash] + if b == nil { + return nil + } + return b.Header() +} + +func (cm *chainMaker) GetHeader(hash common.Hash, number uint64) *types.Header { + return cm.GetHeaderByNumber(number) +} + +func (cm *chainMaker) GetBlock(hash common.Hash, number uint64) *types.Block { + return cm.blockByNumber(number) +} + +func (cm *chainMaker) GetTd(hash common.Hash, number uint64) *big.Int { + return nil // not supported +} diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go index 018151cb0c86..84148841f588 100644 --- a/core/chain_makers_test.go +++ b/core/chain_makers_test.go @@ -19,8 +19,10 @@ package core import ( "fmt" "math/big" + "reflect" "testing" + "github.com/davecgh/go-spew/spew" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/ethash" @@ -52,9 +54,8 @@ func TestGeneratePOSChain(t *testing.T) { Difficulty: common.Big1, GasLimit: 5_000_000, } - gendb = 
rawdb.NewMemoryDatabase() - signer = types.LatestSigner(gspec.Config) - db = rawdb.NewMemoryDatabase() + gendb = rawdb.NewMemoryDatabase() + db = rawdb.NewMemoryDatabase() ) config.TerminalTotalDifficultyPassed = true @@ -82,10 +83,20 @@ func TestGeneratePOSChain(t *testing.T) { } genesis := gspec.MustCommit(gendb, trie.NewDatabase(gendb, trie.HashDefaults)) - chain, _ := GenerateChain(gspec.Config, genesis, beacon.NewFaker(), gendb, 4, func(i int, gen *BlockGen) { + genchain, genreceipts := GenerateChain(gspec.Config, genesis, beacon.NewFaker(), gendb, 4, func(i int, gen *BlockGen) { gen.SetParentBeaconRoot(common.Hash{byte(i + 1)}) - tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(address), address, big.NewInt(1000), params.TxGas, new(big.Int).Add(gen.BaseFee(), common.Big1), nil), signer, key) + + // Add value transfer tx. + tx := types.MustSignNewTx(key, gen.Signer(), &types.LegacyTx{ + Nonce: gen.TxNonce(address), + To: &address, + Value: big.NewInt(1000), + Gas: params.TxGas, + GasPrice: new(big.Int).Add(gen.BaseFee(), common.Big1), + }) gen.AddTx(tx) + + // Add withdrawals. 
if i == 1 { gen.AddWithdrawal(&types.Withdrawal{ Validator: 42, @@ -116,20 +127,39 @@ func TestGeneratePOSChain(t *testing.T) { blockchain, _ := NewBlockChain(db, nil, gspec, nil, beacon.NewFaker(), vm.Config{}, nil, nil) defer blockchain.Stop() - if i, err := blockchain.InsertChain(chain); err != nil { - fmt.Printf("insert error (block %d): %v\n", chain[i].NumberU64(), err) - return + if i, err := blockchain.InsertChain(genchain); err != nil { + t.Fatalf("insert error (block %d): %v\n", genchain[i].NumberU64(), err) } // enforce that withdrawal indexes are monotonically increasing from 0 var ( withdrawalIndex uint64 - head = blockchain.CurrentBlock().Number.Uint64() ) - for i := 0; i < int(head); i++ { - block := blockchain.GetBlockByNumber(uint64(i)) + for i := range genchain { + blocknum := genchain[i].NumberU64() + block := blockchain.GetBlockByNumber(blocknum) if block == nil { - t.Fatalf("block %d not found", i) + t.Fatalf("block %d not found", blocknum) + } + + // Verify receipts. + genBlockReceipts := genreceipts[i] + for _, r := range genBlockReceipts { + if r.BlockNumber.Cmp(block.Number()) != 0 { + t.Errorf("receipt has wrong block number %d, want %d", r.BlockNumber, block.Number()) + } + if r.BlockHash != block.Hash() { + t.Errorf("receipt has wrong block hash %v, want %v", r.BlockHash, block.Hash()) + } + + // patch up empty logs list to make DeepEqual below work + if r.Logs == nil { + r.Logs = []*types.Log{} + } + } + blockchainReceipts := blockchain.GetReceiptsByHash(block.Hash()) + if !reflect.DeepEqual(genBlockReceipts, blockchainReceipts) { + t.Fatalf("receipts mismatch\ngenerated: %s\nblockchain: %s", spew.Sdump(genBlockReceipts), spew.Sdump(blockchainReceipts)) } // Verify withdrawals. @@ -144,7 +174,7 @@ func TestGeneratePOSChain(t *testing.T) { } // Verify parent beacon root. 
- want := common.Hash{byte(i)} + want := common.Hash{byte(blocknum)} if got := block.BeaconRoot(); *got != want { t.Fatalf("block %d, wrong parent beacon root: got %s, want %s", i, got, want) } diff --git a/core/state_processor.go b/core/state_processor.go index 7dd81487d540..9a4333f72330 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -165,7 +165,8 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo } // Create a new context to be used in the EVM environment blockContext := NewEVMBlockContext(header, bc, author) - vmenv := vm.NewEVM(blockContext, vm.TxContext{BlobHashes: tx.BlobHashes()}, statedb, config, cfg) + txContext := NewEVMTxContext(msg) + vmenv := vm.NewEVM(blockContext, txContext, statedb, config, cfg) return applyTransaction(msg, config, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv) } diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 6c6ef5adfd43..e80bc288c31f 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -359,7 +359,8 @@ func TestStateProcessorErrors(t *testing.T) { func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Transactions, config *params.ChainConfig) *types.Block { difficulty := big.NewInt(0) if !config.TerminalTotalDifficultyPassed { - difficulty = engine.CalcDifficulty(&fakeChainReader{config}, parent.Time()+10, &types.Header{ + fakeChainReader := newChainMaker(nil, config, engine) + difficulty = engine.CalcDifficulty(fakeChainReader, parent.Time()+10, &types.Header{ Number: parent.Number(), Time: parent.Time(), Difficulty: parent.Difficulty(), diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index a076fb737778..4e09a9038b3e 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -123,35 +123,35 @@ func TestFilters(t *testing.T) { pragma solidity >=0.7.0 <0.9.0; contract Logger { - function log0() external { - assembly { - log0(0, 0) - } - } + 
function log0() external { + assembly { + log0(0, 0) + } + } - function log1(uint t1) external { - assembly { - log1(0, 0, t1) - } - } + function log1(uint t1) external { + assembly { + log1(0, 0, t1) + } + } - function log2(uint t1, uint t2) external { - assembly { - log2(0, 0, t1, t2) - } - } + function log2(uint t1, uint t2) external { + assembly { + log2(0, 0, t1, t2) + } + } - function log3(uint t1, uint t2, uint t3) external { - assembly { - log3(0, 0, t1, t2, t3) - } - } + function log3(uint t1, uint t2, uint t3) external { + assembly { + log3(0, 0, t1, t2, t3) + } + } - function log4(uint t1, uint t2, uint t3, uint t4) external { - assembly { - log4(0, 0, t1, t2, t3, t4) - } - } + function log4(uint t1, uint t2, uint t3, uint t4) external { + assembly { + log4(0, 0, t1, t2, t3, t4) + } + } } */ bytecode = common.FromHex("608060405234801561001057600080fd5b50600436106100575760003560e01c80630aa731851461005c5780632a4c08961461006657806378b9a1f314610082578063c670f8641461009e578063c683d6a3146100ba575b600080fd5b6100646100d6565b005b610080600480360381019061007b9190610143565b6100dc565b005b61009c60048036038101906100979190610196565b6100e8565b005b6100b860048036038101906100b391906101d6565b6100f2565b005b6100d460048036038101906100cf9190610203565b6100fa565b005b600080a0565b808284600080a3505050565b8082600080a25050565b80600080a150565b80828486600080a450505050565b600080fd5b6000819050919050565b6101208161010d565b811461012b57600080fd5b50565b60008135905061013d81610117565b92915050565b60008060006060848603121561015c5761015b610108565b5b600061016a8682870161012e565b935050602061017b8682870161012e565b925050604061018c8682870161012e565b9150509250925092565b600080604083850312156101ad576101ac610108565b5b60006101bb8582860161012e565b92505060206101cc8582860161012e565b9150509250929050565b6000602082840312156101ec576101eb610108565b5b60006101fa8482850161012e565b91505092915050565b6000806000806080858703121561021d5761021c610108565b5b600061022b8782880161012e565b945050602061023c8782880161012e565b9350506040610
24d8782880161012e565b925050606061025e8782880161012e565b9150509295919450925056fea264697066735822122073a4b156f487e59970dc1ef449cc0d51467268f676033a17188edafcee861f9864736f6c63430008110033") @@ -287,53 +287,71 @@ func TestFilters(t *testing.T) { { f: sys.NewBlockFilter(chain[2].Hash(), []common.Address{contract}, nil), want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`, - }, { + }, + { f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{contract}, [][]common.Hash{{hash1, hash2, hash3, hash4}}), want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xa8028c655b6423204c8edfbc339f57b042d6bec2b6a61145d76b7c08b4cccd42","transactionIndex":"0x0","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a
87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`, - }, { + }, + { f: sys.NewRangeFilter(900, 999, []common.Address{contract}, [][]common.Hash{{hash3}}), - }, { + }, + { f: sys.NewRangeFilter(990, int64(rpc.LatestBlockNumber), []common.Address{contract2}, [][]common.Hash{{hash3}}), want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false}]`, - }, { + }, + { f: sys.NewRangeFilter(1, 10, []common.Address{contract}, [][]common.Hash{{hash2}, {hash1}}), want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`, - }, { + }, + { f: sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}}), want: 
`[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xa8028c655b6423204c8edfbc339f57b042d6bec2b6a61145d76b7c08b4cccd42","transactionIndex":"0x0","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x0","removed":false},{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xdba3e2ea9a7d690b722d70ee605fd67ba4c00d1d3aecd5cf187a7b92ad8eb3df","transactionIndex":"0x1","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x1","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`, - }, { + }, + { f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}}), - }, { + }, + { f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{common.BytesToAddress([]byte("failmenow"))}, nil), - }, { + }, + { f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}, {hash1}}), - }, { + }, + { f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), want: 
`[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`, - }, { + }, + { f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`, - }, { + }, + { f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil), want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false}]`, - }, { + }, + { f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil), - }, { + }, + { f: 
sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), err: "safe header not found", - }, { + }, + { f: sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), err: "safe header not found", - }, { + }, + { f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), err: "safe header not found", - }, { + }, + { f: sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.PendingBlockNumber), nil, nil), - want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696335"],"data":"0x","blockNumber":"0x3e9","transactionHash":"0x4110587c1b8d86edc85dce929a34127f1cb8809515a9f177c91c866de3eb0638","transactionIndex":"0x0","blockHash":"0xc7245899e5817f16fa99cf5ad2d9c1e4b98443a565a673ec9c764640443ef037","logIndex":"0x0","removed":false}]`, - }, { + want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696335"],"data":"0x","blockNumber":"0x3e9","transactionHash":"0x4110587c1b8d86edc85dce929a34127f1cb8809515a9f177c91c866de3eb0638","transactionIndex":"0x0","blockHash":"0xd5e8d4e4eb51a2a2a6ec20ef68a4c2801240743c8deb77a6a1d118ac3eefb725","logIndex":"0x0","removed":false}]`, + }, + { f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.PendingBlockNumber), nil, nil), - want: 
`[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696335"],"data":"0x","blockNumber":"0x3e9","transactionHash":"0x4110587c1b8d86edc85dce929a34127f1cb8809515a9f177c91c866de3eb0638","transactionIndex":"0x0","blockHash":"0xc7245899e5817f16fa99cf5ad2d9c1e4b98443a565a673ec9c764640443ef037","logIndex":"0x0","removed":false}]`, - }, { + want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696335"],"data":"0x","blockNumber":"0x3e9","transactionHash":"0x4110587c1b8d86edc85dce929a34127f1cb8809515a9f177c91c866de3eb0638","transactionIndex":"0x0","blockHash":"0xd5e8d4e4eb51a2a2a6ec20ef68a4c2801240743c8deb77a6a1d118ac3eefb725","logIndex":"0x0","removed":false}]`, + }, + { f: sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), err: "invalid block range", }, From 285202aae2a580b82f30ebd909c1819b22d90066 Mon Sep 17 00:00:00 2001 From: Mario Vega Date: Tue, 31 Oct 2023 08:23:51 -0600 Subject: [PATCH 13/41] cmd/evm: add --run option to blocktest command (#28421) Co-authored-by: lightclient --- 
cmd/evm/blockrunner.go | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/cmd/evm/blockrunner.go b/cmd/evm/blockrunner.go index 42be6726b591..ff6557458628 100644 --- a/cmd/evm/blockrunner.go +++ b/cmd/evm/blockrunner.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "os" + "regexp" "sort" "github.com/ethereum/go-ethereum/core/rawdb" @@ -30,11 +31,18 @@ import ( "github.com/urfave/cli/v2" ) +var RunFlag = &cli.StringFlag{ + Name: "run", + Value: ".*", + Usage: "Run only those tests matching the regular expression.", +} + var blockTestCommand = &cli.Command{ Action: blockTestCmd, Name: "blocktest", Usage: "executes the given blockchain tests", ArgsUsage: "", + Flags: []cli.Flag{RunFlag}, } func blockTestCmd(ctx *cli.Context) error { @@ -61,13 +69,21 @@ func blockTestCmd(ctx *cli.Context) error { if err = json.Unmarshal(src, &tests); err != nil { return err } - // run them in order + re, err := regexp.Compile(ctx.String(RunFlag.Name)) + if err != nil { + return fmt.Errorf("invalid regex -%s: %v", RunFlag.Name, err) + } + + // Run them in order var keys []string for key := range tests { keys = append(keys, key) } sort.Strings(keys) for _, name := range keys { + if !re.MatchString(name) { + continue + } test := tests[name] if err := test.Run(false, rawdb.HashScheme, tracer); err != nil { return fmt.Errorf("test %v: %w", name, err) From f4ac548619e12e5be31543a72e995ab2b3904214 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Wed, 1 Nov 2023 00:19:31 +0800 Subject: [PATCH 14/41] ethdb/pebble: cap memory table size as maxMemTableSize-1 (#28444) --- ethdb/pebble/pebble.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index 691aa7329986..d713e2863efe 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -160,8 +160,15 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e // including a frozen memory table and another live 
one. memTableLimit := 2 memTableSize := cache * 1024 * 1024 / 2 / memTableLimit - if memTableSize > maxMemTableSize { - memTableSize = maxMemTableSize + + // The memory table size is currently capped at maxMemTableSize-1 due to a + // known bug in the pebble where maxMemTableSize is not recognized as a + // valid size. + // + // TODO use the maxMemTableSize as the maximum table size once the issue + // in pebble is fixed. + if memTableSize >= maxMemTableSize { + memTableSize = maxMemTableSize - 1 } db := &Database{ fn: file, From a3be38127c87ef8827a8dc3561caca3866de85d2 Mon Sep 17 00:00:00 2001 From: lmittmann <3458786+lmittmann@users.noreply.github.com> Date: Thu, 2 Nov 2023 07:54:28 +0100 Subject: [PATCH 15/41] core/vm: performance tweak of `OpCode.String()` (#28453) make `opCodeToString` a `[256]string` array Co-authored-by: lmittmann --- core/vm/opcodes.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go index a11cf05a1578..c7a3a163bea5 100644 --- a/core/vm/opcodes.go +++ b/core/vm/opcodes.go @@ -224,8 +224,7 @@ const ( SELFDESTRUCT OpCode = 0xff ) -// Since the opcodes aren't all in order we can't use a regular slice. -var opCodeToString = map[OpCode]string{ +var opCodeToString = [256]string{ // 0x0 range - arithmetic ops. 
STOP: "STOP", ADD: "ADD", @@ -399,12 +398,10 @@ var opCodeToString = map[OpCode]string{ } func (op OpCode) String() string { - str := opCodeToString[op] - if len(str) == 0 { - return fmt.Sprintf("opcode %#x not defined", int(op)) + if s := opCodeToString[op]; s != "" { + return s } - - return str + return fmt.Sprintf("opcode %#x not defined", int(op)) } var stringToOp = map[string]OpCode{ From b1cec853bef98acc750298b1c9b8165f2ac6ce5a Mon Sep 17 00:00:00 2001 From: Alvaro Sevilla Date: Fri, 3 Nov 2023 10:28:27 +0100 Subject: [PATCH 16/41] eth/tracers: add position field for callTracer logs (#28389) Currently, one can use the "withLogs" parameter to include logs in the callTracer results, which allows the user to see at which trace level was each log emitted. This commit adds a position field to the logs which determine the exact ordering of a call's logs and its subcalls. This would be useful e.g. for explorers wishing to display the flow of execution. Co-authored-by: jsvisa --- .../internal/tracetest/calltrace_test.go | 9 +- .../call_tracer_withLog/calldata.json | 6 +- .../call_tracer_withLog/delegatecall.json | 15 +- .../call_tracer_withLog/multi_contracts.json | 96 +++++++---- .../call_tracer_withLog/multilogs.json | 150 ++++++++++++------ .../testdata/call_tracer_withLog/notopic.json | 6 +- .../testdata/call_tracer_withLog/simple.json | 3 +- .../tx_partial_failed.json | 3 +- eth/tracers/native/call.go | 10 +- 9 files changed, 200 insertions(+), 98 deletions(-) diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go index 6df49a90c1d0..5c74baacd1c8 100644 --- a/eth/tracers/internal/tracetest/calltrace_test.go +++ b/eth/tracers/internal/tracetest/calltrace_test.go @@ -47,9 +47,10 @@ type callContext struct { // callLog is the result of LOG opCode type callLog struct { - Address common.Address `json:"address"` - Topics []common.Hash `json:"topics"` - Data hexutil.Bytes `json:"data"` + Address common.Address 
`json:"address"` + Topics []common.Hash `json:"topics"` + Data hexutil.Bytes `json:"data"` + Position hexutil.Uint `json:"position"` } // callTrace is the result of a callTracer run. @@ -324,7 +325,7 @@ func TestInternals(t *testing.T) { byte(vm.LOG0), }, tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)), - want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0x13880","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}],"value":"0x0","type":"CALL"}`, + want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0x13880","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","position":"0x0"}],"value":"0x0","type":"CALL"}`, }, { // Leads to OOM on the prestate tracer diff --git 
a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json index 9264f1e2fdf3..dbece7229d3c 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json @@ -95,14 +95,16 @@ "topics": [ "0xe1c52dc63b719ade82e8bea94cc41a0d5d28e4aaf536adb5e9cccc9ff8c1aeda" ], - "data": "0x0000000000000000000000004f5777744b500616697cb655dcb02ee6cd51deb5be96016bb57376da7a6d296e0a405ee1501778227dfa604df0a81cb1ae018598" + "data": "0x0000000000000000000000004f5777744b500616697cb655dcb02ee6cd51deb5be96016bb57376da7a6d296e0a405ee1501778227dfa604df0a81cb1ae018598", + "position": "0x0" }, { "address": "0x200edd17f30485a8735878661960cd7a9a95733f", "topics": [ "0xacbdb084c721332ac59f9b8e392196c9eb0e4932862da8eb9beaf0dad4f550da" ], - "data": "0x0000000000000000000000000000000000000000000000000000000000000000" + "data": "0x0000000000000000000000000000000000000000000000000000000000000000", + "position": "0x0" } ], "value": "0x8ac7230489e80000", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json index f63dbd47dc5c..2b03dbb8dd54 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json @@ -257,7 +257,8 @@ "0x0000000000000000000000003de712784baf97260455ae25fb74f574ec9c1add", "0x0000000000000000000000006ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5" ], - "data": "0x00000000000000000000000000000000000000000000000080d29fa5cccfadac" + "data": "0x00000000000000000000000000000000000000000000000080d29fa5cccfadac", + "position": "0x0" } ], "value": "0x0", @@ -278,7 +279,8 @@ "0x0000000000000000000000006ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5", 
"0x0000000000000000000000005aae5c59d642e5fd45b427df6ed478b49d55fefd" ], - "data": "0x00000000000000000000000000000000000000000000000080d29fa5cccfadac" + "data": "0x00000000000000000000000000000000000000000000000080d29fa5cccfadac", + "position": "0x0" } ], "value": "0x0", @@ -307,7 +309,8 @@ "0x0000000000000000000000006ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5", "0x0000000000000000000000005aae5c59d642e5fd45b427df6ed478b49d55fefd" ], - "data": "0x00000000000000000000000000000000000000000000000080d29fa5cccfadac" + "data": "0x00000000000000000000000000000000000000000000000080d29fa5cccfadac", + "position": "0x0" } ], "value": "0x0", @@ -328,7 +331,8 @@ "0x0000000000000000000000005aae5c59d642e5fd45b427df6ed478b49d55fefd", "0x000000000000000000000000950ca4a06c78934a148b7a3ff3ea8fc366f77a06" ], - "data": "0x0000000000000000000000000000000000000000000000000041f50e27d56848" + "data": "0x0000000000000000000000000000000000000000000000000041f50e27d56848", + "position": "0x0" } ], "value": "0x0", @@ -391,7 +395,8 @@ "0x0000000000000000000000006ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5", "0x0000000000000000000000003de712784baf97260455ae25fb74f574ec9c1add" ], - "data": "0x000000000000000000000000000000000000000000000000de0b6b3a76400000" + "data": "0x000000000000000000000000000000000000000000000000de0b6b3a76400000", + "position": "0x0" } ], "type": "DELEGATECALL", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json index 5e5d9538672e..263e88d6e14d 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json @@ -357,7 +357,8 @@ "0x000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89", "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd" ], - "data": "0x00000000000000000000000000000000000000000001819451f999d617dafa93" + 
"data": "0x00000000000000000000000000000000000000000001819451f999d617dafa93", + "position": "0x0" } ], "value": "0x0", @@ -370,7 +371,8 @@ "topics": [ "0x69ca02dd4edd7bf0a4abb9ed3b7af3f14778db5d61921c7dc7cd545266326de2" ], - "data": "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd00000000000000000000000000000000000000000001819451f999d617dafa93" + "data": "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd00000000000000000000000000000000000000000001819451f999d617dafa93", + "position": "0x1" } ], "value": "0x0", @@ -491,7 +493,8 @@ "0x000000000000000000000000f835a0247b0063c04ef22006ebe57c5f11977cc4", "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd" ], - "data": "0x00000000000000000000000000000000000000000001819451f999d617dafa76" + "data": "0x00000000000000000000000000000000000000000001819451f999d617dafa76", + "position": "0x0" } ], "value": "0x0", @@ -504,7 +507,8 @@ "topics": [ "0x69ca02dd4edd7bf0a4abb9ed3b7af3f14778db5d61921c7dc7cd545266326de2" ], - "data": "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd00000000000000000000000000000000000000000001819451f999d617dafa76" + "data": "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd00000000000000000000000000000000000000000001819451f999d617dafa76", + "position": "0x1" } ], "value": "0x0", @@ -692,7 +696,8 @@ "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", "0x0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc" ], - "data": "0x0000000000000000000000000000000000000000000181a7ae53ea2f0bef8ccd" + "data": "0x0000000000000000000000000000000000000000000181a7ae53ea2f0bef8ccd", + "position": "0x0" } ], "value": "0x0", @@ -874,7 +879,8 @@ "0x0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc", "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd" ], - "data": "0x0000000000000000000000000000000000000000000181a7ae53ea2f0bef8ccc" + "data": 
"0x0000000000000000000000000000000000000000000181a7ae53ea2f0bef8ccc", + "position": "0x0" } ], "value": "0x0", @@ -892,7 +898,8 @@ "0x9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc", "0x0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc" ], - "data": "0x0000000000000000000000000000000000000000000000022b1c8c12279fffff" + "data": "0x0000000000000000000000000000000000000000000000022b1c8c12279fffff", + "position": "0x1" } ], "value": "0x0", @@ -914,7 +921,8 @@ "0x9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc", "0x0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc" ], - "data": "0x0000000000000000000000000000000000000000000000022b1c8c12279fffff" + "data": "0x0000000000000000000000000000000000000000000000022b1c8c12279fffff", + "position": "0x1" } ], "value": "0x0", @@ -939,7 +947,8 @@ "0x0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc", "0x0000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f" ], - "data": "0x0000000000000000000000000000000000000000000000000000000000000001" + "data": "0x0000000000000000000000000000000000000000000000000000000000000001", + "position": "0x0" } ], "value": "0x0", @@ -952,14 +961,16 @@ "topics": [ "0x07cf7e805770612a8b2ee8e0bcbba8aa908df5f85fbc4f9e2ef384cf75315038" ], - "data": "0x0000000000000000000000000000000000000000000000000000000000000000" + "data": "0x0000000000000000000000000000000000000000000000000000000000000000", + "position": "0x6" }, { "address": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", "topics": [ "0x7027eecbd2a688fc1fa281702b311ed7168571514adfd17014a55d828cb43382" ], - "data": "0x000000000000000000000000000000000000000000000004563918244f400000" + "data": "0x000000000000000000000000000000000000000000000004563918244f400000", + "position": "0x8" } ], "value": "0x0", @@ -1035,7 +1046,8 @@ "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", "0x0000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f" ], - 
"data": "0x0000000000000000000000000000000000000000000000000000000000000063" + "data": "0x0000000000000000000000000000000000000000000000000000000000000063", + "position": "0x0" } ], "value": "0x0", @@ -1162,7 +1174,8 @@ "0x0000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f", "0x000000000000000000000000da4a4626d3e16e094de3225a751aab7128e96526" ], - "data": "0x0000000000000000000000000000000000000000000000000000000000000064" + "data": "0x0000000000000000000000000000000000000000000000000000000000000064", + "position": "0x0" } ], "value": "0x0", @@ -1175,14 +1188,16 @@ "topics": [ "0x4b0bc4f25f8d0b92d2e12b686ba96cd75e4e69325e6cf7b1f3119d14eaf2cbdf" ], - "data": "0x000000000000000000000000da4a4626d3e16e094de3225a751aab7128e96526" + "data": "0x000000000000000000000000da4a4626d3e16e094de3225a751aab7128e96526", + "position": "0x6" }, { "address": "0x6dbfc63479ffc031f23e94dc91befa38bec2c25f", "topics": [ "0xf340c079d598119636d42046c6a2d2faf7a68c04aecee516f0e0b8a9e79b8666" ], - "data": "0x000000000000000000000000da4a4626d3e16e094de3225a751aab7128e9652600000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000000" + "data": "0x000000000000000000000000da4a4626d3e16e094de3225a751aab7128e9652600000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000000", + "position": "0x9" } ], "value": "0x0", @@ -1231,7 +1246,8 @@ "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", "0x0000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b" ], - "data": "0x0000000000000000000000000000000000000000000000000000000000000001" + "data": "0x0000000000000000000000000000000000000000000000000000000000000001", + "position": "0x0" } ], "value": "0x0", @@ -1324,7 +1340,8 @@ "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", "0x0000000000000000000000000000000000000000000000000000000000000001" ], - "data": 
"0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "position": "0x2" } ], "value": "0x0", @@ -1417,7 +1434,8 @@ "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", "0x0000000000000000000000000000000000000000000000000000000000000002" ], - "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "position": "0x2" } ], "value": "0x0", @@ -1510,7 +1528,8 @@ "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", "0x0000000000000000000000000000000000000000000000000000000000000003" ], - "data": 
"0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "position": "0x2" } ], "value": "0x0", @@ -1603,7 +1622,8 @@ "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", "0x0000000000000000000000000000000000000000000000000000000000000004" ], - "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "position": "0x2" } ], "value": "0x0", @@ -1696,7 +1716,8 @@ "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", "0x0000000000000000000000000000000000000000000000000000000000000005" ], - "data": 
"0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "position": "0x2" } ], "value": "0x0", @@ -1789,7 +1810,8 @@ "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", "0x0000000000000000000000000000000000000000000000000000000000000006" ], - "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "position": "0x2" } ], "value": "0x0", @@ -1882,7 +1904,8 @@ "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", "0x0000000000000000000000000000000000000000000000000000000000000007" ], - "data": 
"0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "position": "0x2" } ], "value": "0x0", @@ -1975,7 +1998,8 @@ "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", "0x0000000000000000000000000000000000000000000000000000000000000008" ], - "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "position": "0x2" } ], "value": "0x0", @@ -2068,7 +2092,8 @@ "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", "0x0000000000000000000000000000000000000000000000000000000000000009" ], - "data": 
"0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "position": "0x2" } ], "value": "0x0", @@ -2161,7 +2186,8 @@ "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", "0x000000000000000000000000000000000000000000000000000000000000000a" ], - "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "position": "0x2" } ], "value": "0x0", @@ -2213,7 +2239,8 @@ "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", "0x0000000000000000000000007ccbc69292c7a6d7b538c91f3b283de97906cf30" ], - "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c" + "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c", + "position": "0x0" } ], "value": "0x0", @@ -2234,7 +2261,8 @@ 
"0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", "0x0000000000000000000000001b9ec8ba24630b75a7a958153ffff56dd6d4b6a2" ], - "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c" + "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c", + "position": "0x0" } ], "value": "0x0", @@ -2255,7 +2283,8 @@ "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", "0x000000000000000000000000c3a2c744ad1f5253c736875b93bacce5b01b060b" ], - "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c" + "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c", + "position": "0x0" } ], "value": "0x0", @@ -2268,21 +2297,24 @@ "topics": [ "0xc6d8c0af6d21f291e7c359603aa97e0ed500f04db6e983b9fce75a91c6b8da6b" ], - "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c" + "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c", + "position": "0x2" }, { "address": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", "topics": [ "0xc6d8c0af6d21f291e7c359603aa97e0ed500f04db6e983b9fce75a91c6b8da6b" ], - "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c" + "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c", + "position": "0x3" }, { "address": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", "topics": [ "0xc6d8c0af6d21f291e7c359603aa97e0ed500f04db6e983b9fce75a91c6b8da6b" ], - "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c" + "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c", + "position": "0x4" } ], "value": "0x0", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json index 1ffffd240e7d..66d458200812 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json +++ 
b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json @@ -178,350 +178,400 @@ "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfd0000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfd0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffebebeb0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffebebeb0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8888880000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8888880000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb3b3b30000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb3b3b30000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcfcfc0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcfcfc0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe3e3e30000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe3e3e30000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3e3e3e0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3e3e3e0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdbdbdb0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdbdbdb0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4f4f40000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4f4f40000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfbfb0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfbfb0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb0b0b00000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb0b0b00000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa0a0a00000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa0a0a00000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b5b5b0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b5b5b0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbababa0000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbababa0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeaeaea0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeaeaea0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa9a9a90000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa9a9a90000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb9b9b90000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb9b9b90000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfbfb0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfbfb0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbababa0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbababa0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6363630000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6363630000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9f9f90000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9f9f90000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeaeaea0000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeaeaea0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9c9c9c0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9c9c9c0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8f8f80000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8f8f80000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfd0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfd0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcfcfc0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcfcfc0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfd0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfd0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002fd00000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4d4e530000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x00000000000000000000000000000000000000000000000000000000000002fd00000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4d4e530000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": 
"0x000000000000000000000000000000000000000000000000000000000000034300000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + "data": "0x000000000000000000000000000000000000000000000000000000000000034300000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", "topics": [ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000002fd00000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4f494b0000000000000000000000000000000000000000000000000011c37937e08000" + "data": 
"0x00000000000000000000000000000000000000000000000000000000000002fd00000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4f494b0000000000000000000000000000000000000000000000000011c37937e08000", + "position": "0x0" } ], "value": "0x3782dace9d90000", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json index 116606b3c7e5..762ccbe58f75 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json @@ -266,7 +266,8 @@ "topics": [ "0xaf30e4d66b2f1f23e63ef4591058a897f67e6867233e33ca3508b982dcc4129b" ], - "data": 
"0x00000000000000000000000050739060a2c32dc076e507ae1a893aab28ecfe68d1b13c1538a940417bf0e73b2498634436753c854c7fb971224d971bd2ae3e8800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000016000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000249f011000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000355524c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000436a736f6e2868747470733a2f2f6170692e72616e646f6d2e6f72672f6a736f6e2d7270632f312f696e766f6b65292e726573756c742e72616e646f6d2e646174612e300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012c4244584a68725670424a35336f3243786c4a526c51745a4a4b5a714c5974354951652b37335944533448744e6a5335486f64624942337476666f773755717579416b303835566b4c6e4c3945704b67777157517a375a4c64477673516c526432734b78496f6c4e673944626e6650737047714c684c62625953566e4e38437776736a7041586353536f33632b34634e774339307946346f4e69626b764433797461706f5a37676f5453796f5559546677536a6e773374692b484a5648374e332b633069774f43715a6a4464734751556358336d33532f494857624f4f5151356f734f344c626a33476730783155644e7466557a5943465937396e7a596757495145464375524249306e364e42764251573732372b4f73445259304a2f392f676a74387563696248576963303d0000000000000000000000000000000000000000" + "data": 
"0x00000000000000000000000050739060a2c32dc076e507ae1a893aab28ecfe68d1b13c1538a940417bf0e73b2498634436753c854c7fb971224d971bd2ae3e8800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000016000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000249f011000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000355524c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000436a736f6e2868747470733a2f2f6170692e72616e646f6d2e6f72672f6a736f6e2d7270632f312f696e766f6b65292e726573756c742e72616e646f6d2e646174612e300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012c4244584a68725670424a35336f3243786c4a526c51745a4a4b5a714c5974354951652b37335944533448744e6a5335486f64624942337476666f773755717579416b303835566b4c6e4c3945704b67777157517a375a4c64477673516c526432734b78496f6c4e673944626e6650737047714c684c62625953566e4e38437776736a7041586353536f33632b34634e774339307946346f4e69626b764433797461706f5a37676f5453796f5559546677536a6e773374692b484a5648374e332b633069774f43715a6a4464734751556358336d33532f494857624f4f5151356f734f344c626a33476730783155644e7466557a5943465937396e7a596757495145464375524249306e364e42764251573732372b4f73445259304a2f392f676a74387563696248576963303d0000000000000000000000000000000000000000", + "position": "0x4" } ], "value": "0x179d63013c5654", @@ -277,7 +278,8 @@ { "address": "0x50739060a2c32dc076e507ae1a893aab28ecfe68", "topics": [], - "data": "0x62616e6b726f6c6c5f6d69736d61746368" + "data": "0x62616e6b726f6c6c5f6d69736d61746368", + "position": "0x2" } ], "value": "0x429d069189e0000", diff --git 
a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json index 30f1777067ef..64941dd4dbc1 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json @@ -75,7 +75,8 @@ "0x000000000000000000000000d1220a0cf47c7b9be7a2e6ba89f429762e7b9adb", "0x000000000000000000000000dbf03b407c01e7cd3cbea99509d93f8dddc8c6fb" ], - "data": "0x0000000000000000000000000000000000000000000000000000000000989680" + "data": "0x0000000000000000000000000000000000000000000000000000000000989680", + "position": "0x0" } ], "value": "0x0", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json index eb2514427c91..6faf898a0f6e 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json @@ -98,7 +98,8 @@ "topics": [ "0x92ca3a80853e6663fa31fa10b99225f18d4902939b4c53a9caae9043f6efd004" ], - "data": "0x00000000000000000000000001115b41bd2731353dd3e6abf44818fdc035aaf10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000bb9bc244d798123fde783fcc1c72d3bb8c1894130000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000008861393035396362623030303030303030303030303030303030303030303030303930643363313831326465323636396266383037626437373538636562316533343937616337653430303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303031633662663532363334303030" + "data": 
"0x00000000000000000000000001115b41bd2731353dd3e6abf44818fdc035aaf10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000bb9bc244d798123fde783fcc1c72d3bb8c1894130000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000008861393035396362623030303030303030303030303030303030303030303030303930643363313831326465323636396266383037626437373538636562316533343937616337653430303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303031633662663532363334303030", + "position": "0x0" } ], "value": "0x0", diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go index 34cf027acacf..f85cf6206a72 100644 --- a/eth/tracers/native/call.go +++ b/eth/tracers/native/call.go @@ -40,6 +40,9 @@ type callLog struct { Address common.Address `json:"address"` Topics []common.Hash `json:"topics"` Data hexutil.Bytes `json:"data"` + // Position of the log relative to subcalls within the same trace + // See https://github.com/ethereum/go-ethereum/pull/28389 for details + Position hexutil.Uint `json:"position"` } type callFrame struct { @@ -188,7 +191,12 @@ func (t *callTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, sco return } - log := callLog{Address: scope.Contract.Address(), Topics: topics, Data: hexutil.Bytes(data)} + log := callLog{ + Address: scope.Contract.Address(), + Topics: topics, + Data: hexutil.Bytes(data), + Position: hexutil.Uint(len(t.callstack[len(t.callstack)-1].Calls)), + } t.callstack[len(t.callstack)-1].Logs = append(t.callstack[len(t.callstack)-1].Logs, log) } } From e91cdb49beb4b2a3872b5f2548bf2d6559e4f561 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 3 Nov 2023 19:40:37 +0200 Subject: [PATCH 17/41] ethclient: fix forwarding 1559 gas fields (#28462) --- ethclient/ethclient.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ethclient/ethclient.go 
b/ethclient/ethclient.go index 83c89a2fd4eb..e8a201f71b35 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -658,6 +658,12 @@ func toCallArg(msg ethereum.CallMsg) interface{} { if msg.GasPrice != nil { arg["gasPrice"] = (*hexutil.Big)(msg.GasPrice) } + if msg.GasFeeCap != nil { + arg["maxFeePerGas"] = (*hexutil.Big)(msg.GasFeeCap) + } + if msg.GasTipCap != nil { + arg["maxPriorityFeePerGas"] = (*hexutil.Big)(msg.GasTipCap) + } return arg } From 51b5ad3da311c8a9c831b453d107cebb4840fb2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 7 Nov 2023 12:35:03 +0200 Subject: [PATCH 18/41] .travis: enable cross building to macos arm64 (#28474) Co-authored-by: Felix Lange --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index 4f5d482c6514..141a5233cb72 100644 --- a/.travis.yml +++ b/.travis.yml @@ -104,6 +104,8 @@ jobs: script: - go run build/ci.go install -dlgo - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds + - go run build/ci.go install -dlgo -arch arm64 + - go run build/ci.go archive -arch arm64 -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # These builders run the tests - stage: build From 97ae32441e481b3afb9170ef355c2b54640482ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 7 Nov 2023 13:00:31 +0200 Subject: [PATCH 19/41] travis: use newer builder image (#28475) --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 141a5233cb72..c2bfc3f2bff4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -96,6 +96,7 @@ jobs: - stage: build if: type = push os: osx + osx_image: xcode14.2 go: 1.21.x env: - azure-osx From f20b334f214cd6e079cd193f01834b392375727f Mon Sep 17 00:00:00 2001 From: Delweng Date: Tue, 7 Nov 2023 19:41:19 +0800 Subject: [PATCH 20/41] eth/filters: eth_getLogs fast exit for invalid block range (#28386) 
--- eth/filters/api.go | 8 ++++++-- eth/filters/filter_system.go | 3 +-- eth/filters/filter_system_test.go | 18 ++++++++++++++++++ 3 files changed, 25 insertions(+), 4 deletions(-) diff --git a/eth/filters/api.go b/eth/filters/api.go index cc08b442e850..22dff6c59e9e 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -34,8 +34,9 @@ import ( ) var ( - errInvalidTopic = errors.New("invalid topic(s)") - errFilterNotFound = errors.New("filter not found") + errInvalidTopic = errors.New("invalid topic(s)") + errFilterNotFound = errors.New("filter not found") + errInvalidBlockRange = errors.New("invalid block range params") ) // filter is a helper struct that holds meta information over the filter type @@ -347,6 +348,9 @@ func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*type if crit.ToBlock != nil { end = crit.ToBlock.Int64() } + if begin > 0 && end > 0 && begin > end { + return nil, errInvalidBlockRange + } // Construct the range filter filter = api.sys.NewRangeFilter(begin, end, crit.Addresses, crit.Topics) } diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index 35e396c23e75..a9b5f2e07919 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -20,7 +20,6 @@ package filters import ( "context" - "errors" "fmt" "sync" "sync/atomic" @@ -332,7 +331,7 @@ func (es *EventSystem) SubscribeLogs(crit ethereum.FilterQuery, logs chan []*typ if from >= 0 && to == rpc.LatestBlockNumber { return es.subscribeLogs(crit, logs), nil } - return nil, errors.New("invalid from and to block combination: from > to") + return nil, errInvalidBlockRange } // subscribeMinedPendingLogs creates a subscription that returned mined and diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go index f7e5327c56e7..93cbf01830b3 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -429,7 +429,10 @@ func TestInvalidLogFilterCreation(t *testing.T) { } } +// 
TestLogFilterUninstall tests invalid getLogs requests func TestInvalidGetLogsRequest(t *testing.T) { + t.Parallel() + var ( db = rawdb.NewMemoryDatabase() _, sys = newTestFilterSystem(t, db, Config{}) @@ -451,6 +454,21 @@ func TestInvalidGetLogsRequest(t *testing.T) { } } +// TestInvalidGetRangeLogsRequest tests getLogs with invalid block range +func TestInvalidGetRangeLogsRequest(t *testing.T) { + t.Parallel() + + var ( + db = rawdb.NewMemoryDatabase() + _, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) + ) + + if _, err := api.GetLogs(context.Background(), FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}); err != errInvalidBlockRange { + t.Errorf("Expected Logs for invalid range return error, but got: %v", err) + } +} + // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed. func TestLogFilter(t *testing.T) { t.Parallel() From 4d9f3cd5d751efccd501b08ab6cf38a83b5e2858 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Tue, 7 Nov 2023 14:21:46 +0100 Subject: [PATCH 21/41] eth: set networkID to chainID by default (#28250) Co-authored-by: Felix Lange --- eth/backend.go | 12 ++++++++---- eth/ethconfig/config.go | 7 ++++--- ethclient/ethclient_test.go | 2 +- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/eth/backend.go b/eth/backend.go index c6787870ca02..09559f0ac116 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -152,6 +152,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if err != nil { return nil, err } + networkID := config.NetworkId + if networkID == 0 { + networkID = chainConfig.ChainID.Uint64() + } eth := &Ethereum{ config: config, merger: consensus.NewMerger(chainDb), @@ -160,7 +164,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { accountManager: stack.AccountManager(), engine: engine, closeBloomHandler: make(chan struct{}), - networkID: config.NetworkId, + networkID: networkID, 
gasPrice: config.Miner.GasPrice, etherbase: config.Miner.Etherbase, bloomRequests: make(chan chan *bloombits.Retrieval), @@ -173,7 +177,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if bcVersion != nil { dbVer = fmt.Sprintf("%d", *bcVersion) } - log.Info("Initialising Ethereum protocol", "network", config.NetworkId, "dbversion", dbVer) + log.Info("Initialising Ethereum protocol", "network", networkID, "dbversion", dbVer) if !config.SkipBcVersionCheck { if bcVersion != nil && *bcVersion > core.BlockChainVersion { @@ -236,7 +240,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { Chain: eth.blockchain, TxPool: eth.txPool, Merger: eth.merger, - Network: config.NetworkId, + Network: networkID, Sync: config.SyncMode, BloomCache: uint64(cacheLimit), EventMux: eth.eventMux, @@ -270,7 +274,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } // Start the RPC service - eth.netRPCService = ethapi.NewNetAPI(eth.p2pServer, config.NetworkId) + eth.netRPCService = ethapi.NewNetAPI(eth.p2pServer, networkID) // Register the backend on the node stack.RegisterAPIs(eth.APIs()) diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index bfb1df3fb15e..5e8f58efdb35 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -59,7 +59,7 @@ var LightClientGPO = gasprice.Config{ // Defaults contains default settings for use on the Ethereum main net. var Defaults = Config{ SyncMode: downloader.SnapSync, - NetworkId: 1, + NetworkId: 0, // enable auto configuration of networkID == chainID TxLookupLimit: 2350000, TransactionHistory: 2350000, StateHistory: params.FullImmutabilityThreshold, @@ -87,8 +87,9 @@ type Config struct { // If nil, the Ethereum main net block is used. Genesis *core.Genesis `toml:",omitempty"` - // Protocol options - NetworkId uint64 // Network ID to use for selecting peers to connect to + // Network ID separates blockchains on the peer-to-peer networking level. 
When left + // zero, the chain ID is used as network ID. + NetworkId uint64 SyncMode downloader.SyncMode // This can be set to list of enrtree:// URLs which will be queried for diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go index 4aa25759fb46..0f87ad5f5cd3 100644 --- a/ethclient/ethclient_test.go +++ b/ethclient/ethclient_test.go @@ -481,7 +481,7 @@ func testStatusFunctions(t *testing.T, client *rpc.Client) { if err != nil { t.Fatalf("unexpected error: %v", err) } - if networkID.Cmp(big.NewInt(0)) != 0 { + if networkID.Cmp(big.NewInt(1337)) != 0 { t.Fatalf("unexpected networkID: %v", networkID) } From 470dba8fc1890938a65bbf4293a4759a9b9615a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 8 Nov 2023 11:22:08 +0200 Subject: [PATCH 22/41] core/vm: set basefee to 0 internally on eth_call (#28470) * core/vm: set basefee to 0 internally on eth_call * core: nicer 0-basefee, make it work for blob fees too * internal/ethapi: make tests a bit more complex * core: fix blob fee checker * core: make code a bit more readable * core: fix some test error strings * core/vm: Get rid of weird comment * core: dict wrong typo --- core/evm.go | 6 +++++- core/state_processor_test.go | 9 ++++---- core/state_transition.go | 22 +++++++++++-------- core/vm/evm.go | 26 +++++++++++++---------- core/vm/runtime/env.go | 1 + core/vm/runtime/runtime.go | 3 ++- internal/ethapi/api_test.go | 41 ++++++++++++++++++++++++++++++++++++ 7 files changed, 82 insertions(+), 26 deletions(-) diff --git a/core/evm.go b/core/evm.go index 46dcb3146260..c4801dc797db 100644 --- a/core/evm.go +++ b/core/evm.go @@ -77,11 +77,15 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common // NewEVMTxContext creates a new transaction context for a single transaction. 
func NewEVMTxContext(msg *Message) vm.TxContext { - return vm.TxContext{ + ctx := vm.TxContext{ Origin: msg.From, GasPrice: new(big.Int).Set(msg.GasPrice), BlobHashes: msg.BlobHashes, } + if msg.BlobGasFeeCap != nil { + ctx.BlobFeeCap = new(big.Int).Set(msg.BlobGasFeeCap) + } + return ctx } // GetHashFn returns a GetHashFunc which retrieves header hashes by number diff --git a/core/state_processor_test.go b/core/state_processor_test.go index e80bc288c31f..5ff9353bd9a4 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -95,7 +95,7 @@ func TestStateProcessorErrors(t *testing.T) { }), signer, key1) return tx } - var mkBlobTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int, hashes []common.Hash) *types.Transaction { + var mkBlobTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap, blobGasFeeCap *big.Int, hashes []common.Hash) *types.Transaction { tx, err := types.SignTx(types.NewTx(&types.BlobTx{ Nonce: nonce, GasTipCap: uint256.MustFromBig(gasTipCap), @@ -103,6 +103,7 @@ func TestStateProcessorErrors(t *testing.T) { Gas: gasLimit, To: to, BlobHashes: hashes, + BlobFeeCap: uint256.MustFromBig(blobGasFeeCap), Value: new(uint256.Int), }), signer, key1) if err != nil { @@ -196,7 +197,7 @@ func TestStateProcessorErrors(t *testing.T) { txs: []*types.Transaction{ mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(0), big.NewInt(0)), }, - want: "could not apply tx 0 [0xc4ab868fef0c82ae0387b742aee87907f2d0fc528fc6ea0a021459fb0fc4a4a8]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 0 baseFee: 875000000", + want: "could not apply tx 0 [0xc4ab868fef0c82ae0387b742aee87907f2d0fc528fc6ea0a021459fb0fc4a4a8]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 0, baseFee: 875000000", }, { // ErrTipVeryHigh txs: []*types.Transaction{ @@ -247,9 +248,9 @@ func 
TestStateProcessorErrors(t *testing.T) { }, { // ErrBlobFeeCapTooLow txs: []*types.Transaction{ - mkBlobTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(1), []common.Hash{(common.Hash{1})}), + mkBlobTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(1), big.NewInt(0), []common.Hash{(common.Hash{1})}), }, - want: "could not apply tx 0 [0x6c11015985ce82db691d7b2d017acda296db88b811c3c60dc71449c76256c716]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 1 baseFee: 875000000", + want: "could not apply tx 0 [0x6c11015985ce82db691d7b2d017acda296db88b811c3c60dc71449c76256c716]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 1, baseFee: 875000000", }, } { block := GenerateBadBlock(gspec.ToBlock(), beacon.New(ethash.NewFaker()), tt.txs, gspec.Config) diff --git a/core/state_transition.go b/core/state_transition.go index fb03c48aabd4..612fdd781379 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -287,11 +287,11 @@ func (st *StateTransition) preCheck() error { msg.From.Hex(), codeHash) } } - // Make sure that transaction gasFeeCap is greater than the baseFee (post london) if st.evm.ChainConfig().IsLondon(st.evm.Context.BlockNumber) { // Skip the checks if gas fields are zero and baseFee was explicitly disabled (eth_call) - if !st.evm.Config.NoBaseFee || msg.GasFeeCap.BitLen() > 0 || msg.GasTipCap.BitLen() > 0 { + skipCheck := st.evm.Config.NoBaseFee && msg.GasFeeCap.BitLen() == 0 && msg.GasTipCap.BitLen() == 0 + if !skipCheck { if l := msg.GasFeeCap.BitLen(); l > 256 { return fmt.Errorf("%w: address %v, maxFeePerGas bit length: %d", ErrFeeCapVeryHigh, msg.From.Hex(), l) @@ -307,7 +307,7 @@ func (st *StateTransition) preCheck() error { // This will panic if baseFee is nil, but basefee presence is verified // as part of header validation. 
if msg.GasFeeCap.Cmp(st.evm.Context.BaseFee) < 0 { - return fmt.Errorf("%w: address %v, maxFeePerGas: %s baseFee: %s", ErrFeeCapTooLow, + return fmt.Errorf("%w: address %v, maxFeePerGas: %s, baseFee: %s", ErrFeeCapTooLow, msg.From.Hex(), msg.GasFeeCap, st.evm.Context.BaseFee) } } @@ -324,17 +324,21 @@ func (st *StateTransition) preCheck() error { } } } - + // Check that the user is paying at least the current blob fee if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) { if st.blobGasUsed() > 0 { - // Check that the user is paying at least the current blob fee - blobFee := st.evm.Context.BlobBaseFee - if st.msg.BlobGasFeeCap.Cmp(blobFee) < 0 { - return fmt.Errorf("%w: address %v have %v want %v", ErrBlobFeeCapTooLow, st.msg.From.Hex(), st.msg.BlobGasFeeCap, blobFee) + // Skip the checks if gas fields are zero and blobBaseFee was explicitly disabled (eth_call) + skipCheck := st.evm.Config.NoBaseFee && msg.BlobGasFeeCap.BitLen() == 0 + if !skipCheck { + // This will panic if blobBaseFee is nil, but blobBaseFee presence + // is verified as part of header validation. 
+ if msg.BlobGasFeeCap.Cmp(st.evm.Context.BlobBaseFee) < 0 { + return fmt.Errorf("%w: address %v blobGasFeeCap: %v, blobBaseFee: %v", ErrBlobFeeCapTooLow, + msg.From.Hex(), msg.BlobGasFeeCap, st.evm.Context.BlobBaseFee) + } } } } - return st.buyGas() } diff --git a/core/vm/evm.go b/core/vm/evm.go index 2c6cc7d48466..088b18aaa4ff 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -72,8 +72,8 @@ type BlockContext struct { BlockNumber *big.Int // Provides information for NUMBER Time uint64 // Provides information for TIME Difficulty *big.Int // Provides information for DIFFICULTY - BaseFee *big.Int // Provides information for BASEFEE - BlobBaseFee *big.Int // Provides information for BLOBBASEFEE + BaseFee *big.Int // Provides information for BASEFEE (0 if vm runs with NoBaseFee flag and 0 gas price) + BlobBaseFee *big.Int // Provides information for BLOBBASEFEE (0 if vm runs with NoBaseFee flag and 0 blob gas price) Random *common.Hash // Provides information for PREVRANDAO } @@ -82,8 +82,9 @@ type BlockContext struct { type TxContext struct { // Message information Origin common.Address // Provides information for ORIGIN - GasPrice *big.Int // Provides information for GASPRICE + GasPrice *big.Int // Provides information for GASPRICE (and is used to zero the basefee if NoBaseFee is set) BlobHashes []common.Hash // Provides information for BLOBHASH + BlobFeeCap *big.Int // Is used to zero the blobbasefee if NoBaseFee is set } // EVM is the Ethereum Virtual Machine base object and provides @@ -125,6 +126,17 @@ type EVM struct { // NewEVM returns a new EVM. The returned EVM is not thread safe and should // only ever be used *once*. 
func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig *params.ChainConfig, config Config) *EVM { + // If basefee tracking is disabled (eth_call, eth_estimateGas, etc), and no + // gas prices were specified, lower the basefee to 0 to avoid breaking EVM + // invariants (basefee < feecap) + if config.NoBaseFee { + if txCtx.GasPrice.BitLen() == 0 { + blockCtx.BaseFee = new(big.Int) + } + if txCtx.BlobFeeCap != nil && txCtx.BlobFeeCap.BitLen() == 0 { + blockCtx.BlobBaseFee = new(big.Int) + } + } evm := &EVM{ Context: blockCtx, TxContext: txCtx, @@ -160,14 +172,6 @@ func (evm *EVM) Interpreter() *EVMInterpreter { return evm.interpreter } -// SetBlockContext updates the block context of the EVM. -func (evm *EVM) SetBlockContext(blockCtx BlockContext) { - evm.Context = blockCtx - num := blockCtx.BlockNumber - timestamp := blockCtx.Time - evm.chainRules = evm.chainConfig.Rules(num, blockCtx.Random != nil, timestamp) -} - // Call executes the contract associated with the addr with the given input as // parameters. 
It also handles any necessary value transfer required and takes // the necessary steps to create accounts and reverses the state in case of an diff --git a/core/vm/runtime/env.go b/core/vm/runtime/env.go index 64aa550a2503..34335b8e9e29 100644 --- a/core/vm/runtime/env.go +++ b/core/vm/runtime/env.go @@ -26,6 +26,7 @@ func NewEnv(cfg *Config) *vm.EVM { Origin: cfg.Origin, GasPrice: cfg.GasPrice, BlobHashes: cfg.BlobHashes, + BlobFeeCap: cfg.BlobFeeCap, } blockContext := vm.BlockContext{ CanTransfer: core.CanTransfer, diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index cfd7e4dbc4ee..d10457e7faa9 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -46,6 +46,7 @@ type Config struct { BaseFee *big.Int BlobBaseFee *big.Int BlobHashes []common.Hash + BlobFeeCap *big.Int Random *common.Hash State *state.StateDB @@ -97,7 +98,7 @@ func setDefaults(cfg *Config) { cfg.BaseFee = big.NewInt(params.InitialBaseFee) } if cfg.BlobBaseFee == nil { - cfg.BlobBaseFee = new(big.Int) + cfg.BlobBaseFee = big.NewInt(params.BlobTxMinBlobGasprice) } } diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index 59882cd6bb54..a67bd1203b9a 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -678,6 +678,47 @@ func TestEstimateGas(t *testing.T) { }, expectErr: core.ErrInsufficientFunds, }, + // Test for a bug where the gas price was set to zero but the basefee non-zero + // + // contract BasefeeChecker { + // constructor() { + // require(tx.gasprice >= block.basefee); + // if (tx.gasprice > 0) { + // require(block.basefee > 0); + // } + // } + //} + { + blockNumber: rpc.LatestBlockNumber, + call: TransactionArgs{ + From: &accounts[0].addr, + Input: 
hex2Bytes("6080604052348015600f57600080fd5b50483a1015601c57600080fd5b60003a111560315760004811603057600080fd5b5b603f80603e6000396000f3fe6080604052600080fdfea264697066735822122060729c2cee02b10748fae5200f1c9da4661963354973d9154c13a8e9ce9dee1564736f6c63430008130033"), + GasPrice: (*hexutil.Big)(big.NewInt(1_000_000_000)), // Legacy as pricing + }, + expectErr: nil, + want: 67617, + }, + { + blockNumber: rpc.LatestBlockNumber, + call: TransactionArgs{ + From: &accounts[0].addr, + Input: hex2Bytes("6080604052348015600f57600080fd5b50483a1015601c57600080fd5b60003a111560315760004811603057600080fd5b5b603f80603e6000396000f3fe6080604052600080fdfea264697066735822122060729c2cee02b10748fae5200f1c9da4661963354973d9154c13a8e9ce9dee1564736f6c63430008130033"), + MaxFeePerGas: (*hexutil.Big)(big.NewInt(1_000_000_000)), // 1559 gas pricing + }, + expectErr: nil, + want: 67617, + }, + { + blockNumber: rpc.LatestBlockNumber, + call: TransactionArgs{ + From: &accounts[0].addr, + Input: hex2Bytes("6080604052348015600f57600080fd5b50483a1015601c57600080fd5b60003a111560315760004811603057600080fd5b5b603f80603e6000396000f3fe6080604052600080fdfea264697066735822122060729c2cee02b10748fae5200f1c9da4661963354973d9154c13a8e9ce9dee1564736f6c63430008130033"), + GasPrice: nil, // No legacy gas pricing + MaxFeePerGas: nil, // No 1559 gas pricing + }, + expectErr: nil, + want: 67595, + }, } for i, tc := range testSuite { result, err := api.EstimateGas(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides) From 7ea860d665d03315ad2fa2ba21410bf80839d0bf Mon Sep 17 00:00:00 2001 From: Delweng Date: Thu, 9 Nov 2023 16:36:27 +0800 Subject: [PATCH 23/41] graphql: type of yParity from Long to BigInt (#28456) Signed-off-by: jsvisa --- graphql/graphql.go | 4 ++-- graphql/schema.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/graphql/graphql.go b/graphql/graphql.go index 93313d743a9a..49be23af69dd 100644 --- a/graphql/graphql.go +++ 
b/graphql/graphql.go @@ -615,13 +615,13 @@ func (t *Transaction) V(ctx context.Context) hexutil.Big { return hexutil.Big(*v) } -func (t *Transaction) YParity(ctx context.Context) (*hexutil.Uint64, error) { +func (t *Transaction) YParity(ctx context.Context) (*hexutil.Big, error) { tx, _ := t.resolve(ctx) if tx == nil || tx.Type() == types.LegacyTxType { return nil, nil } v, _, _ := tx.RawSignatureValues() - ret := hexutil.Uint64(v.Int64()) + ret := hexutil.Big(*v) return &ret, nil } diff --git a/graphql/schema.go b/graphql/schema.go index 5738923fc170..8264f1c28638 100644 --- a/graphql/schema.go +++ b/graphql/schema.go @@ -157,7 +157,7 @@ const schema string = ` r: BigInt! s: BigInt! v: BigInt! - yParity: Long + yParity: BigInt # Envelope transaction support type: Long accessList: [AccessTuple!] From b77a9b127c6f69ab10521a4cd71a06904d5d70ad Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Thu, 9 Nov 2023 10:46:48 +0100 Subject: [PATCH 24/41] cmd/geth: more testcases for logging (#28501) * cmd/geth: more testcases for logging This adds more edgecases around logging, particularly around handling of different types of nil-values as concrete types and within interfaces. Also adds tests with 'reserved' values which breaks json/logfmt formats. The json output is checked in, but not actively used by any testcase at the moment. 
* cmd/geth/testdata: remove timestamps --- cmd/geth/logtestcmd_active.go | 25 ++++++ cmd/geth/testdata/logging/logtest-json.txt | 49 ++++++++++ cmd/geth/testdata/logging/logtest-logfmt.txt | 88 ++++++++++-------- .../testdata/logging/logtest-terminal.txt | 90 ++++++++++--------- 4 files changed, 173 insertions(+), 79 deletions(-) create mode 100644 cmd/geth/testdata/logging/logtest-json.txt diff --git a/cmd/geth/logtestcmd_active.go b/cmd/geth/logtestcmd_active.go index c66013517aa4..ebcc8de9768a 100644 --- a/cmd/geth/logtestcmd_active.go +++ b/cmd/geth/logtestcmd_active.go @@ -130,5 +130,30 @@ func logTest(ctx *cli.Context) error { log.Info("Inserted known block", "number", 99, "hash", common.HexToHash("0x12322"), "txs", 10, "gas", 1, "other", "third") log.Warn("Inserted known block", "number", 1_012, "hash", common.HexToHash("0x1234"), "txs", 200, "gas", 99, "other", "fourth") } + { // Various types of nil + type customStruct struct { + A string + B *uint64 + } + log.Info("(*big.Int)(nil)", "", (*big.Int)(nil)) + log.Info("(*uint256.Int)(nil)", "", (*uint256.Int)(nil)) + log.Info("(fmt.Stringer)(nil)", "res", (fmt.Stringer)(nil)) + log.Info("nil-concrete-stringer", "res", (*time.Time)(nil)) + + log.Info("error(nil) ", "res", error(nil)) + log.Info("nil-concrete-error", "res", (*customError)(nil)) + + log.Info("nil-custom-struct", "res", (*customStruct)(nil)) + log.Info("raw nil", "res", nil) + log.Info("(*uint64)(nil)", "res", (*uint64)(nil)) + } + { // Logging with 'reserved' keys + log.Info("Using keys 't', 'lvl', 'time', 'level' and 'msg'", "t", "t", "time", "time", "lvl", "lvl", "level", "level", "msg", "msg") + } return nil } + +// customError is a type which implements error +type customError struct{} + +func (c *customError) Error() string { return "" } diff --git a/cmd/geth/testdata/logging/logtest-json.txt b/cmd/geth/testdata/logging/logtest-json.txt new file mode 100644 index 000000000000..6cb2476dbd83 --- /dev/null +++ 
b/cmd/geth/testdata/logging/logtest-json.txt @@ -0,0 +1,49 @@ +{"111,222,333,444,555,678,999":"111222333444555678999","lvl":"info","msg":"big.Int","t":"2023-11-09T08:33:19.464383209+01:00"} +{"-111,222,333,444,555,678,999":"-111222333444555678999","lvl":"info","msg":"-big.Int","t":"2023-11-09T08:33:19.46455928+01:00"} +{"11,122,233,344,455,567,899,900":"11122233344455567899900","lvl":"info","msg":"big.Int","t":"2023-11-09T08:33:19.464582073+01:00"} +{"-11,122,233,344,455,567,899,900":"-11122233344455567899900","lvl":"info","msg":"-big.Int","t":"2023-11-09T08:33:19.464594846+01:00"} +{"111,222,333,444,555,678,999":"0x607851afc94ca2517","lvl":"info","msg":"uint256","t":"2023-11-09T08:33:19.464607873+01:00"} +{"11,122,233,344,455,567,899,900":"0x25aeffe8aaa1ef67cfc","lvl":"info","msg":"uint256","t":"2023-11-09T08:33:19.464694639+01:00"} +{"1,000,000":1000000,"lvl":"info","msg":"int64","t":"2023-11-09T08:33:19.464708835+01:00"} +{"-1,000,000":-1000000,"lvl":"info","msg":"int64","t":"2023-11-09T08:33:19.464725054+01:00"} +{"9,223,372,036,854,775,807":9223372036854775807,"lvl":"info","msg":"int64","t":"2023-11-09T08:33:19.464735773+01:00"} +{"-9,223,372,036,854,775,808":-9223372036854775808,"lvl":"info","msg":"int64","t":"2023-11-09T08:33:19.464744532+01:00"} +{"1,000,000":1000000,"lvl":"info","msg":"uint64","t":"2023-11-09T08:33:19.464752807+01:00"} +{"18,446,744,073,709,551,615":18446744073709551615,"lvl":"info","msg":"uint64","t":"2023-11-09T08:33:19.464779296+01:00"} +{"key":"special \r\n\t chars","lvl":"info","msg":"Special chars in value","t":"2023-11-09T08:33:19.464794181+01:00"} +{"lvl":"info","msg":"Special chars in key","special \n\t chars":"value","t":"2023-11-09T08:33:19.464827197+01:00"} +{"lvl":"info","msg":"nospace","nospace":"nospace","t":"2023-11-09T08:33:19.464841118+01:00"} +{"lvl":"info","msg":"with space","t":"2023-11-09T08:33:19.464862818+01:00","with nospace":"with nospace"} +{"key":"\u001b[1G\u001b[K\u001b[1A","lvl":"info","msg":"Bash escapes in 
value","t":"2023-11-09T08:33:19.464876802+01:00"} +{"\u001b[1G\u001b[K\u001b[1A":"value","lvl":"info","msg":"Bash escapes in key","t":"2023-11-09T08:33:19.464885416+01:00"} +{"key":"value","lvl":"info","msg":"Bash escapes in message \u001b[1G\u001b[K\u001b[1A end","t":"2023-11-09T08:33:19.464906946+01:00"} +{"\u001b[35mColored\u001b[0m[":"\u001b[35mColored\u001b[0m[","lvl":"info","msg":"\u001b[35mColored\u001b[0m[","t":"2023-11-09T08:33:19.464921455+01:00"} +{"2562047h47m16.854s":"2562047h47m16.854s","lvl":"info","msg":"Custom Stringer value","t":"2023-11-09T08:33:19.464943893+01:00"} +{"key":"lazy value","lvl":"info","msg":"Lazy evaluation of value","t":"2023-11-09T08:33:19.465013552+01:00"} +{"lvl":"info","msg":"A message with wonky ๐Ÿ’ฉ characters","t":"2023-11-09T08:33:19.465069437+01:00"} +{"lvl":"info","msg":"A multiline message \nINFO [10-18|14:11:31.106] with wonky characters ๐Ÿ’ฉ","t":"2023-11-09T08:33:19.465083053+01:00"} +{"lvl":"info","msg":"A multiline message \nLALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above","t":"2023-11-09T08:33:19.465104289+01:00"} +{"false":"false","lvl":"info","msg":"boolean","t":"2023-11-09T08:33:19.465117185+01:00","true":"true"} +{"foo":"beta","lvl":"info","msg":"repeated-key 1","t":"2023-11-09T08:33:19.465143425+01:00"} +{"lvl":"info","msg":"repeated-key 2","t":"2023-11-09T08:33:19.465156323+01:00","xx":"longer"} +{"lvl":"info","msg":"log at level info","t":"2023-11-09T08:33:19.465193158+01:00"} +{"lvl":"warn","msg":"log at level warn","t":"2023-11-09T08:33:19.465228964+01:00"} +{"lvl":"eror","msg":"log at level error","t":"2023-11-09T08:33:19.465240352+01:00"} +{"a":"aligned left","bar":"short","lvl":"info","msg":"test","t":"2023-11-09T08:33:19.465247226+01:00"} +{"a":1,"bar":"a long message","lvl":"info","msg":"test","t":"2023-11-09T08:33:19.465269028+01:00"} +{"a":"aligned right","bar":"short","lvl":"info","msg":"test","t":"2023-11-09T08:33:19.465313611+01:00"} +{"lvl":"info","msg":"The following logs should align 
so that the key-fields make 5 columns","t":"2023-11-09T08:33:19.465328188+01:00"} +{"gas":1123123,"hash":"0x0000000000000000000000000000000000000000000000000000000000001234","lvl":"info","msg":"Inserted known block","number":1012,"other":"first","t":"2023-11-09T08:33:19.465350507+01:00","txs":200} +{"gas":1123,"hash":"0x0000000000000000000000000000000000000000000000000000000000001235","lvl":"info","msg":"Inserted new block","number":1,"other":"second","t":"2023-11-09T08:33:19.465387952+01:00","txs":2} +{"gas":1,"hash":"0x0000000000000000000000000000000000000000000000000000000000012322","lvl":"info","msg":"Inserted known block","number":99,"other":"third","t":"2023-11-09T08:33:19.465406687+01:00","txs":10} +{"gas":99,"hash":"0x0000000000000000000000000000000000000000000000000000000000001234","lvl":"warn","msg":"Inserted known block","number":1012,"other":"fourth","t":"2023-11-09T08:33:19.465433025+01:00","txs":200} +{"\u003cnil\u003e":"\u003cnil\u003e","lvl":"info","msg":"(*big.Int)(nil)","t":"2023-11-09T08:33:19.465450283+01:00"} +{"\u003cnil\u003e":"nil","lvl":"info","msg":"(*uint256.Int)(nil)","t":"2023-11-09T08:33:19.465472953+01:00"} +{"lvl":"info","msg":"(fmt.Stringer)(nil)","res":"\u003cnil\u003e","t":"2023-11-09T08:33:19.465538633+01:00"} +{"lvl":"info","msg":"nil-concrete-stringer","res":"nil","t":"2023-11-09T08:33:19.465552355+01:00"} +{"lvl":"info","msg":"error(nil) ","res":"\u003cnil\u003e","t":"2023-11-09T08:33:19.465601029+01:00"} +{"lvl":"info","msg":"nil-concrete-error","res":"","t":"2023-11-09T08:33:19.46561622+01:00"} +{"lvl":"info","msg":"nil-custom-struct","res":"\u003cnil\u003e","t":"2023-11-09T08:33:19.465638888+01:00"} +{"lvl":"info","msg":"raw nil","res":"\u003cnil\u003e","t":"2023-11-09T08:33:19.465673664+01:00"} +{"lvl":"info","msg":"(*uint64)(nil)","res":"\u003cnil\u003e","t":"2023-11-09T08:33:19.465700264+01:00"} +{"level":"level","lvl":"lvl","msg":"msg","t":"t","time":"time"} diff --git a/cmd/geth/testdata/logging/logtest-logfmt.txt 
b/cmd/geth/testdata/logging/logtest-logfmt.txt index 79f29e1fae22..c1e34d193057 100644 --- a/cmd/geth/testdata/logging/logtest-logfmt.txt +++ b/cmd/geth/testdata/logging/logtest-logfmt.txt @@ -1,39 +1,49 @@ -t=2023-10-20T12:56:08+0200 lvl=info msg=big.Int 111,222,333,444,555,678,999=111,222,333,444,555,678,999 -t=2023-10-20T12:56:08+0200 lvl=info msg=-big.Int -111,222,333,444,555,678,999=-111,222,333,444,555,678,999 -t=2023-10-20T12:56:08+0200 lvl=info msg=big.Int 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900 -t=2023-10-20T12:56:08+0200 lvl=info msg=-big.Int -11,122,233,344,455,567,899,900=-11,122,233,344,455,567,899,900 -t=2023-10-20T12:56:08+0200 lvl=info msg=uint256 111,222,333,444,555,678,999=111,222,333,444,555,678,999 -t=2023-10-20T12:56:08+0200 lvl=info msg=uint256 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900 -t=2023-10-20T12:56:08+0200 lvl=info msg=int64 1,000,000=1,000,000 -t=2023-10-20T12:56:08+0200 lvl=info msg=int64 -1,000,000=-1,000,000 -t=2023-10-20T12:56:08+0200 lvl=info msg=int64 9,223,372,036,854,775,807=9,223,372,036,854,775,807 -t=2023-10-20T12:56:08+0200 lvl=info msg=int64 -9,223,372,036,854,775,808=-9,223,372,036,854,775,808 -t=2023-10-20T12:56:08+0200 lvl=info msg=uint64 1,000,000=1,000,000 -t=2023-10-20T12:56:08+0200 lvl=info msg=uint64 18,446,744,073,709,551,615=18,446,744,073,709,551,615 -t=2023-10-20T12:56:08+0200 lvl=info msg="Special chars in value" key="special \r\n\t chars" -t=2023-10-20T12:56:08+0200 lvl=info msg="Special chars in key" "special \n\t chars"=value -t=2023-10-20T12:56:08+0200 lvl=info msg=nospace nospace=nospace -t=2023-10-20T12:56:08+0200 lvl=info msg="with space" "with nospace"="with nospace" -t=2023-10-20T12:56:08+0200 lvl=info msg="Bash escapes in value" key="\x1b[1G\x1b[K\x1b[1A" -t=2023-10-20T12:56:08+0200 lvl=info msg="Bash escapes in key" "\x1b[1G\x1b[K\x1b[1A"=value -t=2023-10-20T12:56:08+0200 lvl=info msg="Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value 
-t=2023-10-20T12:56:08+0200 lvl=info msg="\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m[" -t=2023-10-20T12:56:08+0200 lvl=info msg="Custom Stringer value" 2562047h47m16.854s=2562047h47m16.854s -t=2023-10-20T12:56:08+0200 lvl=info msg="Lazy evaluation of value" key="lazy value" -t=2023-10-20T12:56:08+0200 lvl=info msg="A message with wonky ๐Ÿ’ฉ characters" -t=2023-10-20T12:56:08+0200 lvl=info msg="A multiline message \nINFO [10-18|14:11:31.106] with wonky characters ๐Ÿ’ฉ" -t=2023-10-20T12:56:08+0200 lvl=info msg="A multiline message \nLALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above" -t=2023-10-20T12:56:08+0200 lvl=info msg=boolean true=true false=false -t=2023-10-20T12:56:08+0200 lvl=info msg="repeated-key 1" foo=alpha foo=beta -t=2023-10-20T12:56:08+0200 lvl=info msg="repeated-key 2" xx=short xx=longer -t=2023-10-20T12:56:08+0200 lvl=info msg="log at level info" -t=2023-10-20T12:56:08+0200 lvl=warn msg="log at level warn" -t=2023-10-20T12:56:08+0200 lvl=eror msg="log at level error" -t=2023-10-20T12:56:08+0200 lvl=info msg=test bar=short a="aligned left" -t=2023-10-20T12:56:08+0200 lvl=info msg=test bar="a long message" a=1 -t=2023-10-20T12:56:08+0200 lvl=info msg=test bar=short a="aligned right" -t=2023-10-20T12:56:08+0200 lvl=info msg="The following logs should align so that the key-fields make 5 columns" -t=2023-10-20T12:56:08+0200 lvl=info msg="Inserted known block" number=1012 hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=1,123,123 other=first -t=2023-10-20T12:56:08+0200 lvl=info msg="Inserted new block" number=1 hash=0x0000000000000000000000000000000000000000000000000000000000001235 txs=2 gas=1123 other=second -t=2023-10-20T12:56:08+0200 lvl=info msg="Inserted known block" number=99 hash=0x0000000000000000000000000000000000000000000000000000000000012322 txs=10 gas=1 other=third -t=2023-10-20T12:56:08+0200 lvl=warn msg="Inserted known block" number=1012 
hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=99 other=fourth +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=big.Int 111,222,333,444,555,678,999=111,222,333,444,555,678,999 +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=-big.Int -111,222,333,444,555,678,999=-111,222,333,444,555,678,999 +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=big.Int 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900 +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=-big.Int -11,122,233,344,455,567,899,900=-11,122,233,344,455,567,899,900 +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=uint256 111,222,333,444,555,678,999=111,222,333,444,555,678,999 +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=uint256 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900 +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=int64 1,000,000=1,000,000 +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=int64 -1,000,000=-1,000,000 +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=int64 9,223,372,036,854,775,807=9,223,372,036,854,775,807 +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=int64 -9,223,372,036,854,775,808=-9,223,372,036,854,775,808 +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=uint64 1,000,000=1,000,000 +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=uint64 18,446,744,073,709,551,615=18,446,744,073,709,551,615 +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Special chars in value" key="special \r\n\t chars" +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Special chars in key" "special \n\t chars"=value +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=nospace nospace=nospace +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="with space" "with nospace"="with nospace" +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Bash escapes in value" key="\x1b[1G\x1b[K\x1b[1A" +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Bash escapes in key" "\x1b[1G\x1b[K\x1b[1A"=value +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m[" 
+t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Custom Stringer value" 2562047h47m16.854s=2562047h47m16.854s +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Lazy evaluation of value" key="lazy value" +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="A message with wonky ๐Ÿ’ฉ characters" +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="A multiline message \nINFO [10-18|14:11:31.106] with wonky characters ๐Ÿ’ฉ" +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="A multiline message \nLALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above" +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=boolean true=true false=false +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="repeated-key 1" foo=alpha foo=beta +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="repeated-key 2" xx=short xx=longer +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="log at level info" +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=warn msg="log at level warn" +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=eror msg="log at level error" +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=test bar=short a="aligned left" +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=test bar="a long message" a=1 +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=test bar=short a="aligned right" +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="The following logs should align so that the key-fields make 5 columns" +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Inserted known block" number=1012 hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=1,123,123 other=first +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Inserted new block" number=1 hash=0x0000000000000000000000000000000000000000000000000000000000001235 txs=2 gas=1123 other=second +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Inserted known block" number=99 hash=0x0000000000000000000000000000000000000000000000000000000000012322 txs=10 gas=1 other=third +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=warn msg="Inserted known block" number=1012 hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=99 other=fourth +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info 
msg=(*big.Int)(nil) = +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=(*uint256.Int)(nil) = +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=(fmt.Stringer)(nil) res=nil +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=nil-concrete-stringer res=nil +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="error(nil) " res=nil +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=nil-concrete-error res= +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=nil-custom-struct res= +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="raw nil" res=nil +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=(*uint64)(nil) res= +t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Using keys 't', 'lvl', 'time', 'level' and 'msg'" t=t time=time lvl=lvl level=level msg=msg diff --git a/cmd/geth/testdata/logging/logtest-terminal.txt b/cmd/geth/testdata/logging/logtest-terminal.txt index ff68b6047a78..af0de7b9abd7 100644 --- a/cmd/geth/testdata/logging/logtest-terminal.txt +++ b/cmd/geth/testdata/logging/logtest-terminal.txt @@ -1,40 +1,50 @@ -INFO [10-20|12:56:42.532] big.Int 111,222,333,444,555,678,999=111,222,333,444,555,678,999 -INFO [10-20|12:56:42.532] -big.Int -111,222,333,444,555,678,999=-111,222,333,444,555,678,999 -INFO [10-20|12:56:42.532] big.Int 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900 -INFO [10-20|12:56:42.532] -big.Int -11,122,233,344,455,567,899,900=-11,122,233,344,455,567,899,900 -INFO [10-20|12:56:42.532] uint256 111,222,333,444,555,678,999=111,222,333,444,555,678,999 -INFO [10-20|12:56:42.532] uint256 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900 -INFO [10-20|12:56:42.532] int64 1,000,000=1,000,000 -INFO [10-20|12:56:42.532] int64 -1,000,000=-1,000,000 -INFO [10-20|12:56:42.532] int64 9,223,372,036,854,775,807=9,223,372,036,854,775,807 -INFO [10-20|12:56:42.532] int64 -9,223,372,036,854,775,808=-9,223,372,036,854,775,808 -INFO [10-20|12:56:42.532] uint64 1,000,000=1,000,000 -INFO [10-20|12:56:42.532] uint64 18,446,744,073,709,551,615=18,446,744,073,709,551,615 -INFO [10-20|12:56:42.532] Special chars in value key="special 
\r\n\t chars" -INFO [10-20|12:56:42.532] Special chars in key "special \n\t chars"=value -INFO [10-20|12:56:42.532] nospace nospace=nospace -INFO [10-20|12:56:42.532] with space "with nospace"="with nospace" -INFO [10-20|12:56:42.532] Bash escapes in value key="\x1b[1G\x1b[K\x1b[1A" -INFO [10-20|12:56:42.532] Bash escapes in key "\x1b[1G\x1b[K\x1b[1A"=value -INFO [10-20|12:56:42.532] "Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value -INFO [10-20|12:56:42.532] "\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m[" -INFO [10-20|12:56:42.532] Custom Stringer value 2562047h47m16.854s=2562047h47m16.854s -INFO [10-20|12:56:42.532] Lazy evaluation of value key="lazy value" -INFO [10-20|12:56:42.532] "A message with wonky ๐Ÿ’ฉ characters" -INFO [10-20|12:56:42.532] "A multiline message \nINFO [10-18|14:11:31.106] with wonky characters ๐Ÿ’ฉ" -INFO [10-20|12:56:42.532] A multiline message -LALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above -INFO [10-20|12:56:42.532] boolean true=true false=false -INFO [10-20|12:56:42.532] repeated-key 1 foo=alpha foo=beta -INFO [10-20|12:56:42.532] repeated-key 2 xx=short xx=longer -INFO [10-20|12:56:42.532] log at level info -WARN [10-20|12:56:42.532] log at level warn -ERROR[10-20|12:56:42.532] log at level error -INFO [10-20|12:56:42.532] test bar=short a="aligned left" -INFO [10-20|12:56:42.532] test bar="a long message" a=1 -INFO [10-20|12:56:42.532] test bar=short a="aligned right" -INFO [10-20|12:56:42.532] The following logs should align so that the key-fields make 5 columns -INFO [10-20|12:56:42.532] Inserted known block number=1012 hash=000000..001234 txs=200 gas=1,123,123 other=first -INFO [10-20|12:56:42.532] Inserted new block number=1 hash=000000..001235 txs=2 gas=1123 other=second -INFO [10-20|12:56:42.532] Inserted known block number=99 hash=000000..012322 txs=10 gas=1 other=third -WARN [10-20|12:56:42.532] Inserted known block number=1012 hash=000000..001234 txs=200 gas=99 other=fourth 
+INFO [XX-XX|XX:XX:XX.XXX] big.Int 111,222,333,444,555,678,999=111,222,333,444,555,678,999 +INFO [XX-XX|XX:XX:XX.XXX] -big.Int -111,222,333,444,555,678,999=-111,222,333,444,555,678,999 +INFO [XX-XX|XX:XX:XX.XXX] big.Int 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900 +INFO [XX-XX|XX:XX:XX.XXX] -big.Int -11,122,233,344,455,567,899,900=-11,122,233,344,455,567,899,900 +INFO [XX-XX|XX:XX:XX.XXX] uint256 111,222,333,444,555,678,999=111,222,333,444,555,678,999 +INFO [XX-XX|XX:XX:XX.XXX] uint256 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900 +INFO [XX-XX|XX:XX:XX.XXX] int64 1,000,000=1,000,000 +INFO [XX-XX|XX:XX:XX.XXX] int64 -1,000,000=-1,000,000 +INFO [XX-XX|XX:XX:XX.XXX] int64 9,223,372,036,854,775,807=9,223,372,036,854,775,807 +INFO [XX-XX|XX:XX:XX.XXX] int64 -9,223,372,036,854,775,808=-9,223,372,036,854,775,808 +INFO [XX-XX|XX:XX:XX.XXX] uint64 1,000,000=1,000,000 +INFO [XX-XX|XX:XX:XX.XXX] uint64 18,446,744,073,709,551,615=18,446,744,073,709,551,615 +INFO [XX-XX|XX:XX:XX.XXX] Special chars in value key="special \r\n\t chars" +INFO [XX-XX|XX:XX:XX.XXX] Special chars in key "special \n\t chars"=value +INFO [XX-XX|XX:XX:XX.XXX] nospace nospace=nospace +INFO [XX-XX|XX:XX:XX.XXX] with space "with nospace"="with nospace" +INFO [XX-XX|XX:XX:XX.XXX] Bash escapes in value key="\x1b[1G\x1b[K\x1b[1A" +INFO [XX-XX|XX:XX:XX.XXX] Bash escapes in key "\x1b[1G\x1b[K\x1b[1A"=value +INFO [XX-XX|XX:XX:XX.XXX] "Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value +INFO [XX-XX|XX:XX:XX.XXX] "\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m[" +INFO [XX-XX|XX:XX:XX.XXX] Custom Stringer value 2562047h47m16.854s=2562047h47m16.854s +INFO [XX-XX|XX:XX:XX.XXX] Lazy evaluation of value key="lazy value" +INFO [XX-XX|XX:XX:XX.XXX] "A message with wonky ๐Ÿ’ฉ characters" +INFO [XX-XX|XX:XX:XX.XXX] "A multiline message \nINFO [10-18|14:11:31.106] with wonky characters ๐Ÿ’ฉ" +INFO [XX-XX|XX:XX:XX.XXX] A multiline message +LALA 
[XXZXXZXXZXXZXXZXXX] Actually part of message above +INFO [XX-XX|XX:XX:XX.XXX] boolean true=true false=false +INFO [XX-XX|XX:XX:XX.XXX] repeated-key 1 foo=alpha foo=beta +INFO [XX-XX|XX:XX:XX.XXX] repeated-key 2 xx=short xx=longer +INFO [XX-XX|XX:XX:XX.XXX] log at level info +WARN [XX-XX|XX:XX:XX.XXX] log at level warn +ERROR[XX-XX|XX:XX:XX.XXX] log at level error +INFO [XX-XX|XX:XX:XX.XXX] test bar=short a="aligned left" +INFO [XX-XX|XX:XX:XX.XXX] test bar="a long message" a=1 +INFO [XX-XX|XX:XX:XX.XXX] test bar=short a="aligned right" +INFO [XX-XX|XX:XX:XX.XXX] The following logs should align so that the key-fields make 5 columns +INFO [XX-XX|XX:XX:XX.XXX] Inserted known block number=1012 hash=000000..001234 txs=200 gas=1,123,123 other=first +INFO [XX-XX|XX:XX:XX.XXX] Inserted new block number=1 hash=000000..001235 txs=2 gas=1123 other=second +INFO [XX-XX|XX:XX:XX.XXX] Inserted known block number=99 hash=000000..012322 txs=10 gas=1 other=third +WARN [XX-XX|XX:XX:XX.XXX] Inserted known block number=1012 hash=000000..001234 txs=200 gas=99 other=fourth +INFO [XX-XX|XX:XX:XX.XXX] (*big.Int)(nil) = +INFO [XX-XX|XX:XX:XX.XXX] (*uint256.Int)(nil) = +INFO [XX-XX|XX:XX:XX.XXX] (fmt.Stringer)(nil) res=nil +INFO [XX-XX|XX:XX:XX.XXX] nil-concrete-stringer res=nil +INFO [XX-XX|XX:XX:XX.XXX] error(nil) res=nil +INFO [XX-XX|XX:XX:XX.XXX] nil-concrete-error res= +INFO [XX-XX|XX:XX:XX.XXX] nil-custom-struct res= +INFO [XX-XX|XX:XX:XX.XXX] raw nil res=nil +INFO [XX-XX|XX:XX:XX.XXX] (*uint64)(nil) res= +INFO [XX-XX|XX:XX:XX.XXX] Using keys 't', 'lvl', 'time', 'level' and 'msg' t=t time=time lvl=lvl level=level msg=msg From f7dde2a96c6a826a7a652589516ed22bb7f1e937 Mon Sep 17 00:00:00 2001 From: Jim McDonald Date: Thu, 9 Nov 2023 14:15:22 +0000 Subject: [PATCH 25/41] ethdb/pebble: add `Errorf` function to panicLogger (#28491) cockroachdb/pebble@422dce9 added Errorf to the Logger interface, this change makes it possible to compile geth with that version of pebble by adding the 
corresponding method to panicLogger. --- ethdb/pebble/pebble.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index d713e2863efe..6d0ea9496255 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -127,6 +127,9 @@ type panicLogger struct{} func (l panicLogger) Infof(format string, args ...interface{}) { } +func (l panicLogger) Errorf(format string, args ...interface{}) { +} + func (l panicLogger) Fatalf(format string, args ...interface{}) { panic(errors.Errorf("fatal: "+format, args...)) } From e38b9f183065b374110918660114e93006f102dc Mon Sep 17 00:00:00 2001 From: Delweng Date: Fri, 10 Nov 2023 15:10:03 +0800 Subject: [PATCH 26/41] eth/filters: exit early if topics-filter has more than 4 topics (#28494) Currently, geth's will return `[]` for any `len(topics) > 4` log filter. The EVM only supports up to four logs, via LOG4 opcode, so larger criterias fail. This change makes the filter query exit early in those cases. --- eth/filters/api.go | 7 +++++++ eth/filters/filter_system.go | 3 +++ eth/filters/filter_system_test.go | 4 ++++ 3 files changed, 14 insertions(+) diff --git a/eth/filters/api.go b/eth/filters/api.go index 22dff6c59e9e..a4eaa9cec805 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -37,8 +37,12 @@ var ( errInvalidTopic = errors.New("invalid topic(s)") errFilterNotFound = errors.New("filter not found") errInvalidBlockRange = errors.New("invalid block range params") + errExceedMaxTopics = errors.New("exceed max topics") ) +// The maximum number of topic criteria allowed, vm.LOG4 - vm.LOG0 +const maxTopics = 4 + // filter is a helper struct that holds meta information over the filter type // and associated subscription in the event system. type filter struct { @@ -334,6 +338,9 @@ func (api *FilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) { // GetLogs returns logs matching the given argument that are stored within the state. 
func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) { + if len(crit.Topics) > maxTopics { + return nil, errExceedMaxTopics + } var filter *Filter if crit.BlockHash != nil { // Block filter requested, construct a single-shot filter diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index a9b5f2e07919..f98a1f84ce14 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -299,6 +299,9 @@ func (es *EventSystem) subscribe(sub *subscription) *Subscription { // given criteria to the given logs channel. Default value for the from and to // block is "latest". If the fromBlock > toBlock an error is returned. func (es *EventSystem) SubscribeLogs(crit ethereum.FilterQuery, logs chan []*types.Log) (*Subscription, error) { + if len(crit.Topics) > maxTopics { + return nil, errExceedMaxTopics + } var from, to rpc.BlockNumber if crit.FromBlock == nil { from = rpc.LatestBlockNumber diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go index 93cbf01830b3..27cad8826aa0 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -386,6 +386,8 @@ func TestLogFilterCreation(t *testing.T) { {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, // from block "higher" than to block {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false}, + // topics more then 4 + {FilterCriteria{Topics: [][]common.Hash{{}, {}, {}, {}, {}}}, false}, } ) @@ -420,6 +422,7 @@ func TestInvalidLogFilterCreation(t *testing.T) { 0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, 1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, + 3: {Topics: [][]common.Hash{{}, {}, 
{}, {}, {}}}, } for i, test := range testCases { @@ -445,6 +448,7 @@ func TestInvalidGetLogsRequest(t *testing.T) { 0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)}, 1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)}, 2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, + 3: {BlockHash: &blockHash, Topics: [][]common.Hash{{}, {}, {}, {}, {}}}, } for i, test := range testCases { From 326fa00759d959061035becc9514660c24897053 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Fri, 10 Nov 2023 18:56:39 +0800 Subject: [PATCH 27/41] core/rawdb: fsync the index file after each freezer write (#28483) * core/rawdb: fsync the index and data file after each freezer write * core/rawdb: fsync the data file in freezer after write --- core/rawdb/ancient_utils.go | 3 +++ core/rawdb/freezer_batch.go | 12 ++++++++++-- core/rawdb/freezer_table.go | 17 ++++++++++++++--- core/rawdb/freezer_utils.go | 6 +----- 4 files changed, 28 insertions(+), 10 deletions(-) diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go index dfb2fdfb67df..1b93a9aa5a85 100644 --- a/core/rawdb/ancient_utils.go +++ b/core/rawdb/ancient_utils.go @@ -18,6 +18,7 @@ package rawdb import ( "fmt" + "path/filepath" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" @@ -126,6 +127,8 @@ func InspectFreezerTable(ancient string, freezerName string, tableName string, s switch freezerName { case chainFreezerName: path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy + case stateFreezerName: + path, tables = filepath.Join(ancient, freezerName), stateFreezerNoSnappy default: return fmt.Errorf("unknown freezer, supported ones: %v", freezers) } diff --git a/core/rawdb/freezer_batch.go b/core/rawdb/freezer_batch.go index 3cc7d84f4ef4..84a63a4518d7 100644 --- a/core/rawdb/freezer_batch.go +++ b/core/rawdb/freezer_batch.go @@ -182,19 +182,27 @@ func (batch *freezerTableBatch) maybeCommit() error { // commit writes the batched items to 
the backing freezerTable. func (batch *freezerTableBatch) commit() error { - // Write data. + // Write data. The head file is fsync'd after write to ensure the + // data is truly transferred to disk. _, err := batch.t.head.Write(batch.dataBuffer) if err != nil { return err } + if err := batch.t.head.Sync(); err != nil { + return err + } dataSize := int64(len(batch.dataBuffer)) batch.dataBuffer = batch.dataBuffer[:0] - // Write indices. + // Write indices. The index file is fsync'd after write to ensure the + // data indexes are truly transferred to disk. _, err = batch.t.index.Write(batch.indexBuffer) if err != nil { return err } + if err := batch.t.index.Sync(); err != nil { + return err + } indexSize := int64(len(batch.indexBuffer)) batch.indexBuffer = batch.indexBuffer[:0] diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go index e3353cc7d5cb..61436bf93272 100644 --- a/core/rawdb/freezer_table.go +++ b/core/rawdb/freezer_table.go @@ -215,7 +215,9 @@ func (t *freezerTable) repair() error { if t.readonly { return fmt.Errorf("index file(path: %s, name: %s) size is not a multiple of %d", t.path, t.name, indexEntrySize) } - truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path + if err := truncateFreezerFile(t.index, stat.Size()-overflow); err != nil { + return err + } // New file can't trigger this path } // Retrieve the file sizes and prepare for truncation if stat, err = t.index.Stat(); err != nil { @@ -260,8 +262,8 @@ func (t *freezerTable) repair() error { // Print an error log if the index is corrupted due to an incorrect // last index item. While it is theoretically possible to have a zero offset // by storing all zero-size items, it is highly unlikely to occur in practice. 
- if lastIndex.offset == 0 && offsetsSize%indexEntrySize > 1 { - log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "items", offsetsSize%indexEntrySize-1) + if lastIndex.offset == 0 && offsetsSize/indexEntrySize > 1 { + log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "indexes", offsetsSize/indexEntrySize) } if t.readonly { t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly) @@ -416,6 +418,9 @@ func (t *freezerTable) truncateHead(items uint64) error { if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil { return err } + if err := t.index.Sync(); err != nil { + return err + } // Calculate the new expected size of the data file and truncate it var expected indexEntry if length == 0 { @@ -438,6 +443,7 @@ func (t *freezerTable) truncateHead(items uint64) error { // Release any files _after the current head -- both the previous head // and any files which may have been opened for reading t.releaseFilesAfter(expected.filenum, true) + // Set back the historic head t.head = newHead t.headId = expected.filenum @@ -445,6 +451,9 @@ func (t *freezerTable) truncateHead(items uint64) error { if err := truncateFreezerFile(t.head, int64(expected.offset)); err != nil { return err } + if err := t.head.Sync(); err != nil { + return err + } // All data files truncated, set internal counters and return t.headBytes = int64(expected.offset) t.items.Store(items) @@ -589,10 +598,12 @@ func (t *freezerTable) Close() error { // error on Windows. doClose(t.index, true, true) doClose(t.meta, true, true) + // The preopened non-head data-files are all opened in readonly. // The head is opened in rw-mode, so we sync it here - but since it's also // part of t.files, it will be closed in the loop below. 
doClose(t.head, true, false) // sync but do not close + for _, f := range t.files { doClose(f, false, true) // close but do not sync } diff --git a/core/rawdb/freezer_utils.go b/core/rawdb/freezer_utils.go index 1bbb50c4984f..752e95ba6aea 100644 --- a/core/rawdb/freezer_utils.go +++ b/core/rawdb/freezer_utils.go @@ -73,11 +73,7 @@ func copyFrom(srcPath, destPath string, offset uint64, before func(f *os.File) e return err } f = nil - - if err := os.Rename(fname, destPath); err != nil { - return err - } - return nil + return os.Rename(fname, destPath) } // openFreezerFileForAppend opens a freezer table file and seeks to the end From 2f4833b8282045da08b42e692439194a3f424095 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 10 Nov 2023 12:21:51 +0100 Subject: [PATCH 28/41] cmd/evm: allow state dump regardless if test passes in statetest (#28484) This change makes it so that when executing state tess, state is always dumped out if the corresponding flag is set. --- cmd/evm/staterunner.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index 8a07fccdf886..618ddf2ede13 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -108,13 +108,14 @@ func runStateTest(fname string, cfg vm.Config, jsonOut, dump bool) error { fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root) } } + // Dump any state to aid debugging + if dump { + dump := state.RawDump(nil) + result.State = &dump + } if err != nil { - // Test failed, mark as so and dump any state to aid debugging + // Test failed, mark as so result.Pass, result.Error = false, err.Error() - if dump { - dump := state.RawDump(nil) - result.State = &dump - } } }) results = append(results, *result) From ce5a4809fd212b056ab1198271f44d123740de0e Mon Sep 17 00:00:00 2001 From: Delweng Date: Fri, 10 Nov 2023 19:23:09 +0800 Subject: [PATCH 29/41] ethclient: add empty/nonexist account testcase for eth_getProof RPC (#28482) Adds testcases for 
eth_getProof endpoint for the following cases: - the account/contract does not exist - the account/contract exists but is empty. --- ethclient/gethclient/gethclient_test.go | 46 +++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/ethclient/gethclient/gethclient_test.go b/ethclient/gethclient/gethclient_test.go index de45b106957a..a718246bd0dc 100644 --- a/ethclient/gethclient/gethclient_test.go +++ b/ethclient/gethclient/gethclient_test.go @@ -42,6 +42,7 @@ var ( testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") testAddr = crypto.PubkeyToAddress(testKey.PublicKey) testContract = common.HexToAddress("0xbeef") + testEmpty = common.HexToAddress("0xeeee") testSlot = common.HexToHash("0xdeadbeef") testValue = crypto.Keccak256Hash(testSlot[:]) testBalance = big.NewInt(2e15) @@ -80,8 +81,11 @@ func newTestBackend(t *testing.T) (*node.Node, []*types.Block) { func generateTestChain() (*core.Genesis, []*types.Block) { genesis := &core.Genesis{ Config: params.AllEthashProtocolChanges, - Alloc: core.GenesisAlloc{testAddr: {Balance: testBalance, Storage: map[common.Hash]common.Hash{testSlot: testValue}}, - testContract: {Nonce: 1, Code: []byte{0x13, 0x37}}}, + Alloc: core.GenesisAlloc{ + testAddr: {Balance: testBalance, Storage: map[common.Hash]common.Hash{testSlot: testValue}}, + testContract: {Nonce: 1, Code: []byte{0x13, 0x37}}, + testEmpty: {Balance: big.NewInt(1)}, + }, ExtraData: []byte("test genesis"), Timestamp: 9000, } @@ -110,6 +114,12 @@ func TestGethClient(t *testing.T) { }, { "TestGetProof2", func(t *testing.T) { testGetProof(t, client, testContract) }, + }, { + "TestGetProofEmpty", + func(t *testing.T) { testGetProof(t, client, testEmpty) }, + }, { + "TestGetProofNonExistent", + func(t *testing.T) { testGetProofNonExistent(t, client) }, }, { "TestGetProofCanonicalizeKeys", func(t *testing.T) { testGetProofCanonicalizeKeys(t, client) }, @@ -274,6 +284,38 @@ func 
testGetProofCanonicalizeKeys(t *testing.T, client *rpc.Client) { } } +func testGetProofNonExistent(t *testing.T, client *rpc.Client) { + addr := common.HexToAddress("0x0001") + ec := New(client) + result, err := ec.GetProof(context.Background(), addr, nil, nil) + if err != nil { + t.Fatal(err) + } + if result.Address != addr { + t.Fatalf("unexpected address, have: %v want: %v", result.Address, addr) + } + // test nonce + if result.Nonce != 0 { + t.Fatalf("invalid nonce, want: %v got: %v", 0, result.Nonce) + } + // test balance + if result.Balance.Cmp(big.NewInt(0)) != 0 { + t.Fatalf("invalid balance, want: %v got: %v", 0, result.Balance) + } + // test storage + if have := len(result.StorageProof); have != 0 { + t.Fatalf("invalid storage proof, want 0 proof, got %v proof(s)", have) + } + // test codeHash + if have, want := result.CodeHash, (common.Hash{}); have != want { + t.Fatalf("codehash wrong, have %v want %v ", have, want) + } + // test codeHash + if have, want := result.StorageHash, (common.Hash{}); have != want { + t.Fatalf("storagehash wrong, have %v want %v ", have, want) + } +} + func testGCStats(t *testing.T, client *rpc.Client) { ec := New(client) _, err := ec.GCStats(context.Background()) From 49b2c5f43c00b12f345182096f12b25f6599786a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marius=20Kj=C3=A6rstad?= Date: Fri, 10 Nov 2023 13:15:06 +0100 Subject: [PATCH 30/41] build: upgrade -dlgo version to Go 1.21.4 (#28505) --- build/checksums.txt | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/build/checksums.txt b/build/checksums.txt index dd8a9cdbf06a..c96bd8566786 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -5,22 +5,22 @@ # https://github.com/ethereum/execution-spec-tests/releases/download/v1.0.6/ 485af7b66cf41eb3a8c1bd46632913b8eb95995df867cf665617bbc9b4beedd1 fixtures_develop.tar.gz -# version:golang 1.21.3 +# version:golang 1.21.4 # https://go.dev/dl/ 
-186f2b6f8c8b704e696821b09ab2041a5c1ee13dcbc3156a13adcf75931ee488 go1.21.3.src.tar.gz -27014fc69e301d7588a169ca239b3cc609f0aa1abf38528bf0d20d3b259211eb go1.21.3.darwin-amd64.tar.gz -65302a7a9f7a4834932b3a7a14cb8be51beddda757b567a2f9e0cbd0d7b5a6ab go1.21.3.darwin-arm64.tar.gz -8e0cd2f66cf1bde9d07b4aee01e3d7c3cfdd14e20650488e1683da4b8492594a go1.21.3.freebsd-386.tar.gz -6e74f65f586e93d1f3947894766f69e9b2ebda488592a09df61f36f06bfe58a8 go1.21.3.freebsd-amd64.tar.gz -fb209fd070db500a84291c5a95251cceeb1723e8f6142de9baca5af70a927c0e go1.21.3.linux-386.tar.gz -1241381b2843fae5a9707eec1f8fb2ef94d827990582c7c7c32f5bdfbfd420c8 go1.21.3.linux-amd64.tar.gz -fc90fa48ae97ba6368eecb914343590bbb61b388089510d0c56c2dde52987ef3 go1.21.3.linux-arm64.tar.gz -a1ddcaaf0821a12a800884c14cb4268ce1c1f5a0301e9060646f1e15e611c6c7 go1.21.3.linux-armv6l.tar.gz -3b0e10a3704f164a6e85e0377728ec5fd21524fabe4c925610e34076586d5826 go1.21.3.linux-ppc64le.tar.gz -4c78e2e6f4c684a3d5a9bdc97202729053f44eb7be188206f0627ef3e18716b6 go1.21.3.linux-s390x.tar.gz -e36737f4f2fadb4d2f919ec4ce517133a56e06064cca6e82fc883bb000c4d56c go1.21.3.windows-386.zip -27c8daf157493f288d42a6f38debc6a2cb391f6543139eba9152fceca0be2a10 go1.21.3.windows-amd64.zip -bfb7a5c56f9ded07d8ae0e0b3702ac07b65e68fa8f33da24ed6df4ce01fe2c5c go1.21.3.windows-arm64.zip +47b26a83d2b65a3c1c1bcace273b69bee49a7a7b5168a7604ded3d26a37bd787 go1.21.4.src.tar.gz +cd3bdcc802b759b70e8418bc7afbc4a65ca73a3fe576060af9fc8a2a5e71c3b8 go1.21.4.darwin-amd64.tar.gz +8b7caf2ac60bdff457dba7d4ff2a01def889592b834453431ae3caecf884f6a5 go1.21.4.darwin-arm64.tar.gz +f1e685d086eb36f4be5b8b953b52baf7752bc6235400d84bb7d87e500b65f03e go1.21.4.freebsd-386.tar.gz +59f9b32187efb98d344a3818a631d3815ebb5c7bbefc367bab6515caaca544e9 go1.21.4.freebsd-amd64.tar.gz +64d3e5d295806e137c9e39d1e1f10b00a30fcd5c2f230d72b3298f579bb3c89a go1.21.4.linux-386.tar.gz +73cac0215254d0c7d1241fa40837851f3b9a8a742d0b54714cbdfb3feaf8f0af go1.21.4.linux-amd64.tar.gz 
+ce1983a7289856c3a918e1fd26d41e072cc39f928adfb11ba1896440849b95da go1.21.4.linux-arm64.tar.gz +6c62e89113750cc77c498194d13a03fadfda22bd2c7d44e8a826fd354db60252 go1.21.4.linux-armv6l.tar.gz +2c63b36d2adcfb22013102a2ee730f058ec2f93b9f27479793c80b2e3641783f go1.21.4.linux-ppc64le.tar.gz +7a75ba4afc7a96058ca65903d994cd862381825d7dca12b2183f087c757c26c0 go1.21.4.linux-s390x.tar.gz +870a0e462b94671dc2d6cac707e9e19f7524fdc3c90711e6cd4450c3713a8ce0 go1.21.4.windows-386.zip +79e5428e068c912d9cfa6cd115c13549856ec689c1332eac17f5d6122e19d595 go1.21.4.windows-amd64.zip +58bc7c6f4d4c72da2df4d2650c8222fe03c9978070eb3c66be8bbaa2a4757ac1 go1.21.4.windows-arm64.zip # version:golangci 1.51.1 # https://github.com/golangci/golangci-lint/releases/ From f265cc24b4b17b55b468b315303a3a599fc01d81 Mon Sep 17 00:00:00 2001 From: Delweng Date: Fri, 10 Nov 2023 20:26:13 +0800 Subject: [PATCH 31/41] cmd/geth: remove some whitespace in code and comments (#28148) This changes just removes some whitespace --- cmd/geth/dbcmd.go | 6 +++--- cmd/geth/snapshot.go | 10 +++++----- cmd/utils/flags.go | 3 +-- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index 6f802716c5e8..ab2626c1204f 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -108,7 +108,7 @@ a data corruption.`, utils.CacheFlag, utils.CacheDatabaseFlag, }, utils.NetworkFlags, utils.DatabaseFlags), - Description: `This command performs a database compaction. + Description: `This command performs a database compaction. WARNING: This operation may take a very long time to finish, and may cause database corruption if it is aborted during execution'!`, } @@ -130,7 +130,7 @@ corruption if it is aborted during execution'!`, Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, }, utils.NetworkFlags, utils.DatabaseFlags), - Description: `This command deletes the specified database key from the database. + Description: `This command deletes the specified database key from the database. 
WARNING: This is a low-level operation which may cause database corruption!`, } dbPutCmd = &cli.Command{ @@ -141,7 +141,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, }, utils.NetworkFlags, utils.DatabaseFlags), - Description: `This command sets a given database key to the given value. + Description: `This command sets a given database key to the given value. WARNING: This is a low-level operation which may cause database corruption!`, } dbGetSlotsCmd = &cli.Command{ diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index 64134825116a..25c6311c4c43 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -84,8 +84,8 @@ In other words, this command does the snapshot to trie conversion. Action: checkDanglingStorage, Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` -geth snapshot check-dangling-storage traverses the snap storage -data, and verifies that all snapshot storage data has a corresponding account. +geth snapshot check-dangling-storage traverses the snap storage +data, and verifies that all snapshot storage data has a corresponding account. `, }, { @@ -96,7 +96,7 @@ data, and verifies that all snapshot storage data has a corresponding account. Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth snapshot inspect-account
checks all snapshot layers and prints out -information about the specified address. +information about the specified address. `, }, { @@ -125,7 +125,7 @@ geth snapshot traverse-rawstate will traverse the whole state from the given root and will abort if any referenced trie node or contract code is missing. This command can be used for state integrity verification. The default checking target is the HEAD state. It's basically identical -to traverse-state, but the check granularity is smaller. +to traverse-state, but the check granularity is smaller. It's also usable without snapshot enabled. `, @@ -143,7 +143,7 @@ It's also usable without snapshot enabled. }, utils.NetworkFlags, utils.DatabaseFlags), Description: ` This command is semantically equivalent to 'geth dump', but uses the snapshots -as the backend data source, making this command a lot faster. +as the backend data source, making this command a lot faster. The argument is interpreted as block number or hash. If none is provided, the latest block is used. diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index db226c73d823..e9a7c7c110f9 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -2045,12 +2045,11 @@ func SplitTagsFlag(tagsFlag string) map[string]string { return tagsMap } -// MakeChainDatabase open an LevelDB using the flags passed to the client and will hard crash if it fails. +// MakeChainDatabase opens a database using the flags passed to the client and will hard crash if it fails. 
func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.Database { var ( cache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheDatabaseFlag.Name) / 100 handles = MakeDatabaseHandles(ctx.Int(FDLimitFlag.Name)) - err error chainDb ethdb.Database ) From 916d6a441a866cb618ae826c220866de118899f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 14 Nov 2023 15:02:24 +0300 Subject: [PATCH 32/41] params: release Geth v1.15.5 --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index bcab461a4358..5fb9631f1b3b 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 13 // Minor version component of the current release - VersionPatch = 5 // Patch version component of the current release - VersionMeta = "unstable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 13 // Minor version component of the current release + VersionPatch = 5 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. 
From 3d92f68729167b6d7e2a087fcd583cb0ac0ab264 Mon Sep 17 00:00:00 2001 From: colin <102356659+colinlyguo@users.noreply.github.com> Date: Mon, 21 Oct 2024 22:24:38 +0800 Subject: [PATCH 33/41] refactor: use new da-codec interfaces (#1068) * refactor: use new da-codec interfaces * go mod tidy * tweak * add a tweak * tweak logs * update da-codec commit * refactor: use new da codec interfaces in syncing from L1 (#1078) * use new da-codec interface in syncing from l1 * delete unused * nit * uncomment * typo --------- Co-authored-by: colinlyguo * update da-codec * use IsL1MessageSkipped in da-codec repo * use canonical version * address AI's comments: add nil checks --------- Co-authored-by: Nazarii Denha --- core/rawdb/accessors_rollup_event.go | 41 --- core/rawdb/accessors_rollup_event_test.go | 64 ----- core/rawdb/schema.go | 6 - go.mod | 4 +- go.sum | 12 +- rollup/da_syncer/batch_queue.go | 2 +- rollup/da_syncer/da/calldata_blob_source.go | 27 +- rollup/da_syncer/da/commitV0.go | 27 +- rollup/da_syncer/da/commitV1.go | 30 +-- rollup/da_syncer/da/commitV2.go | 40 --- rollup/da_syncer/da/commitV4.go | 40 --- rollup/da_syncer/da/da.go | 8 +- .../rollup_sync_service.go | 245 +++--------------- .../rollup_sync_service_test.go | 225 ++++++++++------ 14 files changed, 234 insertions(+), 537 deletions(-) delete mode 100644 rollup/da_syncer/da/commitV2.go delete mode 100644 rollup/da_syncer/da/commitV4.go diff --git a/core/rawdb/accessors_rollup_event.go b/core/rawdb/accessors_rollup_event.go index 6670b4b7b85f..1b60f6e4f0d8 100644 --- a/core/rawdb/accessors_rollup_event.go +++ b/core/rawdb/accessors_rollup_event.go @@ -58,47 +58,6 @@ func ReadRollupEventSyncedL1BlockNumber(db ethdb.Reader) *uint64 { return &rollupEventSyncedL1BlockNumber } -// WriteBatchChunkRanges writes the block ranges for each chunk within a batch to the database. -// It serializes the chunk ranges using RLP and stores them under a key derived from the batch index. 
-// for backward compatibility, new info is also stored in CommittedBatchMeta. -func WriteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64, chunkBlockRanges []*ChunkBlockRange) { - value, err := rlp.EncodeToBytes(chunkBlockRanges) - if err != nil { - log.Crit("failed to RLP encode batch chunk ranges", "batch index", batchIndex, "err", err) - } - if err := db.Put(batchChunkRangesKey(batchIndex), value); err != nil { - log.Crit("failed to store batch chunk ranges", "batch index", batchIndex, "value", value, "err", err) - } -} - -// DeleteBatchChunkRanges removes the block ranges of all chunks associated with a specific batch from the database. -// Note: Only non-finalized batches can be reverted. -// for backward compatibility, new info is also stored in CommittedBatchMeta. -func DeleteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64) { - if err := db.Delete(batchChunkRangesKey(batchIndex)); err != nil { - log.Crit("failed to delete batch chunk ranges", "batch index", batchIndex, "err", err) - } -} - -// ReadBatchChunkRanges retrieves the block ranges of all chunks associated with a specific batch from the database. -// It returns a list of ChunkBlockRange pointers, or nil if no chunk ranges are found for the given batch index. -// for backward compatibility, new info is also stored in CommittedBatchMeta. -func ReadBatchChunkRanges(db ethdb.Reader, batchIndex uint64) []*ChunkBlockRange { - data, err := db.Get(batchChunkRangesKey(batchIndex)) - if err != nil && isNotFoundErr(err) { - return nil - } - if err != nil { - log.Crit("failed to read batch chunk ranges from database", "err", err) - } - - cr := new([]*ChunkBlockRange) - if err := rlp.Decode(bytes.NewReader(data), cr); err != nil { - log.Crit("Invalid ChunkBlockRange RLP", "batch index", batchIndex, "data", data, "err", err) - } - return *cr -} - // WriteFinalizedBatchMeta stores the metadata of a finalized batch in the database. 
func WriteFinalizedBatchMeta(db ethdb.KeyValueWriter, batchIndex uint64, finalizedBatchMeta *FinalizedBatchMeta) { value, err := rlp.EncodeToBytes(finalizedBatchMeta) diff --git a/core/rawdb/accessors_rollup_event_test.go b/core/rawdb/accessors_rollup_event_test.go index bbe82efde59a..5eb165dcb0c8 100644 --- a/core/rawdb/accessors_rollup_event_test.go +++ b/core/rawdb/accessors_rollup_event_test.go @@ -147,70 +147,6 @@ func TestFinalizedBatchMeta(t *testing.T) { } } -func TestBatchChunkRanges(t *testing.T) { - chunks := [][]*ChunkBlockRange{ - { - {StartBlockNumber: 1, EndBlockNumber: 100}, - {StartBlockNumber: 101, EndBlockNumber: 200}, - }, - { - {StartBlockNumber: 201, EndBlockNumber: 300}, - {StartBlockNumber: 301, EndBlockNumber: 400}, - }, - { - {StartBlockNumber: 401, EndBlockNumber: 500}, - }, - } - - db := NewMemoryDatabase() - - for i, chunkRange := range chunks { - batchIndex := uint64(i) - WriteBatchChunkRanges(db, batchIndex, chunkRange) - } - - for i, chunkRange := range chunks { - batchIndex := uint64(i) - readChunkRange := ReadBatchChunkRanges(db, batchIndex) - if len(readChunkRange) != len(chunkRange) { - t.Fatal("Mismatch in number of chunk ranges", "expected", len(chunkRange), "got", len(readChunkRange)) - } - - for j, cr := range readChunkRange { - if cr.StartBlockNumber != chunkRange[j].StartBlockNumber || cr.EndBlockNumber != chunkRange[j].EndBlockNumber { - t.Fatal("Mismatch in chunk range", "batch index", batchIndex, "expected", chunkRange[j], "got", cr) - } - } - } - - // over-write - newRange := []*ChunkBlockRange{{StartBlockNumber: 1001, EndBlockNumber: 1100}} - WriteBatchChunkRanges(db, 0, newRange) - readChunkRange := ReadBatchChunkRanges(db, 0) - if len(readChunkRange) != 1 || readChunkRange[0].StartBlockNumber != 1001 || readChunkRange[0].EndBlockNumber != 1100 { - t.Fatal("Over-write failed for chunk range", "expected", newRange, "got", readChunkRange) - } - - // read non-existing value - if readChunkRange = ReadBatchChunkRanges(db, 
uint64(len(chunks)+1)); readChunkRange != nil { - t.Fatal("Expected nil for non-existing value", "got", readChunkRange) - } - - // delete: revert batch - for i := range chunks { - batchIndex := uint64(i) - DeleteBatchChunkRanges(db, batchIndex) - - readChunkRange := ReadBatchChunkRanges(db, batchIndex) - if readChunkRange != nil { - t.Fatal("Chunk range was not deleted", "batch index", batchIndex) - } - } - - // delete non-existing value: ensure the delete operation handles non-existing values without errors. - DeleteBatchChunkRanges(db, uint64(len(chunks)+1)) -} - func TestWriteReadDeleteCommittedBatchMeta(t *testing.T) { db := NewMemoryDatabase() diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index b553045f0a40..aec2c365f661 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -149,7 +149,6 @@ var ( // Scroll rollup event store rollupEventSyncedL1BlockNumberKey = []byte("R-LastRollupEventSyncedL1BlockNumber") - batchChunkRangesPrefix = []byte("R-bcr") batchMetaPrefix = []byte("R-bm") finalizedL2BlockNumberKey = []byte("R-finalized") lastFinalizedBatchIndexKey = []byte("R-finalizedBatchIndex") @@ -410,11 +409,6 @@ func SkippedTransactionHashKey(index uint64) []byte { return append(skippedTransactionHashPrefix, encodeBigEndian(index)...) } -// batchChunkRangesKey = batchChunkRangesPrefix + batch index (uint64 big endian) -func batchChunkRangesKey(batchIndex uint64) []byte { - return append(batchChunkRangesPrefix, encodeBigEndian(batchIndex)...) -} - // batchMetaKey = batchMetaPrefix + batch index (uint64 big endian) func batchMetaKey(batchIndex uint64) []byte { return append(batchMetaPrefix, encodeBigEndian(batchIndex)...) 
diff --git a/go.mod b/go.mod index ab4da0ed3a09..eb5cbb594568 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21 require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 github.com/Microsoft/go-winio v0.6.1 - github.com/VictoriaMetrics/fastcache v1.12.1 + github.com/VictoriaMetrics/fastcache v1.12.2 github.com/aws/aws-sdk-go-v2 v1.21.2 github.com/aws/aws-sdk-go-v2/config v1.18.45 github.com/aws/aws-sdk-go-v2/credentials v1.13.43 @@ -57,7 +57,7 @@ require ( github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7 github.com/rs/cors v1.7.0 - github.com/scroll-tech/da-codec v0.1.1-0.20240902151734-41c648646967 + github.com/scroll-tech/da-codec v0.1.2 github.com/scroll-tech/zktrie v0.8.4 github.com/shirou/gopsutil v3.21.11+incompatible github.com/sourcegraph/conc v0.3.0 diff --git a/go.sum b/go.sum index e3c2b369d3f9..fa1a27460115 100644 --- a/go.sum +++ b/go.sum @@ -51,8 +51,10 @@ github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= -github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= +github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= +github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= +github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38= +github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -472,8 +474,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/da-codec v0.1.1-0.20240902151734-41c648646967 h1:FSM0l1n5KszBjPFOnMbSa4pg3zv07DYIU2VnH6BUH34= -github.com/scroll-tech/da-codec v0.1.1-0.20240902151734-41c648646967/go.mod h1:O9jsbQGNnTEfyfZg7idevq6jGGSQshX70elX+TRH8vU= +github.com/scroll-tech/da-codec v0.1.2 h1:QyJ+dQ4zWVVJwuqxNt4MiKyrymVc6rHe4YPtURkjiRc= +github.com/scroll-tech/da-codec v0.1.2/go.mod h1:odz1ck3umvYccCG03osaQBISAYGinZktZYbpk94fYRE= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= @@ -693,9 +695,9 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= diff --git a/rollup/da_syncer/batch_queue.go b/rollup/da_syncer/batch_queue.go index 7a3d094f6322..a0172a86c077 100644 --- a/rollup/da_syncer/batch_queue.go +++ b/rollup/da_syncer/batch_queue.go @@ -41,7 +41,7 @@ func (bq *BatchQueue) NextBatch(ctx context.Context) (da.Entry, error) { return nil, err } switch daEntry.Type() { - case da.CommitBatchV0Type, da.CommitBatchV1Type, da.CommitBatchV2Type: + case da.CommitBatchV0Type, da.CommitBatchWithBlobType: bq.addBatch(daEntry) case da.RevertBatchType: bq.deleteBatch(daEntry) diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index 231cc4c1829e..47eabfceb65f 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" + "github.com/scroll-tech/da-codec/encoding" "github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core/types" @@ -205,19 +206,21 @@ func (ds *CalldataBlobSource) getCommitBatchDA(batchIndex uint64, vLog *types.Lo if err != nil { return nil, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err) } - if method.Name == commitBatchMethodName { args, err := newCommitBatchArgs(method, values) if err != nil { return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) } + codecVersion := encoding.CodecVersion(args.Version) + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, 
batchIndex, err) + } switch args.Version { case 0: - return NewCommitBatchDAV0(ds.db, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap, vLog.BlockNumber) - case 1: - return NewCommitBatchDAV1(ds.ctx, ds.db, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) - case 2: - return NewCommitBatchDAV2(ds.ctx, ds.db, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) + return NewCommitBatchDAV0(ds.db, codec, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap, vLog.BlockNumber) + case 1, 2: + return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) default: return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) } @@ -226,12 +229,14 @@ func (ds *CalldataBlobSource) getCommitBatchDA(batchIndex uint64, vLog *types.Lo if err != nil { return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) } + codecVersion := encoding.CodecVersion(args.Version) + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) + } switch args.Version { - case 3: - // we can use V2 for version 3, because it's same - return NewCommitBatchDAV2(ds.ctx, ds.db, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) - case 4: - return NewCommitBatchDAV4(ds.ctx, ds.db, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) + case 3, 4: + return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, 
ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) default: return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) } diff --git a/rollup/da_syncer/da/commitV0.go b/rollup/da_syncer/da/commitV0.go index 66a13786c9cb..135a76d79518 100644 --- a/rollup/da_syncer/da/commitV0.go +++ b/rollup/da_syncer/da/commitV0.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/scroll-tech/da-codec/encoding" - "github.com/scroll-tech/da-codec/encoding/codecv0" "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/types" @@ -18,13 +17,14 @@ type CommitBatchDAV0 struct { batchIndex uint64 parentTotalL1MessagePopped uint64 skippedL1MessageBitmap []byte - chunks []*codecv0.DAChunkRawTx + chunks []*encoding.DAChunkRawTx l1Txs []*types.L1MessageTx l1BlockNumber uint64 } func NewCommitBatchDAV0(db ethdb.Database, + codec encoding.Codec, version uint8, batchIndex uint64, parentBatchHeader []byte, @@ -32,7 +32,7 @@ func NewCommitBatchDAV0(db ethdb.Database, skippedL1MessageBitmap []byte, l1BlockNumber uint64, ) (*CommitBatchDAV0, error) { - decodedChunks, err := codecv0.DecodeDAChunksRawTx(chunks) + decodedChunks, err := codec.DecodeDAChunksRawTx(chunks) if err != nil { return nil, fmt.Errorf("failed to unpack chunks: %d, err: %w", batchIndex, err) } @@ -44,7 +44,7 @@ func NewCommitBatchDAV0WithChunks(db ethdb.Database, version uint8, batchIndex uint64, parentBatchHeader []byte, - decodedChunks []*codecv0.DAChunkRawTx, + decodedChunks []*encoding.DAChunkRawTx, skippedL1MessageBitmap []byte, l1BlockNumber uint64, ) (*CommitBatchDAV0, error) { @@ -100,24 +100,24 @@ func (c *CommitBatchDAV0) Blocks() []*PartialBlock { for _, chunk := range c.chunks { for blockId, daBlock := range chunk.Blocks { // create txs - txs := make(types.Transactions, 0, daBlock.NumTransactions) + txs := make(types.Transactions, 0, daBlock.NumTransactions()) // insert l1 
msgs - for l1TxPointer < len(c.l1Txs) && c.l1Txs[l1TxPointer].QueueIndex < curL1TxIndex+uint64(daBlock.NumL1Messages) { + for l1TxPointer < len(c.l1Txs) && c.l1Txs[l1TxPointer].QueueIndex < curL1TxIndex+uint64(daBlock.NumL1Messages()) { l1Tx := types.NewTx(c.l1Txs[l1TxPointer]) txs = append(txs, l1Tx) l1TxPointer++ } - curL1TxIndex += uint64(daBlock.NumL1Messages) + curL1TxIndex += uint64(daBlock.NumL1Messages()) // insert l2 txs txs = append(txs, chunk.Transactions[blockId]...) block := NewPartialBlock( &PartialHeader{ - Number: daBlock.BlockNumber, - Time: daBlock.Timestamp, - BaseFee: daBlock.BaseFee, - GasLimit: daBlock.GasLimit, + Number: daBlock.Number(), + Time: daBlock.Timestamp(), + BaseFee: daBlock.BaseFee(), + GasLimit: daBlock.GasLimit(), Difficulty: 10, // TODO: replace with real difficulty ExtraData: []byte{1, 2, 3, 4, 5, 6, 7, 8}, // TODO: replace with real extra data }, @@ -129,11 +129,11 @@ func (c *CommitBatchDAV0) Blocks() []*PartialBlock { return blocks } -func getTotalMessagesPoppedFromChunks(decodedChunks []*codecv0.DAChunkRawTx) int { +func getTotalMessagesPoppedFromChunks(decodedChunks []*encoding.DAChunkRawTx) int { totalL1MessagePopped := 0 for _, chunk := range decodedChunks { for _, block := range chunk.Blocks { - totalL1MessagePopped += int(block.NumL1Messages) + totalL1MessagePopped += int(block.NumL1Messages()) } } return totalL1MessagePopped @@ -141,7 +141,6 @@ func getTotalMessagesPoppedFromChunks(decodedChunks []*codecv0.DAChunkRawTx) int func getL1Messages(db ethdb.Database, parentTotalL1MessagePopped uint64, skippedBitmap []byte, totalL1MessagePopped int) ([]*types.L1MessageTx, error) { var txs []*types.L1MessageTx - decodedSkippedBitmap, err := encoding.DecodeBitmap(skippedBitmap, totalL1MessagePopped) if err != nil { return nil, fmt.Errorf("failed to decode skipped message bitmap: err: %w", err) diff --git a/rollup/da_syncer/da/commitV1.go b/rollup/da_syncer/da/commitV1.go index d94a046c81df..4670eec8bbcb 100644 --- 
a/rollup/da_syncer/da/commitV1.go +++ b/rollup/da_syncer/da/commitV1.go @@ -5,8 +5,7 @@ import ( "crypto/sha256" "fmt" - "github.com/scroll-tech/da-codec/encoding/codecv0" - "github.com/scroll-tech/da-codec/encoding/codecv1" + "github.com/scroll-tech/da-codec/encoding" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" @@ -21,7 +20,8 @@ type CommitBatchDAV1 struct { *CommitBatchDAV0 } -func NewCommitBatchDAV1(ctx context.Context, db ethdb.Database, +func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, + codec encoding.Codec, l1Client *rollup_sync_service.L1Client, blobClient blob_client.BlobClient, vLog *types.Log, @@ -31,21 +31,7 @@ func NewCommitBatchDAV1(ctx context.Context, db ethdb.Database, chunks [][]byte, skippedL1MessageBitmap []byte, ) (*CommitBatchDAV1, error) { - return NewCommitBatchDAV1WithBlobDecodeFunc(ctx, db, l1Client, blobClient, vLog, version, batchIndex, parentBatchHeader, chunks, skippedL1MessageBitmap, codecv1.DecodeTxsFromBlob) -} - -func NewCommitBatchDAV1WithBlobDecodeFunc(ctx context.Context, db ethdb.Database, - l1Client *rollup_sync_service.L1Client, - blobClient blob_client.BlobClient, - vLog *types.Log, - version uint8, - batchIndex uint64, - parentBatchHeader []byte, - chunks [][]byte, - skippedL1MessageBitmap []byte, - decodeTxsFromBlobFunc func(*kzg4844.Blob, []*codecv0.DAChunkRawTx) error, -) (*CommitBatchDAV1, error) { - decodedChunks, err := codecv1.DecodeDAChunksRawTx(chunks) + decodedChunks, err := codec.DecodeDAChunksRawTx(chunks) if err != nil { return nil, fmt.Errorf("failed to unpack chunks: %v, err: %w", batchIndex, err) } @@ -74,11 +60,15 @@ func NewCommitBatchDAV1WithBlobDecodeFunc(ctx context.Context, db ethdb.Database } // decode txs from blob - err = decodeTxsFromBlobFunc(blob, decodedChunks) + err = codec.DecodeTxsFromBlob(blob, decodedChunks) if err != nil { return nil, fmt.Errorf("failed to decode txs from blob: %w", 
err) } + if decodedChunks == nil { + return nil, fmt.Errorf("decodedChunks is nil after decoding") + } + v0, err := NewCommitBatchDAV0WithChunks(db, version, batchIndex, parentBatchHeader, decodedChunks, skippedL1MessageBitmap, vLog.BlockNumber) if err != nil { return nil, err @@ -88,5 +78,5 @@ func NewCommitBatchDAV1WithBlobDecodeFunc(ctx context.Context, db ethdb.Database } func (c *CommitBatchDAV1) Type() Type { - return CommitBatchV1Type + return CommitBatchWithBlobType } diff --git a/rollup/da_syncer/da/commitV2.go b/rollup/da_syncer/da/commitV2.go deleted file mode 100644 index c1e6d353fc5b..000000000000 --- a/rollup/da_syncer/da/commitV2.go +++ /dev/null @@ -1,40 +0,0 @@ -package da - -import ( - "context" - - "github.com/scroll-tech/da-codec/encoding/codecv2" - - "github.com/scroll-tech/go-ethereum/ethdb" - "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" - - "github.com/scroll-tech/go-ethereum/core/types" -) - -type CommitBatchDAV2 struct { - *CommitBatchDAV1 -} - -func NewCommitBatchDAV2(ctx context.Context, db ethdb.Database, - l1Client *rollup_sync_service.L1Client, - blobClient blob_client.BlobClient, - vLog *types.Log, - version uint8, - batchIndex uint64, - parentBatchHeader []byte, - chunks [][]byte, - skippedL1MessageBitmap []byte, -) (*CommitBatchDAV2, error) { - - v1, err := NewCommitBatchDAV1WithBlobDecodeFunc(ctx, db, l1Client, blobClient, vLog, version, batchIndex, parentBatchHeader, chunks, skippedL1MessageBitmap, codecv2.DecodeTxsFromBlob) - if err != nil { - return nil, err - } - - return &CommitBatchDAV2{v1}, nil -} - -func (c *CommitBatchDAV2) Type() Type { - return CommitBatchV2Type -} diff --git a/rollup/da_syncer/da/commitV4.go b/rollup/da_syncer/da/commitV4.go deleted file mode 100644 index 9b590b2bfff5..000000000000 --- a/rollup/da_syncer/da/commitV4.go +++ /dev/null @@ -1,40 +0,0 @@ -package da - -import ( - "context" - - 
"github.com/scroll-tech/da-codec/encoding/codecv4" - - "github.com/scroll-tech/go-ethereum/ethdb" - "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" - - "github.com/scroll-tech/go-ethereum/core/types" -) - -type CommitBatchDAV4 struct { - *CommitBatchDAV1 -} - -func NewCommitBatchDAV4(ctx context.Context, db ethdb.Database, - l1Client *rollup_sync_service.L1Client, - blobClient blob_client.BlobClient, - vLog *types.Log, - version uint8, - batchIndex uint64, - parentBatchHeader []byte, - chunks [][]byte, - skippedL1MessageBitmap []byte, -) (*CommitBatchDAV2, error) { - - v1, err := NewCommitBatchDAV1WithBlobDecodeFunc(ctx, db, l1Client, blobClient, vLog, version, batchIndex, parentBatchHeader, chunks, skippedL1MessageBitmap, codecv4.DecodeTxsFromBlob) - if err != nil { - return nil, err - } - - return &CommitBatchDAV2{v1}, nil -} - -func (c *CommitBatchDAV4) Type() Type { - return CommitBatchV4Type -} diff --git a/rollup/da_syncer/da/da.go b/rollup/da_syncer/da/da.go index 5f00e86115a1..1ad618d7ba3d 100644 --- a/rollup/da_syncer/da/da.go +++ b/rollup/da_syncer/da/da.go @@ -11,12 +11,8 @@ type Type int const ( // CommitBatchV0Type contains data of event of CommitBatchV0Type CommitBatchV0Type Type = iota - // CommitBatchV1Type contains data of event of CommitBatchV1Type - CommitBatchV1Type - // CommitBatchV2Type contains data of event of CommitBatchV2Type - CommitBatchV2Type - // CommitBatchV4Type contains data of event of CommitBatchV2Type - CommitBatchV4Type + // CommitBatchWithBlobType contains data of event of CommitBatchWithBlobType (v1, v2, v3, v4) + CommitBatchWithBlobType // RevertBatchType contains data of event of RevertBatchType RevertBatchType // FinalizeBatchType contains data of event of FinalizeBatchType diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index 6838342adcf5..e132456b2cad 100644 --- 
a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -4,18 +4,12 @@ import ( "context" "encoding/json" "fmt" - "math/big" "os" "reflect" "sync" "time" "github.com/scroll-tech/da-codec/encoding" - "github.com/scroll-tech/da-codec/encoding/codecv0" - "github.com/scroll-tech/da-codec/encoding/codecv1" - "github.com/scroll-tech/da-codec/encoding/codecv2" - "github.com/scroll-tech/da-codec/encoding/codecv3" - "github.com/scroll-tech/da-codec/encoding/codecv4" "github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/common" @@ -221,12 +215,11 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB batchIndex := event.BatchIndex.Uint64() log.Trace("found new CommitBatch event", "batch index", batchIndex) - committedBatchMeta, chunkBlockRanges, err := s.getCommittedBatchMeta(batchIndex, &vLog) + committedBatchMeta, err := s.getCommittedBatchMeta(batchIndex, &vLog) if err != nil { return fmt.Errorf("failed to get chunk ranges, batch index: %v, err: %w", batchIndex, err) } rawdb.WriteCommittedBatchMeta(s.db, batchIndex, committedBatchMeta) - rawdb.WriteBatchChunkRanges(s.db, batchIndex, chunkBlockRanges) case s.l1RevertBatchEventSignature: event := &L1RevertBatchEvent{} @@ -237,7 +230,6 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB log.Trace("found new RevertBatch event", "batch index", batchIndex) rawdb.DeleteCommittedBatchMeta(s.db, batchIndex) - rawdb.DeleteBatchChunkRanges(s.db, batchIndex) case s.l1FinalizeBatchEventSignature: event := &L1FinalizeBatchEvent{} @@ -272,12 +264,12 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB for index := startBatchIndex; index <= batchIndex; index++ { committedBatchMeta := rawdb.ReadCommittedBatchMeta(s.db, index) - chunks, err := s.getLocalChunksForBatch(index) + chunks, err := s.getLocalChunksForBatch(committedBatchMeta.ChunkBlockRanges) if err != 
nil { return fmt.Errorf("failed to get local node info, batch index: %v, err: %w", index, err) } - endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentFinalizedBatchMeta, committedBatchMeta, chunks, s.bc.Config(), s.stack) + endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentFinalizedBatchMeta, committedBatchMeta, chunks, s.stack) if err != nil { return fmt.Errorf("fatal: validateBatch failed: finalize event: %v, err: %w", event, err) } @@ -312,12 +304,10 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB return nil } -func (s *RollupSyncService) getLocalChunksForBatch(batchIndex uint64) ([]*encoding.Chunk, error) { - chunkBlockRanges := rawdb.ReadBatchChunkRanges(s.db, batchIndex) +func (s *RollupSyncService) getLocalChunksForBatch(chunkBlockRanges []*rawdb.ChunkBlockRange) ([]*encoding.Chunk, error) { if len(chunkBlockRanges) == 0 { - return nil, fmt.Errorf("failed to get batch chunk ranges, empty chunk block ranges") + return nil, fmt.Errorf("chunkBlockRanges is empty") } - endBlockNumber := chunkBlockRanges[len(chunkBlockRanges)-1].EndBlockNumber for i := 0; i < defaultMaxRetries; i++ { if s.ctx.Err() != nil { @@ -365,13 +355,13 @@ func (s *RollupSyncService) getLocalChunksForBatch(batchIndex uint64) ([]*encodi return chunks, nil } -func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types.Log) (*rawdb.CommittedBatchMeta, []*rawdb.ChunkBlockRange, error) { +func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types.Log) (*rawdb.CommittedBatchMeta, error) { if batchIndex == 0 { return &rawdb.CommittedBatchMeta{ Version: 0, BlobVersionedHashes: nil, ChunkBlockRanges: []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, - }, []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, nil + }, nil } tx, _, err := s.client.client.TransactionByHash(s.ctx, vLog.TxHash) @@ -380,11 +370,11 @@ func (s *RollupSyncService) 
getCommittedBatchMeta(batchIndex uint64, vLog *types "tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err) block, err := s.client.client.BlockByHash(s.ctx, vLog.BlockHash) if err != nil { - return nil, nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) + return nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) } if block == nil { - return nil, nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex()) + return nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex()) } found := false @@ -396,7 +386,7 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types } } if !found { - return nil, nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) + return nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) } } @@ -405,19 +395,19 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types if tx.Type() == types.BlobTxType { blobVersionedHashes := tx.BlobHashes() if blobVersionedHashes == nil { - return nil, nil, fmt.Errorf("invalid blob transaction, blob hashes is nil, tx hash: %v", tx.Hash().Hex()) + return nil, fmt.Errorf("invalid blob transaction, blob hashes is nil, tx hash: %v", tx.Hash().Hex()) } commitBatchMeta.BlobVersionedHashes = blobVersionedHashes } version, ranges, err := s.decodeBatchVersionAndChunkBlockRanges(tx.Data()) if err != nil { - return nil, nil, fmt.Errorf("failed to decode chunk block ranges, batch index: %v, 
err: %w", batchIndex, err) + return nil, fmt.Errorf("failed to decode chunk block ranges, batch index: %v, err: %w", batchIndex, err) } commitBatchMeta.Version = version commitBatchMeta.ChunkBlockRanges = ranges - return &commitBatchMeta, ranges, nil + return &commitBatchMeta, nil } // decodeBatchVersionAndChunkBlockRanges decodes version and chunks' block ranges in a batch based on the commit batch transaction's calldata. @@ -492,10 +482,8 @@ func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte) // - batchIndex: batch index of the validated batch // - event: L1 finalize batch event data // - parentFinalizedBatchMeta: metadata of the finalized parent batch -// - committedBatchMeta: committed batch metadata stored in the database. -// Can be nil for older client versions that don't store this information. +// - committedBatchMeta: committed batch metadata stored in the database // - chunks: slice of chunk data for the current batch -// - chainCfg: chain configuration to identify the codec version when committedBatchMeta is nil // - stack: node stack to terminate the node in case of inconsistency // // Returns: @@ -506,7 +494,7 @@ func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte) // Note: This function is compatible with both "finalize by batch" and "finalize by bundle" methods. // In "finalize by bundle", only the last batch of each bundle is fully verified. // This check still ensures the correctness of all batch hashes in the bundle due to the parent-child relationship between batch hashes. 
-func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, chainCfg *params.ChainConfig, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { +func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { if len(chunks) == 0 { return 0, nil, fmt.Errorf("invalid argument: length of chunks is 0, batch index: %v", batchIndex) } @@ -531,71 +519,17 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz Chunks: chunks, } - var codecVersion encoding.CodecVersion - if committedBatchMeta != nil { - codecVersion = encoding.CodecVersion(committedBatchMeta.Version) - } else { - codecVersion = determineCodecVersion(startBlock.Header.Number, startBlock.Header.Time, chainCfg) + codecVersion := encoding.CodecVersion(committedBatchMeta.Version) + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return 0, nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) } - var localBatchHash common.Hash - if codecVersion == encoding.CodecV0 { - daBatch, err := codecv0.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv0 DA batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV1 { - daBatch, err := codecv1.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv1 DA batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV2 { - daBatch, err := codecv2.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv2 DA 
batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV3 { - daBatch, err := codecv3.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv3 DA batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV4 { - // Check if committedBatchMeta exists, for backward compatibility with older client versions - if committedBatchMeta == nil { - return 0, nil, fmt.Errorf("missing committed batch metadata for codecV4, please use the latest client version, batch index: %v", batchIndex) - } - - // Validate BlobVersionedHashes - if committedBatchMeta.BlobVersionedHashes == nil || len(committedBatchMeta.BlobVersionedHashes) != 1 { - return 0, nil, fmt.Errorf("invalid blob hashes, batch index: %v, blob hashes: %v", batchIndex, committedBatchMeta.BlobVersionedHashes) - } - - // Attempt to create DA batch with compression - daBatch, err := codecv4.NewDABatch(batch, true) - if err != nil { - // If compression fails, try without compression - log.Warn("failed to create codecv4 DA batch with compress enabling", "batch index", batchIndex, "err", err) - daBatch, err = codecv4.NewDABatch(batch, false) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv4 DA batch, batch index: %v, err: %w", batchIndex, err) - } - } else if daBatch.BlobVersionedHash != committedBatchMeta.BlobVersionedHashes[0] { - // Inconsistent blob versioned hash, fallback to uncompressed DA batch - log.Warn("impossible case: inconsistent blob versioned hash", "batch index", batchIndex, "expected", committedBatchMeta.BlobVersionedHashes[0], "actual", daBatch.BlobVersionedHash) - daBatch, err = codecv4.NewDABatch(batch, false) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv4 DA batch, batch index: %v, err: %w", batchIndex, err) - } - } - - localBatchHash = daBatch.Hash() - } else { - return 0, nil, 
fmt.Errorf("unsupported codec version: %v", codecVersion) + daBatch, err := codec.NewDABatch(batch) + if err != nil { + return 0, nil, fmt.Errorf("failed to create DA batch, batch index: %v, codec version: %v, expected blob hashes: %v, err: %w", batchIndex, codecVersion, committedBatchMeta.BlobVersionedHashes, err) } + localBatchHash := daBatch.Hash() localStateRoot := endBlock.Header.Root localWithdrawRoot := endBlock.WithdrawRoot @@ -647,126 +581,29 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz return endBlock.Header.Number.Uint64(), finalizedBatchMeta, nil } -// determineCodecVersion determines the codec version based on the block number and chain configuration. -func determineCodecVersion(startBlockNumber *big.Int, startBlockTimestamp uint64, chainCfg *params.ChainConfig) encoding.CodecVersion { - switch { - case startBlockNumber.Uint64() == 0 || !chainCfg.IsBernoulli(startBlockNumber): - return encoding.CodecV0 // codecv0: genesis batch or batches before Bernoulli - case !chainCfg.IsCurie(startBlockNumber): - return encoding.CodecV1 // codecv1: batches after Bernoulli and before Curie - case !chainCfg.IsDarwin(startBlockNumber, startBlockTimestamp): - return encoding.CodecV2 // codecv2: batches after Curie and before Darwin - case !chainCfg.IsDarwinV2(startBlockNumber, startBlockTimestamp): - return encoding.CodecV3 // codecv3: batches after Darwin - default: - return encoding.CodecV4 // codecv4: batches after DarwinV2 - } -} - // decodeBlockRangesFromEncodedChunks decodes the provided chunks into a list of block ranges. 
func decodeBlockRangesFromEncodedChunks(codecVersion encoding.CodecVersion, chunks [][]byte) ([]*rawdb.ChunkBlockRange, error) { - var chunkBlockRanges []*rawdb.ChunkBlockRange - for _, chunk := range chunks { - if len(chunk) < 1 { - return nil, fmt.Errorf("invalid chunk, length is less than 1") - } - - numBlocks := int(chunk[0]) - - switch codecVersion { - case encoding.CodecV0: - if len(chunk) < 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv0.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv0.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } - - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV1: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv1.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv1.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } - - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV2: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv2.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = 
&codecv2.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } - - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV3: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv3.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv3.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err) + } - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV4: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv4.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv4.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } + daChunksRawTx, err := codec.DecodeDAChunksRawTx(chunks) + if err != nil { + return nil, fmt.Errorf("failed to decode DA chunks, version: %v, err: %w", codecVersion, err) + } - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - default: - return nil, fmt.Errorf("unexpected batch version %v", 
codecVersion) + var chunkBlockRanges []*rawdb.ChunkBlockRange + for _, daChunkRawTx := range daChunksRawTx { + if len(daChunkRawTx.Blocks) == 0 { + return nil, fmt.Errorf("no blocks found in DA chunk, version: %v", codecVersion) } + + chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ + StartBlockNumber: daChunkRawTx.Blocks[0].Number(), + EndBlockNumber: daChunkRawTx.Blocks[len(daChunkRawTx.Blocks)-1].Number(), + }) } + return chunkBlockRanges, nil } diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go b/rollup/rollup_sync_service/rollup_sync_service_test.go index 83b8c72c3d15..f1b09a37a1f2 100644 --- a/rollup/rollup_sync_service/rollup_sync_service_test.go +++ b/rollup/rollup_sync_service/rollup_sync_service_test.go @@ -313,7 +313,7 @@ func TestGetCommittedBatchMetaCodecv0(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x0"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV0, encoding.CodecVersion(metadata.Version)) @@ -324,13 +324,13 @@ func TestGetCommittedBatchMetaCodecv0(t *testing.T) { {StartBlockNumber: 911156, EndBlockNumber: 911159}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } @@ -367,7 +367,7 @@ func TestGetCommittedBatchMetaCodecv1(t *testing.T) { vLog := &types.Log{ 
TxHash: common.HexToHash("0x1"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV1, encoding.CodecVersion(metadata.Version)) @@ -376,13 +376,13 @@ func TestGetCommittedBatchMetaCodecv1(t *testing.T) { {StartBlockNumber: 1, EndBlockNumber: 11}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } @@ -419,7 +419,7 @@ func TestGetCommittedBatchMetaCodecv2(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x2"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV2, encoding.CodecVersion(metadata.Version)) @@ -456,13 +456,13 @@ func TestGetCommittedBatchMetaCodecv2(t *testing.T) { {StartBlockNumber: 174, EndBlockNumber: 174}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range 
metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } @@ -499,7 +499,7 @@ func TestGetCommittedBatchMetaCodecv3(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x3"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(metadata.Version)) @@ -537,20 +537,18 @@ func TestGetCommittedBatchMetaCodecv3(t *testing.T) { {StartBlockNumber: 70, EndBlockNumber: 70}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } func TestValidateBatchCodecv0(t *testing.T) { - chainConfig := ¶ms.ChainConfig{} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -560,50 +558,57 @@ func TestValidateBatchCodecv0(t *testing.T) { block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: 
common.HexToHash("0xfd3ecf106ce993adc6db68e42ce701bfe638434395abdeeb871f7bd395ae2368"), StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV0), + BlobVersionedHashes: nil, + } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 11, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0xadb8e526c3fdc2045614158300789cd66e7a945efe5a484db00b5ef9a26016d7"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV0), + BlobVersionedHashes: nil, + } + + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) 
assert.Equal(t, uint64(17), endBlock2) - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ BatchHash: event2.BatchHash, TotalL1MessagePopped: 42, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } func TestValidateBatchCodecv1(t *testing.T) { - chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0)} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -613,50 +618,56 @@ func TestValidateBatchCodecv1(t *testing.T) { block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0x73cb3310646716cb782702a0ec4ad33cf55633c85daf96b641953c5defe58031"), StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV1), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x0129554070e4323800ca0e5ddd17bc447854601b306a70870002a058741214b3")}, + } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - 
parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 11, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0x7f230ce84b4bf86f8ee22ffb5c145e3ef3ddf2a76da4936a33f33cebdb63a48a"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV1), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a327088bb2b13151449d8313c281d0006d12e8453e863637b746898b6ad5a6")}, + } + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ BatchHash: event2.BatchHash, TotalL1MessagePopped: 42, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } func TestValidateBatchCodecv2(t *testing.T) { - chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -666,50 +677,56 @@ func TestValidateBatchCodecv2(t *testing.T) { block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 
:= &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0xaccf37a0b974f2058692d366b2ea85502c99db4a0bcb9b77903b49bf866a463b"), StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV2), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")}, + } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 11, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0x62ec61e1fdb334868ffd471df601f6858e692af01d42b5077c805a9fd4558c91"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, 
chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV2), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + } + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ BatchHash: event2.BatchHash, TotalL1MessagePopped: 42, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } func TestValidateBatchCodecv3(t *testing.T) { - chainConfig := ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -719,7 +736,7 @@ func TestValidateBatchCodecv3(t *testing.T) { block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0x015eb56fb95bf9a06157cfb8389ba7c2b6b08373e22581ac2ba387003708265d"), @@ -727,46 +744,53 @@ func TestValidateBatchCodecv3(t *testing.T) { WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + 
BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")}, + } + + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 11, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0x382cb0d507e3d7507f556c52e05f76b05e364ad26205e7f62c95967a19c2f35d"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + } + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ BatchHash: event2.BatchHash, TotalL1MessagePopped: 42, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, 
parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } func TestValidateBatchUpgrades(t *testing.T) { - chainConfig := ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(3), CurieBlock: big.NewInt(14), DarwinTime: func() *uint64 { t := uint64(1684762320); return &t }()} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0x4605465b7470c8565b123330d7186805caf9a7f2656d8e9e744b62e14ca22c3d"), @@ -774,82 +798,97 @@ func TestValidateBatchUpgrades(t *testing.T) { WithdrawRoot: chunk1.Blocks[len(chunk1.Blocks)-1].WithdrawRoot, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1}, chainConfig, nil) + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV0), + BlobVersionedHashes: nil, + } + + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil) assert.NoError(t, err) assert.Equal(t, uint64(2), endBlock1) block2 := readBlockFromJSON(t, "./testdata/blockTrace_03.json") chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 0, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: 
common.HexToHash("0xc4af33bce87aa702edc3ad4b7d34730d25719427704e250787f99e0f55049252"), StateRoot: chunk2.Blocks[len(chunk2.Blocks)-1].Header.Root, WithdrawRoot: chunk2.Blocks[len(chunk2.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk2}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV1), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a688c6e137310df38a62f5ad1e5119b8cb0455c386a9a4079b14fe92a239aa")}, + } + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil) assert.NoError(t, err) assert.Equal(t, uint64(3), endBlock2) block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ BatchHash: event2.BatchHash, TotalL1MessagePopped: 0, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) event3 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(2), BatchHash: common.HexToHash("0x9f87f2de2019ed635f867b1e61be6a607c3174ced096f370fd18556c38833c62"), StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } - endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex.Uint64(), event3, parentBatchMeta3, nil, []*encoding.Chunk{chunk3}, chainConfig, nil) + committedBatchMeta3 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV1), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01ea66c4de196d36e2c3a5d7c0045100b9e46ef65be8f7a921ef20e6f2e99ebd")}, + } + endBlock3, finalizedBatchMeta3, err 
:= validateBatch(event3.BatchIndex.Uint64(), event3, parentFinalizedBatchMeta3, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock3) block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta4 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta4 := &rawdb.FinalizedBatchMeta{ BatchHash: event3.BatchHash, TotalL1MessagePopped: 11, StateRoot: event3.StateRoot, WithdrawRoot: event3.WithdrawRoot, } - assert.Equal(t, parentBatchMeta4, finalizedBatchMeta3) + assert.Equal(t, parentFinalizedBatchMeta4, finalizedBatchMeta3) event4 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(3), BatchHash: common.HexToHash("0xd33332aef8efbc9a0be4c4694088ac0dd052d2d3ad3ffda5e4c2010825e476bc"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex.Uint64(), event4, parentBatchMeta4, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta4 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + } + endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex.Uint64(), event4, parentFinalizedBatchMeta4, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock4) - parentBatchMeta5 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta5 := &rawdb.FinalizedBatchMeta{ BatchHash: event4.BatchHash, TotalL1MessagePopped: 42, StateRoot: event4.StateRoot, WithdrawRoot: event4.WithdrawRoot, } - assert.Equal(t, parentBatchMeta5, finalizedBatchMeta4) + assert.Equal(t, parentFinalizedBatchMeta5, finalizedBatchMeta4) } func TestValidateBatchInFinalizeByBundle(t *testing.T) { - chainConfig := 
¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: func() *uint64 { t := uint64(0); return &t }()} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") block2 := readBlockFromJSON(t, "./testdata/blockTrace_03.json") block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") @@ -867,29 +906,49 @@ func TestValidateBatchInFinalizeByBundle(t *testing.T) { WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock1, finalizedBatchMeta1, err := validateBatch(0, event, &rawdb.FinalizedBatchMeta{}, nil, []*encoding.Chunk{chunk1}, chainConfig, nil) + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01bbc6b98d7d3783730b6208afac839ad37dcf211b9d9e7c83a5f9d02125ddd7")}, + } + + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01c81e5696e00f1e6e7d76c197f74ed51650147c49c4e6e5b0b702cdcc54352a")}, + } + + committedBatchMeta3 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x012e15203534ae3f4cbe1b0f58fe6db6e5c29432115a8ece6ef5550bf2ffce4c")}, + } + + committedBatchMeta4 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + } + + endBlock1, finalizedBatchMeta1, err := validateBatch(0, event, &rawdb.FinalizedBatchMeta{}, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil) assert.NoError(t, err) assert.Equal(t, uint64(2), endBlock1) - endBlock2, finalizedBatchMeta2, err := validateBatch(1, event, finalizedBatchMeta1, nil, []*encoding.Chunk{chunk2}, chainConfig, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(1, event, finalizedBatchMeta1, committedBatchMeta2, 
[]*encoding.Chunk{chunk2}, nil) assert.NoError(t, err) assert.Equal(t, uint64(3), endBlock2) - endBlock3, finalizedBatchMeta3, err := validateBatch(2, event, finalizedBatchMeta2, nil, []*encoding.Chunk{chunk3}, chainConfig, nil) + endBlock3, finalizedBatchMeta3, err := validateBatch(2, event, finalizedBatchMeta2, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock3) - endBlock4, finalizedBatchMeta4, err := validateBatch(3, event, finalizedBatchMeta3, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + endBlock4, finalizedBatchMeta4, err := validateBatch(3, event, finalizedBatchMeta3, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock4) - parentBatchMeta5 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta5 := &rawdb.FinalizedBatchMeta{ BatchHash: event.BatchHash, TotalL1MessagePopped: 42, StateRoot: event.StateRoot, WithdrawRoot: event.WithdrawRoot, } - assert.Equal(t, parentBatchMeta5, finalizedBatchMeta4) + assert.Equal(t, parentFinalizedBatchMeta5, finalizedBatchMeta4) } func readBlockFromJSON(t *testing.T, filename string) *encoding.Block { From 0942cb789911ccca2e4033ec5f96bac4fb9860d7 Mon Sep 17 00:00:00 2001 From: georgehao Date: Tue, 22 Oct 2024 09:33:49 +0800 Subject: [PATCH 34/41] add more log to track tx (#1079) --- core/txpool/legacypool/legacypool.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 14f016c81b70..16a4e8df10c7 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -414,6 +414,7 @@ func (pool *LegacyPool) loop() { if time.Since(pool.beats[addr]) > pool.config.Lifetime { list := pool.queue[addr].Flatten() for _, tx := range list { + log.Debug("evict queue tx for timeout", "tx", tx.Hash().String()) pool.removeTx(tx.Hash(), true, true) } queuedEvictionMeter.Mark(int64(len(list))) 
@@ -962,6 +963,9 @@ func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, local if pool.all.Get(hash) == nil && !addAll { log.Error("Missing transaction in lookup set, please report the issue", "hash", hash) } + + log.Debug("Enqueued transaction", "hash", hash.String(), "from", from, "to", tx.To(), "new tx", !addAll) + if addAll { pool.all.Add(tx, local) pool.priced.Put(tx, local) @@ -1015,6 +1019,9 @@ func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *typ // Nothing was replaced, bump the pending counter pendingGauge.Inc(1) } + + log.Debug("Promoted transaction from queue to pending", "hash", hash.String(), "from", addr, "to", tx.To()) + // Set the potentially new pending nonce and notify any subsystems of the new tx pool.pendingNonces.set(addr, tx.Nonce()+1) @@ -1199,6 +1206,9 @@ func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bo if tx == nil { return 0 } + + log.Debug("remove tx", "hash", hash, "outofbound", outofbound) + addr, _ := types.Sender(pool.signer, tx) // already validated during insertion // If after deletion there are no more transactions belonging to this account, @@ -1859,6 +1869,9 @@ func (pool *LegacyPool) calculateTxsLifecycle(txs types.Transactions, t time.Tim for _, tx := range txs { if tx.Time().Before(t) { txLifecycle := t.Sub(tx.Time()) + if txLifecycle >= time.Minute*30 { + log.Debug("calculate tx life cycle, cost over 30 minutes", "tx", tx.Hash().String(), "txLifecycle(s)", txLifecycle.Seconds()) + } txLifecycleTimer.Update(txLifecycle) } } From 3e138a9f231af5ca45b0fd59b362fcb1bbeb3675 Mon Sep 17 00:00:00 2001 From: 0xmountaintop <37070449+0xmountaintop@users.noreply.github.com> Date: Wed, 23 Oct 2024 16:48:19 +0800 Subject: [PATCH 35/41] fix sepolia deployment issues (#1071) * Revert "cmd/devp2p, eth: drop eth/66 (#28239)" This reverts commit bc6d184872889224480cf9df58b0539b210ffa9e. 
* hack shanghaiBlock * disable time-based forks in ForkID --------- Co-authored-by: colinlyguo --- cmd/devp2p/internal/ethtest/chain_test.go | 6 +- cmd/devp2p/internal/ethtest/helpers.go | 13 +- cmd/devp2p/internal/ethtest/suite.go | 38 ++-- cmd/devp2p/internal/ethtest/types.go | 26 +-- core/forkid/forkid.go | 26 +-- core/forkid/forkid_test.go | 25 +++ eth/downloader/downloader_test.go | 136 ++++++------ eth/downloader/fetchers.go | 8 +- eth/downloader/fetchers_concurrent_bodies.go | 2 +- eth/downloader/fetchers_concurrent_headers.go | 2 +- .../fetchers_concurrent_receipts.go | 2 +- eth/downloader/skeleton.go | 2 +- eth/downloader/skeleton_test.go | 6 +- eth/fetcher/block_fetcher.go | 4 +- eth/fetcher/block_fetcher_test.go | 4 +- eth/handler.go | 2 +- eth/handler_eth.go | 4 +- eth/handler_eth_test.go | 15 +- eth/protocols/eth/handler.go | 66 ++++-- eth/protocols/eth/handler_test.go | 208 ++++++++++++++---- eth/protocols/eth/handlers.go | 138 ++++++++---- eth/protocols/eth/handshake_test.go | 3 +- eth/protocols/eth/peer.go | 98 ++++++--- eth/protocols/eth/protocol.go | 197 +++++++++-------- eth/protocols/eth/protocol_test.go | 102 +++++---- eth/sync_test.go | 2 +- params/config.go | 12 +- 27 files changed, 719 insertions(+), 428 deletions(-) diff --git a/cmd/devp2p/internal/ethtest/chain_test.go b/cmd/devp2p/internal/ethtest/chain_test.go index 02f00e0a2bf4..5f3d498eb593 100644 --- a/cmd/devp2p/internal/ethtest/chain_test.go +++ b/cmd/devp2p/internal/ethtest/chain_test.go @@ -145,7 +145,7 @@ func TestChain_GetHeaders(t *testing.T) { }{ { req: GetBlockHeaders{ - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{Number: uint64(2)}, Amount: uint64(5), Skip: 1, @@ -162,7 +162,7 @@ func TestChain_GetHeaders(t *testing.T) { }, { req: GetBlockHeaders{ - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{Number: 
uint64(chain.Len() - 1)}, Amount: uint64(3), Skip: 0, @@ -177,7 +177,7 @@ func TestChain_GetHeaders(t *testing.T) { }, { req: GetBlockHeaders{ - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{Hash: chain.Head().Hash()}, Amount: uint64(1), Skip: 0, diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go index ca082dce15e4..e385a0b0c6e4 100644 --- a/cmd/devp2p/internal/ethtest/helpers.go +++ b/cmd/devp2p/internal/ethtest/helpers.go @@ -62,6 +62,7 @@ func (s *Suite) dial() (*Conn, error) { } // set default p2p capabilities conn.caps = []p2p.Cap{ + {Name: "eth", Version: 66}, {Name: "eth", Version: 67}, {Name: "eth", Version: 68}, } @@ -236,8 +237,8 @@ func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message { return errorf("could not get headers for inbound header request: %v", err) } resp := &BlockHeaders{ - RequestId: msg.ReqID(), - BlockHeadersRequest: eth.BlockHeadersRequest(headers), + RequestId: msg.ReqID(), + BlockHeadersPacket: eth.BlockHeadersPacket(headers), } if err := c.Write(resp); err != nil { return errorf("could not write to connection: %v", err) @@ -266,7 +267,7 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, reqID uint if !ok { return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg)) } - headers := []*types.Header(resp.BlockHeadersRequest) + headers := []*types.Header(resp.BlockHeadersPacket) return headers, nil } @@ -378,7 +379,7 @@ func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block) error { conn.SetReadDeadline(time.Now().Add(20 * time.Second)) // create request req := &GetBlockHeaders{ - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{Hash: block.Hash()}, Amount: 1, }, @@ -603,8 +604,8 @@ func (s *Suite) hashAnnounce() error { pretty.Sdump(blockHeaderReq)) } err = 
sendConn.Write(&BlockHeaders{ - RequestId: blockHeaderReq.ReqID(), - BlockHeadersRequest: eth.BlockHeadersRequest{nextBlock.Header()}, + RequestId: blockHeaderReq.ReqID(), + BlockHeadersPacket: eth.BlockHeadersPacket{nextBlock.Header()}, }) if err != nil { return fmt.Errorf("failed to write to connection: %v", err) diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index 98bdd966849b..77d834e8960b 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -112,7 +112,7 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) { } // write request req := &GetBlockHeaders{ - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{Hash: s.chain.blocks[1].Hash()}, Amount: 2, Skip: 1, @@ -150,7 +150,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) { // create two requests req1 := &GetBlockHeaders{ RequestId: uint64(111), - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{ Hash: s.chain.blocks[1].Hash(), }, @@ -161,7 +161,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) { } req2 := &GetBlockHeaders{ RequestId: uint64(222), - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{ Hash: s.chain.blocks[1].Hash(), }, @@ -201,10 +201,10 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) { if err != nil { t.Fatalf("failed to get expected headers for request 2: %v", err) } - if !headersMatch(expected1, headers1.BlockHeadersRequest) { + if !headersMatch(expected1, headers1.BlockHeadersPacket) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) } - if !headersMatch(expected2, headers2.BlockHeadersRequest) { + if !headersMatch(expected2, headers2.BlockHeadersPacket) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2) } } 
@@ -224,7 +224,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) { reqID := uint64(1234) request1 := &GetBlockHeaders{ RequestId: reqID, - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{ Number: 1, }, @@ -233,7 +233,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) { } request2 := &GetBlockHeaders{ RequestId: reqID, - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{ Number: 33, }, @@ -270,10 +270,10 @@ func (s *Suite) TestSameRequestID(t *utesting.T) { if err != nil { t.Fatalf("failed to get expected block headers: %v", err) } - if !headersMatch(expected1, headers1.BlockHeadersRequest) { + if !headersMatch(expected1, headers1.BlockHeadersPacket) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) } - if !headersMatch(expected2, headers2.BlockHeadersRequest) { + if !headersMatch(expected2, headers2.BlockHeadersPacket) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2) } } @@ -290,7 +290,7 @@ func (s *Suite) TestZeroRequestID(t *utesting.T) { t.Fatalf("peering failed: %v", err) } req := &GetBlockHeaders{ - GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{Number: 0}, Amount: 2, }, @@ -322,7 +322,7 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) { // create block bodies request req := &GetBlockBodies{ RequestId: uint64(55), - GetBlockBodiesRequest: eth.GetBlockBodiesRequest{ + GetBlockBodiesPacket: eth.GetBlockBodiesPacket{ s.chain.blocks[54].Hash(), s.chain.blocks[75].Hash(), }, @@ -336,11 +336,11 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) { if !ok { t.Fatalf("unexpected: %s", pretty.Sdump(msg)) } - bodies := resp.BlockBodiesResponse + bodies := resp.BlockBodiesPacket t.Logf("received %d block bodies", len(bodies)) - if len(bodies) != len(req.GetBlockBodiesRequest) { + if 
len(bodies) != len(req.GetBlockBodiesPacket) { t.Fatalf("wrong bodies in response: expected %d bodies, "+ - "got %d", len(req.GetBlockBodiesRequest), len(bodies)) + "got %d", len(req.GetBlockBodiesPacket), len(bodies)) } } @@ -481,8 +481,8 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) { hashes = append(hashes, hash) } getTxReq := &GetPooledTransactions{ - RequestId: 1234, - GetPooledTransactionsRequest: hashes, + RequestId: 1234, + GetPooledTransactionsPacket: hashes, } if err = conn.Write(getTxReq); err != nil { t.Fatalf("could not write to conn: %v", err) @@ -490,7 +490,7 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) { // check that all received transactions match those that were sent to node switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) { case *PooledTransactions: - for _, gotTx := range msg.PooledTransactionsResponse { + for _, gotTx := range msg.PooledTransactionsPacket { if _, exists := hashMap[gotTx.Hash()]; !exists { t.Fatalf("unexpected tx received: %v", gotTx.Hash()) } @@ -547,8 +547,8 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) { msg := conn.readAndServe(s.chain, timeout) switch msg := msg.(type) { case *GetPooledTransactions: - if len(msg.GetPooledTransactionsRequest) != len(hashes) { - t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest)) + if len(msg.GetPooledTransactionsPacket) != len(hashes) { + t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket)) } return diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go index 339f4e713755..8fa21af05b3b 100644 --- a/cmd/devp2p/internal/ethtest/types.go +++ b/cmd/devp2p/internal/ethtest/types.go @@ -99,24 +99,24 @@ func (msg Transactions) Code() int { return 18 } func (msg Transactions) ReqID() uint64 { return 18 } // GetBlockHeaders represents a block header query. 
-type GetBlockHeaders eth.GetBlockHeadersPacket +type GetBlockHeaders eth.GetBlockHeadersPacket66 func (msg GetBlockHeaders) Code() int { return 19 } func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId } -type BlockHeaders eth.BlockHeadersPacket +type BlockHeaders eth.BlockHeadersPacket66 func (msg BlockHeaders) Code() int { return 20 } func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId } // GetBlockBodies represents a GetBlockBodies request -type GetBlockBodies eth.GetBlockBodiesPacket +type GetBlockBodies eth.GetBlockBodiesPacket66 func (msg GetBlockBodies) Code() int { return 21 } func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId } // BlockBodies is the network packet for block content distribution. -type BlockBodies eth.BlockBodiesPacket +type BlockBodies eth.BlockBodiesPacket66 func (msg BlockBodies) Code() int { return 22 } func (msg BlockBodies) ReqID() uint64 { return msg.RequestId } @@ -128,7 +128,7 @@ func (msg NewBlock) Code() int { return 23 } func (msg NewBlock) ReqID() uint64 { return 0 } // NewPooledTransactionHashes66 is the network packet for the tx hash propagation message. 
-type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket67 +type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket66 func (msg NewPooledTransactionHashes66) Code() int { return 24 } func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 } @@ -139,12 +139,12 @@ type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket68 func (msg NewPooledTransactionHashes) Code() int { return 24 } func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 } -type GetPooledTransactions eth.GetPooledTransactionsPacket +type GetPooledTransactions eth.GetPooledTransactionsPacket66 func (msg GetPooledTransactions) Code() int { return 25 } func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId } -type PooledTransactions eth.PooledTransactionsPacket +type PooledTransactions eth.PooledTransactionsPacket66 func (msg PooledTransactions) Code() int { return 26 } func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId } @@ -180,25 +180,25 @@ func (c *Conn) Read() Message { case (Status{}).Code(): msg = new(Status) case (GetBlockHeaders{}).Code(): - ethMsg := new(eth.GetBlockHeadersPacket) + ethMsg := new(eth.GetBlockHeadersPacket66) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*GetBlockHeaders)(ethMsg) case (BlockHeaders{}).Code(): - ethMsg := new(eth.BlockHeadersPacket) + ethMsg := new(eth.BlockHeadersPacket66) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*BlockHeaders)(ethMsg) case (GetBlockBodies{}).Code(): - ethMsg := new(eth.GetBlockBodiesPacket) + ethMsg := new(eth.GetBlockBodiesPacket66) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*GetBlockBodies)(ethMsg) case (BlockBodies{}).Code(): - ethMsg := new(eth.BlockBodiesPacket) + ethMsg := new(eth.BlockBodiesPacket66) if err := 
rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } @@ -217,13 +217,13 @@ func (c *Conn) Read() Message { } msg = new(NewPooledTransactionHashes66) case (GetPooledTransactions{}.Code()): - ethMsg := new(eth.GetPooledTransactionsPacket) + ethMsg := new(eth.GetPooledTransactionsPacket66) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*GetPooledTransactions)(ethMsg) case (PooledTransactions{}.Code()): - ethMsg := new(eth.PooledTransactionsPacket) + ethMsg := new(eth.PooledTransactionsPacket66) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go index 901fd8dc8c68..91b568774c68 100644 --- a/core/forkid/forkid.go +++ b/core/forkid/forkid.go @@ -77,7 +77,7 @@ func NewID(config *params.ChainConfig, genesis *types.Block, head, time uint64) hash := crc32.ChecksumIEEE(genesis.Hash().Bytes()) // Calculate the current fork checksum and the next fork block - forksByBlock, forksByTime := gatherForks(config, genesis.Time()) + forksByBlock, _ := gatherForks(config, genesis.Time()) for _, fork := range forksByBlock { if fork <= head { // Fork already passed, checksum the previous hash and the fork number @@ -86,14 +86,6 @@ func NewID(config *params.ChainConfig, genesis *types.Block, head, time uint64) } return ID{Hash: checksumToBytes(hash), Next: fork} } - for _, fork := range forksByTime { - if fork <= time { - // Fork already passed, checksum the previous hash and fork timestamp - hash = checksumUpdate(hash, fork) - continue - } - return ID{Hash: checksumToBytes(hash), Next: fork} - } return ID{Hash: checksumToBytes(hash), Next: 0} } @@ -134,9 +126,8 @@ func NewStaticFilter(config *params.ChainConfig, genesis *types.Block) Filter { func newFilter(config *params.ChainConfig, genesis *types.Block, headfn func() (uint64, uint64)) Filter { 
// Calculate the all the valid fork hash and fork next combos var ( - forksByBlock, forksByTime = gatherForks(config, genesis.Time()) - forks = append(append([]uint64{}, forksByBlock...), forksByTime...) - sums = make([][4]byte, len(forks)+1) // 0th is the genesis + forks, _ = gatherForks(config, genesis.Time()) + sums = make([][4]byte, len(forks)+1) // 0th is the genesis ) hash := crc32.ChecksumIEEE(genesis.Hash().Bytes()) sums[0] = checksumToBytes(hash) @@ -147,10 +138,6 @@ func newFilter(config *params.ChainConfig, genesis *types.Block, headfn func() ( // Add two sentries to simplify the fork checks and don't require special // casing the last one. forks = append(forks, math.MaxUint64) // Last fork will never be passed - if len(forksByTime) == 0 { - // In purely block based forks, avoid the sentry spilling into timestapt territory - forksByBlock = append(forksByBlock, math.MaxUint64) // Last fork will never be passed - } // Create a validator that will filter out incompatible chains return func(id ID) error { // Run the fork checksum validation ruleset: @@ -172,13 +159,10 @@ func newFilter(config *params.ChainConfig, genesis *types.Block, headfn func() ( // the remote, but at this current point in time we don't have enough // information. // 4. Reject in all other cases. - block, time := headfn() + block, _ := headfn() for i, fork := range forks { // Pick the head comparison based on fork progression head := block - if i >= len(forksByBlock) { - head = time - } // If our head is beyond this fork, continue to the next (we have a dummy // fork of maxuint64 as the last item to always fail this check eventually). if head >= fork { @@ -189,7 +173,7 @@ func newFilter(config *params.ChainConfig, genesis *types.Block, headfn func() ( if sums[i] == id.Hash { // Fork checksum matched, check if a remote future fork block already passed // locally without the local node being aware of it (rule #1a). 
- if id.Next > 0 && (head >= id.Next || (id.Next > timestampThreshold && time >= id.Next)) { + if id.Next > 0 && head >= id.Next { return ErrLocalIncompatibleOrStale } // Haven't passed locally a remote-only fork, accept the connection (rule #1b). diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index 8b98cfa75fde..54d7bff8ba5d 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -448,3 +448,28 @@ func TestTimeBasedForkInGenesis(t *testing.T) { } } } + +func TestScroll(t *testing.T) { + tests := []struct { + config *params.ChainConfig + genesis *types.Block + head uint64 + time uint64 + want ID + }{ + // Scroll test cases + { + params.ScrollMainnetChainConfig, + core.DefaultScrollMainnetGenesisBlock().ToBlock(), + 10281275, + 1729250728, // omit timestamp-based forks + ID{Hash: checksumToBytes(0x18d3c8d9), Next: 0}, // 0x18d3c8d9 is fetched from develop branch + }, + } + + for i, tt := range tests { + if have := NewID(tt.config, tt.genesis, tt.head, tt.time); have != tt.want { + t.Errorf("test %d: fork ID mismatch: have %x, want %x", i, have, tt.want) + } + } +} diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 273114748a78..bc0a289991f9 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -177,7 +177,7 @@ func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header { // function can be used to retrieve batches of headers from the particular peer. 
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { // Service the header query via the live handler code - rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{ + rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{ Hash: origin, }, @@ -205,7 +205,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersRequest)(&headers), + Res: (*eth.BlockHeadersPacket)(&headers), Meta: hashes, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -221,7 +221,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i // function can be used to retrieve batches of headers from the particular peer. func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { // Service the header query via the live handler code - rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{ + rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersPacket{ Origin: eth.HashOrNumber{ Number: origin, }, @@ -249,7 +249,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersRequest)(&headers), + Res: (*eth.BlockHeadersPacket)(&headers), Meta: hashes, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -286,7 +286,7 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et } res := ð.Response{ Req: req, - Res: (*eth.BlockBodiesResponse)(&bodies), + Res: (*eth.BlockBodiesPacket)(&bodies), Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -317,7 +317,7 @@ 
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan * } res := ð.Response{ Req: req, - Res: (*eth.ReceiptsResponse)(&receipts), + Res: (*eth.ReceiptsPacket)(&receipts), Meta: hashes, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -437,9 +437,9 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) { } } -func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) } -func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) } -func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) } +func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, eth.ETH66, FullSync) } +func TestCanonicalSynchronisation66Snap(t *testing.T) { testCanonSync(t, eth.ETH66, SnapSync) } +func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) } func TestCanonicalSynchronisation67Full(t *testing.T) { testCanonSync(t, eth.ETH67, FullSync) } func TestCanonicalSynchronisation67Snap(t *testing.T) { testCanonSync(t, eth.ETH67, SnapSync) } func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) } @@ -461,8 +461,8 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that if a large batch of blocks are being downloaded, it is throttled // until the cached blocks are retrieved. 
-func TestThrottling68Full(t *testing.T) { testThrottling(t, eth.ETH68, FullSync) } -func TestThrottling68Snap(t *testing.T) { testThrottling(t, eth.ETH68, SnapSync) } +func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) } +func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) } func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) } func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) } @@ -543,9 +543,9 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { // Tests that simple synchronization against a forked chain works correctly. In // this test common ancestor lookup should *not* be short circuited, and a full // binary search should be executed. -func TestForkedSync68Full(t *testing.T) { testForkedSync(t, eth.ETH68, FullSync) } -func TestForkedSync68Snap(t *testing.T) { testForkedSync(t, eth.ETH68, SnapSync) } -func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) } +func TestForkedSync66Full(t *testing.T) { testForkedSync(t, eth.ETH66, FullSync) } +func TestForkedSync66Snap(t *testing.T) { testForkedSync(t, eth.ETH66, SnapSync) } +func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) } func TestForkedSync67Full(t *testing.T) { testForkedSync(t, eth.ETH67, FullSync) } func TestForkedSync67Snap(t *testing.T) { testForkedSync(t, eth.ETH67, SnapSync) } func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) } @@ -573,9 +573,9 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that synchronising against a much shorter but much heavier fork works // currently and is not dropped. 
-func TestHeavyForkedSync68Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, FullSync) } -func TestHeavyForkedSync68Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, SnapSync) } -func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) } +func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) } +func TestHeavyForkedSync66Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, SnapSync) } +func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) } func TestHeavyForkedSync67Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, FullSync) } func TestHeavyForkedSync67Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, SnapSync) } func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) } @@ -605,9 +605,9 @@ func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that chain forks are contained within a certain interval of the current // chain head, ensuring that malicious peers cannot waste resources by feeding // long dead chains. 
-func TestBoundedForkedSync68Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, FullSync) } -func TestBoundedForkedSync68Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, SnapSync) } -func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) } +func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FullSync) } +func TestBoundedForkedSync66Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, SnapSync) } +func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) } func TestBoundedForkedSync67Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, FullSync) } func TestBoundedForkedSync67Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, SnapSync) } func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) } @@ -636,14 +636,14 @@ func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that chain forks are contained within a certain interval of the current // chain head for short but heavy forks too. These are a bit special because they // take different ancestor lookup paths. 
-func TestBoundedHeavyForkedSync68Full(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, FullSync) +func TestBoundedHeavyForkedSync66Full(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH66, FullSync) } -func TestBoundedHeavyForkedSync68Snap(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, SnapSync) +func TestBoundedHeavyForkedSync66Snap(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync) } -func TestBoundedHeavyForkedSync68Light(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, LightSync) +func TestBoundedHeavyForkedSync66Light(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH66, LightSync) } func TestBoundedHeavyForkedSync67Full(t *testing.T) { testBoundedHeavyForkedSync(t, eth.ETH67, FullSync) @@ -678,9 +678,9 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { } // Tests that a canceled download wipes all previously accumulated state. -func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) } -func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) } -func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) } +func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) } +func TestCancel66Snap(t *testing.T) { testCancel(t, eth.ETH66, SnapSync) } +func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) } func TestCancel67Full(t *testing.T) { testCancel(t, eth.ETH67, FullSync) } func TestCancel67Snap(t *testing.T) { testCancel(t, eth.ETH67, SnapSync) } func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) } @@ -708,9 +708,9 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) { } // Tests that synchronisation from multiple peers works as intended (multi thread sanity test). 
-func TestMultiSynchronisation68Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, FullSync) } -func TestMultiSynchronisation68Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, SnapSync) } -func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) } +func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) } +func TestMultiSynchronisation66Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, SnapSync) } +func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) } func TestMultiSynchronisation67Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, FullSync) } func TestMultiSynchronisation67Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, SnapSync) } func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) } @@ -735,9 +735,9 @@ func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) { // Tests that synchronisations behave well in multi-version protocol environments // and not wreak havoc on other nodes in the network. 
-func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) } -func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) } -func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) } +func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FullSync) } +func TestMultiProtoSynchronisation66Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH66, SnapSync) } +func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) } func TestMultiProtoSynchronisation67Full(t *testing.T) { testMultiProtoSync(t, eth.ETH67, FullSync) } func TestMultiProtoSynchronisation67Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH67, SnapSync) } func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) } @@ -750,7 +750,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { chain := testChainBase.shorten(blockCacheMaxItems - 15) // Create peers of every type - tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:]) + tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:]) tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:]) // Synchronise with the requested peer and make sure all blocks were retrieved @@ -760,7 +760,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { assertOwnChain(t, tester, len(chain.blocks)) // Check that no peers have been dropped off - for _, version := range []int{68, 67} { + for _, version := range []int{66, 67} { peer := fmt.Sprintf("peer %d", version) if _, ok := tester.peers[peer]; !ok { t.Errorf("%s dropped", peer) @@ -770,9 +770,9 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that if a block is empty (e.g. header only), no body request should be // made, and instead the header should be assembled into a whole block in itself. 
-func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) } -func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) } -func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) } +func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FullSync) } +func TestEmptyShortCircuit66Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, SnapSync) } +func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) } func TestEmptyShortCircuit67Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, FullSync) } func TestEmptyShortCircuit67Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, SnapSync) } func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) } @@ -821,9 +821,9 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { // Tests that headers are enqueued continuously, preventing malicious nodes from // stalling the downloader by feeding gapped header chains. 
-func TestMissingHeaderAttack68Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, FullSync) } -func TestMissingHeaderAttack68Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, SnapSync) } -func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) } +func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FullSync) } +func TestMissingHeaderAttack66Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, SnapSync) } +func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) } func TestMissingHeaderAttack67Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, FullSync) } func TestMissingHeaderAttack67Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, SnapSync) } func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) } @@ -850,9 +850,9 @@ func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { // Tests that if requested headers are shifted (i.e. first is missing), the queue // detects the invalid numbering. 
-func TestShiftedHeaderAttack68Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, FullSync) } -func TestShiftedHeaderAttack68Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) } -func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) } +func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FullSync) } +func TestShiftedHeaderAttack66Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) } +func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) } func TestShiftedHeaderAttack67Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, FullSync) } func TestShiftedHeaderAttack67Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) } func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) } @@ -880,14 +880,14 @@ func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { // Tests that a peer advertising a high TD doesn't get to stall the downloader // afterwards by not sending any useful hashes. 
-func TestHighTDStarvationAttack68Full(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, FullSync) +func TestHighTDStarvationAttack66Full(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH66, FullSync) } -func TestHighTDStarvationAttack68Snap(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, SnapSync) +func TestHighTDStarvationAttack66Snap(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH66, SnapSync) } -func TestHighTDStarvationAttack68Light(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, LightSync) +func TestHighTDStarvationAttack66Light(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH66, LightSync) } func TestHighTDStarvationAttack67Full(t *testing.T) { testHighTDStarvationAttack(t, eth.ETH67, FullSync) @@ -911,7 +911,7 @@ func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) { } // Tests that misbehaving peers are disconnected, whilst behaving ones are not. -func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) } +func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) } func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) } func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { @@ -960,9 +960,9 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { // Tests that synchronisation progress (origin block number, current block number // and highest block number) is tracked and updated correctly. 
-func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) } -func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) } -func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) } +func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) } +func TestSyncProgress66Snap(t *testing.T) { testSyncProgress(t, eth.ETH66, SnapSync) } +func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) } func TestSyncProgress67Full(t *testing.T) { testSyncProgress(t, eth.ETH67, FullSync) } func TestSyncProgress67Snap(t *testing.T) { testSyncProgress(t, eth.ETH67, SnapSync) } func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) } @@ -1040,9 +1040,9 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync // Tests that synchronisation progress (origin block number and highest block // number) is tracked and updated correctly in case of a fork (or manual head // revertal). 
-func TestForkedSyncProgress68Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, FullSync) } -func TestForkedSyncProgress68Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, SnapSync) } -func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) } +func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FullSync) } +func TestForkedSyncProgress66Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, SnapSync) } +func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) } func TestForkedSyncProgress67Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, FullSync) } func TestForkedSyncProgress67Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, SnapSync) } func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) } @@ -1114,9 +1114,9 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { // Tests that if synchronisation is aborted due to some failure, then the progress // origin is not updated in the next sync cycle, as it should be considered the // continuation of the previous sync and not a new instance. 
-func TestFailedSyncProgress68Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, FullSync) } -func TestFailedSyncProgress68Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, SnapSync) } -func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) } +func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FullSync) } +func TestFailedSyncProgress66Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, SnapSync) } +func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) } func TestFailedSyncProgress67Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, FullSync) } func TestFailedSyncProgress67Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, SnapSync) } func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) } @@ -1183,9 +1183,9 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { // Tests that if an attacker fakes a chain height, after the attack is detected, // the progress height is successfully reduced at the next sync invocation. 
-func TestFakedSyncProgress68Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, FullSync) } -func TestFakedSyncProgress68Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, SnapSync) } -func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) } +func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) } +func TestFakedSyncProgress66Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, SnapSync) } +func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) } func TestFakedSyncProgress67Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, FullSync) } func TestFakedSyncProgress67Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, SnapSync) } func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) } @@ -1330,10 +1330,8 @@ func TestRemoteHeaderRequestSpan(t *testing.T) { // Tests that peers below a pre-configured checkpoint block are prevented from // being fast-synced from, avoiding potential cheap eclipse attacks. 
-func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) } -func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) } -func TestBeaconSync67Full(t *testing.T) { testBeaconSync(t, eth.ETH67, FullSync) } -func TestBeaconSync67Snap(t *testing.T) { testBeaconSync(t, eth.ETH67, SnapSync) } +func TestBeaconSync66Full(t *testing.T) { testBeaconSync(t, eth.ETH66, FullSync) } +func TestBeaconSync66Snap(t *testing.T) { testBeaconSync(t, eth.ETH66, SnapSync) } func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) diff --git a/eth/downloader/fetchers.go b/eth/downloader/fetchers.go index 4fa4e0b73710..a7022240cd52 100644 --- a/eth/downloader/fetchers.go +++ b/eth/downloader/fetchers.go @@ -58,14 +58,14 @@ func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amo case res := <-resCh: // Headers successfully retrieved, update the metrics headerReqTimer.Update(time.Since(start)) - headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest)))) + headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) // Don't reject the packet even if it turns out to be bad, downloader will // disconnect the peer on its own terms. 
Simply delivery the headers to // be processed by the caller res.Done <- nil - return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil + return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil } } @@ -103,13 +103,13 @@ func (d *Downloader) fetchHeadersByNumber(p *peerConnection, number uint64, amou case res := <-resCh: // Headers successfully retrieved, update the metrics headerReqTimer.Update(time.Since(start)) - headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest)))) + headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) // Don't reject the packet even if it turns out to be bad, downloader will // disconnect the peer on its own terms. Simply delivery the headers to // be processed by the caller res.Done <- nil - return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil + return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil } } diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go index 86b97c790aff..2015379a00ae 100644 --- a/eth/downloader/fetchers_concurrent_bodies.go +++ b/eth/downloader/fetchers_concurrent_bodies.go @@ -89,7 +89,7 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the body data and delivering it to the downloader's queue. 
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesResponse).Unpack() + txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesPacket).Unpack() hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes} accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2]) diff --git a/eth/downloader/fetchers_concurrent_headers.go b/eth/downloader/fetchers_concurrent_headers.go index 9eab36772fec..aa4234bca716 100644 --- a/eth/downloader/fetchers_concurrent_headers.go +++ b/eth/downloader/fetchers_concurrent_headers.go @@ -81,7 +81,7 @@ func (q *headerQueue) request(peer *peerConnection, req *fetchRequest, resCh cha // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the header data and delivering it to the downloader's queue. func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - headers := *packet.Res.(*eth.BlockHeadersRequest) + headers := *packet.Res.(*eth.BlockHeadersPacket) hashes := packet.Meta.([]common.Hash) accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh) diff --git a/eth/downloader/fetchers_concurrent_receipts.go b/eth/downloader/fetchers_concurrent_receipts.go index ca4164ef9b96..1692fede382f 100644 --- a/eth/downloader/fetchers_concurrent_receipts.go +++ b/eth/downloader/fetchers_concurrent_receipts.go @@ -88,7 +88,7 @@ func (q *receiptQueue) request(peer *peerConnection, req *fetchRequest, resCh ch // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the receipt data and delivering it to the downloader's queue. 
func (q *receiptQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - receipts := *packet.Res.(*eth.ReceiptsResponse) + receipts := *packet.Res.(*eth.ReceiptsPacket) hashes := packet.Meta.([]common.Hash) // {receipt hashes} accepted, err := q.queue.DeliverReceipts(peer.id, receipts, hashes) diff --git a/eth/downloader/skeleton.go b/eth/downloader/skeleton.go index 6857e6b551d1..fb05cca6dbdd 100644 --- a/eth/downloader/skeleton.go +++ b/eth/downloader/skeleton.go @@ -794,7 +794,7 @@ func (s *skeleton) executeTask(peer *peerConnection, req *headerRequest) { case res := <-resCh: // Headers successfully retrieved, update the metrics - headers := *res.Res.(*eth.BlockHeadersRequest) + headers := *res.Res.(*eth.BlockHeadersPacket) headerReqTimer.Update(time.Since(start)) s.peers.rates.Update(peer.id, eth.BlockHeadersMsg, res.Time, len(headers)) diff --git a/eth/downloader/skeleton_test.go b/eth/downloader/skeleton_test.go index 7c603f61e5d5..6a616881f50c 100644 --- a/eth/downloader/skeleton_test.go +++ b/eth/downloader/skeleton_test.go @@ -173,7 +173,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersRequest)(&headers), + Res: (*eth.BlockHeadersPacket)(&headers), Meta: hashes, Time: 1, Done: make(chan error), @@ -811,7 +811,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) { // Create a peer set to feed headers through peerset := newPeerSet() for _, peer := range tt.peers { - peerset.Register(newPeerConnection(peer.id, eth.ETH67, peer, log.New("id", peer.id))) + peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id))) } // Create a peer dropper to track malicious peers dropped := make(map[string]int) @@ -913,7 +913,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) { skeleton.Sync(tt.newHead, nil, true) } if tt.newPeer != nil { - if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH67, tt.newPeer, log.New("id", 
tt.newPeer.id))); err != nil { + if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil { t.Errorf("test %d: failed to register new peer: %v", i, err) } } diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go index 854ac860f574..2fc6d4fa6e7e 100644 --- a/eth/fetcher/block_fetcher.go +++ b/eth/fetcher/block_fetcher.go @@ -483,7 +483,7 @@ func (f *BlockFetcher) loop() { select { case res := <-resCh: res.Done <- nil - f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersRequest), time.Now().Add(res.Time)) + f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersPacket), time.Now().Add(res.Time)) case <-timeout.C: // The peer didn't respond in time. The request @@ -541,7 +541,7 @@ func (f *BlockFetcher) loop() { case res := <-resCh: res.Done <- nil // Ignoring withdrawals here, since the block fetcher is not used post-merge. - txs, uncles, _ := res.Res.(*eth.BlockBodiesResponse).Unpack() + txs, uncles, _ := res.Res.(*eth.BlockBodiesPacket).Unpack() f.FilterBodies(peer, txs, uncles, time.Now()) case <-timeout.C: diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go index 9bb0ac6bdc04..ef47929a5481 100644 --- a/eth/fetcher/block_fetcher_test.go +++ b/eth/fetcher/block_fetcher_test.go @@ -213,7 +213,7 @@ func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*t } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersRequest)(&headers), + Res: (*eth.BlockHeadersPacket)(&headers), Time: drift, Done: make(chan error, 1), // Ignore the returned status } @@ -255,7 +255,7 @@ func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*typ } res := ð.Response{ Req: req, - Res: (*eth.BlockBodiesResponse)(&bodies), + Res: (*eth.BlockBodiesPacket)(&bodies), Time: drift, Done: make(chan error, 1), // Ignore the returned status } diff --git a/eth/handler.go b/eth/handler.go index 839a45ac33c0..973ec1808b01 100644 --- a/eth/handler.go +++ 
b/eth/handler.go @@ -434,7 +434,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { select { case res := <-resCh: - headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersRequest)) + headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket)) if len(headers) == 0 { // Required blocks are allowed to be missing if the remote // node is not yet synced diff --git a/eth/handler_eth.go b/eth/handler_eth.go index 890827837acc..111981e43128 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -67,7 +67,7 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { case *eth.NewBlockPacket: return h.handleBlockBroadcast(peer, packet.Block, packet.TD) - case *eth.NewPooledTransactionHashesPacket67: + case *eth.NewPooledTransactionHashesPacket66: return h.txFetcher.Notify(peer.ID(), nil, nil, *packet) case *eth.NewPooledTransactionHashesPacket68: @@ -81,7 +81,7 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { } return h.txFetcher.Enqueue(peer.ID(), *packet, false) - case *eth.PooledTransactionsResponse: + case *eth.PooledTransactionsPacket: return h.txFetcher.Enqueue(peer.ID(), *packet, true) default: diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index fe0da63aef1d..e2a66e606a84 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -58,7 +58,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { h.blockBroadcasts.Send(packet.Block) return nil - case *eth.NewPooledTransactionHashesPacket67: + case *eth.NewPooledTransactionHashesPacket66: h.txAnnounces.Send(([]common.Hash)(*packet)) return nil @@ -70,7 +70,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { h.txBroadcasts.Send(([]*types.Transaction)(*packet)) return nil - case *eth.PooledTransactionsResponse: + case *eth.PooledTransactionsPacket: h.txBroadcasts.Send(([]*types.Transaction)(*packet)) return nil @@ -81,6 +81,7 @@ func (h *testEthHandler) Handle(peer 
*eth.Peer, packet eth.Packet) error { // Tests that peers are correctly accepted (or rejected) based on the advertised // fork IDs in the protocol handshake. +func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) } func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) } func TestForkIDSplit68(t *testing.T) { testForkIDSplit(t, eth.ETH68) } @@ -236,6 +237,7 @@ func testForkIDSplit(t *testing.T, protocol uint) { } // Tests that received transactions are added to the local pool. +func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) } func TestRecvTransactions67(t *testing.T) { testRecvTransactions(t, eth.ETH67) } func TestRecvTransactions68(t *testing.T) { testRecvTransactions(t, eth.ETH68) } @@ -294,6 +296,7 @@ func testRecvTransactions(t *testing.T, protocol uint) { } // This test checks that pending transactions are sent. +func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) } func TestSendTransactions67(t *testing.T) { testSendTransactions(t, eth.ETH67) } func TestSendTransactions68(t *testing.T) { testSendTransactions(t, eth.ETH68) } @@ -353,7 +356,7 @@ func testSendTransactions(t *testing.T, protocol uint) { seen := make(map[common.Hash]struct{}) for len(seen) < len(insert) { switch protocol { - case 67, 68: + case 66, 67, 68: select { case hashes := <-anns: for _, hash := range hashes { @@ -379,6 +382,7 @@ func testSendTransactions(t *testing.T, protocol uint) { // Tests that transactions get propagated to all attached peers, either via direct // broadcasts or via announcements/retrievals. 
+func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) } func TestTransactionPropagation67(t *testing.T) { testTransactionPropagation(t, eth.ETH67) } func TestTransactionPropagation68(t *testing.T) { testTransactionPropagation(t, eth.ETH68) } @@ -486,8 +490,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) { defer sourcePipe.Close() defer sinkPipe.Close() - sourcePeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) - sinkPeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) + sourcePeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) + sinkPeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) defer sourcePeer.Close() defer sinkPeer.Close() @@ -539,6 +543,7 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) { // Tests that a propagated malformed block (uncles or transactions don't match // with the hashes in the header) gets discarded and not broadcast forward. +func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) } func TestBroadcastMalformedBlock67(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH67) } func TestBroadcastMalformedBlock68(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH68) } diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index 2a2fd6c23416..07255251f815 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -23,6 +23,7 @@ import ( "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core" + "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/metrics" "github.com/scroll-tech/go-ethereum/p2p" @@ -44,6 +45,10 @@ const ( // nowadays, the practical limit will always be softResponseLimit. 
maxBodiesServe = 1024 + // maxNodeDataServe is the maximum number of state trie nodes to serve. This + // number is there to limit the number of disk lookups. + maxNodeDataServe = 1024 + // maxReceiptsServe is the maximum number of block receipts to serve. This // number is mostly there to limit the number of disk lookups. With block // containing 200+ transactions nowadays, the practical limit will always @@ -99,6 +104,10 @@ func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2 } version := version // Closure + // Path scheme does not support GetNodeData, don't advertise eth66 on it + if version <= ETH66 && backend.Chain().TrieDB().Scheme() == rawdb.PathScheme { + continue + } protocols = append(protocols, p2p.Protocol{ Name: ProtocolName, Version: version, @@ -166,19 +175,36 @@ type Decoder interface { Time() time.Time } +var eth66 = map[uint64]msgHandler{ + NewBlockHashesMsg: handleNewBlockhashes, + NewBlockMsg: handleNewBlock, + TransactionsMsg: handleTransactions, + NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66, + GetBlockHeadersMsg: handleGetBlockHeaders66, + BlockHeadersMsg: handleBlockHeaders66, + GetBlockBodiesMsg: handleGetBlockBodies66, + BlockBodiesMsg: handleBlockBodies66, + GetNodeDataMsg: handleGetNodeData66, + NodeDataMsg: handleNodeData66, + GetReceiptsMsg: handleGetReceipts66, + ReceiptsMsg: handleReceipts66, + GetPooledTransactionsMsg: handleGetPooledTransactions66, + PooledTransactionsMsg: handlePooledTransactions66, +} + var eth67 = map[uint64]msgHandler{ NewBlockHashesMsg: handleNewBlockhashes, NewBlockMsg: handleNewBlock, TransactionsMsg: handleTransactions, - NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes67, - GetBlockHeadersMsg: handleGetBlockHeaders, - BlockHeadersMsg: handleBlockHeaders, - GetBlockBodiesMsg: handleGetBlockBodies, - BlockBodiesMsg: handleBlockBodies, - GetReceiptsMsg: handleGetReceipts, - ReceiptsMsg: handleReceipts, - GetPooledTransactionsMsg: 
handleGetPooledTransactions, - PooledTransactionsMsg: handlePooledTransactions, + NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66, + GetBlockHeadersMsg: handleGetBlockHeaders66, + BlockHeadersMsg: handleBlockHeaders66, + GetBlockBodiesMsg: handleGetBlockBodies66, + BlockBodiesMsg: handleBlockBodies66, + GetReceiptsMsg: handleGetReceipts66, + ReceiptsMsg: handleReceipts66, + GetPooledTransactionsMsg: handleGetPooledTransactions66, + PooledTransactionsMsg: handlePooledTransactions66, } var eth68 = map[uint64]msgHandler{ @@ -186,14 +212,14 @@ var eth68 = map[uint64]msgHandler{ NewBlockMsg: handleNewBlock, TransactionsMsg: handleTransactions, NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes68, - GetBlockHeadersMsg: handleGetBlockHeaders, - BlockHeadersMsg: handleBlockHeaders, - GetBlockBodiesMsg: handleGetBlockBodies, - BlockBodiesMsg: handleBlockBodies, - GetReceiptsMsg: handleGetReceipts, - ReceiptsMsg: handleReceipts, - GetPooledTransactionsMsg: handleGetPooledTransactions, - PooledTransactionsMsg: handlePooledTransactions, + GetBlockHeadersMsg: handleGetBlockHeaders66, + BlockHeadersMsg: handleBlockHeaders66, + GetBlockBodiesMsg: handleGetBlockBodies66, + BlockBodiesMsg: handleBlockBodies66, + GetReceiptsMsg: handleGetReceipts66, + ReceiptsMsg: handleReceipts66, + GetPooledTransactionsMsg: handleGetPooledTransactions66, + PooledTransactionsMsg: handlePooledTransactions66, } // handleMessage is invoked whenever an inbound message is received from a remote @@ -209,10 +235,14 @@ func handleMessage(backend Backend, peer *Peer) error { } defer msg.Discard() - var handlers = eth67 + var handlers = eth66 + if peer.Version() == ETH67 { + handlers = eth67 + } if peer.Version() >= ETH68 { handlers = eth68 } + // Track the amount of time it takes to serve the request and run the handler if metrics.Enabled { h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code) diff --git 
a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index c592905820a4..9dcb7073915f 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -28,6 +28,7 @@ import ( "github.com/scroll-tech/go-ethereum/consensus/ethash" "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/core/rawdb" + "github.com/scroll-tech/go-ethereum/core/state" "github.com/scroll-tech/go-ethereum/core/txpool" "github.com/scroll-tech/go-ethereum/core/txpool/legacypool" "github.com/scroll-tech/go-ethereum/core/types" @@ -150,6 +151,7 @@ func (b *testBackend) Handle(*Peer, Packet) error { } // Tests that block headers can be retrieved from a remote chain based on user queries. +func TestGetBlockHeaders66(t *testing.T) { testGetBlockHeaders(t, ETH66) } func TestGetBlockHeaders67(t *testing.T) { testGetBlockHeaders(t, ETH67) } func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) } @@ -176,29 +178,29 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { // Create a batch of tests for various scenarios limit := uint64(maxHeadersServe) tests := []struct { - query *GetBlockHeadersRequest // The query to execute for header retrieval - expect []common.Hash // The hashes of the block whose headers are expected + query *GetBlockHeadersPacket // The query to execute for header retrieval + expect []common.Hash // The hashes of the block whose headers are expected }{ // A single random block should be retrievable by hash { - &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, }, // A single random block should be retrievable by number { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 1}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 
limit / 2}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, }, // Multiple headers should be retrievable in both directions { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 + 1).Hash(), backend.chain.GetBlockByNumber(limit/2 + 2).Hash(), }, }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 - 1).Hash(), @@ -207,14 +209,14 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Multiple headers with skip lists should be retrievable { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 + 4).Hash(), backend.chain.GetBlockByNumber(limit/2 + 8).Hash(), }, }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 - 4).Hash(), @@ -223,31 +225,31 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // The chain endpoints should be retrievable { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 0}, Amount: 1}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 0}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(0).Hash()}, }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 
backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1}, []common.Hash{backend.chain.CurrentBlock().Hash()}, }, { // If the peer requests a bit into the future, we deliver what we have - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10}, []common.Hash{backend.chain.CurrentBlock().Hash()}, }, // Ensure protocol limits are honored { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true}, getHashes(backend.chain.CurrentBlock().Number.Uint64(), limit), }, // Check that requesting more than available is handled gracefully { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64()).Hash(), }, }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(4).Hash(), backend.chain.GetBlockByNumber(0).Hash(), @@ -255,13 +257,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Check that requesting more than available is handled gracefully, even if mid skip { - &GetBlockHeadersRequest{Origin: 
HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(), }, }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(4).Hash(), backend.chain.GetBlockByNumber(1).Hash(), @@ -269,7 +271,7 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Check a corner case where requesting more can iterate past the endpoints { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(2).Hash(), backend.chain.GetBlockByNumber(1).Hash(), @@ -278,24 +280,24 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Check a corner case where skipping overflow loops back into the chain start { - &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, []common.Hash{ backend.chain.GetBlockByNumber(3).Hash(), }, }, // Check a corner case where skipping overflow loops back to the same header { - &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: 
false, Skip: math.MaxUint64}, []common.Hash{ backend.chain.GetBlockByNumber(1).Hash(), }, }, // Check that non existing headers aren't returned { - &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: unknown}, Amount: 1}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: unknown}, Amount: 1}, []common.Hash{}, }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1}, + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1}, []common.Hash{}, }, } @@ -307,13 +309,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { headers = append(headers, backend.chain.GetBlockByHash(hash).Header()) } // Send the hash request and verify the response - p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{ - RequestId: 123, - GetBlockHeadersRequest: tt.query, + p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ + RequestId: 123, + GetBlockHeadersPacket: tt.query, }) - if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket{ - RequestId: 123, - BlockHeadersRequest: headers, + if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket66{ + RequestId: 123, + BlockHeadersPacket: headers, }); err != nil { t.Errorf("test %d: headers mismatch: %v", i, err) } @@ -322,11 +324,11 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil { tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0 - p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{ - RequestId: 456, - GetBlockHeadersRequest: tt.query, + p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ + RequestId: 456, + GetBlockHeadersPacket: tt.query, }) - expected := &BlockHeadersPacket{RequestId: 456, BlockHeadersRequest: headers} + expected := &BlockHeadersPacket66{RequestId: 456, BlockHeadersPacket: headers} if err := p2p.ExpectMsg(peer.app, 
BlockHeadersMsg, expected); err != nil { t.Errorf("test %d by hash: headers mismatch: %v", i, err) } @@ -336,6 +338,7 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { } // Tests that block contents can be retrieved from a remote chain based on their hashes. +func TestGetBlockBodies66(t *testing.T) { testGetBlockBodies(t, ETH66) } func TestGetBlockBodies67(t *testing.T) { testGetBlockBodies(t, ETH67) } func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) } @@ -417,20 +420,139 @@ func testGetBlockBodies(t *testing.T, protocol uint) { } // Send the hash request and verify the response - p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket{ - RequestId: 123, - GetBlockBodiesRequest: hashes, + p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket66{ + RequestId: 123, + GetBlockBodiesPacket: hashes, }) - if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket{ - RequestId: 123, - BlockBodiesResponse: bodies, + if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket66{ + RequestId: 123, + BlockBodiesPacket: bodies, }); err != nil { t.Fatalf("test %d: bodies mismatch: %v", i, err) } } } +// Tests that the state trie nodes can be retrieved based on hashes. 
+func TestGetNodeData66(t *testing.T) { testGetNodeData(t, ETH66, false) } +func TestGetNodeData67(t *testing.T) { testGetNodeData(t, ETH67, true) } +func TestGetNodeData68(t *testing.T) { testGetNodeData(t, ETH68, true) } + +func testGetNodeData(t *testing.T, protocol uint, drop bool) { + t.Parallel() + + // Define three accounts to simulate transactions with + acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey) + acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey) + + signer := types.HomesteadSigner{} + // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test) + generator := func(i int, block *core.BlockGen) { + switch i { + case 0: + // In block 1, the test bank sends account #1 some ether. + tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey) + block.AddTx(tx) + case 1: + // In block 2, the test bank sends some more ether to account #1. + // acc1Addr passes it on to account #2. + tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey) + tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, acc1Key) + block.AddTx(tx1) + block.AddTx(tx2) + case 2: + // Block 3 is empty but was mined by account #2. + block.SetCoinbase(acc2Addr) + block.SetExtra([]byte("yeehaw")) + case 3: + // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data). 
+ b2 := block.PrevBlock(1).Header() + b2.Extra = []byte("foo") + block.AddUncle(b2) + b3 := block.PrevBlock(2).Header() + b3.Extra = []byte("foo") + block.AddUncle(b3) + } + } + // Assemble the test environment + backend := newTestBackendWithGenerator(4, false, generator) + defer backend.close() + + peer, _ := newTestPeer("peer", protocol, backend) + defer peer.close() + + // Collect all state tree hashes. + var hashes []common.Hash + it := backend.db.NewIterator(nil, nil) + for it.Next() { + if key := it.Key(); len(key) == common.HashLength { + hashes = append(hashes, common.BytesToHash(key)) + } + } + it.Release() + + // Request all hashes. + p2p.Send(peer.app, GetNodeDataMsg, &GetNodeDataPacket66{ + RequestId: 123, + GetNodeDataPacket: hashes, + }) + msg, err := peer.app.ReadMsg() + if !drop { + if err != nil { + t.Fatalf("failed to read node data response: %v", err) + } + } else { + if err != nil { + return + } + t.Fatalf("succeeded to read node data response on non-supporting protocol: %v", msg) + } + if msg.Code != NodeDataMsg { + t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, NodeDataMsg) + } + var res NodeDataPacket66 + if err := msg.Decode(&res); err != nil { + t.Fatalf("failed to decode response node data: %v", err) + } + + // Verify that all hashes correspond to the requested data. + data := res.NodeDataPacket + for i, want := range hashes { + if hash := crypto.Keccak256Hash(data[i]); hash != want { + t.Errorf("data hash mismatch: have %x, want %x", hash, want) + } + } + + // Reconstruct state tree from the received data. + reconstructDB := rawdb.NewMemoryDatabase() + for i := 0; i < len(data); i++ { + rawdb.WriteLegacyTrieNode(reconstructDB, hashes[i], data[i]) + } + + // Sanity check whether all state matches. 
+ accounts := []common.Address{testAddr, acc1Addr, acc2Addr} + for i := uint64(0); i <= backend.chain.CurrentBlock().Number.Uint64(); i++ { + root := backend.chain.GetBlockByNumber(i).Root() + reconstructed, _ := state.New(root, state.NewDatabase(reconstructDB), nil) + for j, acc := range accounts { + state, _ := backend.chain.StateAt(root) + bw := state.GetBalance(acc) + bh := reconstructed.GetBalance(acc) + + if (bw == nil) != (bh == nil) { + t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw) + } + if bw != nil && bh != nil && bw.Cmp(bh) != 0 { + t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw) + } + } + } +} + // Tests that the transaction receipts can be retrieved based on hashes. +func TestGetBlockReceipts66(t *testing.T) { testGetBlockReceipts(t, ETH66) } func TestGetBlockReceipts67(t *testing.T) { testGetBlockReceipts(t, ETH67) } func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) } @@ -491,13 +613,13 @@ func testGetBlockReceipts(t *testing.T, protocol uint) { receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash())) } // Send the hash request and verify the response - p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket{ - RequestId: 123, - GetReceiptsRequest: hashes, + p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket66{ + RequestId: 123, + GetReceiptsPacket: hashes, }) - if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket{ - RequestId: 123, - ReceiptsResponse: receipts, + if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket66{ + RequestId: 123, + ReceiptsPacket: receipts, }); err != nil { t.Errorf("receipts mismatch: %v", err) } diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index cb72f2172861..8ca721e3dfe4 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -22,25 +22,27 @@ import ( "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core" 
+ "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/rlp" "github.com/scroll-tech/go-ethereum/trie" ) -func handleGetBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { +// handleGetBlockHeaders66 is the eth/66 version of handleGetBlockHeaders +func handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { // Decode the complex header query - var query GetBlockHeadersPacket + var query GetBlockHeadersPacket66 if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersRequest, peer) + response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersPacket, peer) return peer.ReplyBlockHeadersRLP(query.RequestId, response) } // ServiceGetBlockHeadersQuery assembles the response to a header query. It is // exposed to allow external packages to test protocol behavior. -func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue { +func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue { if query.Skip == 0 { // The fast path: when the request is for a contiguous segment of headers. 
return serviceContiguousBlockHeaderQuery(chain, query) @@ -49,7 +51,7 @@ func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersR } } -func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue { +func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue { hashMode := query.Origin.Hash != (common.Hash{}) first := true maxNonCanonical := uint64(100) @@ -138,7 +140,7 @@ func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBloc return headers } -func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest) []rlp.RawValue { +func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket) []rlp.RawValue { count := query.Amount if count > maxHeadersServe { count = maxHeadersServe @@ -201,19 +203,19 @@ func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHe } } -func handleGetBlockBodies(backend Backend, msg Decoder, peer *Peer) error { +func handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { // Decode the block body retrieval message - var query GetBlockBodiesPacket + var query GetBlockBodiesPacket66 if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesRequest) + response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesPacket) return peer.ReplyBlockBodiesRLP(query.RequestId, response) } // ServiceGetBlockBodiesQuery assembles the response to a body query. It is // exposed to allow external packages to test protocol behavior. 
-func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesRequest) []rlp.RawValue { +func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPacket) []rlp.RawValue { // Gather blocks until the fetch or network limits is reached var ( bytes int @@ -232,19 +234,60 @@ func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesRequ return bodies } -func handleGetReceipts(backend Backend, msg Decoder, peer *Peer) error { +func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error { + // Decode the trie node data retrieval message + var query GetNodeDataPacket66 + if err := msg.Decode(&query); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + response := ServiceGetNodeDataQuery(backend.Chain(), query.GetNodeDataPacket) + return peer.ReplyNodeData(query.RequestId, response) +} + +// ServiceGetNodeDataQuery assembles the response to a node data query. It is +// exposed to allow external packages to test protocol behavior. +func ServiceGetNodeDataQuery(chain *core.BlockChain, query GetNodeDataPacket) [][]byte { + // Request nodes by hash is not supported in path-based scheme. + if chain.TrieDB().Scheme() == rawdb.PathScheme { + return nil + } + // Gather state data until the fetch or network limits is reached + var ( + bytes int + nodes [][]byte + ) + for lookups, hash := range query { + if bytes >= softResponseLimit || len(nodes) >= maxNodeDataServe || + lookups >= 2*maxNodeDataServe { + break + } + // Retrieve the requested state entry + entry, err := chain.TrieDB().Node(hash) + if len(entry) == 0 || err != nil { + // Read the contract code with prefix only to save unnecessary lookups. 
+ entry, err = chain.ContractCodeWithPrefix(hash) + } + if err == nil && len(entry) > 0 { + nodes = append(nodes, entry) + bytes += len(entry) + } + } + return nodes +} + +func handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error { // Decode the block receipts retrieval message - var query GetReceiptsPacket + var query GetReceiptsPacket66 if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsRequest) + response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsPacket) return peer.ReplyReceiptsRLP(query.RequestId, response) } // ServiceGetReceiptsQuery assembles the response to a receipt query. It is // exposed to allow external packages to test protocol behavior. -func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsRequest) []rlp.RawValue { +func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsPacket) []rlp.RawValue { // Gather state data until the fetch or network limits is reached var ( bytes int @@ -313,15 +356,15 @@ func handleNewBlock(backend Backend, msg Decoder, peer *Peer) error { return backend.Handle(peer, ann) } -func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { +func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { // A batch of headers arrived to one of our previous requests - res := new(BlockHeadersPacket) + res := new(BlockHeadersPacket66) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } metadata := func() interface{} { - hashes := make([]common.Hash, len(res.BlockHeadersRequest)) - for i, header := range res.BlockHeadersRequest { + hashes := make([]common.Hash, len(res.BlockHeadersPacket)) + for i, header := range res.BlockHeadersPacket { hashes[i] = header.Hash() } return hashes @@ -329,24 +372,24 @@ func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) 
error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: BlockHeadersMsg, - Res: &res.BlockHeadersRequest, + Res: &res.BlockHeadersPacket, }, metadata) } -func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error { +func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { // A batch of block bodies arrived to one of our previous requests - res := new(BlockBodiesPacket) + res := new(BlockBodiesPacket66) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } metadata := func() interface{} { var ( - txsHashes = make([]common.Hash, len(res.BlockBodiesResponse)) - uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse)) - withdrawalHashes = make([]common.Hash, len(res.BlockBodiesResponse)) + txsHashes = make([]common.Hash, len(res.BlockBodiesPacket)) + uncleHashes = make([]common.Hash, len(res.BlockBodiesPacket)) + withdrawalHashes = make([]common.Hash, len(res.BlockBodiesPacket)) ) hasher := trie.NewStackTrie(nil) - for i, body := range res.BlockBodiesResponse { + for i, body := range res.BlockBodiesPacket { txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher) uncleHashes[i] = types.CalcUncleHash(body.Uncles) if body.Withdrawals != nil { @@ -358,20 +401,33 @@ func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: BlockBodiesMsg, - Res: &res.BlockBodiesResponse, + Res: &res.BlockBodiesPacket, }, metadata) } -func handleReceipts(backend Backend, msg Decoder, peer *Peer) error { +func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error { + // A batch of node state data arrived to one of our previous requests + res := new(NodeDataPacket66) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + return peer.dispatchResponse(&Response{ + id: res.RequestId, + code: NodeDataMsg, + Res: 
&res.NodeDataPacket, + }, nil) // No post-processing, we're not using this packet anymore +} + +func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error { // A batch of receipts arrived to one of our previous requests - res := new(ReceiptsPacket) + res := new(ReceiptsPacket66) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } metadata := func() interface{} { hasher := trie.NewStackTrie(nil) - hashes := make([]common.Hash, len(res.ReceiptsResponse)) - for i, receipt := range res.ReceiptsResponse { + hashes := make([]common.Hash, len(res.ReceiptsPacket)) + for i, receipt := range res.ReceiptsPacket { hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher) } return hashes @@ -379,17 +435,17 @@ func handleReceipts(backend Backend, msg Decoder, peer *Peer) error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: ReceiptsMsg, - Res: &res.ReceiptsResponse, + Res: &res.ReceiptsPacket, }, metadata) } -func handleNewPooledTransactionHashes67(backend Backend, msg Decoder, peer *Peer) error { +func handleNewPooledTransactionHashes66(backend Backend, msg Decoder, peer *Peer) error { // New transaction announcement arrived, make sure we have // a valid and fresh chain to handle them if !backend.AcceptTxs() { return nil } - ann := new(NewPooledTransactionHashesPacket67) + ann := new(NewPooledTransactionHashesPacket66) if err := msg.Decode(ann); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } @@ -420,17 +476,17 @@ func handleNewPooledTransactionHashes68(backend Backend, msg Decoder, peer *Peer return backend.Handle(peer, ann) } -func handleGetPooledTransactions(backend Backend, msg Decoder, peer *Peer) error { +func handleGetPooledTransactions66(backend Backend, msg Decoder, peer *Peer) error { // Decode the pooled transactions retrieval message - var query GetPooledTransactionsPacket + var query GetPooledTransactionsPacket66 if err := msg.Decode(&query); err != nil { 
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsRequest) + hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsPacket, peer) return peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs) } -func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsRequest) ([]common.Hash, []rlp.RawValue) { +func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsPacket, peer *Peer) ([]common.Hash, []rlp.RawValue) { // Gather transactions until the fetch or network limits is reached var ( bytes int @@ -478,17 +534,17 @@ func handleTransactions(backend Backend, msg Decoder, peer *Peer) error { return backend.Handle(peer, &txs) } -func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error { +func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error { // Transactions arrived, make sure we have a valid and fresh chain to handle them if !backend.AcceptTxs() { return nil } // Transactions can be processed, parse all of them and deliver to the pool - var txs PooledTransactionsPacket + var txs PooledTransactionsPacket66 if err := msg.Decode(&txs); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - for i, tx := range txs.PooledTransactionsResponse { + for i, tx := range txs.PooledTransactionsPacket { // Validate and mark the remote transaction if tx == nil { return fmt.Errorf("%w: transaction %d is nil", errDecode, i) @@ -497,5 +553,5 @@ func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error { } requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId) - return backend.Handle(peer, &txs.PooledTransactionsResponse) + return backend.Handle(peer, &txs.PooledTransactionsPacket) } diff --git a/eth/protocols/eth/handshake_test.go b/eth/protocols/eth/handshake_test.go index f033139ebe33..00b3850b3556 100644 --- 
a/eth/protocols/eth/handshake_test.go +++ b/eth/protocols/eth/handshake_test.go @@ -27,8 +27,7 @@ import ( ) // Tests that handshake failures are detected and reported correctly. -func TestHandshake67(t *testing.T) { testHandshake(t, ETH67) } -func TestHandshake68(t *testing.T) { testHandshake(t, ETH68) } +func TestHandshake66(t *testing.T) { testHandshake(t, ETH66) } func testHandshake(t *testing.T, protocol uint) { t.Parallel() diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go index b839259b4d25..5f7ecad3ff4e 100644 --- a/eth/protocols/eth/peer.go +++ b/eth/protocols/eth/peer.go @@ -219,7 +219,7 @@ func (p *Peer) AsyncSendTransactions(hashes []common.Hash) { func (p *Peer) sendPooledTransactionHashes66(hashes []common.Hash) error { // Mark all the transactions as known, but ensure we don't overflow our limits p.knownTxs.Add(hashes...) - return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket67(hashes)) + return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket66(hashes)) } // sendPooledTransactionHashes68 sends transaction hashes (tagged with their type @@ -248,15 +248,15 @@ func (p *Peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) { } } -// ReplyPooledTransactionsRLP is the response to RequestTxs. +// ReplyPooledTransactionsRLP is the eth/66 version of SendPooledTransactionsRLP. func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs []rlp.RawValue) error { // Mark all the transactions as known, but ensure we don't overflow our limits p.knownTxs.Add(hashes...) 
- // Not packed into PooledTransactionsResponse to avoid RLP decoding - return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket{ - RequestId: id, - PooledTransactionsRLPResponse: txs, + // Not packed into PooledTransactionsPacket to avoid RLP decoding + return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket66{ + RequestId: id, + PooledTransactionsRLPPacket: txs, }) } @@ -309,28 +309,36 @@ func (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) { } } -// ReplyBlockHeadersRLP is the response to GetBlockHeaders. +// ReplyBlockHeadersRLP is the eth/66 response to GetBlockHeaders. func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error { - return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket{ - RequestId: id, - BlockHeadersRLPResponse: headers, + return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket66{ + RequestId: id, + BlockHeadersRLPPacket: headers, }) } -// ReplyBlockBodiesRLP is the response to GetBlockBodies. +// ReplyBlockBodiesRLP is the eth/66 response to GetBlockBodies. func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error { - // Not packed into BlockBodiesResponse to avoid RLP decoding - return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket{ - RequestId: id, - BlockBodiesRLPResponse: bodies, + // Not packed into BlockBodiesPacket to avoid RLP decoding + return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket66{ + RequestId: id, + BlockBodiesRLPPacket: bodies, }) } -// ReplyReceiptsRLP is the response to GetReceipts. +// ReplyNodeData is the eth/66 response to GetNodeData. +func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error { + return p2p.Send(p.rw, NodeDataMsg, &NodeDataPacket66{ + RequestId: id, + NodeDataPacket: data, + }) +} + +// ReplyReceiptsRLP is the eth/66 response to GetReceipts. 
func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error { - return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket{ - RequestId: id, - ReceiptsRLPResponse: receipts, + return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket66{ + RequestId: id, + ReceiptsRLPPacket: receipts, }) } @@ -345,9 +353,9 @@ func (p *Peer) RequestOneHeader(hash common.Hash, sink chan *Response) (*Request sink: sink, code: GetBlockHeadersMsg, want: BlockHeadersMsg, - data: &GetBlockHeadersPacket{ + data: &GetBlockHeadersPacket66{ RequestId: id, - GetBlockHeadersRequest: &GetBlockHeadersRequest{ + GetBlockHeadersPacket: &GetBlockHeadersPacket{ Origin: HashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), @@ -372,9 +380,9 @@ func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, re sink: sink, code: GetBlockHeadersMsg, want: BlockHeadersMsg, - data: &GetBlockHeadersPacket{ + data: &GetBlockHeadersPacket66{ RequestId: id, - GetBlockHeadersRequest: &GetBlockHeadersRequest{ + GetBlockHeadersPacket: &GetBlockHeadersPacket{ Origin: HashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), @@ -399,9 +407,9 @@ func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, rever sink: sink, code: GetBlockHeadersMsg, want: BlockHeadersMsg, - data: &GetBlockHeadersPacket{ + data: &GetBlockHeadersPacket66{ RequestId: id, - GetBlockHeadersRequest: &GetBlockHeadersRequest{ + GetBlockHeadersPacket: &GetBlockHeadersPacket{ Origin: HashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), @@ -426,9 +434,31 @@ func (p *Peer) RequestBodies(hashes []common.Hash, sink chan *Response) (*Reques sink: sink, code: GetBlockBodiesMsg, want: BlockBodiesMsg, - data: &GetBlockBodiesPacket{ - RequestId: id, - GetBlockBodiesRequest: hashes, + data: &GetBlockBodiesPacket66{ + RequestId: id, + GetBlockBodiesPacket: hashes, + }, + } + if err := p.dispatchRequest(req); err != nil { + return nil, err + } + return req, nil +} + +// 
RequestNodeData fetches a batch of arbitrary data from a node's known state +// data, corresponding to the specified hashes. +func (p *Peer) RequestNodeData(hashes []common.Hash, sink chan *Response) (*Request, error) { + p.Log().Debug("Fetching batch of state data", "count", len(hashes)) + id := rand.Uint64() + + req := &Request{ + id: id, + sink: sink, + code: GetNodeDataMsg, + want: NodeDataMsg, + data: &GetNodeDataPacket66{ + RequestId: id, + GetNodeDataPacket: hashes, }, } if err := p.dispatchRequest(req); err != nil { @@ -447,9 +477,9 @@ func (p *Peer) RequestReceipts(hashes []common.Hash, sink chan *Response) (*Requ sink: sink, code: GetReceiptsMsg, want: ReceiptsMsg, - data: &GetReceiptsPacket{ - RequestId: id, - GetReceiptsRequest: hashes, + data: &GetReceiptsPacket66{ + RequestId: id, + GetReceiptsPacket: hashes, }, } if err := p.dispatchRequest(req); err != nil { @@ -464,9 +494,9 @@ func (p *Peer) RequestTxs(hashes []common.Hash) error { id := rand.Uint64() requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id) - return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket{ - RequestId: id, - GetPooledTransactionsRequest: hashes, + return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket66{ + RequestId: id, + GetPooledTransactionsPacket: hashes, }) } diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index a6b7a76e1437..1fb9dadff5e3 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -30,6 +30,7 @@ import ( // Constants to match up protocol versions and messages const ( + ETH66 = 66 ETH67 = 67 ETH68 = 68 ) @@ -40,11 +41,11 @@ const ProtocolName = "eth" // ProtocolVersions are the supported versions of the `eth` protocol (first // is primary). 
-var ProtocolVersions = []uint{ETH68, ETH67} +var ProtocolVersions = []uint{ETH68, ETH67, ETH66} // protocolLengths are the number of implemented message corresponding to // different protocol versions. -var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17} +var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17, ETH66: 17} // maxMessageSize is the maximum cap on the size of a protocol message. const maxMessageSize = 10 * 1024 * 1024 @@ -61,6 +62,8 @@ const ( NewPooledTransactionHashesMsg = 0x08 GetPooledTransactionsMsg = 0x09 PooledTransactionsMsg = 0x0a + GetNodeDataMsg = 0x0d + NodeDataMsg = 0x0e GetReceiptsMsg = 0x0f ReceiptsMsg = 0x10 ) @@ -82,7 +85,7 @@ type Packet interface { Kind() byte // Kind returns the message type. } -// StatusPacket is the network packet for the status message. +// StatusPacket is the network packet for the status message for eth/64 and later. type StatusPacket struct { ProtocolVersion uint32 NetworkID uint64 @@ -115,18 +118,18 @@ func (p *NewBlockHashesPacket) Unpack() ([]common.Hash, []uint64) { // TransactionsPacket is the network packet for broadcasting new transactions. type TransactionsPacket []*types.Transaction -// GetBlockHeadersRequest represents a block header query. -type GetBlockHeadersRequest struct { +// GetBlockHeadersPacket represents a block header query. +type GetBlockHeadersPacket struct { Origin HashOrNumber // Block from which to retrieve headers Amount uint64 // Maximum number of headers to retrieve Skip uint64 // Blocks to skip between consecutive headers Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis) } -// GetBlockHeadersPacket represents a block header query with request ID wrapping. 
-type GetBlockHeadersPacket struct { +// GetBlockHeadersPacket66 represents a block header query over eth/66 +type GetBlockHeadersPacket66 struct { RequestId uint64 - *GetBlockHeadersRequest + *GetBlockHeadersPacket } // HashOrNumber is a combined field for specifying an origin block. @@ -165,23 +168,23 @@ func (hn *HashOrNumber) DecodeRLP(s *rlp.Stream) error { } } -// BlockHeadersRequest represents a block header response. -type BlockHeadersRequest []*types.Header +// BlockHeadersPacket represents a block header response. +type BlockHeadersPacket []*types.Header -// BlockHeadersPacket represents a block header response over with request ID wrapping. -type BlockHeadersPacket struct { +// BlockHeadersPacket66 represents a block header response over eth/66. +type BlockHeadersPacket66 struct { RequestId uint64 - BlockHeadersRequest + BlockHeadersPacket } -// BlockHeadersRLPResponse represents a block header response, to use when we already +// BlockHeadersRLPPacket represents a block header response, to use when we already // have the headers rlp encoded. -type BlockHeadersRLPResponse []rlp.RawValue +type BlockHeadersRLPPacket []rlp.RawValue -// BlockHeadersRLPPacket represents a block header response with request ID wrapping. -type BlockHeadersRLPPacket struct { +// BlockHeadersRLPPacket66 represents a block header response over eth/66. +type BlockHeadersRLPPacket66 struct { RequestId uint64 - BlockHeadersRLPResponse + BlockHeadersRLPPacket } // NewBlockPacket is the network packet for the block propagation message. @@ -203,34 +206,33 @@ func (request *NewBlockPacket) sanityCheck() error { return nil } -// GetBlockBodiesRequest represents a block body query. -type GetBlockBodiesRequest []common.Hash +// GetBlockBodiesPacket represents a block body query. +type GetBlockBodiesPacket []common.Hash -// GetBlockBodiesPacket represents a block body query with request ID wrapping. 
-type GetBlockBodiesPacket struct { +// GetBlockBodiesPacket66 represents a block body query over eth/66. +type GetBlockBodiesPacket66 struct { RequestId uint64 - GetBlockBodiesRequest + GetBlockBodiesPacket } -// BlockBodiesResponse is the network packet for block content distribution. -type BlockBodiesResponse []*BlockBody +// BlockBodiesPacket is the network packet for block content distribution. +type BlockBodiesPacket []*BlockBody -// BlockBodiesPacket is the network packet for block content distribution with -// request ID wrapping. -type BlockBodiesPacket struct { +// BlockBodiesPacket66 is the network packet for block content distribution over eth/66. +type BlockBodiesPacket66 struct { RequestId uint64 - BlockBodiesResponse + BlockBodiesPacket } -// BlockBodiesRLPResponse is used for replying to block body requests, in cases +// BlockBodiesRLPPacket is used for replying to block body requests, in cases // where we already have them RLP-encoded, and thus can avoid the decode-encode // roundtrip. -type BlockBodiesRLPResponse []rlp.RawValue +type BlockBodiesRLPPacket []rlp.RawValue -// BlockBodiesRLPPacket is the BlockBodiesRLPResponse with request ID wrapping. -type BlockBodiesRLPPacket struct { +// BlockBodiesRLPPacket66 is the BlockBodiesRLPPacket over eth/66 +type BlockBodiesRLPPacket66 struct { RequestId uint64 - BlockBodiesRLPResponse + BlockBodiesRLPPacket } // BlockBody represents the data content of a single block. @@ -242,7 +244,7 @@ type BlockBody struct { // Unpack retrieves the transactions and uncles from the range packet and returns // them in a split flat format that's more consistent with the internal data structures. 
-func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) { +func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) { // TODO(matt): add support for withdrawals to fetchers var ( txset = make([][]*types.Transaction, len(*p)) @@ -255,36 +257,53 @@ func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Heade return txset, uncleset, withdrawalset } -// GetReceiptsRequest represents a block receipts query. -type GetReceiptsRequest []common.Hash +// GetNodeDataPacket represents a trie node data query. +type GetNodeDataPacket []common.Hash + +// GetNodeDataPacket66 represents a trie node data query over eth/66. +type GetNodeDataPacket66 struct { + RequestId uint64 + GetNodeDataPacket +} + +// NodeDataPacket is the network packet for trie node data distribution. +type NodeDataPacket [][]byte -// GetReceiptsPacket represents a block receipts query with request ID wrapping. -type GetReceiptsPacket struct { +// NodeDataPacket66 is the network packet for trie node data distribution over eth/66. +type NodeDataPacket66 struct { RequestId uint64 - GetReceiptsRequest + NodeDataPacket } -// ReceiptsResponse is the network packet for block receipts distribution. -type ReceiptsResponse [][]*types.Receipt +// GetReceiptsPacket represents a block receipts query. +type GetReceiptsPacket []common.Hash -// ReceiptsPacket is the network packet for block receipts distribution with -// request ID wrapping. -type ReceiptsPacket struct { +// GetReceiptsPacket66 represents a block receipts query over eth/66. +type GetReceiptsPacket66 struct { RequestId uint64 - ReceiptsResponse + GetReceiptsPacket } -// ReceiptsRLPResponse is used for receipts, when we already have it encoded -type ReceiptsRLPResponse []rlp.RawValue +// ReceiptsPacket is the network packet for block receipts distribution. 
+type ReceiptsPacket [][]*types.Receipt -// ReceiptsRLPPacket is ReceiptsRLPResponse with request ID wrapping. -type ReceiptsRLPPacket struct { +// ReceiptsPacket66 is the network packet for block receipts distribution over eth/66. +type ReceiptsPacket66 struct { RequestId uint64 - ReceiptsRLPResponse + ReceiptsPacket } -// NewPooledTransactionHashesPacket67 represents a transaction announcement packet on eth/67. -type NewPooledTransactionHashesPacket67 []common.Hash +// ReceiptsRLPPacket is used for receipts, when we already have it encoded +type ReceiptsRLPPacket []rlp.RawValue + +// ReceiptsRLPPacket66 is the eth-66 version of ReceiptsRLPPacket +type ReceiptsRLPPacket66 struct { + RequestId uint64 + ReceiptsRLPPacket +} + +// NewPooledTransactionHashesPacket66 represents a transaction announcement packet on eth/66 and eth/67. +type NewPooledTransactionHashesPacket66 []common.Hash // NewPooledTransactionHashesPacket68 represents a transaction announcement packet on eth/68 and newer. type NewPooledTransactionHashesPacket68 struct { @@ -293,33 +312,31 @@ type NewPooledTransactionHashesPacket68 struct { Hashes []common.Hash } -// GetPooledTransactionsRequest represents a transaction query. -type GetPooledTransactionsRequest []common.Hash +// GetPooledTransactionsPacket represents a transaction query. +type GetPooledTransactionsPacket []common.Hash -// GetPooledTransactionsPacket represents a transaction query with request ID wrapping. -type GetPooledTransactionsPacket struct { +type GetPooledTransactionsPacket66 struct { RequestId uint64 - GetPooledTransactionsRequest + GetPooledTransactionsPacket } -// PooledTransactionsResponse is the network packet for transaction distribution. -type PooledTransactionsResponse []*types.Transaction +// PooledTransactionsPacket is the network packet for transaction distribution. 
+type PooledTransactionsPacket []*types.Transaction -// PooledTransactionsPacket is the network packet for transaction distribution -// with request ID wrapping. -type PooledTransactionsPacket struct { +// PooledTransactionsPacket66 is the network packet for transaction distribution over eth/66. +type PooledTransactionsPacket66 struct { RequestId uint64 - PooledTransactionsResponse + PooledTransactionsPacket } -// PooledTransactionsRLPResponse is the network packet for transaction distribution, used +// PooledTransactionsRLPPacket is the network packet for transaction distribution, used // in the cases we already have them in rlp-encoded form -type PooledTransactionsRLPResponse []rlp.RawValue +type PooledTransactionsRLPPacket []rlp.RawValue -// PooledTransactionsRLPPacket is PooledTransactionsRLPResponse with request ID wrapping. -type PooledTransactionsRLPPacket struct { +// PooledTransactionsRLPPacket66 is the eth/66 form of PooledTransactionsRLPPacket +type PooledTransactionsRLPPacket66 struct { RequestId uint64 - PooledTransactionsRLPResponse + PooledTransactionsRLPPacket } func (*StatusPacket) Name() string { return "Status" } @@ -331,34 +348,40 @@ func (*NewBlockHashesPacket) Kind() byte { return NewBlockHashesMsg } func (*TransactionsPacket) Name() string { return "Transactions" } func (*TransactionsPacket) Kind() byte { return TransactionsMsg } -func (*GetBlockHeadersRequest) Name() string { return "GetBlockHeaders" } -func (*GetBlockHeadersRequest) Kind() byte { return GetBlockHeadersMsg } +func (*GetBlockHeadersPacket) Name() string { return "GetBlockHeaders" } +func (*GetBlockHeadersPacket) Kind() byte { return GetBlockHeadersMsg } -func (*BlockHeadersRequest) Name() string { return "BlockHeaders" } -func (*BlockHeadersRequest) Kind() byte { return BlockHeadersMsg } +func (*BlockHeadersPacket) Name() string { return "BlockHeaders" } +func (*BlockHeadersPacket) Kind() byte { return BlockHeadersMsg } -func (*GetBlockBodiesRequest) Name() string { return 
"GetBlockBodies" } -func (*GetBlockBodiesRequest) Kind() byte { return GetBlockBodiesMsg } +func (*GetBlockBodiesPacket) Name() string { return "GetBlockBodies" } +func (*GetBlockBodiesPacket) Kind() byte { return GetBlockBodiesMsg } -func (*BlockBodiesResponse) Name() string { return "BlockBodies" } -func (*BlockBodiesResponse) Kind() byte { return BlockBodiesMsg } +func (*BlockBodiesPacket) Name() string { return "BlockBodies" } +func (*BlockBodiesPacket) Kind() byte { return BlockBodiesMsg } func (*NewBlockPacket) Name() string { return "NewBlock" } func (*NewBlockPacket) Kind() byte { return NewBlockMsg } -func (*NewPooledTransactionHashesPacket67) Name() string { return "NewPooledTransactionHashes" } -func (*NewPooledTransactionHashesPacket67) Kind() byte { return NewPooledTransactionHashesMsg } +func (*NewPooledTransactionHashesPacket66) Name() string { return "NewPooledTransactionHashes" } +func (*NewPooledTransactionHashesPacket66) Kind() byte { return NewPooledTransactionHashesMsg } func (*NewPooledTransactionHashesPacket68) Name() string { return "NewPooledTransactionHashes" } func (*NewPooledTransactionHashesPacket68) Kind() byte { return NewPooledTransactionHashesMsg } -func (*GetPooledTransactionsRequest) Name() string { return "GetPooledTransactions" } -func (*GetPooledTransactionsRequest) Kind() byte { return GetPooledTransactionsMsg } +func (*GetPooledTransactionsPacket) Name() string { return "GetPooledTransactions" } +func (*GetPooledTransactionsPacket) Kind() byte { return GetPooledTransactionsMsg } + +func (*PooledTransactionsPacket) Name() string { return "PooledTransactions" } +func (*PooledTransactionsPacket) Kind() byte { return PooledTransactionsMsg } + +func (*GetNodeDataPacket) Name() string { return "GetNodeData" } +func (*GetNodeDataPacket) Kind() byte { return GetNodeDataMsg } -func (*PooledTransactionsResponse) Name() string { return "PooledTransactions" } -func (*PooledTransactionsResponse) Kind() byte { return PooledTransactionsMsg 
} +func (*NodeDataPacket) Name() string { return "NodeData" } +func (*NodeDataPacket) Kind() byte { return NodeDataMsg } -func (*GetReceiptsRequest) Name() string { return "GetReceipts" } -func (*GetReceiptsRequest) Kind() byte { return GetReceiptsMsg } +func (*GetReceiptsPacket) Name() string { return "GetReceipts" } +func (*GetReceiptsPacket) Kind() byte { return GetReceiptsMsg } -func (*ReceiptsResponse) Name() string { return "Receipts" } -func (*ReceiptsResponse) Kind() byte { return ReceiptsMsg } +func (*ReceiptsPacket) Name() string { return "Receipts" } +func (*ReceiptsPacket) Kind() byte { return ReceiptsMsg } diff --git a/eth/protocols/eth/protocol_test.go b/eth/protocols/eth/protocol_test.go index 5090d73e0361..0ec3ef6a5b0a 100644 --- a/eth/protocols/eth/protocol_test.go +++ b/eth/protocols/eth/protocol_test.go @@ -35,19 +35,19 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { } // Assemble some table driven tests tests := []struct { - packet *GetBlockHeadersRequest + packet *GetBlockHeadersPacket fail bool }{ // Providing the origin as either a hash or a number should both work - {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}}}, - {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}}}, + {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}}}, + {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}}}, // Providing arbitrary query field should also work - {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}}, - {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}}, + {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}}, + {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}}, // 
Providing both the origin hash and origin number must fail - {fail: true, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash, Number: 314}}}, + {fail: true, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash, Number: 314}}}, } // Iterate over each of the tests and try to encode and then decode for i, tt := range tests { @@ -58,7 +58,7 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { t.Fatalf("test %d: encode should have failed", i) } if !tt.fail { - packet := new(GetBlockHeadersRequest) + packet := new(GetBlockHeadersPacket) if err := rlp.DecodeBytes(bytes, packet); err != nil { t.Fatalf("test %d: failed to decode packet: %v", i, err) } @@ -70,40 +70,46 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { } } -// TestEmptyMessages tests encoding of empty messages. -func TestEmptyMessages(t *testing.T) { +// TestEth66EmptyMessages tests encoding of empty eth66 messages +func TestEth66EmptyMessages(t *testing.T) { // All empty messages encodes to the same format want := common.FromHex("c4820457c0") for i, msg := range []interface{}{ // Headers - GetBlockHeadersPacket{1111, nil}, - BlockHeadersPacket{1111, nil}, + GetBlockHeadersPacket66{1111, nil}, + BlockHeadersPacket66{1111, nil}, // Bodies - GetBlockBodiesPacket{1111, nil}, - BlockBodiesPacket{1111, nil}, - BlockBodiesRLPPacket{1111, nil}, + GetBlockBodiesPacket66{1111, nil}, + BlockBodiesPacket66{1111, nil}, + BlockBodiesRLPPacket66{1111, nil}, + // Node data + GetNodeDataPacket66{1111, nil}, + NodeDataPacket66{1111, nil}, // Receipts - GetReceiptsPacket{1111, nil}, - ReceiptsPacket{1111, nil}, + GetReceiptsPacket66{1111, nil}, + ReceiptsPacket66{1111, nil}, // Transactions - GetPooledTransactionsPacket{1111, nil}, - PooledTransactionsPacket{1111, nil}, - PooledTransactionsRLPPacket{1111, nil}, + GetPooledTransactionsPacket66{1111, nil}, + PooledTransactionsPacket66{1111, nil}, + PooledTransactionsRLPPacket66{1111, nil}, // Headers - BlockHeadersPacket{1111, 
BlockHeadersRequest([]*types.Header{})}, + BlockHeadersPacket66{1111, BlockHeadersPacket([]*types.Header{})}, // Bodies - GetBlockBodiesPacket{1111, GetBlockBodiesRequest([]common.Hash{})}, - BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{})}, - BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{})}, + GetBlockBodiesPacket66{1111, GetBlockBodiesPacket([]common.Hash{})}, + BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{})}, + BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{})}, + // Node data + GetNodeDataPacket66{1111, GetNodeDataPacket([]common.Hash{})}, + NodeDataPacket66{1111, NodeDataPacket([][]byte{})}, // Receipts - GetReceiptsPacket{1111, GetReceiptsRequest([]common.Hash{})}, - ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{})}, + GetReceiptsPacket66{1111, GetReceiptsPacket([]common.Hash{})}, + ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{})}, // Transactions - GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest([]common.Hash{})}, - PooledTransactionsPacket{1111, PooledTransactionsResponse([]*types.Transaction{})}, - PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse([]rlp.RawValue{})}, + GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket([]common.Hash{})}, + PooledTransactionsPacket66{1111, PooledTransactionsPacket([]*types.Transaction{})}, + PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket([]rlp.RawValue{})}, } { if have, _ := rlp.EncodeToBytes(msg); !bytes.Equal(have, want) { t.Errorf("test %d, type %T, have\n\t%x\nwant\n\t%x", i, msg, have, want) @@ -111,8 +117,8 @@ func TestEmptyMessages(t *testing.T) { } } -// TestMessages tests the encoding of all messages. 
-func TestMessages(t *testing.T) { +// TestEth66Messages tests the encoding of all redefined eth66 messages +func TestEth66Messages(t *testing.T) { // Some basic structs used during testing var ( header *types.Header @@ -163,6 +169,10 @@ func TestMessages(t *testing.T) { common.HexToHash("deadc0de"), common.HexToHash("feedbeef"), } + byteSlices := [][]byte{ + common.FromHex("deadc0de"), + common.FromHex("feedbeef"), + } // init the receipts { receipts = []*types.Receipt{ @@ -193,51 +203,59 @@ func TestMessages(t *testing.T) { want []byte }{ { - GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{hashes[0], 0}, 5, 5, false}}, + GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{hashes[0], 0}, 5, 5, false}}, common.FromHex("e8820457e4a000000000000000000000000000000000000000000000000000000000deadc0de050580"), }, { - GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}}, + GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}}, common.FromHex("ca820457c682270f050580"), }, { - BlockHeadersPacket{1111, BlockHeadersRequest{header}}, + BlockHeadersPacket66{1111, BlockHeadersPacket{header}}, 
common.FromHex("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { - GetBlockBodiesPacket{1111, GetBlockBodiesRequest(hashes)}, + GetBlockBodiesPacket66{1111, GetBlockBodiesPacket(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{blockBody})}, + BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{blockBody})}, 
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { // Identical to non-rlp-shortcut version - BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{blockBodyRlp})}, + BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{blockBodyRlp})}, 
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { - GetReceiptsPacket{1111, GetReceiptsRequest(hashes)}, + GetNodeDataPacket66{1111, GetNodeDataPacket(hashes)}, + common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), + }, + { + NodeDataPacket66{1111, NodeDataPacket(byteSlices)}, + common.FromHex("ce820457ca84deadc0de84feedbeef"), + }, + { + GetReceiptsPacket66{1111, GetReceiptsPacket(hashes)}, 
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{receipts})}, + ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{receipts})}, common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"), }, { - ReceiptsRLPPacket{1111, ReceiptsRLPResponse([]rlp.RawValue{receiptsRlp})}, + ReceiptsRLPPacket66{1111, ReceiptsRLPPacket([]rlp.RawValue{receiptsRlp})}, 
common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"), }, { - GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest(hashes)}, + GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - PooledTransactionsPacket{1111, PooledTransactionsResponse(txs)}, + PooledTransactionsPacket66{1111, PooledTransactionsPacket(txs)}, common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), }, { - PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse(txRlps)}, + PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket(txRlps)}, 
common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), }, } { diff --git a/eth/sync_test.go b/eth/sync_test.go index bbf5d93fcf01..b99b2eaea27e 100644 --- a/eth/sync_test.go +++ b/eth/sync_test.go @@ -28,8 +28,8 @@ import ( ) // Tests that snap sync is disabled after a successful sync cycle. +func TestSnapSyncDisabling66(t *testing.T) { testSnapSyncDisabling(t, eth.ETH66, snap.SNAP1) } func TestSnapSyncDisabling67(t *testing.T) { testSnapSyncDisabling(t, eth.ETH67, snap.SNAP1) } -func TestSnapSyncDisabling68(t *testing.T) { testSnapSyncDisabling(t, eth.ETH68, snap.SNAP1) } // Tests that snap sync gets disabled as soon as a real block is successfully // imported into the blockchain. 
diff --git a/params/config.go b/params/config.go index 6a81fe990a4d..61e267595e4c 100644 --- a/params/config.go +++ b/params/config.go @@ -503,12 +503,12 @@ type ChainConfig struct { // Fork scheduling was switched from blocks to timestamps here - ShanghaiTime *uint64 `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai) - CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun) - PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague) - VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle) - DarwinTime *uint64 `json:"darwinTime,omitempty"` // Darwin switch time (nil = no fork, 0 = already on darwin) - DarwinV2Time *uint64 `json:"darwinv2Time,omitempty"` // DarwinV2 switch time (nil = no fork, 0 = already on darwinv2) + ShanghaiTime *uint64 `json:"shanghaiBlock,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai) + CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun) + PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague) + VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle) + DarwinTime *uint64 `json:"darwinTime,omitempty"` // Darwin switch time (nil = no fork, 0 = already on darwin) + DarwinV2Time *uint64 `json:"darwinv2Time,omitempty"` // DarwinV2 switch time (nil = no fork, 0 = already on darwinv2) // TerminalTotalDifficulty is the amount of total difficulty reached by // the network that triggers the consensus upgrade. 
From 91c2f9c27f4df601cf2967fefed62e36981ff7b0 Mon Sep 17 00:00:00 2001 From: colin <102356659+colinlyguo@users.noreply.github.com> Date: Wed, 23 Oct 2024 17:39:31 +0800 Subject: [PATCH 36/41] fix(SDK): GetBlockByNumberOrHash empty tx hash check (#1080) --- ethclient/ethclient.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 517dfe7f16e0..c2d6eff36166 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -450,10 +450,10 @@ func (ec *Client) GetBlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc. if head.UncleHash != types.EmptyUncleHash && len(body.UncleHashes) == 0 { return nil, fmt.Errorf("server returned empty uncle list but block header indicates uncles") } - if head.TxHash == types.EmptyRootHash && len(body.Transactions) > 0 { + if head.TxHash == types.EmptyTxsHash && len(body.Transactions) > 0 { return nil, fmt.Errorf("server returned non-empty transaction list but block header indicates no transactions") } - if head.TxHash != types.EmptyRootHash && len(body.Transactions) == 0 { + if head.TxHash != types.EmptyTxsHash && len(body.Transactions) == 0 { return nil, fmt.Errorf("server returned empty transaction list but block header indicates transactions") } // Load uncles because they are not included in the block response. 
From aefacd4931dd9a262cfe2f3127e076f528b2e67e Mon Sep 17 00:00:00 2001 From: colin <102356659+colinlyguo@users.noreply.github.com> Date: Fri, 25 Oct 2024 20:02:17 +0800 Subject: [PATCH 37/41] feat(tx-pool): disable blob pool (#1081) --- eth/backend.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/eth/backend.go b/eth/backend.go index 71c1d0ebe822..a016bfef2fef 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -36,7 +36,6 @@ import ( "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/state/pruner" "github.com/scroll-tech/go-ethereum/core/txpool" - "github.com/scroll-tech/go-ethereum/core/txpool/blobpool" "github.com/scroll-tech/go-ethereum/core/txpool/legacypool" "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/core/vm" @@ -231,17 +230,17 @@ func New(stack *node.Node, config *ethconfig.Config, l1Client sync_service.EthCl } eth.bloomIndexer.Start(eth.blockchain) - if config.BlobPool.Datadir != "" { - config.BlobPool.Datadir = stack.ResolvePath(config.BlobPool.Datadir) - } - blobPool := blobpool.New(config.BlobPool, eth.blockchain) + //if config.BlobPool.Datadir != "" { + // config.BlobPool.Datadir = stack.ResolvePath(config.BlobPool.Datadir) + //} + //blobPool := blobpool.New(config.BlobPool, eth.blockchain) if config.TxPool.Journal != "" { config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal) } legacyPool := legacypool.New(config.TxPool, eth.blockchain) - eth.txPool, err = txpool.New(new(big.Int).SetUint64(config.TxPool.PriceLimit), eth.blockchain, []txpool.SubPool{legacyPool, blobPool}) + eth.txPool, err = txpool.New(new(big.Int).SetUint64(config.TxPool.PriceLimit), eth.blockchain, []txpool.SubPool{legacyPool}) if err != nil { return nil, err } From 69ce8d91a7175b35bd9f436fb64cf64b28f14536 Mon Sep 17 00:00:00 2001 From: colin <102356659+colinlyguo@users.noreply.github.com> Date: Mon, 28 Oct 2024 15:31:14 +0800 Subject: [PATCH 38/41] Revert "hack 
shanghaiBlock" (#1083) This reverts commit 9e69bf86ea7631e7f6109c6a333fd0a90b9c2223. --- params/config.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/params/config.go b/params/config.go index 61e267595e4c..6a81fe990a4d 100644 --- a/params/config.go +++ b/params/config.go @@ -503,12 +503,12 @@ type ChainConfig struct { // Fork scheduling was switched from blocks to timestamps here - ShanghaiTime *uint64 `json:"shanghaiBlock,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai) - CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun) - PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague) - VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle) - DarwinTime *uint64 `json:"darwinTime,omitempty"` // Darwin switch time (nil = no fork, 0 = already on darwin) - DarwinV2Time *uint64 `json:"darwinv2Time,omitempty"` // DarwinV2 switch time (nil = no fork, 0 = already on darwinv2) + ShanghaiTime *uint64 `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai) + CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun) + PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague) + VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle) + DarwinTime *uint64 `json:"darwinTime,omitempty"` // Darwin switch time (nil = no fork, 0 = already on darwin) + DarwinV2Time *uint64 `json:"darwinv2Time,omitempty"` // DarwinV2 switch time (nil = no fork, 0 = already on darwinv2) // TerminalTotalDifficulty is the amount of total difficulty reached by // the network that triggers the consensus upgrade. 
From 9f83e9d8217b65a51d619f7ff7a769c4b743da8d Mon Sep 17 00:00:00 2001 From: 0xmountaintop <37070449+0xmountaintop@users.noreply.github.com> Date: Wed, 30 Oct 2024 14:29:36 +1100 Subject: [PATCH 39/41] fix(api): change gas_price to use pending block (#1082) (#1084) * fix(api): change gas_price to use pending block (#1082) * fix(api): change gas_price to use pending block * fix --------- Co-authored-by: Morty <70688412+yiweichi@users.noreply.github.com> --- internal/ethapi/api.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index f5a41c333dce..f233455c4f8d 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -67,8 +67,9 @@ func (s *EthereumAPI) GasPrice(ctx context.Context) (*hexutil.Big, error) { if err != nil { return nil, err } - if head := s.b.CurrentHeader(); head.BaseFee != nil { - tipcap.Add(tipcap, head.BaseFee) + pendingBlock, _ := s.b.PendingBlockAndReceipts() + if pendingBlock != nil && pendingBlock.BaseFee() != nil { + tipcap.Add(tipcap, pendingBlock.BaseFee()) } return (*hexutil.Big)(tipcap), err } From 6c4baae8e6460fa8d0df2d1fa4b8fec7c23f9f1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=96mer=20Faruk=20Irmak?= Date: Thu, 31 Oct 2024 16:29:13 +0300 Subject: [PATCH 40/41] feat: bring ZkTrie in (#1076) Co-authored-by: colinlyguo --- cmd/evm/runner.go | 2 - cmd/utils/flags.go | 14 +- core/blockchain.go | 14 +- core/blockchain_repair_test.go | 3 +- core/blockchain_sethead_test.go | 2 +- core/blockchain_snapshot_test.go | 3 + core/blockchain_test.go | 91 +- core/chain_makers.go | 12 +- core/chain_makers_test.go | 1 + core/forkid/forkid_test.go | 3 + core/genesis.go | 20 +- core/genesis_test.go | 10 +- core/state/database.go | 16 - core/state/iterator_test.go | 3 + core/state/snapshot/difflayer_test.go | 3 + core/state/snapshot/disklayer_test.go | 3 + core/state/snapshot/generate_test.go | 3 + core/state/snapshot/holdable_iterator_test.go | 3 + 
core/state/snapshot/iterator_test.go | 3 + core/state/snapshot/snapshot_test.go | 3 + core/state/state_object.go | 21 +- core/state/state_prove.go | 85 -- core/state/state_test.go | 2 + core/state/statedb.go | 12 +- core/state/statedb_fuzz_test.go | 1 + core/state/statedb_test.go | 8 +- core/state/sync_test.go | 3 + core/state_processor.go | 2 +- core/state_processor_test.go | 8 +- core/txpool/blobpool/blobpool_test.go | 7 +- core/txpool/legacypool/legacypool_test.go | 1 + core/types/hashes.go | 14 +- core/types/hashing_test.go | 1 + core/types/state_account_marshalling.go | 6 +- core/types/state_account_marshalling_test.go | 46 +- core/vm/gas_table_test.go | 81 +- core/vm/logger.go | 2 - core/vm/runtime/runtime_test.go | 17 +- eth/filters/filter_system_test.go | 2 +- eth/filters/filter_test.go | 2 +- eth/tracers/js/goja.go | 4 - eth/tracers/logger/access_list_tracer.go | 4 - eth/tracers/logger/logger.go | 4 - eth/tracers/logger/logger_json.go | 4 - eth/tracers/native/4byte.go | 4 - eth/tracers/native/call.go | 4 - eth/tracers/native/call_flat.go | 4 - eth/tracers/native/mux.go | 4 - eth/tracers/native/noop.go | 2 - go.mod | 1 - go.sum | 2 - internal/ethapi/api_test.go | 7 +- rollup/ccc/async_checker_test.go | 2 +- rollup/ccc/logger.go | 4 - .../tracing/proof.go | 65 +- rollup/tracing/proof_test.go | 111 ++ rollup/tracing/tracing.go | 49 +- tests/fuzzers/trie/trie-fuzzer.go | 2 +- trie/byte32.go | 42 + trie/byte32_test.go | 44 + trie/database.go | 54 +- trie/database_supplement.go | 32 - trie/hash.go | 149 ++ trie/hash_test.go | 83 + trie/iterator.go | 4 +- trie/iterator_test.go | 28 +- trie/node_test.go | 3 + trie/proof.go | 7 +- trie/proof_test.go | 3 + trie/secure_trie.go | 50 +- trie/secure_trie_test.go | 9 +- trie/stacktrie.go | 7 +- trie/stacktrie_test.go | 9 + trie/sync.go | 2 +- trie/sync_test.go | 15 +- trie/tracer_test.go | 11 +- trie/trie.go | 10 +- trie/trie_reader.go | 13 +- trie/trie_test.go | 29 +- trie/triedb/hashdb/database.go | 8 +- 
trie/triedb/hashdb/database_supplement.go | 15 - trie/util.go | 117 ++ trie/util_test.go | 96 ++ trie/zk_trie.go | 1346 +++++++++++++++-- trie/zk_trie_database.go | 172 --- trie/zk_trie_database_test.go | 63 - trie/zk_trie_impl_test.go | 289 ---- trie/zk_trie_node.go | 405 +++++ trie/zk_trie_node_test.go | 240 +++ trie/zk_trie_proof_test.go | 200 +-- trie/zk_trie_test.go | 919 ++++++++--- 91 files changed, 3653 insertions(+), 1631 deletions(-) delete mode 100644 core/state/state_prove.go rename trie/zktrie_deletionproof.go => rollup/tracing/proof.go (71%) create mode 100644 rollup/tracing/proof_test.go create mode 100644 trie/byte32.go create mode 100644 trie/byte32_test.go delete mode 100644 trie/database_supplement.go create mode 100644 trie/hash.go create mode 100644 trie/hash_test.go delete mode 100644 trie/triedb/hashdb/database_supplement.go create mode 100644 trie/util.go create mode 100644 trie/util_test.go delete mode 100644 trie/zk_trie_database.go delete mode 100644 trie/zk_trie_database_test.go delete mode 100644 trie/zk_trie_impl_test.go create mode 100644 trie/zk_trie_node.go create mode 100644 trie/zk_trie_node_test.go diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index ea539490edde..81d2c6d62256 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -151,8 +151,6 @@ func runCmd(ctx *cli.Context) error { triedb := trie.NewDatabase(db, &trie.Config{ Preimages: preimages, HashDB: hashdb.Defaults, - // scroll related - IsUsingZktrie: genesisConfig.Config.Scroll.ZktrieEnabled(), }) defer triedb.Close() genesis := genesisConfig.MustCommit(db, triedb) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 25a61c20838e..f934dd4801ed 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -283,7 +283,7 @@ var ( GCModeFlag = &cli.StringFlag{ Name: "gcmode", Usage: `Blockchain garbage collection mode, only relevant in state.scheme=hash ("full", "archive")`, - Value: GCModeArchive, + Value: GCModeFull, Category: flags.StateCategory, } 
StateSchemeFlag = &cli.StringFlag{ @@ -2056,12 +2056,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { stack.Config().L1Confirmations = rpc.FinalizedBlockNumber log.Info("Setting flag", "--l1.sync.startblock", "4038000") stack.Config().L1DeploymentBlock = 4038000 - // disable pruning - if ctx.String(GCModeFlag.Name) != GCModeArchive { - log.Crit("Must use --gcmode=archive") - } - log.Info("Pruning disabled") - cfg.NoPruning = true case ctx.Bool(ScrollFlag.Name): if !ctx.IsSet(NetworkIdFlag.Name) { cfg.NetworkId = 534352 @@ -2072,12 +2066,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { stack.Config().L1Confirmations = rpc.FinalizedBlockNumber log.Info("Setting flag", "--l1.sync.startblock", "18306000") stack.Config().L1DeploymentBlock = 18306000 - // disable pruning - if ctx.String(GCModeFlag.Name) != GCModeArchive { - log.Crit("Must use --gcmode=archive") - } - log.Info("Pruning disabled") - cfg.NoPruning = true case ctx.Bool(DeveloperFlag.Name): if !ctx.IsSet(NetworkIdFlag.Name) { cfg.NetworkId = 1337 diff --git a/core/blockchain.go b/core/blockchain.go index 0306f1681b3c..db608897e297 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -153,8 +153,8 @@ type CacheConfig struct { } // triedbConfig derives the configures for trie database. 
-func (c *CacheConfig) triedbConfig(isUsingZktrie bool) *trie.Config { - config := &trie.Config{Preimages: c.Preimages, IsUsingZktrie: isUsingZktrie} +func (c *CacheConfig) triedbConfig() *trie.Config { + config := &trie.Config{Preimages: c.Preimages} if c.StateScheme == rawdb.HashScheme { config.HashDB = &hashdb.Config{ CleanCacheSize: c.TrieCleanLimit * 1024 * 1024, @@ -176,8 +176,8 @@ var defaultCacheConfig = &CacheConfig{ TrieCleanLimit: 256, TrieDirtyLimit: 256, TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 256, - SnapshotWait: true, + SnapshotLimit: 0, // Snapshots don't support zkTrie yet + SnapshotWait: false, StateScheme: rawdb.HashScheme, } @@ -272,11 +272,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis cacheConfig = defaultCacheConfig } // Open trie database with provided config - triedbConfig := cacheConfig.triedbConfig(false) - if genesis != nil && genesis.Config != nil && genesis.Config.Scroll.ZktrieEnabled() { - cacheConfig.triedbConfig(true) - } - triedb := trie.NewDatabase(db, triedbConfig) + triedb := trie.NewDatabase(db, cacheConfig.triedbConfig()) // Setup the genesis block, commit the provided genesis specification // to database if the genesis block is not present yet, or load the diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index 987816d95cb5..9edb9b8c5cbe 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -1750,7 +1750,7 @@ func testLongReorgedSnapSyncingDeepRepair(t *testing.T, snapshots bool) { } func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + for _, scheme := range []string{rawdb.HashScheme /*, rawdb.PathScheme*/} { testRepairWithScheme(t, tt, snapshots, scheme) } } @@ -1898,6 +1898,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s // In this case the snapshot layer of B3 is not created because of existent // 
state. func TestIssue23496(t *testing.T) { + t.Skip("snapshot doesn't support zktrie") testIssue23496(t, rawdb.HashScheme) testIssue23496(t, rawdb.PathScheme) } diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go index e16fe57eec50..3e98a76f54b7 100644 --- a/core/blockchain_sethead_test.go +++ b/core/blockchain_sethead_test.go @@ -1954,7 +1954,7 @@ func testLongReorgedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) { } func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + for _, scheme := range []string{rawdb.HashScheme /*, rawdb.PathScheme*/} { testSetHeadWithScheme(t, tt, snapshots, scheme) } } diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go index 4a0900657dd3..89cf7bd22159 100644 --- a/core/blockchain_snapshot_test.go +++ b/core/blockchain_snapshot_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2020 The go-ethereum Authors // This file is part of the go-ethereum library. 
// diff --git a/core/blockchain_test.go b/core/blockchain_test.go index bc4772f46d74..548f0868f736 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -279,7 +279,7 @@ func TestExtendCanonicalHeaders(t *testing.T) { } func TestExtendCanonicalBlocks(t *testing.T) { testExtendCanonical(t, true, rawdb.HashScheme) - testExtendCanonical(t, true, rawdb.PathScheme) + // testExtendCanonical(t, true, rawdb.PathScheme) } func testExtendCanonical(t *testing.T, full bool, scheme string) { @@ -313,7 +313,7 @@ func TestExtendCanonicalHeadersAfterMerge(t *testing.T) { } func TestExtendCanonicalBlocksAfterMerge(t *testing.T) { testExtendCanonicalAfterMerge(t, true, rawdb.HashScheme) - testExtendCanonicalAfterMerge(t, true, rawdb.PathScheme) + // testExtendCanonicalAfterMerge(t, true, rawdb.PathScheme) } func testExtendCanonicalAfterMerge(t *testing.T, full bool, scheme string) { @@ -338,7 +338,7 @@ func TestShorterForkHeaders(t *testing.T) { } func TestShorterForkBlocks(t *testing.T) { testShorterFork(t, true, rawdb.HashScheme) - testShorterFork(t, true, rawdb.PathScheme) + // testShorterFork(t, true, rawdb.PathScheme) } func testShorterFork(t *testing.T, full bool, scheme string) { @@ -374,7 +374,7 @@ func TestShorterForkHeadersAfterMerge(t *testing.T) { } func TestShorterForkBlocksAfterMerge(t *testing.T) { testShorterForkAfterMerge(t, true, rawdb.HashScheme) - testShorterForkAfterMerge(t, true, rawdb.PathScheme) + // testShorterForkAfterMerge(t, true, rawdb.PathScheme) } func testShorterForkAfterMerge(t *testing.T, full bool, scheme string) { @@ -403,7 +403,7 @@ func TestLongerForkHeaders(t *testing.T) { } func TestLongerForkBlocks(t *testing.T) { testLongerFork(t, true, rawdb.HashScheme) - testLongerFork(t, true, rawdb.PathScheme) + // testLongerFork(t, true, rawdb.PathScheme) } func testLongerFork(t *testing.T, full bool, scheme string) { @@ -432,7 +432,7 @@ func TestLongerForkHeadersAfterMerge(t *testing.T) { } func TestLongerForkBlocksAfterMerge(t 
*testing.T) { testLongerForkAfterMerge(t, true, rawdb.HashScheme) - testLongerForkAfterMerge(t, true, rawdb.PathScheme) + // testLongerForkAfterMerge(t, true, rawdb.PathScheme) } func testLongerForkAfterMerge(t *testing.T, full bool, scheme string) { @@ -461,7 +461,7 @@ func TestEqualForkHeaders(t *testing.T) { } func TestEqualForkBlocks(t *testing.T) { testEqualFork(t, true, rawdb.HashScheme) - testEqualFork(t, true, rawdb.PathScheme) + // testEqualFork(t, true, rawdb.PathScheme) } func testEqualFork(t *testing.T, full bool, scheme string) { @@ -497,7 +497,7 @@ func TestEqualForkHeadersAfterMerge(t *testing.T) { } func TestEqualForkBlocksAfterMerge(t *testing.T) { testEqualForkAfterMerge(t, true, rawdb.HashScheme) - testEqualForkAfterMerge(t, true, rawdb.PathScheme) + // testEqualForkAfterMerge(t, true, rawdb.PathScheme) } func testEqualForkAfterMerge(t *testing.T, full bool, scheme string) { @@ -525,7 +525,7 @@ func TestBrokenHeaderChain(t *testing.T) { } func TestBrokenBlockChain(t *testing.T) { testBrokenChain(t, true, rawdb.HashScheme) - testBrokenChain(t, true, rawdb.PathScheme) + // testBrokenChain(t, true, rawdb.PathScheme) } func testBrokenChain(t *testing.T, full bool, scheme string) { @@ -558,7 +558,7 @@ func TestReorgLongHeaders(t *testing.T) { } func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true, rawdb.HashScheme) - testReorgLong(t, true, rawdb.PathScheme) + // testReorgLong(t, true, rawdb.PathScheme) } func testReorgLong(t *testing.T, full bool, scheme string) { @@ -573,7 +573,7 @@ func TestReorgShortHeaders(t *testing.T) { } func TestReorgShortBlocks(t *testing.T) { testReorgShort(t, true, rawdb.HashScheme) - testReorgShort(t, true, rawdb.PathScheme) + // testReorgShort(t, true, rawdb.PathScheme) } func testReorgShort(t *testing.T, full bool, scheme string) { @@ -667,7 +667,7 @@ func TestBadHeaderHashes(t *testing.T) { } func TestBadBlockHashes(t *testing.T) { testBadHashes(t, true, rawdb.HashScheme) - testBadHashes(t, true, 
rawdb.PathScheme) + // testBadHashes(t, true, rawdb.PathScheme) } func testBadHashes(t *testing.T, full bool, scheme string) { @@ -707,7 +707,7 @@ func TestReorgBadHeaderHashes(t *testing.T) { } func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true, rawdb.HashScheme) - testReorgBadHashes(t, true, rawdb.PathScheme) + // testReorgBadHashes(t, true, rawdb.PathScheme) } func testReorgBadHashes(t *testing.T, full bool, scheme string) { @@ -768,7 +768,7 @@ func TestHeadersInsertNonceError(t *testing.T) { } func TestBlocksInsertNonceError(t *testing.T) { testInsertNonceError(t, true, rawdb.HashScheme) - testInsertNonceError(t, true, rawdb.PathScheme) + // testInsertNonceError(t, true, rawdb.PathScheme) } func testInsertNonceError(t *testing.T, full bool, scheme string) { @@ -830,7 +830,7 @@ func testInsertNonceError(t *testing.T, full bool, scheme string) { // classical full block processing. func TestFastVsFullChains(t *testing.T) { testFastVsFullChains(t, rawdb.HashScheme) - testFastVsFullChains(t, rawdb.PathScheme) + // testFastVsFullChains(t, rawdb.PathScheme) } func testFastVsFullChains(t *testing.T, scheme string) { @@ -963,7 +963,7 @@ func testFastVsFullChains(t *testing.T, scheme string) { // positions. func TestLightVsFastVsFullChainHeads(t *testing.T) { testLightVsFastVsFullChainHeads(t, rawdb.HashScheme) - testLightVsFastVsFullChainHeads(t, rawdb.PathScheme) + // testLightVsFastVsFullChainHeads(t, rawdb.PathScheme) } func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { @@ -1080,7 +1080,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { // Tests that chain reorganisations handle transaction removals and reinsertions. 
func TestChainTxReorgs(t *testing.T) { testChainTxReorgs(t, rawdb.HashScheme) - testChainTxReorgs(t, rawdb.PathScheme) + // testChainTxReorgs(t, rawdb.PathScheme) } func testChainTxReorgs(t *testing.T, scheme string) { @@ -1199,7 +1199,7 @@ func testChainTxReorgs(t *testing.T, scheme string) { func TestLogReorgs(t *testing.T) { testLogReorgs(t, rawdb.HashScheme) - testLogReorgs(t, rawdb.PathScheme) + // testLogReorgs(t, rawdb.PathScheme) } func testLogReorgs(t *testing.T, scheme string) { @@ -1259,7 +1259,7 @@ var logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd // when the chain reorganizes. func TestLogRebirth(t *testing.T) { testLogRebirth(t, rawdb.HashScheme) - testLogRebirth(t, rawdb.PathScheme) + // testLogRebirth(t, rawdb.PathScheme) } func testLogRebirth(t *testing.T, scheme string) { @@ -1341,7 +1341,7 @@ func testLogRebirth(t *testing.T, scheme string) { // when a side chain containing log events overtakes the canonical chain. func TestSideLogRebirth(t *testing.T) { testSideLogRebirth(t, rawdb.HashScheme) - testSideLogRebirth(t, rawdb.PathScheme) + // testSideLogRebirth(t, rawdb.PathScheme) } func testSideLogRebirth(t *testing.T, scheme string) { @@ -1436,7 +1436,7 @@ func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan Re func TestReorgSideEvent(t *testing.T) { testReorgSideEvent(t, rawdb.HashScheme) - testReorgSideEvent(t, rawdb.PathScheme) + // testReorgSideEvent(t, rawdb.PathScheme) } func testReorgSideEvent(t *testing.T, scheme string) { @@ -1521,7 +1521,7 @@ done: // Tests if the canonical block can be fetched from the database during chain insertion. 
func TestCanonicalBlockRetrieval(t *testing.T) { testCanonicalBlockRetrieval(t, rawdb.HashScheme) - testCanonicalBlockRetrieval(t, rawdb.PathScheme) + // testCanonicalBlockRetrieval(t, rawdb.PathScheme) } func testCanonicalBlockRetrieval(t *testing.T, scheme string) { @@ -1571,7 +1571,7 @@ func testCanonicalBlockRetrieval(t *testing.T, scheme string) { } func TestEIP155Transition(t *testing.T) { testEIP155Transition(t, rawdb.HashScheme) - testEIP155Transition(t, rawdb.PathScheme) + // testEIP155Transition(t, rawdb.PathScheme) } func testEIP155Transition(t *testing.T, scheme string) { @@ -1685,7 +1685,7 @@ func testEIP155Transition(t *testing.T, scheme string) { } func TestEIP161AccountRemoval(t *testing.T) { testEIP161AccountRemoval(t, rawdb.HashScheme) - testEIP161AccountRemoval(t, rawdb.PathScheme) + // testEIP161AccountRemoval(t, rawdb.PathScheme) } func testEIP161AccountRemoval(t *testing.T, scheme string) { @@ -1760,7 +1760,7 @@ func testEIP161AccountRemoval(t *testing.T, scheme string) { // https://github.com/ethereum/go-ethereum/pull/15941 func TestBlockchainHeaderchainReorgConsistency(t *testing.T) { testBlockchainHeaderchainReorgConsistency(t, rawdb.HashScheme) - testBlockchainHeaderchainReorgConsistency(t, rawdb.PathScheme) + // testBlockchainHeaderchainReorgConsistency(t, rawdb.PathScheme) } func testBlockchainHeaderchainReorgConsistency(t *testing.T, scheme string) { @@ -1856,7 +1856,7 @@ func TestTrieForkGC(t *testing.T) { // forking point is not available any more. 
func TestLargeReorgTrieGC(t *testing.T) { testLargeReorgTrieGC(t, rawdb.HashScheme) - testLargeReorgTrieGC(t, rawdb.PathScheme) + // testLargeReorgTrieGC(t, rawdb.PathScheme) } func testLargeReorgTrieGC(t *testing.T, scheme string) { @@ -1865,6 +1865,10 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) { genesis := &Genesis{ Config: params.TestChainConfig, BaseFee: big.NewInt(params.InitialBaseFee), + Alloc: GenesisAlloc{ + common.Address{2}: {Balance: big.NewInt(1)}, + common.Address{3}: {Balance: big.NewInt(1)}, + }, } genDb, shared, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) }) @@ -1925,7 +1929,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) { func TestBlockchainRecovery(t *testing.T) { testBlockchainRecovery(t, rawdb.HashScheme) - testBlockchainRecovery(t, rawdb.PathScheme) + // testBlockchainRecovery(t, rawdb.PathScheme) } func testBlockchainRecovery(t *testing.T, scheme string) { @@ -1981,7 +1985,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) { // This test checks that InsertReceiptChain will roll back correctly when attempting to insert a side chain. 
func TestInsertReceiptChainRollback(t *testing.T) { testInsertReceiptChainRollback(t, rawdb.HashScheme) - testInsertReceiptChainRollback(t, rawdb.PathScheme) + // testInsertReceiptChainRollback(t, rawdb.PathScheme) } func testInsertReceiptChainRollback(t *testing.T, scheme string) { @@ -2063,7 +2067,7 @@ func testInsertReceiptChainRollback(t *testing.T, scheme string) { // - https://github.com/ethereum/go-ethereum/pull/18988 func TestLowDiffLongChain(t *testing.T) { testLowDiffLongChain(t, rawdb.HashScheme) - testLowDiffLongChain(t, rawdb.PathScheme) + // testLowDiffLongChain(t, rawdb.PathScheme) } func testLowDiffLongChain(t *testing.T, scheme string) { @@ -2270,15 +2274,15 @@ func TestPrunedImportSideWithMerging(t *testing.T) { func TestInsertKnownHeaders(t *testing.T) { testInsertKnownChainData(t, "headers", rawdb.HashScheme) - testInsertKnownChainData(t, "headers", rawdb.PathScheme) + // testInsertKnownChainData(t, "headers", rawdb.PathScheme) } func TestInsertKnownReceiptChain(t *testing.T) { testInsertKnownChainData(t, "receipts", rawdb.HashScheme) - testInsertKnownChainData(t, "receipts", rawdb.PathScheme) + // testInsertKnownChainData(t, "receipts", rawdb.PathScheme) } func TestInsertKnownBlocks(t *testing.T) { testInsertKnownChainData(t, "blocks", rawdb.HashScheme) - testInsertKnownChainData(t, "blocks", rawdb.PathScheme) + // testInsertKnownChainData(t, "blocks", rawdb.PathScheme) } func testInsertKnownChainData(t *testing.T, typ string, scheme string) { @@ -2636,7 +2640,7 @@ func getLongAndShortChains(scheme string) (*BlockChain, []*types.Block, []*types // 4. 
The forked block should still be retrievable by hash func TestReorgToShorterRemovesCanonMapping(t *testing.T) { testReorgToShorterRemovesCanonMapping(t, rawdb.HashScheme) - testReorgToShorterRemovesCanonMapping(t, rawdb.PathScheme) + // testReorgToShorterRemovesCanonMapping(t, rawdb.PathScheme) } func testReorgToShorterRemovesCanonMapping(t *testing.T, scheme string) { @@ -2679,7 +2683,7 @@ func testReorgToShorterRemovesCanonMapping(t *testing.T, scheme string) { // imports -- that is, for fast sync func TestReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T) { testReorgToShorterRemovesCanonMappingHeaderChain(t, rawdb.HashScheme) - testReorgToShorterRemovesCanonMappingHeaderChain(t, rawdb.PathScheme) + // testReorgToShorterRemovesCanonMappingHeaderChain(t, rawdb.PathScheme) } func testReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T, scheme string) { @@ -2827,7 +2831,7 @@ func TestTransactionIndices(t *testing.T) { func TestSkipStaleTxIndicesInSnapSync(t *testing.T) { testSkipStaleTxIndicesInSnapSync(t, rawdb.HashScheme) - testSkipStaleTxIndicesInSnapSync(t, rawdb.PathScheme) + // testSkipStaleTxIndicesInSnapSync(t, rawdb.PathScheme) } func testSkipStaleTxIndicesInSnapSync(t *testing.T, scheme string) { @@ -3024,7 +3028,7 @@ func BenchmarkBlockChain_1x1000Executions(b *testing.B) { // 3. The blocks fetched are all known and canonical blocks func TestSideImportPrunedBlocks(t *testing.T) { testSideImportPrunedBlocks(t, rawdb.HashScheme) - testSideImportPrunedBlocks(t, rawdb.PathScheme) + // testSideImportPrunedBlocks(t, rawdb.PathScheme) } func testSideImportPrunedBlocks(t *testing.T, scheme string) { @@ -3082,7 +3086,7 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) { // first, but the journal wiped the entire state object on create-revert. 
func TestDeleteCreateRevert(t *testing.T) { testDeleteCreateRevert(t, rawdb.HashScheme) - testDeleteCreateRevert(t, rawdb.PathScheme) + // testDeleteCreateRevert(t, rawdb.PathScheme) } func testDeleteCreateRevert(t *testing.T, scheme string) { @@ -3156,6 +3160,7 @@ func testDeleteCreateRevert(t *testing.T, scheme string) { // Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct, // and then the new slots exist func TestDeleteRecreateSlots(t *testing.T) { + t.Skip("Scroll doesn't support SELFDESTRUCT") testDeleteRecreateSlots(t, rawdb.HashScheme) testDeleteRecreateSlots(t, rawdb.PathScheme) } @@ -3284,6 +3289,7 @@ func testDeleteRecreateSlots(t *testing.T, scheme string) { // regular value-transfer // Expected outcome is that _all_ slots are cleared from A func TestDeleteRecreateAccount(t *testing.T) { + t.Skip("Scroll doesn't support SELFDESTRUCT") testDeleteRecreateAccount(t, rawdb.HashScheme) testDeleteRecreateAccount(t, rawdb.PathScheme) } @@ -3362,6 +3368,7 @@ func testDeleteRecreateAccount(t *testing.T, scheme string) { // Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct, // and then the new slots exist func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) { + t.Skip("Scroll doesn't support SELFDESTRUCT") testDeleteRecreateSlotsAcrossManyBlocks(t, rawdb.HashScheme) testDeleteRecreateSlotsAcrossManyBlocks(t, rawdb.PathScheme) } @@ -3569,7 +3576,7 @@ func testDeleteRecreateSlotsAcrossManyBlocks(t *testing.T, scheme string) { func TestInitThenFailCreateContract(t *testing.T) { testInitThenFailCreateContract(t, rawdb.HashScheme) - testInitThenFailCreateContract(t, rawdb.PathScheme) + // testInitThenFailCreateContract(t, rawdb.PathScheme) } func testInitThenFailCreateContract(t *testing.T, scheme string) { @@ -3684,7 +3691,7 @@ func testInitThenFailCreateContract(t *testing.T, scheme string) { // correctly. 
func TestEIP2718Transition(t *testing.T) { testEIP2718Transition(t, rawdb.HashScheme) - testEIP2718Transition(t, rawdb.PathScheme) + // testEIP2718Transition(t, rawdb.PathScheme) } func testEIP2718Transition(t *testing.T, scheme string) { @@ -3766,7 +3773,7 @@ func testEIP2718Transition(t *testing.T, scheme string) { // 6. Legacy transaction behave as expected (e.g. gasPrice = gasFeeCap = gasTipCap). func TestEIP1559Transition(t *testing.T) { testEIP1559Transition(t, rawdb.HashScheme) - testEIP1559Transition(t, rawdb.PathScheme) + // testEIP1559Transition(t, rawdb.PathScheme) } func testEIP1559Transition(t *testing.T, scheme string) { @@ -3912,7 +3919,7 @@ func testEIP1559Transition(t *testing.T, scheme string) { // It expects the state is recovered and all relevant chain markers are set correctly. func TestSetCanonical(t *testing.T) { testSetCanonical(t, rawdb.HashScheme) - testSetCanonical(t, rawdb.PathScheme) + // testSetCanonical(t, rawdb.PathScheme) } func testSetCanonical(t *testing.T, scheme string) { @@ -3999,7 +4006,7 @@ func testSetCanonical(t *testing.T, scheme string) { // correctly in case reorg is called. func TestCanonicalHashMarker(t *testing.T) { testCanonicalHashMarker(t, rawdb.HashScheme) - testCanonicalHashMarker(t, rawdb.PathScheme) + // testCanonicalHashMarker(t, rawdb.PathScheme) } func testCanonicalHashMarker(t *testing.T, scheme string) { diff --git a/core/chain_makers.go b/core/chain_makers.go index 47979b030d1c..31f70a321d6d 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -354,11 +354,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse return nil, nil } // Forcibly use hash-based state scheme for retaining all nodes in disk. 
- trieConfig := trie.HashDefaults - if config.Scroll.ZktrieEnabled() { - trieConfig = trie.HashDefaultsWithZktrie - } - triedb := trie.NewDatabase(db, trieConfig) + triedb := trie.NewDatabase(db, trie.HashDefaults) defer triedb.Close() for i := 0; i < n; i++ { @@ -379,11 +375,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse // then generate chain on top. func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts) { db := rawdb.NewMemoryDatabase() - trieConfig := trie.HashDefaults - if genesis.Config != nil && genesis.Config.Scroll.ZktrieEnabled() { - trieConfig = trie.HashDefaultsWithZktrie - } - triedb := trie.NewDatabase(db, trieConfig) + triedb := trie.NewDatabase(db, trie.HashDefaults) defer triedb.Close() _, err := genesis.Commit(db, triedb) if err != nil { diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go index dd2164a33e23..b1f6ba9be68e 100644 --- a/core/chain_makers_test.go +++ b/core/chain_makers_test.go @@ -33,6 +33,7 @@ import ( ) func TestGeneratePOSChain(t *testing.T) { + t.Skip("POS is out of scope") var ( keyHex = "9c647b8b7c4e7c3490668fb6c11473619db80c93704c70893d3813af4090c39c" key, _ = crypto.HexToECDSA(keyHex) diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index 54d7bff8ba5d..9dd1f3015c87 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2019 The go-ethereum Authors // This file is part of the go-ethereum library. 
// diff --git a/core/genesis.go b/core/genesis.go index a939e0174e34..210b48cf29eb 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -125,11 +125,7 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error { func (ga *GenesisAlloc) hash(isUsingZktrie bool) (common.Hash, error) { // Create an ephemeral in-memory database for computing hash, // all the derived states will be discarded to not pollute disk. - trieConfig := trie.HashDefaults - if isUsingZktrie { - trieConfig = trie.HashDefaultsWithZktrie - } - db := state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), trieConfig) + db := state.NewDatabase(rawdb.NewMemoryDatabase()) statedb, err := state.New(types.EmptyRootHash, db, nil) if err != nil { return common.Hash{}, err @@ -292,10 +288,6 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen } else { log.Info("Writing custom genesis block") } - if genesis.Config.Scroll.ZktrieEnabled() { // genesis.Config must be not nil atm - // overwrite triedb IsUsingZktrie config to be safe - triedb.SetIsUsingZktrie(genesis.Config.Scroll.ZktrieEnabled()) - } block, err := genesis.Commit(db, triedb) if err != nil { return genesis.Config, common.Hash{}, err @@ -309,13 +301,6 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen // in this case. 
header := rawdb.ReadHeader(db, stored, 0) storedcfg := rawdb.ReadChainConfig(db, stored) - if genesis != nil { // genesis.Config must be not nil atm - // overwrite triedb IsUsingZktrie config to be safe - triedb.SetIsUsingZktrie(genesis.Config.Scroll.ZktrieEnabled()) - } else if storedcfg != nil && storedcfg.Scroll.ZktrieEnabled() { - // overwrite triedb IsUsingZktrie config to be safe - triedb.SetIsUsingZktrie(storedcfg.Scroll.ZktrieEnabled()) - } // if header.Root != types.EmptyRootHash && !triedb.Initialized(header.Hash()) { if _, err := state.New(header.Root, state.NewDatabaseWithNodeDB(db, triedb), nil); err != nil { if genesis == nil { @@ -496,9 +481,6 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block if config == nil { config = params.AllEthashProtocolChanges } - if config.Scroll.ZktrieEnabled() != triedb.IsUsingZktrie() { - return nil, fmt.Errorf("ZktrieEnabled mismatch. genesis: %v, triedb: %v", g.Config.Scroll.ZktrieEnabled(), triedb.IsUsingZktrie()) - } block := g.ToBlock() if block.Number().Sign() != 0 { diff --git a/core/genesis_test.go b/core/genesis_test.go index 8784bace214e..7c5d6cf3431e 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -44,12 +44,12 @@ func TestInvalidCliqueConfig(t *testing.T) { func TestSetupGenesis(t *testing.T) { testSetupGenesis(t, rawdb.HashScheme) - testSetupGenesis(t, rawdb.PathScheme) + // testSetupGenesis(t, rawdb.PathScheme) } func testSetupGenesis(t *testing.T, scheme string) { var ( - customghash = common.HexToHash("0x700380ab70d789c462c4e8f0db082842095321f390d0a3f25f400f0746db32bc") + customghash = common.HexToHash("0xc96ed5df64e683d5af1b14ec67126e31b914ca828021c330efa00572a61ede8f") customg = Genesis{ Config: ¶ms.ChainConfig{HomesteadBlock: big.NewInt(3)}, Alloc: GenesisAlloc{ @@ -189,11 +189,7 @@ func TestGenesisHashes(t *testing.T) { } { // Test via MustCommit db := rawdb.NewMemoryDatabase() - trieConfig := trie.HashDefaults - if 
c.genesis.Config.Scroll.ZktrieEnabled() { - trieConfig = trie.HashDefaultsWithZktrie - } - if have := c.genesis.MustCommit(db, trie.NewDatabase(db, trieConfig)).Hash(); have != c.want { + if have := c.genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)).Hash(); have != c.want { t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex()) } // Test via ToBlock diff --git a/core/state/database.go b/core/state/database.go index 1b7cc0e23006..7dd410264eb8 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -170,13 +170,6 @@ type cachingDB struct { // OpenTrie opens the main account trie at a specific root hash. func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { - if db.triedb.IsUsingZktrie() { - tr, err := trie.NewZkTrie(root, trie.NewZktrieDatabaseFromTriedb(db.triedb)) - if err != nil { - return nil, err - } - return tr, nil - } tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb) if err != nil { return nil, err @@ -186,13 +179,6 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { // OpenStorageTrie opens the storage trie of an account. 
func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error) { - if db.triedb.IsUsingZktrie() { - tr, err := trie.NewZkTrie(root, trie.NewZktrieDatabaseFromTriedb(db.triedb)) - if err != nil { - return nil, err - } - return tr, nil - } tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, crypto.Keccak256Hash(address.Bytes()), root), db.triedb) if err != nil { return nil, err @@ -205,8 +191,6 @@ func (db *cachingDB) CopyTrie(t Trie) Trie { switch t := t.(type) { case *trie.StateTrie: return t.Copy() - case *trie.ZkTrie: - return t.Copy() default: panic(fmt.Errorf("unknown trie type %T", t)) } diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go index 29981d3c3939..b1c39386d015 100644 --- a/core/state/iterator_test.go +++ b/core/state/iterator_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2016 The go-ethereum Authors // This file is part of the go-ethereum library. // diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go index 393de837e859..a29e95f02a01 100644 --- a/core/state/snapshot/difflayer_test.go +++ b/core/state/snapshot/difflayer_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2019 The go-ethereum Authors // This file is part of the go-ethereum library. // diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go index ee8182f95137..fd9d50bd28b5 100644 --- a/core/state/snapshot/disklayer_test.go +++ b/core/state/snapshot/disklayer_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2019 The go-ethereum Authors // This file is part of the go-ethereum library. 
// diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go index 9bc87c180d0d..9c963955f339 100644 --- a/core/state/snapshot/generate_test.go +++ b/core/state/snapshot/generate_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2019 The go-ethereum Authors // This file is part of the go-ethereum library. // diff --git a/core/state/snapshot/holdable_iterator_test.go b/core/state/snapshot/holdable_iterator_test.go index d699744ca9bd..31609912e05b 100644 --- a/core/state/snapshot/holdable_iterator_test.go +++ b/core/state/snapshot/holdable_iterator_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2022 The go-ethereum Authors // This file is part of the go-ethereum library. // diff --git a/core/state/snapshot/iterator_test.go b/core/state/snapshot/iterator_test.go index 18490392bb75..b708c7637b94 100644 --- a/core/state/snapshot/iterator_test.go +++ b/core/state/snapshot/iterator_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2019 The go-ethereum Authors // This file is part of the go-ethereum library. // diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go index a068d85a6fa7..fc313cfdea47 100644 --- a/core/state/snapshot/snapshot_test.go +++ b/core/state/snapshot/snapshot_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2017 The go-ethereum Authors // This file is part of the go-ethereum library. 
// diff --git a/core/state/state_object.go b/core/state/state_object.go index 6fa3fac156bd..d4f6fba8dcf0 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -143,7 +143,7 @@ func (s *stateObject) touch() { func (s *stateObject) getTrie() (Trie, error) { if s.trie == nil { // Try fetching from prefetcher first - if s.data.Root != s.db.db.TrieDB().EmptyRoot() && s.db.prefetcher != nil { + if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil { // When the miner is creating the pending state, there is no prefetcher s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root) } @@ -199,15 +199,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { if metrics.EnabledExpensive { s.db.SnapshotStorageReads += time.Since(start) } - if s.db.db.TrieDB().IsUsingZktrie() { - value = common.BytesToHash(enc) - } else if len(enc) > 0 { - _, content, _, err := rlp.Split(enc) - if err != nil { - s.db.setError(err) - } - value.SetBytes(content) - } + value = common.BytesToHash(enc) } // If the snapshot is unavailable or reading from it fails, load from the database. if s.db.snap == nil || err != nil { @@ -261,7 +253,7 @@ func (s *stateObject) finalise(prefetch bool) { slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure } } - if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != s.db.db.TrieDB().EmptyRoot() { + if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash { s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch) } if len(s.dirtyStorage) > 0 { @@ -316,12 +308,7 @@ func (s *stateObject) updateTrie() (Trie, error) { s.db.StorageDeleted += 1 } else { trimmed := common.TrimLeftZeroes(value[:]) - if s.db.db.TrieDB().IsUsingZktrie() { - encoded = common.CopyBytes(value[:]) - } else { - // Encoding []byte cannot fail, ok to ignore the error. 
- encoded, _ = rlp.EncodeToBytes(trimmed) - } + encoded = common.CopyBytes(value[:]) if err := tr.UpdateStorage(s.address, key[:], trimmed); err != nil { s.db.setError(err) return nil, err diff --git a/core/state/state_prove.go b/core/state/state_prove.go deleted file mode 100644 index 5fc176023f5e..000000000000 --- a/core/state/state_prove.go +++ /dev/null @@ -1,85 +0,0 @@ -package state - -import ( - "fmt" - - zkt "github.com/scroll-tech/zktrie/types" - - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/crypto" - "github.com/scroll-tech/go-ethereum/ethdb" - zktrie "github.com/scroll-tech/go-ethereum/trie" - "github.com/scroll-tech/go-ethereum/trie/zkproof" -) - -type TrieProve interface { - Prove(key []byte, proofDb ethdb.KeyValueWriter) error -} - -type ZktrieProofTracer struct { - *zktrie.ProofTracer -} - -// MarkDeletion overwrite the underlayer method with secure key -func (t ZktrieProofTracer) MarkDeletion(key common.Hash) { - key_s, _ := zkt.ToSecureKeyBytes(key.Bytes()) - t.ProofTracer.MarkDeletion(key_s.Bytes()) -} - -// Merge overwrite underlayer method with proper argument -func (t ZktrieProofTracer) Merge(another ZktrieProofTracer) { - t.ProofTracer.Merge(another.ProofTracer) -} - -func (t ZktrieProofTracer) Available() bool { - return t.ProofTracer != nil -} - -// NewProofTracer is not in Db interface and used explictily for reading proof in storage trie (not updated by the dirty value) -func (s *StateDB) NewProofTracer(trieS Trie) ZktrieProofTracer { - if s.IsUsingZktrie() { - zkTrie := trieS.(*zktrie.ZkTrie) - if zkTrie == nil { - panic("unexpected trie type for zktrie") - } - return ZktrieProofTracer{zkTrie.NewProofTracer()} - } - return ZktrieProofTracer{} -} - -// GetStorageTrieForProof is not in Db interface and used explictily for reading proof in storage trie (not updated by the dirty value) -func (s *StateDB) GetStorageTrieForProof(addr common.Address) (Trie, error) { - // try the trie in stateObject first, 
else we would create one - stateObject := s.getStateObject(addr) - if stateObject == nil { - // still return a empty trie - dummy_trie, _ := s.db.OpenStorageTrie(s.originalRoot, addr, common.Hash{}) - return dummy_trie, nil - } - - trie := stateObject.trie - var err error - if trie == nil { - // use a new, temporary trie - trie, err = s.db.OpenStorageTrie(s.originalRoot, stateObject.address, stateObject.data.Root) - if err != nil { - return nil, fmt.Errorf("can't create storage trie on root %s: %v ", stateObject.data.Root, err) - } - } - - return trie, nil -} - -// GetSecureTrieProof handle any interface with Prove (should be a Trie in most case) and -// deliver the proof in bytes -func (s *StateDB) GetSecureTrieProof(trieProve TrieProve, key common.Hash) ([][]byte, error) { - var proof zkproof.ProofList - var err error - if s.IsUsingZktrie() { - key_s, _ := zkt.ToSecureKeyBytes(key.Bytes()) - err = trieProve.Prove(key_s.Bytes(), &proof) - } else { - err = trieProve.Prove(crypto.Keccak256(key.Bytes()), &proof) - } - return proof, err -} diff --git a/core/state/state_test.go b/core/state/state_test.go index 4c19e206d62a..d0ded82b076f 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -41,6 +41,7 @@ func newStateEnv() *stateEnv { } func TestDump(t *testing.T) { + t.Skip("Due to ZkTrie not supporting iterators") db := rawdb.NewMemoryDatabase() tdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true}) sdb, _ := New(types.EmptyRootHash, tdb, nil) @@ -101,6 +102,7 @@ func TestDump(t *testing.T) { } func TestIterativeDump(t *testing.T) { + t.Skip("Due to ZkTrie not supporting iterators") db := rawdb.NewMemoryDatabase() tdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true}) sdb, _ := New(types.EmptyRootHash, tdb, nil) diff --git a/core/state/statedb.go b/core/state/statedb.go index 2e74c638b57e..374b47769b16 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -23,8 +23,6 @@ import ( "sort" "time" - zkt 
"github.com/scroll-tech/zktrie/types" - "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/state/snapshot" @@ -209,10 +207,6 @@ func (s *StateDB) Error() error { return s.dbErr } -func (s *StateDB) IsUsingZktrie() bool { - return s.db.TrieDB().IsUsingZktrie() -} - func (s *StateDB) AddLog(log *types.Log) { s.journal.append(addLogChange{txhash: s.thash}) @@ -363,11 +357,7 @@ func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { // GetProof returns the Merkle proof for a given account. func (s *StateDB) GetProof(addr common.Address) ([][]byte, error) { - if s.IsUsingZktrie() { - addr_s, _ := zkt.ToSecureKeyBytes(addr.Bytes()) - return s.GetProofByHash(common.BytesToHash(addr_s.Bytes())) - } - return s.GetProofByHash(crypto.Keccak256Hash(addr.Bytes())) + return s.GetProofByHash(common.BytesToHash(addr.Bytes())) } // GetProofByHash returns the Merkle proof for a given account. diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go index 802b14587268..99072d8e61e7 100644 --- a/core/state/statedb_fuzz_test.go +++ b/core/state/statedb_fuzz_test.go @@ -381,6 +381,7 @@ func (test *stateTest) verify(root common.Hash, next common.Hash, db *trie.Datab } func TestStateChanges(t *testing.T) { + t.Skip("This test doesn't support ZkTrie yet") config := &quick.Config{MaxCount: 1000} err := quick.Check((*stateTest).run, config) if cerr, ok := err.(*quick.CheckError); ok { diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index b81de73f1918..42d9a21c6f3e 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -30,6 +30,7 @@ import ( "testing" "testing/quick" + "github.com/holiman/uint256" "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/state/snapshot" @@ -40,7 +41,6 @@ import ( "github.com/scroll-tech/go-ethereum/trie/triedb/hashdb" 
"github.com/scroll-tech/go-ethereum/trie/triedb/pathdb" "github.com/scroll-tech/go-ethereum/trie/trienode" - "github.com/holiman/uint256" ) // Tests that updating a state trie does not leak any database writes prior to @@ -796,7 +796,7 @@ func TestDeleteCreateRevert(t *testing.T) { // If we are missing trie nodes, we should not continue writing to the trie func TestMissingTrieNodes(t *testing.T) { testMissingTrieNodes(t, rawdb.HashScheme) - testMissingTrieNodes(t, rawdb.PathScheme) + // testMissingTrieNodes(t, rawdb.PathScheme) } func testMissingTrieNodes(t *testing.T, scheme string) { @@ -1051,7 +1051,7 @@ func TestFlushOrderDataLoss(t *testing.T) { t.Fatalf("failed to commit state trie: %v", err) } triedb.Reference(root, common.Hash{}) - if err := triedb.Cap(1024); err != nil { + if err := triedb.Cap(128); err != nil { t.Fatalf("failed to cap trie dirty cache: %v", err) } if err := triedb.Commit(root, false); err != nil { @@ -1106,6 +1106,7 @@ func TestStateDBTransientStorage(t *testing.T) { } func TestResetObject(t *testing.T) { + t.Skip("Snapshot doesn't support ZkTrie") var ( disk = rawdb.NewMemoryDatabase() tdb = trie.NewDatabase(disk, nil) @@ -1140,6 +1141,7 @@ func TestResetObject(t *testing.T) { } func TestDeleteStorage(t *testing.T) { + t.Skip("Snapshot doesn't support ZkTrie") var ( disk = rawdb.NewMemoryDatabase() tdb = trie.NewDatabase(disk, nil) diff --git a/core/state/sync_test.go b/core/state/sync_test.go index c842292191b6..f485c122aa7a 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2015 The go-ethereum Authors // This file is part of the go-ethereum library. 
// diff --git a/core/state_processor.go b/core/state_processor.go index 47c57f8c53de..f26cfcf3c3e6 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -144,7 +144,7 @@ func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, sta // Apply the transaction to the current state (included in the env). applyMessageStartTime := time.Now() result, err := ApplyMessage(evm, msg, gp, l1DataFee) - if evm.Config.Tracer != nil && evm.Config.Tracer.IsDebug() { + if evm.Config.Tracer != nil { if erroringTracer, ok := evm.Config.Tracer.(interface{ Error() error }); ok { err = errors.Join(err, erroringTracer.Error()) } diff --git a/core/state_processor_test.go b/core/state_processor_test.go index e8300127c9b8..1f37a077995f 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -200,7 +200,7 @@ func TestStateProcessorErrors(t *testing.T) { txs: []*types.Transaction{ mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(0), big.NewInt(0)), }, - want: "could not apply tx 0 [0xc4ab868fef0c82ae0387b742aee87907f2d0fc528fc6ea0a021459fb0fc4a4a8]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 0 baseFee: 38100000", + want: "could not apply tx 0 [0xc4ab868fef0c82ae0387b742aee87907f2d0fc528fc6ea0a021459fb0fc4a4a8]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 0 baseFee: 39370000", }, { // ErrTipVeryHigh txs: []*types.Transaction{ @@ -241,19 +241,19 @@ func TestStateProcessorErrors(t *testing.T) { txs: []*types.Transaction{ mkDynamicCreationTx(0, 500000, common.Big0, big.NewInt(params.InitialBaseFee), tooBigInitCode[:]), }, - want: "could not apply tx 0 [0xa31de6e26bd5ffba0ca91a2bc29fc2eaad6a6cfc5ad9ab6ffb69cac121e0125c]: max initcode size exceeded: code size 49153 limit 49152", + want: "could not apply tx 0 [0xd491405f06c92d118dd3208376fcee18a57c54bc52063ee4a26b1cf296857c25]: max initcode size exceeded: 
code size 49153 limit 49152", }, { // ErrIntrinsicGas: Not enough gas to cover init code txs: []*types.Transaction{ mkDynamicCreationTx(0, 54299, common.Big0, big.NewInt(params.InitialBaseFee), make([]byte, 320)), }, - want: "could not apply tx 0 [0xf36b7d68cf239f956f7c36be26688a97aaa317ea5f5230d109bb30dbc8598ccb]: intrinsic gas too low: have 54299, want 54300", + want: "could not apply tx 0 [0xfd49536a9b323769d8472fcb3ebb3689b707a349379baee3e2ee3fe7baae06a1]: intrinsic gas too low: have 54299, want 54300", }, { // ErrBlobFeeCapTooLow txs: []*types.Transaction{ mkBlobTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(1), []common.Hash{(common.Hash{1})}), }, - want: "could not apply tx 0 [0x6c11015985ce82db691d7b2d017acda296db88b811c3c60dc71449c76256c716]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 1 baseFee: 38100000", + want: "could not apply tx 0 [0x6c11015985ce82db691d7b2d017acda296db88b811c3c60dc71449c76256c716]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 1 baseFee: 39370000", }, } { block := GenerateBadBlock(gspec.ToBlock(), beacon.New(ethash.NewFaker()), tt.txs, gspec.Config) diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go index 0c5a03e91f19..03c1acacb13e 100644 --- a/core/txpool/blobpool/blobpool_test.go +++ b/core/txpool/blobpool/blobpool_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2023 The go-ethereum Authors // This file is part of the go-ethereum library. 
// @@ -29,6 +32,8 @@ import ( "testing" "time" + "github.com/holiman/billy" + "github.com/holiman/uint256" "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/consensus/misc/eip1559" "github.com/scroll-tech/go-ethereum/consensus/misc/eip4844" @@ -43,8 +48,6 @@ import ( "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/params" "github.com/scroll-tech/go-ethereum/rlp" - "github.com/holiman/billy" - "github.com/holiman/uint256" ) var ( diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index c4b6633db95a..4d05077c7512 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -1505,6 +1505,7 @@ func TestRepricing(t *testing.T) { // // Note, local transactions are never allowed to be dropped. func TestRepricingDynamicFee(t *testing.T) { + t.Skip("broken by https://github.com/scroll-tech/go-ethereum/pull/964/files") t.Parallel() // Create the pool to test the pricing enforcement with diff --git a/core/types/hashes.go b/core/types/hashes.go index aa37e6d1e2f5..1f33d8853092 100644 --- a/core/types/hashes.go +++ b/core/types/hashes.go @@ -23,14 +23,8 @@ import ( ) var ( - // EmptyZkTrieRootHash is the known root hash of an empty zktrie. - EmptyZkTrieRootHash = common.Hash{} - - // EmptyLegacyTrieRootHash is the known root hash of an empty legacy trie. - EmptyLegacyTrieRootHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - // EmptyRootHash is the known root hash of an empty trie. - EmptyRootHash = EmptyZkTrieRootHash + EmptyRootHash = common.Hash{} // EmptyUncleHash is the known hash of the empty uncle set. EmptyUncleHash = rlpHash([]*Header(nil)) // 1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347 @@ -45,13 +39,13 @@ var ( EmptyPoseidonCodeHash = codehash.EmptyPoseidonCodeHash // EmptyTxsHash is the known hash of the empty transaction set. 
- EmptyTxsHash = EmptyLegacyTrieRootHash + EmptyTxsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") // EmptyReceiptsHash is the known hash of the empty receipt set. - EmptyReceiptsHash = EmptyLegacyTrieRootHash + EmptyReceiptsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") // EmptyWithdrawalsHash is the known hash of the empty withdrawal set. - EmptyWithdrawalsHash = EmptyLegacyTrieRootHash + EmptyWithdrawalsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") ) // TrieRootHash returns the hash itself if it's non-empty or the predefined diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go index 5ced9b410604..97948b2a1a8a 100644 --- a/core/types/hashing_test.go +++ b/core/types/hashing_test.go @@ -34,6 +34,7 @@ import ( ) func TestDeriveSha(t *testing.T) { + t.Skip("due to legacy trie being deprecated") txs, err := genTxs(0) if err != nil { t.Fatal(err) diff --git a/core/types/state_account_marshalling.go b/core/types/state_account_marshalling.go index db8fbed345c7..72da2656dfaa 100644 --- a/core/types/state_account_marshalling.go +++ b/core/types/state_account_marshalling.go @@ -23,8 +23,6 @@ import ( "github.com/iden3/go-iden3-crypto/utils" - zkt "github.com/scroll-tech/zktrie/types" - "github.com/scroll-tech/go-ethereum/common" ) @@ -44,8 +42,8 @@ var ( // [96:128] KeccakCodeHash // [128:160] PoseidonCodehash // (total 160 bytes) -func (s *StateAccount) MarshalFields() ([]zkt.Byte32, uint32) { - fields := make([]zkt.Byte32, 5) +func (s *StateAccount) MarshalFields() ([][32]byte, uint32) { + fields := make([][32]byte, 5) if s.Balance == nil { panic("StateAccount balance nil") diff --git a/core/types/state_account_marshalling_test.go b/core/types/state_account_marshalling_test.go index b822329a509d..91b1b3871962 100644 --- a/core/types/state_account_marshalling_test.go +++ b/core/types/state_account_marshalling_test.go @@ 
-38,18 +38,18 @@ func TestMarshalUnmarshalEmptyAccount(t *testing.T) { assert.Equal(t, 5, len(bytes)) assert.Equal(t, uint32(8), flag) - assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[0].Bytes()) - assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[1].Bytes()) - assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[2].Bytes()) - assert.Equal(t, common.Hex2Bytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"), bytes[3].Bytes()) - assert.Equal(t, common.Hex2Bytes("2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864"), bytes[4].Bytes()) + assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[0][:]) + assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[1][:]) + assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[2][:]) + assert.Equal(t, common.Hex2Bytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"), bytes[3][:]) + assert.Equal(t, common.Hex2Bytes("2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864"), bytes[4][:]) // unmarshal account flatBytes := []byte("") for _, item := range bytes { - flatBytes = append(flatBytes, item.Bytes()...) + flatBytes = append(flatBytes, item[:]...) 
} acc2, err := UnmarshalStateAccount(flatBytes) @@ -75,11 +75,11 @@ func TestMarshalUnmarshalZeroAccount(t *testing.T) { assert.Equal(t, 5, len(bytes)) assert.Equal(t, uint32(8), flag) - assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[0].Bytes()) - assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[1].Bytes()) - assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[2].Bytes()) - assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[3].Bytes()) - assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[4].Bytes()) + assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[0][:]) + assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[1][:]) + assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[2][:]) + assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[3][:]) + assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), bytes[4][:]) } func TestMarshalUnmarshalNonEmptyAccount(t *testing.T) { @@ -99,18 +99,18 @@ func TestMarshalUnmarshalNonEmptyAccount(t *testing.T) { assert.Equal(t, 5, len(bytes)) assert.Equal(t, uint32(8), flag) - assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000222222220000000011111111"), bytes[0].Bytes()) - assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000033333333"), bytes[1].Bytes()) - assert.Equal(t, common.Hex2Bytes("123456789abcdef123456789abcdef123456789abcdef123456789abcdef1234"), bytes[2].Bytes()) - assert.Equal(t, 
common.Hex2Bytes("1111111111111111111111111111111111111111111111111111111111111111"), bytes[3].Bytes()) - assert.Equal(t, common.Hex2Bytes("2222222222222222222222222222222222222222222222222222222222222222"), bytes[4].Bytes()) + assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000222222220000000011111111"), bytes[0][:]) + assert.Equal(t, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000033333333"), bytes[1][:]) + assert.Equal(t, common.Hex2Bytes("123456789abcdef123456789abcdef123456789abcdef123456789abcdef1234"), bytes[2][:]) + assert.Equal(t, common.Hex2Bytes("1111111111111111111111111111111111111111111111111111111111111111"), bytes[3][:]) + assert.Equal(t, common.Hex2Bytes("2222222222222222222222222222222222222222222222222222222222222222"), bytes[4][:]) // unmarshal account flatBytes := []byte("") for _, item := range bytes { - flatBytes = append(flatBytes, item.Bytes()...) + flatBytes = append(flatBytes, item[:]...) } acc2, err := UnmarshalStateAccount(flatBytes) @@ -138,18 +138,18 @@ func TestMarshalUnmarshalAccountWithMaxFields(t *testing.T) { assert.Equal(t, 5, len(bytes)) assert.Equal(t, uint32(8), flag) - assert.Equal(t, common.Hex2Bytes("00000000000000000000000000000000ffffffffffffffffffffffffffffffff"), bytes[0].Bytes()) - assert.Equal(t, common.Hex2Bytes("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000000"), bytes[1].Bytes()) - assert.Equal(t, common.Hex2Bytes("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000000"), bytes[2].Bytes()) - assert.Equal(t, common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), bytes[3].Bytes()) - assert.Equal(t, common.Hex2Bytes("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000000"), bytes[4].Bytes()) + assert.Equal(t, common.Hex2Bytes("00000000000000000000000000000000ffffffffffffffffffffffffffffffff"), bytes[0][:]) + assert.Equal(t, 
common.Hex2Bytes("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000000"), bytes[1][:]) + assert.Equal(t, common.Hex2Bytes("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000000"), bytes[2][:]) + assert.Equal(t, common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), bytes[3][:]) + assert.Equal(t, common.Hex2Bytes("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000000"), bytes[4][:]) // unmarshal account flatBytes := []byte("") for _, item := range bytes { - flatBytes = append(flatBytes, item.Bytes()...) + flatBytes = append(flatBytes, item[:]...) } acc2, err := UnmarshalStateAccount(flatBytes) diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go index bc1ed368400b..b90bd558a5e6 100644 --- a/core/vm/gas_table_test.go +++ b/core/vm/gas_table_test.go @@ -133,48 +133,53 @@ var createGasTests = []struct { func TestCreateGas(t *testing.T) { for i, tt := range createGasTests { - var gasUsed = uint64(0) - doCheck := func(testGas int) bool { - address := common.BytesToAddress([]byte("contract")) - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - statedb.CreateAccount(address) - statedb.SetCode(address, hexutil.MustDecode(tt.code)) - statedb.Finalise(true) - vmctx := BlockContext{ - CanTransfer: func(StateDB, common.Address, *big.Int) bool { return true }, - Transfer: func(StateDB, common.Address, common.Address, *big.Int) {}, - BlockNumber: big.NewInt(0), - } - config := Config{} - if tt.eip3860 { - config.ExtraEips = []int{3860} + t.Run("createGasTests", func(t *testing.T) { + if tt.eip3860 == false { + t.Skip("EIP-3860 is enabled by default on Scroll") } + var gasUsed = uint64(0) + doCheck := func(testGas int) bool { + address := common.BytesToAddress([]byte("contract")) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb.CreateAccount(address) + statedb.SetCode(address, 
hexutil.MustDecode(tt.code)) + statedb.Finalise(true) + vmctx := BlockContext{ + CanTransfer: func(StateDB, common.Address, *big.Int) bool { return true }, + Transfer: func(StateDB, common.Address, common.Address, *big.Int) {}, + BlockNumber: big.NewInt(0), + } + config := Config{} + if tt.eip3860 { + config.ExtraEips = []int{3860} + } - vmenv := NewEVM(vmctx, TxContext{}, statedb, params.AllEthashProtocolChanges, config) - var startGas = uint64(testGas) - ret, gas, err := vmenv.Call(AccountRef(common.Address{}), address, nil, startGas, new(big.Int)) - if err != nil { - return false + vmenv := NewEVM(vmctx, TxContext{}, statedb, params.AllEthashProtocolChanges, config) + var startGas = uint64(testGas) + ret, gas, err := vmenv.Call(AccountRef(common.Address{}), address, nil, startGas, new(big.Int)) + if err != nil { + return false + } + gasUsed = startGas - gas + if len(ret) != 32 { + t.Fatalf("test %d: expected 32 bytes returned, have %d", i, len(ret)) + } + if bytes.Equal(ret, make([]byte, 32)) { + // Failure + return false + } + return true } - gasUsed = startGas - gas - if len(ret) != 32 { - t.Fatalf("test %d: expected 32 bytes returned, have %d", i, len(ret)) + minGas := sort.Search(100_000, doCheck) + if uint64(minGas) != tt.minimumGas { + t.Fatalf("test %d: min gas error, want %d, have %d", i, tt.minimumGas, minGas) } - if bytes.Equal(ret, make([]byte, 32)) { - // Failure - return false + // If the deployment succeeded, we also check the gas used + if minGas < 100_000 { + if gasUsed != tt.gasUsed { + t.Errorf("test %d: gas used mismatch: have %v, want %v", i, gasUsed, tt.gasUsed) + } } - return true - } - minGas := sort.Search(100_000, doCheck) - if uint64(minGas) != tt.minimumGas { - t.Fatalf("test %d: min gas error, want %d, have %d", i, tt.minimumGas, minGas) - } - // If the deployment succeeded, we also check the gas used - if minGas < 100_000 { - if gasUsed != tt.gasUsed { - t.Errorf("test %d: gas used mismatch: have %v, want %v", i, gasUsed, tt.gasUsed) 
- } - } + }) } } diff --git a/core/vm/logger.go b/core/vm/logger.go index 5b588263fd8f..eb56f1f6b1be 100644 --- a/core/vm/logger.go +++ b/core/vm/logger.go @@ -41,6 +41,4 @@ type EVMLogger interface { CaptureState(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) CaptureStateAfter(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) CaptureFault(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) - // Helper function - IsDebug() bool } diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index 0ffdead1c34e..adedd4536e24 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -270,6 +270,7 @@ func (d *dummyChain) GetHeader(h common.Hash, n uint64) *types.Header { // TestBlockhash tests the blockhash operation. It's a bit special, since it internally // requires access to a chain reader. func TestBlockhash(t *testing.T) { + t.Skip("Scroll has a different implementation of blockhash") // Current head n := uint64(1000) parentHash := common.Hash{} @@ -672,13 +673,14 @@ func TestColdAccountAccessCost(t *testing.T) { step: 6, want: 2855, }, - { // SELFDESTRUCT(0xff) - code: []byte{ - byte(vm.PUSH1), 0xff, byte(vm.SELFDESTRUCT), - }, - step: 1, - want: 7600, - }, + // disabled due to SELFDESTRUCT not being supported in Scroll + // { // SELFDESTRUCT(0xff) + // code: []byte{ + // byte(vm.PUSH1), 0xff, byte(vm.SELFDESTRUCT), + // }, + // step: 1, + // want: 7600, + // }, } { tracer := logger.NewStructLogger(nil) Execute(tc.code, nil, &Config{ @@ -697,6 +699,7 @@ func TestColdAccountAccessCost(t *testing.T) { } func TestRuntimeJSTracer(t *testing.T) { + t.Skip("disabled due to SELFDESTRUCT not being supported in Scroll") jsTracers := []string{ `{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, steps:0, step: function() { this.steps++}, diff --git a/eth/filters/filter_system_test.go 
b/eth/filters/filter_system_test.go index 32326acb9afa..7c3944785950 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -468,7 +468,7 @@ func TestGetLogsRange(t *testing.T) { gspec := &core.Genesis{ Config: params.TestChainConfig, } - _, err := gspec.Commit(db, trie.NewDatabase(db, &trie.Config{IsUsingZktrie: true})) + _, err := gspec.Commit(db, trie.NewDatabase(db, nil)) if err != nil { t.Fatal(err) } diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index 8da131c1a69d..c26af885cfb3 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -180,7 +180,7 @@ func TestFilters(t *testing.T) { // Hack: GenerateChainWithGenesis creates a new db. // Commit the genesis manually and use GenerateChain. - _, err = gspec.Commit(db, trie.NewDatabase(db, &trie.Config{IsUsingZktrie: true})) + _, err = gspec.Commit(db, trie.NewDatabase(db, nil)) if err != nil { t.Fatal(err) } diff --git a/eth/tracers/js/goja.go b/eth/tracers/js/goja.go index 5f584e272473..9659c15d5eda 100644 --- a/eth/tracers/js/goja.go +++ b/eth/tracers/js/goja.go @@ -327,10 +327,6 @@ func (t *jsTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Ad } } -func (t *jsTracer) IsDebug() bool { - return false -} - // CaptureExit is called when EVM exits a scope, even if the scope didn't // execute any code. 
func (t *jsTracer) CaptureExit(output []byte, gasUsed uint64, err error) { diff --git a/eth/tracers/logger/access_list_tracer.go b/eth/tracers/logger/access_list_tracer.go index 87e37af5dbda..e83cb8c0491f 100644 --- a/eth/tracers/logger/access_list_tracer.go +++ b/eth/tracers/logger/access_list_tracer.go @@ -185,7 +185,3 @@ func (a *AccessListTracer) AccessList() types.AccessList { func (a *AccessListTracer) Equal(other *AccessListTracer) bool { return a.list.equal(other.list) } - -func (a *AccessListTracer) IsDebug() bool { - return false -} diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go index 0868dd07fd44..5ef1bb003c55 100644 --- a/eth/tracers/logger/logger.go +++ b/eth/tracers/logger/logger.go @@ -420,8 +420,6 @@ func (l *StructLogger) UpdatedStorages() map[common.Address]Storage { // CreatedAccount return the account data in case it is a create tx func (l *StructLogger) CreatedAccount() *types.AccountWrapper { return l.createdAccount } -func (l *StructLogger) IsDebug() bool { return l.cfg.Debug } - // WriteTrace writes a formatted trace to the given writer func WriteTrace(writer io.Writer, logs []StructLog) { for _, log := range logs { @@ -545,8 +543,6 @@ func (*mdLogger) CaptureTxStart(gasLimit uint64) {} func (*mdLogger) CaptureTxEnd(restGas uint64) {} -func (t *mdLogger) IsDebug() bool { return t.cfg.Debug } - // FormatLogs formats EVM returned structured logs for json output func FormatLogs(logs []StructLog) []types.StructLogRes { formatted := make([]types.StructLogRes, len(logs)) diff --git a/eth/tracers/logger/logger_json.go b/eth/tracers/logger/logger_json.go index dbdfa3b0b3a6..8e6e14ef94b8 100644 --- a/eth/tracers/logger/logger_json.go +++ b/eth/tracers/logger/logger_json.go @@ -104,7 +104,3 @@ func (l *JSONLogger) CaptureExit(output []byte, gasUsed uint64, err error) {} func (l *JSONLogger) CaptureTxStart(gasLimit uint64) {} func (l *JSONLogger) CaptureTxEnd(restGas uint64) {} - -func (l *JSONLogger) IsDebug() bool { - return 
l.cfg.Debug -} diff --git a/eth/tracers/native/4byte.go b/eth/tracers/native/4byte.go index dea374825c1c..7701d2d9b88b 100644 --- a/eth/tracers/native/4byte.go +++ b/eth/tracers/native/4byte.go @@ -115,10 +115,6 @@ func (t *fourByteTracer) CaptureEnter(op vm.OpCode, from common.Address, to comm t.store(input[0:4], len(input)-4) } -func (t *fourByteTracer) IsDebug() bool { - return false -} - // GetResult returns the json-encoded nested list of call traces, and any // error arising from the encoding or forceful termination (via `Stop`). func (t *fourByteTracer) GetResult() (json.RawMessage, error) { diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go index 15571a840bb6..96d8c37eee70 100644 --- a/eth/tracers/native/call.go +++ b/eth/tracers/native/call.go @@ -259,10 +259,6 @@ func (t *CallTracer) CaptureTxEnd(restGas uint64) { } } -func (t *CallTracer) IsDebug() bool { - return false -} - // GetResult returns the json-encoded nested list of call traces, and any // error arising from the encoding or forceful termination (via `Stop`). func (t *CallTracer) GetResult() (json.RawMessage, error) { diff --git a/eth/tracers/native/call_flat.go b/eth/tracers/native/call_flat.go index dbfb5809aaba..e504a56c06a5 100644 --- a/eth/tracers/native/call_flat.go +++ b/eth/tracers/native/call_flat.go @@ -213,10 +213,6 @@ func (t *flatCallTracer) CaptureTxEnd(restGas uint64) { t.tracer.CaptureTxEnd(restGas) } -func (t *flatCallTracer) IsDebug() bool { - return false -} - // GetResult returns an empty json object. func (t *flatCallTracer) GetResult() (json.RawMessage, error) { if len(t.tracer.callstack) < 1 { diff --git a/eth/tracers/native/mux.go b/eth/tracers/native/mux.go index 5c99fefe970b..84a08309edb0 100644 --- a/eth/tracers/native/mux.go +++ b/eth/tracers/native/mux.go @@ -122,10 +122,6 @@ func (t *MuxTracer) CaptureTxEnd(restGas uint64) { } } -func (t *MuxTracer) IsDebug() bool { - return false -} - // GetResult returns an empty json object. 
func (t *MuxTracer) GetResult() (json.RawMessage, error) { resObject := make(map[string]json.RawMessage) diff --git a/eth/tracers/native/noop.go b/eth/tracers/native/noop.go index 3f3851c0e9f1..181d4a60635c 100644 --- a/eth/tracers/native/noop.go +++ b/eth/tracers/native/noop.go @@ -71,8 +71,6 @@ func (*noopTracer) CaptureTxStart(gasLimit uint64) {} func (*noopTracer) CaptureTxEnd(restGas uint64) {} -func (*noopTracer) IsDebug() bool { return false } - // GetResult returns an empty json object. func (t *noopTracer) GetResult() (json.RawMessage, error) { return json.RawMessage(`{}`), nil diff --git a/go.mod b/go.mod index eb5cbb594568..db6f03c51b84 100644 --- a/go.mod +++ b/go.mod @@ -58,7 +58,6 @@ require ( github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7 github.com/rs/cors v1.7.0 github.com/scroll-tech/da-codec v0.1.2 - github.com/scroll-tech/zktrie v0.8.4 github.com/shirou/gopsutil v3.21.11+incompatible github.com/sourcegraph/conc v0.3.0 github.com/status-im/keycard-go v0.2.0 diff --git a/go.sum b/go.sum index fa1a27460115..438ae7a5d44d 100644 --- a/go.sum +++ b/go.sum @@ -476,8 +476,6 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/scroll-tech/da-codec v0.1.2 h1:QyJ+dQ4zWVVJwuqxNt4MiKyrymVc6rHe4YPtURkjiRc= github.com/scroll-tech/da-codec v0.1.2/go.mod h1:odz1ck3umvYccCG03osaQBISAYGinZktZYbpk94fYRE= -github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= -github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= diff --git 
a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index 9df3a6d787a0..be6db98c4af4 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -114,6 +114,7 @@ type txData struct { } func allTransactionTypes(addr common.Address, config *params.ChainConfig) []txData { + emptyRootHash := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") return []txData{ { Tx: &types.LegacyTx{ @@ -188,7 +189,7 @@ func allTransactionTypes(addr common.Address, config *params.ChainConfig) []txDa AccessList: types.AccessList{ types.AccessTuple{ Address: common.Address{0x2}, - StorageKeys: []common.Hash{types.EmptyLegacyTrieRootHash}, + StorageKeys: []common.Hash{emptyRootHash}, }, }, V: big.NewInt(32), @@ -234,7 +235,7 @@ func allTransactionTypes(addr common.Address, config *params.ChainConfig) []txDa AccessList: types.AccessList{ types.AccessTuple{ Address: common.Address{0x2}, - StorageKeys: []common.Hash{types.EmptyLegacyTrieRootHash}, + StorageKeys: []common.Hash{emptyRootHash}, }, }, V: big.NewInt(32), @@ -281,7 +282,7 @@ func allTransactionTypes(addr common.Address, config *params.ChainConfig) []txDa AccessList: types.AccessList{ types.AccessTuple{ Address: common.Address{0x2}, - StorageKeys: []common.Hash{types.EmptyLegacyTrieRootHash}, + StorageKeys: []common.Hash{emptyRootHash}, }, }, V: big.NewInt(32), diff --git a/rollup/ccc/async_checker_test.go b/rollup/ccc/async_checker_test.go index e4c7e934e00a..06da50d177aa 100644 --- a/rollup/ccc/async_checker_test.go +++ b/rollup/ccc/async_checker_test.go @@ -30,7 +30,7 @@ func TestAsyncChecker(t *testing.T) { Config: params.TestChainConfig, Alloc: core.GenesisAlloc{testAddr: {Balance: new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))}}, } - gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaultsWithZktrie)) + gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)) chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, 
nil) asyncChecker := NewAsyncChecker(chain, 1, false) diff --git a/rollup/ccc/logger.go b/rollup/ccc/logger.go index 2c1f6678f497..c69095c6da93 100644 --- a/rollup/ccc/logger.go +++ b/rollup/ccc/logger.go @@ -260,10 +260,6 @@ func (l *Logger) CaptureTxStart(gasLimit uint64) { func (l *Logger) CaptureTxEnd(restGas uint64) { } -func (l *Logger) IsDebug() bool { - return true -} - // Error returns an error if executed txns triggered an overflow // Caller should revert some transactions and close the block func (l *Logger) Error() error { diff --git a/trie/zktrie_deletionproof.go b/rollup/tracing/proof.go similarity index 71% rename from trie/zktrie_deletionproof.go rename to rollup/tracing/proof.go index 9fbf2ae36abf..b02f3540b413 100644 --- a/trie/zktrie_deletionproof.go +++ b/rollup/tracing/proof.go @@ -1,30 +1,28 @@ -package trie +package tracing import ( "bytes" "fmt" - zktrie "github.com/scroll-tech/zktrie/trie" - zkt "github.com/scroll-tech/zktrie/types" - "github.com/scroll-tech/go-ethereum/ethdb" + "github.com/scroll-tech/go-ethereum/trie" ) type ProofTracer struct { - *ZkTrie - deletionTracer map[zkt.Hash]struct{} - rawPaths map[string][]*zktrie.Node - emptyTermPaths map[string][]*zktrie.Node + trie *trie.ZkTrie + deletionTracer map[trie.Hash]struct{} + rawPaths map[string][]*trie.Node + emptyTermPaths map[string][]*trie.Node } // NewProofTracer create a proof tracer object -func (t *ZkTrie) NewProofTracer() *ProofTracer { +func NewProofTracer(t *trie.ZkTrie) *ProofTracer { return &ProofTracer{ - ZkTrie: t, + trie: t, // always consider 0 is "deleted" - deletionTracer: map[zkt.Hash]struct{}{zkt.HashZero: {}}, - rawPaths: make(map[string][]*zktrie.Node), - emptyTermPaths: make(map[string][]*zktrie.Node), + deletionTracer: map[trie.Hash]struct{}{trie.HashZero: {}}, + rawPaths: make(map[string][]*trie.Node), + emptyTermPaths: make(map[string][]*trie.Node), } } @@ -32,7 +30,7 @@ func (t *ZkTrie) NewProofTracer() *ProofTracer { func (t *ProofTracer) Merge(another 
*ProofTracer) *ProofTracer { // sanity checking - if !bytes.Equal(t.Hash().Bytes(), another.Hash().Bytes()) { + if !bytes.Equal(t.trie.Hash().Bytes(), another.trie.Hash().Bytes()) { panic("can not merge two proof tracer base on different trie") } @@ -59,7 +57,7 @@ func (t *ProofTracer) Merge(another *ProofTracer) *ProofTracer { // always decode the node for its purpose func (t *ProofTracer) GetDeletionProofs() ([][]byte, error) { - retMap := map[zkt.Hash][]byte{} + retMap := map[trie.Hash][]byte{} // check each path: reversively, skip the final leaf node for _, path := range t.rawPaths { @@ -73,18 +71,18 @@ func (t *ProofTracer) GetDeletionProofs() ([][]byte, error) { nodeHash, _ := n.NodeHash() t.deletionTracer[*nodeHash] = struct{}{} } else { - var siblingHash *zkt.Hash + var siblingHash *trie.Hash if deletedL { siblingHash = n.ChildR } else if deletedR { siblingHash = n.ChildL } if siblingHash != nil { - sibling, err := t.ZkTrie.Tree().GetNode(siblingHash) + sibling, err := t.trie.GetNodeByHash(siblingHash) if err != nil { return nil, err } - if sibling.Type != zktrie.NodeTypeEmpty_New { + if sibling.Type != trie.NodeTypeEmpty_New { retMap[*siblingHash] = sibling.Value() } } @@ -103,7 +101,7 @@ func (t *ProofTracer) GetDeletionProofs() ([][]byte, error) { } // MarkDeletion mark a key has been involved into deletion -func (t *ProofTracer) MarkDeletion(key []byte) { +func (t *ProofTracer) MarkDeletion(key []byte) error { if path, existed := t.emptyTermPaths[string(key)]; existed { // copy empty node terminated path for final scanning t.rawPaths[string(key)] = path @@ -111,38 +109,39 @@ func (t *ProofTracer) MarkDeletion(key []byte) { // sanity check leafNode := path[len(path)-1] - if leafNode.Type != zktrie.NodeTypeLeaf_New { + if leafNode.Type != trie.NodeTypeLeaf_New { panic("all path recorded in proofTrace should be ended with leafNode") } nodeHash, _ := leafNode.NodeHash() t.deletionTracer[*nodeHash] = struct{}{} } + return nil } // Prove act the same as 
zktrie.Prove, while also collect the raw path // for collecting deletion proofs in a post-work func (t *ProofTracer) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { fromLevel := uint(0) - var mptPath []*zktrie.Node - err := t.ZkTrie.ProveWithDeletion(key, fromLevel, - func(n *zktrie.Node) error { + var mptPath []*trie.Node + return t.trie.ProveWithDeletion(key, fromLevel, + func(n *trie.Node) error { nodeHash, err := n.NodeHash() if err != nil { return err } switch n.Type { - case zktrie.NodeTypeLeaf_New: - preImage := t.GetKey(n.NodeKey.Bytes()) + case trie.NodeTypeLeaf_New: + preImage := t.trie.GetKey(n.NodeKey.Bytes()) if len(preImage) > 0 { - n.KeyPreimage = &zkt.Byte32{} + n.KeyPreimage = &trie.Byte32{} copy(n.KeyPreimage[:], preImage) } - case zktrie.NodeTypeBranch_0, zktrie.NodeTypeBranch_1, - zktrie.NodeTypeBranch_2, zktrie.NodeTypeBranch_3: + case trie.NodeTypeBranch_0, trie.NodeTypeBranch_1, + trie.NodeTypeBranch_2, trie.NodeTypeBranch_3: mptPath = append(mptPath, n) - case zktrie.NodeTypeEmpty_New: + case trie.NodeTypeEmpty_New: // empty node is considered as "unhit" but it should be also being added // into a temporary slot for possibly being marked as deletion later mptPath = append(mptPath, n) @@ -153,17 +152,11 @@ func (t *ProofTracer) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { return proofDb.Put(nodeHash[:], n.Value()) }, - func(n *zktrie.Node, _ *zktrie.Node) { + func(n *trie.Node, _ *trie.Node) { // only "hit" path (i.e. 
the leaf node corresponding the input key can be found) // would be add into tracer mptPath = append(mptPath, n) t.rawPaths[string(key)] = mptPath }, ) - if err != nil { - return err - } - // we put this special kv pair in db so we can distinguish the type and - // make suitable Proof - return proofDb.Put(magicHash, zktrie.ProofMagicBytes()) } diff --git a/rollup/tracing/proof_test.go b/rollup/tracing/proof_test.go new file mode 100644 index 000000000000..0d2c457db921 --- /dev/null +++ b/rollup/tracing/proof_test.go @@ -0,0 +1,111 @@ +package tracing + +import ( + "bytes" + "testing" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/rawdb" + "github.com/scroll-tech/go-ethereum/ethdb/memorydb" + "github.com/scroll-tech/go-ethereum/trie" + "github.com/stretchr/testify/assert" +) + +func newTestingMerkle(t *testing.T) (*trie.ZkTrie, *trie.Database) { + db := trie.NewDatabase(rawdb.NewMemoryDatabase(), &trie.Config{}) + return newTestingMerkleWithDb(t, common.Hash{}, db) +} + +func newTestingMerkleWithDb(t *testing.T, root common.Hash, db *trie.Database) (*trie.ZkTrie, *trie.Database) { + maxLevels := trie.NodeKeyValidBytes * 8 + mt, err := trie.NewZkTrie(trie.TrieID(root), db) + if err != nil { + t.Fatal(err) + return nil, nil + } + assert.Equal(t, maxLevels, mt.MaxLevels()) + return mt, db +} + +// Tests that new "proof trace" feature +func TestProofWithDeletion(t *testing.T) { + mt, _ := newTestingMerkle(t) + key1 := bytes.Repeat([]byte("b"), 32) + key2 := bytes.Repeat([]byte("c"), 32) + err := mt.TryUpdate( + key1, + 1, + []trie.Byte32{*trie.NewByte32FromBytesPaddingZero(bytes.Repeat([]byte("v"), 32))}, + ) + assert.NoError(t, err) + err = mt.TryUpdate( + key2, + 1, + []trie.Byte32{*trie.NewByte32FromBytesPaddingZero(bytes.Repeat([]byte("n"), 32))}, + ) + assert.NoError(t, err) + + proof := memorydb.New() + proofTracer := NewProofTracer(mt) + + err = proofTracer.Prove(key1, proof) + assert.NoError(t, err) + nd, err := 
mt.TryGet(key2) + assert.NoError(t, err) + + key4 := bytes.Repeat([]byte("x"), 32) + err = proofTracer.Prove(key4, proof) + assert.NoError(t, err) + //assert.Equal(t, len(sibling1), len(delTracer.GetProofs())) + + siblings, err := proofTracer.GetDeletionProofs() + assert.NoError(t, err) + assert.Equal(t, 0, len(siblings)) + + proofTracer.MarkDeletion(key1) + siblings, err = proofTracer.GetDeletionProofs() + assert.NoError(t, err) + assert.Equal(t, 1, len(siblings)) + l := len(siblings[0]) + // a hacking to grep the value part directly from the encoded leaf node, + // notice the sibling of key `k*32`` is just the leaf of key `m*32` + assert.Equal(t, siblings[0][l-33:l-1], nd) + + // Marking a key that is currently not hit (but terminated by an empty node) + // also causes it to be added to the deletion proof + proofTracer.MarkDeletion(key4) + siblings, err = proofTracer.GetDeletionProofs() + assert.NoError(t, err) + assert.Equal(t, 2, len(siblings)) + + key3 := bytes.Repeat([]byte("x"), 32) + err = mt.TryUpdate( + key3, + 1, + []trie.Byte32{*trie.NewByte32FromBytesPaddingZero(bytes.Repeat([]byte("z"), 32))}, + ) + assert.NoError(t, err) + + proofTracer = NewProofTracer(mt) + err = proofTracer.Prove(key1, proof) + assert.NoError(t, err) + err = proofTracer.Prove(key4, proof) + assert.NoError(t, err) + + proofTracer.MarkDeletion(key1) + siblings, err = proofTracer.GetDeletionProofs() + assert.NoError(t, err) + assert.Equal(t, 1, len(siblings)) + + proofTracer.MarkDeletion(key4) + siblings, err = proofTracer.GetDeletionProofs() + assert.NoError(t, err) + assert.Equal(t, 2, len(siblings)) + + // one of the siblings is just leaf for key2, while + // another one must be a middle node + match1 := bytes.Equal(siblings[0][l-33:l-1], nd) + match2 := bytes.Equal(siblings[1][l-33:l-1], nd) + assert.True(t, match1 || match2) + assert.False(t, match1 && match2) +} diff --git a/rollup/tracing/tracing.go b/rollup/tracing/tracing.go index 6e4c16a9619a..a84604c5244e 100644 --- 
a/rollup/tracing/tracing.go +++ b/rollup/tracing/tracing.go @@ -27,6 +27,8 @@ import ( "github.com/scroll-tech/go-ethereum/rollup/fees" "github.com/scroll-tech/go-ethereum/rollup/rcfg" "github.com/scroll-tech/go-ethereum/rollup/withdrawtrie" + "github.com/scroll-tech/go-ethereum/trie" + "github.com/scroll-tech/go-ethereum/trie/zkproof" ) var ( @@ -78,7 +80,7 @@ type TraceEnv struct { TxStorageTraces []*types.StorageTrace Codes map[common.Hash]logger.CodeInfo // zktrie tracer is used for zktrie storage to build additional deletion proof - ZkTrieTracer map[string]state.ZktrieProofTracer + ZkTrieTracer map[string]*ProofTracer // StartL1QueueIndex is the next L1 message queue index that this block can process. // Example: If the parent block included QueueIndex=9, then StartL1QueueIndex will @@ -117,7 +119,7 @@ func CreateTraceEnvHelper(chainConfig *params.ChainConfig, logConfig *logger.Con }, TxStorageTraces: make([]*types.StorageTrace, block.Transactions().Len()), Codes: make(map[common.Hash]logger.CodeInfo), - ZkTrieTracer: make(map[string]state.ZktrieProofTracer), + ZkTrieTracer: make(map[string]*ProofTracer), StartL1QueueIndex: startL1QueueIndex, } } @@ -435,14 +437,15 @@ func (env *TraceEnv) getTxResult(state *state.StateDB, index int, block *types.B } env.sMu.Lock() - trie, err := state.GetStorageTrieForProof(addr) - if err != nil { + storageTrie, err := state.Database().OpenStorageTrie(state.GetRootHash(), addr, state.GetOrNewStateObject(addr).Root()) + zkStorageTrie, isZk := storageTrie.(*trie.ZkTrie) + if err != nil || !isZk { // but we still continue to next address log.Error("Storage trie not available", "error", err, "address", addr) env.sMu.Unlock() continue } - zktrieTracer := state.NewProofTracer(trie) + zktrieTracer := NewProofTracer(zkStorageTrie) env.sMu.Unlock() for key := range keys { @@ -458,29 +461,23 @@ func (env *TraceEnv) getTxResult(state *state.StateDB, index int, block *types.B m = make(map[string][]hexutil.Bytes) env.StorageProofs[addrStr] 
= m } - if zktrieTracer.Available() && !env.ZkTrieTracer[addrStr].Available() { - env.ZkTrieTracer[addrStr] = state.NewProofTracer(trie) + if _, exists := env.ZkTrieTracer[addrStr]; !exists { + env.ZkTrieTracer[addrStr] = zktrieTracer } if proof, existed := m[keyStr]; existed { txm[keyStr] = proof // still need to touch tracer for deletion - if isDelete && zktrieTracer.Available() { - env.ZkTrieTracer[addrStr].MarkDeletion(key) + if isDelete { + env.ZkTrieTracer[addrStr].MarkDeletion(key.Bytes()) } env.sMu.Unlock() continue } env.sMu.Unlock() - var proof [][]byte - var err error - if zktrieTracer.Available() { - proof, err = state.GetSecureTrieProof(zktrieTracer, key) - } else { - proof, err = state.GetSecureTrieProof(trie, key) - } - if err != nil { + var proof zkproof.ProofList + if err = zkStorageTrie.Prove(key.Bytes(), &proof); err != nil { log.Error("Storage proof not available", "error", err, "address", addrStr, "key", keyStr) // but we still mark the proofs map with nil array } @@ -488,12 +485,10 @@ func (env *TraceEnv) getTxResult(state *state.StateDB, index int, block *types.B env.sMu.Lock() txm[keyStr] = wrappedProof m[keyStr] = wrappedProof - if zktrieTracer.Available() { - if isDelete { - zktrieTracer.MarkDeletion(key) - } - env.ZkTrieTracer[addrStr].Merge(zktrieTracer) + if isDelete { + zktrieTracer.MarkDeletion(key.Bytes()) } + env.ZkTrieTracer[addrStr].Merge(zktrieTracer) env.sMu.Unlock() } } @@ -564,9 +559,13 @@ func (env *TraceEnv) fillBlockTrace(block *types.Block) (*types.BlockTrace, erro for _, slot := range storages { if _, existed := env.StorageProofs[addr.String()][slot.String()]; !existed { - if trie, err := statedb.GetStorageTrieForProof(addr); err != nil { - log.Error("Storage proof for intrinstic address not available", "error", err, "address", addr) - } else if proof, err := statedb.GetSecureTrieProof(trie, slot); err != nil { + var proof zkproof.ProofList + storageTrie, err := statedb.Database().OpenStorageTrie(statedb.GetRootHash(), 
addr, statedb.GetOrNewStateObject(addr).Root()) + zkStorageTrie, isZk := storageTrie.(*trie.ZkTrie) + if err != nil || !isZk { + // but we still continue to next address + log.Error("Storage trie not available", "error", err, "address", addr) + } else if err := zkStorageTrie.Prove(slot.Bytes(), &proof); err != nil { log.Error("Get storage proof for intrinstic address failed", "error", err, "address", addr, "slot", slot) } else { env.StorageProofs[addr.String()][slot.String()] = types.WrapProof(proof) diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go index e8c700d67ee1..b1fef12fb13d 100644 --- a/tests/fuzzers/trie/trie-fuzzer.go +++ b/tests/fuzzers/trie/trie-fuzzer.go @@ -145,7 +145,7 @@ func runRandTest(rt randTest) error { var ( triedb = trie.NewDatabase(rawdb.NewMemoryDatabase(), nil) tr = trie.NewEmpty(triedb) - origin = types.EmptyLegacyTrieRootHash + origin = types.EmptyRootHash values = make(map[string]string) // tracks content of the trie ) for i, step := range rt { diff --git a/trie/byte32.go b/trie/byte32.go new file mode 100644 index 000000000000..313b4062b725 --- /dev/null +++ b/trie/byte32.go @@ -0,0 +1,42 @@ +package trie + +import ( + "math/big" + + "github.com/scroll-tech/go-ethereum/crypto/poseidon" +) + +type Byte32 [32]byte + +func (b *Byte32) Hash() (*big.Int, error) { + first16 := new(big.Int).SetBytes(b[0:16]) + last16 := new(big.Int).SetBytes(b[16:32]) + hash, err := poseidon.HashFixedWithDomain([]*big.Int{first16, last16}, big.NewInt(HASH_DOMAIN_BYTE32)) + if err != nil { + return nil, err + } + return hash, nil +} + +func (b *Byte32) Bytes() []byte { return b[:] } + +// same action as common.Hash (truncate bytes longer than 32 bytes FROM beginning, +// and padding 0 at the beginning for shorter bytes) +func NewByte32FromBytes(b []byte) *Byte32 { + + byte32 := new(Byte32) + + if len(b) > 32 { + b = b[len(b)-32:] + } + + copy(byte32[32-len(b):], b) + return byte32 +} + +// create bytes32 with zeropadding to 
shorter bytes, or truncate it +func NewByte32FromBytesPaddingZero(b []byte) *Byte32 { + byte32 := new(Byte32) + copy(byte32[:], b) + return byte32 +} diff --git a/trie/byte32_test.go b/trie/byte32_test.go new file mode 100644 index 000000000000..d261c97de2a4 --- /dev/null +++ b/trie/byte32_test.go @@ -0,0 +1,44 @@ +package trie + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewByte32(t *testing.T) { + var tests = []struct { + input []byte + expected []byte + expectedPaddingZero []byte + expectedHash string + expectedHashPadding string + }{ + {bytes.Repeat([]byte{1}, 4), + []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1}, + []byte{1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + "1120169262217660912395665138727312015286293827539936259020934722663991619468", + "11815021958450380571374861379539732018094133931187815125213818828376493710327", + }, + {bytes.Repeat([]byte{1}, 34), + []byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, + []byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, + "2219239698457798269997113163039475489501011181643161136091371987815450431154", + "2219239698457798269997113163039475489501011181643161136091371987815450431154", + }, + } + + for _, tt := range tests { + byte32Result := NewByte32FromBytes(tt.input) + byte32PaddingResult := NewByte32FromBytesPaddingZero(tt.input) + assert.Equal(t, tt.expected, byte32Result.Bytes()) + assert.Equal(t, tt.expectedPaddingZero, byte32PaddingResult.Bytes()) + hashResult, err := byte32Result.Hash() + assert.NoError(t, err) + hashPaddingResult, err := byte32PaddingResult.Hash() + assert.NoError(t, err) + assert.Equal(t, tt.expectedHash, hashResult.String()) + assert.Equal(t, tt.expectedHashPadding, hashPaddingResult.String()) + } +} diff --git a/trie/database.go 
b/trie/database.go index f0268d096317..da243da4c1c0 100644 --- a/trie/database.go +++ b/trie/database.go @@ -18,7 +18,6 @@ package trie import ( "errors" - "sync" "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/ethdb" @@ -34,9 +33,6 @@ type Config struct { Preimages bool // Flag whether the preimage of node key is recorded HashDB *hashdb.Config // Configs for hash-based scheme PathDB *pathdb.Config // Configs for experimental path-based scheme - - // zktrie related stuff - IsUsingZktrie bool } // HashDefaults represents a config for using hash-based scheme with @@ -46,13 +42,6 @@ var HashDefaults = &Config{ HashDB: hashdb.Defaults, } -// HashDefaultsWithZktrie represents a config based on HashDefaults but with zktrie enabled. -var HashDefaultsWithZktrie = &Config{ - Preimages: false, - HashDB: hashdb.Defaults, - IsUsingZktrie: true, -} - // HashDefaultsWithPreimages represents a config based on HashDefaults but with Preimages enabled. var HashDefaultsWithPreimages = &Config{ Preimages: true, @@ -90,9 +79,6 @@ type backend interface { // Close closes the trie database backend and releases all held resources. Close() error - - // database supplementary methods, to get the underlying fields - GetLock() *sync.RWMutex } // Database is the wrapper of the underlying backend which is shared by different @@ -103,10 +89,6 @@ type Database struct { diskdb ethdb.Database // Persistent database to store the snapshot preimages *preimageStore // The store for caching preimages backend backend // The backend for managing trie nodes - - // zktrie related stuff - // TODO: It's a quick&dirty implementation. FIXME later. 
- rawDirties KvMap } // NewDatabase initializes the trie database with default settings, note @@ -124,8 +106,6 @@ func NewDatabase(diskdb ethdb.Database, config *Config) *Database { config: config, diskdb: diskdb, preimages: preimages, - // scroll-related - rawDirties: make(KvMap), } if config.HashDB != nil && config.PathDB != nil { log.Crit("Both 'hash' and 'path' mode are configured") @@ -133,24 +113,11 @@ func NewDatabase(diskdb ethdb.Database, config *Config) *Database { if config.PathDB != nil { db.backend = pathdb.New(diskdb, config.PathDB) } else { - db.backend = hashdb.New(diskdb, config.HashDB, mptResolver{}) + db.backend = hashdb.New(diskdb, config.HashDB, ZkChildResolver{}) } return db } -func (db *Database) IsUsingZktrie() bool { - // compatible logic for light mode - if db == nil || db.config == nil { - return false - } - return db.config.IsUsingZktrie -} - -func (db *Database) SetIsUsingZktrie(isUsingZktrie bool) { - // config must not be nil - db.config.IsUsingZktrie = isUsingZktrie -} - // Reader returns a reader for accessing all trie nodes with provided state root. // An error will be returned if the requested state is not available. func (db *Database) Reader(blockRoot common.Hash) (Reader, error) { @@ -181,25 +148,6 @@ func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, n // to disk. As a side effect, all pre-images accumulated up to this point are // also written. 
func (db *Database) Commit(root common.Hash, report bool) error { - batch := db.diskdb.NewBatch() - - db.GetLock().Lock() - for _, v := range db.rawDirties { - batch.Put(v.K, v.V) - } - for k := range db.rawDirties { - delete(db.rawDirties, k) - } - db.GetLock().Unlock() - if err := batch.Write(); err != nil { - return err - } - batch.Reset() - - if (root == common.Hash{}) { - return nil - } - if db.preimages != nil { db.preimages.commit(true) } diff --git a/trie/database_supplement.go b/trie/database_supplement.go deleted file mode 100644 index fa04d4dbb2ff..000000000000 --- a/trie/database_supplement.go +++ /dev/null @@ -1,32 +0,0 @@ -package trie - -import ( - "sync" - - "github.com/VictoriaMetrics/fastcache" - - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/trie/triedb/hashdb" -) - -func (db *Database) GetLock() *sync.RWMutex { - return db.backend.GetLock() -} - -func (db *Database) GetCleans() *fastcache.Cache { - hdb, ok := db.backend.(*hashdb.Database) - if !ok { - panic("only hashdb supported") - } - return hdb.GetCleans() -} - -// EmptyRoot indicate what root is for an empty trie, it depends on its underlying implement (zktrie or common trie) -func (db *Database) EmptyRoot() common.Hash { - if db.IsUsingZktrie() { - return types.EmptyZkTrieRootHash - } else { - return types.EmptyLegacyTrieRootHash - } -} diff --git a/trie/hash.go b/trie/hash.go new file mode 100644 index 000000000000..e97013a9f4b0 --- /dev/null +++ b/trie/hash.go @@ -0,0 +1,149 @@ +package trie + +import ( + "encoding/hex" + "fmt" + "math/big" + "slices" +) + +var Q *big.Int + +const ( + HASH_DOMAIN_ELEMS_BASE = 256 + HASH_DOMAIN_BYTE32 = 2 * HASH_DOMAIN_ELEMS_BASE +) + +func init() { + qString := "21888242871839275222246405745257275088548364400416034343698204186575808495617" + var ok bool + Q, ok = new(big.Int).SetString(qString, 10) //nolint:gomnd + if !ok { + panic(fmt.Sprintf("Bad base 10 string %s", 
qString)) + } +} + +// CheckBigIntInField checks if given *big.Int fits in a Field Q element +func CheckBigIntInField(a *big.Int) bool { + return a.Cmp(Q) == -1 +} + +const numCharPrint = 8 + +// HashByteLen is the length of the Hash byte array +const HashByteLen = 32 + +var HashZero = Hash{} + +// Hash is the generic type to store the hash in the MerkleTree, encoded in little endian +type Hash [HashByteLen]byte + +// MarshalText implements the marshaler for the Hash type +func (h Hash) MarshalText() ([]byte, error) { + return []byte(h.BigInt().String()), nil +} + +// UnmarshalText implements the unmarshaler for the Hash type +func (h *Hash) UnmarshalText(b []byte) error { + ha, err := NewHashFromString(string(b)) + copy(h[:], ha[:]) + return err +} + +// String returns decimal representation in string format of the Hash +func (h Hash) String() string { + s := h.BigInt().String() + if len(s) < numCharPrint { + return s + } + return s[0:numCharPrint] + "..." +} + +// Hex returns the hexadecimal representation of the Hash +func (h Hash) Hex() string { + return hex.EncodeToString(h.Bytes()) +} + +// BigInt returns the *big.Int representation of the *Hash +func (h *Hash) BigInt() *big.Int { + return big.NewInt(0).SetBytes(h.Bytes()) +} + +// SetBytes sets the value of the hash from the given big endian byte array +func (h *Hash) SetBytes(b []byte) { + *h = HashZero + _ = h[len(b)-1] // eliminate range checks + for i := 0; i < len(b); i++ { + h[len(b)-i-1] = b[i] + } +} + +// Bytes returns the byte representation of the *Hash in big-endian encoding. +// The function converts the byte order from little endian to big endian. 
+func (h *Hash) Bytes() []byte { + b := [HashByteLen]byte{} + copy(b[:], h[:]) + slices.Reverse(b[:]) + return b[:] +} + +// Set copies the given hash in to this +func (h *Hash) Set(other *Hash) { + *h = *other +} + +// Copy copies the given hash in to this +func (h *Hash) Clone() *Hash { + var clone Hash + clone.Set(h) + return &clone +} + +// NewBigIntFromHashBytes returns a *big.Int from a byte array, swapping the +// endianness in the process. This is the intended method to get a *big.Int +// from a byte array that previously has ben generated by the Hash.Bytes() +// method. +func NewBigIntFromHashBytes(b []byte) (*big.Int, error) { + if len(b) != HashByteLen { + return nil, fmt.Errorf("expected %d bytes, but got %d bytes", HashByteLen, len(b)) + } + bi := new(big.Int).SetBytes(b) + if !CheckBigIntInField(bi) { + return nil, fmt.Errorf("NewBigIntFromHashBytes: Value not inside the Finite Field") + } + return bi, nil +} + +// NewHashFromBigInt returns a *Hash representation of the given *big.Int +func NewHashFromBigInt(b *big.Int) *Hash { + var bytes [HashByteLen]byte + return NewHashFromBytes(b.FillBytes(bytes[:])) +} + +// NewHashFromBytes returns a *Hash from a byte array considered to be +// a represent of big-endian integer, it swapping the endianness +// in the process. +func NewHashFromBytes(b []byte) *Hash { + var h Hash + h.SetBytes(b) + return &h +} + +// NewHashFromCheckedBytes is the intended method to get a *Hash from a byte array +// that previously has ben generated by the Hash.Bytes() method. 
so it check the +// size of bytes to be expected length +func NewHashFromCheckedBytes(b []byte) (*Hash, error) { + if len(b) != HashByteLen { + return nil, fmt.Errorf("expected %d bytes, but got %d bytes", HashByteLen, len(b)) + } + return NewHashFromBytes(b), nil +} + +// NewHashFromString returns a *Hash representation of the given decimal string +func NewHashFromString(s string) (*Hash, error) { + bi, ok := new(big.Int).SetString(s, 10) + if !ok { + return nil, fmt.Errorf("cannot parse the string to Hash") + } + return NewHashFromBigInt(bi), nil +} diff --git a/trie/hash_test.go b/trie/hash_test.go new file mode 100644 index 000000000000..8f25e731cc9d --- /dev/null +++ b/trie/hash_test.go @@ -0,0 +1,83 @@ +package trie + +import ( + "bytes" + "crypto/rand" + "fmt" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCheckBigIntInField(t *testing.T) { + bi := big.NewInt(0) + assert.True(t, CheckBigIntInField(bi)) + + bi = new(big.Int).Sub(Q, big.NewInt(1)) + assert.True(t, CheckBigIntInField(bi)) + + bi = new(big.Int).Set(Q) + assert.False(t, CheckBigIntInField(bi)) +} + +func TestNewHashAndBigIntFromBytes(t *testing.T) { + b := bytes.Repeat([]byte{1, 2}, 16) + h := NewHashFromBytes(b) + assert.Equal(t, "0102010201020102010201020102010201020102010201020102010201020102", h.Hex()) + assert.Equal(t, "45585349...", h.String()) + + h, err := NewHashFromCheckedBytes(b) + assert.NoError(t, err) + assert.Equal(t, "0102010201020102010201020102010201020102010201020102010201020102", h.Hex()) + + bi, err := NewBigIntFromHashBytes(b) + assert.NoError(t, err) + assert.Equal(t, "455853498485199945361735166433836579326217380693297711485161465995904286978", bi.String()) + + h1 := NewHashFromBytes(b) + text, err := h1.MarshalText() + assert.NoError(t, err) + assert.Equal(t, "455853498485199945361735166433836579326217380693297711485161465995904286978", h1.BigInt().String()) + h2 := &Hash{} + err = 
h2.UnmarshalText(text) + assert.NoError(t, err) + assert.Equal(t, h1, h2) + + short := []byte{1, 2, 3, 4, 5} + _, err = NewHashFromCheckedBytes(short) + assert.Error(t, err) + assert.Equal(t, fmt.Sprintf("expected %d bytes, but got %d bytes", HashByteLen, len(short)), err.Error()) + + short = []byte{1, 2, 3, 4, 5} + _, err = NewBigIntFromHashBytes(short) + assert.Error(t, err) + assert.Equal(t, fmt.Sprintf("expected %d bytes, but got %d bytes", HashByteLen, len(short)), err.Error()) + + outOfField := bytes.Repeat([]byte{255}, 32) + _, err = NewBigIntFromHashBytes(outOfField) + assert.Error(t, err) + assert.Equal(t, "NewBigIntFromHashBytes: Value not inside the Finite Field", err.Error()) +} + +func TestNewHashFromBigIntAndString(t *testing.T) { + bi := big.NewInt(12345) + h := NewHashFromBigInt(bi) + assert.Equal(t, "0000000000000000000000000000000000000000000000000000000000003039", h.Hex()) + assert.Equal(t, "12345", h.String()) + + s := "454086624460063511464984254936031011189294057512315937409637584344757371137" + h, err := NewHashFromString(s) + assert.NoError(t, err) + assert.Equal(t, "0101010101010101010101010101010101010101010101010101010101010101", h.Hex()) + assert.Equal(t, "45408662...", h.String()) +} + +func TestNewHashFromBytes(t *testing.T) { + h := HashZero + read, err := rand.Read(h[:]) + require.NoError(t, err) + require.Equal(t, HashByteLen, read) + require.Equal(t, h, *NewHashFromBytes(h.Bytes())) +} diff --git a/trie/iterator.go b/trie/iterator.go index d4539c1a397a..6a06be88b44b 100644 --- a/trie/iterator.go +++ b/trie/iterator.go @@ -161,7 +161,7 @@ func (e seekError) Error() string { } func newNodeIterator(trie *Trie, start []byte) NodeIterator { - if trie.Hash() == types.EmptyLegacyTrieRootHash { + if trie.Hash() == types.EmptyRootHash { return &nodeIterator{ trie: trie, err: errIteratorEnd, @@ -303,7 +303,7 @@ func (it *nodeIterator) seek(prefix []byte) error { func (it *nodeIterator) init() (*nodeIteratorState, error) { root := 
it.trie.Hash() state := &nodeIteratorState{node: it.trie.root, index: -1} - if root != types.EmptyLegacyTrieRootHash { + if root != types.EmptyRootHash { state.hash = root } return state, state.resolve(it, nil) diff --git a/trie/iterator_test.go b/trie/iterator_test.go index 007be947bfe9..77569aecc72c 100644 --- a/trie/iterator_test.go +++ b/trie/iterator_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2014 The go-ethereum Authors // This file is part of the go-ethereum library. // @@ -60,7 +63,7 @@ func TestIterator(t *testing.T) { trie.MustUpdate([]byte(val.k), []byte(val.v)) } root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyZkTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) trie, _ = New(TrieID(root), db) found := make(map[string]string) @@ -76,15 +79,6 @@ func TestIterator(t *testing.T) { } } -type kv struct { - k, v []byte - t bool -} - -func (k *kv) cmp(other *kv) int { - return bytes.Compare(k.k, other.k) -} - func TestIteratorLargeData(t *testing.T) { trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) vals := make(map[string]*kv) @@ -252,7 +246,7 @@ func TestDifferenceIterator(t *testing.T) { triea.MustUpdate([]byte(val.k), []byte(val.v)) } rootA, nodesA, _ := triea.Commit(false) - dba.Update(rootA, types.EmptyZkTrieRootHash, 0, trienode.NewWithNodeSet(nodesA), nil) + dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil) triea, _ = New(TrieID(rootA), dba) dbb := NewDatabase(rawdb.NewMemoryDatabase(), nil) @@ -261,7 +255,7 @@ func TestDifferenceIterator(t *testing.T) { trieb.MustUpdate([]byte(val.k), []byte(val.v)) } rootB, nodesB, _ := trieb.Commit(false) - dbb.Update(rootB, types.EmptyZkTrieRootHash, 0, trienode.NewWithNodeSet(nodesB), nil) + dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil) trieb, _ = New(TrieID(rootB), dbb) found := 
make(map[string]string) @@ -294,7 +288,7 @@ func TestUnionIterator(t *testing.T) { triea.MustUpdate([]byte(val.k), []byte(val.v)) } rootA, nodesA, _ := triea.Commit(false) - dba.Update(rootA, types.EmptyZkTrieRootHash, 0, trienode.NewWithNodeSet(nodesA), nil) + dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil) triea, _ = New(TrieID(rootA), dba) dbb := NewDatabase(rawdb.NewMemoryDatabase(), nil) @@ -303,7 +297,7 @@ func TestUnionIterator(t *testing.T) { trieb.MustUpdate([]byte(val.k), []byte(val.v)) } rootB, nodesB, _ := trieb.Commit(false) - dbb.Update(rootB, types.EmptyZkTrieRootHash, 0, trienode.NewWithNodeSet(nodesB), nil) + dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil) trieb, _ = New(TrieID(rootB), dbb) di, _ := NewUnionIterator([]NodeIterator{triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil)}) @@ -365,7 +359,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) { tr.MustUpdate([]byte(val.k), []byte(val.v)) } root, nodes, _ := tr.Commit(false) - tdb.Update(root, types.EmptyZkTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) if !memonly { tdb.Commit(root, false) } @@ -481,7 +475,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme strin break } } - triedb.Update(root, types.EmptyZkTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) if !memonly { triedb.Commit(root, false) } @@ -555,7 +549,7 @@ func testIteratorNodeBlob(t *testing.T, scheme string) { trie.MustUpdate([]byte(val.k), []byte(val.v)) } root, nodes, _ := trie.Commit(false) - triedb.Update(root, types.EmptyZkTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) triedb.Commit(root, false) var found = make(map[common.Hash][]byte) 
diff --git a/trie/node_test.go b/trie/node_test.go index 70a924f86268..3c0359ca5ae5 100644 --- a/trie/node_test.go +++ b/trie/node_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2016 The go-ethereum Authors // This file is part of the go-ethereum library. // diff --git a/trie/proof.go b/trie/proof.go index 4e98708c945e..15418758909c 100644 --- a/trie/proof.go +++ b/trie/proof.go @@ -107,7 +107,7 @@ func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { // If the trie does not contain a value for key, the returned proof contains all // nodes of the longest existing prefix of the key (at least the root node), ending // with the node that proves the absence of the key. -func (t *StateTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { +func (t *stateTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { return t.trie.Prove(key, proofDb) } @@ -115,11 +115,6 @@ func (t *StateTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { // key in a trie with the given root hash. VerifyProof returns an error if the // proof contains invalid trie nodes or the wrong value. func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.KeyValueReader) (value []byte, err error) { - // test the type of proof (for trie or SMT) - if buf, _ := proofDb.Get(magicHash); buf != nil { - return VerifyProofSMT(rootHash, key, proofDb) - } - key = keybytesToHex(key) wantHash := rootHash for i := 0; ; i++ { diff --git a/trie/proof_test.go b/trie/proof_test.go index 249c4a021ae6..07057fb20ebb 100644 --- a/trie/proof_test.go +++ b/trie/proof_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2015 The go-ethereum Authors // This file is part of the go-ethereum library. 
// diff --git a/trie/secure_trie.go b/trie/secure_trie.go index 0b21d7a34580..de1364941034 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -48,12 +48,12 @@ func NewSecure(stateRoot common.Hash, owner common.Hash, root common.Hash, db *D // the preimage of each key if preimage recording is enabled. // // StateTrie is not safe for concurrent use. -type StateTrie struct { +type stateTrie struct { trie Trie preimages *preimageStore hashKeyBuf [common.HashLength]byte secKeyCache map[string][]byte - secKeyCacheOwner *StateTrie // Pointer to self, replace the key cache on mismatch + secKeyCacheOwner *stateTrie // Pointer to self, replace the key cache on mismatch } // NewStateTrie creates a trie with an existing root node from a backing database. @@ -61,7 +61,7 @@ type StateTrie struct { // If root is the zero hash or the sha3 hash of an empty string, the // trie is initially empty. Otherwise, New will panic if db is nil // and returns MissingNodeError if the root node cannot be found. -func NewStateTrie(id *ID, db *Database) (*StateTrie, error) { +func newStateTrie(id *ID, db *Database) (*stateTrie, error) { if db == nil { panic("trie.NewStateTrie called without a database") } @@ -69,7 +69,7 @@ func NewStateTrie(id *ID, db *Database) (*StateTrie, error) { if err != nil { return nil, err } - return &StateTrie{trie: *trie, preimages: db.preimages}, nil + return &stateTrie{trie: *trie, preimages: db.preimages}, nil } // MustGet returns the value for key stored in the trie. @@ -77,7 +77,7 @@ func NewStateTrie(id *ID, db *Database) (*StateTrie, error) { // // This function will omit any encountered error but just // print out an error message. -func (t *StateTrie) MustGet(key []byte) []byte { +func (t *stateTrie) MustGet(key []byte) []byte { return t.trie.MustGet(t.hashKey(key)) } @@ -85,7 +85,7 @@ func (t *StateTrie) MustGet(key []byte) []byte { // and slot key. The value bytes must not be modified by the caller. 
// If the specified storage slot is not in the trie, nil will be returned. // If a trie node is not found in the database, a MissingNodeError is returned. -func (t *StateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) { +func (t *stateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) { enc, err := t.trie.Get(t.hashKey(key)) if err != nil || len(enc) == 0 { return nil, err @@ -97,7 +97,7 @@ func (t *StateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) { // GetAccount attempts to retrieve an account with provided account address. // If the specified account is not in the trie, nil will be returned. // If a trie node is not found in the database, a MissingNodeError is returned. -func (t *StateTrie) GetAccount(address common.Address) (*types.StateAccount, error) { +func (t *stateTrie) GetAccount(address common.Address) (*types.StateAccount, error) { res, err := t.trie.Get(t.hashKey(address.Bytes())) if res == nil || err != nil { return nil, err @@ -110,7 +110,7 @@ func (t *StateTrie) GetAccount(address common.Address) (*types.StateAccount, err // GetAccountByHash does the same thing as GetAccount, however it expects an // account hash that is the hash of address. This constitutes an abstraction // leak, since the client code needs to know the key format. -func (t *StateTrie) GetAccountByHash(addrHash common.Hash) (*types.StateAccount, error) { +func (t *stateTrie) GetAccountByHash(addrHash common.Hash) (*types.StateAccount, error) { res, err := t.trie.Get(addrHash.Bytes()) if res == nil || err != nil { return nil, err @@ -124,7 +124,7 @@ func (t *StateTrie) GetAccountByHash(addrHash common.Hash) (*types.StateAccount, // possible to use keybyte-encoding as the path might contain odd nibbles. // If the specified trie node is not in the trie, nil will be returned. // If a trie node is not found in the database, a MissingNodeError is returned. 
-func (t *StateTrie) GetNode(path []byte) ([]byte, int, error) { +func (t *stateTrie) GetNode(path []byte) ([]byte, int, error) { return t.trie.GetNode(path) } @@ -137,7 +137,7 @@ func (t *StateTrie) GetNode(path []byte) ([]byte, int, error) { // // This function will omit any encountered error but just print out an // error message. -func (t *StateTrie) MustUpdate(key, value []byte) { +func (t *stateTrie) MustUpdate(key, value []byte) { hk := t.hashKey(key) t.trie.MustUpdate(hk, value) t.getSecKeyCache()[string(hk)] = common.CopyBytes(key) @@ -151,7 +151,7 @@ func (t *StateTrie) MustUpdate(key, value []byte) { // stored in the trie. // // If a node is not found in the database, a MissingNodeError is returned. -func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error { +func (t *stateTrie) UpdateStorage(_ common.Address, key, value []byte) error { hk := t.hashKey(key) v, _ := rlp.EncodeToBytes(value) err := t.trie.Update(hk, v) @@ -163,7 +163,7 @@ func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error { } // UpdateAccount will abstract the write of an account to the secure trie. -func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccount) error { +func (t *stateTrie) UpdateAccount(address common.Address, acc *types.StateAccount) error { hk := t.hashKey(address.Bytes()) data, err := rlp.EncodeToBytes(acc) if err != nil { @@ -176,13 +176,13 @@ func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccoun return nil } -func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error { +func (t *stateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error { return nil } // MustDelete removes any existing value for key from the trie. This function // will omit any encountered error but just print out an error message. 
-func (t *StateTrie) MustDelete(key []byte) { +func (t *stateTrie) MustDelete(key []byte) { hk := t.hashKey(key) delete(t.getSecKeyCache(), string(hk)) t.trie.MustDelete(hk) @@ -191,14 +191,14 @@ func (t *StateTrie) MustDelete(key []byte) { // DeleteStorage removes any existing storage slot from the trie. // If the specified trie node is not in the trie, nothing will be changed. // If a node is not found in the database, a MissingNodeError is returned. -func (t *StateTrie) DeleteStorage(_ common.Address, key []byte) error { +func (t *stateTrie) DeleteStorage(_ common.Address, key []byte) error { hk := t.hashKey(key) delete(t.getSecKeyCache(), string(hk)) return t.trie.Delete(hk) } // DeleteAccount abstracts an account deletion from the trie. -func (t *StateTrie) DeleteAccount(address common.Address) error { +func (t *stateTrie) DeleteAccount(address common.Address) error { hk := t.hashKey(address.Bytes()) delete(t.getSecKeyCache(), string(hk)) return t.trie.Delete(hk) @@ -206,7 +206,7 @@ func (t *StateTrie) DeleteAccount(address common.Address) error { // GetKey returns the sha3 preimage of a hashed key that was // previously used to store a value. -func (t *StateTrie) GetKey(shaKey []byte) []byte { +func (t *stateTrie) GetKey(shaKey []byte) []byte { if key, ok := t.getSecKeyCache()[string(shaKey)]; ok { return key } @@ -223,7 +223,7 @@ func (t *StateTrie) GetKey(shaKey []byte) []byte { // All cached preimages will be also flushed if preimages recording is enabled. // Once the trie is committed, it's not usable anymore. 
A new trie must // be created with new root and updated trie database for following usage -func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) { +func (t *stateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) { // Write all the pre-images to the actual disk database if len(t.getSecKeyCache()) > 0 { if t.preimages != nil { @@ -241,13 +241,13 @@ func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, er // Hash returns the root hash of StateTrie. It does not write to the // database and can be used even if the trie doesn't have one. -func (t *StateTrie) Hash() common.Hash { +func (t *stateTrie) Hash() common.Hash { return t.trie.Hash() } // Copy returns a copy of StateTrie. -func (t *StateTrie) Copy() *StateTrie { - return &StateTrie{ +func (t *stateTrie) Copy() *stateTrie { + return &stateTrie{ trie: *t.trie.Copy(), preimages: t.preimages, secKeyCache: t.secKeyCache, @@ -256,20 +256,20 @@ func (t *StateTrie) Copy() *StateTrie { // NodeIterator returns an iterator that returns nodes of the underlying trie. // Iteration starts at the key after the given start key. -func (t *StateTrie) NodeIterator(start []byte) (NodeIterator, error) { +func (t *stateTrie) NodeIterator(start []byte) (NodeIterator, error) { return t.trie.NodeIterator(start) } // MustNodeIterator is a wrapper of NodeIterator and will omit any encountered // error but just print out an error message. -func (t *StateTrie) MustNodeIterator(start []byte) NodeIterator { +func (t *stateTrie) MustNodeIterator(start []byte) NodeIterator { return t.trie.MustNodeIterator(start) } // hashKey returns the hash of key as an ephemeral buffer. // The caller must not hold onto the return value because it will become // invalid on the next call to hashKey or secKey. 
-func (t *StateTrie) hashKey(key []byte) []byte { +func (t *stateTrie) hashKey(key []byte) []byte { h := newHasher(false) h.sha.Reset() h.sha.Write(key) @@ -281,7 +281,7 @@ func (t *StateTrie) hashKey(key []byte) []byte { // getSecKeyCache returns the current secure key cache, creating a new one if // ownership changed (i.e. the current secure trie is a copy of another owning // the actual cache). -func (t *StateTrie) getSecKeyCache() map[string][]byte { +func (t *stateTrie) getSecKeyCache() map[string][]byte { if t != t.secKeyCacheOwner { t.secKeyCacheOwner = t t.secKeyCache = make(map[string][]byte) diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go index 69003e3245b4..0b7f4d3fb8cd 100644 --- a/trie/secure_trie_test.go +++ b/trie/secure_trie_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2015 The go-ethereum Authors // This file is part of the go-ethereum library. // @@ -31,7 +34,7 @@ import ( ) func newEmptySecure() *StateTrie { - trie, _ := NewStateTrie(TrieID(types.EmptyZkTrieRootHash), NewDatabase(rawdb.NewMemoryDatabase(), nil)) + trie, _ := NewStateTrie(TrieID(types.EmptyStateRootHash), NewDatabase(rawdb.NewMemoryDatabase(), nil)) return trie } @@ -39,7 +42,7 @@ func newEmptySecure() *StateTrie { func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) { // Create an empty trie triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil) - trie, _ := NewStateTrie(TrieID(types.EmptyZkTrieRootHash), triedb) + trie, _ := NewStateTrie(TrieID(types.EmptyStateRootHash), triedb) // Fill it with some arbitrary data content := make(map[string][]byte) @@ -61,7 +64,7 @@ func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) { } } root, nodes, _ := trie.Commit(false) - if err := triedb.Update(root, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { + if err := triedb.Update(root, types.EmptyStateRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { 
panic(fmt.Errorf("failed to commit db %v", err)) } // Re-create the trie based on the new state diff --git a/trie/stacktrie.go b/trie/stacktrie.go index 13143911862b..3a64e8319dd9 100644 --- a/trie/stacktrie.go +++ b/trie/stacktrie.go @@ -344,15 +344,16 @@ func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) { // This method also sets 'st.type' to hashedNode, and clears 'st.key'. func (t *StackTrie) hash(st *stNode, path []byte) { var ( - blob []byte // RLP-encoded node blob - internal [][]byte // List of node paths covered by the extension node + emptyHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + blob []byte // RLP-encoded node blob + internal [][]byte // List of node paths covered by the extension node ) switch st.typ { case hashedNode: return case emptyNode: - st.val = types.EmptyLegacyTrieRootHash.Bytes() + st.val = emptyHash.Bytes() st.key = st.key[:0] st.typ = hashedNode return diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go index 453402f4a56c..b9cda9d8ebea 100644 --- a/trie/stacktrie_test.go +++ b/trie/stacktrie_test.go @@ -421,6 +421,15 @@ func buildPartialTree(entries []*kv, t *testing.T) map[string]common.Hash { return nodes } +type kv struct { + k, v []byte + t bool +} + +func (k *kv) cmp(other *kv) int { + return bytes.Compare(k.k, other.k) +} + func TestPartialStackTrie(t *testing.T) { for round := 0; round < 100; round++ { var ( diff --git a/trie/sync.go b/trie/sync.go index 44c028c04474..07509b1121b2 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -211,7 +211,7 @@ func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallb // hex format and contain all the parent path if it's layered trie node. 
func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, parentPath []byte, callback LeafCallback) { // Short circuit if the trie is empty or already known - if root == types.EmptyLegacyTrieRootHash { + if root == types.EmptyRootHash { return } if s.membatch.hasNode(path) { diff --git a/trie/sync_test.go b/trie/sync_test.go index 3cb1c01b92ee..a20c5fb09030 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2015 The go-ethereum Authors // This file is part of the go-ethereum library. // @@ -35,7 +38,7 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[str // Create an empty trie db := rawdb.NewMemoryDatabase() triedb := newTestDatabase(db, scheme) - trie, _ := NewStateTrie(TrieID(types.EmptyZkTrieRootHash), triedb) + trie, _ := NewStateTrie(TrieID(types.EmptyStateRootHash), triedb) // Fill it with some arbitrary data content := make(map[string][]byte) @@ -57,7 +60,7 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[str } } root, nodes, _ := trie.Commit(false) - if err := triedb.Update(root, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { + if err := triedb.Update(root, types.EmptyStateRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { panic(fmt.Errorf("failed to commit db %v", err)) } if err := triedb.Commit(root, false); err != nil { @@ -137,9 +140,9 @@ func TestEmptySync(t *testing.T) { // dbD := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme) emptyA := NewEmpty(dbA) - emptyB, _ := New(TrieID(types.EmptyZkTrieRootHash), dbB) + emptyB, _ := New(TrieID(types.EmptyRootHash), dbB) // emptyC := NewEmpty(dbC) - // emptyD, _ := New(TrieID(types.EmptyLegacyTrieRootHash), dbD) + // emptyD, _ := New(TrieID(types.EmptyRootHash), dbD) // for i, trie := range []*Trie{emptyA, emptyB, emptyC, emptyD} { // sync := NewSync(trie.Hash(), memorydb.New(), nil, 
[]*Database{dbA, dbB, dbC, dbD}[i].Scheme()) @@ -811,7 +814,7 @@ func testPivotMove(t *testing.T, scheme string, tiny bool) { var ( srcDisk = rawdb.NewMemoryDatabase() srcTrieDB = newTestDatabase(srcDisk, scheme) - srcTrie, _ = New(TrieID(types.EmptyZkTrieRootHash), srcTrieDB) + srcTrie, _ = New(TrieID(types.EmptyRootHash), srcTrieDB) deleteFn = func(key []byte, tr *Trie, states map[string][]byte) { tr.Delete(key) @@ -845,7 +848,7 @@ func testPivotMove(t *testing.T, scheme string, tiny bool) { writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateA) rootA, nodesA, _ := srcTrie.Commit(false) - if err := srcTrieDB.Update(rootA, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(nodesA), nil); err != nil { + if err := srcTrieDB.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil); err != nil { panic(err) } if err := srcTrieDB.Commit(rootA, false); err != nil { diff --git a/trie/tracer_test.go b/trie/tracer_test.go index 36b57efcf4a2..47775020276c 100644 --- a/trie/tracer_test.go +++ b/trie/tracer_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2022 The go-ethereum Authors // This file is part of the go-ethereum library. 
// @@ -71,7 +74,7 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) { insertSet := copySet(trie.tracer.inserts) // copy before commit deleteSet := copySet(trie.tracer.deletes) // copy before commit root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) seen := setKeys(iterNodes(db, root)) if !compareSet(insertSet, seen) { @@ -137,7 +140,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) { trie.MustUpdate([]byte(val.k), []byte(val.v)) } root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) trie, _ = New(TrieID(root), db) if err := verifyAccessList(orig, trie, nodes); err != nil { @@ -219,7 +222,7 @@ func TestAccessListLeak(t *testing.T) { trie.MustUpdate([]byte(val.k), []byte(val.v)) } root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) var cases = []struct { op func(tr *Trie) @@ -269,7 +272,7 @@ func TestTinyTree(t *testing.T) { trie.MustUpdate([]byte(val.k), randBytes(32)) } root, set, _ := trie.Commit(false) - db.Update(root, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(set), nil) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(set), nil) parent := root trie, _ = New(TrieID(root), db) diff --git a/trie/trie.go b/trie/trie.go index 836443ce4b44..5839e8d6cc4e 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -89,7 +89,7 @@ func New(id *ID, db *Database) (*Trie, error) { reader: reader, tracer: newTracer(), } - if id.Root != (common.Hash{}) && id.Root != types.EmptyLegacyTrieRootHash { + if id.Root != (common.Hash{}) && id.Root != types.EmptyRootHash { 
rootnode, err := trie.resolveAndTrack(id.Root[:], nil) if err != nil { return nil, err @@ -101,7 +101,7 @@ func New(id *ID, db *Database) (*Trie, error) { // NewEmpty is a shortcut to create empty tree. It's mostly used in tests. func NewEmpty(db *Database) *Trie { - tr, _ := New(TrieID(types.EmptyZkTrieRootHash), db) + tr, _ := New(TrieID(types.EmptyRootHash), db) return tr } @@ -619,13 +619,13 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) if t.root == nil { paths := t.tracer.deletedNodes() if len(paths) == 0 { - return types.EmptyLegacyTrieRootHash, nil, nil // case (a) + return types.EmptyRootHash, nil, nil // case (a) } nodes := trienode.NewNodeSet(t.owner) for _, path := range paths { nodes.AddNode([]byte(path), trienode.NewDeleted()) } - return types.EmptyLegacyTrieRootHash, nodes, nil // case (b) + return types.EmptyRootHash, nodes, nil // case (b) } // Derive the hash for all dirty nodes first. We hold the assumption // in the following procedure that all nodes are hashed. @@ -650,7 +650,7 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) // hashRoot calculates the root hash of the given trie func (t *Trie) hashRoot() (node, node) { if t.root == nil { - return hashNode(types.EmptyLegacyTrieRootHash.Bytes()), nil + return hashNode(types.EmptyRootHash.Bytes()), nil } // If the number of changes is below 100, we let one thread handle it h := newHasher(t.unhashed >= 100) diff --git a/trie/trie_reader.go b/trie/trie_reader.go index 73dd7d4d515d..72c14a7301fa 100644 --- a/trie/trie_reader.go +++ b/trie/trie_reader.go @@ -19,6 +19,7 @@ package trie import ( "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/trie/triestate" ) @@ -45,14 +46,10 @@ type trieReader struct { // newTrieReader initializes the trie reader with the given node reader. 
func newTrieReader(stateRoot, owner common.Hash, db *Database) (*trieReader, error) { - // if stateRoot == (common.Hash{}) || stateRoot == types.EmptyRootHash { - // if stateRoot == (common.Hash{}) { - // log.Error("Zero state root hash!") - // } - // return &trieReader{owner: owner}, nil - // } - if stateRoot == types.EmptyZkTrieRootHash { - // log.Error("Zero state root hash!") + if stateRoot == (common.Hash{}) || stateRoot == types.EmptyRootHash { + if stateRoot == (common.Hash{}) { + log.Error("Zero state root hash!") + } return &trieReader{owner: owner}, nil } reader, err := db.Reader(stateRoot) diff --git a/trie/trie_test.go b/trie/trie_test.go index 9de9125070bc..cf09b0fe9ffd 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -1,3 +1,6 @@ +//go:build all_tests +// +build all_tests + // Copyright 2014 The go-ethereum Authors // This file is part of the go-ethereum library. // @@ -48,7 +51,7 @@ func init() { func TestEmptyTrie(t *testing.T) { trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) res := trie.Hash() - exp := types.EmptyLegacyTrieRootHash + exp := types.EmptyRootHash if res != exp { t.Errorf("expected %x got %x", exp, res) } @@ -95,7 +98,7 @@ func testMissingNode(t *testing.T, memonly bool, scheme string) { updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer") updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf") root, nodes, _ := trie.Commit(false) - triedb.Update(root, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) if !memonly { triedb.Commit(root, false) @@ -209,7 +212,7 @@ func TestGet(t *testing.T) { return } root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) trie, _ = New(TrieID(root), db) } } @@ -281,7 +284,7 @@ func TestReplication(t *testing.T) 
{ updateString(trie, val.k, val.v) } root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) // create a new trie on top of the database and check that lookups work. trie2, err := New(TrieID(root), db) @@ -300,7 +303,7 @@ func TestReplication(t *testing.T) { // recreate the trie after commit if nodes != nil { - db.Update(hash, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + db.Update(hash, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) } trie2, err = New(TrieID(hash), db) if err != nil { @@ -467,7 +470,7 @@ func runRandTest(rt randTest) bool { scheme = rawdb.PathScheme } var ( - origin = types.EmptyLegacyTrieRootHash + origin = types.EmptyRootHash triedb = newTestDatabase(rawdb.NewMemoryDatabase(), scheme) tr = NewEmpty(triedb) values = make(map[string]string) // tracks content of the trie @@ -492,7 +495,7 @@ func runRandTest(rt randTest) bool { } case opProve: hash := tr.Hash() - if hash == types.EmptyLegacyTrieRootHash { + if hash == types.EmptyRootHash { continue } proofDb := rawdb.NewMemoryDatabase() @@ -764,7 +767,7 @@ func makeAccounts(size int) (addresses [][20]byte, accounts [][]byte) { for i := 0; i < len(accounts); i++ { var ( nonce = uint64(random.Int63()) - root = types.EmptyLegacyTrieRootHash + root = types.EmptyRootHash codekeccak = codehash.EmptyKeccakCodeHash codeposeidon = codehash.EmptyPoseidonCodeHash ) @@ -862,7 +865,7 @@ func TestCommitSequence(t *testing.T) { } // Flush trie -> database root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) // Flush memdb -> disk (sponge) db.Commit(root, false) if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) { @@ -903,7 +906,7 @@ func 
TestCommitSequenceRandomBlobs(t *testing.T) { } // Flush trie -> database root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) // Flush memdb -> disk (sponge) db.Commit(root, false) if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) { @@ -946,7 +949,7 @@ func TestCommitSequenceStackTrie(t *testing.T) { // Flush trie -> database root, nodes, _ := trie.Commit(false) // Flush memdb -> disk (sponge) - db.Update(root, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) db.Commit(root, false) // And flush stacktrie -> disk stRoot := stTrie.Commit() @@ -994,7 +997,7 @@ func TestCommitSequenceSmallRoot(t *testing.T) { // Flush trie -> database root, nodes, _ := trie.Commit(false) // Flush memdb -> disk (sponge) - db.Update(root, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) db.Commit(root, false) // And flush stacktrie -> disk stRoot := stTrie.Commit() @@ -1163,7 +1166,7 @@ func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts [] } h := trie.Hash() root, nodes, _ := trie.Commit(false) - triedb.Update(root, types.EmptyLegacyTrieRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) b.StartTimer() triedb.Dereference(h) b.StopTimer() diff --git a/trie/triedb/hashdb/database.go b/trie/triedb/hashdb/database.go index 504566b7c483..ca6d79731a7b 100644 --- a/trie/triedb/hashdb/database.go +++ b/trie/triedb/hashdb/database.go @@ -30,7 +30,6 @@ import ( "github.com/scroll-tech/go-ethereum/ethdb" "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/metrics" - 
"github.com/scroll-tech/go-ethereum/rlp" "github.com/scroll-tech/go-ethereum/trie/trienode" "github.com/scroll-tech/go-ethereum/trie/triestate" ) @@ -575,8 +574,7 @@ func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, n // Ensure the parent state is present and signal a warning if not. if parent != types.EmptyRootHash { if blob, _ := db.Node(parent); len(blob) == 0 { - // Silence the warning because it is not applicable to zktrie. - // log.Error("parent state is not present") + log.Error("parent state is not present") } } db.lock.Lock() @@ -611,8 +609,8 @@ func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, n // to an account trie leaf. if set, present := nodes.Sets[common.Hash{}]; present { for _, n := range set.Leaves { - var account types.StateAccount - if err := rlp.DecodeBytes(n.Blob, &account); err != nil { + account, err := types.UnmarshalStateAccount(n.Blob) + if err != nil { return err } if account.Root != types.EmptyRootHash { diff --git a/trie/triedb/hashdb/database_supplement.go b/trie/triedb/hashdb/database_supplement.go deleted file mode 100644 index 841bcc371163..000000000000 --- a/trie/triedb/hashdb/database_supplement.go +++ /dev/null @@ -1,15 +0,0 @@ -package hashdb - -import ( - "sync" - - "github.com/VictoriaMetrics/fastcache" -) - -func (db *Database) GetLock() *sync.RWMutex { - return &db.lock -} - -func (db *Database) GetCleans() *fastcache.Cache { - return db.cleans -} diff --git a/trie/util.go b/trie/util.go new file mode 100644 index 000000000000..79f87e4aacb4 --- /dev/null +++ b/trie/util.go @@ -0,0 +1,117 @@ +package trie + +import ( + "math/big" + + "github.com/scroll-tech/go-ethereum/crypto/poseidon" +) + +// HashElemsWithDomain performs a recursive poseidon hash over the array of ElemBytes, each hash +// reduces 2 fields into one, with a specified domain field which would be used in +// every recursive call +func HashElemsWithDomain(domain, fst, snd *big.Int, elems ...*big.Int)
(*Hash, error) { + + l := len(elems) + baseH, err := poseidon.HashFixedWithDomain([]*big.Int{fst, snd}, domain) + if err != nil { + return nil, err + } + if l == 0 { + return NewHashFromBigInt(baseH), nil + } else if l == 1 { + return HashElemsWithDomain(domain, baseH, elems[0]) + } + + tmp := make([]*big.Int, (l+1)/2) + for i := range tmp { + if (i+1)*2 > l { + tmp[i] = elems[i*2] + } else { + h, err := poseidon.HashFixedWithDomain(elems[i*2:(i+1)*2], domain) + if err != nil { + return nil, err + } + tmp[i] = h + } + } + + return HashElemsWithDomain(domain, baseH, tmp[0], tmp[1:]...) +} + +// HashElems calls HashElemsWithDomain with a domain of HASH_DOMAIN_ELEMS_BASE(256)* +func HashElems(fst, snd *big.Int, elems ...*big.Int) (*Hash, error) { + + return HashElemsWithDomain(big.NewInt(int64(len(elems)*HASH_DOMAIN_ELEMS_BASE)+HASH_DOMAIN_BYTE32), + fst, snd, elems...) +} + +// HandlingElemsAndByte32 hashes an array mixed with field and byte32 elements, turning each byte32 into +// field elements first, then calculates the hash with HashElems +func HandlingElemsAndByte32(flagArray uint32, elems []Byte32) (*Hash, error) { + + ret := make([]*big.Int, len(elems)) + var err error + + for i, elem := range elems { + if flagArray&(1<. - package trie import ( + "bytes" + "errors" "fmt" - - zktrie "github.com/scroll-tech/zktrie/trie" - zkt "github.com/scroll-tech/zktrie/types" + "io" + "maps" + "math/big" + "sync" "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/crypto/poseidon" "github.com/scroll-tech/go-ethereum/ethdb" - "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/trie/trienode" ) -var magicHash []byte = []byte("THIS IS THE MAGIC INDEX FOR ZKTRIE") +const ( + // NodeKeyValidBytes is the number of least significant bytes in the node key + // that are considered valid for addressing the leaf node, and thus limits the + // maximum trie depth to NodeKeyValidBytes * 8.
+ // We need to truncate the node key because the key is the output of Poseidon + // hash and the key space doesn't fully occupy the range of power of two. It can + // lead to an ambiguous bit representation of the key in the finite field + // causing a soundness issue in the zk circuit. + NodeKeyValidBytes = 31 + + // proofFlagsLen is the byte length of the flags in the proof header + // (first 32 bytes). + proofFlagsLen = 2 +) + +var ( + magicHash = []byte("THIS IS THE MAGIC INDEX FOR ZKTRIE") + magicSMTBytes = []byte("THIS IS SOME MAGIC BYTES FOR SMT m1rRXgP2xpDI") + + // ErrInvalidField is used when a key is not inside the finite field. + ErrInvalidField = errors.New("Key not inside the Finite Field") + // ErrNodeKeyAlreadyExists is used when a node key already exists. + ErrNodeKeyAlreadyExists = errors.New("key already exists") + // ErrKeyNotFound is used when a key is not found in the ZkTrie. + ErrKeyNotFound = errors.New("key not found in ZkTrie") + // ErrNodeBytesBadSize is used when the data of a node has an incorrect + // size and can't be parsed. + ErrNodeBytesBadSize = errors.New("node data has incorrect size in the DB") + // ErrReachedMaxLevel is used when a traversal of the MT reaches the + // maximum level. + ErrReachedMaxLevel = errors.New("reached maximum level of the merkle tree") + // ErrInvalidNodeFound is used when an invalid node is found and can't + // be parsed. + ErrInvalidNodeFound = errors.New("found an invalid node in the DB") + // ErrInvalidProofBytes is used when a serialized proof is invalid. + ErrInvalidProofBytes = errors.New("the serialized proof is invalid") + // ErrEntryIndexAlreadyExists is used when the entry index already + // exists in the tree.
+ ErrEntryIndexAlreadyExists = errors.New("the entry index already exists in the tree") + // ErrNotWritable is used when the ZkTrie is not writable and a + // write function is called + ErrNotWritable = errors.New("merkle Tree not writable") +) + +// StateTrie is just an alias for ZkTrie now +type StateTrie = ZkTrie -// wrap zktrie for trie interface +// ZkTrie is the struct with the main elements of the ZkTrie type ZkTrie struct { - *zktrie.ZkTrie - db *ZktrieDatabase + lock sync.RWMutex + owner common.Hash + reader *trieReader + rootKey *Hash + maxLevels int + + // Preimage store + preimages *preimageStore + secKeyCache map[string][]byte + + // Flag whether the commit operation is already performed. If so the + // trie is not usable(latest states is invisible). + committed bool + dirtyIndex *big.Int + dirtyStorage map[Hash]*Node +} + +// NewStateTrie is just an alias for NewZkTrie now +var NewStateTrie = NewZkTrie + +// NewZkTrie loads a new ZkTrie. If in the storage already exists one +// will open that one, if not, will create a new one. 
+func NewZkTrie(id *ID, db *Database) (*ZkTrie, error) { + reader, err := newTrieReader(id.StateRoot, id.Owner, db) + if err != nil { + return nil, err + } + + mt := ZkTrie{ + owner: id.Owner, + reader: reader, + maxLevels: NodeKeyValidBytes * 8, + dirtyIndex: big.NewInt(0), + dirtyStorage: make(map[Hash]*Node), + preimages: db.preimages, + secKeyCache: make(map[string][]byte), + } + mt.rootKey = NewHashFromBytes(id.Root.Bytes()) + if *mt.rootKey != HashZero { + _, err := mt.GetNodeByHash(mt.rootKey) + if err != nil { + return nil, err + } + } + return &mt, nil +} + +// Root returns the MerkleRoot +func (mt *ZkTrie) Root() (*Hash, error) { + mt.lock.Lock() + defer mt.lock.Unlock() + return mt.root() +} + +func (mt *ZkTrie) root() (*Hash, error) { + // short circuit if there are no nodes to hash + if mt.dirtyIndex.Cmp(big.NewInt(0)) == 0 { + return mt.rootKey, nil + } + + hashedDirtyStorage := make(map[Hash]*Node) + rootKey, err := mt.calcCommitment(mt.rootKey, hashedDirtyStorage, new(sync.Mutex)) + if err != nil { + return nil, err + } + + mt.rootKey = rootKey + mt.dirtyIndex = big.NewInt(0) + mt.dirtyStorage = hashedDirtyStorage + return mt.rootKey, nil +} + +// Hash returns the root hash of SecureBinaryTrie. It does not write to the +// database and can be used even if the trie doesn't have one. +func (mt *ZkTrie) Hash() common.Hash { + root, err := mt.Root() + if err != nil { + panic("root failed in trie.Hash") + } + return common.BytesToHash(root.Bytes()) +} + +// MaxLevels returns the MT maximum level +func (mt *ZkTrie) MaxLevels() int { + return mt.maxLevels +} + +// TryUpdate updates a nodeKey & value into the ZkTrie. Where the `k` determines the +// path from the Root to the Leaf. This also return the updated leaf node +func (mt *ZkTrie) TryUpdate(key []byte, vFlag uint32, vPreimage []Byte32) error { + // Short circuit if the trie is already committed and not usable. 
+ if mt.committed { + return ErrCommitted + } + + secureKey, err := ToSecureKey(key) + if err != nil { + return err + } + nodeKey := NewHashFromBigInt(secureKey) + + // verify that k are valid and fit inside the Finite Field. + if !CheckBigIntInField(nodeKey.BigInt()) { + return ErrInvalidField + } + + newLeafNode := NewLeafNode(nodeKey, vFlag, vPreimage) + path := getPath(mt.maxLevels, nodeKey[:]) + + mt.lock.Lock() + defer mt.lock.Unlock() + + mt.secKeyCache[string(nodeKey.Bytes())] = key + + newRootKey, _, err := mt.addLeaf(newLeafNode, mt.rootKey, 0, path) + // sanity check + if err == ErrEntryIndexAlreadyExists { + panic("Encounter unexpected errortype: ErrEntryIndexAlreadyExists") + } else if err != nil { + return err + } + if newRootKey != nil { + mt.rootKey = newRootKey + } + return nil +} + +// UpdateStorage updates the storage with the given key and value +func (mt *ZkTrie) UpdateStorage(_ common.Address, key, value []byte) error { + return mt.TryUpdate(key, 1, []Byte32{*NewByte32FromBytes(value)}) +} + +// UpdateAccount updates the account with the given address and account +func (mt *ZkTrie) UpdateAccount(address common.Address, acc *types.StateAccount) error { + value, flag := acc.MarshalFields() + accValue := make([]Byte32, 0, len(value)) + for _, v := range value { + accValue = append(accValue, *NewByte32FromBytes(v[:])) + } + return mt.TryUpdate(address.Bytes(), flag, accValue) +} + +// UpdateContractCode updates the contract code with the given address and code +func (mt *ZkTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error { + return nil +} + +// pushLeaf recursively pushes an existing oldLeaf down until its path diverges +// from newLeaf, at which point both leafs are stored, all while updating the +// path. 
pushLeaf returns the node hash of the parent of the oldLeaf and newLeaf +func (mt *ZkTrie) pushLeaf(newLeaf *Node, oldLeaf *Node, lvl int, + pathNewLeaf []bool, pathOldLeaf []bool) (*Hash, error) { + if lvl > mt.maxLevels-2 { + return nil, ErrReachedMaxLevel + } + var newParentNode *Node + if pathNewLeaf[lvl] == pathOldLeaf[lvl] { // We need to go deeper! + // notice the node corresponding to return hash is always branch + nextNodeHash, err := mt.pushLeaf(newLeaf, oldLeaf, lvl+1, pathNewLeaf, pathOldLeaf) + if err != nil { + return nil, err + } + if pathNewLeaf[lvl] { // go right + newParentNode = NewParentNode(NodeTypeBranch_1, &HashZero, nextNodeHash) + } else { // go left + newParentNode = NewParentNode(NodeTypeBranch_2, nextNodeHash, &HashZero) + } + + newParentNodeKey := mt.newDirtyNodeKey() + mt.dirtyStorage[*newParentNodeKey] = newParentNode + return newParentNodeKey, nil + } + oldLeafHash, err := oldLeaf.NodeHash() + if err != nil { + return nil, err + } + newLeafHash, err := newLeaf.NodeHash() + if err != nil { + return nil, err + } + + if pathNewLeaf[lvl] { + newParentNode = NewParentNode(NodeTypeBranch_0, oldLeafHash, newLeafHash) + } else { + newParentNode = NewParentNode(NodeTypeBranch_0, newLeafHash, oldLeafHash) + } + // We can add newLeaf now. We don't need to add oldLeaf because it's + // already in the tree. + mt.dirtyStorage[*newLeafHash] = newLeaf + newParentNodeKey := mt.newDirtyNodeKey() + mt.dirtyStorage[*newParentNodeKey] = newParentNode + return newParentNodeKey, nil +} + +// Commit calculates the root for the entire trie and persist all the dirty nodes +func (mt *ZkTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) { + mt.lock.Lock() + defer mt.lock.Unlock() + + // Short circuit if the trie is already committed and not usable. 
+ if mt.committed { + return common.Hash{}, nil, ErrCommitted + } + + root, err := mt.root() + if err != nil { + return common.Hash{}, nil, err + } + + nodeSet := trienode.NewNodeSet(mt.owner) + if err := mt.commit(root, nil, nodeSet, collectLeaf); err != nil { + return common.Hash{}, nil, err + } + + mt.dirtyStorage = make(map[Hash]*Node) + mt.committed = true + + return common.BytesToHash(root.Bytes()), nodeSet, nil } -func init() { - zkt.InitHashScheme(poseidon.HashFixedWithDomain) +// Commit calculates the root for the entire trie and persist all the dirty nodes +func (mt *ZkTrie) commit(nodeHash *Hash, path []byte, nodeSet *trienode.NodeSet, collectLeaf bool) error { + node := mt.dirtyStorage[*nodeHash] + if node == nil { + return nil + } + + if node.Type == NodeTypeLeaf_New { + if mt.preimages != nil { + mt.preimages.insertPreimage(map[common.Hash][]byte{ + common.BytesToHash(nodeHash.Bytes()): node.NodeKey.Bytes(), + }) + } + if collectLeaf { + nodeSet.AddLeaf(common.BytesToHash(nodeHash.Bytes()), node.Data()) + } + } + if node.ChildL != nil { + if err := mt.commit(node.ChildL, append(path, byte(0)), nodeSet, collectLeaf); err != nil { + return err + } + } + if node.ChildR != nil { + if err := mt.commit(node.ChildR, append(path, byte(1)), nodeSet, collectLeaf); err != nil { + return err + } + } + nodeSet.AddNode(path, trienode.New(common.BytesToHash(nodeHash.Bytes()), node.CanonicalValue())) + return nil } -func sanityCheckByte32Key(b []byte) { - if len(b) != 32 && len(b) != 20 { - panic(fmt.Errorf("do not support length except for 120bit and 256bit now. data: %v len: %v", b, len(b))) +// addLeaf recursively adds a newLeaf in the MT while updating the path, and returns the key +// of the new added leaf. 
+func (mt *ZkTrie) addLeaf(newLeaf *Node, currNodeKey *Hash, + lvl int, path []bool) (*Hash, bool, error) { + var err error + if lvl > mt.maxLevels-1 { + return nil, false, ErrReachedMaxLevel + } + n, err := mt.getNode(currNodeKey) + if err != nil { + return nil, false, err + } + switch n.Type { + case NodeTypeEmpty_New: + newLeafHash, err := newLeaf.NodeHash() + if err != nil { + return nil, false, err + } + + mt.dirtyStorage[*newLeafHash] = newLeaf + return newLeafHash, true, nil + case NodeTypeLeaf_New: + newLeafHash, err := newLeaf.NodeHash() + if err != nil { + return nil, false, err + } + + if bytes.Equal(currNodeKey[:], newLeafHash[:]) { + // do nothing, duplicate entry + return nil, true, nil + } else if bytes.Equal(newLeaf.NodeKey.Bytes(), n.NodeKey.Bytes()) { + // update the existing leaf + mt.dirtyStorage[*newLeafHash] = newLeaf + return newLeafHash, true, nil + } + newSubTrieRootHash, err := mt.pushLeaf(newLeaf, n, lvl, path, getPath(mt.maxLevels, n.NodeKey[:])) + return newSubTrieRootHash, false, err + case NodeTypeBranch_0, NodeTypeBranch_1, NodeTypeBranch_2, NodeTypeBranch_3: + // We need to go deeper, continue traversing the tree, left or + // right depending on path + branchRight := path[lvl] + childSubTrieRoot := n.ChildL + if branchRight { + childSubTrieRoot = n.ChildR + } + newChildSubTrieRoot, isTerminal, err := mt.addLeaf(newLeaf, childSubTrieRoot, lvl+1, path) + if err != nil { + return nil, false, err + } + + // do nothing, if child subtrie was not modified + if newChildSubTrieRoot == nil { + return nil, false, nil + } + + newNodetype := n.Type + if !isTerminal { + newNodetype = newNodetype.DeduceUpgradeType(branchRight) + } + + var newNode *Node + if branchRight { + newNode = NewParentNode(newNodetype, n.ChildL, newChildSubTrieRoot) + } else { + newNode = NewParentNode(newNodetype, newChildSubTrieRoot, n.ChildR) + } + + // if current node is already dirty, modify in-place + // else create a new dirty sub-trie + newCurTrieRootKey := 
mt.newDirtyNodeKey() + mt.dirtyStorage[*newCurTrieRootKey] = newNode + return newCurTrieRootKey, false, err + case NodeTypeEmpty, NodeTypeLeaf, NodeTypeParent: + panic("encounter unsupported deprecated node type") + default: + return nil, false, ErrInvalidNodeFound } } -// NewZkTrie creates a trie -// NewZkTrie bypasses all the buffer mechanism in *Database, it directly uses the -// underlying diskdb -func NewZkTrie(root common.Hash, db *ZktrieDatabase) (*ZkTrie, error) { - tr, err := zktrie.NewZkTrie(*zkt.NewByte32FromBytes(root.Bytes()), db) +// newDirtyNodeKey increments the dirtyIndex and creates a new dirty node key +func (mt *ZkTrie) newDirtyNodeKey() *Hash { + mt.dirtyIndex.Add(mt.dirtyIndex, BigOne) + return NewHashFromBigInt(mt.dirtyIndex) +} + +// isDirtyNode returns if the node with the given key is dirty or not +func (mt *ZkTrie) isDirtyNode(nodeKey *Hash) bool { + _, found := mt.dirtyStorage[*nodeKey] + return found +} + +// calcCommitment calculates the commitment for the given sub trie +func (mt *ZkTrie) calcCommitment(rootKey *Hash, hashedDirtyNodes map[Hash]*Node, commitLock *sync.Mutex) (*Hash, error) { + if !mt.isDirtyNode(rootKey) { + return rootKey, nil + } + + root, err := mt.getNode(rootKey) + if err != nil { + return nil, err + } + + switch root.Type { + case NodeTypeEmpty: + return &HashZero, nil + case NodeTypeLeaf_New: + // leaves are already hashed, we just need to persist it + break + case NodeTypeBranch_0, NodeTypeBranch_1, NodeTypeBranch_2, NodeTypeBranch_3: + leftDone := make(chan struct{}) + var leftErr error + go func() { + root.ChildL, leftErr = mt.calcCommitment(root.ChildL, hashedDirtyNodes, commitLock) + close(leftDone) + }() + root.ChildR, err = mt.calcCommitment(root.ChildR, hashedDirtyNodes, commitLock) + if err != nil { + return nil, err + } + <-leftDone + if leftErr != nil { + return nil, leftErr + } + default: + return nil, errors.New(fmt.Sprint("unexpected node type", root.Type)) + } + + rootHash, err := root.NodeHash() 
if err != nil { return nil, err } - return &ZkTrie{tr, db}, nil + + commitLock.Lock() + defer commitLock.Unlock() + hashedDirtyNodes[*rootHash] = root + return rootHash, nil +} + +func (mt *ZkTrie) tryGet(nodeKey *Hash) (*Node, error) { + + path := getPath(mt.maxLevels, nodeKey[:]) + var nextKey Hash + nextKey.Set(mt.rootKey) + n := new(Node) + //sanity check + lastNodeType := NodeTypeBranch_3 + for i := 0; i < mt.maxLevels; i++ { + err := mt.getNodeTo(&nextKey, n) + if err != nil { + return nil, err + } + //sanity check + if i > 0 && n.IsTerminal() { + if lastNodeType == NodeTypeBranch_3 { + panic("parent node has invalid type: children are not terminal") + } else if path[i-1] && lastNodeType == NodeTypeBranch_1 { + panic("parent node has invalid type: right child is not terminal") + } else if !path[i-1] && lastNodeType == NodeTypeBranch_2 { + panic("parent node has invalid type: left child is not terminal") + } + } + + lastNodeType = n.Type + switch n.Type { + case NodeTypeEmpty_New: + return NewEmptyNode(), ErrKeyNotFound + case NodeTypeLeaf_New: + if bytes.Equal(nodeKey[:], n.NodeKey[:]) { + return n, nil + } + return n, ErrKeyNotFound + case NodeTypeBranch_0, NodeTypeBranch_1, NodeTypeBranch_2, NodeTypeBranch_3: + if path[i] { + nextKey.Set(n.ChildR) + } else { + nextKey.Set(n.ChildL) + } + case NodeTypeEmpty, NodeTypeLeaf, NodeTypeParent: + panic("encounter deprecated node types") + default: + return nil, ErrInvalidNodeFound + } + } + + return nil, ErrReachedMaxLevel } -// Get returns the value for key stored in the trie. +// TryGet returns the value for key stored in the trie. // The value bytes must not be modified by the caller. -func (t *ZkTrie) Get(key []byte) []byte { - sanityCheckByte32Key(key) - res, err := t.TryGet(key) +// If a node was not found in the database, a MissingNodeError is returned. 
+func (mt *ZkTrie) TryGet(key []byte) ([]byte, error) { + mt.lock.RLock() + defer mt.lock.RUnlock() + + // Short circuit if the trie is already committed and not usable. + if mt.committed { + return nil, ErrCommitted + } + + secureK, err := ToSecureKey(key) if err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + return nil, err + } + + node, err := mt.tryGet(NewHashFromBigInt(secureK)) + if err == ErrKeyNotFound { + // according to https://github.com/ethereum/go-ethereum/blob/37f9d25ba027356457953eab5f181c98b46e9988/trie/trie.go#L135 + return nil, nil + } else if err != nil { + return nil, err } - return res + return node.Data(), nil +} + +// GetStorage returns the value for key stored in the trie. +func (mt *ZkTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) { + return mt.TryGet(key) } -func (t *ZkTrie) GetAccount(address common.Address) (*types.StateAccount, error) { +// GetAccount returns the account for the given address. +func (mt *ZkTrie) GetAccount(address common.Address) (*types.StateAccount, error) { key := address.Bytes() - sanityCheckByte32Key(key) - res, err := t.TryGet(key) + res, err := mt.TryGet(key) if res == nil || err != nil { return nil, err } return types.UnmarshalStateAccount(res) } -func (t *ZkTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) { - sanityCheckByte32Key(key) - return t.TryGet(key) +// GetKey returns the key for the given hash. +func (mt *ZkTrie) GetKey(hashKey []byte) []byte { + mt.lock.RLock() + defer mt.lock.RUnlock() + return mt.getKey(hashKey) } -func (t *ZkTrie) UpdateAccount(address common.Address, acc *types.StateAccount) error { - return t.TryUpdateAccount(address.Bytes(), acc) +// GetKey returns the key for the given hash. 
+func (mt *ZkTrie) getKey(hashKey []byte) []byte { + if key, ok := mt.secKeyCache[string(hashKey)]; ok { + return key + } + if mt.preimages == nil { + return nil + } + return mt.preimages.preimage(common.BytesToHash(hashKey)) } -// TryUpdateAccount will abstract the write of an account to the -// secure trie. -func (t *ZkTrie) TryUpdateAccount(key []byte, acc *types.StateAccount) error { - sanityCheckByte32Key(key) - value, flag := acc.MarshalFields() - return t.ZkTrie.TryUpdate(key, flag, value) +// Delete removes the specified Key from the ZkTrie and updates the path +// from the deleted key to the Root with the new values. This method removes +// the key from the ZkTrie, but does not remove the old nodes from the +// key-value database; this means that if the tree is accessed by an old Root +// where the key was not deleted yet, the key will still exist. If is desired +// to remove the key-values from the database that are not under the current +// Root, an option could be to dump all the leafs (using mt.DumpLeafs) and +// import them in a new ZkTrie in a new database (using +// mt.ImportDumpedLeafs), but this will lose all the Root history of the +// ZkTrie +func (mt *ZkTrie) TryDelete(key []byte) error { + secureKey, err := ToSecureKey(key) + if err != nil { + return err + } + + nodeKey := NewHashFromBigInt(secureKey) + + // verify that k is valid and fit inside the Finite Field. + if !CheckBigIntInField(nodeKey.BigInt()) { + return ErrInvalidField + } + + mt.lock.Lock() + defer mt.lock.Unlock() + + // Short circuit if the trie is already committed and not usable. 
+ if mt.committed { + return ErrCommitted + } + + //mitigate the create-delete issue: do not delete unexisted key + if r, _ := mt.tryGet(nodeKey); r == nil { + return nil + } + + newRootKey, _, err := mt.tryDelete(mt.rootKey, nodeKey, getPath(mt.maxLevels, nodeKey[:])) + if err != nil && !errors.Is(err, ErrKeyNotFound) { + return err + } + if newRootKey != nil { + mt.rootKey = newRootKey + } + return nil } -// Update associates key with value in the trie. Subsequent calls to -// Get will return value. If value has length zero, any existing value -// is deleted from the trie and calls to Get will return nil. -// -// The value bytes must not be modified by the caller while they are -// stored in the trie. -func (t *ZkTrie) Update(key, value []byte) { - if err := t.TryUpdate(key, value); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) +func (mt *ZkTrie) tryDelete(rootKey *Hash, nodeKey *Hash, path []bool) (*Hash, bool, error) { + root, err := mt.getNode(rootKey) + if err != nil { + return nil, false, err + } + + switch root.Type { + case NodeTypeEmpty_New: + return nil, false, ErrKeyNotFound + case NodeTypeLeaf_New: + if bytes.Equal(nodeKey[:], root.NodeKey[:]) { + return &HashZero, true, nil + } + return nil, false, ErrKeyNotFound + case NodeTypeBranch_0, NodeTypeBranch_1, NodeTypeBranch_2, NodeTypeBranch_3: + branchRight := path[0] + childKey, siblingKey := root.ChildL, root.ChildR + if branchRight { + childKey, siblingKey = root.ChildR, root.ChildL + } + + newChildKey, newChildIsTerminal, err := mt.tryDelete(childKey, nodeKey, path[1:]) + if err != nil { + return nil, false, err + } + + siblingIsTerminal := root.Type == NodeTypeBranch_0 || + (branchRight && root.Type == NodeTypeBranch_1) || + (!branchRight && root.Type == NodeTypeBranch_2) + + leftChild, rightChild := newChildKey, siblingKey + leftIsTerminal, rightIsTerminal := newChildIsTerminal, siblingIsTerminal + if branchRight { + leftChild, rightChild = siblingKey, newChildKey + 
leftIsTerminal, rightIsTerminal = siblingIsTerminal, newChildIsTerminal + } + + var newNodeType NodeType + if leftIsTerminal && rightIsTerminal { + leftIsEmpty := bytes.Equal(HashZero[:], (*leftChild)[:]) + rightIsEmpty := bytes.Equal(HashZero[:], (*rightChild)[:]) + + // if both children are terminal and one of them is empty, prune the root node + // and send return the non-empty child + if leftIsEmpty || rightIsEmpty { + if leftIsEmpty { + return rightChild, true, nil + } + return leftChild, true, nil + } else { + newNodeType = NodeTypeBranch_0 + } + } else if leftIsTerminal { + newNodeType = NodeTypeBranch_1 + } else if rightIsTerminal { + newNodeType = NodeTypeBranch_2 + } else { + newNodeType = NodeTypeBranch_3 + } + + newRootKey := mt.newDirtyNodeKey() + mt.dirtyStorage[*newRootKey] = NewParentNode(newNodeType, leftChild, rightChild) + return newRootKey, false, nil + default: + panic("encounter unsupported deprecated node type") } } -// NOTE: value is restricted to length of bytes32. -// we override the underlying zktrie's TryUpdate method -func (t *ZkTrie) TryUpdate(key, value []byte) error { - sanityCheckByte32Key(key) - return t.ZkTrie.TryUpdate(key, 1, []zkt.Byte32{*zkt.NewByte32FromBytes(value)}) +// DeleteAccount removes the account with the given address from the trie. +func (mt *ZkTrie) DeleteAccount(address common.Address) error { + return mt.TryDelete(address.Bytes()) } -func (t *ZkTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error { - return nil +// DeleteStorage removes the key from the trie. 
+func (mt *ZkTrie) DeleteStorage(_ common.Address, key []byte) error { + return mt.TryDelete(key) } -func (t *ZkTrie) UpdateStorage(_ common.Address, key, value []byte) error { - return t.TryUpdate(key, value) +// GetLeafNode is more underlying method than TryGet, which obtain an leaf node +// or nil if not exist +func (mt *ZkTrie) GetLeafNode(key []byte) (*Node, error) { + mt.lock.RLock() + defer mt.lock.RUnlock() + + // Short circuit if the trie is already committed and not usable. + if mt.committed { + return nil, ErrCommitted + } + + secureKey, err := ToSecureKey(key) + if err != nil { + return nil, err + } + + nodeKey := NewHashFromBigInt(secureKey) + + n, err := mt.tryGet(nodeKey) + return n, err } -// Delete removes any existing value for key from the trie. -func (t *ZkTrie) Delete(key []byte) { - sanityCheckByte32Key(key) - if err := t.TryDelete(key); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) +// GetNodeByHash gets a node by node hash from the MT. Empty nodes are not stored in the +// tree; they are all the same and assumed to always exist. +// for non exist key, return (NewEmptyNode(), nil) +func (mt *ZkTrie) GetNodeByHash(nodeHash *Hash) (*Node, error) { + mt.lock.RLock() + defer mt.lock.RUnlock() + + // Short circuit if the trie is already committed and not usable. 
+ if mt.committed { + return nil, ErrCommitted } + return mt.getNode(nodeHash) } -func (t *ZkTrie) DeleteAccount(address common.Address) error { - key := address.Bytes() - sanityCheckByte32Key(key) - return t.TryDelete(key) +func (mt *ZkTrie) getNodeTo(nodeHash *Hash, node *Node) error { + if bytes.Equal(nodeHash[:], HashZero[:]) { + *node = *NewEmptyNode() + return nil + } + if dirtyNode, found := mt.dirtyStorage[*nodeHash]; found { + *node = *dirtyNode.Copy() + return nil + } + + var hash common.Hash + hash.SetBytes(nodeHash.Bytes()) + nBytes, err := mt.reader.node(nil, hash) + if err != nil { + return err + } + return node.SetBytes(nBytes) +} + +func (mt *ZkTrie) getNode(nodeHash *Hash) (*Node, error) { + var n Node + if err := mt.getNodeTo(nodeHash, &n); err != nil { + return nil, err + } + return &n, nil +} + +// getPath returns the binary path, from the root to the leaf. +func getPath(numLevels int, k []byte) []bool { + path := make([]bool, numLevels) + for n := 0; n < numLevels; n++ { + path[n] = TestBit(k[:], uint(n)) + } + return path +} + +// NodeAux contains the auxiliary node used in a non-existence proof. +type NodeAux struct { + Key *Hash // Key is the node key + Value *Hash // Value is the value hash in the node +} + +// Proof defines the required elements for a MT proof of existence or +// non-existence. +type Proof struct { + // existence indicates wether this is a proof of existence or + // non-existence. + Existence bool + // depth indicates how deep in the tree the proof goes. + depth uint + // notempties is a bitmap of non-empty Siblings found in Siblings. + notempties [HashByteLen - proofFlagsLen]byte + // Siblings is a list of non-empty sibling node hashes. + Siblings []*Hash + // NodeInfos is a list of nod types along mpt path + NodeInfos []NodeType + // NodeKey record the key of node and path + NodeKey *Hash + // NodeAux contains the auxiliary information of the lowest common ancestor + // node in a non-existence proof. 
+ NodeAux *NodeAux } -func (t *ZkTrie) DeleteStorage(_ common.Address, key []byte) error { - sanityCheckByte32Key(key) - return t.TryDelete(key) +// BuildZkTrieProof prove uniformed way to turn some data collections into Proof struct +func BuildZkTrieProof(rootHash *Hash, k *big.Int, lvl int, getNode func(key *Hash) (*Node, error)) (*Proof, + *Node, error) { + + p := &Proof{} + var siblingHash *Hash + + p.NodeKey = NewHashFromBigInt(k) + kHash := p.NodeKey + path := getPath(lvl, kHash[:]) + + nextHash := rootHash + for p.depth = 0; p.depth < uint(lvl); p.depth++ { + n, err := getNode(nextHash) + if err != nil { + return nil, nil, err + } + p.NodeInfos = append(p.NodeInfos, n.Type) + switch n.Type { + case NodeTypeEmpty_New: + return p, n, nil + case NodeTypeLeaf_New: + if bytes.Equal(kHash[:], n.NodeKey[:]) { + p.Existence = true + return p, n, nil + } + vHash, err := n.ValueHash() + // We found a leaf whose entry didn't match hIndex + p.NodeAux = &NodeAux{Key: n.NodeKey, Value: vHash} + return p, n, err + case NodeTypeBranch_0, NodeTypeBranch_1, NodeTypeBranch_2, NodeTypeBranch_3: + if path[p.depth] { + nextHash = n.ChildR + siblingHash = n.ChildL + } else { + nextHash = n.ChildL + siblingHash = n.ChildR + } + case NodeTypeEmpty, NodeTypeLeaf, NodeTypeParent: + panic("encounter deprecated node types") + default: + return nil, nil, ErrInvalidNodeFound + } + if !bytes.Equal(siblingHash[:], HashZero[:]) { + SetBitBigEndian(p.notempties[:], p.depth) + p.Siblings = append(p.Siblings, siblingHash) + } + } + return nil, nil, ErrKeyNotFound + } -// GetKey returns the preimage of a hashed key that was -// previously used to store a value. -func (t *ZkTrie) GetKey(kHashBytes []byte) []byte { - // TODO: use a kv cache in memory - k, err := zkt.NewBigIntFromHashBytes(kHashBytes) +// VerifyProof verifies the Merkle Proof for the entry and root. 
+// nodeHash can be nil when try to verify a nonexistent proof +func VerifyProofZkTrie(rootHash *Hash, proof *Proof, node *Node) bool { + var nodeHash *Hash + var err error + if node == nil { + if proof.NodeAux != nil { + nodeHash, err = LeafHash(proof.NodeAux.Key, proof.NodeAux.Value) + } else { + nodeHash = &HashZero + } + } else { + nodeHash, err = node.NodeHash() + } + if err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + return false } - if t.db.db.preimages != nil { - return t.db.db.preimages.preimage(common.BytesToHash(k.Bytes())) + + rootFromProof, err := proof.rootFromProof(nodeHash, proof.NodeKey) + if err != nil { + return false } - return nil + return bytes.Equal(rootHash[:], rootFromProof[:]) } -// Commit writes all nodes and the secure hash pre-images to the trie's database. -// Nodes are stored with their sha3 hash as the key. -// -// Committing flushes nodes from memory. Subsequent Get calls will load nodes -// from the database. -func (t *ZkTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) { - if err := t.ZkTrie.Commit(); err != nil { - return common.Hash{}, nil, err +// Verify the proof and calculate the root, nodeHash can be nil when try to verify +// a nonexistent proof +func (proof *Proof) Verify(nodeHash *Hash) (*Hash, error) { + if proof.Existence { + if nodeHash == nil { + return nil, ErrKeyNotFound + } + return proof.rootFromProof(nodeHash, proof.NodeKey) + } else { + if proof.NodeAux == nil { + return proof.rootFromProof(&HashZero, proof.NodeKey) + } else { + if bytes.Equal(proof.NodeKey[:], proof.NodeAux.Key[:]) { + return nil, fmt.Errorf("non-existence proof being checked against hIndex equal to nodeAux") + } + midHash, err := LeafHash(proof.NodeAux.Key, proof.NodeAux.Value) + if err != nil { + return nil, err + } + return proof.rootFromProof(midHash, proof.NodeKey) + } } - return t.Hash(), nil, nil + } -// Hash returns the root hash of SecureBinaryTrie. 
It does not write to the -// database and can be used even if the trie doesn't have one. -func (t *ZkTrie) Hash() common.Hash { - var hash common.Hash - hash.SetBytes(t.ZkTrie.Hash()) - return hash +func (proof *Proof) rootFromProof(nodeHash, nodeKey *Hash) (*Hash, error) { + var err error + + sibIdx := len(proof.Siblings) - 1 + path := getPath(int(proof.depth), nodeKey[:]) + for lvl := int(proof.depth) - 1; lvl >= 0; lvl-- { + var siblingHash *Hash + if TestBitBigEndian(proof.notempties[:], uint(lvl)) { + siblingHash = proof.Siblings[sibIdx] + sibIdx-- + } else { + siblingHash = &HashZero + } + curType := proof.NodeInfos[lvl] + if path[lvl] { + nodeHash, err = NewParentNode(curType, siblingHash, nodeHash).NodeHash() + if err != nil { + return nil, err + } + } else { + nodeHash, err = NewParentNode(curType, nodeHash, siblingHash).NodeHash() + if err != nil { + return nil, err + } + } + } + return nodeHash, nil +} + +// walk is a helper recursive function to iterate over all tree branches +func (mt *ZkTrie) walk(nodeHash *Hash, f func(*Node)) error { + n, err := mt.getNode(nodeHash) + if err != nil { + return err + } + if n.IsTerminal() { + f(n) + } else { + f(n) + if err := mt.walk(n.ChildL, f); err != nil { + return err + } + if err := mt.walk(n.ChildR, f); err != nil { + return err + } + } + return nil } -// Copy returns a copy of SecureBinaryTrie. -func (t *ZkTrie) Copy() *ZkTrie { - return &ZkTrie{t.ZkTrie.Copy(), t.db} +// Walk iterates over all the branches of a ZkTrie with the given rootHash +// if rootHash is nil, it will get the current RootHash of the current state of +// the ZkTrie. For each node, it calls the f function given in the +// parameters. 
See some examples of the Walk function usage in the +// ZkTrie.go and merkletree_test.go +func (mt *ZkTrie) Walk(rootHash *Hash, f func(*Node)) error { + var err error + if rootHash == nil { + rootHash, err = mt.Root() + if err != nil { + return err + } + } + mt.lock.RLock() + defer mt.lock.RUnlock() + + err = mt.walk(rootHash, f) + return err } -// NodeIterator returns an iterator that returns nodes of the underlying trie. Iteration -// starts at the key after the given start key. -func (t *ZkTrie) NodeIterator(start []byte) (NodeIterator, error) { - /// FIXME - panic("not implemented") +// GraphViz uses Walk function to generate a string GraphViz representation of +// the tree and writes it to w +func (mt *ZkTrie) GraphViz(w io.Writer, rootHash *Hash) error { + if rootHash == nil { + var err error + rootHash, err = mt.Root() + if err != nil { + return err + } + } + + mt.lock.RLock() + defer mt.lock.RUnlock() + + fmt.Fprintf(w, + "--------\nGraphViz of the ZkTrie with RootHash "+rootHash.BigInt().String()+"\n") + + fmt.Fprintf(w, `digraph hierarchy { +node [fontname=Monospace,fontsize=10,shape=box] +`) + cnt := 0 + var errIn error + err := mt.walk(rootHash, func(n *Node) { + hash, err := n.NodeHash() + if err != nil { + errIn = err + } + switch n.Type { + case NodeTypeEmpty_New: + case NodeTypeLeaf_New: + fmt.Fprintf(w, "\"%v\" [style=filled];\n", hash.String()) + case NodeTypeBranch_0, NodeTypeBranch_1, NodeTypeBranch_2, NodeTypeBranch_3: + lr := [2]string{n.ChildL.String(), n.ChildR.String()} + emptyNodes := "" + for i := range lr { + if lr[i] == "0" { + lr[i] = fmt.Sprintf("empty%v", cnt) + emptyNodes += fmt.Sprintf("\"%v\" [style=dashed,label=0];\n", lr[i]) + cnt++ + } + } + fmt.Fprintf(w, "\"%v\" -> {\"%v\" \"%v\"}\n", hash.String(), lr[0], lr[1]) + fmt.Fprint(w, emptyNodes) + case NodeTypeEmpty, NodeTypeLeaf, NodeTypeParent: + panic("encounter unsupported deprecated node type") + default: + } + }) + fmt.Fprintf(w, "}\n") + + fmt.Fprintf(w, + "End of GraphViz 
of the ZkTrie with RootHash "+rootHash.BigInt().String()+"\n--------\n") + + if errIn != nil { + return errIn + } + return err } -// hashKey returns the hash of key as an ephemeral buffer. -// The caller must not hold onto the return value because it will become -// invalid on the next call to hashKey or secKey. -/*func (t *ZkTrie) hashKey(key []byte) []byte { - if len(key) != 32 { - panic("non byte32 input to hashKey") - } - low16 := new(big.Int).SetBytes(key[:16]) - high16 := new(big.Int).SetBytes(key[16:]) - hash, err := poseidon.Hash([]*big.Int{low16, high16}) - if err != nil { - panic(err) - } - return hash.Bytes() +// Copy creates a new independent zkTrie from the given trie +func (mt *ZkTrie) Copy() *ZkTrie { + mt.lock.RLock() + defer mt.lock.RUnlock() + + // Deep copy in-memory dirty nodes + newDirtyStorage := make(map[Hash]*Node, len(mt.dirtyStorage)) + for key, dirtyNode := range mt.dirtyStorage { + newDirtyStorage[key] = dirtyNode.Copy() + } + + newRootKey := *mt.rootKey + return &ZkTrie{ + reader: mt.reader, + maxLevels: mt.maxLevels, + dirtyIndex: new(big.Int).Set(mt.dirtyIndex), + dirtyStorage: newDirtyStorage, + rootKey: &newRootKey, + committed: mt.committed, + preimages: mt.preimages, + secKeyCache: maps.Clone(mt.secKeyCache), + } } -*/ // Prove constructs a merkle proof for key. The result contains all encoded nodes // on the path to the value at key. The value itself is also included in the last @@ -214,49 +1048,220 @@ func (t *ZkTrie) NodeIterator(start []byte) (NodeIterator, error) { // nodes of the longest existing prefix of the key (at least the root node), ending // with the node that proves the absence of the key. 
// func (t *ZkTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error { -func (t *ZkTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { +func (mt *ZkTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { fromLevel := uint(0) - err := t.ZkTrie.Prove(key, fromLevel, func(n *zktrie.Node) error { + err := mt.ProveWithDeletion(key, fromLevel, func(n *Node) error { nodeHash, err := n.NodeHash() if err != nil { return err } - if n.Type == zktrie.NodeTypeLeaf_New { - preImage := t.GetKey(n.NodeKey.Bytes()) + if n.Type == NodeTypeLeaf_New { + preImage := mt.getKey(n.NodeKey.Bytes()) if len(preImage) > 0 { - n.KeyPreimage = &zkt.Byte32{} + n.KeyPreimage = &Byte32{} copy(n.KeyPreimage[:], preImage) - //return fmt.Errorf("key preimage not found for [%x] ref %x", n.NodeKey.Bytes(), k.Bytes()) } } return proofDb.Put(nodeHash[:], n.Value()) - }) + }, nil) if err != nil { return err } // we put this special kv pair in db so we can distinguish the type and // make suitable Proof - return proofDb.Put(magicHash, zktrie.ProofMagicBytes()) + return proofDb.Put(magicHash, magicSMTBytes) +} + +// DecodeProof try to decode a node bytes, return can be nil for any non-node data (magic code) +func DecodeSMTProof(data []byte) (*Node, error) { + + if bytes.Equal(magicSMTBytes, data) { + //skip magic bytes node + return nil, nil + } + + return NewNodeFromBytes(data) +} + +// ProveWithDeletion constructs a merkle proof for key. The result contains all encoded nodes +// on the path to the value at key. The value itself is also included in the last +// node and can be retrieved by verifying the proof. +// +// If the trie does not contain a value for key, the returned proof contains all +// nodes of the longest existing prefix of the key (at least the root node), ending +// with the node that proves the absence of the key. 
+// +// If the trie contain value for key, the onHit is called BEFORE writeNode being called, +// both the hitted leaf node and its sibling node is provided as arguments so caller +// would receive enough information for launch a deletion and calculate the new root +// base on the proof data +// Also notice the sibling can be nil if the trie has only one leaf +func (mt *ZkTrie) ProveWithDeletion(key []byte, fromLevel uint, writeNode func(*Node) error, onHit func(*Node, *Node)) error { + secureKey, err := ToSecureKey(key) + if err != nil { + return err + } + + nodeKey := NewHashFromBigInt(secureKey) + var prev *Node + return mt.prove(nodeKey, fromLevel, func(n *Node) (err error) { + defer func() { + if err == nil { + err = writeNode(n) + } + prev = n + }() + + if prev != nil { + switch prev.Type { + case NodeTypeBranch_0, NodeTypeBranch_1, NodeTypeBranch_2, NodeTypeBranch_3: + default: + // sanity check: we should stop after obtain leaf/empty + panic("unexpected behavior in prove") + } + } + + if onHit == nil { + return + } + + // check and call onhit + if n.Type == NodeTypeLeaf_New && bytes.Equal(n.NodeKey.Bytes(), nodeKey.Bytes()) { + if prev == nil { + // for sole element trie + onHit(n, nil) + } else { + var sibling, nHash *Hash + nHash, err = n.NodeHash() + if err != nil { + return + } + + if bytes.Equal(nHash.Bytes(), prev.ChildL.Bytes()) { + sibling = prev.ChildR + } else { + sibling = prev.ChildL + } + + if siblingNode, err := mt.getNode(sibling); err == nil { + onHit(n, siblingNode) + } else { + onHit(n, nil) + } + } + + } + return + }) +} + +// Prove constructs a merkle proof for SMT, it respect the protocol used by the ethereum-trie +// but save the node data with a compact form +func (mt *ZkTrie) prove(kHash *Hash, fromLevel uint, writeNode func(*Node) error) error { + // force root hash calculation if needed + if _, err := mt.Root(); err != nil { + return err + } + + mt.lock.RLock() + defer mt.lock.RUnlock() + + // Short circuit if the trie is already 
committed and not usable. + if mt.committed { + return ErrCommitted + } + + path := getPath(mt.maxLevels, kHash[:]) + var nodes []*Node + var lastN *Node + tn := mt.rootKey + for i := 0; i < mt.maxLevels; i++ { + n, err := mt.getNode(tn) + if err != nil { + fmt.Println("get node fail", err, tn.Hex(), + lastN.ChildL.Hex(), + lastN.ChildR.Hex(), + path, + i, + ) + return err + } + nodeHash := tn + lastN = n + + finished := true + switch n.Type { + case NodeTypeEmpty_New: + case NodeTypeLeaf_New: + // notice even we found a leaf whose entry didn't match the expected k, + // we still include it as the proof of absence + case NodeTypeBranch_0, NodeTypeBranch_1, NodeTypeBranch_2, NodeTypeBranch_3: + finished = false + if path[i] { + tn = n.ChildR + } else { + tn = n.ChildL + } + case NodeTypeEmpty, NodeTypeLeaf, NodeTypeParent: + panic("encounter deprecated node types") + default: + return ErrInvalidNodeFound + } + + nCopy := n.Copy() + nCopy.nodeHash = nodeHash + nodes = append(nodes, nCopy) + if finished { + break + } + } + + for _, n := range nodes { + if fromLevel > 0 { + fromLevel-- + continue + } + + // TODO: notice here we may have broken some implicit on the proofDb: + // the key is not kecca(value) and it even can not be derived from + // the value by any means without a actually decoding + if err := writeNode(n); err != nil { + return err + } + } + + return nil +} + +// NodeIterator returns an iterator that returns nodes of the trie. Iteration +// starts at the key after the given start key. And error will be returned +// if fails to create node iterator. +func (mt *ZkTrie) NodeIterator(start []byte) (NodeIterator, error) { + // Short circuit if the trie is already committed and not usable. + if mt.committed { + return nil, ErrCommitted + } + return nil, errors.New("not implemented") } // VerifyProof checks merkle proofs. The given proof must contain the value for // key in a trie with the given root hash. 
VerifyProof returns an error if the // proof contains invalid trie nodes or the wrong value. func VerifyProofSMT(rootHash common.Hash, key []byte, proofDb ethdb.KeyValueReader) (value []byte, err error) { - h := zkt.NewHashFromBytes(rootHash.Bytes()) - k, err := zkt.ToSecureKey(key) + h := NewHashFromBytes(rootHash.Bytes()) + k, err := ToSecureKey(key) if err != nil { return nil, err } - proof, n, err := zktrie.BuildZkTrieProof(h, k, len(key)*8, func(key *zkt.Hash) (*zktrie.Node, error) { + proof, n, err := BuildZkTrieProof(h, k, len(key)*8, func(key *Hash) (*Node, error) { buf, _ := proofDb.Get(key[:]) if buf == nil { - return nil, zktrie.ErrKeyNotFound + return nil, ErrKeyNotFound } - n, err := zktrie.NewNodeFromBytes(buf) + n, err := NewNodeFromBytes(buf) return n, err }) @@ -267,9 +1272,56 @@ func VerifyProofSMT(rootHash common.Hash, key []byte, proofDb ethdb.KeyValueRead return nil, nil } - if zktrie.VerifyProofZkTrie(h, proof, n) { + if VerifyProofZkTrie(h, proof, n) { return n.Data(), nil } else { return nil, fmt.Errorf("bad proof node %v", proof) } } + +// MustDelete deletes the key from the trie and panics if it fails. +func (mt *ZkTrie) MustDelete(key []byte) { + if err := mt.TryDelete(key); err != nil { + panic(err) + } +} + +// MustUpdate updates the key with the given value and panics if it fails. +func (mt *ZkTrie) MustUpdate(key, value []byte) { + if err := mt.TryUpdate(key, 1, []Byte32{*NewByte32FromBytes(value)}); err != nil { + panic(err) + } +} + +// MustGet returns the value for key stored in the trie and panics if it fails. +func (mt *ZkTrie) MustGet(key []byte) []byte { + v, err := mt.TryGet(key) + if err != nil { + panic(err) + } + return v +} + +// MustNodeIterator returns an iterator that returns nodes of the trie and panics if it fails. 
+func (mt *ZkTrie) MustNodeIterator(start []byte) NodeIterator { + itr, err := mt.NodeIterator(start) + if err != nil { + panic(err) + } + return itr +} + +// GetAccountByHash does the same thing as GetAccount, however it expects an +// account hash that is the hash of address. This constitutes an abstraction +// leak, since the client code needs to know the key format. +func (mt *ZkTrie) GetAccountByHash(addrHash common.Hash) (*types.StateAccount, error) { + return nil, errors.New("not implemented") +} + +// GetNode attempts to retrieve a trie node by compact-encoded path. It is not +// possible to use keybyte-encoding as the path might contain odd nibbles. +// If the specified trie node is not in the trie, nil will be returned. +// If a trie node is not found in the database, a MissingNodeError is returned. +func (mt *ZkTrie) GetNode(path []byte) ([]byte, int, error) { + return nil, 0, errors.New("not implemented") +} diff --git a/trie/zk_trie_database.go b/trie/zk_trie_database.go deleted file mode 100644 index b4c3fbbc268e..000000000000 --- a/trie/zk_trie_database.go +++ /dev/null @@ -1,172 +0,0 @@ -package trie - -import ( - "math/big" - - "github.com/syndtr/goleveldb/leveldb" - - zktrie "github.com/scroll-tech/zktrie/trie" - - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/ethdb" - "github.com/scroll-tech/go-ethereum/trie/triedb/hashdb" -) - -// ZktrieDatabase Database adaptor implements zktrie.ZktrieDatbase -// It also reverses the bit order of the key being persisted. -// This ensures that the adjacent leaf in zktrie maintains minimal -// distance when persisted with dictionary order in LevelDB. -// Consequently, this optimizes the snapshot operation, allowing it -// to iterate through adjacent leaves at a reduced cost. 
- -type ZktrieDatabase struct { - db *Database - prefix []byte -} - -func NewZktrieDatabase(diskdb ethdb.Database) *ZktrieDatabase { - db := NewDatabase(diskdb, nil) - db.config.IsUsingZktrie = true - return &ZktrieDatabase{db: db, prefix: []byte{}} -} - -// adhoc wrapper... -func NewZktrieDatabaseFromTriedb(db *Database) *ZktrieDatabase { - db.config.IsUsingZktrie = true - return &ZktrieDatabase{db: db, prefix: []byte{}} -} - -// Put saves a key:value into the Storage -func (l *ZktrieDatabase) Put(k, v []byte) error { - k = bitReverse(k) - l.db.GetLock().Lock() - l.db.rawDirties.Put(Concat(l.prefix, k[:]), v) - l.db.GetLock().Unlock() - return nil -} - -// Get retrieves a value from a key in the Storage -func (l *ZktrieDatabase) Get(key []byte) ([]byte, error) { - key = bitReverse(key) - concatKey := Concat(l.prefix, key[:]) - l.db.GetLock().RLock() - value, ok := l.db.rawDirties.Get(concatKey) - l.db.GetLock().RUnlock() - if ok { - return value, nil - } - - if l.db.GetCleans() != nil { - if enc := l.db.GetCleans().Get(nil, concatKey); enc != nil { - hashdb.MemcacheCleanHitMeter.Mark(1) - hashdb.MemcacheCleanReadMeter.Mark(int64(len(enc))) - return enc, nil - } - } - - v, err := l.db.diskdb.Get(concatKey) - if err == leveldb.ErrNotFound { - return nil, zktrie.ErrKeyNotFound - } - if l.db.GetCleans() != nil { - l.db.GetCleans().Set(concatKey[:], v) - hashdb.MemcacheCleanMissMeter.Mark(1) - hashdb.MemcacheCleanWriteMeter.Mark(int64(len(v))) - } - return v, err -} - -func (l *ZktrieDatabase) UpdatePreimage(preimage []byte, hashField *big.Int) { - db := l.db - if db.preimages != nil { // Ugly direct check but avoids the below write lock - // we must copy the input key - db.preimages.insertPreimage(map[common.Hash][]byte{common.BytesToHash(hashField.Bytes()): common.CopyBytes(preimage)}) - } -} - -// Iterate implements the method Iterate of the interface Storage -func (l *ZktrieDatabase) Iterate(f func([]byte, []byte) (bool, error)) error { - iter := 
l.db.diskdb.NewIterator(l.prefix, nil) - defer iter.Release() - for iter.Next() { - localKey := bitReverse(iter.Key()[len(l.prefix):]) - if cont, err := f(localKey, iter.Value()); err != nil { - return err - } else if !cont { - break - } - } - iter.Release() - return iter.Error() -} - -// Close implements the method Close of the interface Storage -func (l *ZktrieDatabase) Close() { - // FIXME: is this correct? - if err := l.db.diskdb.Close(); err != nil { - panic(err) - } -} - -// List implements the method List of the interface Storage -func (l *ZktrieDatabase) List(limit int) ([]KV, error) { - ret := []KV{} - err := l.Iterate(func(key []byte, value []byte) (bool, error) { - ret = append(ret, KV{K: Clone(key), V: Clone(value)}) - if len(ret) == limit { - return false, nil - } - return true, nil - }) - return ret, err -} - -func bitReverseForNibble(b byte) byte { - switch b { - case 0: - return 0 - case 1: - return 8 - case 2: - return 4 - case 3: - return 12 - case 4: - return 2 - case 5: - return 10 - case 6: - return 6 - case 7: - return 14 - case 8: - return 1 - case 9: - return 9 - case 10: - return 5 - case 11: - return 13 - case 12: - return 3 - case 13: - return 11 - case 14: - return 7 - case 15: - return 15 - default: - panic("unexpected input") - } -} - -func bitReverse(inp []byte) (out []byte) { - l := len(inp) - out = make([]byte, l) - - for i, b := range inp { - out[l-i-1] = bitReverseForNibble(b&15)<<4 + bitReverseForNibble(b>>4) - } - - return -} diff --git a/trie/zk_trie_database_test.go b/trie/zk_trie_database_test.go deleted file mode 100644 index 6d8c15e6fa27..000000000000 --- a/trie/zk_trie_database_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package trie - -import ( - "bytes" - "testing" - - "github.com/scroll-tech/go-ethereum/common" -) - -// grep from `feat/snap` -func reverseBitInPlace(b []byte) { - var v [8]uint8 - for i := 0; i < len(b); i++ { - for j := 0; j < 8; j++ { - v[j] = (b[i] >> j) & 1 - } - var tmp uint8 = 0 - for j := 0; j < 8; j++ 
{ - tmp |= v[8-j-1] << j - } - b[i] = tmp - } -} - -func reverseBytesInPlace(b []byte) { - for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 { - b[i], b[j] = b[j], b[i] - } -} - -func TestBitReverse(t *testing.T) { - for _, testBytes := range [][]byte{ - common.FromHex("7b908cce3bc16abb3eac5dff6c136856526f15225f74ce860a2bec47912a5492"), - common.FromHex("fac65cd2ad5e301083d0310dd701b5faaff1364cbe01cdbfaf4ec3609bb4149e"), - common.FromHex("55791f6ec2f83fee512a2d3d4b505784fdefaea89974e10440d01d62a18a298a"), - common.FromHex("5ab775b64d86a8058bb71c3c765d0f2158c14bbeb9cb32a65eda793a7e95e30f"), - common.FromHex("ccb464abf67804538908c62431b3a6788e8dc6dee62aff9bfe6b10136acfceac"), - common.FromHex("b908adff17a5aa9d6787324c39014a74b04cef7fba6a92aeb730f48da1ca665d"), - } { - b1 := bitReverse(testBytes) - reverseBitInPlace(testBytes) - reverseBytesInPlace(testBytes) - if !bytes.Equal(b1, testBytes) { - t.Errorf("unexpected bit reversed %x vs %x", b1, testBytes) - } - } -} - -func TestBitDoubleReverse(t *testing.T) { - for _, testBytes := range [][]byte{ - common.FromHex("7b908cce3bc16abb3eac5dff6c136856526f15225f74ce860a2bec47912a5492"), - common.FromHex("fac65cd2ad5e301083d0310dd701b5faaff1364cbe01cdbfaf4ec3609bb4149e"), - common.FromHex("55791f6ec2f83fee512a2d3d4b505784fdefaea89974e10440d01d62a18a298a"), - common.FromHex("5ab775b64d86a8058bb71c3c765d0f2158c14bbeb9cb32a65eda793a7e95e30f"), - common.FromHex("ccb464abf67804538908c62431b3a6788e8dc6dee62aff9bfe6b10136acfceac"), - common.FromHex("b908adff17a5aa9d6787324c39014a74b04cef7fba6a92aeb730f48da1ca665d"), - } { - b := bitReverse(bitReverse(testBytes)) - if !bytes.Equal(b, testBytes) { - t.Errorf("unexpected double bit reversed %x vs %x", b, testBytes) - } - } -} diff --git a/trie/zk_trie_impl_test.go b/trie/zk_trie_impl_test.go deleted file mode 100644 index 3da9fe91932c..000000000000 --- a/trie/zk_trie_impl_test.go +++ /dev/null @@ -1,289 +0,0 @@ -package trie - -import ( - "math/big" - "testing" - - 
"github.com/iden3/go-iden3-crypto/constants" - cryptoUtils "github.com/iden3/go-iden3-crypto/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - zktrie "github.com/scroll-tech/zktrie/trie" - zkt "github.com/scroll-tech/zktrie/types" - - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/rawdb" - "github.com/scroll-tech/go-ethereum/core/types" -) - -// we do not need zktrie impl anymore, only made a wrapper for adapting testing -type zkTrieImplTestWrapper struct { - *zktrie.ZkTrieImpl -} - -func newZkTrieImpl(storage *ZktrieDatabase, maxLevels int) (*zkTrieImplTestWrapper, error) { - return newZkTrieImplWithRoot(storage, &zkt.HashZero, maxLevels) -} - -// NewZkTrieImplWithRoot loads a new ZkTrieImpl. If in the storage already exists one -// will open that one, if not, will create a new one. -func newZkTrieImplWithRoot(storage *ZktrieDatabase, root *zkt.Hash, maxLevels int) (*zkTrieImplTestWrapper, error) { - impl, err := zktrie.NewZkTrieImplWithRoot(storage, root, maxLevels) - if err != nil { - return nil, err - } - - return &zkTrieImplTestWrapper{impl}, nil -} - -// AddWord -// Deprecated: Add a Bytes32 kv to ZkTrieImpl, only for testing -func (mt *zkTrieImplTestWrapper) AddWord(kPreimage, vPreimage *zkt.Byte32) error { - k, err := kPreimage.Hash() - if err != nil { - return err - } - - if v, _ := mt.TryGet(k.Bytes()); v != nil { - return zktrie.ErrEntryIndexAlreadyExists - } - - return mt.ZkTrieImpl.TryUpdate(zkt.NewHashFromBigInt(k), 1, []zkt.Byte32{*vPreimage}) -} - -// GetLeafNodeByWord -// Deprecated: Get a Bytes32 kv to ZkTrieImpl, only for testing -func (mt *zkTrieImplTestWrapper) GetLeafNodeByWord(kPreimage *zkt.Byte32) (*zktrie.Node, error) { - k, err := kPreimage.Hash() - if err != nil { - return nil, err - } - return mt.ZkTrieImpl.GetLeafNode(zkt.NewHashFromBigInt(k)) -} - -// Deprecated: only for testing -func (mt *zkTrieImplTestWrapper) UpdateWord(kPreimage, vPreimage 
*zkt.Byte32) error { - k, err := kPreimage.Hash() - if err != nil { - return err - } - - return mt.ZkTrieImpl.TryUpdate(zkt.NewHashFromBigInt(k), 1, []zkt.Byte32{*vPreimage}) -} - -// Deprecated: only for testing -func (mt *zkTrieImplTestWrapper) DeleteWord(kPreimage *zkt.Byte32) error { - k, err := kPreimage.Hash() - if err != nil { - return err - } - return mt.ZkTrieImpl.TryDelete(zkt.NewHashFromBigInt(k)) -} - -func (mt *zkTrieImplTestWrapper) TryGet(key []byte) ([]byte, error) { - return mt.ZkTrieImpl.TryGet(zkt.NewHashFromBytes(key)) -} - -func (mt *zkTrieImplTestWrapper) TryDelete(key []byte) error { - return mt.ZkTrieImpl.TryDelete(zkt.NewHashFromBytes(key)) -} - -// TryUpdateAccount will abstract the write of an account to the trie -func (mt *zkTrieImplTestWrapper) TryUpdateAccount(key []byte, acc *types.StateAccount) error { - value, flag := acc.MarshalFields() - return mt.ZkTrieImpl.TryUpdate(zkt.NewHashFromBytes(key), flag, value) -} - -// NewHashFromHex returns a *Hash representation of the given hex string -func NewHashFromHex(h string) (*zkt.Hash, error) { - return zkt.NewHashFromCheckedBytes(common.FromHex(h)) -} - -type Fatalable interface { - Fatal(args ...interface{}) -} - -func newTestingMerkle(f Fatalable, numLevels int) *zkTrieImplTestWrapper { - mt, err := newZkTrieImpl(NewZktrieDatabase(rawdb.NewMemoryDatabase()), numLevels) - if err != nil { - f.Fatal(err) - return nil - } - return mt -} - -func TestHashParsers(t *testing.T) { - h0 := zkt.NewHashFromBigInt(big.NewInt(0)) - assert.Equal(t, "0", h0.String()) - h1 := zkt.NewHashFromBigInt(big.NewInt(1)) - assert.Equal(t, "1", h1.String()) - h10 := zkt.NewHashFromBigInt(big.NewInt(10)) - assert.Equal(t, "10", h10.String()) - - h7l := zkt.NewHashFromBigInt(big.NewInt(1234567)) - assert.Equal(t, "1234567", h7l.String()) - h8l := zkt.NewHashFromBigInt(big.NewInt(12345678)) - assert.Equal(t, "12345678...", h8l.String()) - - b, ok := 
new(big.Int).SetString("4932297968297298434239270129193057052722409868268166443802652458940273154854", 10) //nolint:lll - assert.True(t, ok) - h := zkt.NewHashFromBigInt(b) - assert.Equal(t, "4932297968297298434239270129193057052722409868268166443802652458940273154854", h.BigInt().String()) //nolint:lll - assert.Equal(t, "49322979...", h.String()) - assert.Equal(t, "0ae794eb9c3d8bbb9002e993fc2ed301dcbd2af5508ed072c375e861f1aa5b26", h.Hex()) - - b1, err := zkt.NewBigIntFromHashBytes(b.Bytes()) - assert.Nil(t, err) - assert.Equal(t, new(big.Int).SetBytes(b.Bytes()).String(), b1.String()) - - b2, err := zkt.NewHashFromCheckedBytes(b.Bytes()) - assert.Nil(t, err) - assert.Equal(t, b.String(), b2.BigInt().String()) - - h2, err := NewHashFromHex(h.Hex()) - assert.Nil(t, err) - assert.Equal(t, h, h2) - _, err = NewHashFromHex("0x12") - assert.NotNil(t, err) - - // check limits - a := new(big.Int).Sub(constants.Q, big.NewInt(1)) - testHashParsers(t, a) - a = big.NewInt(int64(1)) - testHashParsers(t, a) -} - -func testHashParsers(t *testing.T, a *big.Int) { - require.True(t, cryptoUtils.CheckBigIntInField(a)) - h := zkt.NewHashFromBigInt(a) - assert.Equal(t, a, h.BigInt()) - hFromBytes, err := zkt.NewHashFromCheckedBytes(h.Bytes()) - assert.Nil(t, err) - assert.Equal(t, h, hFromBytes) - assert.Equal(t, a, hFromBytes.BigInt()) - assert.Equal(t, a.String(), hFromBytes.BigInt().String()) - hFromHex, err := NewHashFromHex(h.Hex()) - assert.Nil(t, err) - assert.Equal(t, h, hFromHex) - - aBIFromHBytes, err := zkt.NewBigIntFromHashBytes(h.Bytes()) - assert.Nil(t, err) - assert.Equal(t, a, aBIFromHBytes) - assert.Equal(t, new(big.Int).SetBytes(a.Bytes()).String(), aBIFromHBytes.String()) -} - -func TestMerkleTree_AddUpdateGetWord(t *testing.T) { - mt := newTestingMerkle(t, 10) - err := mt.AddWord(&zkt.Byte32{1}, &zkt.Byte32{2}) - assert.Nil(t, err) - err = mt.AddWord(&zkt.Byte32{3}, &zkt.Byte32{4}) - assert.Nil(t, err) - err = mt.AddWord(&zkt.Byte32{5}, &zkt.Byte32{6}) - 
assert.Nil(t, err) - err = mt.AddWord(&zkt.Byte32{5}, &zkt.Byte32{7}) - assert.Equal(t, zktrie.ErrEntryIndexAlreadyExists, err) - - node, err := mt.GetLeafNodeByWord(&zkt.Byte32{1}) - assert.Nil(t, err) - assert.Equal(t, len(node.ValuePreimage), 1) - assert.Equal(t, (&zkt.Byte32{2})[:], node.ValuePreimage[0][:]) - node, err = mt.GetLeafNodeByWord(&zkt.Byte32{3}) - assert.Nil(t, err) - assert.Equal(t, len(node.ValuePreimage), 1) - assert.Equal(t, (&zkt.Byte32{4})[:], node.ValuePreimage[0][:]) - node, err = mt.GetLeafNodeByWord(&zkt.Byte32{5}) - assert.Nil(t, err) - assert.Equal(t, len(node.ValuePreimage), 1) - assert.Equal(t, (&zkt.Byte32{6})[:], node.ValuePreimage[0][:]) - - err = mt.UpdateWord(&zkt.Byte32{1}, &zkt.Byte32{7}) - assert.Nil(t, err) - err = mt.UpdateWord(&zkt.Byte32{3}, &zkt.Byte32{8}) - assert.Nil(t, err) - err = mt.UpdateWord(&zkt.Byte32{5}, &zkt.Byte32{9}) - assert.Nil(t, err) - - node, err = mt.GetLeafNodeByWord(&zkt.Byte32{1}) - assert.Nil(t, err) - assert.Equal(t, len(node.ValuePreimage), 1) - assert.Equal(t, (&zkt.Byte32{7})[:], node.ValuePreimage[0][:]) - node, err = mt.GetLeafNodeByWord(&zkt.Byte32{3}) - assert.Nil(t, err) - assert.Equal(t, len(node.ValuePreimage), 1) - assert.Equal(t, (&zkt.Byte32{8})[:], node.ValuePreimage[0][:]) - node, err = mt.GetLeafNodeByWord(&zkt.Byte32{5}) - assert.Nil(t, err) - assert.Equal(t, len(node.ValuePreimage), 1) - assert.Equal(t, (&zkt.Byte32{9})[:], node.ValuePreimage[0][:]) - _, err = mt.GetLeafNodeByWord(&zkt.Byte32{100}) - assert.Equal(t, zktrie.ErrKeyNotFound, err) -} - -func TestMerkleTree_UpdateAccount(t *testing.T) { - mt := newTestingMerkle(t, 10) - - acc1 := &types.StateAccount{ - Nonce: 1, - Balance: big.NewInt(10000000), - Root: common.HexToHash("22fb59aa5410ed465267023713ab42554c250f394901455a3366e223d5f7d147"), - KeccakCodeHash: common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), - PoseidonCodeHash: 
common.HexToHash("0c0a77f6e063b4b62eb7d9ed6f427cf687d8d0071d751850cfe5d136bc60d3ab").Bytes(), - CodeSize: 0, - } - err := mt.TryUpdateAccount(common.HexToAddress("0x05fDbDfaE180345C6Cff5316c286727CF1a43327").Bytes(), acc1) - assert.Nil(t, err) - - acc2 := &types.StateAccount{ - Nonce: 5, - Balance: big.NewInt(50000000), - Root: common.HexToHash("0"), - KeccakCodeHash: common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), - PoseidonCodeHash: common.HexToHash("05d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), - CodeSize: 5, - } - err = mt.TryUpdateAccount(common.HexToAddress("0x4cb1aB63aF5D8931Ce09673EbD8ae2ce16fD6571").Bytes(), acc2) - assert.Nil(t, err) - - bt, err := mt.TryGet(common.HexToAddress("0x05fDbDfaE180345C6Cff5316c286727CF1a43327").Bytes()) - assert.Nil(t, err) - - acc, err := types.UnmarshalStateAccount(bt) - assert.Nil(t, err) - assert.Equal(t, acc1.Nonce, acc.Nonce) - assert.Equal(t, acc1.Balance.Uint64(), acc.Balance.Uint64()) - assert.Equal(t, acc1.Root.Bytes(), acc.Root.Bytes()) - assert.Equal(t, acc1.KeccakCodeHash, acc.KeccakCodeHash) - assert.Equal(t, acc1.PoseidonCodeHash, acc.PoseidonCodeHash) - assert.Equal(t, acc1.CodeSize, acc.CodeSize) - - bt, err = mt.TryGet(common.HexToAddress("0x4cb1aB63aF5D8931Ce09673EbD8ae2ce16fD6571").Bytes()) - assert.Nil(t, err) - - acc, err = types.UnmarshalStateAccount(bt) - assert.Nil(t, err) - assert.Equal(t, acc2.Nonce, acc.Nonce) - assert.Equal(t, acc2.Balance.Uint64(), acc.Balance.Uint64()) - assert.Equal(t, acc2.Root.Bytes(), acc.Root.Bytes()) - assert.Equal(t, acc2.KeccakCodeHash, acc.KeccakCodeHash) - assert.Equal(t, acc2.PoseidonCodeHash, acc.PoseidonCodeHash) - assert.Equal(t, acc2.CodeSize, acc.CodeSize) - - bt, err = mt.TryGet(common.HexToAddress("0x8dE13967F19410A7991D63c2c0179feBFDA0c261").Bytes()) - assert.Nil(t, err) - assert.Nil(t, bt) - - err = mt.TryDelete(common.HexToHash("0x05fDbDfaE180345C6Cff5316c286727CF1a43327").Bytes()) - 
assert.Nil(t, err) - - bt, err = mt.TryGet(common.HexToAddress("0x05fDbDfaE180345C6Cff5316c286727CF1a43327").Bytes()) - assert.Nil(t, err) - assert.Nil(t, bt) - - err = mt.TryDelete(common.HexToAddress("0x4cb1aB63aF5D8931Ce09673EbD8ae2ce16fD6571").Bytes()) - assert.Nil(t, err) - - bt, err = mt.TryGet(common.HexToAddress("0x4cb1aB63aF5D8931Ce09673EbD8ae2ce16fD6571").Bytes()) - assert.Nil(t, err) - assert.Nil(t, bt) -} diff --git a/trie/zk_trie_node.go b/trie/zk_trie_node.go new file mode 100644 index 000000000000..950a77e69ee9 --- /dev/null +++ b/trie/zk_trie_node.go @@ -0,0 +1,405 @@ +package trie + +import ( + "encoding/binary" + "fmt" + "math/big" + "reflect" + "slices" + "unsafe" + + "github.com/scroll-tech/go-ethereum/common" +) + +// NodeType defines the type of node in the MT. +type NodeType byte + +const ( + // NodeTypeParent indicates the type of parent Node that has children. + NodeTypeParent NodeType = 0 + // NodeTypeLeaf indicates the type of a leaf Node that contains a key & + // value. + NodeTypeLeaf NodeType = 1 + // NodeTypeEmpty indicates the type of an empty Node. 
+ NodeTypeEmpty NodeType = 2 + + // DBEntryTypeRoot indicates the type of a DB entry that indicates the + // current Root of a MerkleTree + DBEntryTypeRoot NodeType = 3 + + NodeTypeLeaf_New NodeType = 4 + NodeTypeEmpty_New NodeType = 5 + // branch node for both child are terminal nodes + NodeTypeBranch_0 NodeType = 6 + // branch node for left child is terminal node and right child is branch + NodeTypeBranch_1 NodeType = 7 + // branch node for left child is branch node and right child is terminal + NodeTypeBranch_2 NodeType = 8 + // branch node for both child are branch nodes + NodeTypeBranch_3 NodeType = 9 +) + +// DeduceUploadType deduce a new branch type from current branch when one of its child become non-terminal +func (n NodeType) DeduceUpgradeType(goRight bool) NodeType { + if goRight { + switch n { + case NodeTypeBranch_0: + return NodeTypeBranch_1 + case NodeTypeBranch_1: + return n + case NodeTypeBranch_2, NodeTypeBranch_3: + return NodeTypeBranch_3 + } + } else { + switch n { + case NodeTypeBranch_0: + return NodeTypeBranch_2 + case NodeTypeBranch_1, NodeTypeBranch_3: + return NodeTypeBranch_3 + case NodeTypeBranch_2: + return n + } + } + + panic(fmt.Errorf("invalid NodeType: %d", n)) +} + +// DeduceDowngradeType deduce a new branch type from current branch when one of its child become terminal +func (n NodeType) DeduceDowngradeType(atRight bool) NodeType { + if atRight { + switch n { + case NodeTypeBranch_1: + return NodeTypeBranch_0 + case NodeTypeBranch_3: + return NodeTypeBranch_2 + case NodeTypeBranch_0, NodeTypeBranch_2: + panic(fmt.Errorf("can not downgrade a node with terminal child (%d)", n)) + } + } else { + switch n { + case NodeTypeBranch_3: + return NodeTypeBranch_1 + case NodeTypeBranch_2: + return NodeTypeBranch_0 + case NodeTypeBranch_0, NodeTypeBranch_1: + panic(fmt.Errorf("can not downgrade a node with terminal child (%d)", n)) + } + } + panic(fmt.Errorf("invalid NodeType: %d", n)) +} + +// Node is the struct that represents a node in 
the MT. The node should not be +// modified after creation because the cached key won't be updated. +type Node struct { + // Type is the type of node in the tree. + Type NodeType + // ChildL is the node hash of the left child of a parent node. + ChildL *Hash + // ChildR is the node hash of the right child of a parent node. + ChildR *Hash + // NodeKey is the node's key stored in a leaf node. + NodeKey *Hash + // ValuePreimage can store at most 256 byte32 as fields (represnted by BIG-ENDIAN integer) + // and the first 24 can be compressed (each bytes32 consider as 2 fields), in hashing the compressed + // elemments would be calculated first + ValuePreimage []Byte32 + // CompressedFlags use each bit for indicating the compressed flag for the first 24 fields + CompressedFlags uint32 + // nodeHash is the cache of the hash of the node to avoid recalculating + nodeHash *Hash + // valueHash is the cache of the hash of valuePreimage to avoid recalculating, only valid for leaf node + valueHash *Hash + // KeyPreimage is the original key value that derives the NodeKey, kept here only for proof + KeyPreimage *Byte32 +} + +// NewLeafNode creates a new leaf node. +func NewLeafNode(k *Hash, valueFlags uint32, valuePreimage []Byte32) *Node { + return &Node{Type: NodeTypeLeaf_New, NodeKey: k, CompressedFlags: valueFlags, ValuePreimage: valuePreimage} +} + +// NewParentNode creates a new parent node. +func NewParentNode(ntype NodeType, childL *Hash, childR *Hash) *Node { + return &Node{Type: ntype, ChildL: childL, ChildR: childR} +} + +// NewEmptyNode creates a new empty node. +func NewEmptyNode() *Node { + return &Node{Type: NodeTypeEmpty_New} +} + +// NewNodeFromBytes creates a new node by parsing the input []byte. +func NewNodeFromBytes(b []byte) (*Node, error) { + var n Node + if err := n.SetBytes(b); err != nil { + return nil, err + } + return &n, nil +} + +// LeafHash computes the key of a leaf node given the hIndex and hValue of the +// entry of the leaf. 
+func LeafHash(k, v *Hash) (*Hash, error) { + return HashElemsWithDomain(big.NewInt(int64(NodeTypeLeaf_New)), k.BigInt(), v.BigInt()) +} + +func (n *Node) SetBytes(b []byte) error { + if len(b) < 1 { + return ErrNodeBytesBadSize + } + nType := NodeType(b[0]) + b = b[1:] + switch nType { + case NodeTypeParent, NodeTypeBranch_0, + NodeTypeBranch_1, NodeTypeBranch_2, NodeTypeBranch_3: + if len(b) != 2*HashByteLen { + return ErrNodeBytesBadSize + } + + childL := n.ChildL + childR := n.ChildR + + if childL == nil { + childL = NewHashFromBytes(b[:HashByteLen]) + } else { + childL.SetBytes(b[:HashByteLen]) + } + + if childR == nil { + childR = NewHashFromBytes(b[HashByteLen : HashByteLen*2]) + } else { + childR.SetBytes(b[HashByteLen : HashByteLen*2]) + } + + *n = Node{ + Type: nType, + ChildL: childL, + ChildR: childR, + } + case NodeTypeLeaf, NodeTypeLeaf_New: + if len(b) < HashByteLen+4 { + return ErrNodeBytesBadSize + } + nodeKey := NewHashFromBytes(b[0:HashByteLen]) + mark := binary.LittleEndian.Uint32(b[HashByteLen : HashByteLen+4]) + preimageLen := int(mark & 255) + compressedFlags := mark >> 8 + valuePreimage := slices.Grow(n.ValuePreimage[0:], preimageLen) + curPos := HashByteLen + 4 + if len(b) < curPos+preimageLen*32+1 { + return ErrNodeBytesBadSize + } + for i := 0; i < preimageLen; i++ { + var byte32 Byte32 + copy(byte32[:], b[i*32+curPos:(i+1)*32+curPos]) + valuePreimage = append(valuePreimage, byte32) + } + curPos += preimageLen * 32 + preImageSize := int(b[curPos]) + curPos += 1 + + var keyPreimage *Byte32 + if preImageSize != 0 { + if len(b) < curPos+preImageSize { + return ErrNodeBytesBadSize + } + + keyPreimage = n.KeyPreimage + if keyPreimage == nil { + keyPreimage = new(Byte32) + } + copy(keyPreimage[:], b[curPos:curPos+preImageSize]) + } + + *n = Node{ + Type: nType, + NodeKey: nodeKey, + CompressedFlags: compressedFlags, + ValuePreimage: valuePreimage, + KeyPreimage: keyPreimage, + } + case NodeTypeEmpty, NodeTypeEmpty_New: + *n = Node{Type: nType} 
+ default: + return ErrInvalidNodeFound + } + return nil +} + +// IsTerminal returns if the node is 'terminated', i.e. empty or leaf node +func (n *Node) IsTerminal() bool { + switch n.Type { + case NodeTypeEmpty_New, NodeTypeLeaf_New: + return true + case NodeTypeBranch_0, NodeTypeBranch_1, NodeTypeBranch_2, NodeTypeBranch_3: + return false + case NodeTypeEmpty, NodeTypeLeaf, NodeTypeParent: + panic("encounter deprecated node types") + default: + panic(fmt.Errorf("encounter unknown node types %d", n.Type)) + } + +} + +// NodeHash computes the hash digest of the node by hashing the content in a +// specific way for each type of node. This key is used as the hash of the +// Merkle tree for each node. +func (n *Node) NodeHash() (*Hash, error) { + if n.nodeHash == nil { // Cache the key to avoid repeated hash computations. + // NOTE: We are not using the type to calculate the hash! + switch n.Type { + case NodeTypeBranch_0, + NodeTypeBranch_1, NodeTypeBranch_2, NodeTypeBranch_3: // H(ChildL || ChildR) + var err error + n.nodeHash, err = HashElemsWithDomain(big.NewInt(int64(n.Type)), + n.ChildL.BigInt(), n.ChildR.BigInt()) + if err != nil { + return nil, err + } + case NodeTypeLeaf_New: + var err error + n.valueHash, err = HandlingElemsAndByte32(n.CompressedFlags, n.ValuePreimage) + if err != nil { + return nil, err + } + + n.nodeHash, err = LeafHash(n.NodeKey, n.valueHash) + if err != nil { + return nil, err + } + + case NodeTypeEmpty_New: // Zero + n.nodeHash = &HashZero + case NodeTypeEmpty, NodeTypeLeaf, NodeTypeParent: + panic("encounter deprecated node types") + default: + n.nodeHash = &HashZero + } + } + return n.nodeHash, nil +} + +// ValueHash computes the hash digest of the value stored in the leaf node. For +// other node types, it returns the zero hash. 
+func (n *Node) ValueHash() (*Hash, error) { + if n.Type != NodeTypeLeaf_New { + return &HashZero, nil + } + if _, err := n.NodeHash(); err != nil { + return nil, err + } + return n.valueHash, nil +} + +// Data returns the wrapped data inside LeafNode and cast them into bytes +// for other node type it just return nil +func (n *Node) Data() []byte { + switch n.Type { + case NodeTypeLeaf_New: + var data []byte + hdata := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + //TODO: uintptr(reflect.ValueOf(n.ValuePreimage).UnsafePointer()) should be more elegant but only available until go 1.18 + hdata.Data = uintptr(unsafe.Pointer(&n.ValuePreimage[0])) + hdata.Len = 32 * len(n.ValuePreimage) + hdata.Cap = hdata.Len + return data + default: + return nil + } +} + +// CanonicalValue returns the byte form of a node required to be persisted, and strip unnecessary fields +// from the encoding (current only KeyPreimage for Leaf node) to keep a minimum size for content being +// stored in backend storage +func (n *Node) CanonicalValue() []byte { + switch n.Type { + case NodeTypeBranch_0, NodeTypeBranch_1, NodeTypeBranch_2, NodeTypeBranch_3: // {Type || ChildL || ChildR} + bytes := []byte{byte(n.Type)} + bytes = append(bytes, n.ChildL.Bytes()...) + bytes = append(bytes, n.ChildR.Bytes()...) + return bytes + case NodeTypeLeaf_New: // {Type || Data...} + bytes := []byte{byte(n.Type)} + bytes = append(bytes, n.NodeKey.Bytes()...) + tmp := make([]byte, 4) + compressedFlag := (n.CompressedFlags << 8) + uint32(len(n.ValuePreimage)) + binary.LittleEndian.PutUint32(tmp, compressedFlag) + bytes = append(bytes, tmp...) + for _, elm := range n.ValuePreimage { + bytes = append(bytes, elm[:]...) 
+ } + bytes = append(bytes, 0) + return bytes + case NodeTypeEmpty_New: // { Type } + return []byte{byte(n.Type)} + case NodeTypeEmpty, NodeTypeLeaf, NodeTypeParent: + panic("encounter deprecated node types") + default: + return []byte{} + } +} + +// Value returns the encoded bytes of a node, include all information of it +func (n *Node) Value() []byte { + outBytes := n.CanonicalValue() + switch n.Type { + case NodeTypeLeaf_New: // {Type || Data...} + if n.KeyPreimage != nil { + outBytes[len(outBytes)-1] = byte(len(n.KeyPreimage)) + outBytes = append(outBytes, n.KeyPreimage[:]...) + } + } + + return outBytes +} + +// String outputs a string representation of a node (different for each type). +func (n *Node) String() string { + switch n.Type { + // {Type || ChildL || ChildR} + case NodeTypeBranch_0: + return fmt.Sprintf("Parent L(t):%s R(t):%s", n.ChildL, n.ChildR) + case NodeTypeBranch_1: + return fmt.Sprintf("Parent L(t):%s R:%s", n.ChildL, n.ChildR) + case NodeTypeBranch_2: + return fmt.Sprintf("Parent L:%s R(t):%s", n.ChildL, n.ChildR) + case NodeTypeBranch_3: + return fmt.Sprintf("Parent L:%s R:%s", n.ChildL, n.ChildR) + case NodeTypeLeaf_New: // {Type || Data...} + return fmt.Sprintf("Leaf I:%v Items: %d, First:%v", n.NodeKey, len(n.ValuePreimage), n.ValuePreimage[0]) + case NodeTypeEmpty_New: // {} + return "Empty" + case NodeTypeEmpty, NodeTypeLeaf, NodeTypeParent: + return "deprecated Node" + default: + return "Invalid Node" + } +} + +// Copy creates a new Node instance from the given node +func (n *Node) Copy() *Node { + newNode, err := NewNodeFromBytes(n.Value()) + if err != nil { + panic("failed to copy trie node") + } + return newNode +} + +type ZkChildResolver struct{} + +// ForEach iterates over the children of a node and calls the given function +// note: original implementation from geth works recursively, but our Node definition +// doesn't allow that. So we only iterate over the children of the current node, which +// should be fine. 
+func (r ZkChildResolver) ForEach(node []byte, onChild func(common.Hash)) { + switch NodeType(node[0]) { + case NodeTypeParent, NodeTypeBranch_0, + NodeTypeBranch_1, NodeTypeBranch_2, NodeTypeBranch_3: + + var childHash common.Hash + childHash.SetBytes(node[1 : HashByteLen+1]) + onChild(childHash) + childHash.SetBytes(node[HashByteLen+1 : HashByteLen*2+1]) + onChild(childHash) + } +} diff --git a/trie/zk_trie_node_test.go b/trie/zk_trie_node_test.go new file mode 100644 index 000000000000..1cd5daa385ca --- /dev/null +++ b/trie/zk_trie_node_test.go @@ -0,0 +1,240 @@ +package trie + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewNode(t *testing.T) { + t.Run("Test NewEmptyNode", func(t *testing.T) { + node := NewEmptyNode() + assert.Equal(t, NodeTypeEmpty_New, node.Type) + + hash, err := node.NodeHash() + assert.NoError(t, err) + assert.Equal(t, &HashZero, hash) + + hash, err = node.ValueHash() + assert.NoError(t, err) + assert.Equal(t, &HashZero, hash) + }) + + t.Run("Test NewLeafNode", func(t *testing.T) { + k := NewHashFromBytes(bytes.Repeat([]byte("0"), 32)) + vp := []Byte32{*NewByte32FromBytes(bytes.Repeat([]byte("b"), 32))} + node := NewLeafNode(k, 1, vp) + assert.Equal(t, NodeTypeLeaf_New, node.Type) + assert.Equal(t, uint32(1), node.CompressedFlags) + assert.Equal(t, vp, node.ValuePreimage) + + hash, err := node.NodeHash() + assert.NoError(t, err) + assert.Equal(t, "2536e274d373c4ca79bc85c6aa140fe911eb7fe04939e1311004bbaf3c13c32a", hash.Hex()) + + hash, err = node.ValueHash() + assert.NoError(t, err) + hashFromVp, err := vp[0].Hash() + assert.NoError(t, err) + assert.Equal(t, hashFromVp.Text(16), hash.Hex()) + }) + + t.Run("Test NewParentNode", func(t *testing.T) { + k := NewHashFromBytes(bytes.Repeat([]byte("0"), 32)) + node := NewParentNode(NodeTypeBranch_3, k, k) + assert.Equal(t, NodeTypeBranch_3, node.Type) + assert.Equal(t, k, node.ChildL) + assert.Equal(t, k, node.ChildR) + + hash, err := 
node.NodeHash() + assert.NoError(t, err) + assert.Equal(t, "242d3e8a6a7683f9858a08cdf1db2a4448638c168e32168ef4e5e9e2e8794629", hash.Hex()) + + hash, err = node.ValueHash() + assert.NoError(t, err) + assert.Equal(t, &HashZero, hash) + }) + + t.Run("Test NewParentNodeWithEmptyChild", func(t *testing.T) { + k := NewHashFromBytes(bytes.Repeat([]byte("0"), 32)) + r, err := NewEmptyNode().NodeHash() + assert.NoError(t, err) + node := NewParentNode(NodeTypeBranch_2, k, r) + + assert.Equal(t, NodeTypeBranch_2, node.Type) + assert.Equal(t, k, node.ChildL) + assert.Equal(t, r, node.ChildR) + + hash, err := node.NodeHash() + assert.NoError(t, err) + assert.Equal(t, "005bc4e8f3b3f2ff0b980d4f3c32973de6a01f89ddacb08b0e7903d1f1f0c50f", hash.Hex()) + + hash, err = node.ValueHash() + assert.NoError(t, err) + assert.Equal(t, &HashZero, hash) + }) + + t.Run("Test Invalid Node", func(t *testing.T) { + node := &Node{Type: 99} + + invalidNodeHash, err := node.NodeHash() + assert.NoError(t, err) + assert.Equal(t, &HashZero, invalidNodeHash) + }) +} + +func TestNewNodeFromBytes(t *testing.T) { + t.Run("ParentNode", func(t *testing.T) { + k1 := NewHashFromBytes(bytes.Repeat([]byte("0"), 32)) + k2 := NewHashFromBytes(bytes.Repeat([]byte("0"), 32)) + node := NewParentNode(NodeTypeBranch_0, k1, k2) + b := node.Value() + + node, err := NewNodeFromBytes(b) + assert.NoError(t, err) + + assert.Equal(t, NodeTypeBranch_0, node.Type) + assert.Equal(t, k1, node.ChildL) + assert.Equal(t, k2, node.ChildR) + + hash, err := node.NodeHash() + assert.NoError(t, err) + assert.Equal(t, "12b90fefb7b19131d25980a38ca92edb66bb91828d305836e4ab7e961165c83f", hash.Hex()) + + hash, err = node.ValueHash() + assert.NoError(t, err) + assert.Equal(t, &HashZero, hash) + }) + + t.Run("LeafNode", func(t *testing.T) { + k := NewHashFromBytes(bytes.Repeat([]byte("0"), 32)) + vp := make([]Byte32, 1) + node := NewLeafNode(k, 1, vp) + + node.KeyPreimage = NewByte32FromBytes(bytes.Repeat([]byte("b"), 32)) + + nodeBytes := 
node.Value() + newNode, err := NewNodeFromBytes(nodeBytes) + assert.NoError(t, err) + + assert.Equal(t, node.Type, newNode.Type) + assert.Equal(t, node.NodeKey, newNode.NodeKey) + assert.Equal(t, node.ValuePreimage, newNode.ValuePreimage) + assert.Equal(t, node.KeyPreimage, newNode.KeyPreimage) + + hash, err := node.NodeHash() + assert.NoError(t, err) + assert.Equal(t, "2f7094f04ed1592909311471ba67d84d7d11e2438c055f4d5d43189390c5cf5a", hash.Hex()) + + hash, err = node.ValueHash() + assert.NoError(t, err) + hashFromVp, err := vp[0].Hash() + + assert.Equal(t, NewHashFromBigInt(hashFromVp), hash) + }) + + t.Run("EmptyNode", func(t *testing.T) { + node := NewEmptyNode() + b := node.Value() + + node, err := NewNodeFromBytes(b) + assert.NoError(t, err) + + assert.Equal(t, NodeTypeEmpty_New, node.Type) + + hash, err := node.NodeHash() + assert.NoError(t, err) + assert.Equal(t, &HashZero, hash) + + hash, err = node.ValueHash() + assert.NoError(t, err) + assert.Equal(t, &HashZero, hash) + }) + + t.Run("BadSize", func(t *testing.T) { + testCases := [][]byte{ + {}, + {0, 1, 2}, + func() []byte { + b := make([]byte, HashByteLen+3) + b[0] = byte(NodeTypeLeaf) + return b + }(), + func() []byte { + k := NewHashFromBytes([]byte{1, 2, 3, 4, 5}) + vp := make([]Byte32, 1) + node := NewLeafNode(k, 1, vp) + b := node.Value() + return b[:len(b)-32] + }(), + func() []byte { + k := NewHashFromBytes([]byte{1, 2, 3, 4, 5}) + vp := make([]Byte32, 1) + node := NewLeafNode(k, 1, vp) + node.KeyPreimage = NewByte32FromBytes([]byte{6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37}) + + b := node.Value() + return b[:len(b)-1] + }(), + } + + for _, b := range testCases { + node, err := NewNodeFromBytes(b) + assert.ErrorIs(t, err, ErrNodeBytesBadSize) + assert.Nil(t, node) + } + }) + + t.Run("InvalidType", func(t *testing.T) { + b := []byte{255} + + node, err := NewNodeFromBytes(b) + assert.ErrorIs(t, err, ErrInvalidNodeFound) 
+ assert.Nil(t, node) + }) +} + +func TestNodeValueAndData(t *testing.T) { + k := NewHashFromBytes(bytes.Repeat([]byte("a"), 32)) + vp := []Byte32{*NewByte32FromBytes(bytes.Repeat([]byte("b"), 32))} + + node := NewLeafNode(k, 1, vp) + canonicalValue := node.CanonicalValue() + assert.Equal(t, []byte{0x4, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x1, 0x1, 0x0, 0x0, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x0}, canonicalValue) + assert.Equal(t, []byte{0x4, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x1, 0x1, 0x0, 0x0, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x0}, node.Value()) + node.KeyPreimage = NewByte32FromBytes(bytes.Repeat([]byte("c"), 32)) + assert.Equal(t, []byte{0x4, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x1, 0x1, 0x0, 0x0, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x20, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63}, node.Value()) + assert.Equal(t, []byte{0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 
0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62}, node.Data()) + + parentNode := NewParentNode(NodeTypeBranch_3, k, k) + canonicalValue = parentNode.CanonicalValue() + assert.Equal(t, []byte{0x9, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61}, canonicalValue) + assert.Nil(t, parentNode.Data()) + + emptyNode := &Node{Type: NodeTypeEmpty_New} + assert.Equal(t, []byte{byte(emptyNode.Type)}, emptyNode.CanonicalValue()) + assert.Nil(t, emptyNode.Data()) + + invalidNode := &Node{Type: 99} + assert.Equal(t, []byte{}, invalidNode.CanonicalValue()) + assert.Nil(t, invalidNode.Data()) +} + +func TestNodeString(t *testing.T) { + k := NewHashFromBytes(bytes.Repeat([]byte("a"), 32)) + vp := []Byte32{*NewByte32FromBytes(bytes.Repeat([]byte("b"), 32))} + + leafNode := NewLeafNode(k, 1, vp) + assert.Equal(t, fmt.Sprintf("Leaf I:%v Items: %d, First:%v", leafNode.NodeKey, len(leafNode.ValuePreimage), leafNode.ValuePreimage[0]), leafNode.String()) + + parentNode := NewParentNode(NodeTypeBranch_3, k, k) + assert.Equal(t, fmt.Sprintf("Parent L:%s R:%s", parentNode.ChildL, parentNode.ChildR), parentNode.String()) + + emptyNode := NewEmptyNode() + assert.Equal(t, "Empty", emptyNode.String()) + + invalidNode := &Node{Type: 99} + assert.Equal(t, "Invalid Node", invalidNode.String()) +} diff --git a/trie/zk_trie_proof_test.go b/trie/zk_trie_proof_test.go index bab8950ec217..4701a0229cb9 100644 --- a/trie/zk_trie_proof_test.go +++ b/trie/zk_trie_proof_test.go @@ -18,17 +18,14 @@ package trie import ( "bytes" + "crypto/rand" mrand "math/rand" "testing" "time" 
"github.com/stretchr/testify/assert" - zkt "github.com/scroll-tech/zktrie/types" - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/rawdb" - "github.com/scroll-tech/go-ethereum/crypto" "github.com/scroll-tech/go-ethereum/ethdb/memorydb" ) @@ -43,13 +40,8 @@ func makeSMTProvers(mt *ZkTrie) []func(key []byte) *memorydb.Database { // Create a direct trie based Merkle prover provers = append(provers, func(key []byte) *memorydb.Database { - word := zkt.NewByte32FromBytesPaddingZero(key) - k, err := word.Hash() - if err != nil { - panic(err) - } proofDB := memorydb.New() - err = mt.Prove(common.BytesToHash(k.Bytes()).Bytes(), proofDB) + err := mt.Prove(key, proofDB) if err != nil { panic(err) } @@ -64,14 +56,14 @@ func verifyValue(proveVal []byte, vPreimage []byte) bool { } func TestSMTOneElementProof(t *testing.T) { - tr, _ := NewZkTrie(common.Hash{}, NewZktrieDatabase(rawdb.NewMemoryDatabase())) - mt := &zkTrieImplTestWrapper{tr.Tree()} - err := mt.UpdateWord( - zkt.NewByte32FromBytesPaddingZero(bytes.Repeat([]byte("k"), 32)), - zkt.NewByte32FromBytesPaddingZero(bytes.Repeat([]byte("v"), 32)), + mt, _ := newTestingMerkle(t) + err := mt.TryUpdate( + NewByte32FromBytesPaddingZero(bytes.Repeat([]byte("k"), 32)).Bytes(), + 1, + []Byte32{*NewByte32FromBytesPaddingZero(bytes.Repeat([]byte("v"), 32))}, ) assert.Nil(t, err) - for i, prover := range makeSMTProvers(tr) { + for i, prover := range makeSMTProvers(mt) { keyBytes := bytes.Repeat([]byte("k"), 32) proof := prover(keyBytes) if proof == nil { @@ -84,7 +76,7 @@ func TestSMTOneElementProof(t *testing.T) { root, err := mt.Root() assert.NoError(t, err) - val, err := VerifyProof(common.BytesToHash(root.Bytes()), keyBytes, proof) + val, err := VerifyProofSMT(common.BytesToHash(root.Bytes()), keyBytes, proof) if err != nil { t.Fatalf("prover %d: failed to verify proof: %v\nraw proof: %x", i, err, proof) } @@ -96,21 +88,22 @@ func TestSMTOneElementProof(t *testing.T) { func TestSMTProof(t 
*testing.T) { mt, vals := randomZktrie(t, 500) - root, err := mt.Tree().Root() + root, err := mt.Root() assert.NoError(t, err) for i, prover := range makeSMTProvers(mt) { - for _, kv := range vals { - proof := prover(kv.k) + for kStr, v := range vals { + k := []byte(kStr) + proof := prover(k) if proof == nil { - t.Fatalf("prover %d: missing key %x while constructing proof", i, kv.k) + t.Fatalf("prover %d: missing key %x while constructing proof", i, k) } - val, err := VerifyProof(common.BytesToHash(root.Bytes()), kv.k, proof) + val, err := VerifyProofSMT(common.BytesToHash(root.Bytes()), k, proof) if err != nil { - t.Fatalf("prover %d: failed to verify proof for key %x: %v\nraw proof: %x\n", i, kv.k, err, proof) + t.Fatalf("prover %d: failed to verify proof for key %x: %v\nraw proof: %x\n", i, k, err, proof) } - if !verifyValue(val, zkt.NewByte32FromBytesPaddingZero(kv.v)[:]) { - t.Fatalf("prover %d: verified value mismatch for key %x, want %x, get %x", i, kv.k, kv.v, val) + if !verifyValue(val, NewByte32FromBytesPaddingZero(v)[:]) { + t.Fatalf("prover %d: verified value mismatch for key %x, want %x, get %x", i, k, v, val) } } } @@ -118,29 +111,30 @@ func TestSMTProof(t *testing.T) { func TestSMTBadProof(t *testing.T) { mt, vals := randomZktrie(t, 500) - root, err := mt.Tree().Root() + root, err := mt.Root() assert.NoError(t, err) for i, prover := range makeSMTProvers(mt) { - for _, kv := range vals { - proof := prover(kv.k) + for kStr, _ := range vals { + k := []byte(kStr) + proof := prover(k) if proof == nil { t.Fatalf("prover %d: nil proof", i) } it := proof.NewIterator(nil, nil) - for i, d := 0, mrand.Intn(proof.Len()); i <= d; i++ { + for i, d := 0, mrand.Intn(proof.Len()-1); i <= d; i++ { + it.Next() + } + if bytes.Equal(it.Key(), magicHash) { it.Next() } + key := it.Key() - val, _ := proof.Get(key) proof.Delete(key) it.Release() - mutateByte(val) - proof.Put(crypto.Keccak256(val), val) - - if _, err := VerifyProof(common.BytesToHash(root.Bytes()), kv.k, 
proof); err == nil { - t.Fatalf("prover %d: expected proof to fail for key %x", i, kv.k) + if value, err := VerifyProof(common.BytesToHash(root.Bytes()), k, proof); err == nil && value != nil { + t.Fatalf("prover %d: expected proof to fail for key %x", i, k) } } } @@ -149,15 +143,15 @@ func TestSMTBadProof(t *testing.T) { // Tests that missing keys can also be proven. The test explicitly uses a single // entry trie and checks for missing keys both before and after the single entry. func TestSMTMissingKeyProof(t *testing.T) { - tr, _ := NewZkTrie(common.Hash{}, NewZktrieDatabase(rawdb.NewMemoryDatabase())) - mt := &zkTrieImplTestWrapper{tr.Tree()} - err := mt.UpdateWord( - zkt.NewByte32FromBytesPaddingZero(bytes.Repeat([]byte("k"), 32)), - zkt.NewByte32FromBytesPaddingZero(bytes.Repeat([]byte("v"), 32)), + mt, _ := newTestingMerkle(t) + err := mt.TryUpdate( + NewByte32FromBytesPaddingZero(bytes.Repeat([]byte("k"), 32)).Bytes(), + 1, + []Byte32{*NewByte32FromBytesPaddingZero(bytes.Repeat([]byte("v"), 32))}, ) assert.Nil(t, err) - prover := makeSMTProvers(tr)[0] + prover := makeSMTProvers(mt)[0] for i, key := range []string{"a", "j", "l", "z"} { keyBytes := bytes.Repeat([]byte(key), 32) @@ -170,7 +164,7 @@ func TestSMTMissingKeyProof(t *testing.T) { root, err := mt.Root() assert.NoError(t, err) - val, err := VerifyProof(common.BytesToHash(root.Bytes()), keyBytes, proof) + val, err := VerifyProofSMT(common.BytesToHash(root.Bytes()), keyBytes, proof) if err != nil { t.Fatalf("test %d: failed to verify proof: %v\nraw proof: %x", i, err, proof) } @@ -180,117 +174,35 @@ func TestSMTMissingKeyProof(t *testing.T) { } } -func randomZktrie(t *testing.T, n int) (*ZkTrie, map[string]*kv) { - tr, err := NewZkTrie(common.Hash{}, NewZktrieDatabase(rawdb.NewMemoryDatabase())) - if err != nil { - panic(err) +func randomZktrie(t *testing.T, n int) (*ZkTrie, map[string][]byte) { + randBytes := func(len int) []byte { + buf := make([]byte, len) + if n, err := rand.Read(buf); n != len || 
err != nil { + panic(err) + } + return buf } - mt := &zkTrieImplTestWrapper{tr.Tree()} - vals := make(map[string]*kv) + + mt, _ := newTestingMerkle(t) + vals := make(map[string][]byte) for i := byte(0); i < 100; i++ { - value := &kv{common.LeftPadBytes([]byte{i}, 32), bytes.Repeat([]byte{i}, 32), false} - value2 := &kv{common.LeftPadBytes([]byte{i + 10}, 32), bytes.Repeat([]byte{i}, 32), false} + key, value := common.LeftPadBytes([]byte{i}, 32), NewByte32FromBytes(bytes.Repeat([]byte{i}, 32)) + key2, value2 := common.LeftPadBytes([]byte{i + 10}, 32), NewByte32FromBytes(bytes.Repeat([]byte{i}, 32)) - err = mt.UpdateWord(zkt.NewByte32FromBytesPaddingZero(value.k), zkt.NewByte32FromBytesPaddingZero(value.v)) + err := mt.TryUpdate(key, 1, []Byte32{*value}) assert.Nil(t, err) - err = mt.UpdateWord(zkt.NewByte32FromBytesPaddingZero(value2.k), zkt.NewByte32FromBytesPaddingZero(value2.v)) + err = mt.TryUpdate(key2, 1, []Byte32{*value2}) assert.Nil(t, err) - vals[string(value.k)] = value - vals[string(value2.k)] = value2 + vals[string(key)] = value.Bytes() + vals[string(key2)] = value2.Bytes() } for i := 0; i < n; i++ { - value := &kv{randBytes(32), randBytes(20), false} - err = mt.UpdateWord(zkt.NewByte32FromBytesPaddingZero(value.k), zkt.NewByte32FromBytesPaddingZero(value.v)) + key, value := randBytes(32), NewByte32FromBytes(randBytes(20)) + err := mt.TryUpdate(key, 1, []Byte32{*value}) assert.Nil(t, err) - vals[string(value.k)] = value + vals[string(key)] = value.Bytes() } - return tr, vals -} - -// Tests that new "proof trace" feature -func TestProofWithDeletion(t *testing.T) { - tr, _ := NewZkTrie(common.Hash{}, NewZktrieDatabase(rawdb.NewMemoryDatabase())) - mt := &zkTrieImplTestWrapper{tr.Tree()} - key1 := bytes.Repeat([]byte("l"), 32) - key2 := bytes.Repeat([]byte("m"), 32) - err := mt.UpdateWord( - zkt.NewByte32FromBytesPaddingZero(key1), - zkt.NewByte32FromBytesPaddingZero(bytes.Repeat([]byte("v"), 32)), - ) - assert.NoError(t, err) - err = mt.UpdateWord( - 
zkt.NewByte32FromBytesPaddingZero(key2), - zkt.NewByte32FromBytesPaddingZero(bytes.Repeat([]byte("n"), 32)), - ) - assert.NoError(t, err) - - proof := memorydb.New() - s_key1, err := zkt.ToSecureKeyBytes(key1) - assert.NoError(t, err) - - proofTracer := tr.NewProofTracer() - - err = proofTracer.Prove(s_key1.Bytes(), proof) - assert.NoError(t, err) - nd, err := tr.TryGet(key2) - assert.NoError(t, err) - - s_key2, err := zkt.ToSecureKeyBytes(bytes.Repeat([]byte("x"), 32)) - assert.NoError(t, err) - - err = proofTracer.Prove(s_key2.Bytes(), proof) - assert.NoError(t, err) - //assert.Equal(t, len(sibling1), len(delTracer.GetProofs())) - - siblings, err := proofTracer.GetDeletionProofs() - assert.NoError(t, err) - assert.Equal(t, 0, len(siblings)) - - proofTracer.MarkDeletion(s_key1.Bytes()) - siblings, err = proofTracer.GetDeletionProofs() - assert.NoError(t, err) - assert.Equal(t, 1, len(siblings)) - l := len(siblings[0]) - // a hacking to grep the value part directly from the encoded leaf node, - // notice the sibling of key `k*32`` is just the leaf of key `m*32` - assert.Equal(t, siblings[0][l-33:l-1], nd) - - // Marking a key that is currently not hit (but terminated by an empty node) - // also causes it to be added to the deletion proof - proofTracer.MarkDeletion(s_key2.Bytes()) - siblings, err = proofTracer.GetDeletionProofs() - assert.NoError(t, err) - assert.Equal(t, 2, len(siblings)) - - key3 := bytes.Repeat([]byte("x"), 32) - err = mt.UpdateWord( - zkt.NewByte32FromBytesPaddingZero(key3), - zkt.NewByte32FromBytesPaddingZero(bytes.Repeat([]byte("z"), 32)), - ) - assert.NoError(t, err) - - proofTracer = tr.NewProofTracer() - err = proofTracer.Prove(s_key1.Bytes(), proof) - assert.NoError(t, err) - err = proofTracer.Prove(s_key2.Bytes(), proof) - assert.NoError(t, err) - - proofTracer.MarkDeletion(s_key1.Bytes()) - siblings, err = proofTracer.GetDeletionProofs() - assert.NoError(t, err) - assert.Equal(t, 1, len(siblings)) - - 
proofTracer.MarkDeletion(s_key2.Bytes()) - siblings, err = proofTracer.GetDeletionProofs() - assert.NoError(t, err) - assert.Equal(t, 2, len(siblings)) - - // one of the siblings is just leaf for key2, while - // another one must be a middle node - match1 := bytes.Equal(siblings[0][l-33:l-1], nd) - match2 := bytes.Equal(siblings[1][l-33:l-1], nd) - assert.True(t, match1 || match2) - assert.False(t, match1 && match2) + return mt, vals } diff --git a/trie/zk_trie_test.go b/trie/zk_trie_test.go index 61e5d33427f2..db03180b96c0 100644 --- a/trie/zk_trie_test.go +++ b/trie/zk_trie_test.go @@ -1,122 +1,763 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- package trie import ( "bytes" - "encoding/binary" - "io/ioutil" - "os" + "math/big" "runtime" "sync" "testing" - "github.com/stretchr/testify/assert" - - zkt "github.com/scroll-tech/zktrie/types" - + "github.com/iden3/go-iden3-crypto/constants" "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core/rawdb" - "github.com/scroll-tech/go-ethereum/trie/triedb/hashdb" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/trie/trienode" + "github.com/stretchr/testify/assert" ) -func newEmptyZkTrie() *ZkTrie { - trie, _ := NewZkTrie( - common.Hash{}, - &ZktrieDatabase{ - db: NewDatabase(rawdb.NewMemoryDatabase(), - &Config{Preimages: true}), - prefix: []byte{}, - }, - ) - return trie +func newTestingMerkle(t *testing.T) (*ZkTrie, *Database) { + db := NewDatabase(rawdb.NewMemoryDatabase(), HashDefaults) + return newTestingMerkleWithDb(t, common.Hash{}, db) +} + +func newTestingMerkleWithDb(t *testing.T, root common.Hash, db *Database) (*ZkTrie, *Database) { + maxLevels := NodeKeyValidBytes * 8 + mt, err := NewZkTrie(TrieID(root), db) + if err != nil { + t.Fatal(err) + return nil, nil + } + assert.Equal(t, maxLevels, mt.MaxLevels()) + return mt, db +} + +func TestMerkleTree_Init(t *testing.T) { + maxLevels := 248 + t.Run("Test NewZkTrieImpl", func(t *testing.T) { + mt, _ := newTestingMerkle(t) + mtRoot, err := mt.Root() + assert.NoError(t, err) + assert.Equal(t, HashZero.Bytes(), mtRoot.Bytes()) + }) + + t.Run("Test NewZkTrieImplWithRoot with zero hash root", func(t *testing.T) { + mt, _ := newTestingMerkle(t) + mtRoot, err := mt.Root() + assert.NoError(t, err) + assert.Equal(t, HashZero.Bytes(), mtRoot.Bytes()) + }) + + t.Run("Test NewZkTrieImplWithRoot with non-zero hash root and node exists", func(t *testing.T) { + mt1, db := newTestingMerkle(t) + mt1Root, err := mt1.Root() + assert.NoError(t, err) + assert.Equal(t, HashZero.Bytes(), mt1Root.Bytes()) + err = mt1.TryUpdate([]byte{1}, 1, []Byte32{{byte(1)}}) 
+ assert.NoError(t, err) + mt1Root, err = mt1.Root() + assert.NoError(t, err) + assert.Equal(t, "1525946038598ec48c663db06fa4f0b68ba40b80d7b1ddce3206d4857ac4a47c", mt1Root.Hex()) + rootHash, nodeSet, err := mt1.Commit(false) + assert.NoError(t, err) + assert.NoError(t, db.Update(rootHash, common.Hash{}, 0, trienode.NewWithNodeSet(nodeSet), nil)) + assert.NoError(t, db.Commit(rootHash, false)) + + mt2, _ := newTestingMerkleWithDb(t, rootHash, db) + assert.Equal(t, maxLevels, mt2.maxLevels) + mt2Root, err := mt2.Root() + assert.NoError(t, err) + assert.Equal(t, "1525946038598ec48c663db06fa4f0b68ba40b80d7b1ddce3206d4857ac4a47c", mt2Root.Hex()) + }) +} + +func TestMerkleTree_AddUpdateGetWord(t *testing.T) { + mt, _ := newTestingMerkle(t) + + testData := []struct { + key byte + initialVal byte + updatedVal byte + }{ + {1, 2, 7}, + {3, 4, 8}, + {5, 6, 9}, + } + + for _, td := range testData { + err := mt.TryUpdate([]byte{td.key}, 1, []Byte32{{td.initialVal}}) + assert.NoError(t, err) + + node, err := mt.GetLeafNode([]byte{td.key}) + assert.NoError(t, err) + assert.Equal(t, 1, len(node.ValuePreimage)) + assert.Equal(t, (&Byte32{td.initialVal})[:], node.ValuePreimage[0][:]) + } + + for _, td := range testData { + err := mt.TryUpdate([]byte{td.key}, 1, []Byte32{{td.updatedVal}}) + assert.NoError(t, err) + + node, err := mt.GetLeafNode([]byte{td.key}) + assert.NoError(t, err) + assert.Equal(t, 1, len(node.ValuePreimage)) + assert.Equal(t, (&Byte32{td.updatedVal})[:], node.ValuePreimage[0][:]) + } + + _, err := mt.GetLeafNode([]byte{100}) + assert.Equal(t, ErrKeyNotFound, err) } -// makeTestSecureTrie creates a large enough secure trie for testing. 
-func makeTestZkTrie() (*ZktrieDatabase, *ZkTrie, map[string][]byte) { - // Create an empty trie - triedb := NewZktrieDatabase(rawdb.NewMemoryDatabase()) - trie, _ := NewZkTrie(common.Hash{}, triedb) - - // Fill it with some arbitrary data - content := make(map[string][]byte) - for i := byte(0); i < 255; i++ { - // Map the same data under multiple keys - key, val := common.LeftPadBytes([]byte{1, i}, 32), bytes.Repeat([]byte{i}, 32) - content[string(key)] = val - trie.Update(key, val) - - key, val = common.LeftPadBytes([]byte{2, i}, 32), bytes.Repeat([]byte{i}, 32) - content[string(key)] = val - trie.Update(key, val) - - // Add some other data to inflate the trie - for j := byte(3); j < 13; j++ { - key, val = common.LeftPadBytes([]byte{j, i}, 32), bytes.Repeat([]byte{j, i}, 16) - content[string(key)] = val - trie.Update(key, val) +func TestMerkleTree_Deletion(t *testing.T) { + t.Run("Check root consistency", func(t *testing.T) { + var err error + mt, _ := newTestingMerkle(t) + hashes := make([]*Hash, 7) + hashes[0], err = mt.Root() + assert.NoError(t, err) + + for i := 0; i < 6; i++ { + err := mt.TryUpdate([]byte{byte(i)}, 1, []Byte32{{byte(i)}}) + assert.NoError(t, err) + hashes[i+1], err = mt.Root() + assert.NoError(t, err) + } + + for i := 5; i >= 0; i-- { + err := mt.TryDelete([]byte{byte(i)}) + assert.NoError(t, err) + root, err := mt.Root() + assert.NoError(t, err) + assert.Equal(t, hashes[i], root, i) } + }) +} + +func TestZkTrieImpl_Add(t *testing.T) { + k1 := NewByte32FromBytes([]byte{1}) + k2 := NewByte32FromBytes([]byte{2}) + k3 := NewByte32FromBytes([]byte{3}) + + kvMap := map[*Byte32]*Byte32{ + k1: NewByte32FromBytes([]byte{1}), + k2: NewByte32FromBytes([]byte{2}), + k3: NewByte32FromBytes([]byte{3}), } - trie.Commit(false) - // Return the generated trie - return triedb, trie, content + t.Run("Add 1 and 2 in different orders", func(t *testing.T) { + orders := [][]*Byte32{ + {k1, k2}, + {k2, k1}, + } + + roots := make([]*Hash, len(orders)) + for i, order 
:= range orders { + mt, _ := newTestingMerkle(t) + for _, key := range order { + value := kvMap[key] + err := mt.TryUpdate(key.Bytes(), 1, []Byte32{*value}) + assert.NoError(t, err) + } + var err error + roots[i], err = mt.Root() + assert.NoError(t, err) + } + + assert.Equal(t, "254d2db0dc83bbd21708e2af65597e14bce405b38867cedea74a5e3b3be4271a", roots[0].Hex()) + assert.Equal(t, roots[0], roots[1]) + }) + + t.Run("Add 1, 2, 3 in different orders", func(t *testing.T) { + orders := [][]*Byte32{ + {k1, k2, k3}, + {k1, k3, k2}, + {k2, k1, k3}, + {k2, k3, k1}, + {k3, k1, k2}, + {k3, k2, k1}, + } + + roots := make([]*Hash, len(orders)) + for i, order := range orders { + mt, _ := newTestingMerkle(t) + for _, key := range order { + value := kvMap[key] + err := mt.TryUpdate(key.Bytes(), 1, []Byte32{*value}) + assert.NoError(t, err) + } + var err error + roots[i], err = mt.Root() + assert.NoError(t, err) + } + + for i := 1; i < len(roots); i++ { + assert.Equal(t, "0274b9caacecfaaaffa25a00c1c17bd91b9a0fc590aedc06ef22c8d2ba7c76a7", roots[0].Hex()) + assert.Equal(t, roots[0], roots[i]) + } + }) } -func TestZktrieDelete(t *testing.T) { - t.Skip("var-len kv not supported") - trie := newEmptyZkTrie() - vals := []struct{ k, v string }{ - {"do", "verb"}, - {"ether", "wookiedoo"}, - {"horse", "stallion"}, - {"shaman", "horse"}, - {"doge", "coin"}, - {"ether", ""}, - {"dog", "puppy"}, - {"shaman", ""}, +func TestZkTrieImpl_Update(t *testing.T) { + k1 := []byte{1} + k2 := []byte{2} + k3 := []byte{3} + + t.Run("Update 1", func(t *testing.T) { + mt1, _ := newTestingMerkle(t) + err := mt1.TryUpdate(k1, 1, []Byte32{*NewByte32FromBytes([]byte{1})}) + assert.NoError(t, err) + root1, err := mt1.Root() + assert.NoError(t, err) + + mt2, _ := newTestingMerkle(t) + err = mt2.TryUpdate(k1, 1, []Byte32{*NewByte32FromBytes([]byte{2})}) + assert.NoError(t, err) + err = mt2.TryUpdate(k1, 1, []Byte32{*NewByte32FromBytes([]byte{1})}) + assert.NoError(t, err) + root2, err := mt2.Root() + assert.NoError(t, 
err) + + assert.Equal(t, root1, root2) + }) + + t.Run("Update 2", func(t *testing.T) { + mt1, _ := newTestingMerkle(t) + err := mt1.TryUpdate(k1, 1, []Byte32{*NewByte32FromBytes([]byte{1})}) + assert.NoError(t, err) + err = mt1.TryUpdate(k2, 1, []Byte32{*NewByte32FromBytes([]byte{2})}) + assert.NoError(t, err) + root1, err := mt1.Root() + assert.NoError(t, err) + + mt2, _ := newTestingMerkle(t) + err = mt2.TryUpdate(k1, 1, []Byte32{*NewByte32FromBytes([]byte{1})}) + assert.NoError(t, err) + err = mt2.TryUpdate(k2, 1, []Byte32{*NewByte32FromBytes([]byte{3})}) + assert.NoError(t, err) + err = mt2.TryUpdate(k2, 1, []Byte32{*NewByte32FromBytes([]byte{2})}) + assert.NoError(t, err) + root2, err := mt2.Root() + assert.NoError(t, err) + + assert.Equal(t, root1, root2) + }) + + t.Run("Update 1, 2, 3", func(t *testing.T) { + mt1, _ := newTestingMerkle(t) + mt2, _ := newTestingMerkle(t) + keys := [][]byte{k1, k2, k3} + for i, key := range keys { + err := mt1.TryUpdate(key, 1, []Byte32{*NewByte32FromBytes([]byte{byte(i)})}) + assert.NoError(t, err) + } + for i, key := range keys { + err := mt2.TryUpdate(key, 1, []Byte32{*NewByte32FromBytes([]byte{byte(i + 3)})}) + assert.NoError(t, err) + } + for i, key := range keys { + err := mt1.TryUpdate(key, 1, []Byte32{*NewByte32FromBytes([]byte{byte(i + 6)})}) + assert.NoError(t, err) + err = mt2.TryUpdate(key, 1, []Byte32{*NewByte32FromBytes([]byte{byte(i + 6)})}) + assert.NoError(t, err) + } + + root1, err := mt1.Root() + assert.NoError(t, err) + root2, err := mt2.Root() + assert.NoError(t, err) + + assert.Equal(t, root1, root2) + }) + + t.Run("Update same value", func(t *testing.T) { + mt, _ := newTestingMerkle(t) + keys := [][]byte{k1, k2, k3} + for _, key := range keys { + err := mt.TryUpdate(key, 1, []Byte32{*NewByte32FromBytes([]byte{1})}) + assert.NoError(t, err) + err = mt.TryUpdate(key, 1, []Byte32{*NewByte32FromBytes([]byte{1})}) + assert.NoError(t, err) + node, err := mt.GetLeafNode(key) + assert.NoError(t, err) + 
assert.Equal(t, 1, len(node.ValuePreimage)) + assert.Equal(t, NewByte32FromBytes([]byte{1}).Bytes(), node.ValuePreimage[0][:]) + } + }) + + t.Run("Update non-existent word", func(t *testing.T) { + mt, _ := newTestingMerkle(t) + err := mt.TryUpdate(k1, 1, []Byte32{*NewByte32FromBytes([]byte{1})}) + assert.NoError(t, err) + node, err := mt.GetLeafNode(k1) + assert.NoError(t, err) + assert.Equal(t, 1, len(node.ValuePreimage)) + assert.Equal(t, NewByte32FromBytes([]byte{1}).Bytes(), node.ValuePreimage[0][:]) + }) +} + +func TestZkTrieImpl_Delete(t *testing.T) { + k1 := []byte{1} + k2 := []byte{2} + k3 := []byte{3} + k4 := []byte{4} + + t.Run("Test deletion leads to empty tree", func(t *testing.T) { + emptyMT, _ := newTestingMerkle(t) + emptyMTRoot, err := emptyMT.Root() + assert.NoError(t, err) + + mt1, _ := newTestingMerkle(t) + err = mt1.TryUpdate(k1, 1, []Byte32{*NewByte32FromBytes([]byte{1})}) + assert.NoError(t, err) + err = mt1.TryDelete(k1) + assert.NoError(t, err) + mt1Root, err := mt1.Root() + assert.NoError(t, err) + assert.Equal(t, HashZero, *mt1Root) + assert.Equal(t, emptyMTRoot, mt1Root) + + keys := [][]byte{k1, k2, k3, k4} + mt2, _ := newTestingMerkle(t) + for _, key := range keys { + err := mt2.TryUpdate(key, 1, []Byte32{*NewByte32FromBytes([]byte{1})}) + assert.NoError(t, err) + } + for _, key := range keys { + err := mt2.TryDelete(key) + assert.NoError(t, err) + } + mt2Root, err := mt2.Root() + assert.NoError(t, err) + assert.Equal(t, HashZero, *mt2Root) + assert.Equal(t, emptyMTRoot, mt2Root) + + mt3, _ := newTestingMerkle(t) + for _, key := range keys { + err := mt3.TryUpdate(key, 1, []Byte32{*NewByte32FromBytes([]byte{1})}) + assert.NoError(t, err) + } + for i := len(keys) - 1; i >= 0; i-- { + err := mt3.TryDelete(keys[i]) + assert.NoError(t, err) + } + mt3Root, err := mt3.Root() + assert.NoError(t, err) + assert.Equal(t, HashZero, *mt3Root) + assert.Equal(t, emptyMTRoot, mt3Root) + }) + + t.Run("Test equivalent trees after deletion", func(t 
*testing.T) { + keys := [][]byte{k1, k2, k3, k4} + + mt1, _ := newTestingMerkle(t) + for i, key := range keys { + err := mt1.TryUpdate(key, 1, []Byte32{*NewByte32FromBytes([]byte{byte(i + 1)})}) + assert.NoError(t, err) + } + err := mt1.TryDelete(k1) + assert.NoError(t, err) + err = mt1.TryDelete(k2) + assert.NoError(t, err) + + mt2, _ := newTestingMerkle(t) + err = mt2.TryUpdate(k3, 1, []Byte32{*NewByte32FromBytes([]byte{byte(3)})}) + assert.NoError(t, err) + err = mt2.TryUpdate(k4, 1, []Byte32{*NewByte32FromBytes([]byte{byte(4)})}) + assert.NoError(t, err) + + mt1Root, err := mt1.Root() + assert.NoError(t, err) + mt2Root, err := mt2.Root() + assert.NoError(t, err) + + assert.Equal(t, mt1Root, mt2Root) + + mt3, _ := newTestingMerkle(t) + for i, key := range keys { + err := mt3.TryUpdate(key, 1, []Byte32{*NewByte32FromBytes([]byte{byte(i + 1)})}) + assert.NoError(t, err) + } + err = mt3.TryDelete(k1) + assert.NoError(t, err) + err = mt3.TryDelete(k3) + assert.NoError(t, err) + mt4, _ := newTestingMerkle(t) + err = mt4.TryUpdate(k2, 1, []Byte32{*NewByte32FromBytes([]byte{2})}) + assert.NoError(t, err) + err = mt4.TryUpdate(k4, 1, []Byte32{*NewByte32FromBytes([]byte{4})}) + assert.NoError(t, err) + + mt3Root, err := mt3.Root() + assert.NoError(t, err) + mt4Root, err := mt4.Root() + assert.NoError(t, err) + + assert.Equal(t, mt3Root, mt4Root) + }) + + t.Run("Test repeat deletion", func(t *testing.T) { + mt, _ := newTestingMerkle(t) + err := mt.TryUpdate(k1, 1, []Byte32{*NewByte32FromBytes([]byte{1})}) + assert.NoError(t, err) + err = mt.TryDelete(k1) + assert.NoError(t, err) + err = mt.TryDelete(k1) + assert.NoError(t, err) + }) + + t.Run("Test deletion of non-existent node", func(t *testing.T) { + mt, _ := newTestingMerkle(t) + err := mt.TryDelete(k1) + assert.NoError(t, err) + }) +} + +func TestMerkleTree_BuildAndVerifyZkTrieProof(t *testing.T) { + zkTrie, _ := newTestingMerkle(t) + + testData := []struct { + key *big.Int + value byte + }{ + {big.NewInt(1), 2}, + 
{big.NewInt(3), 4}, + {big.NewInt(5), 6}, + {big.NewInt(7), 8}, + {big.NewInt(9), 10}, } - for _, val := range vals { - if val.v != "" { - trie.Update([]byte(val.k), []byte(val.v)) - } else { - trie.Delete([]byte(val.k)) + + nonExistentKey := big.NewInt(11) + + for _, td := range testData { + err := zkTrie.TryUpdate([]byte{byte(td.key.Int64())}, 1, []Byte32{{td.value}}) + assert.NoError(t, err) + } + _, err := zkTrie.Root() + assert.NoError(t, err) + + t.Run("Test with existent key", func(t *testing.T) { + for _, td := range testData { + + node, err := zkTrie.GetLeafNode([]byte{byte(td.key.Int64())}) + assert.NoError(t, err) + assert.Equal(t, 1, len(node.ValuePreimage)) + assert.Equal(t, (&Byte32{td.value})[:], node.ValuePreimage[0][:]) + proof, node, err := BuildZkTrieProof(zkTrie.rootKey, td.key, 10, zkTrie.GetNodeByHash) + assert.NoError(t, err) + + valid := VerifyProofZkTrie(zkTrie.rootKey, proof, node) + assert.True(t, valid) } + }) + + t.Run("Test with non-existent key", func(t *testing.T) { + proof, node, err := BuildZkTrieProof(zkTrie.rootKey, nonExistentKey, 10, zkTrie.GetNodeByHash) + assert.NoError(t, err) + assert.False(t, proof.Existence) + valid := VerifyProofZkTrie(zkTrie.rootKey, proof, node) + assert.True(t, valid) + nodeAnother, err := zkTrie.GetLeafNode([]byte{byte(big.NewInt(1).Int64())}) + assert.NoError(t, err) + valid = VerifyProofZkTrie(zkTrie.rootKey, proof, nodeAnother) + assert.False(t, valid) + + hash, err := proof.Verify(node.nodeHash) + assert.NoError(t, err) + assert.Equal(t, hash[:], zkTrie.rootKey[:]) + }) +} + +func TestMerkleTree_GraphViz(t *testing.T) { + mt, _ := newTestingMerkle(t) + + var buffer bytes.Buffer + err := mt.GraphViz(&buffer, nil) + assert.NoError(t, err) + assert.Equal(t, "--------\nGraphViz of the ZkTrie with RootHash 0\ndigraph hierarchy {\nnode [fontname=Monospace,fontsize=10,shape=box]\n}\nEnd of GraphViz of the ZkTrie with RootHash 0\n--------\n", buffer.String()) + buffer.Reset() + + key1 := []byte{1} //0b1 
+ err = mt.TryUpdate(key1, 1, []Byte32{{1}}) + assert.NoError(t, err) + key2 := []byte{3} //0b11 + err = mt.TryUpdate(key2, 1, []Byte32{{3}}) + assert.NoError(t, err) + + err = mt.GraphViz(&buffer, nil) + assert.NoError(t, err) + assert.Equal(t, "--------\nGraphViz of the ZkTrie with RootHash 1210085283654691963881487672127167617844540538182653450104829037534096200821\ndigraph hierarchy {\nnode [fontname=Monospace,fontsize=10,shape=box]\n\"12100852...\" -> {\"95649672...\" \"20807384...\"}\n\"95649672...\" [style=filled];\n\"20807384...\" [style=filled];\n}\nEnd of GraphViz of the ZkTrie with RootHash 1210085283654691963881487672127167617844540538182653450104829037534096200821\n--------\n", buffer.String()) + buffer.Reset() +} + +func TestZkTrie_GetUpdateDelete(t *testing.T) { + mt, _ := newTestingMerkle(t) + val, err := mt.TryGet([]byte("key")) + assert.NoError(t, err) + assert.Nil(t, val) + assert.Equal(t, common.Hash{}, mt.Hash()) + + err = mt.TryUpdate([]byte("key"), 1, []Byte32{{1}}) + assert.NoError(t, err) + expected := common.HexToHash("0x0b9402772b5bfa4c7caaaeb14f489ae201d536c430c3fc29abb0fde923cd1df4") + assert.Equal(t, expected, mt.Hash()) + + val, err = mt.TryGet([]byte("key")) + assert.NoError(t, err) + assert.Equal(t, (&Byte32{1}).Bytes(), val) + + err = mt.TryDelete([]byte("key")) + assert.NoError(t, err) + assert.Equal(t, common.Hash{}, mt.Hash()) + + val, err = mt.TryGet([]byte("key")) + assert.NoError(t, err) + assert.Nil(t, val) +} + +func TestZkTrie_Copy(t *testing.T) { + mt, _ := newTestingMerkle(t) + + mt.TryUpdate([]byte("key"), 1, []Byte32{{1}}) + + copyTrie := mt.Copy() + val, err := copyTrie.TryGet([]byte("key")) + assert.NoError(t, err) + assert.Equal(t, (&Byte32{1}).Bytes(), val) +} + +func TestZkTrie_ProveAndProveWithDeletion(t *testing.T) { + mt, _ := newTestingMerkle(t) + + keys := []string{"key1", "key2", "key3", "key4", "key5"} + for i, keyStr := range keys { + key := make([]byte, 32) + copy(key, []byte(keyStr)) + + err := 
mt.TryUpdate(key, uint32(i+1), []Byte32{{byte(uint32(i + 1))}}) + assert.NoError(t, err) + + writeNode := func(n *Node) error { + return nil + } + + k, err := ToSecureKey(key) + assert.NoError(t, err) + + for j := 0; j <= i; j++ { + err = mt.ProveWithDeletion(NewHashFromBigInt(k).Bytes(), uint(j), writeNode, nil) + assert.NoError(t, err) + } + } +} + +func newHashFromHex(h string) (*Hash, error) { + return NewHashFromCheckedBytes(common.FromHex(h)) +} + +func TestHashParsers(t *testing.T) { + h0 := NewHashFromBigInt(big.NewInt(0)) + assert.Equal(t, "0", h0.String()) + h1 := NewHashFromBigInt(big.NewInt(1)) + assert.Equal(t, "1", h1.String()) + h10 := NewHashFromBigInt(big.NewInt(10)) + assert.Equal(t, "10", h10.String()) + + h7l := NewHashFromBigInt(big.NewInt(1234567)) + assert.Equal(t, "1234567", h7l.String()) + h8l := NewHashFromBigInt(big.NewInt(12345678)) + assert.Equal(t, "12345678...", h8l.String()) + + b, ok := new(big.Int).SetString("4932297968297298434239270129193057052722409868268166443802652458940273154854", 10) //nolint:lll + assert.True(t, ok) + h := NewHashFromBigInt(b) + assert.Equal(t, "4932297968297298434239270129193057052722409868268166443802652458940273154854", h.BigInt().String()) //nolint:lll + assert.Equal(t, "49322979...", h.String()) + assert.Equal(t, "0ae794eb9c3d8bbb9002e993fc2ed301dcbd2af5508ed072c375e861f1aa5b26", h.Hex()) + + b1, err := NewBigIntFromHashBytes(b.Bytes()) + assert.Nil(t, err) + assert.Equal(t, new(big.Int).SetBytes(b.Bytes()).String(), b1.String()) + + b2, err := NewHashFromCheckedBytes(b.Bytes()) + assert.Nil(t, err) + assert.Equal(t, b.String(), b2.BigInt().String()) + + h2, err := newHashFromHex(h.Hex()) + assert.Nil(t, err) + assert.Equal(t, h, h2) + _, err = newHashFromHex("0x12") + assert.NotNil(t, err) + + // check limits + a := new(big.Int).Sub(constants.Q, big.NewInt(1)) + testHashParsers(t, a) + a = big.NewInt(int64(1)) + testHashParsers(t, a) +} + +func testHashParsers(t *testing.T, a *big.Int) { + h := 
NewHashFromBigInt(a) + assert.Equal(t, a, h.BigInt()) + hFromBytes, err := NewHashFromCheckedBytes(h.Bytes()) + assert.Nil(t, err) + assert.Equal(t, h, hFromBytes) + assert.Equal(t, a, hFromBytes.BigInt()) + assert.Equal(t, a.String(), hFromBytes.BigInt().String()) + hFromHex, err := newHashFromHex(h.Hex()) + assert.Nil(t, err) + assert.Equal(t, h, hFromHex) + + aBIFromHBytes, err := NewBigIntFromHashBytes(h.Bytes()) + assert.Nil(t, err) + assert.Equal(t, a, aBIFromHBytes) + assert.Equal(t, new(big.Int).SetBytes(a.Bytes()).String(), aBIFromHBytes.String()) +} + +func TestMerkleTree_AddUpdateGetWord_2(t *testing.T) { + mt, _ := newTestingMerkle(t) + err := mt.TryUpdate([]byte{1}, 1, []Byte32{{2}}) + assert.Nil(t, err) + err = mt.TryUpdate([]byte{3}, 1, []Byte32{{4}}) + assert.Nil(t, err) + err = mt.TryUpdate([]byte{5}, 1, []Byte32{{6}}) + assert.Nil(t, err) + + mt.GetLeafNode([]byte{1}) + node, err := mt.GetLeafNode([]byte{1}) + assert.Nil(t, err) + assert.Equal(t, len(node.ValuePreimage), 1) + assert.Equal(t, (&Byte32{2})[:], node.ValuePreimage[0][:]) + node, err = mt.GetLeafNode([]byte{3}) + assert.Nil(t, err) + assert.Equal(t, len(node.ValuePreimage), 1) + assert.Equal(t, (&Byte32{4})[:], node.ValuePreimage[0][:]) + node, err = mt.GetLeafNode([]byte{5}) + assert.Nil(t, err) + assert.Equal(t, len(node.ValuePreimage), 1) + assert.Equal(t, (&Byte32{6})[:], node.ValuePreimage[0][:]) + + err = mt.TryUpdate([]byte{1}, 1, []Byte32{{7}}) + assert.Nil(t, err) + err = mt.TryUpdate([]byte{3}, 1, []Byte32{{8}}) + assert.Nil(t, err) + err = mt.TryUpdate([]byte{5}, 1, []Byte32{{9}}) + assert.Nil(t, err) + + node, err = mt.GetLeafNode([]byte{1}) + assert.Nil(t, err) + assert.Equal(t, len(node.ValuePreimage), 1) + assert.Equal(t, (&Byte32{7})[:], node.ValuePreimage[0][:]) + node, err = mt.GetLeafNode([]byte{3}) + assert.Nil(t, err) + assert.Equal(t, len(node.ValuePreimage), 1) + assert.Equal(t, (&Byte32{8})[:], node.ValuePreimage[0][:]) + node, err = mt.GetLeafNode([]byte{5}) + 
assert.Nil(t, err) + assert.Equal(t, len(node.ValuePreimage), 1) + assert.Equal(t, (&Byte32{9})[:], node.ValuePreimage[0][:]) + _, err = mt.GetLeafNode([]byte{100}) + assert.Equal(t, ErrKeyNotFound, err) +} + +func TestMerkleTree_UpdateAccount(t *testing.T) { + mt, _ := newTestingMerkle(t) + + acc1 := &types.StateAccount{ + Nonce: 1, + Balance: big.NewInt(10000000), + Root: common.HexToHash("22fb59aa5410ed465267023713ab42554c250f394901455a3366e223d5f7d147"), + KeccakCodeHash: common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), + PoseidonCodeHash: common.HexToHash("0c0a77f6e063b4b62eb7d9ed6f427cf687d8d0071d751850cfe5d136bc60d3ab").Bytes(), + CodeSize: 0, + } + value, flag := acc1.MarshalFields() + accValue := []Byte32{} + for _, v := range value { + accValue = append(accValue, *NewByte32FromBytes(v[:])) + } + err := mt.TryUpdate(common.HexToAddress("0x05fDbDfaE180345C6Cff5316c286727CF1a43327").Bytes(), flag, accValue) + assert.Nil(t, err) + + acc2 := &types.StateAccount{ + Nonce: 5, + Balance: big.NewInt(50000000), + Root: common.HexToHash("0"), + KeccakCodeHash: common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), + PoseidonCodeHash: common.HexToHash("05d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), + CodeSize: 5, } - hash := trie.Hash() - exp := common.HexToHash("29b235a58c3c25ab83010c327d5932bcf05324b7d6b1185e650798034783ca9d") - if hash != exp { - t.Errorf("expected %x got %x", exp, hash) + value, flag = acc2.MarshalFields() + accValue = []Byte32{} + for _, v := range value { + accValue = append(accValue, *NewByte32FromBytes(v[:])) } + err = mt.TryUpdate(common.HexToAddress("0x4cb1aB63aF5D8931Ce09673EbD8ae2ce16fD6571").Bytes(), flag, accValue) + assert.Nil(t, err) + + bt, err := mt.TryGet(common.HexToAddress("0x05fDbDfaE180345C6Cff5316c286727CF1a43327").Bytes()) + assert.Nil(t, err) + + acc, err := types.UnmarshalStateAccount(bt) + assert.Nil(t, err) + 
assert.Equal(t, acc1.Nonce, acc.Nonce) + assert.Equal(t, acc1.Balance.Uint64(), acc.Balance.Uint64()) + assert.Equal(t, acc1.Root.Bytes(), acc.Root.Bytes()) + assert.Equal(t, acc1.KeccakCodeHash, acc.KeccakCodeHash) + assert.Equal(t, acc1.PoseidonCodeHash, acc.PoseidonCodeHash) + assert.Equal(t, acc1.CodeSize, acc.CodeSize) + + bt, err = mt.TryGet(common.HexToAddress("0x4cb1aB63aF5D8931Ce09673EbD8ae2ce16fD6571").Bytes()) + assert.Nil(t, err) + + acc, err = types.UnmarshalStateAccount(bt) + assert.Nil(t, err) + assert.Equal(t, acc2.Nonce, acc.Nonce) + assert.Equal(t, acc2.Balance.Uint64(), acc.Balance.Uint64()) + assert.Equal(t, acc2.Root.Bytes(), acc.Root.Bytes()) + assert.Equal(t, acc2.KeccakCodeHash, acc.KeccakCodeHash) + assert.Equal(t, acc2.PoseidonCodeHash, acc.PoseidonCodeHash) + assert.Equal(t, acc2.CodeSize, acc.CodeSize) + + bt, err = mt.TryGet(common.HexToAddress("0x8dE13967F19410A7991D63c2c0179feBFDA0c261").Bytes()) + assert.Nil(t, err) + assert.Nil(t, bt) + + err = mt.TryDelete(common.HexToAddress("0x05fDbDfaE180345C6Cff5316c286727CF1a43327").Bytes()) + assert.Nil(t, err) + + bt, err = mt.TryGet(common.HexToAddress("0x05fDbDfaE180345C6Cff5316c286727CF1a43327").Bytes()) + assert.Nil(t, err) + assert.Nil(t, bt) + + err = mt.TryDelete(common.HexToAddress("0x4cb1aB63aF5D8931Ce09673EbD8ae2ce16fD6571").Bytes()) + assert.Nil(t, err) + + bt, err = mt.TryGet(common.HexToAddress("0x4cb1aB63aF5D8931Ce09673EbD8ae2ce16fD6571").Bytes()) + assert.Nil(t, err) + assert.Nil(t, bt) +} + +func TestDecodeSMTProof(t *testing.T) { + node, err := DecodeSMTProof(magicSMTBytes) + assert.NoError(t, err) + assert.Nil(t, node) + + k1 := NewHashFromBytes([]byte{1, 2, 3, 4, 5}) + k2 := NewHashFromBytes([]byte{6, 7, 8, 9, 0}) + origNode := NewParentNode(NodeTypeBranch_0, k1, k2) + node, err = DecodeSMTProof(origNode.Value()) + assert.NoError(t, err) + assert.Equal(t, origNode.Value(), node.Value()) } func TestZktrieGetKey(t *testing.T) { - trie := newEmptyZkTrie() + trie, _ := 
newTestingMerkle(t) key := []byte("0a1b2c3d4e5f6g7h8i9j0a1b2c3d4e5f") value := []byte("9j8i7h6g5f4e3d2c1b0a9j8i7h6g5f4e") - trie.Update(key, value) + trie.TryUpdate(key, 1, []Byte32{*NewByte32FromBytes(value)}) - kPreimage := zkt.NewByte32FromBytesPaddingZero(key) + kPreimage := NewByte32FromBytesPaddingZero(key) kHash, err := kPreimage.Hash() assert.Nil(t, err) - - if !bytes.Equal(trie.Get(key), value) { - t.Errorf("Get did not return bar") - } if k := trie.GetKey(kHash.Bytes()); !bytes.Equal(k, key) { t.Errorf("GetKey returned %q, want %q", k, key) } @@ -124,7 +765,7 @@ func TestZktrieGetKey(t *testing.T) { func TestZkTrieConcurrency(t *testing.T) { // Create an initial trie and copy if for concurrent access - _, trie, _ := makeTestZkTrie() + trie, _ := newTestingMerkle(t) threads := runtime.NumCPU() tries := make([]*ZkTrie, threads) @@ -141,15 +782,15 @@ func TestZkTrieConcurrency(t *testing.T) { for j := byte(0); j < 255; j++ { // Map the same data under multiple keys key, val := common.LeftPadBytes([]byte{byte(index), 1, j}, 32), bytes.Repeat([]byte{j}, 32) - tries[index].Update(key, val) + tries[index].TryUpdate(key, 1, []Byte32{*NewByte32FromBytes(val)}) key, val = common.LeftPadBytes([]byte{byte(index), 2, j}, 32), bytes.Repeat([]byte{j}, 32) - tries[index].Update(key, val) + tries[index].TryUpdate(key, 1, []Byte32{*NewByte32FromBytes(val)}) // Add some other data to inflate the trie for k := byte(3); k < 13; k++ { key, val = common.LeftPadBytes([]byte{byte(index), k, j}, 32), bytes.Repeat([]byte{k, j}, 16) - tries[index].Update(key, val) + tries[index].TryUpdate(key, 1, []Byte32{*NewByte32FromBytes(val)}) } } tries[index].Commit(false) @@ -159,109 +800,23 @@ func TestZkTrieConcurrency(t *testing.T) { pend.Wait() } -func tempDBZK(b *testing.B) (string, *Database) { - dir, err := ioutil.TempDir("", "zktrie-bench") - assert.NoError(b, err) - - diskdb, err := rawdb.NewLevelDBDatabase(dir, 256, 0, "", false) - assert.NoError(b, err) - config := &Config{ - 
Preimages: true, - HashDB: &hashdb.Config{CleanCacheSize: 256}, - IsUsingZktrie: true, - } - return dir, NewDatabase(diskdb, config) -} - -const benchElemCountZk = 10000 - -func BenchmarkZkTrieGet(b *testing.B) { - dir, tmpdb := tempDBZK(b) - zkTrie, _ := NewZkTrie(common.Hash{}, NewZktrieDatabaseFromTriedb(tmpdb)) - defer func() { - ldb := zkTrie.db.db.diskdb - ldb.Close() - os.RemoveAll(dir) - }() - - k := make([]byte, 32) - for i := 0; i < benchElemCountZk; i++ { - binary.LittleEndian.PutUint64(k, uint64(i)) - - err := zkTrie.TryUpdate(k, k) - assert.NoError(b, err) - } - - zkTrie.db.db.Commit(common.Hash{}, true) - b.ResetTimer() - for i := 0; i < b.N; i++ { - binary.LittleEndian.PutUint64(k, uint64(i)) - _, err := zkTrie.TryGet(k) - assert.NoError(b, err) - } - b.StopTimer() -} - -func BenchmarkZkTrieUpdate(b *testing.B) { - dir, tmpdb := tempDBZK(b) - zkTrie, _ := NewZkTrie(common.Hash{}, NewZktrieDatabaseFromTriedb(tmpdb)) - defer func() { - ldb := zkTrie.db.db.diskdb - ldb.Close() - os.RemoveAll(dir) - }() - - k := make([]byte, 32) - v := make([]byte, 32) - b.ReportAllocs() - - for i := 0; i < benchElemCountZk; i++ { - binary.LittleEndian.PutUint64(k, uint64(i)) - err := zkTrie.TryUpdate(k, k) - assert.NoError(b, err) - } - binary.LittleEndian.PutUint64(k, benchElemCountZk/2) - - //zkTrie.Commit(false) - zkTrie.db.db.Commit(common.Hash{}, true) - b.ResetTimer() - for i := 0; i < b.N; i++ { - binary.LittleEndian.PutUint64(k, uint64(i)) - binary.LittleEndian.PutUint64(v, 0xffffffff+uint64(i)) - err := zkTrie.TryUpdate(k, v) - assert.NoError(b, err) - } - b.StopTimer() -} - func TestZkTrieDelete(t *testing.T) { - key := make([]byte, 32) - value := make([]byte, 32) - trie1 := newEmptyZkTrie() + trie1, _ := newTestingMerkle(t) var count int = 6 var hashes []common.Hash hashes = append(hashes, trie1.Hash()) for i := 0; i < count; i++ { - binary.LittleEndian.PutUint64(key, uint64(i)) - binary.LittleEndian.PutUint64(value, uint64(i)) - err := trie1.TryUpdate(key, 
value) + err := trie1.TryUpdate([]byte{byte(i)}, 1, []Byte32{{byte(i)}}) assert.NoError(t, err) hashes = append(hashes, trie1.Hash()) } - // binary.LittleEndian.PutUint64(key, uint64(0xffffff)) - // err := trie1.TryDelete(key) - // assert.Equal(t, err, zktrie.ErrKeyNotFound) - - trie1.Commit(false) - for i := count - 1; i >= 0; i-- { - binary.LittleEndian.PutUint64(key, uint64(i)) - v, err := trie1.TryGet(key) + v, err := trie1.TryGet([]byte{byte(i)}) assert.NoError(t, err) assert.NotEmpty(t, v) - err = trie1.TryDelete(key) + err = trie1.TryDelete([]byte{byte(i)}) assert.NoError(t, err) hash := trie1.Hash() assert.Equal(t, hashes[i].Hex(), hash.Hex()) From 4f473ce40bd46e1fc5dabb5121032315411406da Mon Sep 17 00:00:00 2001 From: Zhang Zhuo Date: Sun, 10 Nov 2024 16:36:44 +0700 Subject: [PATCH 41/41] chore(libzkp): upgrade to v0.13.2(darwinV2) (#1089) libzkp: v0.13.2 --- rollup/ccc/libzkp/Cargo.lock | 135 ++++++++++++++++++------------- rollup/ccc/libzkp/Cargo.toml | 2 +- rollup/ccc/libzkp/rust-toolchain | 2 +- 3 files changed, 79 insertions(+), 60 deletions(-) diff --git a/rollup/ccc/libzkp/Cargo.lock b/rollup/ccc/libzkp/Cargo.lock index f923299e4b63..17e7907b2020 100644 --- a/rollup/ccc/libzkp/Cargo.lock +++ b/rollup/ccc/libzkp/Cargo.lock @@ -30,8 +30,8 @@ dependencies = [ [[package]] name = "aggregator" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" +version = "0.13.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.2#77703545b439b8d267b92ae1f10e369c9441b5af" dependencies = [ "ark-std 0.3.0", "bitstream-io", @@ -64,13 +64,14 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.3" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ 
"cfg-if 1.0.0", "once_cell", "version_check", + "zerocopy", ] [[package]] @@ -297,7 +298,7 @@ checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09" dependencies = [ "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.32", ] [[package]] @@ -340,7 +341,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.32", ] [[package]] @@ -536,14 +537,13 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "bus-mapping" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" +version = "0.13.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.2#77703545b439b8d267b92ae1f10e369c9441b5af" dependencies = [ "eth-types", "ethers-core", "ethers-providers", "ethers-signers", - "external-tracer", "gadgets", "halo2_proofs", "hex", @@ -553,7 +553,6 @@ dependencies = [ "mpt-zktrie", "num", "poseidon-circuit", - "rand", "revm-precompile", "serde", "serde_json", @@ -1045,7 +1044,7 @@ checksum = "b893c4eb2dc092c811165f84dc7447fae16fb66521717968c34c509b39b1a5c5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.32", ] [[package]] @@ -1125,8 +1124,8 @@ dependencies = [ [[package]] name = "eth-types" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" +version = "0.13.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.2#77703545b439b8d267b92ae1f10e369c9441b5af" dependencies = [ "base64 0.13.1", "ethers-core", @@ -1143,7 +1142,6 @@ dependencies = [ "revm-primitives", "serde", "serde_json", - "serde_stacker", "serde_with", "sha3 0.10.8", "strum 0.25.0", @@ -1282,8 +1280,8 @@ dependencies = [ [[package]] name = "external-tracer" -version = "0.11.0" -source = 
"git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" +version = "0.13.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.2#77703545b439b8d267b92ae1f10e369c9441b5af" dependencies = [ "eth-types", "geth-utils", @@ -1410,7 +1408,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.32", ] [[package]] @@ -1464,8 +1462,8 @@ dependencies = [ [[package]] name = "gadgets" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" +version = "0.13.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.2#77703545b439b8d267b92ae1f10e369c9441b5af" dependencies = [ "eth-types", "halo2_proofs", @@ -1487,8 +1485,8 @@ dependencies = [ [[package]] name = "geth-utils" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" +version = "0.13.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.2#77703545b439b8d267b92ae1f10e369c9441b5af" dependencies = [ "env_logger 0.10.0", "gobuild", @@ -1593,7 +1591,7 @@ dependencies = [ [[package]] name = "halo2-base" version = "0.2.2" -source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#817cace374a9f4b2eca682b1cc36f143255ea25f" +source = "git+https://github.com/scroll-tech/halo2-lib?branch=v0.2#817cace374a9f4b2eca682b1cc36f143255ea25f" dependencies = [ "ff", "halo2_proofs", @@ -1608,7 +1606,7 @@ dependencies = [ [[package]] name = "halo2-ecc" version = "0.2.2" -source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#817cace374a9f4b2eca682b1cc36f143255ea25f" +source = "git+https://github.com/scroll-tech/halo2-lib?branch=v0.2#817cace374a9f4b2eca682b1cc36f143255ea25f" dependencies = [ "ff", "group", @@ -2236,8 +2234,8 @@ dependencies = 
[ [[package]] name = "mock" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" +version = "0.13.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.2#77703545b439b8d267b92ae1f10e369c9441b5af" dependencies = [ "eth-types", "ethers-core", @@ -2251,8 +2249,8 @@ dependencies = [ [[package]] name = "mpt-zktrie" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" +version = "0.13.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.2#77703545b439b8d267b92ae1f10e369c9441b5af" dependencies = [ "eth-types", "halo2curves", @@ -2401,7 +2399,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.32", ] [[package]] @@ -2592,7 +2590,7 @@ checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.32", ] [[package]] @@ -2635,7 +2633,7 @@ dependencies = [ [[package]] name = "poseidon-base" version = "0.1.0" -source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main#6cc36ab9dfa153f554ff7b84305f39838366a8df" +source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main#b978cee00aae1e0a1e79e0d74c4683b137f5ea2d" dependencies = [ "bitvec", "halo2curves", @@ -2646,7 +2644,7 @@ dependencies = [ [[package]] name = "poseidon-circuit" version = "0.1.0" -source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main#6cc36ab9dfa153f554ff7b84305f39838366a8df" +source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main#b978cee00aae1e0a1e79e0d74c4683b137f5ea2d" dependencies = [ "ff", "halo2_proofs", @@ -2724,8 +2722,8 @@ dependencies = [ [[package]] name = "prover" -version = "0.11.0" -source = 
"git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" +version = "0.13.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.2#77703545b439b8d267b92ae1f10e369c9441b5af" dependencies = [ "aggregator", "anyhow", @@ -3386,7 +3384,7 @@ checksum = "f28482318d6641454cb273da158647922d1be6b5a2fcc6165cd89ebdd7ed576b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.32", ] [[package]] @@ -3526,7 +3524,7 @@ checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" [[package]] name = "snark-verifier" version = "0.1.0" -source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#fe1f8906041ad323034881fbd808908250d44829" +source = "git+https://github.com/scroll-tech/snark-verifier?branch=v0.1#a4f92916c0f61193f325de6c195a733289a7f6a0" dependencies = [ "bytes", "ethereum-types", @@ -3549,7 +3547,7 @@ dependencies = [ [[package]] name = "snark-verifier-sdk" version = "0.0.1" -source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#fe1f8906041ad323034881fbd808908250d44829" +source = "git+https://github.com/scroll-tech/snark-verifier?branch=v0.1#a4f92916c0f61193f325de6c195a733289a7f6a0" dependencies = [ "bincode", "ethereum-types", @@ -3657,7 +3655,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.27", + "syn 2.0.32", ] [[package]] @@ -3692,9 +3690,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.27" +version = "2.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b60f673f44a8255b9c8c657daf66a596d435f2da81a555b06dc644d080ba45e0" +checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" dependencies = [ "proc-macro2", "quote", @@ -3746,7 +3744,7 @@ checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" dependencies = [ "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.32", ] [[package]] @@ -3902,7 +3900,7 @@ checksum = 
"5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.32", ] [[package]] @@ -4085,26 +4083,27 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if 1.0.0", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.32", "wasm-bindgen-shared", ] @@ -4122,9 +4121,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4132,22 +4131,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.32", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" 
+version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "web-sys" @@ -4339,6 +4338,26 @@ dependencies = [ "tap", ] +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.32", +] + [[package]] name = "zeroize" version = "1.8.1" @@ -4356,13 +4375,13 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.32", ] [[package]] name = "zkevm-circuits" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" +version = "0.13.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.2#77703545b439b8d267b92ae1f10e369c9441b5af" dependencies = [ "array-init", "bus-mapping", @@ -4420,7 +4439,7 @@ dependencies = [ [[package]] name = "zktrie" version = "0.3.0" -source = "git+https://github.com/scroll-tech/zktrie.git?branch=v0.7#23181f209e94137f74337b150179aeb80c72e7c8" +source = "git+https://github.com/scroll-tech/zktrie.git?branch=v0.9#460b8c22af65b7809164548cba1e0253b6db5a70" dependencies = [ "gobuild", "zktrie_rust", @@ -4429,7 +4448,7 @@ dependencies = [ [[package]] name = "zktrie_rust" version = "0.3.0" -source = "git+https://github.com/scroll-tech/zktrie.git?branch=v0.7#23181f209e94137f74337b150179aeb80c72e7c8" +source = 
"git+https://github.com/scroll-tech/zktrie.git?branch=v0.9#460b8c22af65b7809164548cba1e0253b6db5a70" dependencies = [ "hex", "lazy_static", diff --git a/rollup/ccc/libzkp/Cargo.toml b/rollup/ccc/libzkp/Cargo.toml index e34369bf91ca..5bf298dc62b4 100644 --- a/rollup/ccc/libzkp/Cargo.toml +++ b/rollup/ccc/libzkp/Cargo.toml @@ -23,7 +23,7 @@ poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" } [dependencies] -prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.8", default-features = false, features = ["parallel_syn", "scroll", "strict-ccc"] } +prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.13.2", default-features = false, features = ["parallel_syn", "scroll", "strict-ccc"] } anyhow = "1.0" base64 = "0.13.0" diff --git a/rollup/ccc/libzkp/rust-toolchain b/rollup/ccc/libzkp/rust-toolchain index f1d81c42196a..e936f9157ba2 100644 --- a/rollup/ccc/libzkp/rust-toolchain +++ b/rollup/ccc/libzkp/rust-toolchain @@ -1 +1 @@ -nightly-2023-12-03 \ No newline at end of file +nightly-2024-07-07