From 28d007bd4003c98bf94e94c602595a6e83f2b9dd Mon Sep 17 00:00:00 2001
From: Matt <98158711+BedrockSquirrel@users.noreply.github.com>
Date: Tue, 12 Sep 2023 10:54:58 +0100
Subject: [PATCH 1/2] Filter for tx with any obscuro interactions (#1509)

---
 go/common/host/services.go       |  2 +-
 go/ethadapter/geth_rpc_client.go |  7 +++++++
 go/ethadapter/interface.go       |  1 +
 go/host/enclave/guardian.go      |  5 ++++-
 go/host/l1/blockrepository.go    | 20 ++++++++++++++++----
 integration/ethereummock/node.go | 21 +++++++++++++++++++++
 6 files changed, 50 insertions(+), 6 deletions(-)

diff --git a/go/common/host/services.go b/go/common/host/services.go
index fb61454599..af2e8e63a1 100644
--- a/go/common/host/services.go
+++ b/go/common/host/services.go
@@ -86,7 +86,7 @@ type L1BlockRepository interface {
 	// It returns the new block, a bool which is true if the block is the current L1 head and a bool if the block is on a different fork to prevBlock
 	FetchNextBlock(prevBlock gethcommon.Hash) (*types.Block, bool, error)
 	// FetchObscuroReceipts returns the receipts for a given L1 block
-	FetchObscuroReceipts(block *common.L1Block) types.Receipts
+	FetchObscuroReceipts(block *common.L1Block) (types.Receipts, error)
 }
 
 // L1BlockHandler is an interface for receiving new blocks from the repository as they arrive
diff --git a/go/ethadapter/geth_rpc_client.go b/go/ethadapter/geth_rpc_client.go
index 67349af88e..156d143d9c 100644
--- a/go/ethadapter/geth_rpc_client.go
+++ b/go/ethadapter/geth_rpc_client.go
@@ -215,6 +215,13 @@ func (e *gethRPCClient) BalanceAt(address gethcommon.Address, blockNum *big.Int)
 	return e.client.BalanceAt(ctx, blockNum)
 }
 
+func (e *gethRPCClient) GetLogs(q ethereum.FilterQuery) ([]types.Log, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+	defer cancel()
+
+	return e.client.FilterLogs(ctx, q)
+}
+
 func (e *gethRPCClient) Stop() {
 	e.client.Close()
 }
diff --git a/go/ethadapter/interface.go b/go/ethadapter/interface.go
index 3e8efc6863..8e8a5690bb 100644
--- a/go/ethadapter/interface.go
+++ b/go/ethadapter/interface.go
@@ -26,6 +26,7 @@ type EthClient interface {
 	TransactionReceipt(hash gethcommon.Hash) (*types.Receipt, error)              // fetches the ethereum transaction receipt
 	Nonce(address gethcommon.Address) (uint64, error)                             // fetches the account nonce to use in the next transaction
 	BalanceAt(account gethcommon.Address, blockNumber *big.Int) (*big.Int, error) // fetches the balance of the account
+	GetLogs(q ethereum.FilterQuery) ([]types.Log, error)                          // fetches the logs for a given query
 
 	Info() Info                            // retrieves the node Info
 	FetchHeadBlock() (*types.Block, error) // retrieves the block at head height
diff --git a/go/host/enclave/guardian.go b/go/host/enclave/guardian.go
index a2bfa86b5f..1223ee9ece 100644
--- a/go/host/enclave/guardian.go
+++ b/go/host/enclave/guardian.go
@@ -381,12 +381,15 @@ func (g *Guardian) catchupWithL2() error {
 // todo - @matt - think about removing the TryLock
 func (g *Guardian) submitL1Block(block *common.L1Block, isLatest bool) (bool, error) {
 	g.logger.Trace("submitting L1 block", log.BlockHashKey, block.Hash(), log.BlockHeightKey, block.Number())
-	receipts := g.sl.L1Repo().FetchObscuroReceipts(block)
 	if !g.submitDataLock.TryLock() {
 		g.logger.Info("Unable to submit block, already submitting another block")
 		// we are already submitting a block, and we don't want to leak goroutines, we wil catch up with the block later
 		return false, nil
 	}
+	receipts, err := g.sl.L1Repo().FetchObscuroReceipts(block)
+	if err != nil {
+		return false, fmt.Errorf("could not fetch obscuro receipts for block=%s - %w", block.Hash(), err)
+	}
 	resp, err := g.enclaveClient.SubmitL1Block(*block, receipts, isLatest)
 	g.submitDataLock.Unlock()
 	if err != nil {
diff --git a/go/host/l1/blockrepository.go b/go/host/l1/blockrepository.go
index cac26d7db6..ae6ebac168 100644
--- a/go/host/l1/blockrepository.go
+++ b/go/host/l1/blockrepository.go
@@ -128,11 +128,23 @@ func (r *Repository) latestCanonAncestor(blkHash gethcommon.Hash) (*types.Block,
 }
 
 // FetchObscuroReceipts returns all obscuro-relevant receipts for an L1 block
-func (r *Repository) FetchObscuroReceipts(block *common.L1Block) types.Receipts {
+func (r *Repository) FetchObscuroReceipts(block *common.L1Block) (types.Receipts, error) {
 	receipts := make([]*types.Receipt, len(block.Transactions()))
+
+	blkHash := block.Hash()
+	// we want to send receipts for any transactions that produced obscuro-relevant log events
+	logs, err := r.ethClient.GetLogs(ethereum.FilterQuery{BlockHash: &blkHash, Addresses: r.obscuroRelevantContracts})
+	if err != nil {
+		return nil, fmt.Errorf("unable to fetch logs for L1 block - %w", err)
+	}
+	// make a lookup map of the relevant tx hashes which need receipts
+	relevantTx := make(map[gethcommon.Hash]bool)
+	for _, l := range logs {
+		relevantTx[l.TxHash] = true
+	}
+
 	for idx, transaction := range block.Transactions() {
-		if !r.isObscuroTransaction(transaction) {
+		if !relevantTx[transaction.Hash()] && !r.isObscuroTransaction(transaction) {
 			// put in a dummy receipt so that the index matches the transaction index
 			// (the receipts list maintains the indexes of the transactions, it is a sparse list)
 			receipts[idx] = &types.Receipt{Status: types.ReceiptStatusFailed}
@@ -146,12 +158,12 @@ func (r *Repository) FetchObscuroReceipts(block *common.L1Block) types.Receipts
 		}
 
 		r.logger.Trace("Adding receipt", "status", receipt.Status, log.TxKey, transaction.Hash(),
-			log.BlockHashKey, block.Hash(), log.CmpKey, log.CrossChainCmp)
+			log.BlockHashKey, blkHash, log.CmpKey, log.CrossChainCmp)
 
 		receipts[idx] = receipt
 	}
 
-	return receipts
+	return receipts, nil
 }
 
 // stream blocks from L1 as they arrive and forward them to subscribers, no guarantee of perfect ordering or that there won't be gaps.
diff --git a/integration/ethereummock/node.go b/integration/ethereummock/node.go
index 7926ada297..feb4626d06 100644
--- a/integration/ethereummock/node.go
+++ b/integration/ethereummock/node.go
@@ -219,6 +219,27 @@ func (m *Node) BalanceAt(gethcommon.Address, *big.Int) (*big.Int, error) {
 	panic("not implemented")
 }
 
+// GetLogs is a mock method - we don't really have logs on the mock transactions, so it returns a basic log for every tx
+// so the host recognises them as relevant
+func (m *Node) GetLogs(fq ethereum.FilterQuery) ([]types.Log, error) {
+	logs := make([]types.Log, 0)
+	if fq.BlockHash == nil {
+		return logs, nil
+	}
+	blk, err := m.BlockByHash(*fq.BlockHash)
+	if err != nil {
+		return nil, fmt.Errorf("could not retrieve block. Cause: %w", err)
+	}
+	for _, tx := range blk.Transactions() {
+		dummyLog := types.Log{
+			BlockHash: blk.Hash(),
+			TxHash:    tx.Hash(),
+		}
+		logs = append(logs, dummyLog)
+	}
+	return logs, nil
+}
+
 // Start runs an infinite loop that listens to the two block producing channels and processes them.
 func (m *Node) Start() {
 	if m.mining {

From 764f98d70cfa1785875db2759581034770e93e91 Mon Sep 17 00:00:00 2001
From: Tudor Malene
Date: Tue, 12 Sep 2023 12:26:07 +0100
Subject: [PATCH 2/2] Tudor/aggressive header compression (#1502)

* add compression
* add compression
* reestablish batch broadcast
* clarify and document
* update the l1 proofs
* rename
* fixes
* fix tests
* fix
* more fixes
* more fixes
* more fixes
* more fixes
* add comment
* fix proto name
---
 go/common/headers.go                          |  24 +-
 go/common/rollups.go                          |   8 +-
 go/common/rpc/converters.go                   |   8 +-
 go/common/rpc/generated/enclave.pb.go         |  21 +-
 go/common/rpc/generated/enclave.proto         |   2 +-
 go/common/rpc/generated/enclave_grpc.pb.go    |   2 +-
 go/enclave/components/batch_executor.go       |   7 +-
 go/enclave/components/batch_registry.go       |  10 +-
 go/enclave/components/rollup_compression.go   | 507 ++++++++++++++++++
 go/enclave/components/rollup_consumer.go      |  56 +-
 go/enclave/components/rollup_producer.go      |   2 +-
 go/enclave/core/batch.go                      |  11 +-
 go/enclave/core/rollup.go                     |  77 ---
 go/enclave/crypto/data_enc_service.go         |  36 +-
 go/enclave/enclave.go                         |   4 +-
 go/enclave/evm/evm_facade.go                  |   2 +-
 go/enclave/nodetype/sequencer.go              |  15 +-
 go/enclave/nodetype/validator.go              |  10 +-
 go/enclave/storage/enclavedb/block.go         |   5 +
 go/enclave/storage/interfaces.go              |   2 +
 go/enclave/storage/storage.go                 |  14 +
 go/host/enclave/guardian.go                   |   6 +-
 integration/ethereummock/db.go                |   5 +
 integration/simulation/params/params.go       |   7 +-
 .../simulation_full_network_test.go           |  11 +-
 .../simulation/simulation_geth_in_mem_test.go |  13 +-
 .../simulation/simulation_in_mem_test.go      |   2 -
 integration/simulation/validate_chain.go      | 117 +---
 28 files changed, 681 insertions(+), 303 deletions(-)
 create mode 100644 go/enclave/components/rollup_compression.go

diff --git a/go/common/headers.go b/go/common/headers.go
index 3f8ecc600c..703f7d766a 100644
--- a/go/common/headers.go
+++ b/go/common/headers.go
@@ -6,10 +6,11 @@ import (
 	"math/big"
 	"sync"
 
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/rlp"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/obscuronet/go-obscuro/contracts/generated/MessageBus"
 	"golang.org/x/crypto/sha3"
 )
@@ -83,6 +84,25 @@ type RollupHeader struct {
 	LastBatchSeqNo uint64
 }
 
+// CalldataRollupHeader contains all information necessary to reconstruct the batches included in the rollup.
+// This data structure is serialised, compressed, and encrypted, before being serialised again in the rollup.
+type CalldataRollupHeader struct {
+	FirstBatchSequence    *big.Int
+	FirstCanonBatchHeight *big.Int
+	FirstCanonParentHash  L2BatchHash
+
+	StartTime       uint64
+	BatchTimeDeltas [][]byte // todo - minimize assuming a default of 1 sec and then store only exceptions
+
+	L1HeightDeltas [][]byte // delta of the block height. Stored as a byte array because rlp can't encode negative numbers
+
+	// these fields are for debugging the compression. Uncomment if there are issues
+	// BatchHashes []L2BatchHash
+	// BatchHeaders []*BatchHeader
+
+	ReOrgs [][]byte `rlp:"optional"` // sparse list of reorged headers - non null only for reorgs.
+}
+
 // MarshalJSON custom marshals the RollupHeader into a json
 func (r *RollupHeader) MarshalJSON() ([]byte, error) {
 	type Alias RollupHeader
diff --git a/go/common/rollups.go b/go/common/rollups.go
index 390b9ad3d4..73d64458ee 100644
--- a/go/common/rollups.go
+++ b/go/common/rollups.go
@@ -6,10 +6,10 @@ import (
 
 // ExtRollup is an encrypted form of rollup used when passing the rollup around outside an enclave.
 type ExtRollup struct {
-	Header        *RollupHeader // the fields required by the management contract
-	BatchHeaders  []byte        // compressed batch headers
-	BatchPayloads []byte        // The batches included in the rollup, in external/encrypted form.
-	hash          atomic.Value
+	Header               *RollupHeader // the fields required by the management contract
+	CalldataRollupHeader []byte        // encrypted header useful for recreating the batches
+	BatchPayloads        []byte        // The transactions included in the rollup, in external/encrypted form.
+	hash                 atomic.Value
 }
 
 // Hash returns the keccak256 hash of the rollup's header.
diff --git a/go/common/rpc/converters.go b/go/common/rpc/converters.go
index 2253eb0219..4f8e1f287f 100644
--- a/go/common/rpc/converters.go
+++ b/go/common/rpc/converters.go
@@ -208,7 +208,7 @@ func ToExtRollupMsg(rollup *common.ExtRollup) generated.ExtRollupMsg {
 		return generated.ExtRollupMsg{}
 	}
 
-	return generated.ExtRollupMsg{Header: ToRollupHeaderMsg(rollup.Header), BatchPayloads: rollup.BatchPayloads, BatchHeaders: rollup.BatchHeaders}
+	return generated.ExtRollupMsg{Header: ToRollupHeaderMsg(rollup.Header), BatchPayloads: rollup.BatchPayloads, CalldataRollupHeader: rollup.CalldataRollupHeader}
 }
 
 func ToRollupHeaderMsg(header *common.RollupHeader) *generated.RollupHeaderMsg {
@@ -236,9 +236,9 @@ func FromExtRollupMsg(msg *generated.ExtRollupMsg) *common.ExtRollup {
 	}
 
 	return &common.ExtRollup{
-		Header:        FromRollupHeaderMsg(msg.Header),
-		BatchPayloads: msg.BatchPayloads,
-		BatchHeaders:  msg.BatchHeaders,
+		Header:               FromRollupHeaderMsg(msg.Header),
+		BatchPayloads:        msg.BatchPayloads,
+		CalldataRollupHeader: msg.CalldataRollupHeader,
 	}
 }
 
diff --git a/go/common/rpc/generated/enclave.pb.go b/go/common/rpc/generated/enclave.pb.go
index 1ab5470cfd..33066a416f 100644
--- a/go/common/rpc/generated/enclave.pb.go
+++ b/go/common/rpc/generated/enclave.pb.go
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: // protoc-gen-go v1.28.0 -// protoc v4.24.2 +// protoc v3.21.9 // source: enclave.proto package generated @@ -3566,9 +3566,9 @@ type ExtRollupMsg struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Header *RollupHeaderMsg `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - BatchPayloads []byte `protobuf:"bytes,2,opt,name=batchPayloads,proto3" json:"batchPayloads,omitempty"` - BatchHeaders []byte `protobuf:"bytes,3,opt,name=batchHeaders,proto3" json:"batchHeaders,omitempty"` + Header *RollupHeaderMsg `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + BatchPayloads []byte `protobuf:"bytes,2,opt,name=batchPayloads,proto3" json:"batchPayloads,omitempty"` + CalldataRollupHeader []byte `protobuf:"bytes,3,opt,name=calldataRollupHeader,proto3" json:"calldataRollupHeader,omitempty"` } func (x *ExtRollupMsg) Reset() { @@ -3617,9 +3617,9 @@ func (x *ExtRollupMsg) GetBatchPayloads() []byte { return nil } -func (x *ExtRollupMsg) GetBatchHeaders() []byte { +func (x *ExtRollupMsg) GetCalldataRollupHeader() []byte { if x != nil { - return x.BatchHeaders + return x.CalldataRollupHeader } return nil } @@ -4288,16 +4288,17 @@ var file_enclave_proto_rawDesc = []byte{ 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x11, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x73, 0x67, 0x52, 0x12, 0x43, 0x72, 0x6f, 0x73, 0x73, - 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0x8c, 0x01, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x0c, 0x45, 0x78, 0x74, 0x52, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x4d, 0x73, 0x67, 0x12, 0x32, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x52, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x73, 0x67, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x62, 0x61, 0x74, 0x63, 0x68, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x62, 0x61, 0x74, 0x63, 0x68, - 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x62, 0x61, 0x74, 0x63, - 0x68, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, - 0x62, 0x61, 0x74, 0x63, 0x68, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xbf, 0x02, 0x0a, + 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x63, 0x61, 0x6c, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x63, 0x61, 0x6c, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x52, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0xbf, 0x02, 0x0a, 0x0f, 0x52, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, diff --git a/go/common/rpc/generated/enclave.proto b/go/common/rpc/generated/enclave.proto index 6ec88bab49..f2c3125dff 100644 --- a/go/common/rpc/generated/enclave.proto +++ b/go/common/rpc/generated/enclave.proto @@ -391,7 +391,7 @@ message BatchHeaderMsg { message 
ExtRollupMsg { RollupHeaderMsg header = 1; bytes batchPayloads = 2; - bytes batchHeaders = 3; + bytes calldataRollupHeader = 3; } message RollupHeaderMsg { diff --git a/go/common/rpc/generated/enclave_grpc.pb.go b/go/common/rpc/generated/enclave_grpc.pb.go index 8846280336..ce77edf796 100644 --- a/go/common/rpc/generated/enclave_grpc.pb.go +++ b/go/common/rpc/generated/enclave_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v4.24.2 +// - protoc v3.21.9 // source: enclave.proto package generated diff --git a/go/enclave/components/batch_executor.go b/go/enclave/components/batch_executor.go index 20b7e03a53..3391a44943 100644 --- a/go/enclave/components/batch_executor.go +++ b/go/enclave/components/batch_executor.go @@ -52,8 +52,7 @@ func NewBatchExecutor(storage storage.Storage, cc *crosschain.Processors, genesi func (executor *batchExecutor) ComputeBatch(context *BatchExecutionContext) (*ComputedBatch, error) { defer executor.logger.Info("Batch context processed", log.DurationKey, measure.NewStopwatch()) - // Block is loaded first since if its missing this batch might be based on l1 fork we dont know about - // and we want to filter out all fork batches based on not knowing the l1 block + // sanity check that the l1 block exists. We don't have to execute batches of forks. block, err := executor.storage.FetchBlock(context.BlockPtr) if errors.Is(err, errutil.ErrNotFound) { return nil, errutil.ErrBlockForBatchNotFound @@ -64,6 +63,7 @@ func (executor *batchExecutor) ComputeBatch(context *BatchExecutionContext) (*Co // These variables will be used to create the new batch parent, err := executor.storage.FetchBatch(context.ParentPtr) if errors.Is(err, errutil.ErrNotFound) { + executor.logger.Error(fmt.Sprintf("can't find parent batch %s. Seq %d", context.ParentPtr, context.SequencerNo)) return nil, errutil.ErrAncestorBatchNotFound } if err != nil { @@ -75,7 +75,8 @@ func (executor *batchExecutor) ComputeBatch(context *BatchExecutionContext) (*Co var err error parentBlock, err = executor.storage.FetchBlock(parent.Header.L1Proof) if err != nil { - executor.logger.Crit(fmt.Sprintf("Could not retrieve a proof for batch %s", parent.Hash()), log.ErrKey, err) + executor.logger.Error(fmt.Sprintf("Could not retrieve a proof for batch %s", parent.Hash()), log.ErrKey, err) + return nil, err } } diff --git a/go/enclave/components/batch_registry.go b/go/enclave/components/batch_registry.go index 47b1e72cb9..469f2523e9 100644 --- a/go/enclave/components/batch_registry.go +++ b/go/enclave/components/batch_registry.go @@ -104,7 +104,15 @@ func (br *batchRegistry) BatchesAfter(batchSeqNo uint64, rollupLimiter limiters. 
} batches = append(batches, batch) - br.logger.Info("Added batch to rollup", log.BatchHashKey, batch.Hash(), log.BatchSeqNoKey, batch.SeqNo()) + br.logger.Info("Added batch to rollup", log.BatchHashKey, batch.Hash(), log.BatchSeqNoKey, batch.SeqNo(), log.BatchHeightKey, batch.Number(), "l1_proof", batch.Header.L1Proof) + } + + // Sanity check that the rollup includes consecutive batches (according to the seqNo) + current := batches[0].SeqNo().Uint64() + for i, b := range batches { + if current+uint64(i) != b.SeqNo().Uint64() { + return nil, fmt.Errorf("created invalid rollup with batches out of sequence") + } } return batches, nil diff --git a/go/enclave/components/rollup_compression.go b/go/enclave/components/rollup_compression.go new file mode 100644 index 0000000000..9b2674a77d --- /dev/null +++ b/go/enclave/components/rollup_compression.go @@ -0,0 +1,507 @@ +package components + +import ( + "errors" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/params" + + "github.com/obscuronet/go-obscuro/go/common/errutil" + + gethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + gethlog "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "github.com/obscuronet/go-obscuro/go/common" + "github.com/obscuronet/go-obscuro/go/common/compression" + "github.com/obscuronet/go-obscuro/go/common/log" + "github.com/obscuronet/go-obscuro/go/enclave/core" + "github.com/obscuronet/go-obscuro/go/enclave/crypto" + "github.com/obscuronet/go-obscuro/go/enclave/storage" +) + +/* +RollupCompression - responsible for the compression logic + +## Problem +The main overhead (after the tx payloads), are the batch headers. + +## Requirements: +1. recreate the exact batch headers as the live ones +2. security - make sure it all chains up cryptographically, so it can't be gamed + +## Solution elements: +1. Add another compressed and encrypted metadata blob to the ExtRollup. This is the "CalldataRollupHeader". +It will also be published as calldata, together with the transactions. The role of this header is to contain the bare minimum +information required to recreate the batches. +2. Use implicit position information, deltas, and exceptions to minimise size. +Eg. If the time between 2 batches is always 1second, there is no need to store any extra information. +3. To avoid storing hashes, which don't compress at all, we execute each batch to be able to populate the parent hash. +4. The Signatures over the batches are not stored, since the rollup is itself signed. +5. The cross chain messages are calculated. 
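+
+Illustrative example of the encoding (hypothetical numbers, not taken from the code): for three
+batches produced at times 100s, 101s and 102s against L1 heights 50, 50 and 51, the
+CalldataRollupHeader stores StartTime=100, BatchTimeDeltas=[0,1,1] and L1HeightDeltas=[50,0,1]
+(the first L1 entry is the absolute height); only re-orged batches carry a full header in ReOrgs.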
+*/ +type RollupCompression struct { + dataEncryptionService crypto.DataEncryptionService + dataCompressionService compression.DataCompressionService + batchRegistry BatchRegistry + batchExecutor BatchExecutor + storage storage.Storage + chainConfig *params.ChainConfig + logger gethlog.Logger +} + +func NewRollupCompression( + batchRegistry BatchRegistry, + batchExecutor BatchExecutor, + dataEncryptionService crypto.DataEncryptionService, + dataCompressionService compression.DataCompressionService, + storage storage.Storage, + chainConfig *params.ChainConfig, + logger gethlog.Logger, +) *RollupCompression { + return &RollupCompression{ + batchRegistry: batchRegistry, + batchExecutor: batchExecutor, + dataEncryptionService: dataEncryptionService, + dataCompressionService: dataCompressionService, + storage: storage, + chainConfig: chainConfig, + logger: logger, + } +} + +// temporary data structure to help build a batch from the information found in the rollup +type batchFromRollup struct { + transactions []*common.L2Tx + seqNo *big.Int + height *big.Int + txHash gethcommon.Hash + time uint64 + l1Proof common.L1BlockHash + + header *common.BatchHeader // for reorgs +} + +// CreateExtRollup - creates a compressed and encrypted External rollup from the internal data structure +func (rc *RollupCompression) CreateExtRollup(r *core.Rollup) (*common.ExtRollup, error) { + header, err := rc.createRollupHeader(r.Batches) + if err != nil { + return nil, err + } + encryptedHeader, err := rc.serialiseCompressAndEncrypt(header) + if err != nil { + return nil, err + } + + transactions := make([][]*common.L2Tx, len(r.Batches)) + for i, batch := range r.Batches { + transactions[i] = batch.Transactions + } + encryptedTransactions, err := rc.serialiseCompressAndEncrypt(transactions) + if err != nil { + return nil, err + } + + return &common.ExtRollup{ + Header: r.Header, + BatchPayloads: encryptedTransactions, + CalldataRollupHeader: encryptedHeader, + }, nil +} + +// ProcessExtRollup - given an External rollup, responsible with checking and saving all batches found inside +func (rc *RollupCompression) ProcessExtRollup(rollup *common.ExtRollup) error { + transactionsPerBatch := make([][]*common.L2Tx, 0) + err := rc.decryptDecompressAndDeserialise(rollup.BatchPayloads, &transactionsPerBatch) + if err != nil { + return err + } + + calldataRollupHeader := new(common.CalldataRollupHeader) + err = rc.decryptDecompressAndDeserialise(rollup.CalldataRollupHeader, calldataRollupHeader) + if err != nil { + return err + } + + // The recreation of batches is a 2-step process: + + // 1. calculate fields like: sequence, height, time, l1Proof, from the implicit and explicit information from the metadata + incompleteBatches, err := rc.createIncompleteBatches(calldataRollupHeader, transactionsPerBatch, rollup.Header.L1Proof) + if err != nil { + return err + } + + // 2. execute each batch to be able to calculate the hash which is necessary for the next batch as it is the parent. 
+ err = rc.executeAndSaveIncompleteBatches(calldataRollupHeader, incompleteBatches) + if err != nil { + return err + } + + return nil +} + +// the main logic that goes from a list of batches to the rollup header +func (rc *RollupCompression) createRollupHeader(batches []*core.Batch) (*common.CalldataRollupHeader, error) { + reorgs := make([]*common.BatchHeader, len(batches)) + + deltaTimes := make([]*big.Int, len(batches)) + startTime := batches[0].Header.Time + prev := startTime + + l1HeightDeltas := make([]*big.Int, len(batches)) + var prevL1Height *big.Int + + batchHashes := make([]common.L2BatchHash, len(batches)) + batchHeaders := make([]*common.BatchHeader, len(batches)) + + isReorg := false + for i, batch := range batches { + rc.logger.Info("Add batch to rollup", log.BatchSeqNoKey, batch.SeqNo(), log.BatchHeightKey, batch.Number(), log.BatchHashKey, batch.Hash()) + // determine whether the batch is canonical + can, err := rc.storage.FetchBatchByHeight(batch.NumberU64()) + if err != nil { + return nil, err + } + if can.Hash() != batch.Hash() { + // if the canonical batch of the same height is different from the current batch + // then add the entire header to a "reorgs" array + reorgs[i] = batch.Header + isReorg = true + rc.logger.Info("Reorg", "pos", i) + } else { + reorgs[i] = nil + } + batchHashes[i] = batch.Hash() + batchHeaders[i] = batch.Header + + deltaTimes[i] = big.NewInt(int64(batch.Header.Time - prev)) + prev = batch.Header.Time + + // since this is the sequencer, it must have all the blocks, because it created the batches in the first place + block, err := rc.storage.FetchBlock(batch.Header.L1Proof) + if err != nil { + return nil, err + } + + // the first element is the actual height + if i == 0 { + l1HeightDeltas[i] = block.Number() + } else { + l1HeightDeltas[i] = big.NewInt(block.Number().Int64() - prevL1Height.Int64()) + } + prevL1Height = block.Number() + } + + l1DeltasBA := make([][]byte, len(l1HeightDeltas)) + for i, delta := range l1HeightDeltas { + v, err := delta.GobEncode() + if err != nil { + return nil, err + } + l1DeltasBA[i] = v + } + + timeDeltasBA := make([][]byte, len(deltaTimes)) + for i, delta := range deltaTimes { + v, err := delta.GobEncode() + if err != nil { + return nil, err + } + timeDeltasBA[i] = v + } + + reorgsBA, err := transformToByteArray(reorgs) + if err != nil { + return nil, err + } + // optimisation in case there is no reorg header + if !isReorg { + reorgsBA = nil + } + + // get the first canonical batch ( which means there is no entry in the reorgs array for it) + // this is necessary because the height calculations always have to be performed according to what is perceived as a canonical batch. + firstCanonBatchHeight := batches[0].Number() + firstCanonParentHash := batches[0].Header.ParentHash + for i, reorg := range reorgs { + if reorg == nil { + firstCanonBatchHeight = batches[i].Number() + firstCanonParentHash = batches[i].Header.ParentHash + break + } + } + + calldataRollupHeader := &common.CalldataRollupHeader{ + FirstBatchSequence: batches[0].SeqNo(), + FirstCanonBatchHeight: firstCanonBatchHeight, + FirstCanonParentHash: firstCanonParentHash, + StartTime: startTime, + BatchTimeDeltas: timeDeltasBA, + ReOrgs: reorgsBA, + L1HeightDeltas: l1DeltasBA, + // BatchHashes: batchHashes, + // BatchHeaders: batchHeaders, + } + + return calldataRollupHeader, nil +} + +// the main logic to recreate the batches from the header. 
The logical pair of: `createRollupHeader` +func (rc *RollupCompression) createIncompleteBatches(calldataRollupHeader *common.CalldataRollupHeader, transactionsPerBatch [][]*common.L2Tx, rollupL1Head common.L1BlockHash) ([]*batchFromRollup, error) { + incompleteBatches := make([]*batchFromRollup, len(transactionsPerBatch)) + + startAtSeq := calldataRollupHeader.FirstBatchSequence.Int64() + currentHeight := calldataRollupHeader.FirstCanonBatchHeight.Int64() - 1 + currentTime := int64(calldataRollupHeader.StartTime) + var currentL1Height *big.Int + + rollupL1Block, err := rc.storage.FetchBlock(rollupL1Head) + if err != nil { + return nil, err + } + + for currentBatchIdx, batchTransactions := range transactionsPerBatch { + // the l1 proofs are stored as deltas, which compress well as it should be a series of 1s and 0s + // the first element is the actual height + l1Delta := big.NewInt(0) + err := l1Delta.GobDecode(calldataRollupHeader.L1HeightDeltas[currentBatchIdx]) + if err != nil { + return nil, err + } + if currentBatchIdx == 0 { + currentL1Height = l1Delta + } else { + currentL1Height = big.NewInt(l1Delta.Int64() + currentL1Height.Int64()) + } + + // get the block with the currentL1Height, relative to the rollupL1Block + block, err := rc.getAncestorOfHeight(currentL1Height, rollupL1Block) + if err != nil { + return nil, err + } + + // todo - this should be 1 second + // todo - multiply delta by something? + timeDelta := big.NewInt(0) + err = timeDelta.GobDecode(calldataRollupHeader.BatchTimeDeltas[currentBatchIdx]) + if err != nil { + return nil, err + } + currentTime += timeDelta.Int64() + + // the transactions stored in a valid rollup belong to sequential batches + currentSeqNo := big.NewInt(startAtSeq + int64(currentBatchIdx)) + + // handle reorgs + var fullReorgedHeader *common.BatchHeader + isCanonical := true + if len(calldataRollupHeader.ReOrgs) > 0 { + // the ReOrgs data structure contains an entire Header + // for the batches that got re-orged. + // the assumption is that it can't be computed because the L1 block won't be available. 
+ encHeader := calldataRollupHeader.ReOrgs[currentBatchIdx] + if len(encHeader) > 0 { + isCanonical = false + fullReorgedHeader = new(common.BatchHeader) + err = rlp.DecodeBytes(encHeader, fullReorgedHeader) + if err != nil { + return nil, err + } + } + } + + if isCanonical { + // only if the batch is canonical, increment the height + currentHeight = currentHeight + 1 + } + + // calculate the hash of the txs + var txHash gethcommon.Hash + if len(batchTransactions) == 0 { + txHash = types.EmptyRootHash + } else { + txHash = types.DeriveSha(types.Transactions(batchTransactions), trie.NewStackTrie(nil)) + } + + incompleteBatches[currentBatchIdx] = &batchFromRollup{ + transactions: batchTransactions, + seqNo: currentSeqNo, + height: big.NewInt(currentHeight), + txHash: txHash, + time: uint64(currentTime), + l1Proof: block.Hash(), + header: fullReorgedHeader, + } + rc.logger.Info("Rollup decompressed batch", log.BatchSeqNoKey, currentSeqNo, log.BatchHeightKey, currentHeight, "rollup_idx", currentBatchIdx, "l1_height", block.Number(), "l1_hash", block.Hash()) + } + return incompleteBatches, nil +} + +func (rc *RollupCompression) getAncestorOfHeight(ancestorHeight *big.Int, head *types.Block) (*types.Block, error) { + if head.NumberU64() == ancestorHeight.Uint64() { + return head, nil + } + p, err := rc.storage.FetchBlock(head.ParentHash()) + if err != nil { + return nil, err + } + return rc.getAncestorOfHeight(ancestorHeight, p) +} + +func (rc *RollupCompression) executeAndSaveIncompleteBatches(calldataRollupHeader *common.CalldataRollupHeader, incompleteBatches []*batchFromRollup) error { //nolint:gocognit + parentHash := calldataRollupHeader.FirstCanonParentHash + + if calldataRollupHeader.FirstBatchSequence.Uint64() != common.L2GenesisSeqNo { + _, err := rc.storage.FetchBatch(parentHash) + if err != nil { + rc.logger.Error("Could not find batch mentioned in the rollup. This should not happen.", log.ErrKey, err) + return err + } + } + + for _, incompleteBatch := range incompleteBatches { + // check whether the batch is already stored in the database + b, err := rc.storage.FetchBatchBySeqNo(incompleteBatch.seqNo.Uint64()) + if err == nil { + parentHash = b.Hash() + continue + } + if !errors.Is(err, errutil.ErrNotFound) { + return err + } + + switch { + // handle genesis + case incompleteBatch.seqNo.Uint64() == common.L2GenesisSeqNo: + genBatch, _, err := rc.batchExecutor.CreateGenesisState(incompleteBatch.l1Proof, incompleteBatch.time) + if err != nil { + return err + } + // Sanity check - uncomment when debugging + //if genBatch.Hash() != calldataRollupHeader.BatchHashes[i] { + // rc.logger.Info(fmt.Sprintf("Good %+v\nCalc %+v", calldataRollupHeader.BatchHeaders[i], genBatch.Header)) + // rc.logger.Crit("Rollup decompression failure. 
The check hashes don't match") + //} + + err = rc.storage.StoreBatch(genBatch) + if err != nil { + return err + } + err = rc.storage.StoreExecutedBatch(genBatch, nil) + if err != nil { + return err + } + rc.batchRegistry.OnBatchExecuted(genBatch, nil) + + rc.logger.Info("Stored genesis", log.BatchHashKey, genBatch.Hash()) + parentHash = genBatch.Hash() + + // this batch was re-orged + case incompleteBatch.header != nil: + err := rc.storage.StoreBatch(&core.Batch{ + Header: incompleteBatch.header, + Transactions: incompleteBatch.transactions, + }) + if err != nil { + return err + } + + default: + // transforms the incompleteBatch into a BatchHeader by executing the transactions + // and then the info can be used to fill in the parent + computedBatch, err := rc.computeBatch(incompleteBatch.l1Proof, + parentHash, + incompleteBatch.transactions, + incompleteBatch.time, + incompleteBatch.seqNo, + ) + if err != nil { + return err + } + // Sanity check - uncomment when debugging + //if computedBatch.Batch.Hash() != calldataRollupHeader.BatchHashes[i] { + // rc.logger.Info(fmt.Sprintf("Good %+v\nCalc %+v", calldataRollupHeader.BatchHeaders[i], computedBatch.Batch.Header)) + // rc.logger.Crit("Rollup decompression failure. The check hashes don't match") + //} + + if _, err := computedBatch.Commit(true); err != nil { + return fmt.Errorf("cannot commit stateDB for incoming valid batch seq=%d. Cause: %w", incompleteBatch.seqNo, err) + } + + err = rc.storage.StoreBatch(computedBatch.Batch) + if err != nil { + return err + } + err = rc.storage.StoreExecutedBatch(computedBatch.Batch, computedBatch.Receipts) + if err != nil { + return err + } + rc.batchRegistry.OnBatchExecuted(computedBatch.Batch, nil) + + parentHash = computedBatch.Batch.Hash() + } + } + return nil +} + +func (rc *RollupCompression) serialiseCompressAndEncrypt(obj any) ([]byte, error) { + serialised, err := rlp.EncodeToBytes(obj) + if err != nil { + return nil, err + } + compressed, err := rc.dataCompressionService.CompressRollup(serialised) + if err != nil { + return nil, err + } + encrypted, err := rc.dataEncryptionService.Encrypt(compressed) + if err != nil { + return nil, err + } + return encrypted, nil +} + +func (rc *RollupCompression) decryptDecompressAndDeserialise(blob []byte, obj any) error { + plaintextBlob, err := rc.dataEncryptionService.Decrypt(blob) + if err != nil { + return err + } + serialisedBlob, err := rc.dataCompressionService.Decompress(plaintextBlob) + if err != nil { + return err + } + err = rlp.DecodeBytes(serialisedBlob, obj) + if err != nil { + return err + } + return nil +} + +func (rc *RollupCompression) computeBatch(BlockPtr common.L1BlockHash, ParentPtr common.L2BatchHash, Transactions common.L2Transactions, AtTime uint64, SequencerNo *big.Int) (*ComputedBatch, error) { + return rc.batchExecutor.ComputeBatch(&BatchExecutionContext{ + BlockPtr: BlockPtr, + ParentPtr: ParentPtr, + Transactions: Transactions, + AtTime: AtTime, + // Creator: executor, + ChainConfig: rc.chainConfig, + SequencerNo: SequencerNo, + }) +} + +func transformToByteArray(reorgs []*common.BatchHeader) ([][]byte, error) { + reorgsBA := make([][]byte, len(reorgs)) + for i, reorg := range reorgs { + if reorg != nil { + enc, err := rlp.EncodeToBytes(reorg) + if err != nil { + return nil, err + } + reorgsBA[i] = enc + } else { + reorgsBA[i] = []byte{} + } + } + return reorgsBA, nil +} diff --git a/go/enclave/components/rollup_consumer.go b/go/enclave/components/rollup_consumer.go index 0ee6ae36f7..3bbc35b4bf 100644 --- 
a/go/enclave/components/rollup_consumer.go +++ b/go/enclave/components/rollup_consumer.go @@ -5,16 +5,11 @@ import ( "github.com/obscuronet/go-obscuro/go/enclave/storage" - "github.com/obscuronet/go-obscuro/go/enclave/core" - "github.com/obscuronet/go-obscuro/go/common/measure" - "github.com/obscuronet/go-obscuro/go/common/compression" - gethlog "github.com/ethereum/go-ethereum/log" "github.com/obscuronet/go-obscuro/go/common" "github.com/obscuronet/go-obscuro/go/common/log" - "github.com/obscuronet/go-obscuro/go/enclave/crypto" "github.com/obscuronet/go-obscuro/go/ethadapter" "github.com/obscuronet/go-obscuro/go/ethadapter/mgmtcontractlib" ) @@ -22,12 +17,8 @@ import ( type rollupConsumerImpl struct { MgmtContractLib mgmtcontractlib.MgmtContractLib - dataEncryptionService crypto.DataEncryptionService - dataCompressionService compression.DataCompressionService - batchRegistry BatchRegistry - - ObscuroChainID int64 - EthereumChainID int64 + rollupCompression *RollupCompression + batchRegistry BatchRegistry logger gethlog.Logger @@ -38,24 +29,18 @@ type rollupConsumerImpl struct { func NewRollupConsumer( mgmtContractLib mgmtcontractlib.MgmtContractLib, batchRegistry BatchRegistry, - dataEncryptionService crypto.DataEncryptionService, - dataCompressionService compression.DataCompressionService, - obscuroChainID int64, - ethereumChainID int64, + rollupCompression *RollupCompression, storage storage.Storage, logger gethlog.Logger, verifier *SignatureValidator, ) RollupConsumer { return &rollupConsumerImpl{ - MgmtContractLib: mgmtContractLib, - batchRegistry: batchRegistry, - dataEncryptionService: dataEncryptionService, - dataCompressionService: dataCompressionService, - ObscuroChainID: obscuroChainID, - EthereumChainID: ethereumChainID, - logger: logger, - storage: storage, - sigValidator: verifier, + MgmtContractLib: mgmtContractLib, + batchRegistry: batchRegistry, + rollupCompression: rollupCompression, + logger: logger, + storage: storage, + sigValidator: verifier, } } @@ -75,13 +60,12 @@ func (rc *rollupConsumerImpl) ProcessRollupsInBlock(b *common.BlockAndReceipts) if len(rollups) > 0 { for _, rollup := range rollups { // read batch data from rollup, verify and store it - if err := rc.processRollup(rollup); err != nil { + if err := rc.rollupCompression.ProcessExtRollup(rollup); err != nil { rc.logger.Error("Failed processing rollup", log.RollupHashKey, rollup.Hash(), log.ErrKey, err) return err } } } - return nil } @@ -126,25 +110,5 @@ func (rc *rollupConsumerImpl) extractRollups(br *common.BlockAndReceipts) []*com rollups = append(rollups, r) rc.logger.Info("Extracted rollup from block", log.RollupHashKey, r.Hash(), log.BlockHashKey, b.Hash()) } - return rollups } - -func (rc *rollupConsumerImpl) processRollup(rollup *common.ExtRollup) error { - // todo logic to decompress the rollups on the fly - r, err := core.ToRollup(rollup, rc.dataEncryptionService, rc.dataCompressionService) - if err != nil { - return err - } - - // only stores the batches. 
They will be executed later - for _, batch := range r.Batches { - rc.logger.Trace("Processing batch from rollup", log.BatchHashKey, batch.Hash(), log.BatchSeqNoKey, batch.SeqNo()) - err := rc.storage.StoreBatch(batch) - if err != nil { - return err - } - } - - return nil -} diff --git a/go/enclave/components/rollup_producer.go b/go/enclave/components/rollup_producer.go index 0caa526a69..43c1a79346 100644 --- a/go/enclave/components/rollup_producer.go +++ b/go/enclave/components/rollup_producer.go @@ -65,7 +65,7 @@ func (re *rollupProducerImpl) CreateRollup(fromBatchNo uint64, limiter limiters. newRollup := re.createNextRollup(batches) - re.logger.Info(fmt.Sprintf("Created new rollup %s with %d batches", newRollup.Hash(), len(newRollup.Batches))) + re.logger.Info(fmt.Sprintf("Created new rollup %s with %d batches. From %d to %d", newRollup.Hash(), len(newRollup.Batches), batches[0].SeqNo(), batches[len(batches)-1].SeqNo())) return newRollup, nil } diff --git a/go/enclave/core/batch.go b/go/enclave/core/batch.go index b01390f732..4b0bdac4f4 100644 --- a/go/enclave/core/batch.go +++ b/go/enclave/core/batch.go @@ -69,15 +69,22 @@ func (b *Batch) ToExtBatch(transactionBlobCrypto crypto.DataEncryptionService, c if err != nil { return nil, err } + enc, err := transactionBlobCrypto.Encrypt(compressed) + if err != nil { + return nil, err + } return &common.ExtBatch{ Header: b.Header, TxHashes: txHashes, - EncryptedTxBlob: transactionBlobCrypto.Encrypt(compressed), + EncryptedTxBlob: enc, }, nil } func ToBatch(extBatch *common.ExtBatch, transactionBlobCrypto crypto.DataEncryptionService, compression compression.DataCompressionService) (*Batch, error) { - compressed := transactionBlobCrypto.Decrypt(extBatch.EncryptedTxBlob) + compressed, err := transactionBlobCrypto.Decrypt(extBatch.EncryptedTxBlob) + if err != nil { + return nil, err + } encoded, err := compression.Decompress(compressed) if err != nil { return nil, err diff --git a/go/enclave/core/rollup.go b/go/enclave/core/rollup.go index 207f1450e1..3e70aa438c 100644 --- a/go/enclave/core/rollup.go +++ b/go/enclave/core/rollup.go @@ -4,12 +4,6 @@ import "C" import ( "sync/atomic" - "github.com/obscuronet/go-obscuro/go/common/compression" - - "github.com/ethereum/go-ethereum/rlp" - - "github.com/obscuronet/go-obscuro/go/enclave/crypto" - "github.com/obscuronet/go-obscuro/go/common" ) @@ -30,74 +24,3 @@ func (r *Rollup) Hash() common.L2BatchHash { r.hash.Store(v) return v } - -func (r *Rollup) ToExtRollup(dataEncryptionService crypto.DataEncryptionService, compression compression.DataCompressionService) (*common.ExtRollup, error) { - headers := make([]*common.BatchHeader, len(r.Batches)) - transactions := make([][]*common.L2Tx, len(r.Batches)) - for i, batch := range r.Batches { - headers[i] = batch.Header - transactions[i] = batch.Transactions - } - - plaintextTransactionsBlob, err := rlp.EncodeToBytes(transactions) - if err != nil { - return nil, err - } - - headersBlob, err := rlp.EncodeToBytes(headers) - if err != nil { - return nil, err - } - - compressedTransactionsBlob, err := compression.CompressRollup(plaintextTransactionsBlob) - if err != nil { - return nil, err - } - - compressedHeadersBlob, err := compression.CompressRollup(headersBlob) - if err != nil { - return nil, err - } - - return &common.ExtRollup{ - Header: r.Header, - BatchPayloads: dataEncryptionService.Encrypt(compressedTransactionsBlob), - BatchHeaders: compressedHeadersBlob, - }, nil -} - -func ToRollup(encryptedRollup *common.ExtRollup, dataEncryptionService 
crypto.DataEncryptionService, dataCompressionService compression.DataCompressionService) (*Rollup, error) { - headers := make([]common.BatchHeader, 0) - headersBlob, err := dataCompressionService.Decompress(encryptedRollup.BatchHeaders) - if err != nil { - return nil, err - } - err = rlp.DecodeBytes(headersBlob, &headers) - if err != nil { - return nil, err - } - - transactions := make([][]*common.L2Tx, 0) - decryptedTxs := dataEncryptionService.Decrypt(encryptedRollup.BatchPayloads) - encryptedTransactions, err := dataCompressionService.Decompress(decryptedTxs) - if err != nil { - return nil, err - } - err = rlp.DecodeBytes(encryptedTransactions, &transactions) - if err != nil { - return nil, err - } - - batches := make([]*Batch, len(headers)) - for i := range headers { - batches[i] = &Batch{ - Header: &(headers[i]), - Transactions: transactions[i], - } - } - - return &Rollup{ - Header: encryptedRollup.Header, - Batches: batches, - }, nil -} diff --git a/go/enclave/crypto/data_enc_service.go b/go/enclave/crypto/data_enc_service.go index 43a2623868..13f943991c 100644 --- a/go/enclave/crypto/data_enc_service.go +++ b/go/enclave/crypto/data_enc_service.go @@ -23,13 +23,13 @@ const ( // DataEncryptionService handles the encryption and decryption of the transaction blobs stored inside a rollup. type DataEncryptionService interface { - Encrypt(blob []byte) []byte - Decrypt(encryptedTxs []byte) []byte + Encrypt(blob []byte) ([]byte, error) + Decrypt(blob []byte) ([]byte, error) } type dataEncryptionServiceImpl struct { - transactionCipher cipher.AEAD - logger gethlog.Logger + cipher cipher.AEAD + logger gethlog.Logger } func NewDataEncryptionService(logger gethlog.Logger) DataEncryptionService { @@ -38,38 +38,40 @@ func NewDataEncryptionService(logger gethlog.Logger) DataEncryptionService { if err != nil { logger.Crit("could not initialise AES cipher for enclave rollup key.", log.ErrKey, err) } - transactionCipher, err := cipher.NewGCM(block) + cipher, err := cipher.NewGCM(block) if err != nil { logger.Crit("could not initialise wrapper for AES cipher for enclave rollup key. ", log.ErrKey, err) } return dataEncryptionServiceImpl{ - transactionCipher: transactionCipher, - logger: logger, + cipher: cipher, + logger: logger, } } // todo (#1053) - modify this logic so that transactions with different reveal periods are in different blobs, as per the whitepaper. -func (t dataEncryptionServiceImpl) Encrypt(encodedBatches []byte) []byte { +func (t dataEncryptionServiceImpl) Encrypt(blob []byte) ([]byte, error) { nonce := make([]byte, NonceLength) if _, err := io.ReadFull(rand.Reader, nonce); err != nil { - t.logger.Crit("could not generate nonce to encrypt transactions.", log.ErrKey, err) + t.logger.Error("could not generate nonce to encrypt transactions.", log.ErrKey, err) + return nil, err } // todo - ensure this nonce is not used too many times (2^32?) with the same key, to avoid risk of repeat. - ciphertext := t.transactionCipher.Seal(nil, nonce, encodedBatches, nil) + ciphertext := t.cipher.Seal(nil, nonce, blob, nil) // We prepend the nonce to the ciphertext, so that it can be retrieved when decrypting. - return append(nonce, ciphertext...) //nolint:makezero + return append(nonce, ciphertext...), nil //nolint:makezero } -func (t dataEncryptionServiceImpl) Decrypt(encryptedBatches []byte) []byte { +func (t dataEncryptionServiceImpl) Decrypt(blob []byte) ([]byte, error) { // The nonce is prepended to the ciphertext. 
- nonce := encryptedBatches[0:NonceLength] - ciphertext := encryptedBatches[NonceLength:] + nonce := blob[0:NonceLength] + ciphertext := blob[NonceLength:] - encodedBatches, err := t.transactionCipher.Open(nil, nonce, ciphertext, nil) + plaintext, err := t.cipher.Open(nil, nonce, ciphertext, nil) if err != nil { - t.logger.Crit("could not decrypt encrypted L2 transactions.", log.ErrKey, err) + t.logger.Error("could not decrypt blob.", log.ErrKey, err) + return nil, err } - return encodedBatches + return plaintext, nil } diff --git a/go/enclave/enclave.go b/go/enclave/enclave.go index 50488206f2..db25c42088 100644 --- a/go/enclave/enclave.go +++ b/go/enclave/enclave.go @@ -211,7 +211,8 @@ func NewEnclave( if err != nil { logger.Crit("Could not initialise the signature validator", log.ErrKey, err) } - rConsumer := components.NewRollupConsumer(mgmtContractLib, registry, dataEncryptionService, dataCompressionService, config.ObscuroChainID, config.L1ChainID, storage, logger, sigVerifier) + rollupCompression := components.NewRollupCompression(registry, batchExecutor, dataEncryptionService, dataCompressionService, storage, &chainConfig, logger) + rConsumer := components.NewRollupConsumer(mgmtContractLib, registry, rollupCompression, storage, logger, sigVerifier) sharedSecretProcessor := components.NewSharedSecretProcessor(mgmtContractLib, attestationProvider, storage, logger) var service nodetype.NodeType @@ -222,6 +223,7 @@ func NewEnclave( registry, rProducer, rConsumer, + rollupCompression, logger, config.HostID, &chainConfig, diff --git a/go/enclave/evm/evm_facade.go b/go/enclave/evm/evm_facade.go index 26f5e903d0..9af1e8b821 100644 --- a/go/enclave/evm/evm_facade.go +++ b/go/enclave/evm/evm_facade.go @@ -175,7 +175,7 @@ func ExecuteObsCall( if err != nil { // also return the result as the result can be evaluated on some errors like ErrIntrinsicGas - logger.Info(fmt.Sprintf("Error applying msg %v:", msg), log.CtrErrKey, err) + logger.Debug(fmt.Sprintf("Error applying msg %v:", msg), log.CtrErrKey, err) return result, err } diff --git a/go/enclave/nodetype/sequencer.go b/go/enclave/nodetype/sequencer.go index 28afa2957b..777d7b7187 100644 --- a/go/enclave/nodetype/sequencer.go +++ b/go/enclave/nodetype/sequencer.go @@ -36,11 +36,12 @@ type SequencerSettings struct { } type sequencer struct { - blockProcessor components.L1BlockProcessor - batchProducer components.BatchExecutor - batchRegistry components.BatchRegistry - rollupProducer components.RollupProducer - rollupConsumer components.RollupConsumer + blockProcessor components.L1BlockProcessor + batchProducer components.BatchExecutor + batchRegistry components.BatchRegistry + rollupProducer components.RollupProducer + rollupConsumer components.RollupConsumer + rollupCompression *components.RollupCompression logger gethlog.Logger @@ -60,6 +61,7 @@ func NewSequencer( registry components.BatchRegistry, rollupProducer components.RollupProducer, rollupConsumer components.RollupConsumer, + rollupCompression *components.RollupCompression, logger gethlog.Logger, @@ -78,6 +80,7 @@ func NewSequencer( batchRegistry: registry, rollupProducer: rollupProducer, rollupConsumer: rollupConsumer, + rollupCompression: rollupCompression, logger: logger, hostID: hostID, chainConfig: chainConfig, @@ -250,7 +253,7 @@ func (s *sequencer) CreateRollup(lastBatchNo uint64) (*common.ExtRollup, error) s.logger.Info("Created new head rollup", log.RollupHashKey, rollup.Hash(), "numBatches", len(rollup.Batches)) - return rollup.ToExtRollup(s.dataEncryptionService, 
s.dataCompressionService) + return s.rollupCompression.CreateExtRollup(rollup) } func (s *sequencer) duplicateBatches(l1Head *types.Block, nonCanonicalL1Path []common.L1BlockHash) error { diff --git a/go/enclave/nodetype/validator.go b/go/enclave/nodetype/validator.go index 77cd4dfc48..dd6e19ee44 100644 --- a/go/enclave/nodetype/validator.go +++ b/go/enclave/nodetype/validator.go @@ -20,7 +20,7 @@ import ( type obsValidator struct { blockProcessor components.L1BlockProcessor - batchProducer components.BatchExecutor + batchExecutor components.BatchExecutor batchRegistry components.BatchRegistry rollupConsumer components.RollupConsumer @@ -34,7 +34,7 @@ type obsValidator struct { func NewValidator( consumer components.L1BlockProcessor, - producer components.BatchExecutor, + batchExecutor components.BatchExecutor, registry components.BatchRegistry, rollupConsumer components.RollupConsumer, @@ -47,7 +47,7 @@ func NewValidator( ) ObsValidator { return &obsValidator{ blockProcessor: consumer, - batchProducer: producer, + batchExecutor: batchExecutor, batchRegistry: registry, rollupConsumer: rollupConsumer, chainConfig: chainConfig, @@ -96,7 +96,7 @@ func (val *obsValidator) ExecuteStoredBatches() error { } if canExecute { - receipts, err := val.batchProducer.ExecuteBatch(batch) + receipts, err := val.batchExecutor.ExecuteBatch(batch) if err != nil { return fmt.Errorf("could not execute batch %s. Cause: %w", batch.Hash(), err) } @@ -129,7 +129,7 @@ func (val *obsValidator) executionPrerequisites(batch *core.Batch) (bool, error) } func (val *obsValidator) handleGenesis(batch *core.Batch) error { - genBatch, _, err := val.batchProducer.CreateGenesisState(batch.Header.L1Proof, batch.Header.Time) + genBatch, _, err := val.batchExecutor.CreateGenesisState(batch.Header.L1Proof, batch.Header.Time) if err != nil { return err } diff --git a/go/enclave/storage/enclavedb/block.go b/go/enclave/storage/enclavedb/block.go index 3c5db2aad2..6ad750d317 100644 --- a/go/enclave/storage/enclavedb/block.go +++ b/go/enclave/storage/enclavedb/block.go @@ -5,6 +5,7 @@ import ( "database/sql" "errors" "fmt" + "math/big" "strings" "github.com/ethereum/go-ethereum/core/types" @@ -87,6 +88,10 @@ func FetchHeadBlock(db *sql.DB) (*types.Block, error) { return fetchBlock(db, "where is_canonical=true and height=(select max(b.height) from block b where is_canonical=true)") } +func FetchBlockHeaderByHeight(db *sql.DB, height *big.Int) (*types.Header, error) { + return fetchBlockHeader(db, "where is_canonical=true and height=?", height.Int64()) +} + func WriteL1Messages(db *sql.DB, blockHash common.L1BlockHash, messages common.CrossChainMessages) error { insert := l1msgInsert + strings.Repeat(l1msgValue+",", len(messages)) insert = insert[0 : len(insert)-1] // remove trailing comma diff --git a/go/enclave/storage/interfaces.go b/go/enclave/storage/interfaces.go index 52daea9f52..0549bc4545 100644 --- a/go/enclave/storage/interfaces.go +++ b/go/enclave/storage/interfaces.go @@ -21,6 +21,8 @@ import ( type BlockResolver interface { // FetchBlock returns the L1 Block with the given hash. FetchBlock(blockHash common.L1BlockHash) (*types.Block, error) + // FetchCanonicaBlockByHeight - self explanatory + FetchCanonicaBlockByHeight(height *big.Int) (*types.Block, error) // FetchHeadBlock - returns the head of the current chain. 
FetchHeadBlock() (*types.Block, error) // StoreBlock persists the L1 Block and updates the canonical ancestors if there was a fork diff --git a/go/enclave/storage/storage.go b/go/enclave/storage/storage.go index a291bbaa75..4931aefb1c 100644 --- a/go/enclave/storage/storage.go +++ b/go/enclave/storage/storage.go @@ -172,6 +172,19 @@ func (s *storageImpl) FetchBlock(blockHash common.L1BlockHash) (*types.Block, er }) } +func (s *storageImpl) FetchCanonicaBlockByHeight(height *big.Int) (*types.Block, error) { + callStart := time.Now() + defer s.logDuration("FetchCanonicaBlockByHeight", callStart) + header, err := enclavedb.FetchBlockHeaderByHeight(s.db.GetSQLDB(), height) + if err != nil { + return nil, err + } + blockHash := header.Hash() + return s.getCachedBlock(blockHash, func(hash common.L1BlockHash) (*types.Block, error) { + return enclavedb.FetchBlock(s.db.GetSQLDB(), blockHash) + }) +} + func (s *storageImpl) FetchHeadBlock() (*types.Block, error) { callStart := time.Now() defer s.logDuration("FetchHeadBlock", callStart) @@ -537,6 +550,7 @@ func (s *storageImpl) cacheBlock(blockHash common.L1BlockHash, b *types.Block) { func (s *storageImpl) getCachedBlock(hash common.L1BlockHash, onFailed func(common.L1BlockHash) (*types.Block, error)) (*types.Block, error) { value, err := s.blockCache.Get(context.Background(), hash) if err != nil { + // todo metrics for cache misses b, err := onFailed(hash) if err != nil { return b, err diff --git a/go/host/enclave/guardian.go b/go/host/enclave/guardian.go index 1223ee9ece..1437c5facc 100644 --- a/go/host/enclave/guardian.go +++ b/go/host/enclave/guardian.go @@ -542,7 +542,11 @@ func (g *Guardian) periodicRollupProduction() { g.logger.Warn("encountered error while trying to retrieve latest sequence number", log.ErrKey, err) continue } - producedRollup, err := g.enclaveClient.CreateRollup(lastBatchNo.Uint64()) + fromBatch := lastBatchNo.Uint64() + if lastBatchNo.Uint64() > common.L2GenesisSeqNo { + fromBatch++ + } + producedRollup, err := g.enclaveClient.CreateRollup(fromBatch) if err != nil { g.logger.Error("unable to produce rollup", log.ErrKey, err) } else { diff --git a/integration/ethereummock/db.go b/integration/ethereummock/db.go index d400e36e5e..bb00b0bc04 100644 --- a/integration/ethereummock/db.go +++ b/integration/ethereummock/db.go @@ -2,6 +2,7 @@ package ethereummock import ( "bytes" + "math/big" "sync" "github.com/obscuronet/go-obscuro/go/enclave/storage" @@ -22,6 +23,10 @@ type blockResolverInMem struct { m sync.RWMutex } +func (n *blockResolverInMem) FetchCanonicaBlockByHeight(_ *big.Int) (*types.Block, error) { + panic("implement me") +} + func (n *blockResolverInMem) Proof(_ *core.Rollup) (*types.Block, error) { panic("implement me") } diff --git a/integration/simulation/params/params.go b/integration/simulation/params/params.go index 17168396ff..c58361ffdc 100644 --- a/integration/simulation/params/params.go +++ b/integration/simulation/params/params.go @@ -19,12 +19,7 @@ type SimParams struct { SimulationTime time.Duration // how long the simulations should run for - // EfficiencyThresholds represents an acceptable "dead blocks" percentage for this simulation. - // dead blocks - Blocks that are produced and gossiped, but don't make it into the canonical chain. - // We test the results against this threshold to catch eventual protocol errors. 
- L1EfficiencyThreshold float64 - L2EfficiencyThreshold float64 // number of dead obscuro blocks - L2ToL1EfficiencyThreshold float64 // number of ethereum blocks that don't include an obscuro node + L1EfficiencyThreshold float64 // MgmtContractLib allows parsing MgmtContract txs to and from the eth txs MgmtContractLib mgmtcontractlib.MgmtContractLib diff --git a/integration/simulation/simulation_full_network_test.go b/integration/simulation/simulation_full_network_test.go index 294127f9a7..5f1e6e3269 100644 --- a/integration/simulation/simulation_full_network_test.go +++ b/integration/simulation/simulation_full_network_test.go @@ -23,13 +23,10 @@ func TestFullNetworkMonteCarloSimulation(t *testing.T) { wallets := params.NewSimWallets(numberOfSimWallets, numberOfNodes, integration.EthereumChainID, integration.ObscuroChainID) simParams := ¶ms.SimParams{ - NumberOfNodes: numberOfNodes, - AvgBlockDuration: 1 * time.Second, - SimulationTime: 75 * time.Second, - L1EfficiencyThreshold: 0.2, - // Very hard to have precision here as blocks are continually produced and not dependent on the simulation execution thread - L2EfficiencyThreshold: 0.75, // nodes might produce rollups because they receive a new block - L2ToL1EfficiencyThreshold: 0.7, // nodes might stop producing rollups but the geth network is still going + NumberOfNodes: numberOfNodes, + AvgBlockDuration: 1 * time.Second, + SimulationTime: 75 * time.Second, + L1EfficiencyThreshold: 0.2, Wallets: wallets, StartPort: integration.StartPortSimulationFullNetwork, ReceiptTimeout: 65 * time.Second, diff --git a/integration/simulation/simulation_geth_in_mem_test.go b/integration/simulation/simulation_geth_in_mem_test.go index e1f06c7947..5c0f0ded68 100644 --- a/integration/simulation/simulation_geth_in_mem_test.go +++ b/integration/simulation/simulation_geth_in_mem_test.go @@ -30,14 +30,11 @@ func TestGethSimulation(t *testing.T) { AvgBlockDuration: 1 * time.Second, SimulationTime: 35 * time.Second, L1EfficiencyThreshold: 0.2, - // Very hard to have precision here as blocks are continually produced and not dependent on the simulation execution thread - L2EfficiencyThreshold: 0.6, // nodes might produce rollups because they receive a new block - L2ToL1EfficiencyThreshold: 0.7, // nodes might stop producing rollups but the geth network is still going - Wallets: wallets, - StartPort: integration.StartPortSimulationGethInMem, - IsInMem: true, - ReceiptTimeout: 30 * time.Second, - StoppingDelay: 10 * time.Second, + Wallets: wallets, + StartPort: integration.StartPortSimulationGethInMem, + IsInMem: true, + ReceiptTimeout: 30 * time.Second, + StoppingDelay: 10 * time.Second, } simParams.AvgNetworkLatency = simParams.AvgBlockDuration / 15 diff --git a/integration/simulation/simulation_in_mem_test.go b/integration/simulation/simulation_in_mem_test.go index 3a8a5457ed..59b06080be 100644 --- a/integration/simulation/simulation_in_mem_test.go +++ b/integration/simulation/simulation_in_mem_test.go @@ -29,8 +29,6 @@ func TestInMemoryMonteCarloSimulation(t *testing.T) { AvgBlockDuration: 250 * time.Millisecond, SimulationTime: 30 * time.Second, L1EfficiencyThreshold: 0.2, - L2EfficiencyThreshold: 0.5, - L2ToL1EfficiencyThreshold: 0.5, MgmtContractLib: ethereummock.NewMgmtContractLibMock(), ERC20ContractLib: ethereummock.NewERC20ContractLibMock(), Wallets: wallets, diff --git a/integration/simulation/validate_chain.go b/integration/simulation/validate_chain.go index fbc4ad0dfb..fe6e960271 100644 --- a/integration/simulation/validate_chain.go +++ 
diff --git a/integration/ethereummock/db.go b/integration/ethereummock/db.go
index d400e36e5e..bb00b0bc04 100644
--- a/integration/ethereummock/db.go
+++ b/integration/ethereummock/db.go
@@ -2,6 +2,7 @@ package ethereummock
 
 import (
 	"bytes"
+	"math/big"
 	"sync"
 
 	"github.com/obscuronet/go-obscuro/go/enclave/storage"
@@ -22,6 +23,10 @@ type blockResolverInMem struct {
 	m sync.RWMutex
 }
 
+func (n *blockResolverInMem) FetchCanonicaBlockByHeight(_ *big.Int) (*types.Block, error) {
+	panic("implement me")
+}
+
 func (n *blockResolverInMem) Proof(_ *core.Rollup) (*types.Block, error) {
 	panic("implement me")
 }
diff --git a/integration/simulation/params/params.go b/integration/simulation/params/params.go
index 17168396ff..c58361ffdc 100644
--- a/integration/simulation/params/params.go
+++ b/integration/simulation/params/params.go
@@ -19,12 +19,7 @@ type SimParams struct {
 
 	SimulationTime time.Duration // how long the simulations should run for
 
-	// EfficiencyThresholds represents an acceptable "dead blocks" percentage for this simulation.
-	// dead blocks - Blocks that are produced and gossiped, but don't make it into the canonical chain.
-	// We test the results against this threshold to catch eventual protocol errors.
-	L1EfficiencyThreshold     float64
-	L2EfficiencyThreshold     float64 // number of dead obscuro blocks
-	L2ToL1EfficiencyThreshold float64 // number of ethereum blocks that don't include an obscuro node
+	L1EfficiencyThreshold float64
 
 	// MgmtContractLib allows parsing MgmtContract txs to and from the eth txs
 	MgmtContractLib mgmtcontractlib.MgmtContractLib
diff --git a/integration/simulation/simulation_full_network_test.go b/integration/simulation/simulation_full_network_test.go
index 294127f9a7..5f1e6e3269 100644
--- a/integration/simulation/simulation_full_network_test.go
+++ b/integration/simulation/simulation_full_network_test.go
@@ -23,13 +23,10 @@ func TestFullNetworkMonteCarloSimulation(t *testing.T) {
 	wallets := params.NewSimWallets(numberOfSimWallets, numberOfNodes, integration.EthereumChainID, integration.ObscuroChainID)
 
 	simParams := &params.SimParams{
-		NumberOfNodes:             numberOfNodes,
-		AvgBlockDuration:          1 * time.Second,
-		SimulationTime:            75 * time.Second,
-		L1EfficiencyThreshold:     0.2,
-		// Very hard to have precision here as blocks are continually produced and not dependent on the simulation execution thread
-		L2EfficiencyThreshold:     0.75, // nodes might produce rollups because they receive a new block
-		L2ToL1EfficiencyThreshold: 0.7,  // nodes might stop producing rollups but the geth network is still going
+		NumberOfNodes:         numberOfNodes,
+		AvgBlockDuration:      1 * time.Second,
+		SimulationTime:        75 * time.Second,
+		L1EfficiencyThreshold: 0.2,
 		Wallets:               wallets,
 		StartPort:             integration.StartPortSimulationFullNetwork,
 		ReceiptTimeout:        65 * time.Second,
diff --git a/integration/simulation/simulation_geth_in_mem_test.go b/integration/simulation/simulation_geth_in_mem_test.go
index e1f06c7947..5c0f0ded68 100644
--- a/integration/simulation/simulation_geth_in_mem_test.go
+++ b/integration/simulation/simulation_geth_in_mem_test.go
@@ -30,14 +30,11 @@ func TestGethSimulation(t *testing.T) {
 		AvgBlockDuration:      1 * time.Second,
 		SimulationTime:        35 * time.Second,
 		L1EfficiencyThreshold: 0.2,
-		// Very hard to have precision here as blocks are continually produced and not dependent on the simulation execution thread
-		L2EfficiencyThreshold:     0.6, // nodes might produce rollups because they receive a new block
-		L2ToL1EfficiencyThreshold: 0.7, // nodes might stop producing rollups but the geth network is still going
-		Wallets:                   wallets,
-		StartPort:                 integration.StartPortSimulationGethInMem,
-		IsInMem:                   true,
-		ReceiptTimeout:            30 * time.Second,
-		StoppingDelay:             10 * time.Second,
+		Wallets:        wallets,
+		StartPort:      integration.StartPortSimulationGethInMem,
+		IsInMem:        true,
+		ReceiptTimeout: 30 * time.Second,
+		StoppingDelay:  10 * time.Second,
 	}
 
 	simParams.AvgNetworkLatency = simParams.AvgBlockDuration / 15
diff --git a/integration/simulation/simulation_in_mem_test.go b/integration/simulation/simulation_in_mem_test.go
index 3a8a5457ed..59b06080be 100644
--- a/integration/simulation/simulation_in_mem_test.go
+++ b/integration/simulation/simulation_in_mem_test.go
@@ -29,8 +29,6 @@ func TestInMemoryMonteCarloSimulation(t *testing.T) {
 		AvgBlockDuration:          250 * time.Millisecond,
 		SimulationTime:            30 * time.Second,
 		L1EfficiencyThreshold:     0.2,
-		L2EfficiencyThreshold:     0.5,
-		L2ToL1EfficiencyThreshold: 0.5,
 		MgmtContractLib:           ethereummock.NewMgmtContractLibMock(),
 		ERC20ContractLib:          ethereummock.NewERC20ContractLibMock(),
 		Wallets:                   wallets,
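The three simulation tests above now tune a single efficiency knob; the two L2 thresholds are gone from SimParams altogether. A compact illustration of the slimmed-down configuration shape (a local stand-in struct rather than the real params package, with an illustrative node count; duration and threshold values are copied from the in-memory test):

package main

import (
	"fmt"
	"time"
)

// simParams is a stand-in mirroring the fields the updated tests still set.
type simParams struct {
	NumberOfNodes         int
	AvgBlockDuration      time.Duration
	SimulationTime        time.Duration
	L1EfficiencyThreshold float64 // only the L1 "dead blocks" tolerance survives this change
}

func main() {
	p := simParams{
		NumberOfNodes:         5, // illustrative value, not taken from the tests
		AvgBlockDuration:      250 * time.Millisecond,
		SimulationTime:        30 * time.Second,
		L1EfficiencyThreshold: 0.2,
	}
	fmt.Printf("%+v\n", p)
}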
diff --git a/integration/simulation/validate_chain.go b/integration/simulation/validate_chain.go
index fbc4ad0dfb..fe6e960271 100644
--- a/integration/simulation/validate_chain.go
+++ b/integration/simulation/validate_chain.go
@@ -9,9 +9,6 @@ import (
 	"sync"
 	"testing"
 
-	"github.com/ethereum/go-ethereum/rlp"
-	"github.com/obscuronet/go-obscuro/go/common/compression"
-
 	testcommon "github.com/obscuronet/go-obscuro/integration/common"
 
 	"github.com/obscuronet/go-obscuro/integration/ethereummock"
@@ -120,7 +117,7 @@ func checkObscuroBlockchainValidity(t *testing.T, s *Simulation, maxL1Height uin
 	wg.Wait()
 	min, max := minMax(heights)
 	// This checks that all the nodes are in sync. When a node falls behind with processing blocks it might highlight a problem.
-	if max-min > max/10 {
+	if max-min > max/7 {
 		t.Errorf("There is a problem with the Obscuro chain. Nodes fell out of sync. Max height: %d. Min height: %d -> %+v", max, min, heights)
 	}
 }
@@ -160,11 +157,6 @@ func checkBlockchainOfEthereumNode(t *testing.T, node ethadapter.EthClient, minH
 			t.Errorf("Node %d: No deposits", nodeIdx)
 		}
 	*/
-	efficiency := float64(s.Stats.TotalL1Blocks-height) / float64(s.Stats.TotalL1Blocks)
-	if efficiency > s.Params.L1EfficiencyThreshold {
-		t.Errorf("Node %d: Efficiency in L1 is %f. Expected:%f. Number: %d.", nodeIdx, efficiency, s.Params.L1EfficiencyThreshold, height)
-	}
-
 	// compare the number of reorgs for this node against the height
 	reorgs := s.Stats.NoL1Reorgs[node.Info().L2ID]
 	reorgEfficiency := float64(reorgs) / float64(height)
@@ -178,6 +170,9 @@ func checkBlockchainOfEthereumNode(t *testing.T, node ethadapter.EthClient, minH
 	return height
 }
 
+// this function only performs a very brief check.
+// the ultimate check that everything works fine is that each node is able to respond to queries
+// and has processed all batches correctly.
 func checkRollups(t *testing.T, s *Simulation, nodeIdx int, rollups []*common.ExtRollup) {
 	if len(rollups) < 2 {
 		t.Errorf("Node %d: Found less than two submitted rollups! Successful simulation should always produce more than 2", nodeIdx)
@@ -188,8 +183,7 @@ func checkRollups(t *testing.T, s *Simulation, nodeIdx int, rollups []*common.Ex
 		return rollups[i].Header.LastBatchSeqNo < rollups[j].Header.LastBatchSeqNo
 	})
 
-	batchNumber := uint64(0)
-	for idx, rollup := range rollups {
+	for _, rollup := range rollups {
 		// todo - use the signature
 		if rollup.Header.Coinbase.Hex() != s.Params.Wallets.NodeWallets[0].Address().Hex() {
 			t.Errorf("Node %d: Found rollup produced by non-sequencer %s", nodeIdx, s.Params.Wallets.NodeWallets[0].Address().Hex())
@@ -200,75 +194,9 @@ func checkRollups(t *testing.T, s *Simulation, nodeIdx int, rollups []*common.Ex
 			t.Errorf("Node %d: No batches in rollup!", nodeIdx)
 			continue
 		}
-
-		if idx != 0 {
-			prevRollup := rollups[idx-1]
-			checkRollupPair(t, nodeIdx, prevRollup, rollup)
-		}
-		headers := extractBatchHeaders(rollup)
-
-		for _, batchHeader := range headers {
-			currHeight := batchHeader.Number.Uint64()
-			if currHeight != 0 && currHeight > batchNumber+1 {
-				t.Errorf("Node %d: Batch gap!", nodeIdx)
-			}
-			batchNumber = currHeight
-
-			for _, clients := range s.RPCHandles.AuthObsClients {
-				client := clients[0]
-				batchOnNode, err := client.BatchHeaderByHash(batchHeader.Hash())
-				if err != nil {
-					t.Errorf("Node %d: Could not find batch header [idx=%s, hash=%s]. Cause: %s", nodeIdx, batchHeader.Number, batchHeader.Hash(), err)
-					continue
-				}
-				if batchOnNode.Hash() != batchHeader.Hash() {
-					t.Errorf("Node %d: Batches mismatch!", nodeIdx)
-				}
-			}
-		}
 	}
 }
 
-func checkRollupPair(t *testing.T, nodeIdx int, prevRollup *common.ExtRollup, rollup *common.ExtRollup) {
-	if len(prevRollup.BatchHeaders) == 0 {
-		return
-	}
-
-	previousHeaders := extractBatchHeaders(prevRollup)
-	currentHeaders := extractBatchHeaders(rollup)
-
-	lastBatch := previousHeaders[len(previousHeaders)-1]
-	firstBatch := currentHeaders[0]
-	isValidChain := firstBatch.SequencerOrderNo.Uint64() == lastBatch.SequencerOrderNo.Uint64()
-	if !isValidChain {
-		t.Errorf("Node %d: Found badly chained batches in rollups! from %d to %d",
-			nodeIdx,
-			lastBatch.SequencerOrderNo.Uint64(),
-			firstBatch.SequencerOrderNo.Uint64())
-		return
-	}
-
-	//isValidChain = prevRollup.Header.HeadBatchHash.Hex() == firstBatch.ParentHash.Hex()
-	//if !isValidChain {
-	//	t.Errorf("Node %d: Found badly chained batches in rollups! Marked header batch does not match!", nodeIdx)
-	//	return
-	//}
-}
-
-func extractBatchHeaders(rollup *common.ExtRollup) []common.BatchHeader {
-	dataCompressionService := compression.NewBrotliDataCompressionService()
-	headers := make([]common.BatchHeader, 0)
-	headersBlob, err := dataCompressionService.Decompress(rollup.BatchHeaders)
-	if err != nil {
-		testlog.Logger().Crit("could not decode rollup.", log.ErrKey, err)
-	}
-	err = rlp.DecodeBytes(headersBlob, &headers)
-	if err != nil {
-		testlog.Logger().Crit("could not decode rollup.", log.ErrKey, err)
-	}
-	return headers
-}
-
 // ExtractDataFromEthereumChain returns the deposits, rollups, total amount deposited and length of the blockchain
 // between the start block and the end block.
 func ExtractDataFromEthereumChain(
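Two of the hunks above change the character of the checks: the cross-node sync comparison now tolerates a spread of up to one seventh of the maximum height (previously one tenth), and checkRollups no longer decompresses and cross-checks batch headers, leaving the deeper validation to the per-node queries. A small sketch of the relaxed drift check on its own (minMax here is a trivial local helper, not the simulation's):

package main

import "fmt"

// minMax returns the smallest and largest element of a non-empty slice.
func minMax(heights []uint64) (uint64, uint64) {
	min, max := heights[0], heights[0]
	for _, h := range heights {
		if h < min {
			min = h
		}
		if h > max {
			max = h
		}
	}
	return min, max
}

// nodesOutOfSync reports whether the spread between the slowest and fastest node
// exceeds the tolerated fraction of the max height (max/7 after this change).
func nodesOutOfSync(heights []uint64) bool {
	min, max := minMax(heights)
	return max-min > max/7
}

func main() {
	fmt.Println(nodesOutOfSync([]uint64{70, 65, 68})) // false: spread 5 is within 70/7 = 10
	fmt.Println(nodesOutOfSync([]uint64{70, 55, 68})) // true: spread 15 exceeds 10
}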
@@ -340,36 +268,27 @@ func checkBlockchainOfObscuroNode(t *testing.T, rpcHandles *network.RPCHandles,
 	}
 
 	// check that the height of the Rollup chain is higher than a minimum expected value.
-	headRollupHeader, err := getHeadBatchHeader(obscuroClient)
+	headBatchHeader, err := getHeadBatchHeader(obscuroClient)
 	if err != nil {
 		t.Error(fmt.Errorf("node %d: %w", nodeIdx, err))
 	}
-	if headRollupHeader == nil {
+	if headBatchHeader == nil {
 		t.Errorf("Node %d: No head rollup recorded. Skipping any further checks for this node.\n", nodeIdx)
 		return
 	}
-	l2Height := headRollupHeader.Number
+	l2Height := headBatchHeader.Number
 	if l2Height.Uint64() < minObscuroHeight {
 		t.Errorf("Node %d: Node only mined %d rollups. Expected at least: %d.", nodeIdx, l2Height, minObscuroHeight)
 	}
 
 	// check that the height from the rollup header is consistent with the height returned by eth_blockNumber.
-	l2HeightFromRollupNumber, err := obscuroClient.BatchNumber()
+	l2HeightFromBatchNumber, err := obscuroClient.BatchNumber()
 	if err != nil {
 		t.Errorf("Node %d: Could not retrieve block number. Cause: %s", nodeIdx, err)
 	}
-	if l2HeightFromRollupNumber != l2Height.Uint64() {
-		t.Errorf("Node %d: Node's head rollup had a height %d, but %s height was %d", nodeIdx, l2Height, rpc.BatchNumber, l2HeightFromRollupNumber)
-	}
-
-	totalL2Blocks := s.Stats.NoL2Blocks[nodeIdx]
-	// in case the blockchain has advanced above what was collected, there is no longer a point to this check
-	if l2Height.Uint64() <= totalL2Blocks {
-		efficiencyL2 := float64(totalL2Blocks-l2Height.Uint64()) / float64(totalL2Blocks)
-		if efficiencyL2 > s.Params.L2EfficiencyThreshold {
-			t.Errorf("Node %d: Efficiency in L2 is %f. Expected:%f", nodeIdx, efficiencyL2, s.Params.L2EfficiencyThreshold)
-		}
+	if l2HeightFromBatchNumber != l2Height.Uint64() {
+		t.Errorf("Node %d: Node's head rollup had a height %d, but %s height was %d", nodeIdx, l2Height, rpc.BatchNumber, l2HeightFromBatchNumber)
 	}
 
 	notFoundTransfers, notFoundWithdrawals, notFoundNativeTransfers := FindNotIncludedL2Txs(s.ctx, nodeIdx, rpcHandles, s.TxInjector)
@@ -390,7 +309,7 @@ func checkBlockchainOfObscuroNode(t *testing.T, rpcHandles *network.RPCHandles,
 
 	totalSuccessfullyWithdrawn := extractWithdrawals(t, obscuroClient, nodeIdx)
 
-	totalAmountLogged := getLoggedWithdrawals(minObscuroHeight, obscuroClient, headRollupHeader)
+	totalAmountLogged := getLoggedWithdrawals(minObscuroHeight, obscuroClient, headBatchHeader)
 	if totalAmountLogged.Cmp(totalSuccessfullyWithdrawn) != 0 {
 		t.Errorf("Node %d: Logged withdrawals do not match!", nodeIdx)
 	}
@@ -432,13 +351,17 @@ func checkBlockchainOfObscuroNode(t *testing.T, rpcHandles *network.RPCHandles,
 
 	heights[nodeIdx] = l2Height.Uint64()
 
+	if headBatchHeader.SequencerOrderNo.Uint64() == common.L2GenesisSeqNo {
+		return
+	}
 	// check that the headers are serialised and deserialised correctly, by recomputing a header's hash
-	parentHeader, err := obscuroClient.BatchHeaderByHash(headRollupHeader.ParentHash)
+	parentHeader, err := obscuroClient.BatchHeaderByHash(headBatchHeader.ParentHash)
 	if err != nil {
-		t.Errorf("could not retrieve parent of head rollup")
+		t.Errorf("could not retrieve parent of head batch")
+		return
 	}
-	if parentHeader.Hash() != headRollupHeader.ParentHash {
-		t.Errorf("mismatch in hash of retrieved header. Parent: %+v\nCurrent: %+v", parentHeader, headRollupHeader)
+	if parentHeader.Hash() != headBatchHeader.ParentHash {
+		t.Errorf("mismatch in hash of retrieved header. Parent: %+v\nCurrent: %+v", parentHeader, headBatchHeader)
 	}
 }
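The final hunk tightens the serialisation round-trip check: the parent of the head batch is fetched by hash and its hash recomputed, the check now returns early on a fetch failure, and it is skipped entirely while the head is still the genesis batch, which has no parent to retrieve. A self-contained sketch of that shape (the header type, hashing and store below are simplified stand-ins, not the real BatchHeader or client API):

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

const l2GenesisSeqNo = uint64(1) // stand-in for common.L2GenesisSeqNo, value assumed for illustration

type batchHeader struct {
	SequencerOrderNo uint64
	Number           uint64
	ParentHash       [32]byte
}

// hash recomputes the header hash from its serialised form, standing in for Header.Hash().
func (h batchHeader) hash() [32]byte {
	enc, _ := json.Marshal(h)
	return sha256.Sum256(enc)
}

// checkParentRoundTrip verifies that the parent header retrieved by hash re-hashes to the
// hash the child points at; the check is skipped while the head is the genesis batch.
func checkParentRoundTrip(head batchHeader, fetchByHash func([32]byte) (batchHeader, bool)) error {
	if head.SequencerOrderNo == l2GenesisSeqNo {
		return nil // genesis has no parent to check
	}
	parent, ok := fetchByHash(head.ParentHash)
	if !ok {
		return fmt.Errorf("could not retrieve parent of head batch")
	}
	if parent.hash() != head.ParentHash {
		return fmt.Errorf("mismatch in hash of retrieved header")
	}
	return nil
}

func main() {
	parent := batchHeader{SequencerOrderNo: 1, Number: 0}
	head := batchHeader{SequencerOrderNo: 2, Number: 1, ParentHash: parent.hash()}
	store := map[[32]byte]batchHeader{parent.hash(): parent}
	err := checkParentRoundTrip(head, func(h [32]byte) (batchHeader, bool) {
		p, ok := store[h]
		return p, ok
	})
	fmt.Println(err) // <nil> when serialisation round-trips correctly
}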