From 0529e965af8cddd90b0b42a06e937624fd1fdb97 Mon Sep 17 00:00:00 2001
From: Tudor Malene
Date: Fri, 15 Sep 2023 16:47:34 +0100
Subject: [PATCH] save rollups (#1526)

---
 go/enclave/components/rollup_compression.go | 12 +++----
 go/enclave/components/rollup_consumer.go    |  8 ++++-
 go/enclave/nodetype/sequencer.go            | 22 +++++++++---
 go/enclave/storage/enclavedb/block.go       | 34 ++++++++++++++++---
 .../storage/init/edgelessdb/002_init.sql    | 13 +++++++
 go/enclave/storage/init/sqlite/002_init.sql | 10 ++++++
 go/enclave/storage/interfaces.go            |  4 +++
 go/enclave/storage/storage.go               |  8 +++--
 integration/simulation/validate_chain.go    | 13 ++++---
 9 files changed, 100 insertions(+), 24 deletions(-)
 create mode 100644 go/enclave/storage/init/edgelessdb/002_init.sql
 create mode 100644 go/enclave/storage/init/sqlite/002_init.sql

diff --git a/go/enclave/components/rollup_compression.go b/go/enclave/components/rollup_compression.go
index 29425091ad..ed7c203272 100644
--- a/go/enclave/components/rollup_compression.go
+++ b/go/enclave/components/rollup_compression.go
@@ -112,17 +112,17 @@ func (rc *RollupCompression) CreateExtRollup(r *core.Rollup) (*common.ExtRollup,
 }
 
 // ProcessExtRollup - given an External rollup, responsible with checking and saving all batches found inside
-func (rc *RollupCompression) ProcessExtRollup(rollup *common.ExtRollup) error {
+func (rc *RollupCompression) ProcessExtRollup(rollup *common.ExtRollup) (*common.CalldataRollupHeader, error) {
 	transactionsPerBatch := make([][]*common.L2Tx, 0)
 	err := rc.decryptDecompressAndDeserialise(rollup.BatchPayloads, &transactionsPerBatch)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	calldataRollupHeader := new(common.CalldataRollupHeader)
 	err = rc.decryptDecompressAndDeserialise(rollup.CalldataRollupHeader, calldataRollupHeader)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	// The recreation of batches is a 2-step process:
@@ -130,16 +130,16 @@ func (rc *RollupCompression) ProcessExtRollup(rollup *common.ExtRollup) error {
 	// 1. calculate fields like: sequence, height, time, l1Proof, from the implicit and explicit information from the metadata
 	incompleteBatches, err := rc.createIncompleteBatches(calldataRollupHeader, transactionsPerBatch, rollup.Header.CompressionL1Head)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	// 2. execute each batch to be able to calculate the hash which is necessary for the next batch as it is the parent.
 	err = rc.executeAndSaveIncompleteBatches(calldataRollupHeader, incompleteBatches)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
-	return nil
+	return calldataRollupHeader, nil
 }
 
 // the main logic that goes from a list of batches to the rollup header
diff --git a/go/enclave/components/rollup_consumer.go b/go/enclave/components/rollup_consumer.go
index 3bbc35b4bf..8cc644798d 100644
--- a/go/enclave/components/rollup_consumer.go
+++ b/go/enclave/components/rollup_consumer.go
@@ -60,8 +60,14 @@ func (rc *rollupConsumerImpl) ProcessRollupsInBlock(b *common.BlockAndReceipts)
 	if len(rollups) > 0 {
 		for _, rollup := range rollups {
 			// read batch data from rollup, verify and store it
-			if err := rc.rollupCompression.ProcessExtRollup(rollup); err != nil {
+			internalHeader, err := rc.rollupCompression.ProcessExtRollup(rollup)
+			if err != nil {
 				rc.logger.Error("Failed processing rollup", log.RollupHashKey, rollup.Hash(), log.ErrKey, err)
+				// todo - issue challenge as a validator
+				return err
+			}
+			if err := rc.storage.StoreRollup(rollup, internalHeader); err != nil {
+				rc.logger.Error("Failed storing rollup", log.RollupHashKey, rollup.Hash(), log.ErrKey, err)
 				return err
 			}
 		}
diff --git a/go/enclave/nodetype/sequencer.go b/go/enclave/nodetype/sequencer.go
index 4eafd458f8..e624bc17e3 100644
--- a/go/enclave/nodetype/sequencer.go
+++ b/go/enclave/nodetype/sequencer.go
@@ -316,12 +316,24 @@ func (s *sequencer) SubmitTransaction(transaction *common.L2Tx) error {
 }
 
 func (s *sequencer) OnL1Fork(fork *common.ChainFork) error {
-	if fork.IsFork() {
-		err := s.duplicateBatches(fork.NewCanonical, fork.NonCanonicalPath)
-		if err != nil {
-			return fmt.Errorf("could not duplicate batches. Cause %w", err)
-		}
+	if !fork.IsFork() {
+		return nil
+	}
+
+	err := s.duplicateBatches(fork.NewCanonical, fork.NonCanonicalPath)
+	if err != nil {
+		return fmt.Errorf("could not duplicate batches. Cause %w", err)
 	}
+
+	rollup, err := s.storage.FetchReorgedRollup(fork.NonCanonicalPath)
+	if err == nil {
+		s.logger.Error("Reissue rollup", log.RollupHashKey, rollup)
+		return nil
+	}
 
+	if !errors.Is(err, errutil.ErrNotFound) {
+		return fmt.Errorf("could not call FetchReorgedRollup. Cause: %w", err)
+	}
+
 	return nil
 }
diff --git a/go/enclave/storage/enclavedb/block.go b/go/enclave/storage/enclavedb/block.go
index 54f125dc13..75adfcbb2a 100644
--- a/go/enclave/storage/enclavedb/block.go
+++ b/go/enclave/storage/enclavedb/block.go
@@ -22,7 +22,8 @@ const (
 	l1msgValue = "(?,?)"
 
 	selectL1Msg = "select message from l1_msg "
 
-	rollupInsert = "insert into rollup values (?,?,?,?)"
+	rollupInsert = "replace into rollup values (?,?,?,?,?)"
+	rollupSelect = "select hash from rollup where compression_block in "
 
 	updateCanonicalBlock = "update block set is_canonical=? where hash in "
@@ -144,21 +145,44 @@ func FetchL1Messages(db *sql.DB, blockHash common.L1BlockHash) (common.CrossChai
 	return result, nil
 }
 
-func WriteRollup(dbtx DBTransaction, rollup *common.RollupHeader) error {
+func WriteRollup(dbtx DBTransaction, rollup *common.RollupHeader, internalHeader *common.CalldataRollupHeader) error {
 	// Write the encoded header
 	data, err := rlp.EncodeToBytes(rollup)
 	if err != nil {
 		return fmt.Errorf("could not encode batch header. Cause: %w", err)
 	}
 	dbtx.ExecuteSQL(rollupInsert,
-		0,
-		0,
+		rollup.Hash(),
+		internalHeader.FirstBatchSequence.Uint64(),
+		rollup.LastBatchSeqNo,
 		data,
-		nil,
+		rollup.CompressionL1Head.Bytes(),
 	)
 	return nil
 }
 
+func FetchReorgedRollup(db *sql.DB, reorgedBlocks []common.L1BlockHash) (*common.L2BatchHash, error) {
+	argPlaceholders := strings.Repeat("?,", len(reorgedBlocks))
+	argPlaceholders = argPlaceholders[0 : len(argPlaceholders)-1] // remove trailing comma
+
+	query := rollupSelect + " (" + argPlaceholders + ")"
+
+	args := make([]any, 0)
+	for _, value := range reorgedBlocks {
+		args = append(args, value.Bytes())
+	}
+	rollup := new(common.L2BatchHash)
+	err := db.QueryRow(query, args...).Scan(&rollup)
+	if err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			// make sure the error is converted to obscuro-wide not found error
+			return nil, errutil.ErrNotFound
+		}
+		return nil, err
+	}
+	return rollup, nil
+}
+
 func fetchBlockHeader(db *sql.DB, whereQuery string, args ...any) (*types.Header, error) {
 	var header string
 	query := selectBlockHeader + " " + whereQuery
diff --git a/go/enclave/storage/init/edgelessdb/002_init.sql b/go/enclave/storage/init/edgelessdb/002_init.sql
new file mode 100644
index 0000000000..4b532ba776
--- /dev/null
+++ b/go/enclave/storage/init/edgelessdb/002_init.sql
@@ -0,0 +1,13 @@
+drop table obsdb.rollup;
+
+create table if not exists obsdb.rollup
+(
+    hash binary(32),
+    start_seq int NOT NULL,
+    end_seq int NOT NULL,
+    header blob NOT NULL,
+    compression_block binary(32) NOT NULL,
+    INDEX (compression_block),
+    primary key (hash)
+);
+GRANT ALL ON obsdb.rollup TO obscuro;
diff --git a/go/enclave/storage/init/sqlite/002_init.sql b/go/enclave/storage/init/sqlite/002_init.sql
new file mode 100644
index 0000000000..866fc1ae0d
--- /dev/null
+++ b/go/enclave/storage/init/sqlite/002_init.sql
@@ -0,0 +1,10 @@
+drop table rollup;
+
+create table rollup
+(
+    hash binary(32) primary key,
+    start_seq int NOT NULL,
+    end_seq int NOT NULL,
+    header blob NOT NULL,
+    compression_block binary(32) NOT NULL
+);
\ No newline at end of file
diff --git a/go/enclave/storage/interfaces.go b/go/enclave/storage/interfaces.go
index 0549bc4545..b32a7efec4 100644
--- a/go/enclave/storage/interfaces.go
+++ b/go/enclave/storage/interfaces.go
@@ -64,6 +64,10 @@ type BatchResolver interface {
 	StoreBatch(batch *core.Batch) error
 	// StoreExecutedBatch - store the batch after it was executed
 	StoreExecutedBatch(batch *core.Batch, receipts []*types.Receipt) error
+
+	// StoreRollup
+	StoreRollup(rollup *common.ExtRollup, header *common.CalldataRollupHeader) error
+	FetchReorgedRollup(reorgedBlocks []common.L1BlockHash) (*common.L2BatchHash, error)
 }
 
 type GethStateDB interface {
diff --git a/go/enclave/storage/storage.go b/go/enclave/storage/storage.go
index 4931aefb1c..568f2ee8b5 100644
--- a/go/enclave/storage/storage.go
+++ b/go/enclave/storage/storage.go
@@ -460,12 +460,12 @@ func (s *storageImpl) GetEnclaveKey() (*ecdsa.PrivateKey, error) {
 	return enclaveKey, nil
 }
 
-func (s *storageImpl) StoreRollup(rollup *common.ExtRollup) error {
+func (s *storageImpl) StoreRollup(rollup *common.ExtRollup, internalHeader *common.CalldataRollupHeader) error {
 	callStart := time.Now()
 	defer s.logDuration("StoreRollup", callStart)
 	dbBatch := s.db.NewDBTransaction()
 
-	if err := enclavedb.WriteRollup(dbBatch, rollup.Header); err != nil {
+	if err := enclavedb.WriteRollup(dbBatch, rollup.Header, internalHeader); err != nil {
 		return fmt.Errorf("could not write rollup. Cause: %w", err)
 	}
 
@@ -475,6 +475,10 @@ func (s *storageImpl) GetEnclaveKey() (*ecdsa.PrivateKey, error) {
 	return nil
 }
 
+func (s *storageImpl) FetchReorgedRollup(reorgedBlocks []common.L1BlockHash) (*common.L2BatchHash, error) {
+	return enclavedb.FetchReorgedRollup(s.db.GetSQLDB(), reorgedBlocks)
+}
+
 func (s *storageImpl) DebugGetLogs(txHash common.TxHash) ([]*tracers.DebugLogs, error) {
 	callStart := time.Now()
 	defer s.logDuration("DebugGetLogs", callStart)
diff --git a/integration/simulation/validate_chain.go b/integration/simulation/validate_chain.go
index bfe825a344..47defcbbbd 100644
--- a/integration/simulation/validate_chain.go
+++ b/integration/simulation/validate_chain.go
@@ -118,7 +118,7 @@ func checkObscuroBlockchainValidity(t *testing.T, s *Simulation, maxL1Height uin
 	min, max := minMax(heights)
 	// This checks that all the nodes are in sync. When a node falls behind with processing blocks it might highlight a problem.
 	// since there is one node that only listens to rollups it will be naturally behind.
-	if max-min > max/5 {
+	if max-min > max/3 {
 		t.Errorf("There is a problem with the Obscuro chain. Nodes fell out of sync. Max height: %d. Min height: %d -> %+v", max, min, heights)
 	}
 }
@@ -264,7 +264,7 @@ func checkBlockchainOfObscuroNode(t *testing.T, rpcHandles *network.RPCHandles,
 		t.Errorf("Node %d: Obscuro node fell behind by %d blocks.", nodeIdx, maxEthereumHeight-l1Height)
 	}
 
-	// check that the height of the Rollup chain is higher than a minimum expected value.
+	// check that the height of the l2 chain is higher than a minimum expected value.
 	headBatchHeader, err := getHeadBatchHeader(obscuroClient)
 	if err != nil {
 		t.Error(fmt.Errorf("node %d: %w", nodeIdx, err))
@@ -279,13 +279,16 @@ func checkBlockchainOfObscuroNode(t *testing.T, rpcHandles *network.RPCHandles,
 		t.Errorf("Node %d: Node only mined %d rollups. Expected at least: %d.", nodeIdx, l2Height, minObscuroHeight)
 	}
 
-	// check that the height from the rollup header is consistent with the height returned by eth_blockNumber.
+	// check that the height from the head batch header is consistent with the height returned by eth_blockNumber.
 	l2HeightFromBatchNumber, err := obscuroClient.BatchNumber()
 	if err != nil {
 		t.Errorf("Node %d: Could not retrieve block number. Cause: %s", nodeIdx, err)
 	}
-	if l2HeightFromBatchNumber != l2Height.Uint64() {
-		t.Errorf("Node %d: Node's head rollup had a height %d, but %s height was %d", nodeIdx, l2Height, rpc.BatchNumber, l2HeightFromBatchNumber)
+	// due to the difference in calling time, the enclave could produce another batch
+	const maxAcceptedDiff = 2
+	heightDiff := int(l2HeightFromBatchNumber) - int(l2Height.Uint64())
+	if heightDiff > maxAcceptedDiff || heightDiff < -maxAcceptedDiff {
+		t.Errorf("Node %d: Node's head batch had a height %d, but %s height was %d", nodeIdx, l2Height, rpc.BatchNumber, l2HeightFromBatchNumber)
 	}
 
 	notFoundTransfers, notFoundWithdrawals, notFoundNativeTransfers := FindNotIncludedL2Txs(s.ctx, nodeIdx, rpcHandles, s.TxInjector)