diff --git a/go/enclave/components/block_processor.go b/go/enclave/components/block_processor.go index 91da7849b5..3e7ce8582d 100644 --- a/go/enclave/components/block_processor.go +++ b/go/enclave/components/block_processor.go @@ -95,6 +95,7 @@ func (bp *l1BlockProcessor) ingestBlock(block *common.L1Block) (*BlockIngestionT bp.logger.Trace("parent not found", "blkHeight", block.NumberU64(), log.BlockHashKey, block.Hash(), "l1HeadHeight", prevL1Head.NumberU64(), "l1HeadHash", prevL1Head.Hash(), + log.ErrKey, err, ) return nil, errutil.ErrBlockAncestorNotFound } diff --git a/go/enclave/storage/db_cache.go b/go/enclave/storage/db_cache.go new file mode 100644 index 0000000000..d9f5dfa5cc --- /dev/null +++ b/go/enclave/storage/db_cache.go @@ -0,0 +1,39 @@ +package storage + +import ( + "context" + + "github.com/eko/gocache/lib/v4/cache" + gethlog "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/obscuronet/go-obscuro/go/common/log" +) + +func getCachedValue[V any](cache *cache.Cache[[]byte], logger gethlog.Logger, key any, onFailed func(any) (V, error)) (V, error) { + value, err := cache.Get(context.Background(), key) + if err != nil { + // todo metrics for cache misses + b, err := onFailed(key) + if err != nil { + return b, err + } + cacheValue(cache, logger, key, b) + return b, err + } + + v := new(V) + err = rlp.DecodeBytes(value, v) + return *v, err +} + +func cacheValue(cache *cache.Cache[[]byte], logger gethlog.Logger, key any, v any) { + encoded, err := rlp.EncodeToBytes(v) + if err != nil { + logger.Error("Could not encode value to store in cache", log.ErrKey, err) + return + } + err = cache.Set(context.Background(), key, encoded) + if err != nil { + logger.Error("Could not store value in cache", log.ErrKey, err) + } +} diff --git a/go/enclave/storage/enclavedb/batch.go b/go/enclave/storage/enclavedb/batch.go index 32a3fecbf8..dc52b39e0d 100644 --- a/go/enclave/storage/enclavedb/batch.go +++ 
b/go/enclave/storage/enclavedb/batch.go @@ -2,6 +2,7 @@ package enclavedb import ( "bytes" + "crypto/sha256" "database/sql" "errors" "fmt" @@ -25,7 +26,7 @@ const ( txInsertValue = "(?,?,?,?,?,?)" bInsert = "insert into batch values (?,?,?,?,?,?,?,?,?)" - updateBatchExecuted = "update batch set is_executed=true where hash=?" + updateBatchExecuted = "update batch set is_executed=true where sequence=?" selectBatch = "select b.header, bb.content from batch b join batch_body bb on b.body=bb.hash" selectHeader = "select b.header from batch b" @@ -49,7 +50,7 @@ const ( // WriteBatchAndTransactions - persists the batch and the transactions func WriteBatchAndTransactions(dbtx DBTransaction, batch *core.Batch) error { - bodyHash := batch.Header.TxHash.Bytes() + bodyHash := truncTo16(batch.Header.TxHash) body, err := rlp.EncodeToBytes(batch.Transactions) if err != nil { @@ -64,12 +65,11 @@ func WriteBatchAndTransactions(dbtx DBTransaction, batch *core.Batch) error { var parentBytes []byte if batch.Number().Uint64() > 0 { - parentBytes = batch.Header.ParentHash.Bytes() + parentBytes = truncTo16(batch.Header.ParentHash) } - // todo - this can be removed if the batches have no is_canonical var isCanon bool - err = dbtx.GetDB().QueryRow(isCanonQuery, batch.Header.L1Proof.Bytes()).Scan(&isCanon) + err = dbtx.GetDB().QueryRow(isCanonQuery, truncTo16(batch.Header.L1Proof)).Scan(&isCanon) if err != nil { // if the block is not found, we assume it is non-canonical // fmt.Printf("IsCanon %s err: %s\n", batch.Header.L1Proof, err) @@ -77,14 +77,14 @@ func WriteBatchAndTransactions(dbtx DBTransaction, batch *core.Batch) error { } dbtx.ExecuteSQL(bInsert, - batch.Hash().Bytes(), // hash - parentBytes, // parent batch.Header.SequencerOrderNo.Uint64(), // sequence + truncTo16(batch.Hash()), // hash + parentBytes, // parent batch.Header.Number.Uint64(), // height isCanon, // is_canonical header, // header blob bodyHash, // reference to the batch body - batch.Header.L1Proof.Bytes(), // 
l1_proof + truncTo16(batch.Header.L1Proof), // l1_proof false, // executed ) @@ -105,12 +105,12 @@ func WriteBatchAndTransactions(dbtx DBTransaction, batch *core.Batch) error { return fmt.Errorf("unable to convert tx to message - %w", err) } - args = append(args, transaction.Hash().Bytes()) // tx_hash - args = append(args, txBytes) // content - args = append(args, from.Bytes()) // sender_address - args = append(args, transaction.Nonce()) // nonce - args = append(args, i) // idx - args = append(args, bodyHash) // the batch body which contained it + args = append(args, truncTo16(transaction.Hash())) // tx_hash + args = append(args, txBytes) // content + args = append(args, from.Bytes()) // sender_address + args = append(args, transaction.Nonce()) // nonce + args = append(args, i) // idx + args = append(args, bodyHash) // the batch body which contained it } dbtx.ExecuteSQL(insert, args...) } @@ -119,8 +119,8 @@ func WriteBatchAndTransactions(dbtx DBTransaction, batch *core.Batch) error { } // WriteBatchExecution - insert all receipts to the db -func WriteBatchExecution(dbtx DBTransaction, hash common.L2BatchHash, receipts []*types.Receipt) error { - dbtx.ExecuteSQL(updateBatchExecuted, hash.Bytes()) +func WriteBatchExecution(dbtx DBTransaction, seqNo *big.Int, receipts []*types.Receipt) error { + dbtx.ExecuteSQL(updateBatchExecuted, seqNo.Uint64()) args := make([]any, 0) for _, receipt := range receipts { @@ -134,8 +134,8 @@ func WriteBatchExecution(dbtx DBTransaction, hash common.L2BatchHash, receipts [ args = append(args, executedTransactionID(&receipt.BlockHash, &receipt.TxHash)) // PK args = append(args, receipt.ContractAddress.Bytes()) // created_contract_address args = append(args, receiptBytes) // the serialised receipt - args = append(args, receipt.TxHash.Bytes()) // tx_hash - args = append(args, receipt.BlockHash.Bytes()) // batch_hash + args = append(args, truncTo16(receipt.TxHash)) // tx_hash + args = append(args, seqNo.Uint64()) // batch_seq } if len(args) 
> 0 { insert := txExecInsert + strings.Repeat(txExecInsertValue+",", len(receipts)) @@ -150,7 +150,7 @@ func executedTransactionID(batchHash *common.L2BatchHash, txHash *common.L2TxHas execTxID := make([]byte, 0) execTxID = append(execTxID, batchHash.Bytes()...) execTxID = append(execTxID, txHash.Bytes()...) - return execTxID + return truncTo16(sha256.Sum256(execTxID)) } func ReadBatchBySeqNo(db *sql.DB, seqNo uint64) (*core.Batch, error) { @@ -158,7 +158,7 @@ func ReadBatchBySeqNo(db *sql.DB, seqNo uint64) (*core.Batch, error) { } func ReadBatchByHash(db *sql.DB, hash common.L2BatchHash) (*core.Batch, error) { - return fetchBatch(db, " where b.hash=?", hash.Bytes()) + return fetchBatch(db, " where b.hash=?", truncTo16(hash)) } func ReadCanonicalBatchByHeight(db *sql.DB, height uint64) (*core.Batch, error) { @@ -166,7 +166,7 @@ func ReadCanonicalBatchByHeight(db *sql.DB, height uint64) (*core.Batch, error) } func ReadBatchHeader(db *sql.DB, hash gethcommon.Hash) (*common.BatchHeader, error) { - return fetchBatchHeader(db, " where hash=?", hash.Bytes()) + return fetchBatchHeader(db, " where hash=?", truncTo16(hash)) } // todo - is there a better way to write this query? @@ -175,7 +175,7 @@ func ReadCurrentHeadBatch(db *sql.DB) (*core.Batch, error) { } func ReadBatchesByBlock(db *sql.DB, hash common.L1BlockHash) ([]*core.Batch, error) { - return fetchBatches(db, " where b.l1_proof=? order by b.sequence", hash.Bytes()) + return fetchBatches(db, " where b.l1_proof=? 
order by b.sequence", truncTo16(hash)) } func ReadCurrentSequencerNo(db *sql.DB) (*big.Int, error) { @@ -197,7 +197,7 @@ func ReadCurrentSequencerNo(db *sql.DB) (*big.Int, error) { func ReadHeadBatchForBlock(db *sql.DB, l1Hash common.L1BlockHash) (*core.Batch, error) { query := " where b.is_canonical=true and b.is_executed=true and b.height=(select max(b1.height) from batch b1 where b1.is_canonical=true and b1.is_executed=true and b1.l1_proof=?)" - return fetchBatch(db, query, l1Hash.Bytes()) + return fetchBatch(db, query, truncTo16(l1Hash)) } func fetchBatch(db *sql.DB, whereQuery string, args ...any) (*core.Batch, error) { @@ -355,11 +355,11 @@ func selectReceipts(db *sql.DB, config *params.ChainConfig, query string, args . // corresponding block body, so if the block body is not found it will return nil even // if the receipt itself is stored. func ReadReceiptsByBatchHash(db *sql.DB, hash common.L2BatchHash, config *params.ChainConfig) (types.Receipts, error) { - return selectReceipts(db, config, "where batch.hash = ?", hash.Bytes()) + return selectReceipts(db, config, "where batch.hash = ?", truncTo16(hash)) } func ReadReceipt(db *sql.DB, hash common.L2TxHash, config *params.ChainConfig) (*types.Receipt, error) { - row := db.QueryRow(queryReceipts+" where tx=?", hash.Bytes()) + row := db.QueryRow(queryReceipts+" where tx=?", truncTo16(hash)) // receipt, tx, batch, height var receiptData []byte var txData []byte @@ -394,7 +394,7 @@ func ReadReceipt(db *sql.DB, hash common.L2TxHash, config *params.ChainConfig) ( } func ReadTransaction(db *sql.DB, txHash gethcommon.Hash) (*types.Transaction, gethcommon.Hash, uint64, uint64, error) { - row := db.QueryRow(selectTxQuery, txHash.Bytes()) + row := db.QueryRow(selectTxQuery, truncTo16(txHash)) // tx, batch, height, idx var txData []byte @@ -452,7 +452,7 @@ func ReadUnexecutedBatches(db *sql.DB) ([]*core.Batch, error) { } func BatchWasExecuted(db *sql.DB, hash common.L2BatchHash) (bool, error) { - row := 
db.QueryRow(queryBatchWasExecuted, hash.Bytes()) + row := db.QueryRow(queryBatchWasExecuted, truncTo16(hash)) var result bool err := row.Scan(&result) diff --git a/go/enclave/storage/enclavedb/block.go b/go/enclave/storage/enclavedb/block.go index 75adfcbb2a..f828d742d5 100644 --- a/go/enclave/storage/enclavedb/block.go +++ b/go/enclave/storage/enclavedb/block.go @@ -39,14 +39,14 @@ func WriteBlock(dbtx DBTransaction, b *types.Header) error { var parentBytes []byte if b.Number.Uint64() > 1 { - parentBytes = b.ParentHash.Bytes() + parentBytes = truncTo16(b.ParentHash) } dbtx.ExecuteSQL(blockInsert, - b.Hash().Bytes(), - parentBytes, - true, - header, - b.Number.Uint64(), + truncTo16(b.Hash()), // hash + parentBytes, // parent + true, // is_canonical + header, // header + b.Number.Uint64(), // height ) return nil } @@ -70,19 +70,19 @@ func updateCanonicalValue(dbtx DBTransaction, isCanonical bool, values []common. args := make([]any, 0) args = append(args, isCanonical) for _, value := range values { - args = append(args, value.Bytes()) + args = append(args, truncTo16(value)) } dbtx.ExecuteSQL(updateBlocks, args...) dbtx.ExecuteSQL(updateBatches, args...) } -func FetchBlockHeader(db *sql.DB, hash common.L2BatchHash) (*types.Header, error) { - return fetchBlockHeader(db, " where hash=?", hash.Bytes()) +func FetchBlockHeader(db *sql.DB, hash common.L1BlockHash) (*types.Header, error) { + return fetchBlockHeader(db, " where hash=?", truncTo16(hash)) } // todo - remove this. For now creates a "block" but without a body. 
-func FetchBlock(db *sql.DB, hash common.L2BatchHash) (*types.Block, error) { - return fetchBlock(db, " where hash=?", hash.Bytes()) +func FetchBlock(db *sql.DB, hash common.L1BlockHash) (*types.Block, error) { + return fetchBlock(db, " where hash=?", truncTo16(hash)) } func FetchHeadBlock(db *sql.DB) (*types.Block, error) { @@ -105,7 +105,7 @@ func WriteL1Messages(db *sql.DB, blockHash common.L1BlockHash, messages common.C return err } args = append(args, data) - args = append(args, blockHash.Bytes()) + args = append(args, truncTo16(blockHash)) } if len(messages) > 0 { _, err := db.Exec(insert, args...) @@ -117,7 +117,7 @@ func WriteL1Messages(db *sql.DB, blockHash common.L1BlockHash, messages common.C func FetchL1Messages(db *sql.DB, blockHash common.L1BlockHash) (common.CrossChainMessages, error) { var result common.CrossChainMessages query := selectL1Msg + " where block = ?" - rows, err := db.Query(query, blockHash.Bytes()) + rows, err := db.Query(query, truncTo16(blockHash)) if err != nil { if errors.Is(err, sql.ErrNoRows) { // make sure the error is converted to obscuro-wide not found error @@ -152,11 +152,11 @@ func WriteRollup(dbtx DBTransaction, rollup *common.RollupHeader, internalHeader return fmt.Errorf("could not encode batch header. 
Cause: %w", err) } dbtx.ExecuteSQL(rollupInsert, - rollup.Hash(), + truncTo16(rollup.Hash()), internalHeader.FirstBatchSequence.Uint64(), rollup.LastBatchSeqNo, data, - rollup.CompressionL1Head.Bytes(), + truncTo16(rollup.CompressionL1Head), ) return nil } @@ -169,7 +169,7 @@ func FetchReorgedRollup(db *sql.DB, reorgedBlocks []common.L1BlockHash) (*common args := make([]any, 0) for _, value := range reorgedBlocks { - args = append(args, value.Bytes()) + args = append(args, truncTo16(value)) } rollup := new(common.L2BatchHash) err := db.QueryRow(query, args...).Scan(&rollup) diff --git a/go/enclave/storage/enclavedb/events.go b/go/enclave/storage/enclavedb/events.go index 2bdfbfb7ee..7bd3d3baa0 100644 --- a/go/enclave/storage/enclavedb/events.go +++ b/go/enclave/storage/enclavedb/events.go @@ -122,7 +122,7 @@ func writeLog(db *sql.DB, l *types.Log, receipt *types.Receipt, stateDB *state.S } return []any{ - t0, t1, t2, t3, t4, + truncBTo16(t0), truncBTo16(t1), truncBTo16(t2), truncBTo16(t3), truncBTo16(t4), data, l.Index, l.Address.Bytes(), isLifecycle, a1, a2, a3, a4, executedTransactionID(&receipt.BlockHash, &l.TxHash), @@ -133,15 +133,15 @@ func FilterLogs( db *sql.DB, requestingAccount *gethcommon.Address, fromBlock, toBlock *big.Int, - blockHash *common.L2BatchHash, + batchHash *common.L2BatchHash, addresses []gethcommon.Address, topics [][]gethcommon.Hash, ) ([]*types.Log, error) { queryParams := []any{} query := "" - if blockHash != nil { + if batchHash != nil { query += " AND b.hash = ?" - queryParams = append(queryParams, blockHash.Bytes()) + queryParams = append(queryParams, truncTo16(*batchHash)) } // ignore negative numbers @@ -170,7 +170,7 @@ func FilterLogs( column := fmt.Sprintf("topic%d", i) query += " AND " + column + " in (?" 
+ strings.Repeat(",?", len(sub)-1) + ")" for _, topic := range sub { - queryParams = append(queryParams, topic.Bytes()) + queryParams = append(queryParams, truncTo16(topic)) } } } @@ -184,7 +184,7 @@ func DebugGetLogs(db *sql.DB, txHash common.TxHash) ([]*tracers.DebugLogs, error query := baseDebugEventsQuerySelect + " " + baseEventsJoin + "AND tx.hash = ?" - queryParams = append(queryParams, txHash.Bytes()) + queryParams = append(queryParams, truncTo16(txHash)) result := make([]*tracers.DebugLogs, 0) @@ -202,6 +202,7 @@ func DebugGetLogs(db *sql.DB, txHash common.TxHash) ([]*tracers.DebugLogs, error LifecycleEvent: false, } + // todo - read datablob var t0, t1, t2, t3, t4 sql.NullString var relAddress1, relAddress2, relAddress3, relAddress4 sql.NullByte err = rows.Scan( diff --git a/go/enclave/storage/enclavedb/utils.go b/go/enclave/storage/enclavedb/utils.go new file mode 100644 index 0000000000..da27f38174 --- /dev/null +++ b/go/enclave/storage/enclavedb/utils.go @@ -0,0 +1,19 @@ +package enclavedb + +import gethcommon "github.com/ethereum/go-ethereum/common" + +const truncHash = 16 + +func truncTo16(hash gethcommon.Hash) []byte { + return truncBTo16(hash.Bytes()) +} + +func truncBTo16(bytes []byte) []byte { + if len(bytes) < truncHash { + return bytes + } + b := bytes[0:truncHash] + c := make([]byte, truncHash) + copy(c, b) + return c +} diff --git a/go/enclave/storage/init/edgelessdb/001_init.sql b/go/enclave/storage/init/edgelessdb/001_init.sql index fb81076e49..906cebc23e 100644 --- a/go/enclave/storage/init/edgelessdb/001_init.sql +++ b/go/enclave/storage/init/edgelessdb/001_init.sql @@ -29,16 +29,16 @@ GRANT ALL ON obsdb.attestation_key TO obscuro; create table if not exists obsdb.block ( - hash binary(32), - parent binary(32), + hash binary(16), + parent binary(16), is_canonical boolean NOT NULL, header blob NOT NULL, height int NOT NULL, - INDEX (parent), +# INDEX (parent), primary key (hash), INDEX (is_canonical), - INDEX (height), - INDEX (is_canonical, height) + INDEX (height) +# INDEX 
(is_canonical, height) ); GRANT ALL ON obsdb.block TO obscuro; @@ -46,7 +46,7 @@ create table if not exists obsdb.l1_msg ( id INTEGER AUTO_INCREMENT, message varbinary(1024) NOT NULL, - block binary(32) NOT NULL, + block binary(16) NOT NULL, INDEX (block), primary key (id) ); @@ -54,19 +54,19 @@ GRANT ALL ON obsdb.l1_msg TO obscuro; create table if not exists obsdb.rollup ( - id INTEGER AUTO_INCREMENT, - start_seq int NOT NULL, - end_seq int NOT NULL, - header blob NOT NULL, - block binary(32) NOT NULL, - INDEX (block), - primary key (id) + hash binary(16), + start_seq int NOT NULL, + end_seq int NOT NULL, + header blob NOT NULL, + compression_block binary(16) NOT NULL, + INDEX (compression_block), + primary key (hash) ); GRANT ALL ON obsdb.rollup TO obscuro; create table if not exists obsdb.batch_body ( - hash binary(32), + hash binary(16), content mediumblob NOT NULL, primary key (hash) ); @@ -74,14 +74,14 @@ GRANT ALL ON obsdb.batch_body TO obscuro; create table if not exists obsdb.batch ( - hash binary(32), - parent binary(32), + hash binary(16), + parent binary(16), sequence int NOT NULL, height int NOT NULL, is_canonical boolean NOT NULL, header blob NOT NULL, - body binary(32) NOT NULL, - l1_proof binary(32) NOT NULL, + body binary(16) NOT NULL, + l1_proof binary(16) NOT NULL, is_executed boolean NOT NULL, INDEX (parent), INDEX (body), @@ -90,20 +90,20 @@ create table if not exists obsdb.batch INDEX (sequence), INDEX (is_canonical), INDEX (is_executed), - INDEX (is_canonical, is_executed), - INDEX (is_canonical, is_executed, height), +# INDEX (is_canonical, is_executed), +# INDEX (is_canonical, is_executed, height), primary key (hash) ); GRANT ALL ON obsdb.batch TO obscuro; create table if not exists obsdb.tx ( - hash binary(32), + hash binary(16), content mediumblob NOT NULL, sender_address binary(20) NOT NULL, nonce int NOT NULL, idx int NOT NULL, - body binary(32) NOT NULL, + body binary(16) NOT NULL, INDEX (body), primary key (hash) ); @@ -111,11 +111,11 
@@ GRANT ALL ON obsdb.tx TO obscuro; create table if not exists obsdb.exec_tx ( - id binary(64), + id binary(16), created_contract_address binary(20), receipt mediumblob, - tx binary(32) NOT NULL, - batch binary(32) NOT NULL, + tx binary(16) NOT NULL, + batch int NOT NULL, INDEX (batch), INDEX (tx), primary key (id) @@ -124,11 +124,11 @@ GRANT ALL ON obsdb.exec_tx TO obscuro; create table if not exists obsdb.events ( - topic0 binary(32) NOT NULL, - topic1 binary(32), - topic2 binary(32), - topic3 binary(32), - topic4 binary(32), + topic0 binary(16) NOT NULL, + topic1 binary(16), + topic2 binary(16), + topic3 binary(16), + topic4 binary(16), datablob mediumblob, log_idx int NOT NULL, address binary(20) NOT NULL, @@ -137,7 +137,7 @@ create table if not exists obsdb.events rel_address2 binary(20), rel_address3 binary(20), rel_address4 binary(20), - exec_tx_id binary(64) NOT NULL, + exec_tx_id binary(16) NOT NULL, INDEX (exec_tx_id), INDEX (address), INDEX (rel_address1), diff --git a/go/enclave/storage/init/edgelessdb/002_init.sql b/go/enclave/storage/init/edgelessdb/002_init.sql deleted file mode 100644 index 4b532ba776..0000000000 --- a/go/enclave/storage/init/edgelessdb/002_init.sql +++ /dev/null @@ -1,13 +0,0 @@ -drop table obsdb.rollup; - -create table if not exists obsdb.rollup -( - hash binary(32), - start_seq int NOT NULL, - end_seq int NOT NULL, - header blob NOT NULL, - compression_block binary(32) NOT NULL, - INDEX (compression_block), - primary key (hash) -); -GRANT ALL ON obsdb.rollup TO obscuro; diff --git a/go/enclave/storage/init/sqlite/001_init.sql b/go/enclave/storage/init/sqlite/001_init.sql index 1d48f5b435..da91880976 100644 --- a/go/enclave/storage/init/sqlite/001_init.sql +++ b/go/enclave/storage/init/sqlite/001_init.sql @@ -22,8 +22,8 @@ create table if not exists attestation_key create table if not exists block ( - hash binary(32) primary key, - parent binary(32), + hash binary(16) primary key, + parent binary(16), is_canonical boolean 
NOT NULL, header blob NOT NULL, height int NOT NULL @@ -36,71 +36,72 @@ create table if not exists l1_msg ( id INTEGER PRIMARY KEY AUTOINCREMENT, message varbinary(1024) NOT NULL, - block binary(32) NOT NULL REFERENCES block + block binary(16) NOT NULL REFERENCES block ); create table if not exists rollup ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - start_seq int NOT NULL, - end_seq int NOT NULL, - header blob NOT NULL, - block binary(32) NOT NULL REFERENCES block + hash binary(16) primary key, + start_seq int NOT NULL, + end_seq int NOT NULL, + header blob NOT NULL, + compression_block binary(16) NOT NULL ); create table if not exists batch_body ( - hash binary(32) primary key, + hash binary(16) primary key, content mediumblob NOT NULL ); create table if not exists batch ( - hash binary(32) primary key, - parent binary(32),-- REFERENCES batch, - sequence int NOT NULL unique, - height int NOT NULL, - is_canonical boolean NOT NULL, - header blob NOT NULL, - body binary(32) NOT NULL REFERENCES batch_body, - l1_proof binary(32) NOT NULL, -- normally this would be a FK, but there is a weird edge case where an L2 node might not have the block used to create this batch - is_executed boolean NOT NULL + sequence int primary key, + hash binary(16) NOT NULL unique, + parent binary(16), + height int NOT NULL, + is_canonical boolean NOT NULL, + header blob NOT NULL, + body binary(16) NOT NULL REFERENCES batch_body, + l1_proof binary(16) NOT NULL, -- normally this would be a FK, but there is a weird edge case where an L2 node might not have the block used to create this batch + is_executed boolean NOT NULL -- the unique constraint is commented for now because there might be multiple non-canonical batches for the same height -- unique (height, is_canonical, is_executed) ); create index IDX_BATCH_HEIGHT on batch (height); -create index IDX_BATCH_SEQ on batch (sequence); +create index IDX_BATCH_HASH on batch (hash); create index IDX_BATCH_Block on batch (l1_proof); create table if 
not exists tx ( - hash binary(32) primary key, + hash binary(16) primary key, content mediumblob NOT NULL, sender_address binary(20) NOT NULL, nonce int NOT NULL, idx int NOT NULL, - body binary(32) REFERENCES batch_body + body binary(16) REFERENCES batch_body ); create table if not exists exec_tx ( - id binary(64) PRIMARY KEY, -- batch_hash||tx_hash + id binary(16) PRIMARY KEY, -- batch_hash||tx_hash created_contract_address binary(20), receipt mediumblob, -- commenting out the fk until synthetic transactions are also stored --- tx binary(32) REFERENCES tx, - tx binary(32) NOT NULL, - batch binary(32) NOT NULL REFERENCES batch +-- tx binary(16) REFERENCES tx, + tx binary(16) NOT NULL, + batch int NOT NULL REFERENCES batch ); create index IX_EX_TX1 on exec_tx (tx); +-- todo denormalize. Extract contract and user table and point topic0 and rel_addreses to it create table if not exists events ( - topic0 binary(32) NOT NULL, - topic1 binary(32), - topic2 binary(32), - topic3 binary(32), - topic4 binary(32), + topic0 binary(16) NOT NULL, + topic1 binary(16), + topic2 binary(16), + topic3 binary(16), + topic4 binary(16), datablob mediumblob, log_idx int NOT NULL, address binary(20) NOT NULL, @@ -109,15 +110,15 @@ create table if not exists events rel_address2 binary(20), rel_address3 binary(20), rel_address4 binary(20), - exec_tx_id binary(64) REFERENCES exec_tx + exec_tx_id binary(16) REFERENCES exec_tx ); -create index IX_AD on events (address); -create index IX_RAD1 on events (rel_address1); -create index IX_RAD2 on events (rel_address2); -create index IX_RAD3 on events (rel_address3); -create index IX_RAD4 on events (rel_address4); -create index IX_T0 on events (topic0); -create index IX_T1 on events (topic1); -create index IX_T2 on events (topic2); -create index IX_T3 on events (topic3); -create index IX_T4 on events (topic4); \ No newline at end of file +create index IDX_AD on events (address); +create index IDX_RAD1 on events (rel_address1); +create index 
IDX_RAD2 on events (rel_address2); +create index IDX_RAD3 on events (rel_address3); +create index IDX_RAD4 on events (rel_address4); +create index IDX_T0 on events (topic0); +create index IDX_T1 on events (topic1); +create index IDX_T2 on events (topic2); +create index IDX_T3 on events (topic3); +create index IDX_T4 on events (topic4); \ No newline at end of file diff --git a/go/enclave/storage/init/sqlite/002_init.sql b/go/enclave/storage/init/sqlite/002_init.sql deleted file mode 100644 index 866fc1ae0d..0000000000 --- a/go/enclave/storage/init/sqlite/002_init.sql +++ /dev/null @@ -1,10 +0,0 @@ -drop table rollup; - -create table rollup -( - hash binary(32) primary key, - start_seq int NOT NULL, - end_seq int NOT NULL, - header blob NOT NULL, - compression_block binary(32) NOT NULL -); \ No newline at end of file diff --git a/go/enclave/storage/storage.go b/go/enclave/storage/storage.go index 568f2ee8b5..d621ffeaeb 100644 --- a/go/enclave/storage/storage.go +++ b/go/enclave/storage/storage.go @@ -118,8 +118,8 @@ func (s *storageImpl) FetchCurrentSequencerNo() (*big.Int, error) { func (s *storageImpl) FetchBatch(hash common.L2BatchHash) (*core.Batch, error) { callStart := time.Now() defer s.logDuration("FetchBatch", callStart) - return s.getCachedBatch(hash, func(hash common.L2BatchHash) (*core.Batch, error) { - return enclavedb.ReadBatchByHash(s.db.GetSQLDB(), hash) + return getCachedValue(s.batchCache, s.logger, hash, func(v any) (*core.Batch, error) { + return enclavedb.ReadBatchByHash(s.db.GetSQLDB(), v.(common.L2BatchHash)) }) } @@ -159,7 +159,7 @@ func (s *storageImpl) StoreBlock(b *types.Block, chainFork *common.ChainFork) er return fmt.Errorf("3. could not store block %s. 
Cause: %w", b.Hash(), err) } - s.cacheBlock(b.Hash(), b) + cacheValue(s.blockCache, s.logger, b.Hash(), b) return nil } @@ -167,8 +167,8 @@ func (s *storageImpl) StoreBlock(b *types.Block, chainFork *common.ChainFork) er func (s *storageImpl) FetchBlock(blockHash common.L1BlockHash) (*types.Block, error) { callStart := time.Now() defer s.logDuration("FetchBlock", callStart) - return s.getCachedBlock(blockHash, func(hash common.L1BlockHash) (*types.Block, error) { - return enclavedb.FetchBlock(s.db.GetSQLDB(), blockHash) + return getCachedValue(s.blockCache, s.logger, blockHash, func(hash any) (*types.Block, error) { + return enclavedb.FetchBlock(s.db.GetSQLDB(), hash.(common.L1BlockHash)) }) } @@ -180,8 +180,8 @@ func (s *storageImpl) FetchCanonicaBlockByHeight(height *big.Int) (*types.Block, return nil, err } blockHash := header.Hash() - return s.getCachedBlock(blockHash, func(hash common.L1BlockHash) (*types.Block, error) { - return enclavedb.FetchBlock(s.db.GetSQLDB(), blockHash) + return getCachedValue(s.blockCache, s.logger, blockHash, func(hash any) (*types.Block, error) { + return enclavedb.FetchBlock(s.db.GetSQLDB(), hash.(common.L1BlockHash)) }) } @@ -345,7 +345,9 @@ func (s *storageImpl) StoreAttestedKey(aggregator gethcommon.Address, key *ecdsa func (s *storageImpl) FetchBatchBySeqNo(seqNum uint64) (*core.Batch, error) { callStart := time.Now() defer s.logDuration("FetchBatchBySeqNo", callStart) - return enclavedb.ReadBatchBySeqNo(s.db.GetSQLDB(), seqNum) + return getCachedValue(s.batchCache, s.logger, seqNum, func(seq any) (*core.Batch, error) { + return enclavedb.ReadBatchBySeqNo(s.db.GetSQLDB(), seq.(uint64)) + }) } func (s *storageImpl) FetchBatchesByBlock(block common.L1BlockHash) ([]*core.Batch, error) { @@ -361,7 +363,7 @@ func (s *storageImpl) StoreBatch(batch *core.Batch) error { existingBatchWithSameSequence, _ := s.FetchBatchBySeqNo(batch.SeqNo().Uint64()) if existingBatchWithSameSequence != nil && existingBatchWithSameSequence.Hash() !=
batch.Hash() { // todo - tudor - remove the Critical before production, and return a challenge - s.logger.Crit(fmt.Sprintf("Conflicting batches for the same sequence %d: (previous) %s != (incoming) %s", batch.SeqNo(), existingBatchWithSameSequence.Hash(), batch.Hash())) + s.logger.Crit(fmt.Sprintf("Conflicting batches for the same sequence %d: (previous) %+v != (incoming) %+v", batch.SeqNo(), existingBatchWithSameSequence.Header, batch.Header)) return fmt.Errorf("a different batch with same sequence number already exists: %d", batch.SeqNo()) } @@ -380,7 +382,7 @@ func (s *storageImpl) StoreBatch(batch *core.Batch) error { return fmt.Errorf("could not commit batch %w", err) } - s.cacheBatch(batch.Hash(), batch) + cacheValue(s.batchCache, s.logger, batch.Hash(), batch) return nil } @@ -397,7 +399,7 @@ func (s *storageImpl) StoreExecutedBatch(batch *core.Batch, receipts []*types.Re } dbTx := s.db.NewDBTransaction() - if err := enclavedb.WriteBatchExecution(dbTx, batch.Hash(), receipts); err != nil { + if err := enclavedb.WriteBatchExecution(dbTx, batch.SeqNo(), receipts); err != nil { return fmt.Errorf("could not write transaction receipts. 
Cause: %w", err) } @@ -539,63 +541,6 @@ func (s *storageImpl) GetPublicTransactionCount() (uint64, error) { return enclavedb.GetPublicTransactionCount(s.db.GetSQLDB()) } -func (s *storageImpl) cacheBlock(blockHash common.L1BlockHash, b *types.Block) { - var buffer bytes.Buffer - if err := b.EncodeRLP(&buffer); err != nil { - s.logger.Error("Could not encode block to store block in cache", log.ErrKey, err) - return - } - err := s.blockCache.Set(context.Background(), blockHash, buffer.Bytes()) - if err != nil { - s.logger.Error("Could not store block in cache", log.ErrKey, err) - } -} - -func (s *storageImpl) getCachedBlock(hash common.L1BlockHash, onFailed func(common.L1BlockHash) (*types.Block, error)) (*types.Block, error) { - value, err := s.blockCache.Get(context.Background(), hash) - if err != nil { - // todo metrics for cache misses - b, err := onFailed(hash) - if err != nil { - return b, err - } - s.cacheBlock(hash, b) - return b, err - } - - b := new(types.Block) - err = rlp.DecodeBytes(value, b) - return b, err -} - -func (s *storageImpl) getCachedBatch(hash common.L2BatchHash, onFailed func(common.L2BatchHash) (*core.Batch, error)) (*core.Batch, error) { - value, err := s.batchCache.Get(context.Background(), hash) - if err != nil { - b, err := onFailed(hash) - if err != nil { - return b, err - } - s.cacheBatch(hash, b) - return b, err - } - - b := new(core.Batch) - err = rlp.DecodeBytes(value, b) - return b, err -} - -func (s *storageImpl) cacheBatch(batchHash common.L2BatchHash, b *core.Batch) { - value, err := rlp.EncodeToBytes(b) - if err != nil { - s.logger.Error("Could not encode block to store block in cache", log.ErrKey, err) - return - } - err = s.batchCache.Set(context.Background(), batchHash, value) - if err != nil { - s.logger.Error("Could not store batch in cache", log.ErrKey, err) - } -} - func (s *storageImpl) logDuration(method string, callStart time.Time) { durationMillis := time.Since(callStart).Milliseconds() // we only log 'slow' calls 
to reduce noise