From 622cda612321de914e738e48b0670b15fac4b869 Mon Sep 17 00:00:00 2001 From: Tudor Malene Date: Tue, 30 Apr 2024 17:56:54 +0100 Subject: [PATCH 01/15] db performance --- go/enclave/storage/enclavedb/batch.go | 127 +++++++----------- go/enclave/storage/enclavedb/block.go | 82 +++++------ go/enclave/storage/enclavedb/events.go | 69 +++++++--- go/enclave/storage/enclavedb/utils.go | 12 +- .../storage/init/edgelessdb/001_init.sql | 106 +++++++++------ go/enclave/storage/init/sqlite/001_init.sql | 106 ++++++++------- go/enclave/storage/storage.go | 26 +++- 7 files changed, 290 insertions(+), 238 deletions(-) diff --git a/go/enclave/storage/enclavedb/batch.go b/go/enclave/storage/enclavedb/batch.go index a93938db47..c430d66f3c 100644 --- a/go/enclave/storage/enclavedb/batch.go +++ b/go/enclave/storage/enclavedb/batch.go @@ -1,9 +1,7 @@ package enclavedb import ( - "bytes" "context" - "crypto/sha256" "database/sql" "errors" "fmt" @@ -23,34 +21,33 @@ import ( const ( bodyInsert = "replace into batch_body values (?,?)" - txInsert = "replace into tx values " + txInsert = "replace into tx (hash, full_hash, content, sender_address, nonce, idx, body) values " txInsertValue = "(?,?,?,?,?,?,?)" - batchInsert = "insert into batch values (?,?,?,?,?,?,?,?,?,?,?)" + batchInsert = "insert into batch values (?,?,?,?,?,?,?,?,?,?)" updateBatchExecuted = "update batch set is_executed=true where sequence=?" - selectBatch = "select b.header, bb.content from batch b join batch_body bb on b.body=bb.id" - selectHeader = "select b.header from batch b" + selectBatch = "select b.header, bb.content from batch b join batch_body bb on b.body=bb.id" - txExecInsert = "insert into exec_tx values " - txExecInsertValue = "(?,?,?,?,?)" - queryReceipts = "select exec_tx.receipt, tx.content, batch.full_hash, batch.height from exec_tx join tx on tx.hash=exec_tx.tx join batch on batch.sequence=exec_tx.batch " - queryReceiptsCount = "select count(1) from exec_tx join tx on tx.hash=exec_tx.tx join batch on batch.sequence=exec_tx.batch " + txExecInsert = "insert into exec_tx (created_contract_address, receipt, tx, batch) values " + txExecInsertValue = "(?,?,?,?)" + queryReceipts = "select exec_tx.receipt, tx.content, batch.full_hash, batch.height from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch " + queryReceiptsCount = "select count(1) from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch " - selectTxQuery = "select tx.content, batch.full_hash, batch.height, tx.idx from exec_tx join tx on tx.hash=exec_tx.tx join batch on batch.sequence=exec_tx.batch where batch.is_canonical=true and tx.hash=?" + selectTxQuery = "select tx.content, batch.full_hash, batch.height, tx.idx from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch where batch.is_canonical=true and tx.hash=? and tx.full_hash=?" - selectContractCreationTx = "select tx.full_hash from exec_tx join tx on tx.hash=exec_tx.tx where created_contract_address=?" + selectContractCreationTx = "select tx.full_hash from exec_tx join tx on tx.id=exec_tx.tx where created_contract_address=?" selectTotalCreatedContracts = "select count( distinct created_contract_address) from exec_tx " - queryBatchWasExecuted = "select is_executed from batch where is_canonical=true and hash=?" + queryBatchWasExecuted = "select is_executed from batch where is_canonical=true and hash=? and full_hash=?" - isCanonQuery = "select is_canonical from block where hash=?" 
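Most of this first patch applies one pattern: the old 16-byte truncated hash keys become narrow 4-byte hash index columns, and every point lookup adds an equality check on the accompanying 32-byte full_hash column to resolve prefix collisions, as in the rewritten isCanonQuery just below. A minimal sketch of the pattern, assuming the usual database/sql and go-ethereum imports (readIsCanonical is an illustrative name, not a function in this diff; truncTo4 is the helper added to utils.go further down):

	// Sketch only: two-column lookup against the truncated-hash index.
	func readIsCanonical(ctx context.Context, db *sql.DB, blockHash gethcommon.Hash) (bool, error) {
		var isCanon bool
		// hash=? hits the small 4-byte index; full_hash=? discards the
		// rare rows whose first four hash bytes collide.
		err := db.QueryRowContext(ctx,
			"select is_canonical from block where hash=? and full_hash=?",
			truncTo4(blockHash), blockHash.Bytes(),
		).Scan(&isCanon)
		return isCanon, err
	}

The truncated column keeps the index compact, while the full-hash comparison runs only on rows the index has already narrowed down, so it adds no measurable cost.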
+ isCanonQuery = "select is_canonical from block where hash=? and full_hash=?" - queryTxList = "select tx.full_hash, batch.height, batch.header from exec_tx join batch on batch.sequence=exec_tx.batch join tx on tx.hash=exec_tx.tx where batch.is_canonical=true" + queryTxList = "select tx.full_hash, batch.height, batch.header from exec_tx join batch on batch.sequence=exec_tx.batch join tx on tx.id=exec_tx.tx where batch.is_canonical=true" queryTxCountList = "select count(1) from exec_tx join batch on batch.sequence=exec_tx.batch where batch.is_canonical=true" ) // WriteBatchAndTransactions - persists the batch and the transactions -func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *core.Batch, convertedHash gethcommon.Hash) error { +func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *core.Batch, convertedHash gethcommon.Hash, blockId uint64) error { // todo - optimize for reorgs batchBodyID := batch.SeqNo().Uint64() @@ -65,13 +62,8 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *c dbtx.ExecuteSQL(bodyInsert, batchBodyID, body) - var parentBytes []byte - if batch.Number().Uint64() > 0 { - parentBytes = truncTo16(batch.Header.ParentHash) - } - var isCanon bool - err = dbtx.GetDB().QueryRowContext(ctx, isCanonQuery, truncTo16(batch.Header.L1Proof)).Scan(&isCanon) + err = dbtx.GetDB().QueryRowContext(ctx, isCanonQuery, truncTo4(batch.Header.L1Proof), batch.Header.L1Proof.Bytes()).Scan(&isCanon) if err != nil { // if the block is not found, we assume it is non-canonical // fmt.Printf("IsCanon %s err: %s\n", batch.Header.L1Proof, err) @@ -82,13 +74,12 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *c batch.Header.SequencerOrderNo.Uint64(), // sequence batch.Hash(), // full hash convertedHash, // converted_hash - truncTo16(batch.Hash()), // index hash - parentBytes, // parent + truncTo4(batch.Hash()), // index hash batch.Header.Number.Uint64(), // height isCanon, // is_canonical header, // header blob batchBodyID, // reference to the batch body - truncTo16(batch.Header.L1Proof), // l1_proof + blockId, // indexed l1_proof false, // executed ) @@ -109,13 +100,13 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *c return fmt.Errorf("unable to convert tx to message - %w", err) } - args = append(args, truncTo16(transaction.Hash())) // indexed tx_hash - args = append(args, transaction.Hash()) // full tx_hash - args = append(args, txBytes) // content - args = append(args, from.Bytes()) // sender_address - args = append(args, transaction.Nonce()) // nonce - args = append(args, i) // idx - args = append(args, batchBodyID) // the batch body which contained it + args = append(args, truncTo4(transaction.Hash())) // truncated tx_hash + args = append(args, transaction.Hash()) // full tx_hash + args = append(args, txBytes) // content + args = append(args, from.Bytes()) // sender_address + args = append(args, transaction.Nonce()) // nonce + args = append(args, i) // idx + args = append(args, batchBodyID) // the batch body which contained it } dbtx.ExecuteSQL(insert, args...) } @@ -136,11 +127,14 @@ func WriteBatchExecution(ctx context.Context, dbtx DBTransaction, seqNo *big.Int return fmt.Errorf("failed to encode block receipts. 
Cause: %w", err)
 		}
 
-		args = append(args, executedTransactionID(&receipt.BlockHash, &receipt.TxHash)) // PK
-		args = append(args, receipt.ContractAddress.Bytes())                            // created_contract_address
-		args = append(args, receiptBytes)                                               // the serialised receipt
-		args = append(args, truncTo16(receipt.TxHash))                                  // tx_hash
-		args = append(args, seqNo.Uint64())                                             // batch_seq
+		txId, err := ReadTxId(ctx, dbtx, storageReceipt.TxHash)
+		if err != nil {
+			return err
+		}
+		args = append(args, receipt.ContractAddress.Bytes()) // created_contract_address
+		args = append(args, receiptBytes)                    // the serialised receipt
+		args = append(args, txId)                            // tx id
+		args = append(args, seqNo.Uint64())                  // batch_seq
 	}
 	if len(args) > 0 {
 		insert := txExecInsert + strings.Repeat(txExecInsertValue+",", len(receipts))
@@ -150,12 +144,10 @@ func WriteBatchExecution(ctx context.Context, dbtx DBTransaction, seqNo *big.Int
 	return nil
 }
 
-// concatenates the batch_hash with the tx_hash to create a PK for the executed transaction
-func executedTransactionID(batchHash *common.L2BatchHash, txHash *common.L2TxHash) []byte {
-	execTxID := make([]byte, 0)
-	execTxID = append(execTxID, batchHash.Bytes()...)
-	execTxID = append(execTxID, txHash.Bytes()...)
-	return truncTo16(sha256.Sum256(execTxID))
+func ReadTxId(ctx context.Context, dbtx DBTransaction, txHash gethcommon.Hash) (uint64, error) {
+	var txId uint64
+	err := dbtx.GetDB().QueryRowContext(ctx, "select id from tx where hash=? and full_hash=?", truncTo4(txHash), txHash.Bytes()).Scan(&txId)
+	return txId, err
 }
 
 func ReadBatchBySeqNo(ctx context.Context, db *sql.DB, seqNo uint64) (*core.Batch, error) {
@@ -163,7 +155,7 @@ func ReadBatchBySeqNo(ctx context.Context, db *sql.DB, seqNo uint64) (*core.Batc
 }
 
 func ReadBatchByHash(ctx context.Context, db *sql.DB, hash common.L2BatchHash) (*core.Batch, error) {
-	return fetchBatch(ctx, db, " where b.hash=?", truncTo16(hash))
+	return fetchBatch(ctx, db, " where b.hash=? and b.full_hash=?", truncTo4(hash), hash.Bytes())
}

func ReadCanonicalBatchByHeight(ctx context.Context, db *sql.DB, height uint64) (*core.Batch, error) {
@@ -174,17 +166,13 @@ func ReadNonCanonicalBatches(ctx context.Context, db *sql.DB, startAtSeq uint64,
 	return fetchBatches(ctx, db, " where b.sequence>=? and b.sequence <=? and b.is_canonical=false order by b.sequence", startAtSeq, endSeq)
}

-func ReadBatchHeader(ctx context.Context, db *sql.DB, hash gethcommon.Hash) (*common.BatchHeader, error) {
-	return fetchBatchHeader(ctx, db, " where hash=?", truncTo16(hash))
-}
-
 // todo - is there a better way to write this query?
 func ReadCurrentHeadBatch(ctx context.Context, db *sql.DB) (*core.Batch, error) {
 	return fetchBatch(ctx, db, " where b.is_canonical=true and b.is_executed=true and b.height=(select max(b1.height) from batch b1 where b1.is_canonical=true and b1.is_executed=true)")
}

func ReadBatchesByBlock(ctx context.Context, db *sql.DB, hash common.L1BlockHash) ([]*core.Batch, error) {
-	return fetchBatches(ctx, db, " where b.l1_proof=? order by b.sequence", truncTo16(hash))
+	return fetchBatches(ctx, db, " join block l1b on b.l1_proof=l1b.id where l1b.hash=? and l1b.full_hash=? 
order by b.sequence", truncTo4(hash), hash.Bytes()) } func ReadCurrentSequencerNo(ctx context.Context, db *sql.DB) (*big.Int, error) { @@ -205,8 +193,8 @@ func ReadCurrentSequencerNo(ctx context.Context, db *sql.DB) (*big.Int, error) { } func ReadHeadBatchForBlock(ctx context.Context, db *sql.DB, l1Hash common.L1BlockHash) (*core.Batch, error) { - query := " where b.is_canonical=true and b.is_executed=true and b.height=(select max(b1.height) from batch b1 where b1.is_canonical=true and b1.is_executed=true and b1.l1_proof=?)" - return fetchBatch(ctx, db, query, truncTo16(l1Hash)) + query := " where b.is_canonical=true and b.is_executed=true and b.height=(select max(b1.height) from batch b1 where b1.is_canonical=true and b1.is_executed=true and b1.l1_proof=? and b1.full_l1_proof=?)" + return fetchBatch(ctx, db, query, truncTo4(l1Hash), l1Hash.Bytes()) } func fetchBatch(ctx context.Context, db *sql.DB, whereQuery string, args ...any) (*core.Batch, error) { @@ -283,30 +271,6 @@ func fetchBatches(ctx context.Context, db *sql.DB, whereQuery string, args ...an return result, nil } -func fetchBatchHeader(ctx context.Context, db *sql.DB, whereQuery string, args ...any) (*common.BatchHeader, error) { - var header string - query := selectHeader + " " + whereQuery - var err error - if len(args) > 0 { - err = db.QueryRowContext(ctx, query, args...).Scan(&header) - } else { - err = db.QueryRowContext(ctx, query).Scan(&header) - } - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - // make sure the error is converted to obscuro-wide not found error - return nil, errutil.ErrNotFound - } - return nil, err - } - h := new(common.BatchHeader) - if err := rlp.Decode(bytes.NewReader([]byte(header)), h); err != nil { - return nil, fmt.Errorf("could not decode batch header. Cause: %w", err) - } - - return h, nil -} - func selectReceipts(ctx context.Context, db *sql.DB, config *params.ChainConfig, query string, args ...any) (types.Receipts, error) { var allReceipts types.Receipts @@ -364,11 +328,12 @@ func selectReceipts(ctx context.Context, db *sql.DB, config *params.ChainConfig, // corresponding block body, so if the block body is not found it will return nil even // if the receipt itself is stored. func ReadReceiptsByBatchHash(ctx context.Context, db *sql.DB, hash common.L2BatchHash, config *params.ChainConfig) (types.Receipts, error) { - return selectReceipts(ctx, db, config, "where batch.hash = ?", truncTo16(hash)) + return selectReceipts(ctx, db, config, "where batch.hash=? and batch.full_hash=?", truncTo4(hash), hash.Bytes()) } -func ReadReceipt(ctx context.Context, db *sql.DB, hash common.L2TxHash, config *params.ChainConfig) (*types.Receipt, error) { - row := db.QueryRowContext(ctx, queryReceipts+" where tx=?", truncTo16(hash)) +func ReadReceipt(ctx context.Context, db *sql.DB, txHash common.L2TxHash, config *params.ChainConfig) (*types.Receipt, error) { + // todo - canonical? + row := db.QueryRowContext(ctx, queryReceipts+" where tx.hash=? and tx.full_hash=?", truncTo4(txHash), txHash.Bytes()) // receipt, tx, batch, height var receiptData []byte var txData []byte @@ -398,13 +363,13 @@ func ReadReceipt(ctx context.Context, db *sql.DB, hash common.L2TxHash, config * batchhash.SetBytes(batchHash) // todo base fee if err = receipts.DeriveFields(config, batchhash, height, 0, big.NewInt(1), big.NewInt(0), transactions); err != nil { - return nil, fmt.Errorf("failed to derive block receipts fields. 
hash = %s; number = %d; err = %w", hash, height, err) + return nil, fmt.Errorf("failed to derive block receipts fields. txHash = %s; number = %d; err = %w", txHash, height, err) } return receipts[0], nil } func ReadTransaction(ctx context.Context, db *sql.DB, txHash gethcommon.Hash) (*types.Transaction, common.L2BatchHash, uint64, uint64, error) { - row := db.QueryRowContext(ctx, selectTxQuery, truncTo16(txHash)) + row := db.QueryRowContext(ctx, selectTxQuery, truncTo4(txHash), txHash.Bytes()) // tx, batch, height, idx var txData []byte @@ -462,7 +427,7 @@ func ReadUnexecutedBatches(ctx context.Context, db *sql.DB, from *big.Int) ([]*c } func BatchWasExecuted(ctx context.Context, db *sql.DB, hash common.L2BatchHash) (bool, error) { - row := db.QueryRowContext(ctx, queryBatchWasExecuted, truncTo16(hash)) + row := db.QueryRowContext(ctx, queryBatchWasExecuted, truncTo4(hash), hash.Bytes()) var result bool err := row.Scan(&result) diff --git a/go/enclave/storage/enclavedb/block.go b/go/enclave/storage/enclavedb/block.go index e4b605ab65..a31a701d3d 100644 --- a/go/enclave/storage/enclavedb/block.go +++ b/go/enclave/storage/enclavedb/block.go @@ -16,38 +16,34 @@ import ( ) const ( - blockInsert = "insert into block values (?,?,?,?,?)" - selectBlockHeader = "select header from block" + blockInsert = "insert into block (hash,full_hash,is_canonical,header,height) values (?,?,?,?,?)" + selectBlockHeader = "select header from block " l1msgInsert = "insert into l1_msg (message, block, is_transfer) values " l1msgValue = "(?,?,?)" - selectL1Msg = "select message from l1_msg " + selectL1Msg = "select message from l1_msg m join block b on m.block=b.id " - rollupInsert = "replace into rollup values (?,?,?,?,?,?)" - rollupSelect = "select hash from rollup where compression_block in " - rollupSelectMetadata = "select start_seq, time_stamp from rollup where hash = ? " + rollupInsert = "replace into rollup (hash, full_hash, start_seq, end_seq, time_stamp, header, compression_block) values (?,?,?,?,?,?,?)" + rollupSelect = "select full_hash from rollup r join block b on r.compression_block=b.id where " + rollupSelectMetadata = "select start_seq, time_stamp from rollup where hash = ? and full_hash=?" - updateCanonicalBlock = "update block set is_canonical=? where hash in " + updateCanonicalBlock = "update block set is_canonical=? where " // todo - do we need the is_canonical field? updateCanonicalBatches = "update batch set is_canonical=? where l1_proof in " ) -func WriteBlock(ctx context.Context, dbtx DBTransaction, b *types.Header) error { +func WriteBlock(_ context.Context, dbtx DBTransaction, b *types.Header) error { header, err := rlp.EncodeToBytes(b) if err != nil { return fmt.Errorf("could not encode block header. 
Cause: %w", err)
 	}
-	var parentBytes []byte
-	if b.Number.Uint64() > 1 {
-		parentBytes = truncTo16(b.ParentHash)
-	}
 	dbtx.ExecuteSQL(blockInsert,
-		truncTo16(b.Hash()), // hash
-		parentBytes,         // parent
-		true,                // is_canonical
-		header,              // header
-		b.Number.Uint64(),   // height
+		truncTo4(b.Hash()), // hash
+		b.Hash().Bytes(),   // full_hash
+		true,               // is_canonical
+		header,             // header
+		b.Number.Uint64(),  // height
 	)
 	return nil
 }
@@ -61,25 +57,27 @@ func UpdateCanonicalBlocks(ctx context.Context, dbtx DBTransaction, canonical []
 	}
 }
 
-func updateCanonicalValue(ctx context.Context, dbtx DBTransaction, isCanonical bool, values []common.L1BlockHash) {
-	argPlaceholders := strings.Repeat("?,", len(values))
-	argPlaceholders = argPlaceholders[0 : len(argPlaceholders)-1] // remove trailing comma
+func updateCanonicalValue(_ context.Context, dbtx DBTransaction, isCanonical bool, blocks []common.L1BlockHash) {
+	token := "(hash=? and full_hash=?) OR "
+	updateBlocksWhere := strings.Repeat(token, len(blocks))
+	updateBlocksWhere = updateBlocksWhere + "1=0"
 
-	updateBlocks := updateCanonicalBlock + "(" + argPlaceholders + ")"
-	updateBatches := updateCanonicalBatches + "(" + argPlaceholders + ")"
+	updateBlocks := updateCanonicalBlock + updateBlocksWhere
 
 	args := make([]any, 0)
 	args = append(args, isCanonical)
-	for _, value := range values {
-		args = append(args, truncTo16(value))
+	for _, blockHash := range blocks {
+		args = append(args, truncTo4(blockHash), blockHash.Bytes())
 	}
 	dbtx.ExecuteSQL(updateBlocks, args...)
+
+	updateBatches := updateCanonicalBatches + "(" + "select id from block where " + updateBlocksWhere + ")"
 	dbtx.ExecuteSQL(updateBatches, args...)
 }
 
 // todo - remove this. For now creates a "block" but without a body.
 func FetchBlock(ctx context.Context, db *sql.DB, hash common.L1BlockHash) (*types.Block, error) {
-	return fetchBlock(ctx, db, " where hash=?", truncTo16(hash))
+	return fetchBlock(ctx, db, " where hash=? and full_hash=?", truncTo4(hash), hash.Bytes())
 }
 
 func FetchHeadBlock(ctx context.Context, db *sql.DB) (*types.Block, error) {
@@ -90,7 +88,13 @@ func FetchBlockHeaderByHeight(ctx context.Context, db *sql.DB, height *big.Int)
 	return fetchBlockHeader(ctx, db, "where is_canonical=true and height=?", height.Int64())
 }
 
-func WriteL1Messages[T any](ctx context.Context, db *sql.DB, blockHash common.L1BlockHash, messages []T, isValueTransfer bool) error {
+func GetBlockId(ctx context.Context, db *sql.DB, hash common.L1BlockHash) (uint64, error) {
+	var id uint64
+	err := db.QueryRowContext(ctx, "select id from block where hash=? and full_hash=?", truncTo4(hash), hash.Bytes()).Scan(&id)
+	return id, err
+}
+
+func WriteL1Messages[T any](ctx context.Context, db *sql.DB, blockId uint64, messages []T, isValueTransfer bool) error {
 	insert := l1msgInsert + strings.Repeat(l1msgValue+",", len(messages))
 	insert = insert[0 : len(insert)-1] // remove trailing comma
 
@@ -102,7 +106,7 @@ func WriteL1Messages[T any](ctx context.Context, db *sql.DB, blockHash common.L1
 		return err
 	}
 	args = append(args, data)
-	args = append(args, truncTo16(blockHash))
+	args = append(args, blockId)
 	args = append(args, isValueTransfer)
 }
 if len(messages) > 0 {
@@ -114,8 +118,8 @@ func WriteL1Messages[T any](ctx context.Context, db *sql.DB, blockHash common.L1
 func FetchL1Messages[T any](ctx context.Context, db *sql.DB, blockHash common.L1BlockHash, isTransfer bool) ([]T, error) {
 	var result []T
-	query := selectL1Msg + " where block = ? and is_transfer = ?"
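The block table also gains an integer surrogate key, so dependent rows (l1_msg, rollup, and batch.l1_proof) now store a compact integer id instead of a 16-byte hash. The intended write path, mirroring the storage.go hunk later in this patch (sketch only; it assumes the block was persisted earlier and omits logging):

	// Resolve the surrogate id once, then reference it from the l1_msg rows.
	blockId, err := GetBlockId(ctx, db, blockHash)
	if err != nil {
		return err
	}
	return WriteL1Messages(ctx, db, blockId, messages, false)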
- rows, err := db.QueryContext(ctx, query, truncTo16(blockHash), isTransfer) + query := selectL1Msg + " where b.hash = ? and b.full_hash = ? and is_transfer = ?" + rows, err := db.QueryContext(ctx, query, truncTo4(blockHash), blockHash.Bytes(), isTransfer) if err != nil { if errors.Is(err, sql.ErrNoRows) { // make sure the error is converted to obscuro-wide not found error @@ -143,32 +147,34 @@ func FetchL1Messages[T any](ctx context.Context, db *sql.DB, blockHash common.L1 return result, nil } -func WriteRollup(ctx context.Context, dbtx DBTransaction, rollup *common.RollupHeader, internalHeader *common.CalldataRollupHeader) error { +func WriteRollup(_ context.Context, dbtx DBTransaction, rollup *common.RollupHeader, blockId uint64, internalHeader *common.CalldataRollupHeader) error { // Write the encoded header data, err := rlp.EncodeToBytes(rollup) if err != nil { return fmt.Errorf("could not encode batch header. Cause: %w", err) } dbtx.ExecuteSQL(rollupInsert, - truncTo16(rollup.Hash()), + truncTo4(rollup.Hash()), + rollup.Hash().Bytes(), internalHeader.FirstBatchSequence.Uint64(), rollup.LastBatchSeqNo, internalHeader.StartTime, data, - truncTo16(rollup.CompressionL1Head), + blockId, ) return nil } func FetchReorgedRollup(ctx context.Context, db *sql.DB, reorgedBlocks []common.L1BlockHash) (*common.L2BatchHash, error) { - argPlaceholders := strings.Repeat("?,", len(reorgedBlocks)) - argPlaceholders = argPlaceholders[0 : len(argPlaceholders)-1] // remove trailing comma + token := "(b.hash=? and b.full_hash=?) OR " + whereClause := strings.Repeat(token, len(reorgedBlocks)) + whereClause = whereClause + "1=0" - query := rollupSelect + " (" + argPlaceholders + ")" + query := rollupSelect + whereClause args := make([]any, 0) - for _, value := range reorgedBlocks { - args = append(args, truncTo16(value)) + for _, blockHash := range reorgedBlocks { + args = append(args, truncTo4(blockHash), blockHash.Bytes()) } rollup := new(common.L2BatchHash) err := db.QueryRowContext(ctx, query, args...).Scan(&rollup) @@ -187,7 +193,7 @@ func FetchRollupMetadata(ctx context.Context, db *sql.DB, hash common.L2RollupHa var startTime uint64 rollup := new(common.PublicRollupMetadata) - err := db.QueryRowContext(ctx, rollupSelectMetadata, truncTo16(hash)).Scan(&startSeq, &startTime) + err := db.QueryRowContext(ctx, rollupSelectMetadata, truncTo4(hash), hash.Bytes()).Scan(&startSeq, &startTime) if err != nil { if errors.Is(err, sql.ErrNoRows) { return nil, errutil.ErrNotFound diff --git a/go/enclave/storage/enclavedb/events.go b/go/enclave/storage/enclavedb/events.go index 910078fdd8..02430de7eb 100644 --- a/go/enclave/storage/enclavedb/events.go +++ b/go/enclave/storage/enclavedb/events.go @@ -15,11 +15,11 @@ import ( ) const ( - baseEventsQuerySelect = "select topic0, topic1, topic2, topic3, topic4, datablob, b.full_hash, b.height, tx.full_hash, tx.idx, log_idx, address" - baseDebugEventsQuerySelect = "select rel_address1, rel_address2, rel_address3, rel_address4, lifecycle_event, topic0, topic1, topic2, topic3, topic4, datablob, b.full_hash, b.height, tx.full_hash, tx.idx, log_idx, address" - baseEventsJoin = "from events e join exec_tx extx on e.exec_tx_id=extx.id join tx on extx.tx=tx.hash join batch b on extx.batch=b.sequence where b.is_canonical=true " + baseEventsQuerySelect = "select topic0_full, topic1_full, topic2_full, topic3_full, topic4_full, datablob, b.full_hash, b.height, tx.full_hash, tx.idx, log_idx, address_full" + baseDebugEventsQuerySelect = "select rel_address1_full, rel_address2_full, 
rel_address3_full, rel_address4_full, lifecycle_event, topic0_full, topic1_full, topic2_full, topic3_full, topic4_full, datablob, b.full_hash, b.height, tx.full_hash, tx.idx, log_idx, address_full" + baseEventsJoin = "from events e join exec_tx extx on e.tx=extx.tx and e.batch=extx.batch join tx on extx.tx=tx.id join batch b on extx.batch=b.sequence where b.is_canonical=true " insertEvent = "insert into events values " - insertEventValues = "(?,?,?,?,?,?,?,?,?,?,?,?,?,?)" + insertEventValues = "(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)" orderBy = " order by b.height, tx.idx asc" ) @@ -28,11 +28,21 @@ func StoreEventLogs(ctx context.Context, dbtx DBTransaction, receipts []*types.R totalLogs := 0 for _, receipt := range receipts { for _, l := range receipt.Logs { - logArgs, err := logDBValues(ctx, dbtx.GetDB(), l, receipt, stateDB) + txId, err := ReadTxId(ctx, dbtx, l.TxHash) + if err != nil { + return err + } + batchId, err := ReadBatchId(ctx, dbtx, receipt.BlockHash) + if err != nil { + return err + } + logArgs, err := logDBValues(ctx, dbtx.GetDB(), l, stateDB) if err != nil { return err } args = append(args, logArgs...) + args = append(args, txId) + args = append(args, batchId) totalLogs++ } } @@ -45,12 +55,21 @@ func StoreEventLogs(ctx context.Context, dbtx DBTransaction, receipts []*types.R return nil } +func ReadBatchId(ctx context.Context, dbtx DBTransaction, batchHash gethcommon.Hash) (uint64, error) { + var batchId uint64 + err := dbtx.GetDB().QueryRowContext(ctx, + "select sequence from batch where batch.hash=? and batch.full_hash=?", + truncTo4(batchHash), batchHash.Bytes(), + ).Scan(&batchId) + return batchId, err +} + // This method stores a log entry together with relevancy metadata // Each types.Log has 5 indexable topics, where the first one is the event signature hash // The other 4 topics are set by the programmer // According to the data relevancy rules, an event is relevant to accounts referenced directly in topics // If the event is not referring any user address, it is considered a "lifecycle event", and is relevant to everyone -func logDBValues(ctx context.Context, db *sql.DB, l *types.Log, receipt *types.Receipt, stateDB *state.StateDB) ([]any, error) { +func logDBValues(ctx context.Context, db *sql.DB, l *types.Log, stateDB *state.StateDB) ([]any, error) { // The topics are stored in an array with a maximum of 5 entries, but usually less var t0, t1, t2, t3, t4 []byte @@ -123,10 +142,14 @@ func logDBValues(ctx context.Context, db *sql.DB, l *types.Log, receipt *types.R } return []any{ + truncBTo4(t0), truncBTo4(t1), truncBTo4(t2), truncBTo4(t3), truncBTo4(t4), t0, t1, t2, t3, t4, - data, l.Index, l.Address.Bytes(), - isLifecycle, a1, a2, a3, a4, - executedTransactionID(&receipt.BlockHash, &l.TxHash), + data, l.Index, + truncBTo4(l.Address.Bytes()), + l.Address.Bytes(), + isLifecycle, + truncBTo4(a1), truncBTo4(a2), truncBTo4(a3), truncBTo4(a4), + a1, a2, a3, a4, }, nil } @@ -142,8 +165,8 @@ func FilterLogs( queryParams := []any{} query := "" if batchHash != nil { - query += " AND b.hash = ?" - queryParams = append(queryParams, truncTo16(*batchHash)) + query += " AND b.hash = ? AND b.full_hash = ?" + queryParams = append(queryParams, truncTo4(*batchHash), batchHash.Bytes()) } // ignore negative numbers @@ -157,8 +180,10 @@ func FilterLogs( } if len(addresses) > 0 { - query += " AND address in (?" + strings.Repeat(",?", len(addresses)-1) + ")" + token := "(address=? AND address_full=?) 
OR " + query += " AND (" + strings.Repeat(token, len(addresses)) + " 1=0)" for _, address := range addresses { + queryParams = append(queryParams, truncBTo4(address.Bytes())) queryParams = append(queryParams, address.Bytes()) } } @@ -169,9 +194,11 @@ func FilterLogs( for i, sub := range topics { // empty rule set == wildcard if len(sub) > 0 { - column := fmt.Sprintf("topic%d", i) - query += " AND " + column + " in (?" + strings.Repeat(",?", len(sub)-1) + ")" + topicColumn := fmt.Sprintf("topic%d", i) + token := fmt.Sprintf("(%s=? AND %s_full=?) OR ", topicColumn) + query += " AND (" + strings.Repeat(token, len(sub)) + " 1=0)" for _, topic := range sub { + queryParams = append(queryParams, truncBTo4(topic.Bytes())) queryParams = append(queryParams, topic.Bytes()) } } @@ -184,9 +211,9 @@ func FilterLogs( func DebugGetLogs(ctx context.Context, db *sql.DB, txHash common.TxHash) ([]*tracers.DebugLogs, error) { var queryParams []any - query := baseDebugEventsQuerySelect + " " + baseEventsJoin + "AND tx.hash = ?" + query := baseDebugEventsQuerySelect + " " + baseEventsJoin + "AND tx.hash = ? AND tx.full_hash = ?" - queryParams = append(queryParams, truncTo16(txHash)) + queryParams = append(queryParams, truncTo4(txHash), txHash.Bytes()) result := make([]*tracers.DebugLogs, 0) @@ -269,8 +296,8 @@ func isEndUserAccount(ctx context.Context, db *sql.DB, topic gethcommon.Hash, st addrBytes := potentialAddr.Bytes() // Check the database if there are already entries for this address var count int - query := "select count(*) from events where rel_address1=? OR rel_address2=? OR rel_address3=? OR rel_address4=?" - err := db.QueryRowContext(ctx, query, addrBytes, addrBytes, addrBytes, addrBytes).Scan(&count) + query := "select count(*) from events where (rel_address1=? and rel_address1_full=?) OR (rel_address2=? and rel_address2_full=?) OR (rel_address3=? and rel_address3_full=?) OR (rel_address4=? and rel_address4_full=?)" + err := db.QueryRowContext(ctx, query, truncBTo4(addrBytes), addrBytes, truncBTo4(addrBytes), addrBytes, truncBTo4(addrBytes), addrBytes, truncBTo4(addrBytes), addrBytes).Scan(&count) if err != nil { // exit here return false, nil, err @@ -306,10 +333,14 @@ func loadLogs(ctx context.Context, db *sql.DB, requestingAccount *gethcommon.Add // Add relevancy rules // An event is considered relevant to all account owners whose addresses are used as topics in the event. // In case there are no account addresses in an event's topics, then the event is considered relevant to everyone (known as a "lifecycle event"). - query += " AND (lifecycle_event OR (rel_address1=? OR rel_address2=? OR rel_address3=? OR rel_address4=?)) " + query += " AND (lifecycle_event OR ((rel_address1=? AND rel_address1_full=?) OR (rel_address2=? AND rel_address2_full=?) OR (rel_address3=? AND rel_address3_full=?) OR (rel_address4=? 
AND rel_address4_full=?))) " + queryParams = append(queryParams, truncBTo4(requestingAccount.Bytes())) queryParams = append(queryParams, requestingAccount.Bytes()) + queryParams = append(queryParams, truncBTo4(requestingAccount.Bytes())) queryParams = append(queryParams, requestingAccount.Bytes()) + queryParams = append(queryParams, truncBTo4(requestingAccount.Bytes())) queryParams = append(queryParams, requestingAccount.Bytes()) + queryParams = append(queryParams, truncBTo4(requestingAccount.Bytes())) queryParams = append(queryParams, requestingAccount.Bytes()) query += whereCondition diff --git a/go/enclave/storage/enclavedb/utils.go b/go/enclave/storage/enclavedb/utils.go index 9b15744489..a47bc75d57 100644 --- a/go/enclave/storage/enclavedb/utils.go +++ b/go/enclave/storage/enclavedb/utils.go @@ -2,18 +2,16 @@ package enclavedb import gethcommon "github.com/ethereum/go-ethereum/common" -const truncHash = 16 - -func truncTo16(hash gethcommon.Hash) []byte { - return truncBTo16(hash.Bytes()) +func truncTo4(hash gethcommon.Hash) []byte { + return truncBTo4(hash.Bytes()) } -func truncBTo16(bytes []byte) []byte { +func truncBTo4(bytes []byte) []byte { if len(bytes) == 0 { return bytes } - b := bytes[0:truncHash] - c := make([]byte, truncHash) + b := bytes[0:4] + c := make([]byte, 4) copy(c, b) return c } diff --git a/go/enclave/storage/init/edgelessdb/001_init.sql b/go/enclave/storage/init/edgelessdb/001_init.sql index 1f54c266fc..7a20e05ef4 100644 --- a/go/enclave/storage/init/edgelessdb/001_init.sql +++ b/go/enclave/storage/init/edgelessdb/001_init.sql @@ -29,22 +29,24 @@ GRANT ALL ON obsdb.attestation_key TO obscuro; create table if not exists obsdb.block ( - hash binary(16), - parent binary(16), + id INTEGER AUTO_INCREMENT, + hash binary(4), + full_hash binary(32), is_canonical boolean NOT NULL, header blob NOT NULL, height int NOT NULL, - primary key (hash), - INDEX (height) + primary key (id), + INDEX (height), + INDEX (hash) ); GRANT ALL ON obsdb.block TO obscuro; create table if not exists obsdb.l1_msg ( - id INTEGER AUTO_INCREMENT, - message varbinary(1024) NOT NULL, - block binary(16) NOT NULL, - is_transfer boolean NOT NULL, + id INTEGER AUTO_INCREMENT, + message varbinary(1024) NOT NULL, + block INTEGER NOT NULL, + is_transfer boolean NOT NULL, INDEX (block), primary key (id) ); @@ -52,14 +54,17 @@ GRANT ALL ON obsdb.l1_msg TO obscuro; create table if not exists obsdb.rollup ( - hash binary(16), - start_seq int NOT NULL, - end_seq int NOT NULL, - time_stamp int NOT NULL, - header blob NOT NULL, - compression_block binary(16) NOT NULL, + id INTEGER AUTO_INCREMENT, + hash binary(4), + full_hash binary(32), + start_seq int NOT NULL, + end_seq int NOT NULL, + time_stamp int NOT NULL, + header blob NOT NULL, + compression_block INTEGER NOT NULL, INDEX (compression_block), - primary key (hash) + INDEX (hash), + primary key (id) ); GRANT ALL ON obsdb.rollup TO obscuro; @@ -73,17 +78,16 @@ GRANT ALL ON obsdb.batch_body TO obscuro; create table if not exists obsdb.batch ( - sequence int, - full_hash binary(32), + sequence int, + full_hash binary(32), converted_hash binary(32) NOT NULL, - hash binary(16) NOT NULL, - parent binary(16), - height int NOT NULL, - is_canonical boolean NOT NULL, - header blob NOT NULL, - body int NOT NULL, - l1_proof binary(16) NOT NULL, - is_executed boolean NOT NULL, + hash binary(4) NOT NULL, + height int NOT NULL, + is_canonical boolean NOT NULL, + header blob NOT NULL, + body int NOT NULL, + l1_proof INTEGER NOT NULL, + is_executed boolean NOT NULL, primary 
key (sequence), INDEX (hash), INDEX (body), @@ -94,6 +98,7 @@ GRANT ALL ON obsdb.batch TO obscuro; create table if not exists obsdb.tx ( + id INTEGER AUTO_INCREMENT, hash binary(16), full_hash binary(32) NOT NULL, content mediumblob NOT NULL, @@ -102,17 +107,18 @@ create table if not exists obsdb.tx idx int NOT NULL, body int NOT NULL, INDEX (body), - primary key (hash) + INDEX (hash), + primary key (id) ); GRANT ALL ON obsdb.tx TO obscuro; create table if not exists obsdb.exec_tx ( - id binary(16), + id INTEGER AUTO_INCREMENT, created_contract_address binary(20), receipt mediumblob, - tx binary(16) NOT NULL, - batch int NOT NULL, + tx int NOT NULL, + batch int NOT NULL, INDEX (batch), INDEX (tx), primary key (id) @@ -121,21 +127,33 @@ GRANT ALL ON obsdb.exec_tx TO obscuro; create table if not exists obsdb.events ( - topic0 binary(32) NOT NULL, - topic1 binary(32), - topic2 binary(32), - topic3 binary(32), - topic4 binary(32), - datablob mediumblob, - log_idx int NOT NULL, - address binary(20) NOT NULL, - lifecycle_event boolean NOT NULL, - rel_address1 binary(20), - rel_address2 binary(20), - rel_address3 binary(20), - rel_address4 binary(20), - exec_tx_id binary(16) NOT NULL, - INDEX (exec_tx_id), + topic0 binary(4) NOT NULL, + topic1 binary(4), + topic2 binary(4), + topic3 binary(4), + topic4 binary(4), + topic0_full binary(32) NOT NULL, + topic1_full binary(32), + topic2_full binary(32), + topic3_full binary(32), + topic4_full binary(32), + datablob mediumblob, + log_idx int NOT NULL, + address binary(4) NOT NULL, + address_full binary(20) NOT NULL, + lifecycle_event boolean NOT NULL, + rel_address1 binary(4), + rel_address2 binary(4), + rel_address3 binary(4), + rel_address4 binary(4), + rel_address1_full binary(20), + rel_address2_full binary(20), + rel_address3_full binary(20), + rel_address4_full binary(20), + tx int NOT NULL, + batch int NOT NULL, + INDEX (tx), + INDEX (batch), INDEX (address), INDEX (rel_address1), INDEX (rel_address2), diff --git a/go/enclave/storage/init/sqlite/001_init.sql b/go/enclave/storage/init/sqlite/001_init.sql index 96afc4906b..dd792cfa4a 100644 --- a/go/enclave/storage/init/sqlite/001_init.sql +++ b/go/enclave/storage/init/sqlite/001_init.sql @@ -2,13 +2,13 @@ create table if not exists keyvalue ( ky varbinary(64) primary key, val mediumblob NOT NULL - ); +); create table if not exists config ( ky varchar(64) primary key, val mediumblob NOT NULL - ); +); insert into config values ('CURRENT_SEQ', -1); @@ -18,37 +18,41 @@ create table if not exists attestation_key -- party binary(20) primary key, // todo -pk party binary(20), ky binary(33) NOT NULL - ); +); create table if not exists block ( - hash binary(16) primary key, - parent binary(16), + id INTEGER PRIMARY KEY AUTOINCREMENT, + hash binary(4), + full_hash binary(32), is_canonical boolean NOT NULL, header blob NOT NULL, height int NOT NULL -- the unique constraint is commented for now because there might be multiple non-canonical blocks for the same height -- unique (height, is_canonical) - ); +); create index IDX_BLOCK_HEIGHT on block (height); +create index IDX_BLOCK_HASH on block (hash); create table if not exists l1_msg ( id INTEGER PRIMARY KEY AUTOINCREMENT, message varbinary(1024) NOT NULL, - block binary(16) NOT NULL REFERENCES block, + block INTEGER NOT NULL REFERENCES block, is_transfer boolean - ); +); create table if not exists rollup ( - hash binary(16) primary key, - start_seq int NOT NULL, - end_seq int NOT NULL, - time_stamp int NOT NULL, - header blob NOT NULL, - compression_block 
binary(16) NOT NULL REFERENCES block - ); + id INTEGER PRIMARY KEY AUTOINCREMENT, + hash binary(4), + full_hash binary(32), + start_seq int NOT NULL, + end_seq int NOT NULL, + time_stamp int NOT NULL, + header blob NOT NULL, + compression_block INTEGER NOT NULL REFERENCES block +); create table if not exists batch_body ( @@ -61,62 +65,74 @@ create table if not exists batch sequence int primary key, full_hash binary(32), converted_hash binary(32), - hash binary(16) NOT NULL unique, - parent binary(16), - height int NOT NULL, - is_canonical boolean NOT NULL, - header blob NOT NULL, - body int NOT NULL REFERENCES batch_body, - l1_proof binary(16) NOT NULL, -- normally this would be a FK, but there is a weird edge case where an L2 node might not have the block used to create this batch - is_executed boolean NOT NULL + hash binary(4) NOT NULL, + height int NOT NULL, + is_canonical boolean NOT NULL, + header blob NOT NULL, + body int NOT NULL REFERENCES batch_body, + l1_proof INTEGER NOT NULL, -- normally this would be a FK, but there is a weird edge case where an L2 node might not have the block used to create this batch + is_executed boolean NOT NULL -- the unique constraint is commented for now because there might be multiple non-canonical batches for the same height -- unique (height, is_canonical, is_executed) - ); +); create index IDX_BATCH_HASH on batch (hash); create index IDX_BATCH_HEIGHT on batch (height, is_canonical); create index IDX_BATCH_Block on batch (l1_proof); create table if not exists tx ( - hash binary(16) primary key, + id INTEGER PRIMARY KEY AUTOINCREMENT, + hash binary(4), full_hash binary(32) NOT NULL, content mediumblob NOT NULL, sender_address binary(20) NOT NULL, nonce int NOT NULL, idx int NOT NULL, body int REFERENCES batch_body - ); +); +create index IDX_Tx_HASH on tx (hash); create table if not exists exec_tx ( - id binary(16) PRIMARY KEY, -- batch_hash||tx_hash + id INTEGER PRIMARY KEY AUTOINCREMENT, created_contract_address binary(20), receipt mediumblob, -- commenting out the fk until synthetic transactions are also stored -- tx binary(16) REFERENCES tx, - tx binary(16) NOT NULL, - batch int NOT NULL REFERENCES batch - ); + tx INTEGER NOT NULL, + batch INTEGER NOT NULL REFERENCES batch +); create index IX_EX_TX1 on exec_tx (tx); -- todo denormalize. 
Extract contract and user table and point topic0 and rel_addreses to it create table if not exists events ( - topic0 binary(32) NOT NULL, - topic1 binary(32), - topic2 binary(32), - topic3 binary(32), - topic4 binary(32), - datablob mediumblob, - log_idx int NOT NULL, - address binary(20) NOT NULL, - lifecycle_event boolean NOT NULL, - rel_address1 binary(20), - rel_address2 binary(20), - rel_address3 binary(20), - rel_address4 binary(20), - exec_tx_id binary(16) REFERENCES exec_tx - ); + topic0 binary(4) NOT NULL, + topic1 binary(4), + topic2 binary(4), + topic3 binary(4), + topic4 binary(4), + topic0_full binary(32) NOT NULL, + topic1_full binary(32), + topic2_full binary(32), + topic3_full binary(32), + topic4_full binary(32), + datablob mediumblob, + log_idx int NOT NULL, + address binary(4) NOT NULL, + address_full binary(20) NOT NULL, + lifecycle_event boolean NOT NULL, + rel_address1 binary(4), + rel_address2 binary(4), + rel_address3 binary(4), + rel_address4 binary(4), + rel_address1_full binary(20), + rel_address2_full binary(20), + rel_address3_full binary(20), + rel_address4_full binary(20), + tx INTEGER NOT NULL, + batch INTEGER NOT NULL REFERENCES batch +); create index IDX_AD on events (address); create index IDX_RAD1 on events (rel_address1); create index IDX_RAD2 on events (rel_address2); diff --git a/go/enclave/storage/storage.go b/go/enclave/storage/storage.go index 1938ad77b9..1388403d5a 100644 --- a/go/enclave/storage/storage.go +++ b/go/enclave/storage/storage.go @@ -417,7 +417,12 @@ func (s *storageImpl) StoreBatch(ctx context.Context, batch *core.Batch, convert dbTx := s.db.NewDBTransaction() s.logger.Trace("write batch", log.BatchHashKey, batch.Hash(), "l1Proof", batch.Header.L1Proof, log.BatchSeqNoKey, batch.SeqNo()) - if err := enclavedb.WriteBatchAndTransactions(ctx, dbTx, batch, convertedHash); err != nil { + blockId, err := enclavedb.GetBlockId(ctx, s.db.GetSQLDB(), batch.Header.L1Proof) + if err != nil { + return err + } + + if err := enclavedb.WriteBatchAndTransactions(ctx, dbTx, batch, convertedHash, blockId); err != nil { return fmt.Errorf("could not write batch. 
Cause: %w", err) } @@ -469,12 +474,20 @@ func (s *storageImpl) StoreExecutedBatch(ctx context.Context, batch *core.Batch, } func (s *storageImpl) StoreValueTransfers(ctx context.Context, blockHash common.L1BlockHash, transfers common.ValueTransferEvents) error { - return enclavedb.WriteL1Messages(ctx, s.db.GetSQLDB(), blockHash, transfers, true) + blockId, err := enclavedb.GetBlockId(ctx, s.db.GetSQLDB(), blockHash) + if err != nil { + return err + } + return enclavedb.WriteL1Messages(ctx, s.db.GetSQLDB(), blockId, transfers, true) } func (s *storageImpl) StoreL1Messages(ctx context.Context, blockHash common.L1BlockHash, messages common.CrossChainMessages) error { defer s.logDuration("StoreL1Messages", measure.NewStopwatch()) - return enclavedb.WriteL1Messages(ctx, s.db.GetSQLDB(), blockHash, messages, false) + blockId, err := enclavedb.GetBlockId(ctx, s.db.GetSQLDB(), blockHash) + if err != nil { + return err + } + return enclavedb.WriteL1Messages(ctx, s.db.GetSQLDB(), blockId, messages, false) } func (s *storageImpl) GetL1Messages(ctx context.Context, blockHash common.L1BlockHash) (common.CrossChainMessages, error) { @@ -516,7 +529,12 @@ func (s *storageImpl) StoreRollup(ctx context.Context, rollup *common.ExtRollup, defer s.logDuration("StoreRollup", measure.NewStopwatch()) dbBatch := s.db.NewDBTransaction() - if err := enclavedb.WriteRollup(ctx, dbBatch, rollup.Header, internalHeader); err != nil { + blockId, err := enclavedb.GetBlockId(ctx, s.db.GetSQLDB(), rollup.Header.CompressionL1Head) + if err != nil { + return err + } + + if err := enclavedb.WriteRollup(ctx, dbBatch, rollup.Header, blockId, internalHeader); err != nil { return fmt.Errorf("could not write rollup. Cause: %w", err) } From 29cbaccd3076345351617ddf16ab9f216828b36a Mon Sep 17 00:00:00 2001 From: Tudor Malene Date: Tue, 30 Apr 2024 18:24:43 +0100 Subject: [PATCH 02/15] fix --- go/enclave/storage/enclavedb/batch.go | 8 ++++---- go/enclave/storage/enclavedb/events.go | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go/enclave/storage/enclavedb/batch.go b/go/enclave/storage/enclavedb/batch.go index c430d66f3c..e5828aa4aa 100644 --- a/go/enclave/storage/enclavedb/batch.go +++ b/go/enclave/storage/enclavedb/batch.go @@ -127,10 +127,10 @@ func WriteBatchExecution(ctx context.Context, dbtx DBTransaction, seqNo *big.Int return fmt.Errorf("failed to encode block receipts. 
Cause: %w", err) } - txId, err := ReadTxId(ctx, dbtx, storageReceipt.TxHash) - if err != nil { - return err - } + txId, _ := ReadTxId(ctx, dbtx, storageReceipt.TxHash) + //if err != nil { + // return err + //} args = append(args, receipt.ContractAddress.Bytes()) // created_contract_address args = append(args, receiptBytes) // the serialised receipt args = append(args, txId) // tx id diff --git a/go/enclave/storage/enclavedb/events.go b/go/enclave/storage/enclavedb/events.go index 02430de7eb..008b100ddc 100644 --- a/go/enclave/storage/enclavedb/events.go +++ b/go/enclave/storage/enclavedb/events.go @@ -28,10 +28,10 @@ func StoreEventLogs(ctx context.Context, dbtx DBTransaction, receipts []*types.R totalLogs := 0 for _, receipt := range receipts { for _, l := range receipt.Logs { - txId, err := ReadTxId(ctx, dbtx, l.TxHash) - if err != nil { - return err - } + txId, _ := ReadTxId(ctx, dbtx, l.TxHash) + //if err != nil { + // return err + //} batchId, err := ReadBatchId(ctx, dbtx, receipt.BlockHash) if err != nil { return err @@ -195,7 +195,7 @@ func FilterLogs( // empty rule set == wildcard if len(sub) > 0 { topicColumn := fmt.Sprintf("topic%d", i) - token := fmt.Sprintf("(%s=? AND %s_full=?) OR ", topicColumn) + token := fmt.Sprintf("(%s=? AND %s_full=?) OR ", topicColumn, topicColumn) query += " AND (" + strings.Repeat(token, len(sub)) + " 1=0)" for _, topic := range sub { queryParams = append(queryParams, truncBTo4(topic.Bytes())) From 17de35e38a243bd2490b04a47a2f7148e551e429 Mon Sep 17 00:00:00 2001 From: Tudor Malene Date: Wed, 1 May 2024 12:13:45 +0100 Subject: [PATCH 03/15] fix --- go/common/enclave.go | 1 + go/enclave/storage/enclavedb/batch.go | 70 +++++++------------ go/enclave/storage/enclavedb/block.go | 50 ++++--------- go/enclave/storage/enclavedb/events.go | 31 +++----- go/enclave/storage/enclavedb/keyvalue.go | 43 +++++++----- go/enclave/storage/enclavedb/utils.go | 14 +++- .../storage/init/edgelessdb/001_init.sql | 21 +++--- go/enclave/storage/init/sqlite/001_init.sql | 23 +++--- go/enclave/storage/storage.go | 6 +- 9 files changed, 121 insertions(+), 138 deletions(-) diff --git a/go/common/enclave.go b/go/common/enclave.go index 59a9d06be4..54952a2aa3 100644 --- a/go/common/enclave.go +++ b/go/common/enclave.go @@ -140,6 +140,7 @@ type EnclaveScan interface { GetTotalContractCount(context.Context) (*big.Int, SystemError) // GetCustomQuery returns the data of a custom query + // todo - better name and description GetCustomQuery(ctx context.Context, encryptedParams EncryptedParamsGetStorageAt) (*responses.PrivateQueryResponse, SystemError) // GetPublicTransactionData returns a list of public transaction data diff --git a/go/enclave/storage/enclavedb/batch.go b/go/enclave/storage/enclavedb/batch.go index e5828aa4aa..a718c2c3aa 100644 --- a/go/enclave/storage/enclavedb/batch.go +++ b/go/enclave/storage/enclavedb/batch.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "math/big" - "strings" "github.com/ethereum/go-ethereum/params" @@ -20,30 +19,9 @@ import ( ) const ( - bodyInsert = "replace into batch_body values (?,?)" - txInsert = "replace into tx (hash, full_hash, content, sender_address, nonce, idx, body) values " - txInsertValue = "(?,?,?,?,?,?,?)" - - batchInsert = "insert into batch values (?,?,?,?,?,?,?,?,?,?)" - updateBatchExecuted = "update batch set is_executed=true where sequence=?" 
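The constants deleted here were each used exactly once; patch 03 inlines them at their call sites and replaces the strings.Repeat plus trim-trailing-comma construction with a repeat helper. Its definition lives in the utils.go hunk beyond this excerpt; a hypothetical reconstruction consistent with every call site in the patch:

	// repeat joins count copies of token with sep, e.g.
	// repeat("(?,?)", ",", 3) == "(?,?),(?,?),(?,?)".
	func repeat(token string, sep string, count int) string {
		elems := make([]string, count)
		for i := range elems {
			elems[i] = token
		}
		return strings.Join(elems, sep)
	}

Building the OR-chains this way also drops the "1=0" terminator trick that patch 01 used to close the repeated "(hash=? and full_hash=?) OR " tokens.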
- selectBatch = "select b.header, bb.content from batch b join batch_body bb on b.body=bb.id" - txExecInsert = "insert into exec_tx (created_contract_address, receipt, tx, batch) values " - txExecInsertValue = "(?,?,?,?)" - queryReceipts = "select exec_tx.receipt, tx.content, batch.full_hash, batch.height from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch " - queryReceiptsCount = "select count(1) from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch " - - selectTxQuery = "select tx.content, batch.full_hash, batch.height, tx.idx from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch where batch.is_canonical=true and tx.hash=? and tx.full_hash=?" - - selectContractCreationTx = "select tx.full_hash from exec_tx join tx on tx.id=exec_tx.tx where created_contract_address=?" - selectTotalCreatedContracts = "select count( distinct created_contract_address) from exec_tx " - queryBatchWasExecuted = "select is_executed from batch where is_canonical=true and hash=? and full_hash=?" - - isCanonQuery = "select is_canonical from block where hash=? and full_hash=?" - - queryTxList = "select tx.full_hash, batch.height, batch.header from exec_tx join batch on batch.sequence=exec_tx.batch join tx on tx.id=exec_tx.tx where batch.is_canonical=true" - queryTxCountList = "select count(1) from exec_tx join batch on batch.sequence=exec_tx.batch where batch.is_canonical=true" + queryReceipts = "select exec_tx.receipt, tx.content, batch.full_hash, batch.height from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch " ) // WriteBatchAndTransactions - persists the batch and the transactions @@ -60,17 +38,17 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *c return fmt.Errorf("could not encode batch header. Cause: %w", err) } - dbtx.ExecuteSQL(bodyInsert, batchBodyID, body) + dbtx.ExecuteSQL("replace into batch_body values (?,?)", batchBodyID, body) var isCanon bool - err = dbtx.GetDB().QueryRowContext(ctx, isCanonQuery, truncTo4(batch.Header.L1Proof), batch.Header.L1Proof.Bytes()).Scan(&isCanon) + err = dbtx.GetDB().QueryRowContext(ctx, "select is_canonical from block where hash=? 
and full_hash=?", truncTo4(batch.Header.L1Proof), batch.Header.L1Proof.Bytes()).Scan(&isCanon) if err != nil { // if the block is not found, we assume it is non-canonical // fmt.Printf("IsCanon %s err: %s\n", batch.Header.L1Proof, err) isCanon = false } - dbtx.ExecuteSQL(batchInsert, + dbtx.ExecuteSQL("insert into batch values (?,?,?,?,?,?,?,?,?,?)", batch.Header.SequencerOrderNo.Uint64(), // sequence batch.Hash(), // full hash convertedHash, // converted_hash @@ -85,8 +63,7 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *c // creates a big insert statement for all transactions if len(batch.Transactions) > 0 { - insert := txInsert + strings.Repeat(txInsertValue+",", len(batch.Transactions)) - insert = insert[0 : len(insert)-1] // remove trailing comma + insert := "replace into tx (hash, full_hash, content, sender_address, nonce, idx, body) values " + repeat("(?,?,?,?,?,?,?)", ",", len(batch.Transactions)) args := make([]any, 0) for i, transaction := range batch.Transactions { @@ -116,7 +93,7 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *c // WriteBatchExecution - insert all receipts to the db func WriteBatchExecution(ctx context.Context, dbtx DBTransaction, seqNo *big.Int, receipts []*types.Receipt) error { - dbtx.ExecuteSQL(updateBatchExecuted, seqNo.Uint64()) + dbtx.ExecuteSQL("update batch set is_executed=true where sequence=?", seqNo.Uint64()) args := make([]any, 0) for _, receipt := range receipts { @@ -127,18 +104,16 @@ func WriteBatchExecution(ctx context.Context, dbtx DBTransaction, seqNo *big.Int return fmt.Errorf("failed to encode block receipts. Cause: %w", err) } + // ignore the error because synthetic transactions will not be inserted txId, _ := ReadTxId(ctx, dbtx, storageReceipt.TxHash) - //if err != nil { - // return err - //} - args = append(args, receipt.ContractAddress.Bytes()) // created_contract_address - args = append(args, receiptBytes) // the serialised receipt - args = append(args, txId) // tx id - args = append(args, seqNo.Uint64()) // batch_seq + args = append(args, truncBTo4(receipt.ContractAddress.Bytes())) // created_contract_address + args = append(args, receipt.ContractAddress.Bytes()) // created_contract_address + args = append(args, receiptBytes) // the serialised receipt + args = append(args, txId) // tx id + args = append(args, seqNo.Uint64()) // batch_seq } if len(args) > 0 { - insert := txExecInsert + strings.Repeat(txExecInsertValue+",", len(receipts)) - insert = insert[0 : len(insert)-1] // remove trailing comma + insert := "insert into exec_tx (created_contract_address,created_contract_address_full, receipt, tx, batch) values " + repeat("(?,?,?,?,?)", ",", len(receipts)) dbtx.ExecuteSQL(insert, args...) } return nil @@ -369,7 +344,9 @@ func ReadReceipt(ctx context.Context, db *sql.DB, txHash common.L2TxHash, config } func ReadTransaction(ctx context.Context, db *sql.DB, txHash gethcommon.Hash) (*types.Transaction, common.L2BatchHash, uint64, uint64, error) { - row := db.QueryRowContext(ctx, selectTxQuery, truncTo4(txHash), txHash.Bytes()) + row := db.QueryRowContext(ctx, + "select tx.content, batch.full_hash, batch.height, tx.idx from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch where batch.is_canonical=true and tx.hash=? 
and tx.full_hash=?", + truncTo4(txHash), txHash.Bytes()) // tx, batch, height, idx var txData []byte @@ -394,7 +371,7 @@ func ReadTransaction(ctx context.Context, db *sql.DB, txHash gethcommon.Hash) (* } func GetContractCreationTx(ctx context.Context, db *sql.DB, address gethcommon.Address) (*gethcommon.Hash, error) { - row := db.QueryRowContext(ctx, selectContractCreationTx, address.Bytes()) + row := db.QueryRowContext(ctx, "select tx.full_hash from exec_tx join tx on tx.id=exec_tx.tx where created_contract_address=? and created_contract_address_full=?", truncBTo4(address.Bytes()), address.Bytes()) var txHashBytes []byte err := row.Scan(&txHashBytes) @@ -411,7 +388,7 @@ func GetContractCreationTx(ctx context.Context, db *sql.DB, address gethcommon.A } func ReadContractCreationCount(ctx context.Context, db *sql.DB) (*big.Int, error) { - row := db.QueryRowContext(ctx, selectTotalCreatedContracts) + row := db.QueryRowContext(ctx, "select count( distinct created_contract_address) from exec_tx ") var count int64 err := row.Scan(&count) @@ -427,7 +404,7 @@ func ReadUnexecutedBatches(ctx context.Context, db *sql.DB, from *big.Int) ([]*c } func BatchWasExecuted(ctx context.Context, db *sql.DB, hash common.L2BatchHash) (bool, error) { - row := db.QueryRowContext(ctx, queryBatchWasExecuted, truncTo4(hash), hash.Bytes()) + row := db.QueryRowContext(ctx, "select is_executed from batch where is_canonical=true and hash=? and full_hash=?", truncTo4(hash), hash.Bytes()) var result bool err := row.Scan(&result) @@ -443,11 +420,13 @@ func BatchWasExecuted(ctx context.Context, db *sql.DB, hash common.L2BatchHash) } func GetReceiptsPerAddress(ctx context.Context, db *sql.DB, config *params.ChainConfig, address *gethcommon.Address, pagination *common.QueryPagination) (types.Receipts, error) { + // todo - not indexed return selectReceipts(ctx, db, config, "where tx.sender_address = ? ORDER BY height DESC LIMIT ? OFFSET ? ", address.Bytes(), pagination.Size, pagination.Offset) } func GetReceiptsPerAddressCount(ctx context.Context, db *sql.DB, address *gethcommon.Address) (uint64, error) { - row := db.QueryRowContext(ctx, queryReceiptsCount+" where tx.sender_address = ?", address.Bytes()) + // todo - this is not indexed and will do a full table scan! + row := db.QueryRowContext(ctx, "select count(1) from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch "+" where tx.sender_address = ?", address.Bytes()) var count uint64 err := row.Scan(&count) @@ -465,7 +444,8 @@ func GetPublicTransactionData(ctx context.Context, db *sql.DB, pagination *commo func selectPublicTxsBySender(ctx context.Context, db *sql.DB, query string, args ...any) ([]common.PublicTransaction, error) { var publicTxs []common.PublicTransaction - rows, err := db.QueryContext(ctx, queryTxList+" "+query, args...) + q := "select tx.full_hash, batch.height, batch.header from exec_tx join batch on batch.sequence=exec_tx.batch join tx on tx.id=exec_tx.tx where batch.is_canonical=true " + query + rows, err := db.QueryContext(ctx, q, args...) 
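	// Each row pairs a canonical transaction's full hash with the height and
	// header of the batch that included it.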
if err != nil { if errors.Is(err, sql.ErrNoRows) { // make sure the error is converted to obscuro-wide not found error @@ -503,7 +483,7 @@ func selectPublicTxsBySender(ctx context.Context, db *sql.DB, query string, args } func GetPublicTransactionCount(ctx context.Context, db *sql.DB) (uint64, error) { - row := db.QueryRowContext(ctx, queryTxCountList) + row := db.QueryRowContext(ctx, "select count(1) from exec_tx join batch on batch.sequence=exec_tx.batch where batch.is_canonical=true") var count uint64 err := row.Scan(&count) diff --git a/go/enclave/storage/enclavedb/block.go b/go/enclave/storage/enclavedb/block.go index a31a701d3d..0ce693a507 100644 --- a/go/enclave/storage/enclavedb/block.go +++ b/go/enclave/storage/enclavedb/block.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "math/big" - "strings" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" @@ -15,30 +14,13 @@ import ( "github.com/ten-protocol/go-ten/go/common/errutil" ) -const ( - blockInsert = "insert into block (hash,full_hash,is_canonical,header,height) values (?,?,?,?,?)" - selectBlockHeader = "select header from block " - - l1msgInsert = "insert into l1_msg (message, block, is_transfer) values " - l1msgValue = "(?,?,?)" - selectL1Msg = "select message from l1_msg m join block b on m.block=b.id " - - rollupInsert = "replace into rollup (hash, full_hash, start_seq, end_seq, time_stamp, header, compression_block) values (?,?,?,?,?,?,?)" - rollupSelect = "select full_hash from rollup r join block b on r.compression_block=b.id where " - rollupSelectMetadata = "select start_seq, time_stamp from rollup where hash = ? and full_hash=?" - - updateCanonicalBlock = "update block set is_canonical=? where " - // todo - do we need the is_canonical field? - updateCanonicalBatches = "update batch set is_canonical=? where l1_proof in " -) - func WriteBlock(_ context.Context, dbtx DBTransaction, b *types.Header) error { header, err := rlp.EncodeToBytes(b) if err != nil { return fmt.Errorf("could not encode block header. Cause: %w", err) } - dbtx.ExecuteSQL(blockInsert, + dbtx.ExecuteSQL("insert into block (hash,full_hash,is_canonical,header,height) values (?,?,?,?,?)", truncTo4(b.Hash()), // hash b.Hash().Bytes(), // full_hash true, // is_canonical @@ -58,20 +40,18 @@ func UpdateCanonicalBlocks(ctx context.Context, dbtx DBTransaction, canonical [] } func updateCanonicalValue(_ context.Context, dbtx DBTransaction, isCanonical bool, blocks []common.L1BlockHash) { - token := "(hash=? and full_hash=?) OR " - updateBlocksWhere := strings.Repeat(token, len(blocks)) - updateBlocksWhere = updateBlocksWhere + "1=0" - - updateBlocks := updateCanonicalBlock + updateBlocksWhere + canonicalBlocks := repeat("(hash=? and full_hash=?)", "OR", len(blocks)) args := make([]any, 0) args = append(args, isCanonical) for _, blockHash := range blocks { args = append(args, truncTo4(blockHash), blockHash.Bytes()) } + + updateBlocks := "update block set is_canonical=? where " + canonicalBlocks dbtx.ExecuteSQL(updateBlocks, args...) - updateBatches := updateCanonicalBatches + "(" + "select id from block where " + updateBlocksWhere + ")" + updateBatches := "update batch set is_canonical=? where l1_proof in (select id from block where " + canonicalBlocks + ")" dbtx.ExecuteSQL(updateBatches, args...) 
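	// Both updates deliberately reuse one args slice: is_canonical first, then a
	// (truncated, full) hash pair per block, matching the repeated
	// "(hash=? and full_hash=?)" tokens in canonicalBlocks.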
} @@ -81,6 +61,7 @@ func FetchBlock(ctx context.Context, db *sql.DB, hash common.L1BlockHash) (*type } func FetchHeadBlock(ctx context.Context, db *sql.DB) (*types.Block, error) { + // todo - just read the one with the max id return fetchBlock(ctx, db, "where is_canonical=true and height=(select max(b.height) from block b where is_canonical=true)") } @@ -95,8 +76,7 @@ func GetBlockId(ctx context.Context, db *sql.DB, hash common.L1BlockHash) (uint6 } func WriteL1Messages[T any](ctx context.Context, db *sql.DB, blockId uint64, messages []T, isValueTransfer bool) error { - insert := l1msgInsert + strings.Repeat(l1msgValue+",", len(messages)) - insert = insert[0 : len(insert)-1] // remove trailing comma + insert := "insert into l1_msg (message, block, is_transfer) values " + repeat("(?,?,?)", ",", len(messages)) args := make([]any, 0) @@ -118,7 +98,7 @@ func WriteL1Messages[T any](ctx context.Context, db *sql.DB, blockId uint64, mes func FetchL1Messages[T any](ctx context.Context, db *sql.DB, blockHash common.L1BlockHash, isTransfer bool) ([]T, error) { var result []T - query := selectL1Msg + " where b.hash = ? and b.full_hash = ? and is_transfer = ?" + query := "select message from l1_msg m join block b on m.block=b.id where b.hash = ? and b.full_hash = ? and is_transfer = ?" rows, err := db.QueryContext(ctx, query, truncTo4(blockHash), blockHash.Bytes(), isTransfer) if err != nil { if errors.Is(err, sql.ErrNoRows) { @@ -153,7 +133,7 @@ func WriteRollup(_ context.Context, dbtx DBTransaction, rollup *common.RollupHea if err != nil { return fmt.Errorf("could not encode batch header. Cause: %w", err) } - dbtx.ExecuteSQL(rollupInsert, + dbtx.ExecuteSQL("replace into rollup (hash, full_hash, start_seq, end_seq, time_stamp, header, compression_block) values (?,?,?,?,?,?,?)", truncTo4(rollup.Hash()), rollup.Hash().Bytes(), internalHeader.FirstBatchSequence.Uint64(), @@ -166,11 +146,9 @@ func WriteRollup(_ context.Context, dbtx DBTransaction, rollup *common.RollupHea } func FetchReorgedRollup(ctx context.Context, db *sql.DB, reorgedBlocks []common.L1BlockHash) (*common.L2BatchHash, error) { - token := "(b.hash=? and b.full_hash=?) OR " - whereClause := strings.Repeat(token, len(reorgedBlocks)) - whereClause = whereClause + "1=0" + whereClause := repeat("(b.hash=? and b.full_hash=?)", "OR", len(reorgedBlocks)) - query := rollupSelect + whereClause + query := "select full_hash from rollup r join block b on r.compression_block=b.id where " + whereClause args := make([]any, 0) for _, blockHash := range reorgedBlocks { @@ -193,7 +171,9 @@ func FetchRollupMetadata(ctx context.Context, db *sql.DB, hash common.L2RollupHa var startTime uint64 rollup := new(common.PublicRollupMetadata) - err := db.QueryRowContext(ctx, rollupSelectMetadata, truncTo4(hash), hash.Bytes()).Scan(&startSeq, &startTime) + err := db.QueryRowContext(ctx, + "select start_seq, time_stamp from rollup where hash = ? 
and full_hash=?", truncTo4(hash), hash.Bytes(), + ).Scan(&startSeq, &startTime) if err != nil { if errors.Is(err, sql.ErrNoRows) { return nil, errutil.ErrNotFound @@ -207,7 +187,7 @@ func FetchRollupMetadata(ctx context.Context, db *sql.DB, hash common.L2RollupHa func fetchBlockHeader(ctx context.Context, db *sql.DB, whereQuery string, args ...any) (*types.Header, error) { var header string - query := selectBlockHeader + " " + whereQuery + query := "select header from block " + whereQuery var err error if len(args) > 0 { err = db.QueryRowContext(ctx, query, args...).Scan(&header) diff --git a/go/enclave/storage/enclavedb/events.go b/go/enclave/storage/enclavedb/events.go index 008b100ddc..27b646dcc7 100644 --- a/go/enclave/storage/enclavedb/events.go +++ b/go/enclave/storage/enclavedb/events.go @@ -5,7 +5,6 @@ import ( "database/sql" "fmt" "math/big" - "strings" gethcommon "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" @@ -15,12 +14,7 @@ import ( ) const ( - baseEventsQuerySelect = "select topic0_full, topic1_full, topic2_full, topic3_full, topic4_full, datablob, b.full_hash, b.height, tx.full_hash, tx.idx, log_idx, address_full" - baseDebugEventsQuerySelect = "select rel_address1_full, rel_address2_full, rel_address3_full, rel_address4_full, lifecycle_event, topic0_full, topic1_full, topic2_full, topic3_full, topic4_full, datablob, b.full_hash, b.height, tx.full_hash, tx.idx, log_idx, address_full" - baseEventsJoin = "from events e join exec_tx extx on e.tx=extx.tx and e.batch=extx.batch join tx on extx.tx=tx.id join batch b on extx.batch=b.sequence where b.is_canonical=true " - insertEvent = "insert into events values " - insertEventValues = "(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)" - orderBy = " order by b.height, tx.idx asc" + baseEventsJoin = "from events e join exec_tx extx on e.tx=extx.tx and e.batch=extx.batch join tx on extx.tx=tx.id join batch b on extx.batch=b.sequence where b.is_canonical=true " ) func StoreEventLogs(ctx context.Context, dbtx DBTransaction, receipts []*types.Receipt, stateDB *state.StateDB) error { @@ -29,9 +23,6 @@ func StoreEventLogs(ctx context.Context, dbtx DBTransaction, receipts []*types.R for _, receipt := range receipts { for _, l := range receipt.Logs { txId, _ := ReadTxId(ctx, dbtx, l.TxHash) - //if err != nil { - // return err - //} batchId, err := ReadBatchId(ctx, dbtx, receipt.BlockHash) if err != nil { return err @@ -47,9 +38,7 @@ func StoreEventLogs(ctx context.Context, dbtx DBTransaction, receipts []*types.R } } if totalLogs > 0 { - query := insertEvent + " " + strings.Repeat(insertEventValues+",", totalLogs) - query = query[0 : len(query)-1] // remove trailing comma - + query := "insert into events values " + repeat("(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", ",", totalLogs) dbtx.ExecuteSQL(query, args...) } return nil @@ -180,8 +169,8 @@ func FilterLogs( } if len(addresses) > 0 { - token := "(address=? AND address_full=?) OR " - query += " AND (" + strings.Repeat(token, len(addresses)) + " 1=0)" + cond := repeat("(address=? AND address_full=?)", " OR ", len(addresses)) + query += " AND (" + cond + ")" for _, address := range addresses { queryParams = append(queryParams, truncBTo4(address.Bytes())) queryParams = append(queryParams, address.Bytes()) @@ -195,8 +184,8 @@ func FilterLogs( // empty rule set == wildcard if len(sub) > 0 { topicColumn := fmt.Sprintf("topic%d", i) - token := fmt.Sprintf("(%s=? AND %s_full=?) 
OR ", topicColumn, topicColumn) - query += " AND (" + strings.Repeat(token, len(sub)) + " 1=0)" + cond := repeat(fmt.Sprintf("(%s=? AND %s_full=?)", topicColumn, topicColumn), "OR", len(sub)) + query += " AND (" + cond + ")" for _, topic := range sub { queryParams = append(queryParams, truncBTo4(topic.Bytes())) queryParams = append(queryParams, topic.Bytes()) @@ -211,7 +200,9 @@ func FilterLogs( func DebugGetLogs(ctx context.Context, db *sql.DB, txHash common.TxHash) ([]*tracers.DebugLogs, error) { var queryParams []any - query := baseDebugEventsQuerySelect + " " + baseEventsJoin + "AND tx.hash = ? AND tx.full_hash = ?" + query := "select rel_address1_full, rel_address2_full, rel_address3_full, rel_address4_full, lifecycle_event, topic0_full, topic1_full, topic2_full, topic3_full, topic4_full, datablob, b.full_hash, b.height, tx.full_hash, tx.idx, log_idx, address_full" + + baseEventsJoin + + "AND tx.hash = ? AND tx.full_hash = ?" queryParams = append(queryParams, truncTo4(txHash), txHash.Bytes()) @@ -327,7 +318,7 @@ func loadLogs(ctx context.Context, db *sql.DB, requestingAccount *gethcommon.Add } result := make([]*types.Log, 0) - query := baseEventsQuerySelect + " " + baseEventsJoin + query := "select topic0_full, topic1_full, topic2_full, topic3_full, topic4_full, datablob, b.full_hash, b.height, tx.full_hash, tx.idx, log_idx, address_full" + " " + baseEventsJoin var queryParams []any // Add relevancy rules @@ -346,7 +337,7 @@ func loadLogs(ctx context.Context, db *sql.DB, requestingAccount *gethcommon.Add query += whereCondition queryParams = append(queryParams, whereParams...) - query += orderBy + query += " order by b.height, tx.idx asc" rows, err := db.QueryContext(ctx, query, queryParams...) if err != nil { diff --git a/go/enclave/storage/enclavedb/keyvalue.go b/go/enclave/storage/enclavedb/keyvalue.go index cb78491501..366b098011 100644 --- a/go/enclave/storage/enclavedb/keyvalue.go +++ b/go/enclave/storage/enclavedb/keyvalue.go @@ -5,24 +5,25 @@ import ( "database/sql" "errors" "fmt" - "strings" + "hash/fnv" "github.com/ethereum/go-ethereum/ethdb" "github.com/ten-protocol/go-ten/go/common/errutil" ) const ( - getQry = `select keyvalue.val from keyvalue where keyvalue.ky = ?;` + getQry = `select keyvalue.val from keyvalue where keyvalue.ky = ? and keyvalue.ky_full = ?;` // `replace` will perform insert or replace if existing and this syntax works for both sqlite and edgeless db - putQry = `replace into keyvalue values(?, ?);` - putQryBatch = `replace into keyvalue values` - putQryValues = `(?,?)` - delQry = `delete from keyvalue where keyvalue.ky = ?;` - searchQry = `select * from keyvalue where substring(keyvalue.ky, 1, ?) = ? and keyvalue.ky >= ? order by keyvalue.ky asc` + putQry = `replace into keyvalue (ky, ky_full, val) values(?, ?, ?);` + putQryBatch = `replace into keyvalue (ky, ky_full, val) values` + putQryValues = `(?,?,?)` + delQry = `delete from keyvalue where keyvalue.ky = ? and keyvalue.ky_full = ?;` + // todo - how is the performance of this? + searchQry = `select ky_full, val from keyvalue where substring(keyvalue.ky_full, 1, ?) = ? and keyvalue.ky_full >= ? 
order by keyvalue.ky_full asc` ) func Has(ctx context.Context, db *sql.DB, key []byte) (bool, error) { - err := db.QueryRowContext(ctx, getQry, key).Scan() + err := db.QueryRowContext(ctx, getQry, hash(key), key).Scan() if err != nil { if errors.Is(err, sql.ErrNoRows) { return false, nil @@ -35,7 +36,7 @@ func Has(ctx context.Context, db *sql.DB, key []byte) (bool, error) { func Get(ctx context.Context, db *sql.DB, key []byte) ([]byte, error) { var res []byte - err := db.QueryRowContext(ctx, getQry, key).Scan(&res) + err := db.QueryRowContext(ctx, getQry, hash(key), key).Scan(&res) if err != nil { if errors.Is(err, sql.ErrNoRows) { // make sure the error is converted to obscuro-wide not found error @@ -47,7 +48,7 @@ func Get(ctx context.Context, db *sql.DB, key []byte) ([]byte, error) { } func Put(ctx context.Context, db *sql.DB, key []byte, value []byte) error { - _, err := db.ExecContext(ctx, putQry, key, value) + _, err := db.ExecContext(ctx, putQry, hash(key), key, value) return err } @@ -58,14 +59,13 @@ func PutKeyValues(ctx context.Context, tx *sql.Tx, keys [][]byte, vals [][]byte) if len(keys) > 0 { // write the kv updates as a single update statement for increased efficiency - update := putQryBatch + strings.Repeat(putQryValues+",", len(keys)) - update = update[0 : len(update)-1] // remove trailing comma + update := putQryBatch + repeat(putQryValues, ",", len(keys)) values := make([]any, 0) for i := range keys { - values = append(values, keys[i], vals[i]) + values = append(values, hash(keys[i]), keys[i], vals[i]) } - _, err := tx.Exec(update, values...) + _, err := tx.ExecContext(ctx, update, values...) if err != nil { return fmt.Errorf("failed to exec k/v transaction statement. kv=%v, err=%w", values, err) } @@ -75,13 +75,13 @@ func PutKeyValues(ctx context.Context, tx *sql.Tx, keys [][]byte, vals [][]byte) } func Delete(ctx context.Context, db *sql.DB, key []byte) error { - _, err := db.ExecContext(ctx, delQry, key) + _, err := db.ExecContext(ctx, delQry, hash(key), key) return err } func DeleteKeys(ctx context.Context, db *sql.Tx, keys [][]byte) error { for _, del := range keys { - _, err := db.ExecContext(ctx, delQry, del) + _, err := db.ExecContext(ctx, delQry, hash(del), del) if err != nil { return err } @@ -108,3 +108,14 @@ func NewIterator(ctx context.Context, db *sql.DB, prefix []byte, start []byte) e rows: rows, } } + +// hash returns 4 bytes "hash" of the key to be indexed +// truncating is not sufficient because the keys are not random +func hash(key []byte) []byte { + h := fnv.New32() + _, err := h.Write(key) + if err != nil { + return nil + } + return h.Sum([]byte{}) +} diff --git a/go/enclave/storage/enclavedb/utils.go b/go/enclave/storage/enclavedb/utils.go index a47bc75d57..57f0dcbaf3 100644 --- a/go/enclave/storage/enclavedb/utils.go +++ b/go/enclave/storage/enclavedb/utils.go @@ -1,6 +1,10 @@ package enclavedb -import gethcommon "github.com/ethereum/go-ethereum/common" +import ( + "strings" + + gethcommon "github.com/ethereum/go-ethereum/common" +) func truncTo4(hash gethcommon.Hash) []byte { return truncBTo4(hash.Bytes()) @@ -15,3 +19,11 @@ func truncBTo4(bytes []byte) []byte { copy(c, b) return c } + +func repeat(token string, sep string, count int) string { + elems := make([]string, count) + for i := 0; i < count; i++ { + elems[i] = token + } + return strings.Join(elems, sep) +} diff --git a/go/enclave/storage/init/edgelessdb/001_init.sql b/go/enclave/storage/init/edgelessdb/001_init.sql index 7a20e05ef4..a865b9ea38 100644 --- 
a/go/enclave/storage/init/edgelessdb/001_init.sql +++ b/go/enclave/storage/init/edgelessdb/001_init.sql @@ -3,9 +3,12 @@ CREATE DATABASE obsdb; create table if not exists obsdb.keyvalue ( - ky varbinary(64), - val mediumblob NOT NULL, - primary key (ky) + id INTEGER AUTO_INCREMENT, + ky binary(4), + ky_full varbinary(64), + val mediumblob NOT NULL, + primary key (id), + INDEX (ky) ); GRANT ALL ON obsdb.keyvalue TO obscuro; @@ -114,13 +117,15 @@ GRANT ALL ON obsdb.tx TO obscuro; create table if not exists obsdb.exec_tx ( - id INTEGER AUTO_INCREMENT, - created_contract_address binary(20), - receipt mediumblob, - tx int NOT NULL, - batch int NOT NULL, + id INTEGER AUTO_INCREMENT, + created_contract_address binary(4), + created_contract_address_full binary(20), + receipt mediumblob, + tx int NOT NULL, + batch int NOT NULL, INDEX (batch), INDEX (tx), + INDEX (created_contract_address), primary key (id) ); GRANT ALL ON obsdb.exec_tx TO obscuro; diff --git a/go/enclave/storage/init/sqlite/001_init.sql b/go/enclave/storage/init/sqlite/001_init.sql index dd792cfa4a..dddff11d09 100644 --- a/go/enclave/storage/init/sqlite/001_init.sql +++ b/go/enclave/storage/init/sqlite/001_init.sql @@ -1,8 +1,11 @@ create table if not exists keyvalue ( - ky varbinary(64) primary key, - val mediumblob NOT NULL + id INTEGER PRIMARY KEY AUTOINCREMENT, + ky binary(4), + ky_full varbinary(64), + val mediumblob NOT NULL ); +create index IDX_KV on keyvalue (ky); create table if not exists config ( @@ -94,15 +97,17 @@ create index IDX_Tx_HASH on tx (hash); create table if not exists exec_tx ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - created_contract_address binary(20), - receipt mediumblob, + id INTEGER PRIMARY KEY AUTOINCREMENT, + created_contract_address binary(4), + created_contract_address_full binary(20), + receipt mediumblob, -- commenting out the fk until synthetic transactions are also stored --- tx binary(16) REFERENCES tx, - tx INTEGER NOT NULL, - batch INTEGER NOT NULL REFERENCES batch + tx INTEGER NOT NULL, + batch INTEGER NOT NULL REFERENCES batch ); -create index IX_EX_TX1 on exec_tx (tx); +create index IDX_EX_TX_TX on exec_tx (tx); +create index IDX_EX_TX_BATCH on exec_tx (batch); +create index IDX_EX_TX_CCA on exec_tx (created_contract_address); -- todo denormalize. Extract contract and user table and point topic0 and rel_addreses to it create table if not exists events diff --git a/go/enclave/storage/storage.go b/go/enclave/storage/storage.go index 1388403d5a..f48779fbb4 100644 --- a/go/enclave/storage/storage.go +++ b/go/enclave/storage/storage.go @@ -417,10 +417,8 @@ func (s *storageImpl) StoreBatch(ctx context.Context, batch *core.Batch, convert dbTx := s.db.NewDBTransaction() s.logger.Trace("write batch", log.BatchHashKey, batch.Hash(), "l1Proof", batch.Header.L1Proof, log.BatchSeqNoKey, batch.SeqNo()) - blockId, err := enclavedb.GetBlockId(ctx, s.db.GetSQLDB(), batch.Header.L1Proof) - if err != nil { - return err - } + // it is possible that the block is not available if this is a validator + blockId, _ := enclavedb.GetBlockId(ctx, s.db.GetSQLDB(), batch.Header.L1Proof) if err := enclavedb.WriteBatchAndTransactions(ctx, dbTx, batch, convertedHash, blockId); err != nil { return fmt.Errorf("could not write batch. 
Cause: %w", err) From 3ba2d9679c9b17f36be2e8c69de6dcbe493b7787 Mon Sep 17 00:00:00 2001 From: Tudor Malene Date: Wed, 1 May 2024 12:22:57 +0100 Subject: [PATCH 04/15] fix test --- go/enclave/storage/enclavedb/enclave_sql_db_test.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/go/enclave/storage/enclavedb/enclave_sql_db_test.go b/go/enclave/storage/enclavedb/enclave_sql_db_test.go index 957cf8c7b8..379923a9ee 100644 --- a/go/enclave/storage/enclavedb/enclave_sql_db_test.go +++ b/go/enclave/storage/enclavedb/enclave_sql_db_test.go @@ -18,7 +18,13 @@ import ( ) var ( - createKVTable = `create table if not exists keyvalue (ky varbinary(64) primary key, val mediumblob);` + createKVTable = `create table if not exists keyvalue +( + id INTEGER PRIMARY KEY AUTOINCREMENT, + ky binary(4), + ky_full varbinary(64), + val mediumblob NOT NULL +);` key1 = hexutils.HexToBytes("0000000000000000000000000000000000000000000000000000000000000001") key2 = hexutils.HexToBytes("0000000000000000000000000000000000000000000000000000000000000002") From a1f85d79d73ba45bde9ddefd77f411754bc08644 Mon Sep 17 00:00:00 2001 From: Tudor Malene Date: Wed, 1 May 2024 14:09:29 +0100 Subject: [PATCH 05/15] fix --- go/enclave/storage/enclavedb/batch.go | 14 +++++++--- go/enclave/storage/enclavedb/block.go | 26 ++++++++++++++++--- go/enclave/storage/enclavedb/events.go | 4 +-- go/enclave/storage/enclavedb/interfaces.go | 1 - .../storage/init/edgelessdb/001_init.sql | 2 +- go/enclave/storage/init/sqlite/001_init.sql | 2 +- go/host/enclave/guardian.go | 4 ++- 7 files changed, 39 insertions(+), 14 deletions(-) diff --git a/go/enclave/storage/enclavedb/batch.go b/go/enclave/storage/enclavedb/batch.go index a718c2c3aa..faedad1a1b 100644 --- a/go/enclave/storage/enclavedb/batch.go +++ b/go/enclave/storage/enclavedb/batch.go @@ -25,7 +25,7 @@ const ( ) // WriteBatchAndTransactions - persists the batch and the transactions -func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *core.Batch, convertedHash gethcommon.Hash, blockId uint64) error { +func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *core.Batch, convertedHash gethcommon.Hash, blockId *uint64) error { // todo - optimize for reorgs batchBodyID := batch.SeqNo().Uint64() @@ -41,7 +41,10 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *c dbtx.ExecuteSQL("replace into batch_body values (?,?)", batchBodyID, body) var isCanon bool - err = dbtx.GetDB().QueryRowContext(ctx, "select is_canonical from block where hash=? and full_hash=?", truncTo4(batch.Header.L1Proof), batch.Header.L1Proof.Bytes()).Scan(&isCanon) + err = dbtx.GetDB().QueryRowContext(ctx, + "select is_canonical from block where hash=? and full_hash=?", + truncTo4(batch.Header.L1Proof), batch.Header.L1Proof.Bytes(), + ).Scan(&isCanon) if err != nil { // if the block is not found, we assume it is non-canonical // fmt.Printf("IsCanon %s err: %s\n", batch.Header.L1Proof, err) @@ -119,10 +122,13 @@ func WriteBatchExecution(ctx context.Context, dbtx DBTransaction, seqNo *big.Int return nil } -func ReadTxId(ctx context.Context, dbtx DBTransaction, txHash gethcommon.Hash) (uint64, error) { +func ReadTxId(ctx context.Context, dbtx DBTransaction, txHash gethcommon.Hash) (*uint64, error) { var txId uint64 err := dbtx.GetDB().QueryRowContext(ctx, "select id from tx where hash=? 
and full_hash=?", truncTo4(txHash), txHash.Bytes()).Scan(&txId) - return txId, err + if err != nil { + return nil, err + } + return &txId, err } func ReadBatchBySeqNo(ctx context.Context, db *sql.DB, seqNo uint64) (*core.Batch, error) { diff --git a/go/enclave/storage/enclavedb/block.go b/go/enclave/storage/enclavedb/block.go index 0ce693a507..6ddf189241 100644 --- a/go/enclave/storage/enclavedb/block.go +++ b/go/enclave/storage/enclavedb/block.go @@ -40,6 +40,9 @@ func UpdateCanonicalBlocks(ctx context.Context, dbtx DBTransaction, canonical [] } func updateCanonicalValue(_ context.Context, dbtx DBTransaction, isCanonical bool, blocks []common.L1BlockHash) { + if len(blocks) > 1 { + println("!!!FFFFFOOOOORRRR") + } canonicalBlocks := repeat("(hash=? and full_hash=?)", "OR", len(blocks)) args := make([]any, 0) @@ -48,6 +51,18 @@ func updateCanonicalValue(_ context.Context, dbtx DBTransaction, isCanonical boo args = append(args, truncTo4(blockHash), blockHash.Bytes()) } + rows, err := dbtx.GetDB().Query("select id from block where "+canonicalBlocks, args[1:]...) + defer rows.Close() + if err != nil { + panic(err) + return + } + for rows.Next() { + var id uint64 + rows.Scan(&id) + fmt.Printf("Update canonical=%t block id: %v, hash: %s\n", isCanonical, id, blocks[0].Hex()) + } + updateBlocks := "update block set is_canonical=? where " + canonicalBlocks dbtx.ExecuteSQL(updateBlocks, args...) @@ -69,13 +84,16 @@ func FetchBlockHeaderByHeight(ctx context.Context, db *sql.DB, height *big.Int) return fetchBlockHeader(ctx, db, "where is_canonical=true and height=?", height.Int64()) } -func GetBlockId(ctx context.Context, db *sql.DB, hash common.L1BlockHash) (uint64, error) { +func GetBlockId(ctx context.Context, db *sql.DB, hash common.L1BlockHash) (*uint64, error) { var id uint64 err := db.QueryRowContext(ctx, "select id from block where hash=? 
and full_hash=?", truncTo4(hash), hash).Scan(&id) - return id, err + if err != nil { + return nil, err + } + return &id, err } -func WriteL1Messages[T any](ctx context.Context, db *sql.DB, blockId uint64, messages []T, isValueTransfer bool) error { +func WriteL1Messages[T any](ctx context.Context, db *sql.DB, blockId *uint64, messages []T, isValueTransfer bool) error { insert := "insert into l1_msg (message, block, is_transfer) values " + repeat("(?,?,?)", ",", len(messages)) args := make([]any, 0) @@ -127,7 +145,7 @@ func FetchL1Messages[T any](ctx context.Context, db *sql.DB, blockHash common.L1 return result, nil } -func WriteRollup(_ context.Context, dbtx DBTransaction, rollup *common.RollupHeader, blockId uint64, internalHeader *common.CalldataRollupHeader) error { +func WriteRollup(_ context.Context, dbtx DBTransaction, rollup *common.RollupHeader, blockId *uint64, internalHeader *common.CalldataRollupHeader) error { // Write the encoded header data, err := rlp.EncodeToBytes(rollup) if err != nil { diff --git a/go/enclave/storage/enclavedb/events.go b/go/enclave/storage/enclavedb/events.go index 27b646dcc7..1a567cee0a 100644 --- a/go/enclave/storage/enclavedb/events.go +++ b/go/enclave/storage/enclavedb/events.go @@ -200,9 +200,9 @@ func FilterLogs( func DebugGetLogs(ctx context.Context, db *sql.DB, txHash common.TxHash) ([]*tracers.DebugLogs, error) { var queryParams []any - query := "select rel_address1_full, rel_address2_full, rel_address3_full, rel_address4_full, lifecycle_event, topic0_full, topic1_full, topic2_full, topic3_full, topic4_full, datablob, b.full_hash, b.height, tx.full_hash, tx.idx, log_idx, address_full" + + query := "select rel_address1_full, rel_address2_full, rel_address3_full, rel_address4_full, lifecycle_event, topic0_full, topic1_full, topic2_full, topic3_full, topic4_full, datablob, b.full_hash, b.height, tx.full_hash, tx.idx, log_idx, address_full " + baseEventsJoin + - "AND tx.hash = ? AND tx.full_hash = ?" + " AND tx.hash = ? AND tx.full_hash = ?" queryParams = append(queryParams, truncTo4(txHash), txHash.Bytes()) diff --git a/go/enclave/storage/enclavedb/interfaces.go b/go/enclave/storage/enclavedb/interfaces.go index a8cc0a34d0..6a30420157 100644 --- a/go/enclave/storage/enclavedb/interfaces.go +++ b/go/enclave/storage/enclavedb/interfaces.go @@ -26,7 +26,6 @@ type EnclaveDB interface { // todo - does it need to be an ethdb.Batch? 
// todo - can we use the typical type DBTransaction interface { - ethdb.Batch GetDB() *sql.DB ExecuteSQL(query string, args ...any) } diff --git a/go/enclave/storage/init/edgelessdb/001_init.sql b/go/enclave/storage/init/edgelessdb/001_init.sql index a865b9ea38..a3f294bda2 100644 --- a/go/enclave/storage/init/edgelessdb/001_init.sql +++ b/go/enclave/storage/init/edgelessdb/001_init.sql @@ -89,7 +89,7 @@ create table if not exists obsdb.batch is_canonical boolean NOT NULL, header blob NOT NULL, body int NOT NULL, - l1_proof INTEGER NOT NULL, + l1_proof INTEGER, is_executed boolean NOT NULL, primary key (sequence), INDEX (hash), diff --git a/go/enclave/storage/init/sqlite/001_init.sql b/go/enclave/storage/init/sqlite/001_init.sql index dddff11d09..9361a21ef0 100644 --- a/go/enclave/storage/init/sqlite/001_init.sql +++ b/go/enclave/storage/init/sqlite/001_init.sql @@ -73,7 +73,7 @@ create table if not exists batch is_canonical boolean NOT NULL, header blob NOT NULL, body int NOT NULL REFERENCES batch_body, - l1_proof INTEGER NOT NULL, -- normally this would be a FK, but there is a weird edge case where an L2 node might not have the block used to create this batch + l1_proof INTEGER, -- normally this would be a FK, but there is a weird edge case where an L2 node might not have the block used to create this batch is_executed boolean NOT NULL -- the unique constraint is commented for now because there might be multiple non-canonical batches for the same height -- unique (height, is_canonical, is_executed) diff --git a/go/host/enclave/guardian.go b/go/host/enclave/guardian.go index 83fa49894e..6bdaf47075 100644 --- a/go/host/enclave/guardian.go +++ b/go/host/enclave/guardian.go @@ -550,7 +550,9 @@ func (g *Guardian) periodicBatchProduction() { g.logger.Debug("Create batch") // if maxBatchInterval is set higher than batchInterval then we are happy to skip creating batches when there is no data // (up to a maximum time of maxBatchInterval) - skipBatchIfEmpty := g.maxBatchInterval > g.batchInterval && time.Since(g.lastBatchCreated) < g.maxBatchInterval + // todo - disable the skip mechanism + // skipBatchIfEmpty := g.maxBatchInterval > g.batchInterval && time.Since(g.lastBatchCreated) < g.maxBatchInterval + skipBatchIfEmpty := false err := g.enclaveClient.CreateBatch(context.Background(), skipBatchIfEmpty) if err != nil { g.logger.Error("Unable to produce batch", log.ErrKey, err) From 6b12b415d8deb45de90e1f8c81b298ffb4685f9e Mon Sep 17 00:00:00 2001 From: Tudor Malene Date: Thu, 2 May 2024 17:57:55 +0100 Subject: [PATCH 06/15] lots --- go/enclave/components/batch_registry.go | 9 ++ go/enclave/components/interfaces.go | 1 + go/enclave/components/rollup_consumer.go | 4 + go/enclave/enclave.go | 1 + go/enclave/storage/db_init.go | 7 +- go/enclave/storage/enclavedb/batch.go | 63 +++++---- go/enclave/storage/enclavedb/block.go | 70 +++++----- go/enclave/storage/enclavedb/config.go | 16 +-- .../storage/enclavedb/db_transaction.go | 58 +++----- .../storage/enclavedb/enclave_sql_db.go | 26 ++-- .../storage/enclavedb/enclave_sql_db_test.go | 2 +- go/enclave/storage/enclavedb/events.go | 38 +++-- go/enclave/storage/enclavedb/interfaces.go | 15 +- .../storage/init/edgelessdb/001_init.sql | 4 +- .../storage/init/edgelessdb/edgelessdb.go | 2 +- .../storage/init/migration/db_migration.go | 1 + go/enclave/storage/init/sqlite/001_init.sql | 4 +- go/enclave/storage/init/sqlite/sqlite.go | 38 ++++- go/enclave/storage/interfaces.go | 3 - go/enclave/storage/storage.go | 132 ++++++++++++++---- 
go/host/rpc/enclaverpc/enclave_client.go | 25 +--- go/node/docker_node.go | 4 +- 22 files changed, 288 insertions(+), 235 deletions(-) diff --git a/go/enclave/components/batch_registry.go b/go/enclave/components/batch_registry.go index 026725ca01..07a5b20d3d 100644 --- a/go/enclave/components/batch_registry.go +++ b/go/enclave/components/batch_registry.go @@ -75,6 +75,15 @@ func (br *batchRegistry) UnsubscribeFromBatches() { br.batchesCallback = nil } +func (br *batchRegistry) OnBlockProcessed(_ *BlockIngestionType) { + headBatch, err := br.storage.FetchHeadBatch(context.Background()) + if err != nil { + br.logger.Error("Could not fetch head batch", log.ErrKey, err) + return + } + br.headBatchSeq = headBatch.SeqNo() +} + func (br *batchRegistry) OnBatchExecuted(batch *core.Batch, receipts types.Receipts) { br.callbackMutex.RLock() defer br.callbackMutex.RUnlock() diff --git a/go/enclave/components/interfaces.go b/go/enclave/components/interfaces.go index cb68b91478..eebf7faf94 100644 --- a/go/enclave/components/interfaces.go +++ b/go/enclave/components/interfaces.go @@ -100,6 +100,7 @@ type BatchRegistry interface { UnsubscribeFromBatches() OnBatchExecuted(batch *core.Batch, receipts types.Receipts) + OnBlockProcessed(*BlockIngestionType) // HasGenesisBatch - returns if genesis batch is available yet or not, or error in case // the function is unable to determine. diff --git a/go/enclave/components/rollup_consumer.go b/go/enclave/components/rollup_consumer.go index 21c977e5d5..88d8f16d60 100644 --- a/go/enclave/components/rollup_consumer.go +++ b/go/enclave/components/rollup_consumer.go @@ -60,6 +60,10 @@ func (rc *rollupConsumerImpl) ProcessRollupsInBlock(ctx context.Context, b *comm return err } + if len(rollups) > 1 { + rc.logger.Warn(fmt.Sprintf("Multiple rollups %d in block %s", len(rollups), b.Block.Hash())) + } + for _, rollup := range rollups { l1CompressionBlock, err := rc.storage.FetchBlock(ctx, rollup.Header.CompressionL1Head) if err != nil { diff --git a/go/enclave/enclave.go b/go/enclave/enclave.go index 4216b30247..7ff5acab86 100644 --- a/go/enclave/enclave.go +++ b/go/enclave/enclave.go @@ -449,6 +449,7 @@ func (e *enclaveImpl) ingestL1Block(ctx context.Context, br *common.BlockAndRece } if ingestion.IsFork() { + e.registry.OnBlockProcessed(ingestion) err := e.service.OnL1Fork(ctx, ingestion.ChainFork) if err != nil { return nil, err diff --git a/go/enclave/storage/db_init.go b/go/enclave/storage/db_init.go index 853143272d..5706d9f663 100644 --- a/go/enclave/storage/db_init.go +++ b/go/enclave/storage/db_init.go @@ -21,7 +21,9 @@ func CreateDBFromConfig(cfg *config.EnclaveConfig, logger gethlog.Logger) (encla if cfg.UseInMemoryDB { logger.Info("UseInMemoryDB flag is true, data will not be persisted. 
Creating in-memory database...") // this creates a temporary sqlite sqldb - return sqlite.CreateTemporarySQLiteDB(cfg.HostID.String(), "mode=memory&cache=shared&_foreign_keys=on", *cfg, logger) + // return sqlite.CreateTemporarySQLiteDB(cfg.HostID.String(), "mode=memory&cache=shared&_foreign_keys=on", *cfg, logger) + // return sqlite.CreateTemporarySQLiteDB(cfg.HostID.String(), "mode=memory&_foreign_keys=on&_journal_mode=wal&_txlock=immediate&_locking_mode=EXCLUSIVE", *cfg, logger) + return sqlite.CreateTemporarySQLiteDB("", "_foreign_keys=on&_journal_mode=wal&_txlock=immediate&_synchronous=normal", *cfg, logger) } if !cfg.WillAttest { @@ -29,7 +31,8 @@ func CreateDBFromConfig(cfg *config.EnclaveConfig, logger gethlog.Logger) (encla logger.Warn("Attestation is disabled, using a basic sqlite DB for persistence") // when we want to test persistence after node restart the SqliteDBPath should be set // (if empty string then a temp sqldb file will be created for the lifetime of the enclave) - return sqlite.CreateTemporarySQLiteDB(cfg.SqliteDBPath, "_foreign_keys=on", *cfg, logger) + // return sqlite.CreateTemporarySQLiteDB(cfg.SqliteDBPath, "_foreign_keys=on&_txlock=immediate", *cfg, logger) + return sqlite.CreateTemporarySQLiteDB(cfg.SqliteDBPath, "_foreign_keys=on&_journal_mode=wal&_txlock=immediate&_synchronous=normal", *cfg, logger) } // persistent and with attestation means connecting to edgeless DB in a trusted enclave from a secure enclave diff --git a/go/enclave/storage/enclavedb/batch.go b/go/enclave/storage/enclavedb/batch.go index faedad1a1b..3434f6a97f 100644 --- a/go/enclave/storage/enclavedb/batch.go +++ b/go/enclave/storage/enclavedb/batch.go @@ -25,7 +25,7 @@ const ( ) // WriteBatchAndTransactions - persists the batch and the transactions -func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *core.Batch, convertedHash gethcommon.Hash, blockId *uint64) error { +func WriteBatchAndTransactions(ctx context.Context, dbtx *sql.Tx, batch *core.Batch, convertedHash gethcommon.Hash, blockId int64) error { // todo - optimize for reorgs batchBodyID := batch.SeqNo().Uint64() @@ -38,10 +38,13 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *c return fmt.Errorf("could not encode batch header. Cause: %w", err) } - dbtx.ExecuteSQL("replace into batch_body values (?,?)", batchBodyID, body) + _, err = dbtx.ExecContext(ctx, "replace into batch_body values (?,?)", batchBodyID, body) + if err != nil { + return err + } var isCanon bool - err = dbtx.GetDB().QueryRowContext(ctx, + err = dbtx.QueryRowContext(ctx, "select is_canonical from block where hash=? 
and full_hash=?", truncTo4(batch.Header.L1Proof), batch.Header.L1Proof.Bytes(), ).Scan(&isCanon) @@ -51,7 +54,7 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *c isCanon = false } - dbtx.ExecuteSQL("insert into batch values (?,?,?,?,?,?,?,?,?,?)", + _, err = dbtx.ExecContext(ctx, "insert into batch values (?,?,?,?,?,?,?,?,?,?)", batch.Header.SequencerOrderNo.Uint64(), // sequence batch.Hash(), // full hash convertedHash, // converted_hash @@ -60,9 +63,12 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *c isCanon, // is_canonical header, // header blob batchBodyID, // reference to the batch body - blockId, // indexed l1_proof + blockId, // l1_proof block id false, // executed ) + if err != nil { + return err + } // creates a big insert statement for all transactions if len(batch.Transactions) > 0 { @@ -88,15 +94,21 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx DBTransaction, batch *c args = append(args, i) // idx args = append(args, batchBodyID) // the batch body which contained it } - dbtx.ExecuteSQL(insert, args...) + _, err = dbtx.ExecContext(ctx, insert, args...) + if err != nil { + return err + } } return nil } -// WriteBatchExecution - insert all receipts to the db -func WriteBatchExecution(ctx context.Context, dbtx DBTransaction, seqNo *big.Int, receipts []*types.Receipt) error { - dbtx.ExecuteSQL("update batch set is_executed=true where sequence=?", seqNo.Uint64()) +// WriteBatchExecution - save receipts +func WriteBatchExecution(ctx context.Context, dbtx *sql.Tx, seqNo *big.Int, receipts []*types.Receipt) error { + _, err := dbtx.ExecContext(ctx, "update batch set is_executed=true where sequence=?", seqNo.Uint64()) + if err != nil { + return err + } args := make([]any, 0) for _, receipt := range receipts { @@ -108,27 +120,31 @@ func WriteBatchExecution(ctx context.Context, dbtx DBTransaction, seqNo *big.Int } // ignore the error because synthetic transactions will not be inserted - txId, _ := ReadTxId(ctx, dbtx, storageReceipt.TxHash) + txId, _ := GetTxId(ctx, dbtx, storageReceipt.TxHash) args = append(args, truncBTo4(receipt.ContractAddress.Bytes())) // created_contract_address args = append(args, receipt.ContractAddress.Bytes()) // created_contract_address args = append(args, receiptBytes) // the serialised receipt - args = append(args, txId) // tx id - args = append(args, seqNo.Uint64()) // batch_seq + if txId == 0 { + args = append(args, nil) // tx id + } else { + args = append(args, txId) // tx id + } + args = append(args, seqNo.Uint64()) // batch_seq } if len(args) > 0 { insert := "insert into exec_tx (created_contract_address,created_contract_address_full, receipt, tx, batch) values " + repeat("(?,?,?,?,?)", ",", len(receipts)) - dbtx.ExecuteSQL(insert, args...) + _, err = dbtx.ExecContext(ctx, insert, args...) + if err != nil { + return err + } } return nil } -func ReadTxId(ctx context.Context, dbtx DBTransaction, txHash gethcommon.Hash) (*uint64, error) { - var txId uint64 - err := dbtx.GetDB().QueryRowContext(ctx, "select id from tx where hash=? and full_hash=?", truncTo4(txHash), txHash.Bytes()).Scan(&txId) - if err != nil { - return nil, err - } - return &txId, err +func GetTxId(ctx context.Context, dbtx *sql.Tx, txHash gethcommon.Hash) (int64, error) { + var txId int64 + err := dbtx.QueryRowContext(ctx, "select id from tx where hash=? 
and full_hash=?", truncTo4(txHash), txHash.Bytes()).Scan(&txId) + return txId, err } func ReadBatchBySeqNo(ctx context.Context, db *sql.DB, seqNo uint64) (*core.Batch, error) { @@ -153,7 +169,7 @@ func ReadCurrentHeadBatch(ctx context.Context, db *sql.DB) (*core.Batch, error) } func ReadBatchesByBlock(ctx context.Context, db *sql.DB, hash common.L1BlockHash) ([]*core.Batch, error) { - return fetchBatches(ctx, db, " join block l1b on b.l1_proof=l1b.id where l1b.hash=? and l1b.full_l1_proof=? order by b.sequence", truncTo4(hash), hash.Bytes()) + return fetchBatches(ctx, db, " join block l1b on b.l1_proof=l1b.id where l1b.hash=? and l1b.full_hash=? order by b.sequence", truncTo4(hash), hash.Bytes()) } func ReadCurrentSequencerNo(ctx context.Context, db *sql.DB) (*big.Int, error) { @@ -173,11 +189,6 @@ func ReadCurrentSequencerNo(ctx context.Context, db *sql.DB) (*big.Int, error) { return big.NewInt(seq.Int64), nil } -func ReadHeadBatchForBlock(ctx context.Context, db *sql.DB, l1Hash common.L1BlockHash) (*core.Batch, error) { - query := " where b.is_canonical=true and b.is_executed=true and b.height=(select max(b1.height) from batch b1 where b1.is_canonical=true and b1.is_executed=true and b1.l1_proof=? and b1.full_l1_proof=?)" - return fetchBatch(ctx, db, query, truncTo4(l1Hash), l1Hash.Bytes()) -} - func fetchBatch(ctx context.Context, db *sql.DB, whereQuery string, args ...any) (*core.Batch, error) { var header string var body []byte diff --git a/go/enclave/storage/enclavedb/block.go b/go/enclave/storage/enclavedb/block.go index 6ddf189241..3a3fb53f2e 100644 --- a/go/enclave/storage/enclavedb/block.go +++ b/go/enclave/storage/enclavedb/block.go @@ -8,41 +8,47 @@ import ( "fmt" "math/big" + gethlog "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" "github.com/ten-protocol/go-ten/go/common" "github.com/ten-protocol/go-ten/go/common/errutil" ) -func WriteBlock(_ context.Context, dbtx DBTransaction, b *types.Header) error { +func WriteBlock(ctx context.Context, dbtx *sql.Tx, b *types.Header) error { header, err := rlp.EncodeToBytes(b) if err != nil { return fmt.Errorf("could not encode block header. 
Cause: %w", err) } - dbtx.ExecuteSQL("insert into block (hash,full_hash,is_canonical,header,height) values (?,?,?,?,?)", + _, err = dbtx.ExecContext(ctx, "insert into block (hash,full_hash,is_canonical,header,height) values (?,?,?,?,?)", truncTo4(b.Hash()), // hash b.Hash().Bytes(), // full_hash true, // is_canonical header, // header b.Number.Uint64(), // height ) - return nil + return err } -func UpdateCanonicalBlocks(ctx context.Context, dbtx DBTransaction, canonical []common.L1BlockHash, nonCanonical []common.L1BlockHash) { +func UpdateCanonicalBlocks(ctx context.Context, dbtx *sql.Tx, canonical []common.L1BlockHash, nonCanonical []common.L1BlockHash, logger gethlog.Logger) error { if len(nonCanonical) > 0 { - updateCanonicalValue(ctx, dbtx, false, nonCanonical) + err := updateCanonicalValue(ctx, dbtx, false, nonCanonical, logger) + if err != nil { + return err + } } if len(canonical) > 0 { - updateCanonicalValue(ctx, dbtx, true, canonical) + err := updateCanonicalValue(ctx, dbtx, true, canonical, logger) + if err != nil { + return err + } } + return nil } -func updateCanonicalValue(_ context.Context, dbtx DBTransaction, isCanonical bool, blocks []common.L1BlockHash) { - if len(blocks) > 1 { - println("!!!FFFFFOOOOORRRR") - } +func updateCanonicalValue(ctx context.Context, dbtx *sql.Tx, isCanonical bool, blocks []common.L1BlockHash, _ gethlog.Logger) error { canonicalBlocks := repeat("(hash=? and full_hash=?)", "OR", len(blocks)) args := make([]any, 0) @@ -51,23 +57,18 @@ func updateCanonicalValue(_ context.Context, dbtx DBTransaction, isCanonical boo args = append(args, truncTo4(blockHash), blockHash.Bytes()) } - rows, err := dbtx.GetDB().Query("select id from block where "+canonicalBlocks, args[1:]...) - defer rows.Close() + updateBlocks := "update block set is_canonical=? where " + canonicalBlocks + _, err := dbtx.ExecContext(ctx, updateBlocks, args...) if err != nil { - panic(err) - return - } - for rows.Next() { - var id uint64 - rows.Scan(&id) - fmt.Printf("Update canonical=%t block id: %v, hash: %s\n", isCanonical, id, blocks[0].Hex()) + return err } - updateBlocks := "update block set is_canonical=? where " + canonicalBlocks - dbtx.ExecuteSQL(updateBlocks, args...) - updateBatches := "update batch set is_canonical=? where l1_proof in (select id from block where " + canonicalBlocks + ")" - dbtx.ExecuteSQL(updateBatches, args...) + _, err = dbtx.ExecContext(ctx, updateBatches, args...) + if err != nil { + return err + } + return nil } // todo - remove this. For now creates a "block" but without a body. @@ -76,24 +77,23 @@ func FetchBlock(ctx context.Context, db *sql.DB, hash common.L1BlockHash) (*type } func FetchHeadBlock(ctx context.Context, db *sql.DB) (*types.Block, error) { - // todo - just read the one with the max id - return fetchBlock(ctx, db, "where is_canonical=true and height=(select max(b.height) from block b where is_canonical=true)") + return fetchBlock(ctx, db, "order by id desc limit 1") } func FetchBlockHeaderByHeight(ctx context.Context, db *sql.DB, height *big.Int) (*types.Header, error) { return fetchBlockHeader(ctx, db, "where is_canonical=true and height=?", height.Int64()) } -func GetBlockId(ctx context.Context, db *sql.DB, hash common.L1BlockHash) (*uint64, error) { - var id uint64 +func GetBlockId(ctx context.Context, db *sql.Tx, hash common.L1BlockHash) (int64, error) { + var id int64 err := db.QueryRowContext(ctx, "select id from block where hash=? 
and full_hash=?", truncTo4(hash), hash).Scan(&id) if err != nil { - return nil, err + return 0, err } - return &id, err + return id, err } -func WriteL1Messages[T any](ctx context.Context, db *sql.DB, blockId *uint64, messages []T, isValueTransfer bool) error { +func WriteL1Messages[T any](ctx context.Context, db *sql.Tx, blockId int64, messages []T, isValueTransfer bool) error { insert := "insert into l1_msg (message, block, is_transfer) values " + repeat("(?,?,?)", ",", len(messages)) args := make([]any, 0) @@ -145,13 +145,13 @@ func FetchL1Messages[T any](ctx context.Context, db *sql.DB, blockHash common.L1 return result, nil } -func WriteRollup(_ context.Context, dbtx DBTransaction, rollup *common.RollupHeader, blockId *uint64, internalHeader *common.CalldataRollupHeader) error { +func WriteRollup(ctx context.Context, dbtx *sql.Tx, rollup *common.RollupHeader, blockId int64, internalHeader *common.CalldataRollupHeader) error { // Write the encoded header data, err := rlp.EncodeToBytes(rollup) if err != nil { return fmt.Errorf("could not encode batch header. Cause: %w", err) } - dbtx.ExecuteSQL("replace into rollup (hash, full_hash, start_seq, end_seq, time_stamp, header, compression_block) values (?,?,?,?,?,?,?)", + _, err = dbtx.ExecContext(ctx, "replace into rollup (hash, full_hash, start_seq, end_seq, time_stamp, header, compression_block) values (?,?,?,?,?,?,?)", truncTo4(rollup.Hash()), rollup.Hash().Bytes(), internalHeader.FirstBatchSequence.Uint64(), @@ -160,13 +160,17 @@ func WriteRollup(_ context.Context, dbtx DBTransaction, rollup *common.RollupHea data, blockId, ) + if err != nil { + return err + } + return nil } func FetchReorgedRollup(ctx context.Context, db *sql.DB, reorgedBlocks []common.L1BlockHash) (*common.L2BatchHash, error) { whereClause := repeat("(b.hash=? and b.full_hash=?)", "OR", len(reorgedBlocks)) - query := "select full_hash from rollup r join block b on r.compression_block=b.id where " + whereClause + query := "select r.full_hash from rollup r join block b on r.compression_block=b.id where " + whereClause args := make([]any, 0) for _, blockHash := range reorgedBlocks { diff --git a/go/enclave/storage/enclavedb/config.go b/go/enclave/storage/enclavedb/config.go index d261ccd2b9..63bc81cf84 100644 --- a/go/enclave/storage/enclavedb/config.go +++ b/go/enclave/storage/enclavedb/config.go @@ -20,31 +20,19 @@ const ( attSelect = "select ky from attestation_key where party=?" 
) -func WriteConfigToBatch(ctx context.Context, dbtx DBTransaction, key string, value any) { - dbtx.ExecuteSQL(cfgInsert, key, value) -} - func WriteConfigToTx(ctx context.Context, dbtx *sql.Tx, key string, value any) (sql.Result, error) { return dbtx.Exec(cfgInsert, key, value) } -func WriteConfig(ctx context.Context, db *sql.DB, key string, value []byte) (sql.Result, error) { +func WriteConfig(ctx context.Context, db *sql.Tx, key string, value []byte) (sql.Result, error) { return db.ExecContext(ctx, cfgInsert, key, value) } -func UpdateConfigToBatch(ctx context.Context, dbtx DBTransaction, key string, value []byte) { - dbtx.ExecuteSQL(cfgUpdate, key, value) -} - -func UpdateConfig(ctx context.Context, db *sql.DB, key string, value []byte) (sql.Result, error) { - return db.ExecContext(ctx, cfgUpdate, key, value) -} - func FetchConfig(ctx context.Context, db *sql.DB, key string) ([]byte, error) { return readSingleRow(ctx, db, cfgSelect, key) } -func WriteAttKey(ctx context.Context, db *sql.DB, party common.Address, key []byte) (sql.Result, error) { +func WriteAttKey(ctx context.Context, db *sql.Tx, party common.Address, key []byte) (sql.Result, error) { return db.ExecContext(ctx, attInsert, party.Bytes(), key) } diff --git a/go/enclave/storage/enclavedb/db_transaction.go b/go/enclave/storage/enclavedb/db_transaction.go index 372988ba86..048ac7e568 100644 --- a/go/enclave/storage/enclavedb/db_transaction.go +++ b/go/enclave/storage/enclavedb/db_transaction.go @@ -2,7 +2,6 @@ package enclavedb import ( "context" - "database/sql" "fmt" "time" @@ -19,62 +18,45 @@ type keyvalue struct { delete bool } -type statement struct { - query string - args []any +type dbTxBatch struct { + timeout time.Duration + db EnclaveDB + writes []keyvalue + size int } -type dbTransaction struct { - timeout time.Duration - db EnclaveDB - writes []keyvalue - statements []statement - size int -} - -func (b *dbTransaction) GetDB() *sql.DB { - return b.db.GetSQLDB() -} - -func (b *dbTransaction) ExecuteSQL(query string, args ...any) { - s := statement{ - query: query, - args: args, - } - b.statements = append(b.statements, s) -} - -// Put inserts the given value into the batch for later committing. -func (b *dbTransaction) Put(key, value []byte) error { +// put inserts the given value into the batch for later committing. +func (b *dbTxBatch) Put(key, value []byte) error { b.writes = append(b.writes, keyvalue{common.CopyBytes(key), common.CopyBytes(value), false}) b.size += len(key) + len(value) return nil } // Delete inserts the a key removal into the batch for later committing. -func (b *dbTransaction) Delete(key []byte) error { +func (b *dbTxBatch) Delete(key []byte) error { b.writes = append(b.writes, keyvalue{common.CopyBytes(key), nil, true}) b.size += len(key) return nil } // ValueSize retrieves the amount of data queued up for writing. 
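// (dbTxBatch buffers Put/Delete calls in memory and flushes them in a single
// SQL transaction when Write is called, per the ethdb.Batch contract it
// implements. A sketch of the intended call pattern:
//
//	b := db.NewBatch()
//	_ = b.Put(key, val)
//	_ = b.Delete(staleKey)
//	if err := b.Write(); err != nil {
//		return err
//	}
//
// where db is the EnclaveDB instance.)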
-func (b *dbTransaction) ValueSize() int { +func (b *dbTxBatch) ValueSize() int { return b.size } // Write executes a batch statement with all the updates -func (b *dbTransaction) Write() error { +func (b *dbTxBatch) Write() error { ctx, cancelCtx := context.WithTimeout(context.Background(), b.timeout) defer cancelCtx() - return b.WriteCtx(ctx) + return b.writeCtx(ctx) } -func (b *dbTransaction) WriteCtx(ctx context.Context) error { - tx, err := b.db.BeginTx(ctx) +func (b *dbTxBatch) writeCtx(ctx context.Context) error { + tx, err := b.db.NewDBTransaction(ctx) if err != nil { return fmt.Errorf("failed to create batch transaction - %w", err) } + defer tx.Rollback() var deletes [][]byte var updateKeys [][]byte @@ -99,13 +81,6 @@ func (b *dbTransaction) WriteCtx(ctx context.Context) error { return fmt.Errorf("failed to delete keys. Cause %w", err) } - for _, s := range b.statements { - _, err := tx.Exec(s.query, s.args...) - if err != nil { - return fmt.Errorf("failed to exec db statement `%s` (%v). Cause: %w", s.query, s.args, err) - } - } - err = tx.Commit() if err != nil { return fmt.Errorf("failed to commit batch of writes. Cause: %w", err) @@ -114,14 +89,13 @@ func (b *dbTransaction) WriteCtx(ctx context.Context) error { } // Reset resets the batch for reuse. -func (b *dbTransaction) Reset() { +func (b *dbTxBatch) Reset() { b.writes = b.writes[:0] - b.statements = b.statements[:0] b.size = 0 } // Replay replays the batch contents. -func (b *dbTransaction) Replay(w ethdb.KeyValueWriter) error { +func (b *dbTxBatch) Replay(w ethdb.KeyValueWriter) error { for _, keyvalue := range b.writes { if keyvalue.delete { if err := w.Delete(keyvalue.key); err != nil { diff --git a/go/enclave/storage/enclavedb/enclave_sql_db.go b/go/enclave/storage/enclavedb/enclave_sql_db.go index 602ed0b18f..8c69454de6 100644 --- a/go/enclave/storage/enclavedb/enclave_sql_db.go +++ b/go/enclave/storage/enclavedb/enclave_sql_db.go @@ -15,9 +15,10 @@ import ( // enclaveDB - Implements the key-value ethdb.Database and also exposes the underlying sql database // should not be used directly outside the db package type enclaveDB struct { - sqldb *sql.DB - config config.EnclaveConfig - logger gethlog.Logger + sqldb *sql.DB + rwSqldb *sql.DB // required only by sqlite. 
For a normal db, it will be the same instance as sqldb + config config.EnclaveConfig + logger gethlog.Logger } func (sqlDB *enclaveDB) Tail() (uint64, error) { @@ -55,18 +56,14 @@ func (sqlDB *enclaveDB) NewSnapshot() (ethdb.Snapshot, error) { panic("implement me") } -func NewEnclaveDB(db *sql.DB, config config.EnclaveConfig, logger gethlog.Logger) (EnclaveDB, error) { - return &enclaveDB{sqldb: db, config: config, logger: logger}, nil +func NewEnclaveDB(db *sql.DB, rwdb *sql.DB, config config.EnclaveConfig, logger gethlog.Logger) (EnclaveDB, error) { + return &enclaveDB{sqldb: db, rwSqldb: rwdb, config: config, logger: logger}, nil } func (sqlDB *enclaveDB) GetSQLDB() *sql.DB { return sqlDB.sqldb } -func (sqlDB *enclaveDB) BeginTx(ctx context.Context) (*sql.Tx, error) { - return sqlDB.sqldb.BeginTx(ctx, nil) -} - func (sqlDB *enclaveDB) Has(key []byte) (bool, error) { ctx, cancelCtx := context.WithTimeout(context.Background(), sqlDB.config.RPCTimeout) defer cancelCtx() @@ -98,15 +95,16 @@ func (sqlDB *enclaveDB) Close() error { return nil } -func (sqlDB *enclaveDB) NewDBTransaction() *dbTransaction { - return &dbTransaction{ - timeout: sqlDB.config.RPCTimeout, - db: sqlDB, +func (sqlDB *enclaveDB) NewDBTransaction(ctx context.Context) (*sql.Tx, error) { + tx, err := sqlDB.rwSqldb.BeginTx(ctx, nil) + if err != nil { + return nil, fmt.Errorf("failed to create db transaction - %w", err) } + return tx, nil } func (sqlDB *enclaveDB) NewBatch() ethdb.Batch { - return &dbTransaction{ + return &dbTxBatch{ timeout: sqlDB.config.RPCTimeout, db: sqlDB, } diff --git a/go/enclave/storage/enclavedb/enclave_sql_db_test.go b/go/enclave/storage/enclavedb/enclave_sql_db_test.go index 379923a9ee..2a039067df 100644 --- a/go/enclave/storage/enclavedb/enclave_sql_db_test.go +++ b/go/enclave/storage/enclavedb/enclave_sql_db_test.go @@ -127,7 +127,7 @@ func createDB(t *testing.T) ethdb.Database { lite := setupSQLite(t) _, err := lite.Exec(createKVTable) failIfError(t, err, "Failed to create key-value table in test db") - s, err := NewEnclaveDB(lite, config.EnclaveConfig{RPCTimeout: time.Second}, testlog.Logger()) + s, err := NewEnclaveDB(lite, lite, config.EnclaveConfig{RPCTimeout: time.Second}, testlog.Logger()) failIfError(t, err, "Failed to create SQLEthDatabase for test") return s } diff --git a/go/enclave/storage/enclavedb/events.go b/go/enclave/storage/enclavedb/events.go index 1a567cee0a..1c22b8a4e1 100644 --- a/go/enclave/storage/enclavedb/events.go +++ b/go/enclave/storage/enclavedb/events.go @@ -6,6 +6,8 @@ import ( "fmt" "math/big" + "github.com/ten-protocol/go-ten/go/enclave/core" + gethcommon "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -17,48 +19,42 @@ const ( baseEventsJoin = "from events e join exec_tx extx on e.tx=extx.tx and e.batch=extx.batch join tx on extx.tx=tx.id join batch b on extx.batch=b.sequence where b.is_canonical=true " ) -func StoreEventLogs(ctx context.Context, dbtx DBTransaction, receipts []*types.Receipt, stateDB *state.StateDB) error { +func StoreEventLogs(ctx context.Context, dbtx *sql.Tx, receipts []*types.Receipt, batch *core.Batch, stateDB *state.StateDB) error { var args []any totalLogs := 0 for _, receipt := range receipts { for _, l := range receipt.Logs { - txId, _ := ReadTxId(ctx, dbtx, l.TxHash) - batchId, err := ReadBatchId(ctx, dbtx, receipt.BlockHash) - if err != nil { - return err - } - logArgs, err := logDBValues(ctx, dbtx.GetDB(), l, stateDB) + logArgs, err := 
logDBValues(ctx, dbtx, l, stateDB) if err != nil { return err } args = append(args, logArgs...) - args = append(args, txId) - args = append(args, batchId) + txId, _ := GetTxId(ctx, dbtx, l.TxHash) + if txId == 0 { + args = append(args, nil) + } else { + args = append(args, txId) + } + args = append(args, batch.SeqNo().Uint64()) totalLogs++ } } if totalLogs > 0 { query := "insert into events values " + repeat("(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", ",", totalLogs) - dbtx.ExecuteSQL(query, args...) + _, err := dbtx.ExecContext(ctx, query, args...) + if err != nil { + return err + } } return nil } -func ReadBatchId(ctx context.Context, dbtx DBTransaction, batchHash gethcommon.Hash) (uint64, error) { - var batchId uint64 - err := dbtx.GetDB().QueryRowContext(ctx, - "select sequence from batch where batch.hash=? and batch.full_hash=?", - truncTo4(batchHash), batchHash.Bytes(), - ).Scan(&batchId) - return batchId, err -} - // This method stores a log entry together with relevancy metadata // Each types.Log has 5 indexable topics, where the first one is the event signature hash // The other 4 topics are set by the programmer // According to the data relevancy rules, an event is relevant to accounts referenced directly in topics // If the event is not referring any user address, it is considered a "lifecycle event", and is relevant to everyone -func logDBValues(ctx context.Context, db *sql.DB, l *types.Log, stateDB *state.StateDB) ([]any, error) { +func logDBValues(ctx context.Context, db *sql.Tx, l *types.Log, stateDB *state.StateDB) ([]any, error) { // The topics are stored in an array with a maximum of 5 entries, but usually less var t0, t1, t2, t3, t4 []byte @@ -279,7 +275,7 @@ func bytesToAddress(b []byte) *gethcommon.Address { // forcing its events to become permanently private (this is not implemented for now) // // todo - find a more efficient way -func isEndUserAccount(ctx context.Context, db *sql.DB, topic gethcommon.Hash, stateDB *state.StateDB) (bool, *gethcommon.Address, error) { +func isEndUserAccount(ctx context.Context, db *sql.Tx, topic gethcommon.Hash, stateDB *state.StateDB) (bool, *gethcommon.Address, error) { potentialAddr := common.ExtractPotentialAddress(topic) if potentialAddr == nil { return false, nil, nil diff --git a/go/enclave/storage/enclavedb/interfaces.go b/go/enclave/storage/enclavedb/interfaces.go index 6a30420157..58358003e2 100644 --- a/go/enclave/storage/enclavedb/interfaces.go +++ b/go/enclave/storage/enclavedb/interfaces.go @@ -14,18 +14,5 @@ import ( type EnclaveDB interface { ethdb.Database GetSQLDB() *sql.DB - NewDBTransaction() *dbTransaction - BeginTx(context.Context) (*sql.Tx, error) -} - -// DBTransaction - represents a database transaction implemented unusually. -// Typically, databases have a "beginTransaction" command which is also exposed by the db drivers, -// and then the applications just sends commands on that connection. -// There are rules as to what data is returned when running selects. -// This implementation works by collecting all statements, and then writing them and committing in one go -// todo - does it need to be an ethdb.Batch? 
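// With DBTransaction gone, callers work against plain database/sql
// transactions; the pattern adopted throughout storage.go in this patch is
// roughly:
//
//	tx, err := s.db.NewDBTransaction(ctx)
//	if err != nil {
//		return err
//	}
//	defer tx.Rollback()
//	// ... enclavedb.WriteBlock(ctx, tx, header), etc.
//	return tx.Commit()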
-// todo - can we use the typical -type DBTransaction interface { - GetDB() *sql.DB - ExecuteSQL(query string, args ...any) + NewDBTransaction(ctx context.Context) (*sql.Tx, error) } diff --git a/go/enclave/storage/init/edgelessdb/001_init.sql b/go/enclave/storage/init/edgelessdb/001_init.sql index a3f294bda2..60365ea739 100644 --- a/go/enclave/storage/init/edgelessdb/001_init.sql +++ b/go/enclave/storage/init/edgelessdb/001_init.sql @@ -121,7 +121,7 @@ create table if not exists obsdb.exec_tx created_contract_address binary(4), created_contract_address_full binary(20), receipt mediumblob, - tx int NOT NULL, + tx int, batch int NOT NULL, INDEX (batch), INDEX (tx), @@ -155,7 +155,7 @@ create table if not exists obsdb.events rel_address2_full binary(20), rel_address3_full binary(20), rel_address4_full binary(20), - tx int NOT NULL, + tx int, batch int NOT NULL, INDEX (tx), INDEX (batch), diff --git a/go/enclave/storage/init/edgelessdb/edgelessdb.go b/go/enclave/storage/init/edgelessdb/edgelessdb.go index 09aaa69ba9..68558c83cd 100644 --- a/go/enclave/storage/init/edgelessdb/edgelessdb.go +++ b/go/enclave/storage/init/edgelessdb/edgelessdb.go @@ -160,7 +160,7 @@ func Connector(edbCfg *Config, config config.EnclaveConfig, logger gethlog.Logge } // wrap it in our eth-compatible key-value store layer - return enclavedb.NewEnclaveDB(sqlDB, config, logger) + return enclavedb.NewEnclaveDB(sqlDB, sqlDB, config, logger) } func waitForEdgelessDBToStart(edbHost string, logger gethlog.Logger) error { diff --git a/go/enclave/storage/init/migration/db_migration.go b/go/enclave/storage/init/migration/db_migration.go index 742cc6001d..238fa70e61 100644 --- a/go/enclave/storage/init/migration/db_migration.go +++ b/go/enclave/storage/init/migration/db_migration.go @@ -62,6 +62,7 @@ func executeMigration(db *sql.DB, content string, migrationOrder int64) error { if err != nil { return err } + defer tx.Rollback() _, err = tx.Exec(content) if err != nil { return err diff --git a/go/enclave/storage/init/sqlite/001_init.sql b/go/enclave/storage/init/sqlite/001_init.sql index 9361a21ef0..43c34a31f9 100644 --- a/go/enclave/storage/init/sqlite/001_init.sql +++ b/go/enclave/storage/init/sqlite/001_init.sql @@ -102,7 +102,7 @@ create table if not exists exec_tx created_contract_address_full binary(20), receipt mediumblob, -- commenting out the fk until synthetic transactions are also stored - tx INTEGER NOT NULL, + tx INTEGER, batch INTEGER NOT NULL REFERENCES batch ); create index IDX_EX_TX_TX on exec_tx (tx); @@ -135,7 +135,7 @@ create table if not exists events rel_address2_full binary(20), rel_address3_full binary(20), rel_address4_full binary(20), - tx INTEGER NOT NULL, + tx INTEGER , batch INTEGER NOT NULL REFERENCES batch ); create index IDX_AD on events (address); diff --git a/go/enclave/storage/init/sqlite/sqlite.go b/go/enclave/storage/init/sqlite/sqlite.go index 75fa453250..c47514ee15 100644 --- a/go/enclave/storage/init/sqlite/sqlite.go +++ b/go/enclave/storage/init/sqlite/sqlite.go @@ -51,34 +51,50 @@ func CreateTemporarySQLiteDB(dbPath string, dbOptions string, config config.Encl description = "existing" initialsed = true } else { + myfile, e := os.Create(dbPath) + if e != nil { + logger.Crit("could not create temp sqlite DB file - %w", e) + } + myfile.Close() + description = "new" } } - db, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?%s", dbPath, dbOptions)) + path := fmt.Sprintf("file:%s?mode=rw&%s", dbPath, dbOptions) + logger.Info("Connect to sqlite", "path", path) + rwdb, err := 
sql.Open("sqlite3", path) if err != nil { return nil, fmt.Errorf("couldn't open sqlite db - %w", err) } // Sqlite fails with table locks when there are multiple connections - db.SetMaxOpenConns(1) + rwdb.SetMaxOpenConns(1) if !initialsed { - err = initialiseDB(db) + err = initialiseDB(rwdb) if err != nil { return nil, err } } // perform db migration - err = migration.DBMigration(db, sqlFiles, logger.New(log.CmpKey, "DB_MIGRATION")) + err = migration.DBMigration(rwdb, sqlFiles, logger.New(log.CmpKey, "DB_MIGRATION")) if err != nil { return nil, err } logger.Info(fmt.Sprintf("Opened %s sqlite db file at %s", description, dbPath)) - return enclavedb.NewEnclaveDB(db, config, logger) + roPath := fmt.Sprintf("file:%s?mode=ro&%s", dbPath, dbOptions) + logger.Info("Connect to sqlite", "ro_path", roPath) + rodb, err := sql.Open("sqlite3", roPath) + if err != nil { + return nil, fmt.Errorf("couldn't open sqlite db - %w", err) + } + rodb.SetMaxOpenConns(10) + + return enclavedb.NewEnclaveDB(rodb, rwdb, config, logger) } func initialiseDB(db *sql.DB) error { @@ -86,11 +102,19 @@ func initialiseDB(db *sql.DB) error { if err != nil { return err } - - _, err = db.Exec(string(sqlInitFile)) + tx, err := db.Begin() + if err != nil { + return fmt.Errorf("failed to initialise sqlite %w", err) + } + defer tx.Rollback() + _, err = tx.Exec(string(sqlInitFile)) if err != nil { return fmt.Errorf("failed to initialise sqlite %s - %w", sqlInitFile, err) } + err = tx.Commit() + if err != nil { + return err + } return nil } diff --git a/go/enclave/storage/interfaces.go b/go/enclave/storage/interfaces.go index 6c1f27c2a9..6a82cf168c 100644 --- a/go/enclave/storage/interfaces.go +++ b/go/enclave/storage/interfaces.go @@ -61,9 +61,6 @@ type BatchResolver interface { // BatchWasExecuted - return true if the batch was executed BatchWasExecuted(ctx context.Context, hash common.L2BatchHash) (bool, error) - // FetchHeadBatchForBlock returns the hash of the head batch at a given L1 block. - FetchHeadBatchForBlock(ctx context.Context, blockHash common.L1BlockHash) (*core.Batch, error) - // StoreBatch stores an un-executed batch. StoreBatch(ctx context.Context, batch *core.Batch, convertedHash gethcommon.Hash) error // StoreExecutedBatch - store the batch after it was executed diff --git a/go/enclave/storage/storage.go b/go/enclave/storage/storage.go index 5acfdaa2ce..4890370c34 100644 --- a/go/enclave/storage/storage.go +++ b/go/enclave/storage/storage.go @@ -202,20 +202,30 @@ func (s *storageImpl) FetchNonCanonicalBatchesBetween(ctx context.Context, start func (s *storageImpl) StoreBlock(ctx context.Context, b *types.Block, chainFork *common.ChainFork) error { defer s.logDuration("StoreBlock", measure.NewStopwatch()) - dbTransaction := s.db.NewDBTransaction() + dbTx, err := s.db.NewDBTransaction(ctx) + if err != nil { + return err + } + defer dbTx.Rollback() if chainFork != nil && chainFork.IsFork() { s.logger.Info(fmt.Sprintf("Fork. 
%s", chainFork)) - enclavedb.UpdateCanonicalBlocks(ctx, dbTransaction, chainFork.CanonicalPath, chainFork.NonCanonicalPath) + err := enclavedb.UpdateCanonicalBlocks(ctx, dbTx, chainFork.CanonicalPath, chainFork.NonCanonicalPath, s.logger) + if err != nil { + return err + } } // In case there were any batches inserted before this block was received - enclavedb.UpdateCanonicalBlocks(ctx, dbTransaction, []common.L1BlockHash{b.Hash()}, nil) + err = enclavedb.UpdateCanonicalBlocks(ctx, dbTx, []common.L1BlockHash{b.Hash()}, nil, s.logger) + if err != nil { + return err + } - if err := enclavedb.WriteBlock(ctx, dbTransaction, b.Header()); err != nil { + if err := enclavedb.WriteBlock(ctx, dbTx, b.Header()); err != nil { return fmt.Errorf("2. could not store block %s. Cause: %w", b.Hash(), err) } - if err := dbTransaction.WriteCtx(ctx); err != nil { + if err := dbTx.Commit(); err != nil { return fmt.Errorf("3. could not store block %s. Cause: %w", b.Hash(), err) } @@ -251,10 +261,19 @@ func (s *storageImpl) StoreSecret(ctx context.Context, secret crypto.SharedEncla if err != nil { return fmt.Errorf("could not encode shared secret. Cause: %w", err) } - _, err = enclavedb.WriteConfig(ctx, s.db.GetSQLDB(), masterSeedCfg, enc) + dbTx, err := s.db.NewDBTransaction(ctx) + if err != nil { + return err + } + defer dbTx.Rollback() + _, err = enclavedb.WriteConfig(ctx, dbTx, masterSeedCfg, enc) if err != nil { return fmt.Errorf("could not shared secret in DB. Cause: %w", err) } + err = dbTx.Commit() + if err != nil { + return err + } return nil } @@ -321,11 +340,6 @@ func (s *storageImpl) HealthCheck(ctx context.Context) (bool, error) { return true, nil } -func (s *storageImpl) FetchHeadBatchForBlock(ctx context.Context, blockHash common.L1BlockHash) (*core.Batch, error) { - defer s.logDuration("FetchHeadBatchForBlock", measure.NewStopwatch()) - return enclavedb.ReadHeadBatchForBlock(ctx, s.db.GetSQLDB(), blockHash) -} - func (s *storageImpl) CreateStateDB(ctx context.Context, batchHash common.L2BatchHash) (*state.StateDB, error) { defer s.logDuration("CreateStateDB", measure.NewStopwatch()) batch, err := s.FetchBatch(ctx, batchHash) @@ -387,8 +401,20 @@ func (s *storageImpl) FetchAttestedKey(ctx context.Context, address gethcommon.A func (s *storageImpl) StoreAttestedKey(ctx context.Context, aggregator gethcommon.Address, key *ecdsa.PublicKey) error { defer s.logDuration("StoreAttestedKey", measure.NewStopwatch()) - _, err := enclavedb.WriteAttKey(ctx, s.db.GetSQLDB(), aggregator, gethcrypto.CompressPubkey(key)) - return err + dbTx, err := s.db.NewDBTransaction(ctx) + if err != nil { + return err + } + defer dbTx.Rollback() + _, err = enclavedb.WriteAttKey(ctx, dbTx, aggregator, gethcrypto.CompressPubkey(key)) + if err != nil { + return err + } + err = dbTx.Commit() + if err != nil { + return err + } + return nil } func (s *storageImpl) FetchBatchBySeqNo(ctx context.Context, seqNum uint64) (*core.Batch, error) { @@ -422,17 +448,24 @@ func (s *storageImpl) StoreBatch(ctx context.Context, batch *core.Batch, convert return nil } - dbTx := s.db.NewDBTransaction() - s.logger.Trace("write batch", log.BatchHashKey, batch.Hash(), "l1Proof", batch.Header.L1Proof, log.BatchSeqNoKey, batch.SeqNo()) + dbTx, err := s.db.NewDBTransaction(ctx) + if err != nil { + return err + } + defer dbTx.Rollback() // it is possible that the block is not available if this is a validator - blockId, _ := enclavedb.GetBlockId(ctx, s.db.GetSQLDB(), batch.Header.L1Proof) + blockId, err := enclavedb.GetBlockId(ctx, dbTx, 
batch.Header.L1Proof) + if err != nil { + s.logger.Warn("could not get block id from db", log.ErrKey, err) + } + s.logger.Trace("write batch", log.BatchHashKey, batch.Hash(), "l1Proof", batch.Header.L1Proof, log.BatchSeqNoKey, batch.SeqNo(), "block_id", blockId) if err := enclavedb.WriteBatchAndTransactions(ctx, dbTx, batch, convertedHash, blockId); err != nil { return fmt.Errorf("could not write batch. Cause: %w", err) } - if err := dbTx.WriteCtx(ctx); err != nil { + if err := dbTx.Commit(); err != nil { return fmt.Errorf("could not commit batch %w", err) } @@ -455,7 +488,11 @@ func (s *storageImpl) StoreExecutedBatch(ctx context.Context, batch *core.Batch, return nil } - dbTx := s.db.NewDBTransaction() + dbTx, err := s.db.NewDBTransaction(ctx) + if err != nil { + return err + } + defer dbTx.Rollback() if err := enclavedb.WriteBatchExecution(ctx, dbTx, batch.SeqNo(), receipts); err != nil { return fmt.Errorf("could not write transaction receipts. Cause: %w", err) } @@ -466,13 +503,13 @@ func (s *storageImpl) StoreExecutedBatch(ctx context.Context, batch *core.Batch, return fmt.Errorf("could not create state DB to filter logs. Cause: %w", err) } - err = enclavedb.StoreEventLogs(ctx, dbTx, receipts, stateDB) + err = enclavedb.StoreEventLogs(ctx, dbTx, receipts, batch, stateDB) if err != nil { return fmt.Errorf("could not save logs %w", err) } } - if err = dbTx.WriteCtx(ctx); err != nil { + if err = dbTx.Commit(); err != nil { return fmt.Errorf("could not commit batch %w", err) } @@ -480,20 +517,38 @@ func (s *storageImpl) StoreExecutedBatch(ctx context.Context, batch *core.Batch, } func (s *storageImpl) StoreValueTransfers(ctx context.Context, blockHash common.L1BlockHash, transfers common.ValueTransferEvents) error { - blockId, err := enclavedb.GetBlockId(ctx, s.db.GetSQLDB(), blockHash) + dbtx, err := s.db.NewDBTransaction(ctx) if err != nil { return err } - return enclavedb.WriteL1Messages(ctx, s.db.GetSQLDB(), blockId, transfers, true) + defer dbtx.Rollback() + blockId, err := enclavedb.GetBlockId(ctx, dbtx, blockHash) + if err != nil { + return err + } + err = enclavedb.WriteL1Messages(ctx, dbtx, blockId, transfers, true) + if err != nil { + return err + } + return dbtx.Commit() } func (s *storageImpl) StoreL1Messages(ctx context.Context, blockHash common.L1BlockHash, messages common.CrossChainMessages) error { defer s.logDuration("StoreL1Messages", measure.NewStopwatch()) - blockId, err := enclavedb.GetBlockId(ctx, s.db.GetSQLDB(), blockHash) + dbtx, err := s.db.NewDBTransaction(ctx) + if err != nil { + return err + } + defer dbtx.Rollback() + blockId, err := enclavedb.GetBlockId(ctx, dbtx, blockHash) + if err != nil { + return err + } + err = enclavedb.WriteL1Messages(ctx, dbtx, blockId, messages, false) if err != nil { return err } - return enclavedb.WriteL1Messages(ctx, s.db.GetSQLDB(), blockId, messages, false) + return dbtx.Commit() } func (s *storageImpl) GetL1Messages(ctx context.Context, blockHash common.L1BlockHash) (common.CrossChainMessages, error) { @@ -514,8 +569,20 @@ func (s *storageImpl) StoreEnclaveKey(ctx context.Context, enclaveKey *crypto.En } keyBytes := gethcrypto.FromECDSA(enclaveKey.PrivateKey()) - _, err := enclavedb.WriteConfig(ctx, s.db.GetSQLDB(), enclaveKeyKey, keyBytes) - return err + dbTx, err := s.db.NewDBTransaction(ctx) + if err != nil { + return err + } + defer dbTx.Rollback() + _, err = enclavedb.WriteConfig(ctx, dbTx, enclaveKeyKey, keyBytes) + if err != nil { + return err + } + err = dbTx.Commit() + if err != nil { + return err + } + return nil } 
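For reference, every write path touched above now follows the same database/sql shape: begin a transaction, defer a rollback, perform the writes, then commit. A minimal sketch of that pattern, assuming only the standard library (the withTx helper and its package name are illustrative, not part of this patch):

    package txpattern

    import (
        "context"
        "database/sql"
        "fmt"
    )

    // withTx runs fn inside a transaction. The deferred Rollback releases the
    // transaction on any early error return; after a successful Commit it is a
    // harmless no-op, which is why it can be deferred unconditionally.
    func withTx(ctx context.Context, db *sql.DB, fn func(tx *sql.Tx) error) error {
        tx, err := db.BeginTx(ctx, nil)
        if err != nil {
            return fmt.Errorf("could not create DB transaction - %w", err)
        }
        defer tx.Rollback() //nolint:errcheck // no-op once Commit has succeeded
        if err := fn(tx); err != nil {
            return err
        }
        return tx.Commit()
    }

With the _txlock=immediate option introduced later in this series, BeginTx already takes the SQLite write lock, so the deferred Rollback also bounds how long a failed write can hold it.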
func (s *storageImpl) GetEnclaveKey(ctx context.Context) (*crypto.EnclaveKey, error) { @@ -533,18 +600,23 @@ func (s *storageImpl) GetEnclaveKey(ctx context.Context) (*crypto.EnclaveKey, er func (s *storageImpl) StoreRollup(ctx context.Context, rollup *common.ExtRollup, internalHeader *common.CalldataRollupHeader) error { defer s.logDuration("StoreRollup", measure.NewStopwatch()) - dbBatch := s.db.NewDBTransaction() - blockId, err := enclavedb.GetBlockId(ctx, s.db.GetSQLDB(), rollup.Header.CompressionL1Head) + dbTx, err := s.db.NewDBTransaction(ctx) + if err != nil { + return err + } + defer dbTx.Rollback() + + blockId, err := enclavedb.GetBlockId(ctx, dbTx, rollup.Header.CompressionL1Head) if err != nil { return err } - if err := enclavedb.WriteRollup(ctx, dbBatch, rollup.Header, blockId, internalHeader); err != nil { + if err := enclavedb.WriteRollup(ctx, dbTx, rollup.Header, blockId, internalHeader); err != nil { return fmt.Errorf("could not write rollup. Cause: %w", err) } - if err := dbBatch.WriteCtx(ctx); err != nil { + if err := dbTx.Commit(); err != nil { return fmt.Errorf("could not write rollup to storage. Cause: %w", err) } return nil diff --git a/go/host/rpc/enclaverpc/enclave_client.go b/go/host/rpc/enclaverpc/enclave_client.go index d8ced83449..e01416e021 100644 --- a/go/host/rpc/enclaverpc/enclave_client.go +++ b/go/host/rpc/enclaverpc/enclave_client.go @@ -160,9 +160,6 @@ func (c *Client) EnclaveID(ctx context.Context) (common.EnclaveID, common.System } func (c *Client) SubmitL1Block(ctx context.Context, block *common.L1Block, receipts common.L1Receipts, isLatest bool) (*common.BlockSubmissionResponse, common.SystemError) { - timeoutCtx, cancel := context.WithTimeout(ctx, c.enclaveRPCTimeout) - defer cancel() - var buffer bytes.Buffer if err := block.EncodeRLP(&buffer); err != nil { return nil, fmt.Errorf("could not encode block. Cause: %w", err) @@ -173,7 +170,7 @@ func (c *Client) SubmitL1Block(ctx context.Context, block *common.L1Block, recei return nil, fmt.Errorf("could not encode receipts. Cause: %w", err) } - response, err := c.protoClient.SubmitL1Block(timeoutCtx, &generated.SubmitBlockRequest{EncodedBlock: buffer.Bytes(), EncodedReceipts: serialized, IsLatest: isLatest}) + response, err := c.protoClient.SubmitL1Block(ctx, &generated.SubmitBlockRequest{EncodedBlock: buffer.Bytes(), EncodedReceipts: serialized, IsLatest: isLatest}) if err != nil { return nil, fmt.Errorf("could not submit block. 
Cause: %w", err) } @@ -203,12 +200,9 @@ func (c *Client) SubmitTx(ctx context.Context, tx common.EncryptedTx) (*response func (c *Client) SubmitBatch(ctx context.Context, batch *common.ExtBatch) common.SystemError { defer core.LogMethodDuration(c.logger, measure.NewStopwatch(), "SubmitBatch rpc call") - timeoutCtx, cancel := context.WithTimeout(ctx, c.enclaveRPCTimeout) - defer cancel() - batchMsg := rpc.ToExtBatchMsg(batch) - response, err := c.protoClient.SubmitBatch(timeoutCtx, &generated.SubmitBatchRequest{Batch: &batchMsg}) + response, err := c.protoClient.SubmitBatch(ctx, &generated.SubmitBatchRequest{Batch: &batchMsg}) if err != nil { return syserr.NewRPCError(err) } @@ -253,10 +247,7 @@ func (c *Client) GetTransactionCount(ctx context.Context, encryptedParams common func (c *Client) Stop() common.SystemError { c.logger.Info("Shutting down enclave client.") - timeoutCtx, cancel := context.WithTimeout(context.Background(), c.enclaveRPCTimeout) - defer cancel() - - response, err := c.protoClient.Stop(timeoutCtx, &generated.StopRequest{}) + response, err := c.protoClient.Stop(context.Background(), &generated.StopRequest{}) if err != nil { return syserr.NewRPCError(fmt.Errorf("could not stop enclave: %w", err)) } @@ -415,10 +406,7 @@ func (c *Client) HealthCheck(ctx context.Context) (bool, common.SystemError) { func (c *Client) CreateBatch(ctx context.Context, skipIfEmpty bool) common.SystemError { defer core.LogMethodDuration(c.logger, measure.NewStopwatch(), "CreateBatch rpc call") - timeoutCtx, cancel := context.WithTimeout(ctx, c.enclaveRPCTimeout) - defer cancel() - - response, err := c.protoClient.CreateBatch(timeoutCtx, &generated.CreateBatchRequest{SkipIfEmpty: skipIfEmpty}) + response, err := c.protoClient.CreateBatch(ctx, &generated.CreateBatchRequest{SkipIfEmpty: skipIfEmpty}) if err != nil { return syserr.NewInternalError(err) } @@ -431,10 +419,7 @@ func (c *Client) CreateBatch(ctx context.Context, skipIfEmpty bool) common.Syste func (c *Client) CreateRollup(ctx context.Context, fromSeqNo uint64) (*common.ExtRollup, common.SystemError) { defer core.LogMethodDuration(c.logger, measure.NewStopwatch(), "CreateRollup rpc call") - timeoutCtx, cancel := context.WithTimeout(ctx, c.enclaveRPCTimeout) - defer cancel() - - response, err := c.protoClient.CreateRollup(timeoutCtx, &generated.CreateRollupRequest{ + response, err := c.protoClient.CreateRollup(ctx, &generated.CreateRollupRequest{ FromSequenceNumber: &fromSeqNo, }) if err != nil { diff --git a/go/node/docker_node.go b/go/node/docker_node.go index eb67a175c4..f86de5d362 100644 --- a/go/node/docker_node.go +++ b/go/node/docker_node.go @@ -9,9 +9,7 @@ import ( "github.com/ten-protocol/go-ten/go/common/docker" ) -var ( - _enclaveDataDir = "/enclavedata" // this is how the directory is references within the enclave container -) +var _enclaveDataDir = "/enclavedata" // this is how the directory is references within the enclave container type DockerNode struct { cfg *Config From e1656ef91604325a8050b88af95bf291252aafb0 Mon Sep 17 00:00:00 2001 From: Tudor Malene Date: Fri, 3 May 2024 10:37:53 +0100 Subject: [PATCH 07/15] fixes --- go/common/gethutil/gethutil.go | 2 +- go/enclave/components/batch_registry.go | 3 ++- go/enclave/components/interfaces.go | 2 +- go/enclave/enclave.go | 2 +- go/enclave/nodetype/validator.go | 9 +++++-- go/enclave/storage/db_init.go | 3 --- go/enclave/storage/enclavedb/batch.go | 14 +++++++--- go/enclave/storage/enclavedb/block.go | 13 ++++++--- .../storage/init/edgelessdb/001_init.sql | 1 + 
go/enclave/storage/init/sqlite/001_init.sql | 3 ++- go/enclave/storage/init/sqlite/sqlite.go | 1 + go/enclave/storage/storage.go | 27 +++++++++++++------ 12 files changed, 55 insertions(+), 25 deletions(-) diff --git a/go/common/gethutil/gethutil.go b/go/common/gethutil/gethutil.go index d9c79f742a..76f8e99964 100644 --- a/go/common/gethutil/gethutil.go +++ b/go/common/gethutil/gethutil.go @@ -18,7 +18,7 @@ import ( var EmptyHash = gethcommon.Hash{} // LCA - returns the latest common ancestor of the 2 blocks or an error if no common ancestor is found -// it also returns the blocks that became canonincal, and the once that are now the fork +// it also returns the blocks that became canonical, and the ones that are now the fork func LCA(ctx context.Context, newCanonical *types.Block, oldCanonical *types.Block, resolver storage.BlockResolver) (*common.ChainFork, error) { b, cp, ncp, err := internalLCA(ctx, newCanonical, oldCanonical, resolver, []common.L1BlockHash{}, []common.L1BlockHash{oldCanonical.Hash()}) // remove the common ancestor diff --git a/go/enclave/components/batch_registry.go b/go/enclave/components/batch_registry.go index 07a5b20d3d..0507eabe98 100644 --- a/go/enclave/components/batch_registry.go +++ b/go/enclave/components/batch_registry.go @@ -75,7 +75,8 @@ func (br *batchRegistry) UnsubscribeFromBatches() { br.batchesCallback = nil } -func (br *batchRegistry) OnBlockProcessed(_ *BlockIngestionType) { +func (br *batchRegistry) OnL1Reorg(_ *BlockIngestionType) { + // read the cached head batch from the database because there was an L1 reorg headBatch, err := br.storage.FetchHeadBatch(context.Background()) if err != nil { br.logger.Error("Could not fetch head batch", log.ErrKey, err) diff --git a/go/enclave/components/interfaces.go b/go/enclave/components/interfaces.go index eebf7faf94..fa6aff38c3 100644 --- a/go/enclave/components/interfaces.go +++ b/go/enclave/components/interfaces.go @@ -100,7 +100,7 @@ type BatchRegistry interface { UnsubscribeFromBatches() OnBatchExecuted(batch *core.Batch, receipts types.Receipts) - OnBlockProcessed(*BlockIngestionType) + OnL1Reorg(*BlockIngestionType) // HasGenesisBatch - returns if genesis batch is available yet or not, or error in case // the function is unable to determine. 
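The rename above narrows the contract: the callback no longer fires for every processed block, only when the L1 chain forks, and its job is to refresh the in-memory head-batch cache from the database. A rough sketch of that cache-refresh contract, where Batch and Store are simplified stand-ins for the real core.Batch and storage types:

    package registrysketch

    import (
        "context"
        "sync/atomic"
    )

    // Batch and Store are stand-ins, not the real interfaces.
    type Batch struct{ SeqNo uint64 }

    type Store interface {
        FetchHeadBatch(ctx context.Context) (*Batch, error)
    }

    // headCache keeps the head batch sequence number in memory so the hot path
    // never touches the database.
    type headCache struct {
        store Store
        head  atomic.Uint64
    }

    // OnL1Reorg re-reads the head batch because a fork may have changed which
    // chain of batches is canonical, invalidating the cached value.
    func (c *headCache) OnL1Reorg(ctx context.Context) error {
        b, err := c.store.FetchHeadBatch(ctx)
        if err != nil {
            return err
        }
        c.head.Store(b.SeqNo)
        return nil
    }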
diff --git a/go/enclave/enclave.go b/go/enclave/enclave.go index 7ff5acab86..19a0c3110b 100644 --- a/go/enclave/enclave.go +++ b/go/enclave/enclave.go @@ -449,7 +449,7 @@ func (e *enclaveImpl) ingestL1Block(ctx context.Context, br *common.BlockAndRece } if ingestion.IsFork() { - e.registry.OnBlockProcessed(ingestion) + e.registry.OnL1Reorg(ingestion) err := e.service.OnL1Fork(ctx, ingestion.ChainFork) if err != nil { return nil, err diff --git a/go/enclave/nodetype/validator.go b/go/enclave/nodetype/validator.go index b6872d388e..db4becf500 100644 --- a/go/enclave/nodetype/validator.go +++ b/go/enclave/nodetype/validator.go @@ -74,6 +74,7 @@ func (val *obsValidator) VerifySequencerSignature(b *core.Batch) error { } func (val *obsValidator) ExecuteStoredBatches(ctx context.Context) error { + val.logger.Trace("Executing stored batches") headBatchSeq := val.batchRegistry.HeadBatchSeq() if headBatchSeq == nil { headBatchSeq = big.NewInt(int64(common.L2GenesisSeqNo)) @@ -95,11 +96,14 @@ func (val *obsValidator) ExecuteStoredBatches(ctx context.Context) error { } } + val.logger.Trace("Executing stored batch", log.BatchSeqNoKey, batch.SeqNo()) + // check batch execution prerequisites canExecute, err := val.executionPrerequisites(ctx, batch) if err != nil { return fmt.Errorf("could not determine the execution prerequisites for batch %s. Cause: %w", batch.Hash(), err) } + val.logger.Trace("Can execute stored batch", log.BatchSeqNoKey, batch.SeqNo(), "can", canExecute) if canExecute { receipts, err := val.batchExecutor.ExecuteBatch(ctx, batch) @@ -124,16 +128,17 @@ func (val *obsValidator) executionPrerequisites(ctx context.Context, batch *core // 1.l1 block exists block, err := val.storage.FetchBlock(ctx, batch.Header.L1Proof) if err != nil && errors.Is(err, errutil.ErrNotFound) { - val.logger.Info("Error fetching block", log.BlockHashKey, batch.Header.L1Proof, log.ErrKey, err) + val.logger.Warn("Error fetching block", log.BlockHashKey, batch.Header.L1Proof, log.ErrKey, err) return false, err } - + val.logger.Trace("l1 block exists", log.BatchSeqNoKey, batch.SeqNo()) // 2. parent was executed parentExecuted, err := val.storage.BatchWasExecuted(ctx, batch.Header.ParentHash) if err != nil { val.logger.Info("Error reading execution status of batch", log.BatchHashKey, batch.Header.ParentHash, log.ErrKey, err) return false, err } + val.logger.Trace("parentExecuted", log.BatchSeqNoKey, batch.SeqNo(), "val", parentExecuted) return block != nil && parentExecuted, nil } diff --git a/go/enclave/storage/db_init.go b/go/enclave/storage/db_init.go index 5706d9f663..fb9d98894a 100644 --- a/go/enclave/storage/db_init.go +++ b/go/enclave/storage/db_init.go @@ -21,8 +21,6 @@ func CreateDBFromConfig(cfg *config.EnclaveConfig, logger gethlog.Logger) (encla if cfg.UseInMemoryDB { logger.Info("UseInMemoryDB flag is true, data will not be persisted. 
Creating in-memory database...") // this creates a temporary sqlite sqldb - // return sqlite.CreateTemporarySQLiteDB(cfg.HostID.String(), "mode=memory&cache=shared&_foreign_keys=on", *cfg, logger) - // return sqlite.CreateTemporarySQLiteDB(cfg.HostID.String(), "mode=memory&_foreign_keys=on&_journal_mode=wal&_txlock=immediate&_locking_mode=EXCLUSIVE", *cfg, logger) return sqlite.CreateTemporarySQLiteDB("", "_foreign_keys=on&_journal_mode=wal&_txlock=immediate&_synchronous=normal", *cfg, logger) } @@ -31,7 +29,6 @@ func CreateDBFromConfig(cfg *config.EnclaveConfig, logger gethlog.Logger) (encla logger.Warn("Attestation is disabled, using a basic sqlite DB for persistence") // when we want to test persistence after node restart the SqliteDBPath should be set // (if empty string then a temp sqldb file will be created for the lifetime of the enclave) - // return sqlite.CreateTemporarySQLiteDB(cfg.SqliteDBPath, "_foreign_keys=on&_txlock=immediate", *cfg, logger) return sqlite.CreateTemporarySQLiteDB(cfg.SqliteDBPath, "_foreign_keys=on&_journal_mode=wal&_txlock=immediate&_synchronous=normal", *cfg, logger) } diff --git a/go/enclave/storage/enclavedb/batch.go b/go/enclave/storage/enclavedb/batch.go index 3434f6a97f..2c63e3b84a 100644 --- a/go/enclave/storage/enclavedb/batch.go +++ b/go/enclave/storage/enclavedb/batch.go @@ -54,7 +54,7 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx *sql.Tx, batch *core.Ba isCanon = false } - _, err = dbtx.ExecContext(ctx, "insert into batch values (?,?,?,?,?,?,?,?,?,?)", + args := []any{ batch.Header.SequencerOrderNo.Uint64(), // sequence batch.Hash(), // full hash convertedHash, // converted_hash @@ -63,9 +63,15 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx *sql.Tx, batch *core.Ba isCanon, // is_canonical header, // header blob batchBodyID, // reference to the batch body - blockId, // l1_proof block id - false, // executed - ) + batch.Header.L1Proof.Bytes(), // l1 proof hash + } + if blockId == 0 { + args = append(args, nil) // l1_proof block id + } else { + args = append(args, blockId) + } + args = append(args, false) // executed + _, err = dbtx.ExecContext(ctx, "insert into batch values (?,?,?,?,?,?,?,?,?,?,?)", args...) if err != nil { return err } diff --git a/go/enclave/storage/enclavedb/block.go b/go/enclave/storage/enclavedb/block.go index 3a3fb53f2e..e68e2cf6fe 100644 --- a/go/enclave/storage/enclavedb/block.go +++ b/go/enclave/storage/enclavedb/block.go @@ -49,7 +49,7 @@ func UpdateCanonicalBlocks(ctx context.Context, dbtx *sql.Tx, canonical []common } func updateCanonicalValue(ctx context.Context, dbtx *sql.Tx, isCanonical bool, blocks []common.L1BlockHash, _ gethlog.Logger) error { - canonicalBlocks := repeat("(hash=? and full_hash=?)", "OR", len(blocks)) + currentBlocks := repeat("(hash=? and full_hash=?)", "OR", len(blocks)) args := make([]any, 0) args = append(args, isCanonical) @@ -57,20 +57,27 @@ func updateCanonicalValue(ctx context.Context, dbtx *sql.Tx, isCanonical bool, b args = append(args, truncTo4(blockHash), blockHash.Bytes()) } - updateBlocks := "update block set is_canonical=? where " + canonicalBlocks + updateBlocks := "update block set is_canonical=? where " + currentBlocks _, err := dbtx.ExecContext(ctx, updateBlocks, args...) if err != nil { return err } - updateBatches := "update batch set is_canonical=? where l1_proof in (select id from block where " + canonicalBlocks + ")" + updateBatches := "update batch set is_canonical=? 
where l1_proof in (select id from block where " + currentBlocks + ")" _, err = dbtx.ExecContext(ctx, updateBatches, args...) if err != nil { return err } + return nil } +func SetMissingBlockId(ctx context.Context, dbtx *sql.Tx, blockId int64, blockHash common.L1BlockHash) error { + // handle the corner case where the block wasn't available + _, err := dbtx.ExecContext(ctx, "update batch set l1_proof=? where (l1_proof is null) and l1_proof_hash=?", blockId, blockHash.Bytes()) + return err +} + // todo - remove this. For now creates a "block" but without a body. func FetchBlock(ctx context.Context, db *sql.DB, hash common.L1BlockHash) (*types.Block, error) { return fetchBlock(ctx, db, " where hash=? and full_hash=?", truncTo4(hash), hash.Bytes()) diff --git a/go/enclave/storage/init/edgelessdb/001_init.sql b/go/enclave/storage/init/edgelessdb/001_init.sql index 60365ea739..c9755edc68 100644 --- a/go/enclave/storage/init/edgelessdb/001_init.sql +++ b/go/enclave/storage/init/edgelessdb/001_init.sql @@ -89,6 +89,7 @@ create table if not exists obsdb.batch is_canonical boolean NOT NULL, header blob NOT NULL, body int NOT NULL, + l1_proof_hash binary(32), l1_proof INTEGER, is_executed boolean NOT NULL, primary key (sequence), diff --git a/go/enclave/storage/init/sqlite/001_init.sql b/go/enclave/storage/init/sqlite/001_init.sql index 43c34a31f9..e38b16b5c3 100644 --- a/go/enclave/storage/init/sqlite/001_init.sql +++ b/go/enclave/storage/init/sqlite/001_init.sql @@ -73,6 +73,7 @@ create table if not exists batch is_canonical boolean NOT NULL, header blob NOT NULL, body int NOT NULL REFERENCES batch_body, + l1_proof_hash binary(32), l1_proof INTEGER, -- normally this would be a FK, but there is a weird edge case where an L2 node might not have the block used to create this batch is_executed boolean NOT NULL -- the unique constraint is commented for now because there might be multiple non-canonical batches for the same height @@ -135,7 +136,7 @@ create table if not exists events rel_address2_full binary(20), rel_address3_full binary(20), rel_address4_full binary(20), - tx INTEGER , + tx INTEGER, batch INTEGER NOT NULL REFERENCES batch ); create index IDX_AD on events (address); diff --git a/go/enclave/storage/init/sqlite/sqlite.go b/go/enclave/storage/init/sqlite/sqlite.go index c47514ee15..71d7b9064f 100644 --- a/go/enclave/storage/init/sqlite/sqlite.go +++ b/go/enclave/storage/init/sqlite/sqlite.go @@ -32,6 +32,7 @@ var sqlFiles embed.FS // CreateTemporarySQLiteDB if dbPath is empty will use a random throwaway temp file, // otherwise dbPath is a filepath for the sqldb file, allows for tests that care about persistence between restarts +// We create 2 sqlite instances. One R/W with a single connection, and a R/O with multiple connections func CreateTemporarySQLiteDB(dbPath string, dbOptions string, config config.EnclaveConfig, logger gethlog.Logger) (enclavedb.EnclaveDB, error) { initialsed := false diff --git a/go/enclave/storage/storage.go b/go/enclave/storage/storage.go index 4890370c34..876f6b157f 100644 --- a/go/enclave/storage/storage.go +++ b/go/enclave/storage/storage.go @@ -207,24 +207,35 @@ func (s *storageImpl) StoreBlock(ctx context.Context, b *types.Block, chainFork return err } defer dbTx.Rollback() + + if err := enclavedb.WriteBlock(ctx, dbTx, b.Header()); err != nil { + return fmt.Errorf("2. could not store block %s. 
Cause: %w", b.Hash(), err) + } + + blockId, err := enclavedb.GetBlockId(ctx, dbTx, b.Hash()) + if err != nil { + return err + } + + // In case there were any batches inserted before this block was received + err = enclavedb.SetMissingBlockId(ctx, dbTx, blockId, b.Hash()) + if err != nil { + return err + } + if chainFork != nil && chainFork.IsFork() { - s.logger.Info(fmt.Sprintf("Fork. %s", chainFork)) - err := enclavedb.UpdateCanonicalBlocks(ctx, dbTx, chainFork.CanonicalPath, chainFork.NonCanonicalPath, s.logger) + s.logger.Info(fmt.Sprintf("Update Fork. %s", chainFork)) + err = enclavedb.UpdateCanonicalBlocks(ctx, dbTx, chainFork.CanonicalPath, chainFork.NonCanonicalPath, s.logger) if err != nil { return err } } - // In case there were any batches inserted before this block was received err = enclavedb.UpdateCanonicalBlocks(ctx, dbTx, []common.L1BlockHash{b.Hash()}, nil, s.logger) if err != nil { return err } - if err := enclavedb.WriteBlock(ctx, dbTx, b.Header()); err != nil { - return fmt.Errorf("2. could not store block %s. Cause: %w", b.Hash(), err) - } - if err := dbTx.Commit(); err != nil { return fmt.Errorf("3. could not store block %s. Cause: %w", b.Hash(), err) } @@ -497,7 +508,7 @@ func (s *storageImpl) StoreExecutedBatch(ctx context.Context, batch *core.Batch, return fmt.Errorf("could not write transaction receipts. Cause: %w", err) } - if batch.Number().Int64() > 1 { + if batch.Number().Uint64() > common.L2GenesisSeqNo { stateDB, err := s.CreateStateDB(ctx, batch.Header.ParentHash) if err != nil { return fmt.Errorf("could not create state DB to filter logs. Cause: %w", err) From aafea94116b3005f637f823df6408c1dace5791f Mon Sep 17 00:00:00 2001 From: Tudor Malene Date: Fri, 3 May 2024 11:44:20 +0100 Subject: [PATCH 08/15] fixes --- go/enclave/components/batch_registry.go | 2 +- go/enclave/components/rollup_consumer.go | 1 + go/enclave/storage/db_init.go | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/go/enclave/components/batch_registry.go b/go/enclave/components/batch_registry.go index 0507eabe98..5b069f581e 100644 --- a/go/enclave/components/batch_registry.go +++ b/go/enclave/components/batch_registry.go @@ -76,7 +76,7 @@ func (br *batchRegistry) UnsubscribeFromBatches() { } func (br *batchRegistry) OnL1Reorg(_ *BlockIngestionType) { - // read the cached head batch from the database because there was an L1 reorg + // refresh the cached head batch from the database because there was an L1 reorg headBatch, err := br.storage.FetchHeadBatch(context.Background()) if err != nil { br.logger.Error("Could not fetch head batch", log.ErrKey, err) diff --git a/go/enclave/components/rollup_consumer.go b/go/enclave/components/rollup_consumer.go index 88d8f16d60..be97898e8e 100644 --- a/go/enclave/components/rollup_consumer.go +++ b/go/enclave/components/rollup_consumer.go @@ -61,6 +61,7 @@ func (rc *rollupConsumerImpl) ProcessRollupsInBlock(ctx context.Context, b *comm } if len(rollups) > 1 { + // todo - we need to sort this out rc.logger.Warn(fmt.Sprintf("Multiple rollups %d in block %s", len(rollups), b.Block.Hash())) } diff --git a/go/enclave/storage/db_init.go b/go/enclave/storage/db_init.go index fb9d98894a..9e1483bb1d 100644 --- a/go/enclave/storage/db_init.go +++ b/go/enclave/storage/db_init.go @@ -21,7 +21,7 @@ func CreateDBFromConfig(cfg *config.EnclaveConfig, logger gethlog.Logger) (encla if cfg.UseInMemoryDB { logger.Info("UseInMemoryDB flag is true, data will not be persisted. 
Creating in-memory database...") // this creates a temporary sqlite sqldb - return sqlite.CreateTemporarySQLiteDB("", "_foreign_keys=on&_journal_mode=wal&_txlock=immediate&_synchronous=normal", *cfg, logger) + return sqlite.CreateTemporarySQLiteDB("", "_foreign_keys=on&_txlock=immediate&_synchronous=normal", *cfg, logger) } if !cfg.WillAttest { @@ -29,7 +29,7 @@ func CreateDBFromConfig(cfg *config.EnclaveConfig, logger gethlog.Logger) (encla logger.Warn("Attestation is disabled, using a basic sqlite DB for persistence") // when we want to test persistence after node restart the SqliteDBPath should be set // (if empty string then a temp sqldb file will be created for the lifetime of the enclave) - return sqlite.CreateTemporarySQLiteDB(cfg.SqliteDBPath, "_foreign_keys=on&_journal_mode=wal&_txlock=immediate&_synchronous=normal", *cfg, logger) + return sqlite.CreateTemporarySQLiteDB(cfg.SqliteDBPath, "_foreign_keys=on&_txlock=immediate&_synchronous=normal", *cfg, logger) } // persistent and with attestation means connecting to edgeless DB in a trusted enclave from a secure enclave From c13142a805e03304cc42e23636927df31de72f52 Mon Sep 17 00:00:00 2001 From: Tudor Malene Date: Thu, 9 May 2024 14:48:32 +0100 Subject: [PATCH 09/15] address pr comments --- go/enclave/storage/db_init.go | 11 +++++--- go/enclave/storage/init/sqlite/001_init.sql | 2 +- go/enclave/storage/init/sqlite/sqlite.go | 29 +++++++++----------- go/enclave/storage/storage.go | 30 ++++++++++----------- go/host/enclave/guardian.go | 4 +-- 5 files changed, 38 insertions(+), 38 deletions(-) diff --git a/go/enclave/storage/db_init.go b/go/enclave/storage/db_init.go index 1fc6ce9293..cbae26972f 100644 --- a/go/enclave/storage/db_init.go +++ b/go/enclave/storage/db_init.go @@ -13,15 +13,20 @@ import ( "github.com/ten-protocol/go-ten/go/config" ) +// _journal_mode=wal - The recommended running mode: "Write-ahead logging": https://www.sqlite.org/draft/matrix/wal.html +// _txlock=immediate - db transactions start as soon as "BeginTx()" is called. Avoids deadlocks. https://www.sqlite.org/lang_transaction.html +// _synchronous=normal - not exactly sure if we actually need this. It was recommended somewhere. https://www.sqlite.org/pragma.html#pragma_synchronous +const sqliteCfg = "_foreign_keys=on&_journal_mode=wal&_txlock=immediate&_synchronous=normal" + // CreateDBFromConfig creates an appropriate ethdb.Database instance based on your config func CreateDBFromConfig(cfg *config.EnclaveConfig, logger gethlog.Logger) (enclavedb.EnclaveDB, error) { if err := validateDBConf(cfg); err != nil { return nil, err } if cfg.UseInMemoryDB { - logger.Info("UseInMemoryDB flag is true, data will not be persisted. Creating in-memory database...") + logger.Info("UseInMemoryDB flag is true, data will not be persisted. 
Creating temporary sqlite database...") // this creates a temporary sqlite sqldb - return sqlite.CreateTemporarySQLiteDB("", "_foreign_keys=on&_txlock=immediate&_synchronous=normal", *cfg, logger) + return sqlite.CreateTemporarySQLiteDB("", sqliteCfg, *cfg, logger) } if !cfg.WillAttest && len(cfg.SqliteDBPath) > 0 { @@ -29,7 +34,7 @@ func CreateDBFromConfig(cfg *config.EnclaveConfig, logger gethlog.Logger) (encla logger.Warn("Attestation is disabled, using a basic sqlite DB for persistence") // when we want to test persistence after node restart the SqliteDBPath should be set // (if empty string then a temp sqldb file will be created for the lifetime of the enclave) - return sqlite.CreateTemporarySQLiteDB(cfg.SqliteDBPath, "_foreign_keys=on&_txlock=immediate&_synchronous=normal", *cfg, logger) + return sqlite.CreateTemporarySQLiteDB(cfg.SqliteDBPath, sqliteCfg, *cfg, logger) } if !cfg.WillAttest && len(cfg.EdgelessDBHost) > 0 { diff --git a/go/enclave/storage/init/sqlite/001_init.sql b/go/enclave/storage/init/sqlite/001_init.sql index e38b16b5c3..7dcf374be7 100644 --- a/go/enclave/storage/init/sqlite/001_init.sql +++ b/go/enclave/storage/init/sqlite/001_init.sql @@ -94,7 +94,7 @@ create table if not exists tx idx int NOT NULL, body int REFERENCES batch_body ); -create index IDX_Tx_HASH on tx (hash); +create index IDX_TX_HASH on tx (hash); create table if not exists exec_tx ( diff --git a/go/enclave/storage/init/sqlite/sqlite.go b/go/enclave/storage/init/sqlite/sqlite.go index 71d7b9064f..13b4c2ce2c 100644 --- a/go/enclave/storage/init/sqlite/sqlite.go +++ b/go/enclave/storage/init/sqlite/sqlite.go @@ -6,7 +6,6 @@ import ( "fmt" "os" "path/filepath" - "strings" "github.com/ten-protocol/go-ten/go/config" @@ -44,22 +43,20 @@ func CreateTemporarySQLiteDB(dbPath string, dbOptions string, config config.Encl dbPath = tempPath } - inMem := strings.Contains(dbOptions, "mode=memory") - description := "in memory" - if !inMem { - _, err := os.Stat(dbPath) - if err == nil { - description = "existing" - initialsed = true - } else { - myfile, e := os.Create(dbPath) - if e != nil { - logger.Crit("could not create temp sqlite DB file - %w", e) - } - myfile.Close() - - description = "new" + var description string + + _, err := os.Stat(dbPath) + if err == nil { + description = "existing" + initialsed = true + } else { + myfile, e := os.Create(dbPath) + if e != nil { + logger.Crit("could not create temp sqlite DB file - %w", e) } + myfile.Close() + + description = "new" } path := fmt.Sprintf("file:%s?mode=rw&%s", dbPath, dbOptions) diff --git a/go/enclave/storage/storage.go b/go/enclave/storage/storage.go index 876f6b157f..c02adbbb48 100644 --- a/go/enclave/storage/storage.go +++ b/go/enclave/storage/storage.go @@ -204,7 +204,7 @@ func (s *storageImpl) StoreBlock(ctx context.Context, b *types.Block, chainFork defer s.logDuration("StoreBlock", measure.NewStopwatch()) dbTx, err := s.db.NewDBTransaction(ctx) if err != nil { - return err + return fmt.Errorf("could not create DB transaction - %w", err) } defer dbTx.Rollback() @@ -214,7 +214,7 @@ func (s *storageImpl) StoreBlock(ctx context.Context, b *types.Block, chainFork blockId, err := enclavedb.GetBlockId(ctx, dbTx, b.Hash()) if err != nil { - return err + return fmt.Errorf("could not get block id - %w", err) } // In case there were any batches inserted before this block was received @@ -274,7 +274,7 @@ func (s *storageImpl) StoreSecret(ctx context.Context, secret crypto.SharedEncla } dbTx, err := s.db.NewDBTransaction(ctx) if err != nil { - return err + 
return fmt.Errorf("could not create DB transaction - %w", err) } defer dbTx.Rollback() _, err = enclavedb.WriteConfig(ctx, dbTx, masterSeedCfg, enc) @@ -414,7 +414,7 @@ func (s *storageImpl) StoreAttestedKey(ctx context.Context, aggregator gethcommo defer s.logDuration("StoreAttestedKey", measure.NewStopwatch()) dbTx, err := s.db.NewDBTransaction(ctx) if err != nil { - return err + return fmt.Errorf("could not create DB transaction - %w", err) } defer dbTx.Rollback() _, err = enclavedb.WriteAttKey(ctx, dbTx, aggregator, gethcrypto.CompressPubkey(key)) @@ -461,7 +461,7 @@ func (s *storageImpl) StoreBatch(ctx context.Context, batch *core.Batch, convert dbTx, err := s.db.NewDBTransaction(ctx) if err != nil { - return err + return fmt.Errorf("could not create DB transaction - %w", err) } defer dbTx.Rollback() @@ -501,7 +501,7 @@ func (s *storageImpl) StoreExecutedBatch(ctx context.Context, batch *core.Batch, dbTx, err := s.db.NewDBTransaction(ctx) if err != nil { - return err + return fmt.Errorf("could not create DB transaction - %w", err) } defer dbTx.Rollback() if err := enclavedb.WriteBatchExecution(ctx, dbTx, batch.SeqNo(), receipts); err != nil { @@ -530,16 +530,16 @@ func (s *storageImpl) StoreExecutedBatch(ctx context.Context, batch *core.Batch, func (s *storageImpl) StoreValueTransfers(ctx context.Context, blockHash common.L1BlockHash, transfers common.ValueTransferEvents) error { dbtx, err := s.db.NewDBTransaction(ctx) if err != nil { - return err + return fmt.Errorf("could not create DB transaction - %w", err) } defer dbtx.Rollback() blockId, err := enclavedb.GetBlockId(ctx, dbtx, blockHash) if err != nil { - return err + return fmt.Errorf("could not get block id - %w", err) } err = enclavedb.WriteL1Messages(ctx, dbtx, blockId, transfers, true) if err != nil { - return err + return fmt.Errorf("could not write l1 messages - %w", err) } return dbtx.Commit() } @@ -548,16 +548,16 @@ func (s *storageImpl) StoreL1Messages(ctx context.Context, blockHash common.L1Bl defer s.logDuration("StoreL1Messages", measure.NewStopwatch()) dbtx, err := s.db.NewDBTransaction(ctx) if err != nil { - return err + return fmt.Errorf("could not create DB transaction - %w", err) } defer dbtx.Rollback() blockId, err := enclavedb.GetBlockId(ctx, dbtx, blockHash) if err != nil { - return err + return fmt.Errorf("could not get block id - %w", err) } err = enclavedb.WriteL1Messages(ctx, dbtx, blockId, messages, false) if err != nil { - return err + return fmt.Errorf("could not write l1 messages - %w", err) } return dbtx.Commit() } @@ -582,7 +582,7 @@ func (s *storageImpl) StoreEnclaveKey(ctx context.Context, enclaveKey *crypto.En dbTx, err := s.db.NewDBTransaction(ctx) if err != nil { - return err + return fmt.Errorf("could not create DB transaction - %w", err) } defer dbTx.Rollback() _, err = enclavedb.WriteConfig(ctx, dbTx, enclaveKeyKey, keyBytes) @@ -614,13 +614,13 @@ func (s *storageImpl) StoreRollup(ctx context.Context, rollup *common.ExtRollup, dbTx, err := s.db.NewDBTransaction(ctx) if err != nil { - return err + return fmt.Errorf("could not create DB transaction - %w", err) } defer dbTx.Rollback() blockId, err := enclavedb.GetBlockId(ctx, dbTx, rollup.Header.CompressionL1Head) if err != nil { - return err + return fmt.Errorf("could not get block id - %w", err) } if err := enclavedb.WriteRollup(ctx, dbTx, rollup.Header, blockId, internalHeader); err != nil { diff --git a/go/host/enclave/guardian.go b/go/host/enclave/guardian.go index 1d6d056c79..2a4374a869 100644 --- a/go/host/enclave/guardian.go +++ 
b/go/host/enclave/guardian.go @@ -550,9 +550,7 @@ func (g *Guardian) periodicBatchProduction() { g.logger.Debug("Create batch") // if maxBatchInterval is set higher than batchInterval then we are happy to skip creating batches when there is no data // (up to a maximum time of maxBatchInterval) - // todo - disable the skip mechanism - // skipBatchIfEmpty := g.maxBatchInterval > g.batchInterval && time.Since(g.lastBatchCreated) < g.maxBatchInterval - skipBatchIfEmpty := false + skipBatchIfEmpty := g.maxBatchInterval > g.batchInterval && time.Since(g.lastBatchCreated) < g.maxBatchInterval err := g.enclaveClient.CreateBatch(context.Background(), skipBatchIfEmpty) if err != nil { g.logger.Error("Unable to produce batch", log.ErrKey, err) From 25ca08909165fd691c465cebb33cda3b1e291cd4 Mon Sep 17 00:00:00 2001 From: Tudor Malene Date: Thu, 9 May 2024 15:52:57 +0100 Subject: [PATCH 10/15] fix --- go/enclave/storage/init/edgelessdb/001_init.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/enclave/storage/init/edgelessdb/001_init.sql b/go/enclave/storage/init/edgelessdb/001_init.sql index c9755edc68..95b05e75ce 100644 --- a/go/enclave/storage/init/edgelessdb/001_init.sql +++ b/go/enclave/storage/init/edgelessdb/001_init.sql @@ -103,7 +103,7 @@ GRANT ALL ON obsdb.batch TO obscuro; create table if not exists obsdb.tx ( id INTEGER AUTO_INCREMENT, - hash binary(16), + hash binary(4), full_hash binary(32) NOT NULL, content mediumblob NOT NULL, sender_address binary(20) NOT NULL, From 1f47bdfbc0ef7e56a953e6029d09823377b96da0 Mon Sep 17 00:00:00 2001 From: Tudor Malene Date: Fri, 10 May 2024 12:47:55 +0100 Subject: [PATCH 11/15] fix --- go/enclave/storage/init/sqlite/001_init.sql | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/go/enclave/storage/init/sqlite/001_init.sql b/go/enclave/storage/init/sqlite/001_init.sql index 7dcf374be7..5dd9db4ee0 100644 --- a/go/enclave/storage/init/sqlite/001_init.sql +++ b/go/enclave/storage/init/sqlite/001_init.sql @@ -44,6 +44,7 @@ create table if not exists l1_msg block INTEGER NOT NULL REFERENCES block, is_transfer boolean ); +create index L1_MSG_BLOCK_IDX on l1_msg (block); create table if not exists rollup ( @@ -56,6 +57,8 @@ create table if not exists rollup header blob NOT NULL, compression_block INTEGER NOT NULL REFERENCES block ); +create index ROLLUP_COMPRESSION_BLOCK_IDX on rollup (compression_block); +create index ROLLUP_COMPRESSION_HASH_IDX on rollup (hash); create table if not exists batch_body ( @@ -81,7 +84,8 @@ create table if not exists batch ); create index IDX_BATCH_HASH on batch (hash); create index IDX_BATCH_HEIGHT on batch (height, is_canonical); -create index IDX_BATCH_Block on batch (l1_proof); +create index IDX_BATCH_BLOCK on batch (l1_proof); +create index IDX_BATCH_BODY on batch (body); create table if not exists tx ( @@ -95,6 +99,7 @@ create table if not exists tx body int REFERENCES batch_body ); create index IDX_TX_HASH on tx (hash); +create index IDX_TX_BODY on tx (body); create table if not exists exec_tx ( @@ -148,4 +153,6 @@ create index IDX_T0 on events (topic0); create index IDX_T1 on events (topic1); create index IDX_T2 on events (topic2); create index IDX_T3 on events (topic3); -create index IDX_T4 on events (topic4); \ No newline at end of file +create index IDX_T4 on events (topic4); +create index IDX_TX on events (tx); +create index IDX_BATCH on events (batch); \ No newline at end of file From c556311bd8762190df8744a8572327bbd4df0964 Mon Sep 17 00:00:00 2001 From: Tudor Malene 
Date: Fri, 10 May 2024 14:04:08 +0100 Subject: [PATCH 12/15] update index approach --- go/enclave/storage/enclavedb/batch.go | 53 ++++++------ go/enclave/storage/enclavedb/block.go | 34 ++++---- go/enclave/storage/enclavedb/events.go | 33 +++----- go/enclave/storage/enclavedb/utils.go | 16 ---- .../storage/init/edgelessdb/001_init.sql | 82 +++++++------------ go/enclave/storage/init/sqlite/001_init.sql | 72 ++++++---------- 6 files changed, 106 insertions(+), 184 deletions(-) diff --git a/go/enclave/storage/enclavedb/batch.go b/go/enclave/storage/enclavedb/batch.go index 2c63e3b84a..e6a018f9e6 100644 --- a/go/enclave/storage/enclavedb/batch.go +++ b/go/enclave/storage/enclavedb/batch.go @@ -21,7 +21,7 @@ import ( const ( selectBatch = "select b.header, bb.content from batch b join batch_body bb on b.body=bb.id" - queryReceipts = "select exec_tx.receipt, tx.content, batch.full_hash, batch.height from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch " + queryReceipts = "select exec_tx.receipt, tx.content, batch.hash, batch.height from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch " ) // WriteBatchAndTransactions - persists the batch and the transactions @@ -45,8 +45,8 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx *sql.Tx, batch *core.Ba var isCanon bool err = dbtx.QueryRowContext(ctx, - "select is_canonical from block where hash=? and full_hash=?", - truncTo4(batch.Header.L1Proof), batch.Header.L1Proof.Bytes(), + "select is_canonical from block where hash=? ", + batch.Header.L1Proof.Bytes(), ).Scan(&isCanon) if err != nil { // if the block is not found, we assume it is non-canonical @@ -56,9 +56,8 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx *sql.Tx, batch *core.Ba args := []any{ batch.Header.SequencerOrderNo.Uint64(), // sequence - batch.Hash(), // full hash convertedHash, // converted_hash - truncTo4(batch.Hash()), // index hash + batch.Hash(), // hash batch.Header.Number.Uint64(), // height isCanon, // is_canonical header, // header blob @@ -71,14 +70,14 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx *sql.Tx, batch *core.Ba args = append(args, blockId) } args = append(args, false) // executed - _, err = dbtx.ExecContext(ctx, "insert into batch values (?,?,?,?,?,?,?,?,?,?,?)", args...) + _, err = dbtx.ExecContext(ctx, "insert into batch values (?,?,?,?,?,?,?,?,?,?)", args...) 
if err != nil { return err } // creates a big insert statement for all transactions if len(batch.Transactions) > 0 { - insert := "replace into tx (hash, full_hash, content, sender_address, nonce, idx, body) values " + repeat("(?,?,?,?,?,?,?)", ",", len(batch.Transactions)) + insert := "replace into tx (hash, content, sender_address, nonce, idx, body) values " + repeat("(?,?,?,?,?,?)", ",", len(batch.Transactions)) args := make([]any, 0) for i, transaction := range batch.Transactions { @@ -92,13 +91,12 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx *sql.Tx, batch *core.Ba return fmt.Errorf("unable to convert tx to message - %w", err) } - args = append(args, truncTo4(transaction.Hash())) // truncated tx_hash - args = append(args, transaction.Hash()) // full tx_hash - args = append(args, txBytes) // content - args = append(args, from.Bytes()) // sender_address - args = append(args, transaction.Nonce()) // nonce - args = append(args, i) // idx - args = append(args, batchBodyID) // the batch body which contained it + args = append(args, transaction.Hash()) // tx_hash + args = append(args, txBytes) // content + args = append(args, from.Bytes()) // sender_address + args = append(args, transaction.Nonce()) // nonce + args = append(args, i) // idx + args = append(args, batchBodyID) // the batch body which contained it } _, err = dbtx.ExecContext(ctx, insert, args...) if err != nil { @@ -127,9 +125,8 @@ func WriteBatchExecution(ctx context.Context, dbtx *sql.Tx, seqNo *big.Int, rece // ignore the error because synthetic transactions will not be inserted txId, _ := GetTxId(ctx, dbtx, storageReceipt.TxHash) - args = append(args, truncBTo4(receipt.ContractAddress.Bytes())) // created_contract_address - args = append(args, receipt.ContractAddress.Bytes()) // created_contract_address - args = append(args, receiptBytes) // the serialised receipt + args = append(args, receipt.ContractAddress.Bytes()) // created_contract_address + args = append(args, receiptBytes) // the serialised receipt if txId == 0 { args = append(args, nil) // tx id } else { @@ -138,7 +135,7 @@ func WriteBatchExecution(ctx context.Context, dbtx *sql.Tx, seqNo *big.Int, rece args = append(args, seqNo.Uint64()) // batch_seq } if len(args) > 0 { - insert := "insert into exec_tx (created_contract_address,created_contract_address_full, receipt, tx, batch) values " + repeat("(?,?,?,?,?)", ",", len(receipts)) + insert := "insert into exec_tx (created_contract_address, receipt, tx, batch) values " + repeat("(?,?,?,?)", ",", len(receipts)) _, err = dbtx.ExecContext(ctx, insert, args...) if err != nil { return err @@ -149,7 +146,7 @@ func WriteBatchExecution(ctx context.Context, dbtx *sql.Tx, seqNo *big.Int, rece func GetTxId(ctx context.Context, dbtx *sql.Tx, txHash gethcommon.Hash) (int64, error) { var txId int64 - err := dbtx.QueryRowContext(ctx, "select id from tx where hash=? and full_hash=?", truncTo4(txHash), txHash.Bytes()).Scan(&txId) + err := dbtx.QueryRowContext(ctx, "select id from tx where hash=? ", txHash.Bytes()).Scan(&txId) return txId, err } @@ -158,7 +155,7 @@ func ReadBatchBySeqNo(ctx context.Context, db *sql.DB, seqNo uint64) (*core.Batc } func ReadBatchByHash(ctx context.Context, db *sql.DB, hash common.L2BatchHash) (*core.Batch, error) { - return fetchBatch(ctx, db, " where b.hash=? and b.full_hash=?", truncTo4(hash), hash.Bytes()) + return fetchBatch(ctx, db, " where b.hash=? 
", hash.Bytes()) } func ReadCanonicalBatchByHeight(ctx context.Context, db *sql.DB, height uint64) (*core.Batch, error) { @@ -175,7 +172,7 @@ func ReadCurrentHeadBatch(ctx context.Context, db *sql.DB) (*core.Batch, error) } func ReadBatchesByBlock(ctx context.Context, db *sql.DB, hash common.L1BlockHash) ([]*core.Batch, error) { - return fetchBatches(ctx, db, " join block l1b on b.l1_proof=l1b.id where l1b.hash=? and l1b.full_hash=? order by b.sequence", truncTo4(hash), hash.Bytes()) + return fetchBatches(ctx, db, " join block l1b on b.l1_proof=l1b.id where l1b.hash=? order by b.sequence", hash.Bytes()) } func ReadCurrentSequencerNo(ctx context.Context, db *sql.DB) (*big.Int, error) { @@ -326,12 +323,12 @@ func selectReceipts(ctx context.Context, db *sql.DB, config *params.ChainConfig, // corresponding block body, so if the block body is not found it will return nil even // if the receipt itself is stored. func ReadReceiptsByBatchHash(ctx context.Context, db *sql.DB, hash common.L2BatchHash, config *params.ChainConfig) (types.Receipts, error) { - return selectReceipts(ctx, db, config, "where batch.hash=? and batch.full_hash=?", truncTo4(hash), hash.Bytes()) + return selectReceipts(ctx, db, config, "where batch.hash=? ", hash.Bytes()) } func ReadReceipt(ctx context.Context, db *sql.DB, txHash common.L2TxHash, config *params.ChainConfig) (*types.Receipt, error) { // todo - canonical? - row := db.QueryRowContext(ctx, queryReceipts+" where tx.hash=? and tx.full_hash=?", truncTo4(txHash), txHash.Bytes()) + row := db.QueryRowContext(ctx, queryReceipts+" where tx.hash=? ", txHash.Bytes()) // receipt, tx, batch, height var receiptData []byte var txData []byte @@ -368,8 +365,8 @@ func ReadReceipt(ctx context.Context, db *sql.DB, txHash common.L2TxHash, config func ReadTransaction(ctx context.Context, db *sql.DB, txHash gethcommon.Hash) (*types.Transaction, common.L2BatchHash, uint64, uint64, error) { row := db.QueryRowContext(ctx, - "select tx.content, batch.full_hash, batch.height, tx.idx from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch where batch.is_canonical=true and tx.hash=? and tx.full_hash=?", - truncTo4(txHash), txHash.Bytes()) + "select tx.content, batch.hash, batch.height, tx.idx from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch where batch.is_canonical=true and tx.hash=?", + txHash.Bytes()) // tx, batch, height, idx var txData []byte @@ -394,7 +391,7 @@ func ReadTransaction(ctx context.Context, db *sql.DB, txHash gethcommon.Hash) (* } func GetContractCreationTx(ctx context.Context, db *sql.DB, address gethcommon.Address) (*gethcommon.Hash, error) { - row := db.QueryRowContext(ctx, "select tx.full_hash from exec_tx join tx on tx.id=exec_tx.tx where created_contract_address=? and created_contract_address_full=?", truncBTo4(address.Bytes()), address.Bytes()) + row := db.QueryRowContext(ctx, "select tx.hash from exec_tx join tx on tx.id=exec_tx.tx where created_contract_address=? ", address.Bytes()) var txHashBytes []byte err := row.Scan(&txHashBytes) @@ -427,7 +424,7 @@ func ReadUnexecutedBatches(ctx context.Context, db *sql.DB, from *big.Int) ([]*c } func BatchWasExecuted(ctx context.Context, db *sql.DB, hash common.L2BatchHash) (bool, error) { - row := db.QueryRowContext(ctx, "select is_executed from batch where is_canonical=true and hash=? and full_hash=?", truncTo4(hash), hash.Bytes()) + row := db.QueryRowContext(ctx, "select is_executed from batch where is_canonical=true and hash=? 
", hash.Bytes()) var result bool err := row.Scan(&result) @@ -467,7 +464,7 @@ func GetPublicTransactionData(ctx context.Context, db *sql.DB, pagination *commo func selectPublicTxsBySender(ctx context.Context, db *sql.DB, query string, args ...any) ([]common.PublicTransaction, error) { var publicTxs []common.PublicTransaction - q := "select tx.full_hash, batch.height, batch.header from exec_tx join batch on batch.sequence=exec_tx.batch join tx on tx.id=exec_tx.tx where batch.is_canonical=true " + query + q := "select tx.hash, batch.height, batch.header from exec_tx join batch on batch.sequence=exec_tx.batch join tx on tx.id=exec_tx.tx where batch.is_canonical=true " + query rows, err := db.QueryContext(ctx, q, args...) if err != nil { if errors.Is(err, sql.ErrNoRows) { diff --git a/go/enclave/storage/enclavedb/block.go b/go/enclave/storage/enclavedb/block.go index e68e2cf6fe..155b09fd8b 100644 --- a/go/enclave/storage/enclavedb/block.go +++ b/go/enclave/storage/enclavedb/block.go @@ -22,12 +22,11 @@ func WriteBlock(ctx context.Context, dbtx *sql.Tx, b *types.Header) error { return fmt.Errorf("could not encode block header. Cause: %w", err) } - _, err = dbtx.ExecContext(ctx, "insert into block (hash,full_hash,is_canonical,header,height) values (?,?,?,?,?)", - truncTo4(b.Hash()), // hash - b.Hash().Bytes(), // full_hash - true, // is_canonical - header, // header - b.Number.Uint64(), // height + _, err = dbtx.ExecContext(ctx, "insert into block (hash,is_canonical,header,height) values (?,?,?,?)", + b.Hash().Bytes(), // hash + true, // is_canonical + header, // header + b.Number.Uint64(), // height ) return err } @@ -49,12 +48,12 @@ func UpdateCanonicalBlocks(ctx context.Context, dbtx *sql.Tx, canonical []common } func updateCanonicalValue(ctx context.Context, dbtx *sql.Tx, isCanonical bool, blocks []common.L1BlockHash, _ gethlog.Logger) error { - currentBlocks := repeat("(hash=? and full_hash=?)", "OR", len(blocks)) + currentBlocks := repeat(" hash=? ", "OR", len(blocks)) args := make([]any, 0) args = append(args, isCanonical) for _, blockHash := range blocks { - args = append(args, truncTo4(blockHash), blockHash.Bytes()) + args = append(args, blockHash.Bytes()) } updateBlocks := "update block set is_canonical=? where " + currentBlocks @@ -80,7 +79,7 @@ func SetMissingBlockId(ctx context.Context, dbtx *sql.Tx, blockId int64, blockHa // todo - remove this. For now creates a "block" but without a body. func FetchBlock(ctx context.Context, db *sql.DB, hash common.L1BlockHash) (*types.Block, error) { - return fetchBlock(ctx, db, " where hash=? and full_hash=?", truncTo4(hash), hash.Bytes()) + return fetchBlock(ctx, db, " where hash=?", hash.Bytes()) } func FetchHeadBlock(ctx context.Context, db *sql.DB) (*types.Block, error) { @@ -93,7 +92,7 @@ func FetchBlockHeaderByHeight(ctx context.Context, db *sql.DB, height *big.Int) func GetBlockId(ctx context.Context, db *sql.Tx, hash common.L1BlockHash) (int64, error) { var id int64 - err := db.QueryRowContext(ctx, "select id from block where hash=? and full_hash=?", truncTo4(hash), hash).Scan(&id) + err := db.QueryRowContext(ctx, "select id from block where hash=? ", hash).Scan(&id) if err != nil { return 0, err } @@ -123,8 +122,8 @@ func WriteL1Messages[T any](ctx context.Context, db *sql.Tx, blockId int64, mess func FetchL1Messages[T any](ctx context.Context, db *sql.DB, blockHash common.L1BlockHash, isTransfer bool) ([]T, error) { var result []T - query := "select message from l1_msg m join block b on m.block=b.id where b.hash = ? and b.full_hash = ? 
and is_transfer = ?" - rows, err := db.QueryContext(ctx, query, truncTo4(blockHash), blockHash.Bytes(), isTransfer) + query := "select message from l1_msg m join block b on m.block=b.id where b.hash = ? and is_transfer = ?" + rows, err := db.QueryContext(ctx, query, blockHash.Bytes(), isTransfer) if err != nil { if errors.Is(err, sql.ErrNoRows) { // make sure the error is converted to obscuro-wide not found error @@ -158,8 +157,7 @@ func WriteRollup(ctx context.Context, dbtx *sql.Tx, rollup *common.RollupHeader, if err != nil { return fmt.Errorf("could not encode batch header. Cause: %w", err) } - _, err = dbtx.ExecContext(ctx, "replace into rollup (hash, full_hash, start_seq, end_seq, time_stamp, header, compression_block) values (?,?,?,?,?,?,?)", - truncTo4(rollup.Hash()), + _, err = dbtx.ExecContext(ctx, "replace into rollup (hash, start_seq, end_seq, time_stamp, header, compression_block) values (?,?,?,?,?,?)", rollup.Hash().Bytes(), internalHeader.FirstBatchSequence.Uint64(), rollup.LastBatchSeqNo, @@ -175,13 +173,13 @@ func WriteRollup(ctx context.Context, dbtx *sql.Tx, rollup *common.RollupHeader, } func FetchReorgedRollup(ctx context.Context, db *sql.DB, reorgedBlocks []common.L1BlockHash) (*common.L2BatchHash, error) { - whereClause := repeat("(b.hash=? and b.full_hash=?)", "OR", len(reorgedBlocks)) + whereClause := repeat(" b.hash=? ", "OR", len(reorgedBlocks)) - query := "select r.full_hash from rollup r join block b on r.compression_block=b.id where " + whereClause + query := "select r.hash from rollup r join block b on r.compression_block=b.id where " + whereClause args := make([]any, 0) for _, blockHash := range reorgedBlocks { - args = append(args, truncTo4(blockHash), blockHash.Bytes()) + args = append(args, blockHash.Bytes()) } rollup := new(common.L2BatchHash) err := db.QueryRowContext(ctx, query, args...).Scan(&rollup) @@ -201,7 +199,7 @@ func FetchRollupMetadata(ctx context.Context, db *sql.DB, hash common.L2RollupHa rollup := new(common.PublicRollupMetadata) err := db.QueryRowContext(ctx, - "select start_seq, time_stamp from rollup where hash = ? and full_hash=?", truncTo4(hash), hash.Bytes(), + "select start_seq, time_stamp from rollup where hash = ?", hash.Bytes(), ).Scan(&startSeq, &startTime) if err != nil { if errors.Is(err, sql.ErrNoRows) { diff --git a/go/enclave/storage/enclavedb/events.go b/go/enclave/storage/enclavedb/events.go index 1c22b8a4e1..e396e94179 100644 --- a/go/enclave/storage/enclavedb/events.go +++ b/go/enclave/storage/enclavedb/events.go @@ -40,7 +40,7 @@ func StoreEventLogs(ctx context.Context, dbtx *sql.Tx, receipts []*types.Receipt } } if totalLogs > 0 { - query := "insert into events values " + repeat("(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", ",", totalLogs) + query := "insert into events values " + repeat("(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", ",", totalLogs) _, err := dbtx.ExecContext(ctx, query, args...) if err != nil { return err @@ -127,13 +127,10 @@ func logDBValues(ctx context.Context, db *sql.Tx, l *types.Log, stateDB *state.S } return []any{ - truncBTo4(t0), truncBTo4(t1), truncBTo4(t2), truncBTo4(t3), truncBTo4(t4), t0, t1, t2, t3, t4, data, l.Index, - truncBTo4(l.Address.Bytes()), l.Address.Bytes(), isLifecycle, - truncBTo4(a1), truncBTo4(a2), truncBTo4(a3), truncBTo4(a4), a1, a2, a3, a4, }, nil } @@ -150,8 +147,8 @@ func FilterLogs( queryParams := []any{} query := "" if batchHash != nil { - query += " AND b.hash = ? AND b.full_hash = ?" 
- queryParams = append(queryParams, truncTo4(*batchHash), batchHash.Bytes()) + query += " AND b.hash = ? " + queryParams = append(queryParams, batchHash.Bytes()) } // ignore negative numbers @@ -165,10 +162,9 @@ func FilterLogs( } if len(addresses) > 0 { - cond := repeat("(address=? AND address_full=?)", " OR ", len(addresses)) + cond := repeat("(address=?)", " OR ", len(addresses)) query += " AND (" + cond + ")" for _, address := range addresses { - queryParams = append(queryParams, truncBTo4(address.Bytes())) queryParams = append(queryParams, address.Bytes()) } } @@ -180,10 +176,9 @@ func FilterLogs( // empty rule set == wildcard if len(sub) > 0 { topicColumn := fmt.Sprintf("topic%d", i) - cond := repeat(fmt.Sprintf("(%s=? AND %s_full=?)", topicColumn, topicColumn), "OR", len(sub)) + cond := repeat(fmt.Sprintf("(%s=? )", topicColumn), " OR ", len(sub)) query += " AND (" + cond + ")" for _, topic := range sub { - queryParams = append(queryParams, truncBTo4(topic.Bytes())) queryParams = append(queryParams, topic.Bytes()) } } @@ -196,11 +191,11 @@ func FilterLogs( func DebugGetLogs(ctx context.Context, db *sql.DB, txHash common.TxHash) ([]*tracers.DebugLogs, error) { var queryParams []any - query := "select rel_address1_full, rel_address2_full, rel_address3_full, rel_address4_full, lifecycle_event, topic0_full, topic1_full, topic2_full, topic3_full, topic4_full, datablob, b.full_hash, b.height, tx.full_hash, tx.idx, log_idx, address_full " + + query := "select rel_address1, rel_address2, rel_address3, rel_address4, lifecycle_event, topic0, topic1, topic2, topic3, topic4, datablob, b.hash, b.height, tx.hash, tx.idx, log_idx, address " + baseEventsJoin + - " AND tx.hash = ? AND tx.full_hash = ?" + " AND tx.hash = ? " - queryParams = append(queryParams, truncTo4(txHash), txHash.Bytes()) + queryParams = append(queryParams, txHash.Bytes()) result := make([]*tracers.DebugLogs, 0) @@ -283,8 +278,8 @@ func isEndUserAccount(ctx context.Context, db *sql.Tx, topic gethcommon.Hash, st addrBytes := potentialAddr.Bytes() // Check the database if there are already entries for this address var count int - query := "select count(*) from events where (rel_address1=? and rel_address1_full=?) OR (rel_address2=? and rel_address2_full=?) OR (rel_address3=? and rel_address3_full=?) OR (rel_address4=? and rel_address4_full=?)" - err := db.QueryRowContext(ctx, query, truncBTo4(addrBytes), addrBytes, truncBTo4(addrBytes), addrBytes, truncBTo4(addrBytes), addrBytes, truncBTo4(addrBytes), addrBytes).Scan(&count) + query := "select count(*) from events where (rel_address1=?) OR (rel_address2=?) OR (rel_address3=? ) OR (rel_address4=? )" + err := db.QueryRowContext(ctx, query, addrBytes, addrBytes, addrBytes, addrBytes).Scan(&count) if err != nil { // exit here return false, nil, err @@ -314,20 +309,16 @@ func loadLogs(ctx context.Context, db *sql.DB, requestingAccount *gethcommon.Add } result := make([]*types.Log, 0) - query := "select topic0_full, topic1_full, topic2_full, topic3_full, topic4_full, datablob, b.full_hash, b.height, tx.full_hash, tx.idx, log_idx, address_full" + " " + baseEventsJoin + query := "select topic0, topic1, topic2, topic3, topic4, datablob, b.hash, b.height, tx.hash, tx.idx, log_idx, address" + " " + baseEventsJoin var queryParams []any // Add relevancy rules // An event is considered relevant to all account owners whose addresses are used as topics in the event. 
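FilterLogs above keeps geth's filter semantics while dropping the duplicated `_full` placeholders: the outer topics slice is positional (ANDed), each inner slice is a set of alternatives (ORed), and an empty inner slice is a wildcard. A hedged distillation of the topic loop, assuming the `repeat` helper and gethcommon alias from the surrounding package (appendTopicFilter is illustrative only):

// appendTopicFilter ANDs one OR-group onto the query for each topic position
// that actually constrains the filter, binding the full 32-byte topic values.
func appendTopicFilter(query string, params []any, topics [][]gethcommon.Hash) (string, []any) {
	for i, sub := range topics {
		if len(sub) == 0 {
			continue // empty rule set == wildcard
		}
		topicColumn := fmt.Sprintf("topic%d", i)
		query += " AND (" + repeat("("+topicColumn+"=? )", " OR ", len(sub)) + ")"
		for _, topic := range sub {
			params = append(params, topic.Bytes())
		}
	}
	return query, params
}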
// In case there are no account addresses in an event's topics, then the event is considered relevant to everyone (known as a "lifecycle event"). - query += " AND (lifecycle_event OR ((rel_address1=? AND rel_address1_full=?) OR (rel_address2=? AND rel_address2_full=?) OR (rel_address3=? AND rel_address3_full=?) OR (rel_address4=? AND rel_address4_full=?))) " - queryParams = append(queryParams, truncBTo4(requestingAccount.Bytes())) + query += " AND (lifecycle_event OR (rel_address1=? OR rel_address2=? OR rel_address3=? OR rel_address4=?)) " queryParams = append(queryParams, requestingAccount.Bytes()) - queryParams = append(queryParams, truncBTo4(requestingAccount.Bytes())) queryParams = append(queryParams, requestingAccount.Bytes()) - queryParams = append(queryParams, truncBTo4(requestingAccount.Bytes())) queryParams = append(queryParams, requestingAccount.Bytes()) - queryParams = append(queryParams, truncBTo4(requestingAccount.Bytes())) queryParams = append(queryParams, requestingAccount.Bytes()) query += whereCondition diff --git a/go/enclave/storage/enclavedb/utils.go b/go/enclave/storage/enclavedb/utils.go index 57f0dcbaf3..75ba6c9522 100644 --- a/go/enclave/storage/enclavedb/utils.go +++ b/go/enclave/storage/enclavedb/utils.go @@ -2,24 +2,8 @@ package enclavedb import ( "strings" - - gethcommon "github.com/ethereum/go-ethereum/common" ) -func truncTo4(hash gethcommon.Hash) []byte { - return truncBTo4(hash.Bytes()) -} - -func truncBTo4(bytes []byte) []byte { - if len(bytes) == 0 { - return bytes - } - b := bytes[0:4] - c := make([]byte, 4) - copy(c, b) - return c -} - func repeat(token string, sep string, count int) string { elems := make([]string, count) for i := 0; i < count; i++ { diff --git a/go/enclave/storage/init/edgelessdb/001_init.sql b/go/enclave/storage/init/edgelessdb/001_init.sql index 95b05e75ce..2c0291b73c 100644 --- a/go/enclave/storage/init/edgelessdb/001_init.sql +++ b/go/enclave/storage/init/edgelessdb/001_init.sql @@ -33,14 +33,13 @@ GRANT ALL ON obsdb.attestation_key TO obscuro; create table if not exists obsdb.block ( id INTEGER AUTO_INCREMENT, - hash binary(4), - full_hash binary(32), + hash binary(32), is_canonical boolean NOT NULL, header blob NOT NULL, height int NOT NULL, primary key (id), INDEX (height), - INDEX (hash) + INDEX (hash(4)) ); GRANT ALL ON obsdb.block TO obscuro; @@ -58,15 +57,14 @@ GRANT ALL ON obsdb.l1_msg TO obscuro; create table if not exists obsdb.rollup ( id INTEGER AUTO_INCREMENT, - hash binary(4), - full_hash binary(32), + hash binary(32), start_seq int NOT NULL, end_seq int NOT NULL, time_stamp int NOT NULL, header blob NOT NULL, compression_block INTEGER NOT NULL, INDEX (compression_block), - INDEX (hash), + INDEX (hash(4)), primary key (id) ); GRANT ALL ON obsdb.rollup TO obscuro; @@ -82,9 +80,8 @@ GRANT ALL ON obsdb.batch_body TO obscuro; create table if not exists obsdb.batch ( sequence int, - full_hash binary(32), converted_hash binary(32) NOT NULL, - hash binary(4) NOT NULL, + hash binary(32) NOT NULL, height int NOT NULL, is_canonical boolean NOT NULL, header blob NOT NULL, @@ -103,15 +100,14 @@ GRANT ALL ON obsdb.batch TO obscuro; create table if not exists obsdb.tx ( id INTEGER AUTO_INCREMENT, - hash binary(4), - full_hash binary(32) NOT NULL, + hash binary(32), content mediumblob NOT NULL, sender_address binary(20) NOT NULL, nonce int NOT NULL, idx int NOT NULL, body int NOT NULL, INDEX (body), - INDEX (hash), + INDEX (hash(4)), primary key (id) ); GRANT ALL ON obsdb.tx TO obscuro; @@ -119,56 +115,36 @@ GRANT ALL ON obsdb.tx TO 
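With truncTo4 and truncBTo4 deleted above, truncation stops being an application concern: the `INDEX (hash(4))` column-prefix indexes in the Edgeless DB schema index only the first four bytes, while the query still compares the full 32-byte value, so prefix collisions are resolved by the engine rather than by a second `_full` predicate. The contrast, sketched as query shapes (these tx lookups are simplified illustrations, not lifted verbatim from the patch):

// Old scheme: a 4-byte column for the index plus a 32-byte column for
// exactness, both bound on every lookup.
const oldTxLookup = "select content from tx where hash=? and full_hash=?"

// New scheme: one exact predicate; INDEX (hash(4)) narrows the candidate set
// and the engine re-checks survivors against the full stored value.
const newTxLookup = "select content from tx where hash=?"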
obscuro; create table if not exists obsdb.exec_tx ( id INTEGER AUTO_INCREMENT, - created_contract_address binary(4), - created_contract_address_full binary(20), + created_contract_address binary(20), receipt mediumblob, tx int, batch int NOT NULL, - INDEX (batch), - INDEX (tx), - INDEX (created_contract_address), + INDEX (batch,tx), + INDEX (created_contract_address(4)), primary key (id) ); GRANT ALL ON obsdb.exec_tx TO obscuro; create table if not exists obsdb.events ( - topic0 binary(4) NOT NULL, - topic1 binary(4), - topic2 binary(4), - topic3 binary(4), - topic4 binary(4), - topic0_full binary(32) NOT NULL, - topic1_full binary(32), - topic2_full binary(32), - topic3_full binary(32), - topic4_full binary(32), - datablob mediumblob, - log_idx int NOT NULL, - address binary(4) NOT NULL, - address_full binary(20) NOT NULL, - lifecycle_event boolean NOT NULL, - rel_address1 binary(4), - rel_address2 binary(4), - rel_address3 binary(4), - rel_address4 binary(4), - rel_address1_full binary(20), - rel_address2_full binary(20), - rel_address3_full binary(20), - rel_address4_full binary(20), - tx int, - batch int NOT NULL, - INDEX (tx), - INDEX (batch), - INDEX (address), - INDEX (rel_address1), - INDEX (rel_address2), - INDEX (rel_address3), - INDEX (rel_address4), - INDEX (topic0), - INDEX (topic1), - INDEX (topic2), - INDEX (topic3), - INDEX (topic4) + topic0 binary(32) NOT NULL, + topic1 binary(32), + topic2 binary(32), + topic3 binary(32), + topic4 binary(32), + datablob mediumblob, + log_idx int NOT NULL, + address binary(20) NOT NULL, + lifecycle_event boolean NOT NULL, + rel_address1 binary(20), + rel_address2 binary(20), + rel_address3 binary(20), + rel_address4 binary(20), + tx int, + batch int NOT NULL, + INDEX (batch, tx), + INDEX (address(4)), + INDEX (rel_address1(4), rel_address2(4), rel_address3(4), rel_address4(4)), + INDEX (topic0(4), topic1(4), topic2(4), topic3(4), topic4(4)) ); GRANT ALL ON obsdb.events TO obscuro; \ No newline at end of file diff --git a/go/enclave/storage/init/sqlite/001_init.sql b/go/enclave/storage/init/sqlite/001_init.sql index 5dd9db4ee0..9e9ea6676e 100644 --- a/go/enclave/storage/init/sqlite/001_init.sql +++ b/go/enclave/storage/init/sqlite/001_init.sql @@ -26,8 +26,7 @@ create table if not exists attestation_key create table if not exists block ( id INTEGER PRIMARY KEY AUTOINCREMENT, - hash binary(4), - full_hash binary(32), + hash binary(32), is_canonical boolean NOT NULL, header blob NOT NULL, height int NOT NULL @@ -49,8 +48,7 @@ create index L1_MSG_BLOCK_IDX on l1_msg (block); create table if not exists rollup ( id INTEGER PRIMARY KEY AUTOINCREMENT, - hash binary(4), - full_hash binary(32), + hash binary(32), start_seq int NOT NULL, end_seq int NOT NULL, time_stamp int NOT NULL, @@ -69,9 +67,8 @@ create table if not exists batch_body create table if not exists batch ( sequence int primary key, - full_hash binary(32), converted_hash binary(32), - hash binary(4) NOT NULL, + hash binary(32) NOT NULL, height int NOT NULL, is_canonical boolean NOT NULL, header blob NOT NULL, @@ -90,8 +87,7 @@ create index IDX_BATCH_BODY on batch (body); create table if not exists tx ( id INTEGER PRIMARY KEY AUTOINCREMENT, - hash binary(4), - full_hash binary(32) NOT NULL, + hash binary(32), content mediumblob NOT NULL, sender_address binary(20) NOT NULL, nonce int NOT NULL, @@ -104,55 +100,35 @@ create index IDX_TX_BODY on tx (body); create table if not exists exec_tx ( id INTEGER PRIMARY KEY AUTOINCREMENT, - created_contract_address binary(4), - 
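The events indexes above deserve a second look: a composite index such as `(rel_address1(4), rel_address2(4), rel_address3(4), rel_address4(4))` obeys the leftmost-prefix rule, so it can serve a predicate on rel_address1 but not the relevancy query's OR across all four columns, which is presumably why the later patches in this series split these back into one index per column. The query shape the indexes have to serve, for reference (this is the filter fragment loadLogs builds):

// One OR arm per rel_address column; a composite index only covers the
// leftmost arm, while per-column indexes can each serve their own arm.
const relevancyFilter = " AND (lifecycle_event OR (rel_address1=? OR rel_address2=? OR rel_address3=? OR rel_address4=?)) "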
created_contract_address_full binary(20), + created_contract_address binary(20), receipt mediumblob, -- commenting out the fk until synthetic transactions are also stored tx INTEGER, batch INTEGER NOT NULL REFERENCES batch ); -create index IDX_EX_TX_TX on exec_tx (tx); -create index IDX_EX_TX_BATCH on exec_tx (batch); +create index IDX_EX_TX_BATCH on exec_tx (batch,tx); create index IDX_EX_TX_CCA on exec_tx (created_contract_address); -- todo denormalize. Extract contract and user table and point topic0 and rel_addreses to it create table if not exists events ( - topic0 binary(4) NOT NULL, - topic1 binary(4), - topic2 binary(4), - topic3 binary(4), - topic4 binary(4), - topic0_full binary(32) NOT NULL, - topic1_full binary(32), - topic2_full binary(32), - topic3_full binary(32), - topic4_full binary(32), - datablob mediumblob, - log_idx int NOT NULL, - address binary(4) NOT NULL, - address_full binary(20) NOT NULL, - lifecycle_event boolean NOT NULL, - rel_address1 binary(4), - rel_address2 binary(4), - rel_address3 binary(4), - rel_address4 binary(4), - rel_address1_full binary(20), - rel_address2_full binary(20), - rel_address3_full binary(20), - rel_address4_full binary(20), - tx INTEGER, - batch INTEGER NOT NULL REFERENCES batch + topic0 binary(32) NOT NULL, + topic1 binary(32), + topic2 binary(32), + topic3 binary(32), + topic4 binary(32), + datablob mediumblob, + log_idx int NOT NULL, + address binary(20) NOT NULL, + lifecycle_event boolean NOT NULL, + rel_address1 binary(20), + rel_address2 binary(20), + rel_address3 binary(20), + rel_address4 binary(20), + tx INTEGER, + batch INTEGER NOT NULL REFERENCES batch ); +create index IDX_BATCH_TX on events (batch, tx); create index IDX_AD on events (address); -create index IDX_RAD1 on events (rel_address1); -create index IDX_RAD2 on events (rel_address2); -create index IDX_RAD3 on events (rel_address3); -create index IDX_RAD4 on events (rel_address4); -create index IDX_T0 on events (topic0); -create index IDX_T1 on events (topic1); -create index IDX_T2 on events (topic2); -create index IDX_T3 on events (topic3); -create index IDX_T4 on events (topic4); -create index IDX_TX on events (tx); -create index IDX_BATCH on events (batch); \ No newline at end of file +create index IDX_RAD1 on events (rel_address1, rel_address2, rel_address3, rel_address4); +create index IDX_T0 on events (topic0, topic1, topic2, topic3, topic4); From eff485b826ddfa6490ea42c179b0f9b860047951 Mon Sep 17 00:00:00 2001 From: Tudor Malene Date: Fri, 10 May 2024 15:58:36 +0100 Subject: [PATCH 13/15] update index approach --- go/enclave/storage/enclavedb/keyvalue.go | 36 +++++++------------ .../storage/init/edgelessdb/001_init.sql | 28 +++++++++------ go/enclave/storage/init/sqlite/001_init.sql | 3 +- 3 files changed, 30 insertions(+), 37 deletions(-) diff --git a/go/enclave/storage/enclavedb/keyvalue.go b/go/enclave/storage/enclavedb/keyvalue.go index 366b098011..336ca37ba7 100644 --- a/go/enclave/storage/enclavedb/keyvalue.go +++ b/go/enclave/storage/enclavedb/keyvalue.go @@ -5,25 +5,24 @@ import ( "database/sql" "errors" "fmt" - "hash/fnv" "github.com/ethereum/go-ethereum/ethdb" "github.com/ten-protocol/go-ten/go/common/errutil" ) const ( - getQry = `select keyvalue.val from keyvalue where keyvalue.ky = ? and keyvalue.ky_full = ?;` + getQry = `select keyvalue.val from keyvalue where keyvalue.ky = ? 
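Patch 13 above retires the FNV digest column from the key-value store. The digest existed because plain truncation of these keys is useless (state keys share long common prefixes), but it cost a hash computation and an extra bound argument on every read and write. A hypothetical before/after of the per-key work, with fnvDigest mirroring the deleted hash() helper:

import "hash/fnv"

// fnvDigest reproduces what the old write path computed for the ky column.
func fnvDigest(key []byte) []byte {
	h := fnv.New32()
	h.Write(key) // hash.Hash's Write never returns an error
	return h.Sum(nil)
}

// Old row: (digest, full key, value). New row: (key, value), with ky indexed directly.
func putArgsOld(key, val []byte) []any { return []any{fnvDigest(key), key, val} }
func putArgsNew(key, val []byte) []any { return []any{key, val} }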
;` // `replace` will perform insert or replace if existing and this syntax works for both sqlite and edgeless db - putQry = `replace into keyvalue (ky, ky_full, val) values(?, ?, ?);` - putQryBatch = `replace into keyvalue (ky, ky_full, val) values` - putQryValues = `(?,?,?)` - delQry = `delete from keyvalue where keyvalue.ky = ? and keyvalue.ky_full = ?;` + putQry = `replace into keyvalue (ky, val) values(?, ?);` + putQryBatch = `replace into keyvalue (ky, val) values` + putQryValues = `(?,?)` + delQry = `delete from keyvalue where keyvalue.ky = ? ;` // todo - how is the performance of this? - searchQry = `select ky_full, val from keyvalue where substring(keyvalue.ky_full, 1, ?) = ? and keyvalue.ky_full >= ? order by keyvalue.ky_full asc` + searchQry = `select ky, val from keyvalue where substring(keyvalue.ky, 1, ?) = ? and keyvalue.ky >= ? order by keyvalue.ky asc` ) func Has(ctx context.Context, db *sql.DB, key []byte) (bool, error) { - err := db.QueryRowContext(ctx, getQry, hash(key), key).Scan() + err := db.QueryRowContext(ctx, getQry, key).Scan() if err != nil { if errors.Is(err, sql.ErrNoRows) { return false, nil @@ -36,7 +35,7 @@ func Has(ctx context.Context, db *sql.DB, key []byte) (bool, error) { func Get(ctx context.Context, db *sql.DB, key []byte) ([]byte, error) { var res []byte - err := db.QueryRowContext(ctx, getQry, hash(key), key).Scan(&res) + err := db.QueryRowContext(ctx, getQry, key).Scan(&res) if err != nil { if errors.Is(err, sql.ErrNoRows) { // make sure the error is converted to obscuro-wide not found error @@ -48,7 +47,7 @@ func Get(ctx context.Context, db *sql.DB, key []byte) ([]byte, error) { } func Put(ctx context.Context, db *sql.DB, key []byte, value []byte) error { - _, err := db.ExecContext(ctx, putQry, hash(key), key, value) + _, err := db.ExecContext(ctx, putQry, key, value) return err } @@ -63,7 +62,7 @@ func PutKeyValues(ctx context.Context, tx *sql.Tx, keys [][]byte, vals [][]byte) values := make([]any, 0) for i := range keys { - values = append(values, hash(keys[i]), keys[i], vals[i]) + values = append(values, keys[i], vals[i]) } _, err := tx.ExecContext(ctx, update, values...) 
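PutKeyValues above turns a batch of writes into a single multi-row `replace`, so N keys cost one round trip with 2N placeholders instead of N separate statements. A usage sketch under the new two-column scheme, assuming the putQryBatch/putQryValues constants and the `repeat` helper (flushBatch is a hypothetical caller):

// flushBatch persists parallel key/value slices in one statement.
func flushBatch(ctx context.Context, tx *sql.Tx, keys, vals [][]byte) error {
	if len(keys) != len(vals) {
		return fmt.Errorf("keys and values must be the same length")
	}
	update := putQryBatch + repeat(putQryValues, ",", len(keys))
	values := make([]any, 0, 2*len(keys))
	for i := range keys {
		values = append(values, keys[i], vals[i])
	}
	_, err := tx.ExecContext(ctx, update, values...)
	return err
}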
if err != nil { @@ -75,13 +74,13 @@ func PutKeyValues(ctx context.Context, tx *sql.Tx, keys [][]byte, vals [][]byte) } func Delete(ctx context.Context, db *sql.DB, key []byte) error { - _, err := db.ExecContext(ctx, delQry, hash(key), key) + _, err := db.ExecContext(ctx, delQry, key) return err } func DeleteKeys(ctx context.Context, db *sql.Tx, keys [][]byte) error { for _, del := range keys { - _, err := db.ExecContext(ctx, delQry, hash(del), del) + _, err := db.ExecContext(ctx, delQry, del) if err != nil { return err } @@ -108,14 +107,3 @@ func NewIterator(ctx context.Context, db *sql.DB, prefix []byte, start []byte) e rows: rows, } } - -// hash returns 4 bytes "hash" of the key to be indexed -// truncating is not sufficient because the keys are not random -func hash(key []byte) []byte { - h := fnv.New32() - _, err := h.Write(key) - if err != nil { - return nil - } - return h.Sum([]byte{}) -} diff --git a/go/enclave/storage/init/edgelessdb/001_init.sql b/go/enclave/storage/init/edgelessdb/001_init.sql index 2c0291b73c..281ee4dc47 100644 --- a/go/enclave/storage/init/edgelessdb/001_init.sql +++ b/go/enclave/storage/init/edgelessdb/001_init.sql @@ -4,11 +4,10 @@ CREATE DATABASE obsdb; create table if not exists obsdb.keyvalue ( id INTEGER AUTO_INCREMENT, - ky binary(4), - ky_full varbinary(64), + ky varbinary(64), val mediumblob NOT NULL, primary key (id), - INDEX (ky) + INDEX USING HASH (ky) ); GRANT ALL ON obsdb.keyvalue TO obscuro; @@ -39,7 +38,7 @@ create table if not exists obsdb.block height int NOT NULL, primary key (id), INDEX (height), - INDEX (hash(4)) + INDEX USING HASH (hash(16)) ); GRANT ALL ON obsdb.block TO obscuro; @@ -64,7 +63,7 @@ create table if not exists obsdb.rollup header blob NOT NULL, compression_block INTEGER NOT NULL, INDEX (compression_block), - INDEX (hash(4)), + INDEX USING HASH (hash(16)), primary key (id) ); GRANT ALL ON obsdb.rollup TO obscuro; @@ -90,7 +89,7 @@ create table if not exists obsdb.batch l1_proof INTEGER, is_executed boolean NOT NULL, primary key (sequence), - INDEX (hash), + INDEX USING HASH (hash(16)), INDEX (body), INDEX (height, is_canonical), INDEX (l1_proof) @@ -107,7 +106,7 @@ create table if not exists obsdb.tx idx int NOT NULL, body int NOT NULL, INDEX (body), - INDEX (hash(4)), + INDEX USING HASH (hash(16)), primary key (id) ); GRANT ALL ON obsdb.tx TO obscuro; @@ -120,7 +119,7 @@ create table if not exists obsdb.exec_tx tx int, batch int NOT NULL, INDEX (batch,tx), - INDEX (created_contract_address(4)), + INDEX (tx, created_contract_address(4)), primary key (id) ); GRANT ALL ON obsdb.exec_tx TO obscuro; @@ -143,8 +142,15 @@ create table if not exists obsdb.events tx int, batch int NOT NULL, INDEX (batch, tx), - INDEX (address(4)), - INDEX (rel_address1(4), rel_address2(4), rel_address3(4), rel_address4(4)), - INDEX (topic0(4), topic1(4), topic2(4), topic3(4), topic4(4)) + INDEX USING HASH (address(16)), + INDEX USING HASH (rel_address1(16)), + INDEX USING HASH (rel_address2(16)), + INDEX USING HASH (rel_address3(16)), + INDEX USING HASH (rel_address4(16)), + INDEX USING HASH (topic0(16)), + INDEX USING HASH (topic1(16)), + INDEX USING HASH (topic2(16)), + INDEX USING HASH (topic3(16)), + INDEX USING HASH (topic4(16)) ); GRANT ALL ON obsdb.events TO obscuro; \ No newline at end of file diff --git a/go/enclave/storage/init/sqlite/001_init.sql b/go/enclave/storage/init/sqlite/001_init.sql index 9e9ea6676e..6244458ad5 100644 --- a/go/enclave/storage/init/sqlite/001_init.sql +++ b/go/enclave/storage/init/sqlite/001_init.sql @@ -1,8 +1,7 
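NewIterator above drives ethdb-style prefix scans through searchQry: `substring(keyvalue.ky, 1, ?) = ?` pins the prefix, `keyvalue.ky >= ?` sets the resume point, and the ascending order makes the cursor deterministic. How the three placeholders are bound for a scan over prefix resuming at start, sketched under the assumption of geth's iterator semantics (the helper is hypothetical):

// prefixScanArgs produces searchQry's arguments: the prefix length, the
// prefix itself, and prefix+start as the inclusive lower bound of the walk.
func prefixScanArgs(prefix, start []byte) []any {
	from := append(append([]byte{}, prefix...), start...)
	return []any{len(prefix), prefix, from}
}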
@@ create table if not exists keyvalue ( id INTEGER PRIMARY KEY AUTOINCREMENT, - ky binary(4), - ky_full varbinary(64), + ky varbinary(64), val mediumblob NOT NULL ); create index IDX_KV on keyvalue (ky); From a209f991885f7fd5260e1517bc85a6911fb7f501 Mon Sep 17 00:00:00 2001 From: Tudor Malene Date: Fri, 10 May 2024 17:43:28 +0100 Subject: [PATCH 14/15] tweaking --- go/enclave/storage/enclavedb/events.go | 3 +- .../storage/init/edgelessdb/001_init.sql | 86 +++++++++---------- go/enclave/storage/init/sqlite/001_init.sql | 29 ++++--- 3 files changed, 60 insertions(+), 58 deletions(-) diff --git a/go/enclave/storage/enclavedb/events.go b/go/enclave/storage/enclavedb/events.go index e396e94179..2723c9a3a2 100644 --- a/go/enclave/storage/enclavedb/events.go +++ b/go/enclave/storage/enclavedb/events.go @@ -40,7 +40,8 @@ func StoreEventLogs(ctx context.Context, dbtx *sql.Tx, receipts []*types.Receipt } } if totalLogs > 0 { - query := "insert into events values " + repeat("(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", ",", totalLogs) + query := "insert into events (topic0,topic1,topic2,topic3,topic4,datablob,log_idx,address,lifecycle_event,rel_address1,rel_address2,rel_address3,rel_address4,tx,batch) values " + + repeat("(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", ",", totalLogs) _, err := dbtx.ExecContext(ctx, query, args...) if err != nil { return err diff --git a/go/enclave/storage/init/edgelessdb/001_init.sql b/go/enclave/storage/init/edgelessdb/001_init.sql index 281ee4dc47..b118cd1111 100644 --- a/go/enclave/storage/init/edgelessdb/001_init.sql +++ b/go/enclave/storage/init/edgelessdb/001_init.sql @@ -3,9 +3,9 @@ CREATE DATABASE obsdb; create table if not exists obsdb.keyvalue ( - id INTEGER AUTO_INCREMENT, - ky varbinary(64), - val mediumblob NOT NULL, + id INTEGER AUTO_INCREMENT, + ky varbinary(64) NOT NULL, + val mediumblob NOT NULL, primary key (id), INDEX USING HASH (ky) ); @@ -32,13 +32,13 @@ GRANT ALL ON obsdb.attestation_key TO obscuro; create table if not exists obsdb.block ( id INTEGER AUTO_INCREMENT, - hash binary(32), - is_canonical boolean NOT NULL, - header blob NOT NULL, - height int NOT NULL, + hash binary(32) NOT NULL, + is_canonical boolean NOT NULL, + header blob NOT NULL, + height int NOT NULL, primary key (id), INDEX (height), - INDEX USING HASH (hash(16)) + INDEX USING HASH (hash(8)) ); GRANT ALL ON obsdb.block TO obscuro; @@ -56,21 +56,21 @@ GRANT ALL ON obsdb.l1_msg TO obscuro; create table if not exists obsdb.rollup ( id INTEGER AUTO_INCREMENT, - hash binary(32), - start_seq int NOT NULL, - end_seq int NOT NULL, - time_stamp int NOT NULL, - header blob NOT NULL, - compression_block INTEGER NOT NULL, + hash binary(32) NOT NULL, + start_seq int NOT NULL, + end_seq int NOT NULL, + time_stamp int NOT NULL, + header blob NOT NULL, + compression_block INTEGER NOT NULL, INDEX (compression_block), - INDEX USING HASH (hash(16)), + INDEX USING HASH (hash(8)), primary key (id) ); GRANT ALL ON obsdb.rollup TO obscuro; create table if not exists obsdb.batch_body ( - id int NOT NULL, + id INTEGER, content mediumblob NOT NULL, primary key (id) ); @@ -78,9 +78,9 @@ GRANT ALL ON obsdb.batch_body TO obscuro; create table if not exists obsdb.batch ( - sequence int, + sequence INTEGER, converted_hash binary(32) NOT NULL, - hash binary(32) NOT NULL, + hash binary(32) NOT NULL, height int NOT NULL, is_canonical boolean NOT NULL, header blob NOT NULL, @@ -89,36 +89,34 @@ create table if not exists obsdb.batch l1_proof INTEGER, is_executed boolean NOT NULL, primary key (sequence), - INDEX USING HASH (hash(16)), 
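The events.go change in patch 14 above, naming every column in the insert, is what makes the surrogate `id INTEGER AUTO_INCREMENT` column a safe addition: a positional `insert into events values (...)` breaks as soon as the column count changes, while a named list simply omits the auto-assigned id. The resilient shape, in miniature:

// The id column is never mentioned, so the engine assigns it; appending
// repeat("(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", ",", n) batches n rows per statement.
const eventInsert = "insert into events " +
	"(topic0,topic1,topic2,topic3,topic4,datablob,log_idx,address,lifecycle_event," +
	"rel_address1,rel_address2,rel_address3,rel_address4,tx,batch) values "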
- INDEX (body), - INDEX (height, is_canonical), - INDEX (l1_proof) + INDEX USING HASH (hash(8)), + INDEX (body, l1_proof), + INDEX (height) ); GRANT ALL ON obsdb.batch TO obscuro; create table if not exists obsdb.tx ( id INTEGER AUTO_INCREMENT, - hash binary(32), + hash binary(32) NOT NULL, content mediumblob NOT NULL, sender_address binary(20) NOT NULL, nonce int NOT NULL, idx int NOT NULL, body int NOT NULL, - INDEX (body), - INDEX USING HASH (hash(16)), + INDEX USING HASH (hash(8)), primary key (id) ); GRANT ALL ON obsdb.tx TO obscuro; create table if not exists obsdb.exec_tx ( - id INTEGER AUTO_INCREMENT, - created_contract_address binary(20), - receipt mediumblob, - tx int, - batch int NOT NULL, - INDEX (batch,tx), + id INTEGER AUTO_INCREMENT, + created_contract_address binary(20), + receipt mediumblob, + tx int, + batch int NOT NULL, + INDEX (batch), INDEX (tx, created_contract_address(4)), primary key (id) ); @@ -126,6 +124,7 @@ GRANT ALL ON obsdb.exec_tx TO obscuro; create table if not exists obsdb.events ( + id INTEGER AUTO_INCREMENT, topic0 binary(32) NOT NULL, topic1 binary(32), topic2 binary(32), @@ -139,18 +138,19 @@ create table if not exists obsdb.events rel_address2 binary(20), rel_address3 binary(20), rel_address4 binary(20), - tx int, + tx int NOT NULL, batch int NOT NULL, - INDEX (batch, tx), - INDEX USING HASH (address(16)), - INDEX USING HASH (rel_address1(16)), - INDEX USING HASH (rel_address2(16)), - INDEX USING HASH (rel_address3(16)), - INDEX USING HASH (rel_address4(16)), - INDEX USING HASH (topic0(16)), - INDEX USING HASH (topic1(16)), - INDEX USING HASH (topic2(16)), - INDEX USING HASH (topic3(16)), - INDEX USING HASH (topic4(16)) + primary key (id), + INDEX (tx, batch), + INDEX USING HASH (address(8)), + INDEX USING HASH (rel_address1(8)), + INDEX USING HASH (rel_address2(8)), + INDEX USING HASH (rel_address3(8)), + INDEX USING HASH (rel_address4(8)), + INDEX USING HASH (topic0(8)), + INDEX USING HASH (topic1(8)), + INDEX USING HASH (topic2(8)), + INDEX USING HASH (topic3(8)), + INDEX USING HASH (topic4(8)) ); GRANT ALL ON obsdb.events TO obscuro; \ No newline at end of file diff --git a/go/enclave/storage/init/sqlite/001_init.sql b/go/enclave/storage/init/sqlite/001_init.sql index 6244458ad5..14daa713b0 100644 --- a/go/enclave/storage/init/sqlite/001_init.sql +++ b/go/enclave/storage/init/sqlite/001_init.sql @@ -1,8 +1,8 @@ create table if not exists keyvalue ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - ky varbinary(64), - val mediumblob NOT NULL + id INTEGER PRIMARY KEY AUTOINCREMENT, + ky varbinary(64), + val mediumblob NOT NULL ); create index IDX_KV on keyvalue (ky); @@ -68,13 +68,13 @@ create table if not exists batch sequence int primary key, converted_hash binary(32), hash binary(32) NOT NULL, - height int NOT NULL, - is_canonical boolean NOT NULL, - header blob NOT NULL, - body int NOT NULL REFERENCES batch_body, + height int NOT NULL, + is_canonical boolean NOT NULL, + header blob NOT NULL, + body int NOT NULL REFERENCES batch_body, l1_proof_hash binary(32), l1_proof INTEGER, -- normally this would be a FK, but there is a weird edge case where an L2 node might not have the block used to create this batch - is_executed boolean NOT NULL + is_executed boolean NOT NULL -- the unique constraint is commented for now because there might be multiple non-canonical batches for the same height -- unique (height, is_canonical, is_executed) ); @@ -98,19 +98,20 @@ create index IDX_TX_BODY on tx (body); create table if not exists exec_tx ( - id INTEGER PRIMARY KEY 
AUTOINCREMENT, - created_contract_address binary(20), - receipt mediumblob, + id INTEGER PRIMARY KEY AUTOINCREMENT, + created_contract_address binary(20), + receipt mediumblob, -- commenting out the fk until synthetic transactions are also stored - tx INTEGER, - batch INTEGER NOT NULL REFERENCES batch + tx INTEGER, + batch INTEGER NOT NULL REFERENCES batch ); -create index IDX_EX_TX_BATCH on exec_tx (batch,tx); +create index IDX_EX_TX_BATCH on exec_tx (batch, tx); create index IDX_EX_TX_CCA on exec_tx (created_contract_address); -- todo denormalize. Extract contract and user table and point topic0 and rel_addreses to it create table if not exists events ( + id INTEGER PRIMARY KEY AUTOINCREMENT, topic0 binary(32) NOT NULL, topic1 binary(32), topic2 binary(32), From 40b3bf6103ef925471e9d6bce6a1e72f39d8c55d Mon Sep 17 00:00:00 2001 From: Tudor Malene Date: Mon, 13 May 2024 10:01:04 +0100 Subject: [PATCH 15/15] tweaking --- .../storage/init/edgelessdb/001_init.sql | 4 +- go/enclave/storage/init/sqlite/001_init.sql | 60 ++++++++++--------- 2 files changed, 36 insertions(+), 28 deletions(-) diff --git a/go/enclave/storage/init/edgelessdb/001_init.sql b/go/enclave/storage/init/edgelessdb/001_init.sql index b118cd1111..8dc56204a6 100644 --- a/go/enclave/storage/init/edgelessdb/001_init.sql +++ b/go/enclave/storage/init/edgelessdb/001_init.sql @@ -7,6 +7,7 @@ create table if not exists obsdb.keyvalue ky varbinary(64) NOT NULL, val mediumblob NOT NULL, primary key (id), + UNIQUE (ky), INDEX USING HASH (ky) ); GRANT ALL ON obsdb.keyvalue TO obscuro; @@ -85,11 +86,12 @@ create table if not exists obsdb.batch is_canonical boolean NOT NULL, header blob NOT NULL, body int NOT NULL, - l1_proof_hash binary(32), + l1_proof_hash binary(32) NOT NULL, l1_proof INTEGER, is_executed boolean NOT NULL, primary key (sequence), INDEX USING HASH (hash(8)), + INDEX USING HASH (l1_proof_hash(8)), INDEX (body, l1_proof), INDEX (height) ); diff --git a/go/enclave/storage/init/sqlite/001_init.sql b/go/enclave/storage/init/sqlite/001_init.sql index 14daa713b0..6f07aaa91a 100644 --- a/go/enclave/storage/init/sqlite/001_init.sql +++ b/go/enclave/storage/init/sqlite/001_init.sql @@ -1,8 +1,8 @@ create table if not exists keyvalue ( id INTEGER PRIMARY KEY AUTOINCREMENT, - ky varbinary(64), - val mediumblob NOT NULL + ky varbinary(64) UNIQUE NOT NULL, + val mediumblob NOT NULL ); create index IDX_KV on keyvalue (ky); @@ -25,10 +25,10 @@ create table if not exists attestation_key create table if not exists block ( id INTEGER PRIMARY KEY AUTOINCREMENT, - hash binary(32), - is_canonical boolean NOT NULL, - header blob NOT NULL, - height int NOT NULL + hash binary(32) NOT NULL, + is_canonical boolean NOT NULL, + header blob NOT NULL, + height int NOT NULL -- the unique constraint is commented for now because there might be multiple non-canonical blocks for the same height -- unique (height, is_canonical) ); @@ -40,19 +40,19 @@ create table if not exists l1_msg id INTEGER PRIMARY KEY AUTOINCREMENT, message varbinary(1024) NOT NULL, block INTEGER NOT NULL REFERENCES block, - is_transfer boolean + is_transfer boolean NOT NULL ); create index L1_MSG_BLOCK_IDX on l1_msg (block); create table if not exists rollup ( id INTEGER PRIMARY KEY AUTOINCREMENT, - hash binary(32), - start_seq int NOT NULL, - end_seq int NOT NULL, - time_stamp int NOT NULL, - header blob NOT NULL, - compression_block INTEGER NOT NULL REFERENCES block + hash binary(32) NOT NULL, + start_seq int NOT NULL, + end_seq int NOT NULL, + time_stamp int NOT NULL, + header 
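The `UNIQUE (ky)` constraint added in patch 15 above is load-bearing for the key-value semantics: `replace into` detects a conflict only through a primary key or a unique index, and with the surrogate id as the primary key, re-putting an existing key would otherwise insert a second row rather than overwrite the first. A sketch of the behaviour the constraint guarantees, which applies in both SQLite and MySQL/Edgeless DB:

// With UNIQUE(ky), the second replace displaces the first row, so a read of
// "k" sees "v2"; without it, both rows would persist and Get would be ambiguous.
func upsertTwice(ctx context.Context, db *sql.DB) error {
	for _, v := range [][]byte{[]byte("v1"), []byte("v2")} {
		_, err := db.ExecContext(ctx, "replace into keyvalue (ky, val) values (?, ?)", []byte("k"), v)
		if err != nil {
			return err
		}
	}
	return nil
}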
blob NOT NULL,
+    compression_block INTEGER NOT NULL REFERENCES block
 );
 create index ROLLUP_COMPRESSION_BLOCK_IDX on rollup (compression_block);
 create index ROLLUP_COMPRESSION_HASH_IDX on rollup (hash);
@@ -66,35 +66,34 @@ create table if not exists batch
 (
     sequence int primary key,
-    converted_hash binary(32),
+    converted_hash binary(32) NOT NULL,
     hash binary(32) NOT NULL,
     height int NOT NULL,
     is_canonical boolean NOT NULL,
     header blob NOT NULL,
     body int NOT NULL REFERENCES batch_body,
-    l1_proof_hash binary(32),
+    l1_proof_hash binary(32) NOT NULL,
     l1_proof INTEGER, -- normally this would be a FK, but there is a weird edge case where an L2 node might not have the block used to create this batch
     is_executed boolean NOT NULL
 -- the unique constraint is commented for now because there might be multiple non-canonical batches for the same height
 -- unique (height, is_canonical, is_executed)
 );
 create index IDX_BATCH_HASH on batch (hash);
-create index IDX_BATCH_HEIGHT on batch (height, is_canonical);
-create index IDX_BATCH_BLOCK on batch (l1_proof);
-create index IDX_BATCH_BODY on batch (body);
+create index IDX_BATCH_BLOCK on batch (l1_proof_hash);
+create index IDX_BATCH_BODY on batch (body, l1_proof);
+create index IDX_BATCH_HEIGHT on batch (height);

 create table if not exists tx
 (
     id INTEGER PRIMARY KEY AUTOINCREMENT,
-    hash binary(32),
+    hash binary(32) NOT NULL,
     content mediumblob NOT NULL,
     sender_address binary(20) NOT NULL,
     nonce int NOT NULL,
     idx int NOT NULL,
-    body int REFERENCES batch_body
+    body int NOT NULL REFERENCES batch_body
 );
 create index IDX_TX_HASH on tx (hash);
-create index IDX_TX_BODY on tx (body);

 create table if not exists exec_tx
 (
@@ -105,8 +104,8 @@ create table if not exists exec_tx
     tx INTEGER,
     batch INTEGER NOT NULL REFERENCES batch
 );
-create index IDX_EX_TX_BATCH on exec_tx (batch, tx);
-create index IDX_EX_TX_CCA on exec_tx (created_contract_address);
+create index IDX_EX_TX_BATCH on exec_tx (batch);
+create index IDX_EX_TX_CCA on exec_tx (tx, created_contract_address);

 -- todo denormalize. Extract contract and user table and point topic0 and rel_addresses to it
 create table if not exists events
 (
     id INTEGER PRIMARY KEY AUTOINCREMENT,
     topic0 binary(32) NOT NULL,
     topic1 binary(32),
     topic2 binary(32),
     topic3 binary(32),
     topic4 binary(32),
     datablob mediumblob,
     log_idx int NOT NULL,
     address binary(20) NOT NULL,
     lifecycle_event boolean NOT NULL,
     rel_address1 binary(20),
     rel_address2 binary(20),
     rel_address3 binary(20),
     rel_address4 binary(20),
-    tx INTEGER,
+    tx INTEGER NOT NULL references tx,
     batch INTEGER NOT NULL REFERENCES batch
 );
-create index IDX_BATCH_TX on events (batch, tx);
+create index IDX_BATCH_TX on events (tx, batch);
 create index IDX_AD on events (address);
-create index IDX_RAD1 on events (rel_address1, rel_address2, rel_address3, rel_address4);
-create index IDX_T0 on events (topic0, topic1, topic2, topic3, topic4);
+create index IDX_RAD1 on events (rel_address1);
+create index IDX_RAD2 on events (rel_address2);
+create index IDX_RAD3 on events (rel_address3);
+create index IDX_RAD4 on events (rel_address4);
+create index IDX_T0 on events (topic0);
+create index IDX_T1 on events (topic1);
+create index IDX_T2 on events (topic2);
+create index IDX_T3 on events (topic3);
+create index IDX_T4 on events (topic4);
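The series thus ends, as far as the event indexes are concerned, with one index per topic and per rel_address column rather than the composite forms tried in the intermediate patches. That fits the relevancy query's shape: an OR across four separately indexed columns can be answered by probing each index and unioning the results (SQLite's OR optimization, index_merge on the MySQL side), whereas a composite index would only have covered its leftmost column. As a closing sketch, the query the final schema is tuned for, assuming the baseEventsJoin fragment from events.go (the composed form here is illustrative; loadLogs appends further conditions):

// Hypothetical final form of the relevancy lookup: every OR arm lines up with
// one of the single-column indexes created above.
var relevantLogsQuery = "select topic0, topic1, topic2, topic3, topic4, datablob, b.hash, b.height, tx.hash, tx.idx, log_idx, address " +
	baseEventsJoin +
	" AND (lifecycle_event OR (rel_address1=? OR rel_address2=? OR rel_address3=? OR rel_address4=?))"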