diff --git a/go/common/query_types.go b/go/common/query_types.go
index eb6ed1f89c..b222fbab2a 100644
--- a/go/common/query_types.go
+++ b/go/common/query_types.go
@@ -49,7 +49,6 @@ type PublicTransaction struct {
 
 type PublicBatch struct {
 	SequencerOrderNo *big.Int    `json:"sequence"`
-	Hash             string      `json:"hash"`
 	FullHash         common.Hash `json:"fullHash"`
 	Height           *big.Int    `json:"height"`
 	TxCount          *big.Int    `json:"txCount"`
diff --git a/go/host/enclave/guardian.go b/go/host/enclave/guardian.go
index 9fcf7852f9..84881a8bfe 100644
--- a/go/host/enclave/guardian.go
+++ b/go/host/enclave/guardian.go
@@ -462,6 +462,12 @@ func (g *Guardian) processL1BlockTransactions(block *common.L1Block) {
 	// if there are any secret responses in the block we should refresh our P2P list to re-sync with the network
 	_, rollupTxs, contractAddressTxs := g.sl.L1Publisher().ExtractObscuroRelevantTransactions(block)
 
+	// TODO (@will) this should be removed and pulled from the L1
+	err := g.storage.AddBlock(block.Header())
+	if err != nil {
+		g.logger.Error("Could not add block to host db.", log.ErrKey, err)
+	}
+
 	for _, rollup := range rollupTxs {
 		r, err := common.DecodeRollup(rollup.Rollup)
 		if err != nil {
@@ -481,11 +487,6 @@ func (g *Guardian) processL1BlockTransactions(block *common.L1Block) {
 				g.logger.Error("Could not store rollup.", log.ErrKey, err)
 			}
 		}
-		// TODO (@will) this should be removed and pulled from the L1
-		err = g.storage.AddBlock(block.Header(), r.Header.Hash())
-		if err != nil {
-			g.logger.Error("Could not add block to host db.", log.ErrKey, err)
-		}
 	}
 
 	if len(contractAddressTxs) > 0 {
diff --git a/go/host/storage/hostdb/batch.go b/go/host/storage/hostdb/batch.go
index 1d1cfd69cb..735f89ed76 100644
--- a/go/host/storage/hostdb/batch.go
+++ b/go/host/storage/hostdb/batch.go
@@ -14,13 +14,13 @@ import (
 )
 
 const (
-	selectBatch        = "SELECT sequence, full_hash, hash, height, ext_batch FROM batch_host"
+	selectBatch        = "SELECT sequence, hash, height, ext_batch FROM batch_host"
 	selectExtBatch     = "SELECT ext_batch FROM batch_host"
-	selectLatestBatch  = "SELECT sequence, full_hash, hash, height, ext_batch FROM batch_host ORDER BY sequence DESC LIMIT 1"
+	selectLatestBatch  = "SELECT sequence, hash, height, ext_batch FROM batch_host ORDER BY sequence DESC LIMIT 1"
 	selectTxsAndBatch  = "SELECT t.hash FROM transaction_host t JOIN batch_host b ON t.b_sequence = b.sequence WHERE b.hash = "
 	selectBatchSeqByTx = "SELECT b_sequence FROM transaction_host WHERE hash = "
-	selectTxBySeq      = "SELECT full_hash FROM transaction_host WHERE b_sequence = "
-	selectBatchTxs     = "SELECT t.full_hash, b.sequence, b.height, b.ext_batch FROM transaction_host t JOIN batch_host b ON t.b_sequence = b.sequence"
+	selectTxBySeq      = "SELECT hash FROM transaction_host WHERE b_sequence = "
+	selectBatchTxs     = "SELECT t.hash, b.sequence, b.height, b.ext_batch FROM transaction_host t JOIN batch_host b ON t.b_sequence = b.sequence"
 )
 
 // AddBatch adds a batch and its header to the DB
@@ -33,7 +33,6 @@ func AddBatch(dbtx *dbTransaction, statements *SQLStatements, batch *common.ExtB
 	_, err = dbtx.tx.Exec(statements.InsertBatch,
 		batch.SeqNo().Uint64(),       // sequence
 		batch.Hash(),                 // full hash
-		truncTo16(batch.Hash()),      // shortened hash
 		batch.Header.Number.Uint64(), // height
 		extBatch,                     // ext_batch
 	)
@@ -45,11 +44,16 @@ func AddBatch(dbtx *dbTransaction, statements *SQLStatements, batch *common.ExtB
 	}
 
 	if len(batch.TxHashes) > 0 {
-		for _, txHash := range batch.TxHashes {
-			_, err = dbtx.tx.Exec(statements.InsertTransactions, truncTo16(txHash), txHash.Bytes(), batch.SeqNo().Uint64())
-			if err != nil {
-				return fmt.Errorf("failed to insert transaction with hash: %d", err)
-			}
+		insert := statements.InsertTransactions
+		args := make([]any, 0)
+		for i, txHash := range batch.TxHashes {
+			insert += fmt.Sprintf(" (%s, %s),", statements.GetPlaceHolder(i*2+1), statements.GetPlaceHolder(i*2+2))
+			args = append(args, txHash.Bytes(), batch.SeqNo().Uint64())
+		}
+		insert = strings.TrimRight(insert, ",")
+		_, err = dbtx.tx.Exec(insert, args...)
+		if err != nil {
+			return fmt.Errorf("failed to insert transactions. cause: %w", err)
 		}
 	}
 
@@ -60,7 +64,7 @@ func AddBatch(dbtx *dbTransaction, statements *SQLStatements, batch *common.ExtB
 	}
 
 	newTotal := currentTotal + len(batch.TxHashes)
-	_, err = dbtx.tx.Exec(statements.InsertTxCount, 1, newTotal)
+	_, err = dbtx.tx.Exec(statements.UpdateTxCount, newTotal)
 	if err != nil {
 		return fmt.Errorf("failed to update transaction count: %w", err)
 	}
@@ -170,7 +174,7 @@ func GetCurrentHeadBatch(db HostDB) (*common.PublicBatch, error) {
 // GetBatchHeader returns the batch header given the hash.
 func GetBatchHeader(db HostDB, hash gethcommon.Hash) (*common.BatchHeader, error) {
 	whereQuery := " WHERE hash=" + db.GetSQLStatement().Placeholder
-	return fetchBatchHeader(db.GetSQLDB(), whereQuery, truncTo16(hash))
+	return fetchBatchHeader(db.GetSQLDB(), whereQuery, hash.Bytes())
 }
 
 // GetBatchHashByNumber returns the hash of a batch given its number.
@@ -195,7 +199,7 @@ func GetHeadBatchHeader(db HostDB) (*common.BatchHeader, error) {
 
 // GetBatchNumber returns the height of the batch containing the given transaction hash.
 func GetBatchNumber(db HostDB, txHash gethcommon.Hash) (*big.Int, error) {
-	batchHeight, err := fetchBatchNumber(db, truncTo16(txHash))
+	batchHeight, err := fetchBatchNumber(db, txHash.Bytes())
 	if err != nil {
 		return nil, fmt.Errorf("failed to fetch batch height - %w", err)
 	}
@@ -205,7 +209,7 @@ func GetBatchNumber(db HostDB, txHash gethcommon.Hash) (*big.Int, error) {
 // GetBatchTxHashes returns the transaction hashes of the batch with the given hash.
 func GetBatchTxHashes(db HostDB, batchHash gethcommon.Hash) ([]gethcommon.Hash, error) {
 	query := selectTxsAndBatch + db.GetSQLStatement().Placeholder
-	rows, err := db.GetSQLDB().Query(query, truncTo16(batchHash))
+	rows, err := db.GetSQLDB().Query(query, batchHash.Bytes())
 	if err != nil {
 		return nil, fmt.Errorf("query execution failed: %w", err)
 	}
@@ -231,14 +235,14 @@ func GetBatchTxHashes(db HostDB, batchHash gethcommon.Hash) ([]gethcommon.Hash,
 // GetPublicBatch returns the batch with the given hash.
 func GetPublicBatch(db HostDB, hash common.L2BatchHash) (*common.PublicBatch, error) {
 	whereQuery := " WHERE b.hash=" + db.GetSQLStatement().Placeholder
-	return fetchPublicBatch(db.GetSQLDB(), whereQuery, truncTo16(hash))
+	return fetchPublicBatch(db.GetSQLDB(), whereQuery, hash.Bytes())
 }
 
 // GetBatchByTx returns the batch with the given hash.
 func GetBatchByTx(db HostDB, txHash gethcommon.Hash) (*common.ExtBatch, error) {
 	var seqNo uint64
 	query := selectBatchSeqByTx + db.GetSQLStatement().Placeholder
-	err := db.GetSQLDB().QueryRow(query, truncTo16(txHash)).Scan(&seqNo)
+	err := db.GetSQLDB().QueryRow(query, txHash.Bytes()).Scan(&seqNo)
 	if err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
 			return nil, errutil.ErrNotFound
@@ -251,7 +255,7 @@ func GetBatchByTx(db HostDB, txHash gethcommon.Hash) (*common.ExtBatch, error) {
 // GetBatchByHash returns the batch with the given hash.
 func GetBatchByHash(db HostDB, hash common.L2BatchHash) (*common.ExtBatch, error) {
 	whereQuery := " WHERE hash=" + db.GetSQLStatement().Placeholder
-	return fetchFullBatch(db.GetSQLDB(), whereQuery, truncTo16(hash))
+	return fetchFullBatch(db.GetSQLDB(), whereQuery, hash.Bytes())
 }
 
 // GetLatestBatch returns the head batch header
@@ -278,7 +282,7 @@ func GetBatchByHeight(db HostDB, height *big.Int) (*common.PublicBatch, error) {
 // GetBatchTransactions returns the TransactionListingResponse for a given batch hash
 func GetBatchTransactions(db HostDB, batchHash gethcommon.Hash) (*common.TransactionListingResponse, error) {
 	whereQuery := " WHERE b.hash=" + db.GetSQLStatement().Placeholder
-	return fetchBatchTxs(db.GetSQLDB(), whereQuery, truncTo16(batchHash))
+	return fetchBatchTxs(db.GetSQLDB(), whereQuery, batchHash.Bytes())
 }
 
 func fetchBatchHeader(db *sql.DB, whereQuery string, args ...any) (*common.BatchHeader, error) {
@@ -330,7 +334,6 @@ func fetchBatchNumber(db HostDB, args ...any) (*big.Int, error) {
 func fetchPublicBatch(db *sql.DB, whereQuery string, args ...any) (*common.PublicBatch, error) {
 	var sequenceInt64 uint64
 	var fullHash common.TxHash
-	var hash []byte
 	var heightInt64 int
 	var extBatch []byte
 
@@ -338,9 +341,9 @@ func fetchPublicBatch(db *sql.DB, whereQuery string, args ...any) (*common.Publi
 	var err error
 
 	if len(args) > 0 {
-		err = db.QueryRow(query, args...).Scan(&sequenceInt64, &fullHash, &hash, &heightInt64, &extBatch)
+		err = db.QueryRow(query, args...).Scan(&sequenceInt64, &fullHash, &heightInt64, &extBatch)
 	} else {
-		err = db.QueryRow(query).Scan(&sequenceInt64, &fullHash, &hash, &heightInt64, &extBatch)
+		err = db.QueryRow(query).Scan(&sequenceInt64, &fullHash, &heightInt64, &extBatch)
 	}
 	if err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
@@ -356,7 +359,6 @@ func fetchPublicBatch(db *sql.DB, whereQuery string, args ...any) (*common.Publi
 
 	batch := &common.PublicBatch{
 		SequencerOrderNo: new(big.Int).SetInt64(int64(sequenceInt64)),
-		Hash:             bytesToHexString(hash),
 		FullHash:         fullHash,
 		Height:           new(big.Int).SetInt64(int64(heightInt64)),
 		TxCount:          new(big.Int).SetInt64(int64(len(b.TxHashes))),
@@ -370,7 +372,6 @@ func fetchPublicBatch(db *sql.DB, whereQuery string, args ...any) (*common.Publi
 func fetchFullBatch(db *sql.DB, whereQuery string, args ...any) (*common.ExtBatch, error) {
 	var sequenceInt64 uint64
 	var fullHash common.TxHash
-	var hash []byte
 	var heightInt64 int
 	var extBatch []byte
 
@@ -378,9 +379,9 @@ func fetchFullBatch(db *sql.DB, whereQuery string, args ...any) (*common.ExtBatc
 	var err error
 
 	if len(args) > 0 {
-		err = db.QueryRow(query, args...).Scan(&sequenceInt64, &fullHash, &hash, &heightInt64, &extBatch)
+		err = db.QueryRow(query, args...).Scan(&sequenceInt64, &fullHash, &heightInt64, &extBatch)
 	} else {
-		err = db.QueryRow(query).Scan(&sequenceInt64, &fullHash, &hash, &heightInt64, &extBatch)
+		err = db.QueryRow(query).Scan(&sequenceInt64, &fullHash, &heightInt64, &extBatch)
 	}
 	if err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
@@ -400,11 +401,10 @@ func fetchFullBatch(db *sql.DB, whereQuery string, args ...any) (*common.ExtBatc
 func fetchHeadBatch(db *sql.DB) (*common.PublicBatch, error) {
 	var sequenceInt64 int
 	var fullHash gethcommon.Hash // common.Hash
-	var hash []byte
 	var heightInt64 int
 	var extBatch []byte
 
-	err := db.QueryRow(selectLatestBatch).Scan(&sequenceInt64, &fullHash, &hash, &heightInt64, &extBatch)
+	err := db.QueryRow(selectLatestBatch).Scan(&sequenceInt64, &fullHash, &heightInt64, &extBatch)
 	if err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
 			return nil, errutil.ErrNotFound
@@ -420,7 +420,6 @@ func fetchHeadBatch(db *sql.DB) (*common.PublicBatch, error) {
 
 	batch := &common.PublicBatch{
 		SequencerOrderNo: new(big.Int).SetInt64(int64(sequenceInt64)),
-		Hash:             bytesToHexString(hash),
 		FullHash:         fullHash,
 		Height:           new(big.Int).SetInt64(int64(heightInt64)),
 		TxCount:          new(big.Int).SetInt64(int64(len(b.TxHashes))),
diff --git a/go/host/storage/hostdb/block.go b/go/host/storage/hostdb/block.go
index 922551d9c5..e90dc323f3 100644
--- a/go/host/storage/hostdb/block.go
+++ b/go/host/storage/hostdb/block.go
@@ -9,25 +9,19 @@ import (
 )
 
 const (
-	selectBlocks = "SELECT id, hash, header, rollup_hash FROM block_host ORDER BY id DESC "
+	selectBlocks = "SELECT b.id, b.hash, b.header, r.hash FROM block_host b join rollup_host r on r.compression_block=b.id ORDER BY id DESC "
 )
 
 // AddBlock stores a block header with the given rollupHash it contains in the host DB
-func AddBlock(dbtx *dbTransaction, statements *SQLStatements, b *types.Header, rollupHash common.L2RollupHash) error {
+func AddBlock(dbtx *dbTransaction, statements *SQLStatements, b *types.Header) error {
 	header, err := rlp.EncodeToBytes(b)
 	if err != nil {
 		return fmt.Errorf("could not encode block header. Cause: %w", err)
 	}
-	r, err := rlp.EncodeToBytes(rollupHash)
-	if err != nil {
-		return fmt.Errorf("could not encode rollup hash transactions: %w", err)
-	}
-
 	_, err = dbtx.tx.Exec(statements.InsertBlock,
-		b.Hash(), // hash
-		header,   // l1 block header
-		r,        // rollup hash
+		b.Hash().Bytes(), // hash
+		header,           // l1 block header
 	)
 	if err != nil {
 		return fmt.Errorf("could not insert block. Cause: %w", err)
diff --git a/go/host/storage/hostdb/rollup.go b/go/host/storage/hostdb/rollup.go
index a07edea92d..8e1b02430d 100644
--- a/go/host/storage/hostdb/rollup.go
+++ b/go/host/storage/hostdb/rollup.go
@@ -14,10 +14,10 @@ import (
 )
 
 const (
-	selectExtRollup     = "SELECT ext_rollup from rollup_host r"
+	selectExtRollup     = "SELECT ext_rollup from rollup_host r join block_host b on r.compression_block=b.id "
 	selectLatestRollup  = "SELECT ext_rollup FROM rollup_host ORDER BY time_stamp DESC LIMIT 1"
-	selectRollupBatches = "SELECT b.sequence, b.hash, b.full_hash, b.height, b.ext_batch FROM rollup_host r JOIN batch_host b ON r.start_seq <= b.sequence AND r.end_seq >= b.sequence"
-	selectRollups       = "SELECT id, hash, start_seq, end_seq, time_stamp, ext_rollup, compression_block FROM rollup_host"
+	selectRollupBatches = "SELECT b.sequence, b.hash, b.height, b.ext_batch FROM rollup_host r JOIN batch_host b ON r.start_seq <= b.sequence AND r.end_seq >= b.sequence"
+	selectRollups       = "SELECT rh.id, rh.hash, rh.start_seq, rh.end_seq, rh.time_stamp, rh.ext_rollup, bh.hash FROM rollup_host rh join block_host bh on rh.compression_block=bh.id "
 )
 
 // AddRollup adds a rollup to the DB
@@ -27,13 +27,19 @@ func AddRollup(dbtx *dbTransaction, statements *SQLStatements, rollup *common.Ex
 		return fmt.Errorf("could not encode rollup: %w", err)
 	}
 
+	var blockId int
+	err = dbtx.tx.QueryRow("select id from block_host where hash="+statements.Placeholder, block.Hash().Bytes()).Scan(&blockId)
+	if err != nil {
+		return fmt.Errorf("could not read block id: %w", err)
+	}
+
 	_, err = dbtx.tx.Exec(statements.InsertRollup,
-		truncTo16(rollup.Header.Hash()),      // short hash
+		rollup.Header.Hash().Bytes(),         // hash
 		metadata.FirstBatchSequence.Uint64(), // first batch sequence
 		rollup.Header.LastBatchSeqNo,         // last batch sequence
 		metadata.StartTime,                   // timestamp
 		extRollup,                            // rollup blob
-		block.Hash(),                         // l1 block hash
+		blockId,                              // l1 block hash
 	)
 	if err != nil {
 		return fmt.Errorf("could not insert rollup. Cause: %w", err)
@@ -44,7 +50,7 @@ func AddRollup(dbtx *dbTransaction, statements *SQLStatements, rollup *common.Ex
 
 // GetRollupListing returns latest rollups given a pagination.
 // For example, offset 1, size 10 will return the latest 11-20 rollups.
 func GetRollupListing(db HostDB, pagination *common.QueryPagination) (*common.RollupListingResponse, error) {
-	orderQuery := " ORDER BY id DESC "
+	orderQuery := " ORDER BY rh.id DESC "
 	query := selectRollups + orderQuery + db.GetSQLStatement().Pagination
 
 	rows, err := db.GetSQLDB().Query(query, pagination.Size, pagination.Offset)
@@ -92,18 +98,18 @@ func GetRollupListing(db HostDB, pagination *common.QueryPagination) (*common.Ro
 
 func GetExtRollup(db HostDB, hash gethcommon.Hash) (*common.ExtRollup, error) {
 	whereQuery := " WHERE r.hash=" + db.GetSQLStatement().Placeholder
-	return fetchExtRollup(db.GetSQLDB(), whereQuery, truncTo16(hash))
+	return fetchExtRollup(db.GetSQLDB(), whereQuery, hash.Bytes())
 }
 
 // GetRollupHeader returns the rollup with the given hash.
 func GetRollupHeader(db HostDB, hash gethcommon.Hash) (*common.RollupHeader, error) {
 	whereQuery := " WHERE r.hash=" + db.GetSQLStatement().Placeholder
-	return fetchRollupHeader(db.GetSQLDB(), whereQuery, truncTo16(hash))
+	return fetchRollupHeader(db.GetSQLDB(), whereQuery, hash.Bytes())
 }
 
 // GetRollupHeaderByBlock returns the rollup for the given block
 func GetRollupHeaderByBlock(db HostDB, blockHash gethcommon.Hash) (*common.RollupHeader, error) {
-	whereQuery := " WHERE r.compression_block=" + db.GetSQLStatement().Placeholder
+	whereQuery := " WHERE b.hash=" + db.GetSQLStatement().Placeholder
 	return fetchRollupHeader(db.GetSQLDB(), whereQuery, blockHash)
 }
 
@@ -118,7 +124,7 @@ func GetLatestRollup(db HostDB) (*common.RollupHeader, error) {
 
 func GetRollupByHash(db HostDB, rollupHash gethcommon.Hash) (*common.PublicRollup, error) {
 	whereQuery := " WHERE hash=" + db.GetSQLStatement().Placeholder
-	return fetchPublicRollup(db.GetSQLDB(), whereQuery, truncTo16(rollupHash))
+	return fetchPublicRollup(db.GetSQLDB(), whereQuery, rollupHash.Bytes())
 }
 
 func GetRollupBySeqNo(db HostDB, seqNo uint64) (*common.PublicRollup, error) {
@@ -130,7 +136,7 @@ func GetRollupBatches(db HostDB, rollupHash gethcommon.Hash) (*common.BatchListi
 	whereQuery := " WHERE r.hash=" + db.GetSQLStatement().Placeholder
 	orderQuery := " ORDER BY b.height DESC"
 	query := selectRollupBatches + whereQuery + orderQuery
-	rows, err := db.GetSQLDB().Query(query, truncTo16(rollupHash))
+	rows, err := db.GetSQLDB().Query(query, rollupHash.Bytes())
 	if err != nil {
 		return nil, fmt.Errorf("query execution for select rollup batches failed: %w", err)
 	}
@@ -140,12 +146,11 @@ func GetRollupBatches(db HostDB, rollupHash gethcommon.Hash) (*common.BatchListi
 	for rows.Next() {
 		var (
 			sequenceInt64 int
-			hash          []byte
 			fullHash      gethcommon.Hash
 			heightInt64   int
 			extBatch      []byte
 		)
-		err := rows.Scan(&sequenceInt64, &hash, &fullHash, &heightInt64, &extBatch)
+		err := rows.Scan(&sequenceInt64, &fullHash, &heightInt64, &extBatch)
 		if err != nil {
 			if errors.Is(err, sql.ErrNoRows) {
 				return nil, errutil.ErrNotFound
@@ -160,7 +165,6 @@ func GetRollupBatches(db HostDB, rollupHash gethcommon.Hash) (*common.BatchListi
 
 		batch := common.PublicBatch{
 			SequencerOrderNo: new(big.Int).SetInt64(int64(sequenceInt64)),
-			Hash:             bytesToHexString(hash),
 			FullHash:         fullHash,
 			Height:           new(big.Int).SetInt64(int64(heightInt64)),
 			TxCount:          new(big.Int).SetInt64(int64(len(b.TxHashes))),
diff --git a/go/host/storage/hostdb/sql_statements.go b/go/host/storage/hostdb/sql_statements.go
index 9a7021c576..ccec07ef4d 100644
--- a/go/host/storage/hostdb/sql_statements.go
+++ b/go/host/storage/hostdb/sql_statements.go
@@ -1,23 +1,32 @@
 package hostdb
 
+import "strconv"
+
 // SQLStatements struct holds SQL statements for a specific database type
 type SQLStatements struct {
 	InsertBatch        string
 	InsertTransactions string
-	InsertTxCount      string
+	UpdateTxCount      string
 	InsertRollup       string
 	InsertBlock        string
 	Pagination         string
 	Placeholder        string
 }
 
+func (s SQLStatements) GetPlaceHolder(pos int) string {
+	if s.Placeholder == "?" {
+		return s.Placeholder
+	}
+	return "$" + strconv.Itoa(pos)
+}
+
 func SQLiteSQLStatements() *SQLStatements {
 	return &SQLStatements{
-		InsertBatch:        "INSERT INTO batch_host (sequence, full_hash, hash, height, ext_batch) VALUES (?, ?, ?, ?, ?)",
-		InsertTransactions: "REPLACE INTO transaction_host (hash, full_hash, b_sequence) VALUES (?, ?, ?)",
-		InsertTxCount:      "INSERT INTO transaction_count (id, total) VALUES (?, ?) ON CONFLICT(id) DO UPDATE SET total = EXCLUDED.total",
+		InsertBatch:        "INSERT INTO batch_host (sequence, hash, height, ext_batch) VALUES (?, ?, ?, ?)",
+		InsertTransactions: "INSERT INTO transaction_host (hash, b_sequence) VALUES ",
+		UpdateTxCount:      "UPDATE transaction_count SET total=? WHERE id=1",
 		InsertRollup:       "INSERT INTO rollup_host (hash, start_seq, end_seq, time_stamp, ext_rollup, compression_block) values (?,?,?,?,?,?)",
-		InsertBlock:        "REPLACE INTO block_host (hash, header, rollup_hash) values (?,?,?)",
+		InsertBlock:        "INSERT INTO block_host (hash, header) values (?,?)",
 		Pagination:         "LIMIT ? OFFSET ?",
 		Placeholder:        "?",
 	}
@@ -25,11 +34,11 @@ func SQLiteSQLStatements() *SQLStatements {
 
 func PostgresSQLStatements() *SQLStatements {
 	return &SQLStatements{
-		InsertBatch:        "INSERT INTO batch_host (sequence, full_hash, hash, height, ext_batch) VALUES ($1, $2, $3, $4, $5)",
-		InsertTransactions: "INSERT INTO transaction_host (hash, full_hash, b_sequence) VALUES ($1, $2, $3) ON CONFLICT (hash) DO NOTHING",
-		InsertTxCount:      "INSERT INTO transaction_count (id, total) VALUES ($1, $2) ON CONFLICT (id) DO UPDATE SET total = EXCLUDED.total",
+		InsertBatch:        "INSERT INTO batch_host (sequence, hash, height, ext_batch) VALUES ($1, $2, $3, $4)",
+		InsertTransactions: "INSERT INTO transaction_host (hash, b_sequence) VALUES ",
+		UpdateTxCount:      "UPDATE transaction_count SET total=$1 WHERE id=1",
 		InsertRollup:       "INSERT INTO rollup_host (hash, start_seq, end_seq, time_stamp, ext_rollup, compression_block) values ($1, $2, $3, $4, $5, $6)",
-		InsertBlock:        "INSERT INTO block_host (hash, header, rollup_hash) VALUES ($1, $2, $3) ON CONFLICT (hash) DO NOTHING",
+		InsertBlock:        "INSERT INTO block_host (hash, header) VALUES ($1, $2)",
 		Pagination:         "LIMIT $1 OFFSET $2",
 		Placeholder:        "$1",
 	}
diff --git a/go/host/storage/hostdb/transaction.go b/go/host/storage/hostdb/transaction.go
index 49a4b0507e..f2fb4c53a6 100644
--- a/go/host/storage/hostdb/transaction.go
+++ b/go/host/storage/hostdb/transaction.go
@@ -11,8 +11,8 @@ import (
 
 const (
 	selectTxCount = "SELECT total FROM transaction_count WHERE id = 1"
-	selectTx      = "SELECT full_hash, b_sequence FROM transaction_host WHERE hash = "
-	selectTxs     = "SELECT t.full_hash, b.ext_batch FROM transaction_host t JOIN batch_host b ON t.b_sequence = b.sequence ORDER BY b.height DESC "
+	selectTx      = "SELECT hash, b_sequence FROM transaction_host WHERE hash = "
+	selectTxs     = "SELECT t.hash, b.ext_batch FROM transaction_host t JOIN batch_host b ON t.b_sequence = b.sequence ORDER BY b.height DESC "
 	countTxs      = "SELECT COUNT(b_sequence) AS row_count FROM transaction_host"
 )
 
@@ -63,7 +63,7 @@ func GetTransaction(db HostDB, hash gethcommon.Hash) (*common.PublicTransaction,
 
 	var fullHash []byte
 	var seq int
-	err := db.GetSQLDB().QueryRow(query, truncTo16(hash)).Scan(&fullHash, &seq)
+	err := db.GetSQLDB().QueryRow(query, hash.Bytes()).Scan(&fullHash, &seq)
 	if err != nil {
 		return nil, fmt.Errorf("failed to retrieve transaction sequence number: %w", err)
 	}
diff --git a/go/host/storage/hostdb/utils.go b/go/host/storage/hostdb/utils.go
index c1e75150f0..b728031ad5 100644
--- a/go/host/storage/hostdb/utils.go
+++ b/go/host/storage/hostdb/utils.go
@@ -5,62 +5,12 @@ import (
 	"fmt"
 	"testing"
 
-	gethcommon "github.com/ethereum/go-ethereum/common"
 	"github.com/ten-protocol/go-ten/go/host/storage/init/sqlite"
 )
 
-const truncHash = 16
-
 // An arbitrary number to put in the header
 const batchNumber = 777
 
-// truncTo16 checks if the leading half of the hash is filled with zeros and decides whether to truncate the first or last 16 bytes.
-func truncTo16(hash gethcommon.Hash) []byte {
-	hashBytes := hash.Bytes()
-	// Check if the first half of the hash is all zeros
-	if isLeadingHalfZeros(hashBytes) {
-		return truncLastTo16(hashBytes)
-	}
-	return truncFirstTo16(hashBytes)
-}
-
-// isLeadingHalfZeros checks if the leading half of the hash is all zeros.
-func isLeadingHalfZeros(bytes []byte) bool {
-	halfLength := len(bytes) / 2
-	for i := 0; i < halfLength; i++ {
-		if bytes[i] != 0 {
-			return false
-		}
-	}
-	return true
-}
-
-// truncLastTo16 truncates the last 16 bytes of the hash.
-func truncLastTo16(bytes []byte) []byte {
-	if len(bytes) == 0 {
-		return bytes
-	}
-	start := len(bytes) - truncHash
-	if start < 0 {
-		start = 0
-	}
-	b := bytes[start:]
-	c := make([]byte, truncHash)
-	copy(c, b)
-	return c
-}
-
-// truncFirstTo16 truncates the first 16 bytes of the hash.
-func truncFirstTo16(bytes []byte) []byte {
-	if len(bytes) == 0 {
-		return bytes
-	}
-	b := bytes[0:truncHash]
-	c := make([]byte, truncHash)
-	copy(c, b)
-	return c
-}
-
 func createSQLiteDB(t *testing.T) (HostDB, error) {
 	hostDB, err := sqlite.CreateTemporarySQLiteHostDB("", "mode=memory")
 	if err != nil {
diff --git a/go/host/storage/init/postgres/001_init.sql b/go/host/storage/init/postgres/001_init.sql
index 87eece1b15..8f967f551a 100644
--- a/go/host/storage/init/postgres/001_init.sql
+++ b/go/host/storage/init/postgres/001_init.sql
@@ -1,9 +1,8 @@
 CREATE TABLE IF NOT EXISTS block_host
 (
     id SERIAL PRIMARY KEY,
-    hash BYTEA NOT NULL UNIQUE,
-    header BYTEA NOT NULL,
-    rollup_hash BYTEA NOT NULL
+    hash BYTEA NOT NULL,
+    header BYTEA NOT NULL
 );
 
 CREATE INDEX IF NOT EXISTS IDX_BLOCK_HASH_HOST ON block_host USING HASH (hash);
@@ -16,8 +15,9 @@ CREATE TABLE IF NOT EXISTS rollup_host
     end_seq INT NOT NULL,
     time_stamp INT NOT NULL,
     ext_rollup BYTEA NOT NULL,
-    compression_block BYTEA NOT NULL
-);
+    compression_block INT NOT NULL,
+    FOREIGN KEY (compression_block) REFERENCES block_host(id)
+    );
 
 CREATE INDEX IF NOT EXISTS IDX_ROLLUP_HASH_HOST ON rollup_host USING HASH (hash);
 CREATE INDEX IF NOT EXISTS IDX_ROLLUP_PROOF_HOST ON rollup_host (compression_block);
@@ -26,8 +26,7 @@ CREATE INDEX IF NOT EXISTS IDX_ROLLUP_SEQ_HOST ON rollup_host (start_seq, end_se
 CREATE TABLE IF NOT EXISTS batch_host
 (
     sequence INT PRIMARY KEY,
-    full_hash BYTEA NOT NULL,
-    hash BYTEA NOT NULL UNIQUE,
+    hash BYTEA NOT NULL ,
     height INT NOT NULL,
     ext_batch BYTEA NOT NULL
 );
@@ -37,12 +36,13 @@ CREATE INDEX IF NOT EXISTS IDX_BATCH_HEIGHT_HOST ON batch_host (height);
 
 CREATE TABLE IF NOT EXISTS transaction_host
 (
-    hash BYTEA PRIMARY KEY,
-    full_hash BYTEA NOT NULL UNIQUE,
+    id SERIAL PRIMARY KEY,
+    hash BYTEA,
     b_sequence INT,
     FOREIGN KEY (b_sequence) REFERENCES batch_host(sequence)
 );
 
+CREATE INDEX IF NOT EXISTS IDX_TX_HASH_HOST ON transaction_host USING HASH (hash);
 CREATE INDEX IF NOT EXISTS IDX_TX_SEQ_HOST ON transaction_host (b_sequence);
 
 CREATE TABLE IF NOT EXISTS transaction_count
diff --git a/go/host/storage/init/sqlite/001_init.sql b/go/host/storage/init/sqlite/001_init.sql
index c299176d58..371dad1d0a 100644
--- a/go/host/storage/init/sqlite/001_init.sql
+++ b/go/host/storage/init/sqlite/001_init.sql
@@ -2,8 +2,7 @@ create table if not exists block_host
 (
     id INTEGER PRIMARY KEY AUTOINCREMENT,
     hash binary(32) NOT NULL UNIQUE,
-    header blob NOT NULL,
-    rollup_hash binary(32) NOT NULL
+    header blob NOT NULL
 );
 
 create index IDX_BLOCK_HASH_HOST on block_host (hash);
@@ -11,12 +10,12 @@ create index IDX_BLOCK_HASH_HOST on block_host (hash);
 create table if not exists rollup_host
 (
     id INTEGER PRIMARY KEY AUTOINCREMENT,
-    hash binary(16) NOT NULL UNIQUE,
+    hash binary(32) NOT NULL UNIQUE,
     start_seq int NOT NULL,
     end_seq int NOT NULL,
     time_stamp int NOT NULL,
    ext_rollup blob NOT NULL,
-    compression_block binary(32) NOT NULL
+    compression_block int NOT NULL references block_host
 );
 
 create index IDX_ROLLUP_HASH_HOST on rollup_host (hash);
@@ -26,8 +25,7 @@ create index IDX_ROLLUP_SEQ_HOST on rollup_host (start_seq, end_seq);
 create table if not exists batch_host
 (
     sequence int primary key,
-    full_hash binary(32) NOT NULL,
-    hash binary(16) NOT NULL UNIQUE,
+    hash binary(32) NOT NULL,
     height int NOT NULL,
     ext_batch mediumblob NOT NULL
 );
@@ -36,10 +34,11 @@ create index IDX_BATCH_HEIGHT_HOST on batch_host (height);
 create table if not exists transaction_host
 (
-    hash binary(16) PRIMARY KEY,
-    full_hash binary(32) NOT NULL UNIQUE,
+    id int PRIMARY KEY,
+    hash binary(32) ,
     b_sequence int REFERENCES batch_host
 );
 
+create index TX_HASH_HOST on transaction_host (hash);
 create index IDX_TX_SEQ_HOST on transaction_host (b_sequence);
 
 create table if not exists transaction_count
 (
diff --git a/go/host/storage/interfaces.go b/go/host/storage/interfaces.go
index d7e229c36e..7abbf4fc9e 100644
--- a/go/host/storage/interfaces.go
+++ b/go/host/storage/interfaces.go
@@ -56,7 +56,7 @@ type BatchResolver interface {
 
 type BlockResolver interface {
 	// AddBlock stores block data containing rollups in the host DB
-	AddBlock(b *types.Header, rollupHash common.L2RollupHash) error
+	AddBlock(b *types.Header) error
 	// AddRollup stores a rollup in the host DB
 	AddRollup(rollup *common.ExtRollup, metadata *common.PublicRollupMetadata, block *common.L1Block) error
 	// FetchLatestRollupHeader returns the head `RollupHeader`
diff --git a/go/host/storage/storage.go b/go/host/storage/storage.go
index 6221993513..e1ac74b32b 100644
--- a/go/host/storage/storage.go
+++ b/go/host/storage/storage.go
@@ -75,13 +75,13 @@ func (s *storageImpl) AddRollup(rollup *common.ExtRollup, metadata *common.Publi
 	return nil
 }
 
-func (s *storageImpl) AddBlock(b *types.Header, rollupHash common.L2RollupHash) error {
+func (s *storageImpl) AddBlock(b *types.Header) error {
 	dbtx, err := s.db.NewDBTransaction()
 	if err != nil {
 		return err
 	}
 
-	if err := hostdb.AddBlock(dbtx, s.db.GetSQLStatement(), b, rollupHash); err != nil {
+	if err := hostdb.AddBlock(dbtx, s.db.GetSQLStatement(), b); err != nil {
 		if err := dbtx.Rollback(); err != nil {
 			return err
 		}