diff --git a/go/common/storage/utils.go b/go/common/storage/utils.go new file mode 100644 index 0000000000..1e517fb57e --- /dev/null +++ b/go/common/storage/utils.go @@ -0,0 +1,19 @@ +package storage + +import gethcommon "github.com/ethereum/go-ethereum/common" + +const truncHash = 16 + +func truncTo16(hash gethcommon.Hash) []byte { + return truncBTo16(hash.Bytes()) +} + +func truncBTo16(bytes []byte) []byte { + if len(bytes) == 0 { + return bytes + } + b := bytes[0:truncHash] + c := make([]byte, truncHash) + copy(c, b) + return c +} diff --git a/go/config/host_config.go b/go/config/host_config.go index 23fa494257..e5d3ec3bd9 100644 --- a/go/config/host_config.go +++ b/go/config/host_config.go @@ -191,8 +191,11 @@ type HostConfig struct { LogPath string // Whether the host should use in-memory or persistent storage UseInMemoryDB bool - // filepath for the levelDB persistence dir (can be empty if a throwaway file in /tmp/ is acceptable, or if using InMemory DB) - LevelDBPath string + // Host address for Maria DB instance (can be empty if using InMemory DB or if attestation is disabled) + MariaDBHost string + // filepath for the sqlite DB persistence file (can be empty if a throwaway file in /tmp/ is acceptable or + // if using InMemory DB) + SqliteDBPath string ////// // NODE NETWORKING diff --git a/go/host/enclave/guardian.go b/go/host/enclave/guardian.go index 5e6e7a406c..9b9e77030f 100644 --- a/go/host/enclave/guardian.go +++ b/go/host/enclave/guardian.go @@ -1,8 +1,8 @@ package enclave import ( + "database/sql" "fmt" - "github.com/ten-protocol/go-ten/go/host/storage/hostdb" "math/big" "strings" "sync" @@ -56,7 +56,7 @@ type Guardian struct { enclaveClient common.Enclave sl guardianServiceLocator - db *db.DB + db *sql.DB submitDataLock sync.Mutex // we only submit one block, batch or transaction to enclave at a time @@ -74,7 +74,7 @@ type Guardian struct { enclaveID *common.EnclaveID } -func NewGuardian(cfg *config.HostConfig, hostData host.Identity, 
serviceLocator guardianServiceLocator, enclaveClient common.Enclave, db *db.DB, interrupter *stopcontrol.StopControl, logger gethlog.Logger) *Guardian { +func NewGuardian(cfg *config.HostConfig, hostData host.Identity, serviceLocator guardianServiceLocator, enclaveClient common.Enclave, db *sql.DB, interrupter *stopcontrol.StopControl, logger gethlog.Logger) *Guardian { return &Guardian{ hostData: hostData, state: NewStateTracker(logger), diff --git a/go/host/host.go b/go/host/host.go index 2584459518..5a81d2bdca 100644 --- a/go/host/host.go +++ b/go/host/host.go @@ -1,16 +1,16 @@ package host import ( + "database/sql" "encoding/json" "fmt" - "github.com/ten-protocol/go-ten/go/host/storage/hostdb" - gethcommon "github.com/ethereum/go-ethereum/common" "github.com/ten-protocol/go-ten/go/host/l2" "github.com/ten-protocol/go-ten/go/host/enclave" "github.com/ten-protocol/go-ten/go/host/l1" + "github.com/ten-protocol/go-ten/go/host/storage" "github.com/ethereum/go-ethereum/rpc" "github.com/naoina/toml" @@ -39,7 +39,7 @@ type host struct { // ignore incoming requests stopControl *stopcontrol.StopControl - db *db.DB // Stores the host's publicly-available data + db *sql.DB // Stores the host's publicly-available data logger gethlog.Logger @@ -50,7 +50,7 @@ type host struct { } func NewHost(config *config.HostConfig, hostServices *ServicesRegistry, p2p hostcommon.P2PHostService, ethClient ethadapter.EthClient, l1Repo hostcommon.L1RepoService, enclaveClient common.Enclave, ethWallet wallet.Wallet, mgmtContractLib mgmtcontractlib.MgmtContractLib, logger gethlog.Logger, regMetrics gethmetrics.Registry) hostcommon.Host { - database, err := db.CreateDBFromConfig(config, regMetrics, logger) + database, err := storage.CreateDBFromConfig(config, logger) if err != nil { logger.Crit("unable to create database for host", log.ErrKey, err) } @@ -131,7 +131,7 @@ func (h *host) Config() *config.HostConfig { return h.config } -func (h *host) DB() *db.DB { +func (h *host) DB() *sql.DB { 
return h.db } diff --git a/go/host/l2/batchrepository.go b/go/host/l2/batchrepository.go index bd9916e8c4..6d0011d245 100644 --- a/go/host/l2/batchrepository.go +++ b/go/host/l2/batchrepository.go @@ -1,8 +1,8 @@ package l2 import ( + "database/sql" "errors" - "github.com/ten-protocol/go-ten/go/host/storage/hostdb" "math/big" "sync" "sync/atomic" @@ -36,7 +36,7 @@ type Repository struct { subscribers []host.L2BatchHandler sl batchRepoServiceLocator - db *db.DB + db *sql.DB isSequencer bool // high watermark for batch sequence numbers seen so far. If we can't find batch for seq no < this, then we should ask peers for missing batches @@ -54,7 +54,7 @@ type Repository struct { logger gethlog.Logger } -func NewBatchRepository(cfg *config.HostConfig, hostService batchRepoServiceLocator, database *db.DB, logger gethlog.Logger) *Repository { +func NewBatchRepository(cfg *config.HostConfig, hostService batchRepoServiceLocator, database *sql.DB, logger gethlog.Logger) *Repository { return &Repository{ sl: hostService, db: database, diff --git a/go/host/storage/db_init.go b/go/host/storage/db_init.go index dcb3b77c24..480616d0bd 100644 --- a/go/host/storage/db_init.go +++ b/go/host/storage/db_init.go @@ -4,8 +4,6 @@ import ( "database/sql" "fmt" - "github.com/ten-protocol/go-ten/go/enclave/storage/enclavedb" - "github.com/ten-protocol/go-ten/go/common/storage/database/init/sqlite" "github.com/ten-protocol/go-ten/go/enclave/storage/init/edgelessdb" @@ -22,39 +20,29 @@ func CreateDBFromConfig(cfg *config.HostConfig, logger gethlog.Logger) (*sql.DB, if cfg.UseInMemoryDB { logger.Info("UseInMemoryDB flag is true, data will not be persisted. 
Creating in-memory database...") // this creates a temporary sqlite sqldb - return sqlite.CreateTemporarySQLiteHostDB(cfg.HostID.String(), "mode=memory&cache=shared&_foreign_keys=on", logger, "001_init.sql") + return sqlite.CreateTemporarySQLiteHostDB(cfg.ID.String(), "mode=memory&cache=shared&_foreign_keys=on", logger, "001_init.sql") } // persistent and with attestation means connecting to edgeless DB in a trusted enclave from a secure enclave - logger.Info(fmt.Sprintf("Preparing Edgeless DB connection to %s...", cfg.EdgelessDBHost)) - return nil, err - //return getEdgelessDB(cfg, logger) + logger.Info(fmt.Sprintf("Preparing Edgeless DB connection to %s...", cfg.MariaDBHost)) + return getMariaDBHost(cfg, logger) } // validateDBConf high-level checks that you have a valid configuration for DB creation func validateDBConf(cfg *config.HostConfig) error { - if cfg.UseInMemoryDB && cfg.EdgelessDBHost != "" { - return fmt.Errorf("invalid db config, useInMemoryDB=true so EdgelessDB host not expected, but EdgelessDBHost=%s", cfg.EdgelessDBHost) - } - if !cfg.WillAttest && cfg.EdgelessDBHost != "" { - return fmt.Errorf("invalid db config, willAttest=false so EdgelessDB host not supported, but EdgelessDBHost=%s", cfg.EdgelessDBHost) - } - if !cfg.UseInMemoryDB && cfg.WillAttest && cfg.EdgelessDBHost == "" { - return fmt.Errorf("useInMemoryDB=false, willAttest=true so expected an EdgelessDB host but none was provided") + if cfg.UseInMemoryDB && cfg.MariaDBHost != "" { + return fmt.Errorf("invalid db config, useInMemoryDB=true so MariaDB host not expected, but MariaDBHost=%s", cfg.MariaDBHost) } if cfg.SqliteDBPath != "" && cfg.UseInMemoryDB { return fmt.Errorf("useInMemoryDB=true so sqlite database will not be used and no path is needed, but sqliteDBPath=%s", cfg.SqliteDBPath) } - if cfg.SqliteDBPath != "" && cfg.WillAttest { - return fmt.Errorf("willAttest=true so sqlite database will not be used and no path is needed, but sqliteDBPath=%s", cfg.SqliteDBPath) - } return nil 
} -func getEdgelessDB(cfg *config.EnclaveConfig, logger gethlog.Logger) (enclavedb.EnclaveDB, error) { - if cfg.EdgelessDBHost == "" { - return nil, fmt.Errorf("failed to prepare EdgelessDB connection - EdgelessDBHost was not set on enclave config") +func getMariaDBHost(cfg *config.HostConfig, logger gethlog.Logger) (*sql.DB, error) { + if cfg.MariaDBHost == "" { + return nil, fmt.Errorf("failed to prepare MariaDB connection - MariaDBHost was not set on host config") } - dbConfig := edgelessdb.Config{Host: cfg.EdgelessDBHost} + dbConfig := edgelessdb.Config{Host: cfg.MariaDBHost} return edgelessdb.Connector(&dbConfig, logger) } diff --git a/go/host/storage/hostdb/batch.go b/go/host/storage/hostdb/batch.go new file mode 100644 index 0000000000..088fd00130 --- /dev/null +++ b/go/host/storage/hostdb/batch.go @@ -0,0 +1,192 @@ +package hostdb + +import ( + "database/sql" + "fmt" + "github.com/ethereum/go-ethereum/rlp" + "math/big" + + gethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ten-protocol/go-ten/go/common" +) + +// AddBatch adds a batch and its header to the DB +func AddBatch(db *sql.DB, batch *common.ExtBatch) error { + return BeginTx(db, func(tx *sql.Tx) error { + + batchBodyStmt, err := tx.Prepare("INSERT INTO batch_body (id, body) VALUES (?, ?)") + if err != nil { + return fmt.Errorf("failed to prepare body insert statement: %w", err) + } + defer batchBodyStmt.Close() + + // BATCH INSERT + batchStmt, err := tx.Prepare("INSERT INTO batch (sequence, full_hash, hash, height, tx_count, header_blob, body_id) VALUES (?, ?, ?, ?, ?, ?, ?)") + if err != nil { + return fmt.Errorf("failed to prepare batch insert statement: %w", err) + } + defer batchStmt.Close() + + //TX INSERT + txStmt, err := tx.Prepare("INSERT INTO transactions (tx_hash_indexed, tx_hash_full, content, sender_address, nonce, idx, body_id) VALUES (?, ?, ?, ?, ?, ?, ?)") + if err != nil { + return fmt.Errorf("failed to prepare transaction
insert statement: %w", err) + } + defer txStmt.Close() + + // Encode batch data + batchBodyID := batch.Header.SequencerOrderNo.Uint64() + body, err := rlp.EncodeToBytes(batch.EncryptedTxBlob) + if err != nil { + return fmt.Errorf("could not encode L2 transactions: %w", err) + } + header, err := rlp.EncodeToBytes(batch.Header) + if err != nil { + return fmt.Errorf("could not encode batch header: %w", err) + } + + // Execute body insert + _, err = batchBodyStmt.Exec(batchBodyID, body) + if err != nil { + return fmt.Errorf("failed to insert body: %w", err) + } + + _, err = batchStmt.Exec( + batch.Header.SequencerOrderNo.Uint64(), // sequence + batch.Hash(), // full hash + truncTo16(batch.Hash()), // shortened hash + batch.Header.Number.Uint64(), // height + len(batch.TxHashes), // tx_count + header, // header blob + batchBodyID, // reference to the batch body + ) + + return err + }) +} + +// GetHeadBatchHeader returns the header of the node's current head batch. +func GetHeadBatchHeader(db *sql.DB) (*common.BatchHeader, error) { + panic("implement me") +} + +// GetBatchHeader returns the batch header given the hash. +func GetBatchHeader(db *sql.DB, hash gethcommon.Hash) (*common.BatchHeader, error) { + panic("implement me") +} + +// GetBatchHash returns the hash of a batch given its number. +func GetBatchHash(db *sql.DB, number *big.Int) (*gethcommon.Hash, error) { + panic("implement me") +} + +// GetBatchTxs returns the transaction hashes of the batch with the given hash. +func GetBatchTxs(db *sql.DB, batchHash gethcommon.Hash) ([]gethcommon.Hash, error) { + panic("implement me") +} + +// GetBatchNumber returns the number of the batch containing the given transaction hash. +func GetBatchNumber(db *sql.DB, txHash gethcommon.Hash) (*big.Int, error) { + panic("implement me") +} + +// GetTotalTransactions returns the total number of batched transactions.
+func GetTotalTransactions(db *sql.DB) (*big.Int, error) { + panic("implement me") +} + +// GetBatch returns the batch with the given hash. +func GetBatch(db *sql.DB, batchHash gethcommon.Hash) (*common.ExtBatch, error) { + panic("implement me") +} + +// GetBatchBySequenceNumber returns the batch with the given sequence number. +func GetBatchBySequenceNumber(db *sql.DB, sequenceNumber *big.Int) (*common.ExtBatch, error) { + panic("implement me") +} + +// GetBatchListing returns latest batches given a pagination. +// For example, page 0, size 10 will return the latest 10 batches. +func GetBatchListing(db *sql.DB, pagination *common.QueryPagination) (*common.BatchListingResponse, error) { + panic("implement me") +} + +// Retrieves the batch header corresponding to the hash. +func readBatchHeader(db *sql.DB, hash gethcommon.Hash) (*common.BatchHeader, error) { + panic("implement me") +} + +// Retrieves the hash of the head batch. +func readHeadBatchHash(db *sql.DB) (*gethcommon.Hash, error) { + panic("implement me") +} + +// Stores a batch header into the database. +func writeBatchHeader(db *sql.DB, w ethdb.KeyValueWriter, header *common.BatchHeader) error { + panic("implement me") +} + +// Stores the head batch header hash into the database. +func writeHeadBatchHash(db *sql.DB, w ethdb.KeyValueWriter, val gethcommon.Hash) error { + panic("implement me") +} + +// Stores a batch's hash in the database, keyed by the batch's number. +func writeBatchHash(db *sql.DB, w ethdb.KeyValueWriter, header *common.BatchHeader) error { + panic("implement me") +} + +// Stores a batch's hash in the database, keyed by the batch's sequencer number. +func writeBatchSeqNo(db *sql.DB, w ethdb.KeyValueWriter, header *common.BatchHeader) error { + panic("implement me") +} + +// Retrieves the hash for the batch with the given number.. 
+func readBatchHash(db *sql.DB, number *big.Int) (*gethcommon.Hash, error) { + panic("implement me") +} + +// Returns the transaction hashes in the batch with the given hash. +func readBatchTxHashes(db *sql.DB, batchHash common.L2BatchHash) ([]gethcommon.Hash, error) { + panic("implement me") +} + +// Stores a batch's number in the database, keyed by the hash of a transaction in that rollup. +func writeBatchNumber(db *sql.DB, w ethdb.KeyValueWriter, header *common.BatchHeader, txHash gethcommon.Hash) error { + panic("implement me") +} + +// Writes the transaction hashes against the batch containing them. +func writeBatchTxHashes(db *sql.DB, w ethdb.KeyValueWriter, batchHash common.L2BatchHash, txHashes []gethcommon.Hash) error { + panic("implement me") +} + +// Retrieves the number of the batch containing the transaction with the given hash. +func readBatchNumber(db *sql.DB, txHash gethcommon.Hash) (*big.Int, error) { + panic("implement me") +} + +func readBatchHashBySequenceNumber(db *sql.DB, seqNum *big.Int) (*gethcommon.Hash, error) { + panic("implement me") +} + +// Retrieves the total number of rolled-up transactions - returns 0 if no tx count is found +func readTotalTransactions(db *sql.DB) (*big.Int, error) { + panic("implement me") +} + +// Stores the total number of transactions in the database. +func writeTotalTransactions(db *sql.DB, w ethdb.KeyValueWriter, newTotal *big.Int) error { + panic("implement me") +} + +// Stores a batch into the database. +func writeBatch(db *sql.DB, w ethdb.KeyValueWriter, batch *common.ExtBatch) error { + panic("implement me") +} + +// Retrieves the batch corresponding to the hash. 
+func readBatch(db *sql.DB, hash gethcommon.Hash) (*common.ExtBatch, error) { + panic("implement me") +} diff --git a/go/host/storage/hostdb/batches_test.go b/go/host/storage/hostdb/batch_test.go similarity index 99% rename from go/host/storage/hostdb/batches_test.go rename to go/host/storage/hostdb/batch_test.go index 54289b6061..05e30dd8e6 100644 --- a/go/host/storage/hostdb/batches_test.go +++ b/go/host/storage/hostdb/batch_test.go @@ -1,4 +1,4 @@ -package db +package hostdb import ( "errors" diff --git a/go/host/storage/hostdb/batches.go b/go/host/storage/hostdb/batches.go deleted file mode 100644 index 8df76cc6b0..0000000000 --- a/go/host/storage/hostdb/batches.go +++ /dev/null @@ -1,388 +0,0 @@ -package db - -import ( - "bytes" - "fmt" - "math/big" - - "github.com/ethereum/go-ethereum/ethdb" - "github.com/pkg/errors" - - "github.com/ten-protocol/go-ten/go/common/errutil" - - gethcommon "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ten-protocol/go-ten/go/common" -) - -// DB methods relating to batches. - -// GetHeadBatchHeader returns the header of the node's current head batch. -func (db *DB) GetHeadBatchHeader() (*common.BatchHeader, error) { - headBatchHash, err := db.readHeadBatchHash() - if err != nil { - return nil, err - } - return db.readBatchHeader(*headBatchHash) -} - -// GetBatchHeader returns the batch header given the hash. -func (db *DB) GetBatchHeader(hash gethcommon.Hash) (*common.BatchHeader, error) { - return db.readBatchHeader(hash) -} - -// AddBatch adds a batch and its header to the DB -func (db *DB) AddBatch(batch *common.ExtBatch) error { - // We check if the batch is already stored, to avoid incrementing the total transaction count twice for one batch. - _, err := db.GetBatchHeader(batch.Hash()) - if err != nil && !errors.Is(err, errutil.ErrNotFound) { - return fmt.Errorf("4. could not retrieve batch header. 
Cause: %w", err) - } - if err == nil { - // The batch is already stored, so we return early. - return errutil.ErrAlreadyExists - } - - b := db.kvStore.NewBatch() - - if err := db.writeBatchHeader(b, batch.Header); err != nil { - return fmt.Errorf("could not write batch header. Cause: %w", err) - } - if err := db.writeBatch(b, batch); err != nil { - return fmt.Errorf("could not write batch. Cause: %w", err) - } - if err := db.writeBatchTxHashes(b, batch.Hash(), batch.TxHashes); err != nil { - return fmt.Errorf("could not write batch transaction hashes. Cause: %w", err) - } - if err := db.writeBatchHash(b, batch.Header); err != nil { - return fmt.Errorf("could not write batch hash. Cause: %w", err) - } - if err := db.writeBatchSeqNo(b, batch.Header); err != nil { - return fmt.Errorf("could not write batch hash. Cause: %w", err) - } - for _, txHash := range batch.TxHashes { - if err := db.writeBatchNumber(b, batch.Header, txHash); err != nil { - return fmt.Errorf("could not write batch number. Cause: %w", err) - } - } - - // Update the total number of transactions. There's a potential race here, but absolute accuracy of the number of - // transactions is not required. - currentTotal, err := db.readTotalTransactions() - if err != nil { - return fmt.Errorf("could not retrieve total transactions. Cause: %w", err) - } - newTotal := big.NewInt(0).Add(currentTotal, big.NewInt(int64(len(batch.TxHashes)))) - err = db.writeTotalTransactions(b, newTotal) - if err != nil { - return fmt.Errorf("could not write total transactions. Cause: %w", err) - } - - // Update the head if the new height is greater than the existing one. - headBatchHeader, err := db.GetHeadBatchHeader() - if err != nil && !errors.Is(err, errutil.ErrNotFound) { - return fmt.Errorf("could not retrieve head batch header. 
Cause: %w", err) - } - if headBatchHeader == nil || headBatchHeader.Number.Cmp(batch.Header.Number) == -1 { - err = db.writeHeadBatchHash(b, batch.Hash()) - if err != nil { - return fmt.Errorf("could not write new head batch hash. Cause: %w", err) - } - } - - if err = b.Write(); err != nil { - return fmt.Errorf("could not write batch to DB. Cause: %w", err) - } - return nil -} - -// GetBatchHash returns the hash of a batch given its number. -func (db *DB) GetBatchHash(number *big.Int) (*gethcommon.Hash, error) { - return db.readBatchHash(number) -} - -// GetBatchTxs returns the transaction hashes of the batch with the given hash. -func (db *DB) GetBatchTxs(batchHash gethcommon.Hash) ([]gethcommon.Hash, error) { - return db.readBatchTxHashes(batchHash) -} - -// GetBatchNumber returns the number of the batch containing the given transaction hash. -func (db *DB) GetBatchNumber(txHash gethcommon.Hash) (*big.Int, error) { - return db.readBatchNumber(txHash) -} - -// GetTotalTransactions returns the total number of batched transactions. -func (db *DB) GetTotalTransactions() (*big.Int, error) { - return db.readTotalTransactions() -} - -// GetBatch returns the batch with the given hash. -func (db *DB) GetBatch(batchHash gethcommon.Hash) (*common.ExtBatch, error) { - db.batchReads.Inc(1) - return db.readBatch(batchHash) -} - -// GetBatchBySequenceNumber returns the batch with the given sequence number. -func (db *DB) GetBatchBySequenceNumber(sequenceNumber *big.Int) (*common.ExtBatch, error) { - db.batchReads.Inc(1) - batchHash, err := db.readBatchHashBySequenceNumber(sequenceNumber) - if err != nil { - return nil, fmt.Errorf("could not retrieve batch hash for seqNo=%d: %w", sequenceNumber, err) - } - return db.GetBatch(*batchHash) -} - -// GetBatchListing returns latest batches given a pagination. -// For example, page 0, size 10 will return the latest 10 batches. 
-// todo change this when the db changes - this is not super performant -func (db *DB) GetBatchListing(pagination *common.QueryPagination) (*common.BatchListingResponse, error) { - // fetch the total batches so we can paginate - header, err := db.GetHeadBatchHeader() - if err != nil { - return nil, err - } - - batchesFrom := header.SequencerOrderNo.Uint64() - pagination.Offset - batchesToInclusive := int(batchesFrom) - int(pagination.Size) + 1 - // batchesToInclusive can't go below zero - if batchesToInclusive < 0 { - batchesToInclusive = 0 - } - - var batches []common.PublicBatch - // fetch requested batches - looping backwards from the latest batch subtracting any pagination offset - // (e.g. front-end showing latest batches first, page 3 of size 10 would be skipping the 30 most recent batches) - for i := batchesFrom; i >= uint64(batchesToInclusive); i-- { - extBatch, err := db.GetBatchBySequenceNumber(big.NewInt(int64(i))) - if err != nil && !errors.Is(err, errutil.ErrNotFound) { - return nil, err - } - if extBatch != nil { - batches = append(batches, common.PublicBatch{BatchHeader: *extBatch.Header, TxHashes: extBatch.TxHashes}) - } - } - - return &common.BatchListingResponse{ - BatchesData: batches, - Total: header.Number.Uint64(), - }, nil -} - -// headerKey = batchHeaderPrefix + hash -func batchHeaderKey(hash gethcommon.Hash) []byte { - return append(batchHeaderPrefix, hash.Bytes()...) -} - -// headerKey = batchPrefix + hash -func batchKey(hash gethcommon.Hash) []byte { - return append(batchPrefix, hash.Bytes()...) -} - -// headerKey = batchHashPrefix + number -func batchHashKey(num *big.Int) []byte { - return append(batchHashPrefix, []byte(num.String())...) -} - -// headerKey = batchTxHashesPrefix + batch hash -func batchTxHashesKey(hash gethcommon.Hash) []byte { - return append(batchTxHashesPrefix, hash.Bytes()...) 
-} - -// headerKey = batchNumberPrefix + hash -func batchNumberKey(txHash gethcommon.Hash) []byte { - return append(batchNumberPrefix, txHash.Bytes()...) -} - -// hashKey = batchHashForSeqNoPrefix + seqNo -func batchHashBySeqNoKey(seqNo *big.Int) []byte { - return append(batchHashForSeqNoPrefix, []byte(seqNo.String())...) -} - -// Retrieves the batch header corresponding to the hash. -func (db *DB) readBatchHeader(hash gethcommon.Hash) (*common.BatchHeader, error) { - data, err := db.kvStore.Get(batchHeaderKey(hash)) - if err != nil { - return nil, err - } - if len(data) == 0 { - return nil, errutil.ErrNotFound - } - header := new(common.BatchHeader) - if err := rlp.Decode(bytes.NewReader(data), header); err != nil { - return nil, err - } - return header, nil -} - -// Retrieves the hash of the head batch. -func (db *DB) readHeadBatchHash() (*gethcommon.Hash, error) { - value, err := db.kvStore.Get(headBatch) - if err != nil { - return nil, err - } - h := gethcommon.BytesToHash(value) - return &h, nil -} - -// Stores a batch header into the database. -func (db *DB) writeBatchHeader(w ethdb.KeyValueWriter, header *common.BatchHeader) error { - // Write the encoded header - data, err := rlp.EncodeToBytes(header) - if err != nil { - return err - } - key := batchHeaderKey(header.Hash()) - - return w.Put(key, data) -} - -// Stores the head batch header hash into the database. -func (db *DB) writeHeadBatchHash(w ethdb.KeyValueWriter, val gethcommon.Hash) error { - err := w.Put(headBatch, val.Bytes()) - if err != nil { - return err - } - return nil -} - -// Stores a batch's hash in the database, keyed by the batch's number. -func (db *DB) writeBatchHash(w ethdb.KeyValueWriter, header *common.BatchHeader) error { - key := batchHashKey(header.Number) - - return w.Put(key, header.Hash().Bytes()) -} - -// Stores a batch's hash in the database, keyed by the batch's sequencer number. 
-func (db *DB) writeBatchSeqNo(w ethdb.KeyValueWriter, header *common.BatchHeader) error { - key := batchHashBySeqNoKey(header.SequencerOrderNo) - - return w.Put(key, header.Hash().Bytes()) -} - -// Retrieves the hash for the batch with the given number.. -func (db *DB) readBatchHash(number *big.Int) (*gethcommon.Hash, error) { - data, err := db.kvStore.Get(batchHashKey(number)) - if err != nil { - return nil, err - } - if len(data) == 0 { - return nil, errutil.ErrNotFound - } - hash := gethcommon.BytesToHash(data) - return &hash, nil -} - -// Returns the transaction hashes in the batch with the given hash. -func (db *DB) readBatchTxHashes(batchHash common.L2BatchHash) ([]gethcommon.Hash, error) { - data, err := db.kvStore.Get(batchTxHashesKey(batchHash)) - if err != nil { - return nil, err - } - if len(data) == 0 { - return nil, errutil.ErrNotFound - } - - var txHashes []gethcommon.Hash - if err = rlp.Decode(bytes.NewReader(data), &txHashes); err != nil { - return nil, err - } - - return txHashes, nil -} - -// Stores a batch's number in the database, keyed by the hash of a transaction in that rollup. -func (db *DB) writeBatchNumber(w ethdb.KeyValueWriter, header *common.BatchHeader, txHash gethcommon.Hash) error { - key := batchNumberKey(txHash) - - return w.Put(key, header.Number.Bytes()) -} - -// Writes the transaction hashes against the batch containing them. -func (db *DB) writeBatchTxHashes(w ethdb.KeyValueWriter, batchHash common.L2BatchHash, txHashes []gethcommon.Hash) error { - data, err := rlp.EncodeToBytes(txHashes) - if err != nil { - return err - } - key := batchTxHashesKey(batchHash) - - return w.Put(key, data) -} - -// Retrieves the number of the batch containing the transaction with the given hash. 
-func (db *DB) readBatchNumber(txHash gethcommon.Hash) (*big.Int, error) { - data, err := db.kvStore.Get(batchNumberKey(txHash)) - if err != nil { - return nil, err - } - if len(data) == 0 { - return nil, errutil.ErrNotFound - } - return big.NewInt(0).SetBytes(data), nil -} - -func (db *DB) readBatchHashBySequenceNumber(seqNum *big.Int) (*gethcommon.Hash, error) { - data, err := db.kvStore.Get(batchHashBySeqNoKey(seqNum)) - if err != nil { - return nil, err - } - if len(data) == 0 { - return nil, errutil.ErrNotFound - } - h := gethcommon.BytesToHash(data) - return &h, nil -} - -// Retrieves the total number of rolled-up transactions - returns 0 if no tx count is found -func (db *DB) readTotalTransactions() (*big.Int, error) { - data, err := db.kvStore.Get(totalTransactionsKey) - if err != nil { - if errors.Is(err, errutil.ErrNotFound) { - return big.NewInt(0), nil - } - return nil, err - } - if len(data) == 0 { - return big.NewInt(0), nil - } - return big.NewInt(0).SetBytes(data), nil -} - -// Stores the total number of transactions in the database. -func (db *DB) writeTotalTransactions(w ethdb.KeyValueWriter, newTotal *big.Int) error { - err := w.Put(totalTransactionsKey, newTotal.Bytes()) - if err != nil { - return err - } - return nil -} - -// Stores a batch into the database. -func (db *DB) writeBatch(w ethdb.KeyValueWriter, batch *common.ExtBatch) error { - // Write the encoded header - data, err := rlp.EncodeToBytes(batch) - if err != nil { - return err - } - key := batchKey(batch.Hash()) - if err := w.Put(key, data); err != nil { - return err - } - db.batchWrites.Inc(1) - return nil -} - -// Retrieves the batch corresponding to the hash. 
-func (db *DB) readBatch(hash gethcommon.Hash) (*common.ExtBatch, error) { - data, err := db.kvStore.Get(batchKey(hash)) - if err != nil { - return nil, err - } - if len(data) == 0 { - return nil, errutil.ErrNotFound - } - batch := new(common.ExtBatch) - if err := rlp.Decode(bytes.NewReader(data), batch); err != nil { - return nil, err - } - return batch, nil -} diff --git a/go/host/storage/hostdb/blocks.go b/go/host/storage/hostdb/blocks.go index a8596bcb2f..62b3615f63 100644 --- a/go/host/storage/hostdb/blocks.go +++ b/go/host/storage/hostdb/blocks.go @@ -1,4 +1,4 @@ -package db +package hostdb import ( "bytes" diff --git a/go/host/storage/hostdb/blocks_test.go b/go/host/storage/hostdb/blocks_test.go index 0649ebca77..3a74af311e 100644 --- a/go/host/storage/hostdb/blocks_test.go +++ b/go/host/storage/hostdb/blocks_test.go @@ -1,4 +1,4 @@ -package db +package hostdb import ( "errors" diff --git a/go/host/storage/hostdb/hostdb.go b/go/host/storage/hostdb/hostdb.go index ee8fda2d49..2f406f99cc 100644 --- a/go/host/storage/hostdb/hostdb.go +++ b/go/host/storage/hostdb/hostdb.go @@ -1,115 +1,27 @@ -package db +package hostdb import ( + "database/sql" "fmt" - "os" - - "github.com/ten-protocol/go-ten/go/config" - - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/ethdb/leveldb" - gethlog "github.com/ethereum/go-ethereum/log" - gethmetrics "github.com/ethereum/go-ethereum/metrics" - "github.com/ten-protocol/go-ten/go/common/gethdb" -) - -// Schema keys, in alphabetical order. 
-var ( - blockHeaderPrefix = []byte("b") - blockNumberHeaderPrefix = []byte("bnh") - batchHeaderPrefix = []byte("ba") - batchHashPrefix = []byte("bh") - batchNumberPrefix = []byte("bn") - batchPrefix = []byte("bp") - batchHashForSeqNoPrefix = []byte("bs") - batchTxHashesPrefix = []byte("bt") - headBatch = []byte("hb") - totalTransactionsKey = []byte("t") - rollupHeaderPrefix = []byte("rh") - rollupHeaderBlockPrefix = []byte("rhb") - tipRollupHash = []byte("tr") - blockHeadedAtTip = []byte("bht") ) -// DB allows to access the nodes public nodeDB -type DB struct { - kvStore ethdb.KeyValueStore - logger gethlog.Logger - batchWrites gethmetrics.Gauge - batchReads gethmetrics.Gauge - blockWrites gethmetrics.Gauge - blockReads gethmetrics.Gauge -} - -// Stop is especially important for graceful shutdown of LevelDB as it may flush data to disk that is currently in cache -func (db *DB) Stop() error { - db.logger.Info("Closing the host DB.") - err := db.kvStore.Close() +// BeginTx handles beginning a transaction and committing or rolling it back based on the error +func BeginTx(db *sql.DB, txFunc func(*sql.Tx) error) (err error) { + tx, err := db.Begin() if err != nil { - return err + return fmt.Errorf("failed to begin transaction: %w", err) } - return nil -} - -func CreateDBFromConfig(cfg *config.HostConfig, regMetrics gethmetrics.Registry, logger gethlog.Logger) (*DB, error) { - if err := validateDBConf(cfg); err != nil { - return nil, err - } - if cfg.UseInMemoryDB { - logger.Info("UseInMemoryDB flag is true, data will not be persisted.
Creating in-memory database...") - return NewInMemoryDB(regMetrics, logger), nil - } - return NewLevelDBBackedDB(cfg.LevelDBPath, regMetrics, logger) -} -func validateDBConf(cfg *config.HostConfig) error { - if cfg.UseInMemoryDB && cfg.LevelDBPath != "" { - return fmt.Errorf("useInMemoryDB=true so levelDB will not be used and no path is needed, but levelDBPath=%s", cfg.LevelDBPath) - } - return nil -} - -// NewInMemoryDB returns a new instance of the Node DB -func NewInMemoryDB(regMetrics gethmetrics.Registry, logger gethlog.Logger) *DB { - return newDB(gethdb.NewMemDB(), regMetrics, logger) -} - -// NewLevelDBBackedDB creates a persistent DB for the host, if dbPath == "" it will generate a temp file -func NewLevelDBBackedDB(dbPath string, regMetrics gethmetrics.Registry, logger gethlog.Logger) (*DB, error) { - var err error - if dbPath == "" { - // todo (#1618) - we should remove this option before prod, if you want a temp DB it should be wired in via the config - dbPath, err = os.MkdirTemp("", "leveldb_*") - if err != nil { - return nil, fmt.Errorf("could not create temp leveldb directory - %w", err) + defer func() { + if p := recover(); p != nil { + tx.Rollback() + panic(p) // re-throw panic after Rollback + } else if err != nil { + tx.Rollback() + } else { + err = tx.Commit() } - logger.Warn("dbPath was empty, created temp dir for persistence", "dbPath", dbPath) - } - // determine if a db file already exists, we don't want to overwrite it - _, err = os.Stat(dbPath) - dbDesc := "new" - if err == nil { - dbDesc = "existing" - } + }() - // todo (#1618) - these should be configs - cache := 128 - handles := 128 - db, err := leveldb.New(dbPath, cache, handles, "host", false) - if err != nil { - return nil, fmt.Errorf("could not create leveldb - %w", err) - } - logger.Info(fmt.Sprintf("Opened %s level db dir at %s", dbDesc, dbPath)) - return newDB(&ObscuroLevelDB{db: db}, regMetrics, logger), nil -} - -func newDB(kvStore ethdb.KeyValueStore, regMetrics 
gethmetrics.Registry, logger gethlog.Logger) *DB { - return &DB{ - kvStore: kvStore, - logger: logger, - batchWrites: gethmetrics.NewRegisteredGauge("host/db/batch/writes", regMetrics), - batchReads: gethmetrics.NewRegisteredGauge("host/db/batch/reads", regMetrics), - blockWrites: gethmetrics.NewRegisteredGauge("host/db/block/writes", regMetrics), - blockReads: gethmetrics.NewRegisteredGauge("host/db/block/reads", regMetrics), - } + return txFunc(tx) } diff --git a/go/host/storage/hostdb/leveldb.go b/go/host/storage/hostdb/leveldb.go index 49a3ec6cf3..9923357680 100644 --- a/go/host/storage/hostdb/leveldb.go +++ b/go/host/storage/hostdb/leveldb.go @@ -1,4 +1,4 @@ -package db +package hostdb import ( "errors" diff --git a/go/host/storage/hostdb/rollups.go b/go/host/storage/hostdb/rollups.go index 41ac34b80d..f7bb6b470c 100644 --- a/go/host/storage/hostdb/rollups.go +++ b/go/host/storage/hostdb/rollups.go @@ -1,4 +1,4 @@ -package db +package hostdb import ( "bytes" diff --git a/go/host/storage/hostdb/utils.go b/go/host/storage/hostdb/utils.go new file mode 100644 index 0000000000..4d9e0103d2 --- /dev/null +++ b/go/host/storage/hostdb/utils.go @@ -0,0 +1,19 @@ +package hostdb + +import gethcommon "github.com/ethereum/go-ethereum/common" + +const truncHash = 16 + +func truncTo16(hash gethcommon.Hash) []byte { + return truncBTo16(hash.Bytes()) +} + +func truncBTo16(bytes []byte) []byte { + if len(bytes) == 0 { + return bytes + } + b := bytes[0:truncHash] + c := make([]byte, truncHash) + copy(c, b) + return c +} diff --git a/go/host/storage/init/sqlite/001_init.sql b/go/host/storage/init/sqlite/001_init.sql index 1dd81c7313..5037d5ce80 100644 --- a/go/host/storage/init/sqlite/001_init.sql +++ b/go/host/storage/init/sqlite/001_init.sql @@ -28,6 +28,7 @@ create table if not exists batch_body create table if not exists batch ( sequence int primary key, + full_hash binary(32) NOT NULL, hash binary(16) NOT NULL unique, height int NOT NULL, tx_count int NOT NULL, diff --git 
// BatchResolver provides read and write access to the batch, rollup and
// sequencer data persisted by the host's storage layer.
//
// NOTE(review): several methods are commented out below — presumably kept as
// a sketch of the intended final interface; confirm before deleting them.
type BatchResolver interface {
	// FetchBatch returns the batch with the given hash.
	FetchBatch(hash common.L2BatchHash) (*core.Batch, error)
	// FetchBatchHeader returns the batch header with the given hash.
	FetchBatchHeader(hash common.L2BatchHash) (*common.BatchHeader, error)
	// FetchBatchByHeight returns the batch on the canonical chain with the given height.
	FetchBatchByHeight(height uint64) (*core.Batch, error)
	// FetchBatchBySeqNo returns the batch with the given seq number.
	FetchBatchBySeqNo(seqNum uint64) (*core.Batch, error)
	// FetchHeadBatch returns the current head batch of the canonical chain.
	FetchHeadBatch() (*core.Batch, error)
	// FetchCurrentSequencerNo returns the sequencer number
	FetchCurrentSequencerNo() (*big.Int, error)
	// FetchBatchesByBlock returns all batches with the block hash as the L1 proof
	FetchBatchesByBlock(common.L1BlockHash) ([]*core.Batch, error)
	//// FetchNonCanonicalBatchesBetween - returns all reorged batches between the sequences
	//FetchNonCanonicalBatchesBetween(startSeq uint64, endSeq uint64) ([]*core.Batch, error)
	//// FetchCanonicalUnexecutedBatches - return the list of the unexecuted batches that are canonical
	//FetchCanonicalUnexecutedBatches(*big.Int) ([]*core.Batch, error)

	//FetchConvertedHash(hash common.L2BatchHash) (gethcommon.Hash, error)

	// BatchWasExecuted - return true if the batch was executed
	BatchWasExecuted(hash common.L2BatchHash) (bool, error)

	// FetchHeadBatchForBlock returns the hash of the head batch at a given L1 block.
	FetchHeadBatchForBlock(blockHash common.L1BlockHash) (*core.Batch, error)

	// StoreBatch stores an un-executed batch.
	StoreBatch(batch *core.Batch, convertedHash gethcommon.Hash) error
	// StoreExecutedBatch - store the batch after it was executed
	StoreExecutedBatch(batch *core.Batch, receipts []*types.Receipt) error

	// StoreRollup persists the given rollup together with its calldata header.
	StoreRollup(rollup *common.ExtRollup, header *common.CalldataRollupHeader) error
	// FetchReorgedRollup returns the hash of a rollup affected by any of the
	// given reorged L1 blocks, if one exists.
	FetchReorgedRollup(reorgedBlocks []common.L1BlockHash) (*common.L2BatchHash, error)
}

// HostDatabase wraps the SQL connection holding the host's
// publicly-available data (SQLite or MariaDB, per HostConfig).
type HostDatabase struct {
	db *sql.DB
}