Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

DB performance #1893

Merged
merged 19 commits into from
May 13, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions go/common/enclave.go
Original file line number Diff line number Diff line change
Expand Up @@ -140,6 +140,7 @@ type EnclaveScan interface {
GetTotalContractCount(context.Context) (*big.Int, SystemError)

// GetCustomQuery returns the data of a custom query
// todo - better name and description
GetCustomQuery(ctx context.Context, encryptedParams EncryptedParamsGetStorageAt) (*responses.PrivateQueryResponse, SystemError)

// EnclavePublicConfig returns network data that is known to the enclave but can be shared publicly
Expand Down
2 changes: 1 addition & 1 deletion go/common/gethutil/gethutil.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ import (
var EmptyHash = gethcommon.Hash{}

// LCA - returns the latest common ancestor of the 2 blocks or an error if no common ancestor is found
// it also returns the blocks that became canonincal, and the once that are now the fork
// it also returns the blocks that became canonical, and the ones that are now on the fork
func LCA(ctx context.Context, newCanonical *types.Block, oldCanonical *types.Block, resolver storage.BlockResolver) (*common.ChainFork, error) {
b, cp, ncp, err := internalLCA(ctx, newCanonical, oldCanonical, resolver, []common.L1BlockHash{}, []common.L1BlockHash{oldCanonical.Hash()})
// remove the common ancestor
Expand Down
10 changes: 10 additions & 0 deletions go/enclave/components/batch_registry.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,16 @@ func (br *batchRegistry) UnsubscribeFromBatches() {
br.batchesCallback = nil
}

// OnL1Reorg refreshes the cached head batch sequence number after an L1 reorg,
// since the previously cached value may no longer match what is in the database.
// The ingestion details are not needed here, hence the ignored parameter.
// NOTE(review): headBatchSeq is written here without holding callbackMutex — confirm no concurrent readers race on it.
func (br *batchRegistry) OnL1Reorg(_ *BlockIngestionType) {
	latest, err := br.storage.FetchHeadBatch(context.Background())
	if err != nil {
		br.logger.Error("Could not fetch head batch", log.ErrKey, err)
		return
	}
	br.headBatchSeq = latest.SeqNo()
}

func (br *batchRegistry) OnBatchExecuted(batch *core.Batch, receipts types.Receipts) {
br.callbackMutex.RLock()
defer br.callbackMutex.RUnlock()
Expand Down
1 change: 1 addition & 0 deletions go/enclave/components/interfaces.go
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,7 @@ type BatchRegistry interface {
UnsubscribeFromBatches()

OnBatchExecuted(batch *core.Batch, receipts types.Receipts)
OnL1Reorg(*BlockIngestionType)

// HasGenesisBatch - returns if genesis batch is available yet or not, or error in case
// the function is unable to determine.
Expand Down
5 changes: 5 additions & 0 deletions go/enclave/components/rollup_consumer.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,11 @@ func (rc *rollupConsumerImpl) ProcessRollupsInBlock(ctx context.Context, b *comm
return err
}

if len(rollups) > 1 {
// todo - we need to sort this out
rc.logger.Warn(fmt.Sprintf("Multiple rollups %d in block %s", len(rollups), b.Block.Hash()))
}

for _, rollup := range rollups {
l1CompressionBlock, err := rc.storage.FetchBlock(ctx, rollup.Header.CompressionL1Head)
if err != nil {
Expand Down
1 change: 1 addition & 0 deletions go/enclave/enclave.go
Original file line number Diff line number Diff line change
Expand Up @@ -449,6 +449,7 @@ func (e *enclaveImpl) ingestL1Block(ctx context.Context, br *common.BlockAndRece
}

if ingestion.IsFork() {
e.registry.OnL1Reorg(ingestion)
err := e.service.OnL1Fork(ctx, ingestion.ChainFork)
if err != nil {
return nil, err
Expand Down
9 changes: 7 additions & 2 deletions go/enclave/nodetype/validator.go
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,7 @@ func (val *obsValidator) VerifySequencerSignature(b *core.Batch) error {
}

func (val *obsValidator) ExecuteStoredBatches(ctx context.Context) error {
val.logger.Trace("Executing stored batches")
headBatchSeq := val.batchRegistry.HeadBatchSeq()
if headBatchSeq == nil {
headBatchSeq = big.NewInt(int64(common.L2GenesisSeqNo))
Expand All @@ -95,11 +96,14 @@ func (val *obsValidator) ExecuteStoredBatches(ctx context.Context) error {
}
}

val.logger.Trace("Executing stored batch", log.BatchSeqNoKey, batch.SeqNo())

// check batch execution prerequisites
canExecute, err := val.executionPrerequisites(ctx, batch)
if err != nil {
return fmt.Errorf("could not determine the execution prerequisites for batch %s. Cause: %w", batch.Hash(), err)
}
val.logger.Trace("Can executing stored batch", log.BatchSeqNoKey, batch.SeqNo(), "can", canExecute)

if canExecute {
receipts, err := val.batchExecutor.ExecuteBatch(ctx, batch)
Expand All @@ -124,16 +128,17 @@ func (val *obsValidator) executionPrerequisites(ctx context.Context, batch *core
// 1.l1 block exists
block, err := val.storage.FetchBlock(ctx, batch.Header.L1Proof)
if err != nil && errors.Is(err, errutil.ErrNotFound) {
val.logger.Info("Error fetching block", log.BlockHashKey, batch.Header.L1Proof, log.ErrKey, err)
val.logger.Warn("Error fetching block", log.BlockHashKey, batch.Header.L1Proof, log.ErrKey, err)
return false, err
}

val.logger.Trace("l1 block exists", log.BatchSeqNoKey, batch.SeqNo())
// 2. parent was executed
parentExecuted, err := val.storage.BatchWasExecuted(ctx, batch.Header.ParentHash)
if err != nil {
val.logger.Info("Error reading execution status of batch", log.BatchHashKey, batch.Header.ParentHash, log.ErrKey, err)
return false, err
}
val.logger.Trace("parentExecuted", log.BatchSeqNoKey, batch.SeqNo(), "val", parentExecuted)

return block != nil && parentExecuted, nil
}
Expand Down
11 changes: 8 additions & 3 deletions go/enclave/storage/db_init.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,23 +13,28 @@ import (
"github.com/ten-protocol/go-ten/go/config"
)

// sqliteCfg holds the connection-string options passed to the sqlite driver:
// _foreign_keys=on - enforce FOREIGN KEY constraints declared in the schema
// _journal_mode=wal - The recommended running mode: "Write-ahead logging": https://www.sqlite.org/draft/matrix/wal.html
// _txlock=immediate - db transactions start as soon as "BeginTx()" is called. Avoids deadlocks. https://www.sqlite.org/lang_transaction.html
// _synchronous=normal - the durability level the SQLite docs recommend for WAL mode (syncs at checkpoints rather than every commit): https://www.sqlite.org/pragma.html#pragma_synchronous
const sqliteCfg = "_foreign_keys=on&_journal_mode=wal&_txlock=immediate&_synchronous=normal"

// CreateDBFromConfig creates an appropriate ethdb.Database instance based on your config
func CreateDBFromConfig(cfg *config.EnclaveConfig, logger gethlog.Logger) (enclavedb.EnclaveDB, error) {
if err := validateDBConf(cfg); err != nil {
return nil, err
}
if cfg.UseInMemoryDB {
logger.Info("UseInMemoryDB flag is true, data will not be persisted. Creating in-memory database...")
logger.Info("UseInMemoryDB flag is true, data will not be persisted. Creating temporary sqlite database...")
// this creates a temporary sqlite sqldb
return sqlite.CreateTemporarySQLiteDB(cfg.HostID.String(), "mode=memory&cache=shared&_foreign_keys=on", *cfg, logger)
return sqlite.CreateTemporarySQLiteDB("", sqliteCfg, *cfg, logger)
}

if !cfg.WillAttest && len(cfg.SqliteDBPath) > 0 {
// persistent but not secure in an enclave, we'll connect to a throwaway sqlite DB and test out persistence/sql implementations
logger.Warn("Attestation is disabled, using a basic sqlite DB for persistence")
// when we want to test persistence after node restart the SqliteDBPath should be set
// (if empty string then a temp sqldb file will be created for the lifetime of the enclave)
return sqlite.CreateTemporarySQLiteDB(cfg.SqliteDBPath, "_foreign_keys=on", *cfg, logger)
return sqlite.CreateTemporarySQLiteDB(cfg.SqliteDBPath, sqliteCfg, *cfg, logger)
}

if !cfg.WillAttest && len(cfg.EdgelessDBHost) > 0 {
Expand Down
Loading
Loading