Skip to content

Commit

Permalink
Merge branch 'develop' into merge-engine-api
Browse files Browse the repository at this point in the history
  • Loading branch information
bnoieh authored Nov 29, 2024
2 parents b36b338 + daa702b commit 0dc1919
Show file tree
Hide file tree
Showing 47 changed files with 1,597 additions and 329 deletions.
41 changes: 41 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,46 @@
# Changelog

## v0.5.3

This is a minor release for opBNB Mainnet and Testnet.

It fixes a txpool memory leak bug that could cause out-of-memory issues.

It is recommended to upgrade to this version for both Mainnet and Testnet.

### What's Changed
* fix: txpool reheap out-of-memory issues by @andyzhang2023 in https://github.com/bnb-chain/op-geth/pull/211

### Docker Images
ghcr.io/bnb-chain/op-geth:v0.5.3

**Full Changelog**: https://github.com/bnb-chain/op-geth/compare/v0.5.2...v0.5.3

## v0.5.2

This is a minor release for opBNB Mainnet and Testnet.

It includes several optimizations and improvements, including the introduction of a new feature to automatically recover from unexpected shutdowns, support for multi-database features, and fixes to various bugs.

Upgrading is optional.

### What's Changed
* feat: add recover node buffer list for pathdb by @sysvm in https://github.com/bnb-chain/op-geth/pull/126
* fix(op-geth): add new field in SimulateGaslessBundleResp by @redhdx in https://github.com/bnb-chain/op-geth/pull/205
* feat: support multi database feature for op by @jingjunLi in https://github.com/bnb-chain/op-geth/pull/127
* fix: Fix pbss snapshot inconsistency with engine-sync enabled when starting by @krish-nr in https://github.com/bnb-chain/op-geth/pull/189
* fix: fix StateScheme overwrite bug by @jingjunLi in https://github.com/bnb-chain/op-geth/pull/220
* fix(op-geth): fix gasless receipt l1fee by @redhdx in https://github.com/bnb-chain/op-geth/pull/219
* feat: sequencer auto recover when meet an unexpected shutdown by @krish-nr in https://github.com/bnb-chain/op-geth/pull/166

### New Contributors
* @jingjunLi made their first contribution in https://github.com/bnb-chain/op-geth/pull/127

### Docker Images
ghcr.io/bnb-chain/op-geth:v0.5.2

**Full Changelog**: https://github.com/bnb-chain/op-geth/compare/v0.5.1...v0.5.2

## v0.5.1

This release includes various optimizations and improvements to transaction processing, CI support, and network infrastructure.
Expand Down
25 changes: 24 additions & 1 deletion cmd/geth/chaincmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@ var (
utils.CachePreimagesFlag,
utils.OverrideCancun,
utils.OverrideVerkle,
utils.MultiDataBaseFlag,
}, utils.DatabaseFlags),
Description: `
The init command initializes a new genesis block and definition for the network.
Expand Down Expand Up @@ -221,12 +222,27 @@ func initGenesis(ctx *cli.Context) error {
overrides.OverrideVerkle = &v
}
for _, name := range []string{"chaindata", "lightchaindata"} {
chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false)
chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false, false)
if err != nil {
utils.Fatalf("Failed to open database: %v", err)
}
defer chaindb.Close()

// if the trie data dir has been set, new trie db with a new state database
if ctx.IsSet(utils.MultiDataBaseFlag.Name) {
statediskdb, dbErr := stack.OpenDatabaseWithFreezer(name+"/state", 0, 0, "", "", false, true)
if dbErr != nil {
utils.Fatalf("Failed to open separate trie database: %v", dbErr)
}
chaindb.SetStateStore(statediskdb)
blockdb, err := stack.OpenDatabaseWithFreezer(name+"/block", 0, 0, "", "", false, true)
if err != nil {
utils.Fatalf("Failed to open separate block database: %v", err)
}
chaindb.SetBlockStore(blockdb)
log.Warn("Multi-database is an experimental feature")
}

triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle(), true)
defer triedb.Close()

Expand Down Expand Up @@ -265,6 +281,13 @@ func dumpGenesis(ctx *cli.Context) error {
}
continue
}
// set the separate state & block database
if stack.CheckIfMultiDataBase() && err == nil {
stateDiskDb := utils.MakeStateDataBase(ctx, stack, true)
db.SetStateStore(stateDiskDb)
blockDb := utils.MakeBlockDatabase(ctx, stack, true)
db.SetBlockStore(blockDb)
}
genesis, err := core.ReadGenesis(db)
if err != nil {
utils.Fatalf("failed to read genesis: %s", err)
Expand Down
106 changes: 92 additions & 14 deletions cmd/geth/dbcmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -377,7 +377,6 @@ func inspectTrie(ctx *cli.Context) error {

db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()

var headerBlockHash common.Hash
if ctx.NArg() >= 1 {
if ctx.Args().Get(0) == "latest" {
Expand Down Expand Up @@ -495,14 +494,19 @@ func checkStateContent(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
var (
it = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
it ethdb.Iterator
hasher = crypto.NewKeccakState()
got = make([]byte, 32)
errs int
count int
startTime = time.Now()
lastLog = time.Now()
)
if stack.CheckIfMultiDataBase() {
it = rawdb.NewKeyLengthIterator(db.StateStore().NewIterator(prefix, start), 32)
} else {
it = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
}
for it.Next() {
count++
k := it.Key()
Expand Down Expand Up @@ -549,6 +553,13 @@ func dbStats(ctx *cli.Context) error {
defer db.Close()

showLeveldbStats(db)
if stack.CheckIfMultiDataBase() {
fmt.Println("show stats of state store")
showLeveldbStats(db.StateStore())
fmt.Println("show stats of block store")
showLeveldbStats(db.BlockStore())
}

return nil
}

Expand All @@ -562,13 +573,38 @@ func dbCompact(ctx *cli.Context) error {
log.Info("Stats before compaction")
showLeveldbStats(db)

if stack.CheckIfMultiDataBase() {
fmt.Println("show stats of state store")
showLeveldbStats(db.StateStore())
fmt.Println("show stats of block store")
showLeveldbStats(db.BlockStore())
}

log.Info("Triggering compaction")
if err := db.Compact(nil, nil); err != nil {
log.Info("Compact err", "error", err)
log.Error("Compact err", "error", err)
return err
}

if stack.CheckIfMultiDataBase() {
if err := db.StateStore().Compact(nil, nil); err != nil {
log.Error("Compact err", "error", err)
return err
}
if err := db.BlockStore().Compact(nil, nil); err != nil {
log.Error("Compact err", "error", err)
return err
}
}

log.Info("Stats after compaction")
showLeveldbStats(db)
if stack.CheckIfMultiDataBase() {
fmt.Println("show stats of state store after compaction")
showLeveldbStats(db.StateStore())
fmt.Println("show stats of block store after compaction")
showLeveldbStats(db.BlockStore())
}
return nil
}

Expand All @@ -588,8 +624,17 @@ func dbGet(ctx *cli.Context) error {
log.Info("Could not decode the key", "error", err)
return err
}
opDb := db
if stack.CheckIfMultiDataBase() {
keyType := rawdb.DataTypeByKey(key)
if keyType == rawdb.StateDataType {
opDb = db.StateStore()
} else if keyType == rawdb.BlockDataType {
opDb = db.BlockStore()
}
}

data, err := db.Get(key)
data, err := opDb.Get(key)
if err != nil {
log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
return err
Expand All @@ -606,8 +651,14 @@ func dbTrieGet(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()

db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
var db ethdb.Database
chaindb := utils.MakeChainDatabase(ctx, stack, true)
if chaindb.StateStore() != nil {
db = chaindb.StateStore()
} else {
db = chaindb
}
defer chaindb.Close()

scheme := ctx.String(utils.StateSchemeFlag.Name)
if scheme == "" {
Expand Down Expand Up @@ -673,8 +724,14 @@ func dbTrieDelete(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()

db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
var db ethdb.Database
chaindb := utils.MakeChainDatabase(ctx, stack, true)
if chaindb.StateStore() != nil {
db = chaindb.StateStore()
} else {
db = chaindb
}
defer chaindb.Close()

scheme := ctx.String(utils.StateSchemeFlag.Name)
if scheme == "" {
Expand Down Expand Up @@ -742,7 +799,17 @@ func dbDelete(ctx *cli.Context) error {
log.Error("Could not decode the key", "error", err)
return err
}
data, err := db.Get(key)
opDb := db
if stack.CheckIfMultiDataBase() {
keyType := rawdb.DataTypeByKey(key)
if keyType == rawdb.StateDataType {
opDb = db.StateStore()
} else if keyType == rawdb.BlockDataType {
opDb = db.BlockStore()
}
}

data, err := opDb.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
Expand Down Expand Up @@ -780,11 +847,22 @@ func dbPut(ctx *cli.Context) error {
log.Error("Could not decode the value", "error", err)
return err
}
data, err = db.Get(key)

opDb := db
if stack.CheckIfMultiDataBase() {
keyType := rawdb.DataTypeByKey(key)
if keyType == rawdb.StateDataType {
opDb = db.StateStore()
} else if keyType == rawdb.BlockDataType {
opDb = db.BlockStore()
}
}

data, err = opDb.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
return db.Put(key, value)
return opDb.Put(key, value)
}

// dbDumpTrie shows the key-value slots of a given storage trie
Expand Down Expand Up @@ -875,7 +953,7 @@ func freezerInspect(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
ancient := stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name))
stack.Close()
return rawdb.InspectFreezerTable(ancient, freezer, table, start, end)
return rawdb.InspectFreezerTable(ancient, freezer, table, start, end, stack.CheckIfMultiDataBase())
}

func importLDBdata(ctx *cli.Context) error {
Expand Down Expand Up @@ -1016,7 +1094,7 @@ func showMetaData(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()

ancients, err := db.Ancients()
ancients, err := db.BlockStore().Ancients()
if err != nil {
fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
}
Expand Down Expand Up @@ -1061,7 +1139,7 @@ func hbss2pbss(ctx *cli.Context) error {
defer stack.Close()

db := utils.MakeChainDatabase(ctx, stack, false)
db.Sync()
db.BlockStore().Sync()
defer db.Close()

config := triedb.HashDefaults
Expand Down
1 change: 1 addition & 0 deletions cmd/geth/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,7 @@ var (
utils.CacheSnapshotFlag,
utils.CacheNoPrefetchFlag,
utils.CachePreimagesFlag,
utils.MultiDataBaseFlag,
utils.AllowInsecureNoTriesFlag,
utils.CacheLogSizeFlag,
utils.FDLimitFlag,
Expand Down
6 changes: 3 additions & 3 deletions cmd/utils/cmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -524,13 +524,13 @@ func ImportPreimages(db ethdb.Database, fn string) error {
// Accumulate the preimages and flush when enough ws gathered
preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
if len(preimages) > 1024 {
rawdb.WritePreimages(db, preimages)
rawdb.WritePreimages(db.StateStore(), preimages)
preimages = make(map[common.Hash][]byte)
}
}
// Flush the last batch preimage data
if len(preimages) > 0 {
rawdb.WritePreimages(db, preimages)
rawdb.WritePreimages(db.StateStore(), preimages)
}
return nil
}
Expand Down Expand Up @@ -642,7 +642,7 @@ func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn
}()

for item := range hashCh {
preimage := rawdb.ReadPreimage(chaindb, item.Hash)
preimage := rawdb.ReadPreimage(chaindb.StateStore(), item.Hash)
if len(preimage) == 0 {
return fmt.Errorf("missing preimage for %v", item.Hash)
}
Expand Down
37 changes: 36 additions & 1 deletion cmd/utils/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,12 @@ var (
Value: flags.DirectoryString(node.DefaultDataDir()),
Category: flags.EthCategory,
}
MultiDataBaseFlag = &cli.BoolFlag{
Name: "multidatabase",
Usage: "Enable a separated state and block database, it will be created within two subdirectory called state and block, " +
"Users can copy this state or block directory to another directory or disk, and then create a symbolic link to the state directory under the chaindata",
Category: flags.EthCategory,
}
RemoteDBFlag = &cli.StringFlag{
Name: "remotedb",
Usage: "URL for remote database",
Expand Down Expand Up @@ -2319,14 +2325,43 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.
case ctx.String(SyncModeFlag.Name) == "light":
chainDb, err = stack.OpenDatabase("lightchaindata", cache, handles, "", readonly)
default:
chainDb, err = stack.OpenDatabaseWithFreezer("chaindata", cache, handles, ctx.String(AncientFlag.Name), "", readonly)
chainDb, err = stack.OpenDatabaseWithFreezer("chaindata", cache, handles, ctx.String(AncientFlag.Name), "", readonly, false)
// set the separate state database
if stack.CheckIfMultiDataBase() && err == nil {
stateDiskDb := MakeStateDataBase(ctx, stack, readonly)
chainDb.SetStateStore(stateDiskDb)
blockDb := MakeBlockDatabase(ctx, stack, readonly)
chainDb.SetBlockStore(blockDb)
}
}
if err != nil {
Fatalf("Could not open database: %v", err)
}
return chainDb
}

// MakeStateDataBase opens the separate state database ("chaindata/state") using the
// flags passed to the client and hard crashes the process if the open fails.
// Used when the multi-database feature splits trie/state data out of the main chain store.
func MakeStateDataBase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.Database {
	var (
		// Cache budget derived from the global cache flags.
		cacheSize = ctx.Int(CacheFlag.Name) * ctx.Int(CacheDatabaseFlag.Name) / 100
		// The state store receives 90% of the file-descriptor allowance
		// (MakeBlockDatabase takes the remaining 10%).
		fdQuota = MakeDatabaseHandles(ctx.Int(FDLimitFlag.Name)) * 90 / 100
	)
	db, err := stack.OpenDatabaseWithFreezer("chaindata/state", cacheSize, fdQuota, "", "", readonly, true)
	if err != nil {
		Fatalf("Failed to open separate trie database: %v", err)
	}
	return db
}

// MakeBlockDatabase opens the separate block database ("chaindata/block") using the
// flags passed to the client and hard crashes the process if the open fails.
// Used when the multi-database feature splits block data out of the main chain store.
func MakeBlockDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.Database {
	var (
		// Cache budget derived from the global cache flags.
		cacheSize = ctx.Int(CacheFlag.Name) * ctx.Int(CacheDatabaseFlag.Name) / 100
		// The block store is allotted 10% of the file-descriptor allowance
		// (MakeStateDataBase takes the other 90%).
		fdQuota = MakeDatabaseHandles(ctx.Int(FDLimitFlag.Name)) / 10
	)
	db, err := stack.OpenDatabaseWithFreezer("chaindata/block", cacheSize, fdQuota, "", "", readonly, true)
	if err != nil {
		Fatalf("Failed to open separate block database: %v", err)
	}
	return db
}

func PathDBConfigAddJournalFilePath(stack *node.Node, config *pathdb.Config) *pathdb.Config {
path := fmt.Sprintf("%s/%s", stack.ResolvePath("chaindata"), eth.JournalFileName)
config.JournalFilePath = path
Expand Down
Loading

0 comments on commit 0dc1919

Please sign in to comment.