
Commit

Merge pull request #212 from CortexFoundation/ucwong
freeze and compress block from active leveldb
ucwong authored Jul 29, 2019
2 parents ae403b6 + 66b2e73 commit ecd09f3
Showing 71 changed files with 6,909 additions and 1,700 deletions.
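The headline change mirrors upstream go-ethereum's database split: finalized chain segments are frozen out of the active LevelDB instance into a flat-file "ancient" store (the freezer), where they can be kept compressed. Below is a minimal sketch of the new entry point, using the rawdb.NewLevelDBDatabaseWithFreezer call that appears later in this diff; the paths and sizing values are hypothetical, and the final string argument is assumed to be a metrics namespace.

package main

import (
	"log"

	"github.com/CortexFoundation/CortexTheseus/core/rawdb"
)

func main() {
	// Recent, mutable data stays in LevelDB; finalized headers, bodies
	// and receipts are appended to the flat-file freezer next to it.
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(
		"/data/cortex/chaindata",         // active LevelDB directory (hypothetical path)
		512,                              // LevelDB cache, in megabytes
		256,                              // file-handle allowance
		"/data/cortex/chaindata/ancient", // freezer directory (hypothetical path)
		"",                               // metrics namespace (assumed meaning)
	)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}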
2 changes: 1 addition & 1 deletion accounts/abi/bind/auth.go
@@ -26,7 +26,7 @@ import (
"github.com/CortexFoundation/CortexTheseus/common"
"github.com/CortexFoundation/CortexTheseus/core/types"
"github.com/CortexFoundation/CortexTheseus/crypto"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/CortexTheseus/accounts"
)

// NewTransactor is a utility method to easily create a transaction signer from
143 changes: 100 additions & 43 deletions cmd/cortex/chaincmd.go
@@ -18,15 +18,17 @@ package main

import (
"encoding/json"
"path/filepath"
"fmt"
"os"
"runtime"
"strconv"
"sync/atomic"
"time"

"github.com/CortexFoundation/CortexTheseus/cmd/utils"
"github.com/CortexFoundation/CortexTheseus/core/rawdb"
"github.com/CortexFoundation/CortexTheseus/common"
"github.com/CortexFoundation/CortexTheseus/cmd/utils"
"github.com/CortexFoundation/CortexTheseus/console"
"github.com/CortexFoundation/CortexTheseus/core"
"github.com/CortexFoundation/CortexTheseus/core/state"
@@ -36,7 +38,7 @@ import (
"github.com/CortexFoundation/CortexTheseus/event"
"github.com/CortexFoundation/CortexTheseus/log"
"github.com/CortexFoundation/CortexTheseus/trie"
"github.com/syndtr/goleveldb/leveldb/util"
//"github.com/syndtr/goleveldb/leveldb/util"
"gopkg.in/urfave/cli.v1"
)

@@ -169,6 +171,19 @@ Remove blockchain and state databases`,
The arguments are interpreted as block numbers or hashes.
Use "cortex dump 0" to dump the genesis block.`,
}
inspectCommand = cli.Command{
Action: utils.MigrateFlags(inspect),
Name: "inspect",
Usage: "Inspect the storage size for each type of data in the database",
ArgsUsage: " ",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.AncientFlag,
utils.CacheFlag,
utils.SyncModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
}
)

// initGenesis will initialise the given JSON format genesis file and writes it as
@@ -192,7 +207,7 @@ func initGenesis(ctx *cli.Context) error {
// Open and initialise both full and light databases
stack := makeFullNode(ctx)
for _, name := range []string{"chaindata"} {
chaindb, err := stack.OpenDatabase(name, 0, 0)
chaindb, err := stack.OpenDatabase(name, 0, 0, "")
if err != nil {
utils.Fatalf("Failed to open database: %v", err)
}
@@ -246,15 +261,17 @@ func importChain(ctx *cli.Context) error {
fmt.Printf("Import done in %v.\n\n", time.Since(start))

// Output pre-compaction stats mostly to see the import thrashing
db := chainDb.(*ctxcdb.LDBDatabase)
db := chainDb.(ctxcdb.Database)

stats, err := db.LDB().GetProperty("leveldb.stats")
//stats, err := db.LDB().GetProperty("leveldb.stats")
stats, err := db.Stat("leveldb.stats")
if err != nil {
utils.Fatalf("Failed to read database stats: %v", err)
}
fmt.Println(stats)

ioStats, err := db.LDB().GetProperty("leveldb.iostats")
//ioStats, err := db.LDB().GetProperty("leveldb.iostats")
ioStats, err := db.Stat("leveldb.iostats")
if err != nil {
utils.Fatalf("Failed to read database iostats: %v", err)
}
@@ -279,18 +296,18 @@ func importChain(ctx *cli.Context) error {
// Compact the entire database to more accurately measure disk io and print the stats
start = time.Now()
fmt.Println("Compacting entire database...")
if err = db.LDB().CompactRange(util.Range{}); err != nil {
utils.Fatalf("Compaction failed: %v", err)
}
if err = db.Compact(nil, nil); err != nil {
utils.Fatalf("Compaction failed: %v", err)
}
fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

stats, err = db.LDB().GetProperty("leveldb.stats")
stats, err = db.Stat("leveldb.stats")
if err != nil {
utils.Fatalf("Failed to read database stats: %v", err)
}
fmt.Println(stats)

ioStats, err = db.LDB().GetProperty("leveldb.iostats")
ioStats, err = db.Stat("leveldb.iostats")
if err != nil {
utils.Fatalf("Failed to read database iostats: %v", err)
}
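These hunks also show the database abstraction change rippling through: instead of asserting the concrete *ctxcdb.LDBDatabase and reaching into the raw LevelDB handle via LDB(), callers now stay on the ctxcdb.Database interface and use Stat and Compact. A short sketch of the pattern, assuming the interface follows upstream go-ethereum's ethdb.Database, where nil start and limit keys mean the whole keyspace:

// db is any ctxcdb.Database; no LevelDB-specific type assertion needed.
stats, err := db.Stat("leveldb.stats") // was db.LDB().GetProperty("leveldb.stats")
if err != nil {
	utils.Fatalf("Failed to read database stats: %v", err)
}
fmt.Println(stats)

// nil, nil compacts the entire key range (assumed from the upstream
// ethdb.Compactor contract).
if err := db.Compact(nil, nil); err != nil {
	utils.Fatalf("Compaction failed: %v", err)
}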
@@ -337,7 +354,7 @@ func importPreimages(ctx *cli.Context) error {
utils.Fatalf("This command requires an argument.")
}
stack := makeFullNode(ctx)
diskdb := utils.MakeChainDatabase(ctx, stack).(*ctxcdb.LDBDatabase)
diskdb := utils.MakeChainDatabase(ctx, stack).(ctxcdb.Database)

start := time.Now()
if err := utils.ImportPreimages(diskdb, ctx.Args().First()); err != nil {
@@ -353,7 +370,7 @@ func exportPreimages(ctx *cli.Context) error {
utils.Fatalf("This command requires an argument.")
}
stack := makeFullNode(ctx)
diskdb := utils.MakeChainDatabase(ctx, stack).(*ctxcdb.LDBDatabase)
diskdb := utils.MakeChainDatabase(ctx, stack).(ctxcdb.Database)

start := time.Now()
if err := utils.ExportPreimages(diskdb, ctx.Args().First()); err != nil {
@@ -365,9 +382,13 @@ func exportPreimages(ctx *cli.Context) error {

func copyDb(ctx *cli.Context) error {
// Ensure we have a source chain directory to copy
if len(ctx.Args()) != 1 {
if len(ctx.Args()) < 1 {
utils.Fatalf("Source chaindata directory path argument missing")
}

if len(ctx.Args()) < 2 {
utils.Fatalf("Source ancient chain directory path argument missing")
}
// Initialize a new chain for the running node to sync into
stack := makeFullNode(ctx)
chain, chainDb := utils.MakeChain(ctx, stack)
@@ -376,7 +397,8 @@ func copyDb(ctx *cli.Context) error {
dl := downloader.New(syncmode, 0, chainDb, new(event.TypeMux), chain, nil)

// Create a source peer to satisfy downloader requests from
db, err := ctxcdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
//db, err := ctxcdb.NewLevelDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
if err != nil {
return err
}
@@ -403,41 +425,65 @@ func copyDb(ctx *cli.Context) error {
// Compact the entire database to remove any sync overhead
start = time.Now()
fmt.Println("Compacting entire database...")
if err = chainDb.(*ctxcdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
utils.Fatalf("Compaction failed: %v", err)
}
if err = db.Compact(nil, nil); err != nil {
utils.Fatalf("Compaction failed: %v", err)
}
fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

return nil
}

func removeDB(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)

for _, name := range []string{"chaindata"} {
// Ensure the database exists in the first place
logger := log.New("database", name)
stack, config := makeConfigNode(ctx)

// Remove the full node state database
path := stack.ResolvePath("chaindata")
if common.FileExist(path) {
confirmAndRemoveDB(path, "full node state database")
} else {
log.Info("Full node state database missing", "path", path)
}
// Remove the full node ancient database
path = config.Cortex.DatabaseFreezer
switch {
case path == "":
path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
case !filepath.IsAbs(path):
path = config.Node.ResolvePath(path)
}
if common.FileExist(path) {
confirmAndRemoveDB(path, "full node ancient database")
} else {
log.Info("Full node ancient database missing", "path", path)
}
return nil
}

dbdir := stack.ResolvePath(name)
if !common.FileExist(dbdir) {
logger.Info("Database doesn't exist, skipping", "path", dbdir)
continue
}
// Confirm removal and execute
fmt.Println(dbdir)
confirm, err := console.Stdin.PromptConfirm("Remove this database?")
switch {
case err != nil:
utils.Fatalf("%v", err)
case !confirm:
logger.Warn("Database deletion aborted")
default:
start := time.Now()
os.RemoveAll(dbdir)
logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
}
}
return nil
// confirmAndRemoveDB prompts the user for a last confirmation and removes the
// folder if accepted.
func confirmAndRemoveDB(database string, kind string) {
confirm, err := console.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
switch {
case err != nil:
utils.Fatalf("%v", err)
case !confirm:
log.Info("Database deletion skipped", "path", database)
default:
start := time.Now()
filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
// If we're at the top level folder, recurse into
if path == database {
return nil
}
// Delete all the files, but not subfolders
if !info.IsDir() {
os.Remove(path)
return nil
}
return filepath.SkipDir
})
log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
}
}
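Note that confirmAndRemoveDB walks only the top level of the target folder and skips subdirectories; presumably this is so that a freezer nested at chaindata/ancient survives when only the full node state database is removed, since the ancient store is offered for deletion as a separate step above.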

func dump(ctx *cli.Context) error {
@@ -466,6 +512,17 @@ func dump(ctx *cli.Context) error {
return nil
}

func inspect(ctx *cli.Context) error {
node, _ := makeConfigNode(ctx)
defer node.Close()

_, chainDb := utils.MakeChain(ctx, node)
defer chainDb.Close()

return rawdb.InspectDatabase(chainDb)
}
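Given the flags registered for inspectCommand above, the new command would be run along the lines of cortex inspect --datadir <dir> (optionally with --datadir.ancient), a hypothetical invocation, and would print the per-category size breakdown produced by rawdb.InspectDatabase; the exact output format lives in core/rawdb and is not shown in this diff.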


// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
_, err := strconv.Atoi(x)
5 changes: 3 additions & 2 deletions cmd/utils/cmd.go
@@ -238,7 +238,7 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
}

// ImportPreimages imports a batch of exported hash preimages into the database.
func ImportPreimages(db *ctxcdb.LDBDatabase, fn string) error {
func ImportPreimages(db ctxcdb.Database, fn string) error {
log.Info("Importing preimages", "file", fn)

// Open the file handle and potentially unwrap the gzip stream
@@ -285,7 +285,7 @@ func ImportPreimages(db *ctxcdb.LDBDatabase, fn string) error {

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
func ExportPreimages(db *ctxcdb.LDBDatabase, fn string) error {
func ExportPreimages(db ctxcdb.Database, fn string) error {
log.Info("Exporting preimages", "file", fn)

// Open the file handle and potentially wrap with a gzip stream
@@ -302,6 +302,7 @@ func ExportPreimages(db *ctxcdb.LDBDatabase, fn string) error {
}
// Iterate over the preimages and export them
it := db.NewIteratorWithPrefix([]byte("secure-key-"))
defer it.Release()
for it.Next() {
if err := rlp.Encode(writer, it.Value()); err != nil {
return err
10 changes: 9 additions & 1 deletion cmd/utils/flags.go
@@ -125,6 +125,10 @@ var (
Name: "keystore",
Usage: "Directory for the keystore (default = inside the datadir)",
}
AncientFlag = DirectoryFlag{
Name: "datadir.ancient",
Usage: "Data directory for ancient chain segments (default = inside chaindata)",
}
// NoUSBFlag = cli.BoolFlag{
// Name: "nousb",
// Usage: "Disables monitoring for and managing USB hardware wallets",
@@ -1062,6 +1066,9 @@ func SetCortexConfig(ctx *cli.Context, stack *node.Node, cfg *ctxc.Config) {
cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
}
cfg.DatabaseHandles = makeDatabaseHandles()
if ctx.GlobalIsSet(AncientFlag.Name) {
cfg.DatabaseFreezer = ctx.GlobalString(AncientFlag.Name)
}

if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
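With this wiring, a hypothetical invocation such as cortex --datadir /ssd/cortex --datadir.ancient /hdd/cortex-ancient would keep the hot LevelDB data on fast storage while the append-only ancient segments sit on cheaper disks; when the flag is unset, the freezer defaults to the ancient folder inside chaindata, matching the fallback in the removeDB change above.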
@@ -1296,7 +1303,8 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ctxcdb.Database {
handles = makeDatabaseHandles()
)
name := "chaindata"
chainDb, err := stack.OpenDatabase(name, cache, handles)
//chainDb, err := stack.OpenDatabase(name, cache, handles)
chainDb, err := stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "")
if err != nil {
Fatalf("Could not open database: %v", err)
}
25 changes: 23 additions & 2 deletions common/prque/prque.go
@@ -1,5 +1,20 @@
// CookieJar - A contestant's algorithm toolbox
// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: use of this source code is governed by a BSD
// license that can be found in the LICENSE file. Alternatively, the CookieJar
// toolbox may be used in accordance with the terms and conditions contained
// in a signed written agreement between you and the author(s).

// This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque".

// Package prque implements a priority queue data structure supporting arbitrary
// value types and int64 priorities.
//
// If you would like to use a min-priority queue, simply negate the priorities.
//
// Internally the queue is based on the standard heap package working on a
// sortable version of the block based stack.
package prque

import (
@@ -11,8 +26,8 @@ type Prque struct {
cont *sstack
}

// Creates a new priority queue.
func New(setIndex setIndexCallback) *Prque {
// New creates a new priority queue.
func New(setIndex SetIndexCallback) *Prque {
return &Prque{newSstack(setIndex)}
}

@@ -21,6 +36,12 @@ func (p *Prque) Push(data interface{}, priority int64) {
heap.Push(p.cont, &item{data, priority})
}

// Peek returns the value with the greatest priority but does not pop it off.
func (p *Prque) Peek() (interface{}, int64) {
item := p.cont.blocks[0][0]
return item.value, item.priority
}

// Pops the value with the greatest priority off the stack and returns it.
// Currently no shrinking is done.
func (p *Prque) Pop() (interface{}, int64) {
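The new Peek reads the highest-priority entry without removing it; as written it indexes the first block directly, so calling it on an empty queue would panic. A minimal usage sketch, assuming this package keeps the upstream signatures (prque.New accepts a nil SetIndexCallback when item positions need not be tracked):

package main

import (
	"fmt"

	"github.com/CortexFoundation/CortexTheseus/common/prque"
)

func main() {
	q := prque.New(nil) // nil callback: item indices are not tracked
	q.Push("block-100", 100)
	q.Push("block-101", 101)

	v, prio := q.Peek()  // inspect the top entry, leave it queued
	fmt.Println(v, prio) // block-101 101

	v, prio = q.Pop()    // now remove it
	fmt.Println(v, prio) // block-101 101
}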
