diff --git a/agreement/service.go b/agreement/service.go index 765f4270a9..263efe0b09 100644 --- a/agreement/service.go +++ b/agreement/service.go @@ -94,10 +94,15 @@ func MakeService(p Parameters) (*Service, error) { s.log = makeServiceLogger(p.Logger) + // If cadaver directory is not set, use cold data directory (which may also not be set) + cadaverDir := p.CadaverDirectory + if cadaverDir == "" { + cadaverDir = p.ColdDataDir + } // GOAL2-541: tracer is not concurrency safe. It should only ever be // accessed by main state machine loop. var err error - s.tracer, err = makeTracer(s.log, defaultCadaverName, p.CadaverSizeTarget, p.CadaverDirectory, + s.tracer, err = makeTracer(s.log, defaultCadaverName, p.CadaverSizeTarget, cadaverDir, s.Local.EnableAgreementReporting, s.Local.EnableAgreementTimeMetrics) if err != nil { return nil, err diff --git a/catchup/pref_test.go b/catchup/pref_test.go index 688958ab26..be3f67f473 100644 --- a/catchup/pref_test.go +++ b/catchup/pref_test.go @@ -61,7 +61,8 @@ func BenchmarkServiceFetchBlocks(b *testing.B) { for i := 0; i < b.N; i++ { inMem := true - local, err := data.LoadLedger(logging.TestingLog(b), b.Name()+"empty"+strconv.Itoa(i), inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, nil, cfg) + prefix := b.Name() + "empty" + strconv.Itoa(i) + local, err := data.LoadLedger(logging.TestingLog(b), prefix, inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, nil, cfg) require.NoError(b, err) // Make Service @@ -148,7 +149,8 @@ func benchenv(t testing.TB, numAccounts, numBlocks int) (ledger, emptyLedger *da const inMem = true cfg := config.GetDefaultLocal() cfg.Archival = true - emptyLedger, err = data.LoadLedger(logging.TestingLog(t), t.Name()+"empty", inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, nil, cfg) + prefix := t.Name() + "empty" + emptyLedger, err = data.LoadLedger(logging.TestingLog(t), prefix, inMem, protocol.ConsensusCurrentVersion, 
genesisBalances, "", crypto.Digest{}, nil, cfg) require.NoError(t, err) ledger, err = datatest.FabricateLedger(logging.TestingLog(t), t.Name(), parts, genesisBalances, emptyLedger.LastRound()+basics.Round(numBlocks)) diff --git a/cmd/algod/main.go b/cmd/algod/main.go index 8b8bb0f81c..5450ff5b64 100644 --- a/cmd/algod/main.go +++ b/cmd/algod/main.go @@ -26,8 +26,6 @@ import ( "strings" "time" - "github.com/gofrs/flock" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/daemon/algod" @@ -40,6 +38,7 @@ import ( "github.com/algorand/go-algorand/util" "github.com/algorand/go-algorand/util/metrics" "github.com/algorand/go-algorand/util/tokens" + "github.com/gofrs/flock" "github.com/algorand/go-deadlock" ) @@ -93,11 +92,13 @@ func run() int { baseHeartbeatEvent.Info.Branch = version.Branch baseHeartbeatEvent.Info.CommitHash = version.GetCommitHash() + // -b will print only the git branch and then exit if *branchCheck { fmt.Println(config.Branch) return 0 } + // -c will print only the release channel and then exit if *channelCheck { fmt.Println(config.Channel) return 0 @@ -115,24 +116,13 @@ func run() int { } genesisPath := *genesisFile - if genesisPath == "" { - genesisPath = filepath.Join(dataDir, config.GenesisJSONFile) - } - - // Load genesis - genesisText, err := os.ReadFile(genesisPath) - if err != nil { - fmt.Fprintf(os.Stderr, "Cannot read genesis file %s: %v\n", genesisPath, err) - return 1 - } - - var genesis bookkeeping.Genesis - err = protocol.DecodeJSON(genesisText, &genesis) + genesis, genesisText, err := loadGenesis(dataDir, genesisPath) if err != nil { - fmt.Fprintf(os.Stderr, "Cannot parse genesis file %s: %v\n", genesisPath, err) + fmt.Fprintf(os.Stderr, "Error loading genesis file (%s): %v", genesisPath, err) return 1 } + // -G will print only the genesis ID and then exit if *genesisPrint { fmt.Println(genesis.ID()) return 0 @@ -453,3 +443,19 @@ func resolveDataDir() string { } return dir 
} + +func loadGenesis(dataDir string, genesisPath string) (bookkeeping.Genesis, string, error) { + if genesisPath == "" { + genesisPath = filepath.Join(dataDir, config.GenesisJSONFile) + } + genesisText, err := os.ReadFile(genesisPath) + if err != nil { + return bookkeeping.Genesis{}, "", err + } + var genesis bookkeeping.Genesis + err = protocol.DecodeJSON(genesisText, &genesis) + if err != nil { + return bookkeeping.Genesis{}, "", err + } + return genesis, string(genesisText), nil +} diff --git a/config/config_test.go b/config/config_test.go index cc089fc3ab..c88b53834e 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -654,3 +654,152 @@ func TestLocal_RecalculateConnectionLimits(t *testing.T) { }) } } + +// Tests that ensureAbsGenesisDir resolves a path to an absolute path, appends the genesis directory, and creates any needed directories +func TestEnsureAbsDir(t *testing.T) { + partitiontest.PartitionTest(t) + + testDirectory := t.TempDir() + + t1 := filepath.Join(testDirectory, "test1") + t1Abs, err := ensureAbsGenesisDir(t1, "myGenesisID") + require.NoError(t, err) + require.DirExists(t, t1Abs) + require.Equal(t, testDirectory+"/test1/myGenesisID", t1Abs) + + // confirm that relative paths become absolute + t2 := filepath.Join(testDirectory, "test2", "..") + t2Abs, err := ensureAbsGenesisDir(t2, "myGenesisID") + require.NoError(t, err) + require.DirExists(t, t2Abs) + require.Equal(t, testDirectory+"/myGenesisID", t2Abs) +} + +// TestEnsureAndResolveGenesisDirs confirms that paths provided in the config are resolved to absolute paths and are created if relevant +func TestEnsureAndResolveGenesisDirs(t *testing.T) { + partitiontest.PartitionTest(t) + + cfg := GetDefaultLocal() + + testDirectory := t.TempDir() + // insert some "Bad" path elements to see them removed when converted to absolute + cfg.TrackerDBDir = filepath.Join(testDirectory, "BAD/../custom_tracker") + cfg.BlockDBDir = filepath.Join(testDirectory, "/BAD/BAD/../../custom_block") + 
cfg.CrashDBDir = filepath.Join(testDirectory, "custom_crash") + cfg.StateproofDir = filepath.Join(testDirectory, "/RELATIVEPATHS/../RELATIVE/../custom_stateproof") + cfg.CatchpointDir = filepath.Join(testDirectory, "custom_catchpoint") + + paths, err := cfg.EnsureAndResolveGenesisDirs(testDirectory, "myGenesisID") + require.NoError(t, err) + + // confirm that the paths are absolute, and contain the genesisID + require.Equal(t, testDirectory+"/custom_tracker/myGenesisID", paths.TrackerGenesisDir) + require.DirExists(t, paths.TrackerGenesisDir) + require.Equal(t, testDirectory+"/custom_block/myGenesisID", paths.BlockGenesisDir) + require.DirExists(t, paths.BlockGenesisDir) + require.Equal(t, testDirectory+"/custom_crash/myGenesisID", paths.CrashGenesisDir) + require.DirExists(t, paths.CrashGenesisDir) + require.Equal(t, testDirectory+"/custom_stateproof/myGenesisID", paths.StateproofGenesisDir) + require.DirExists(t, paths.StateproofGenesisDir) + require.Equal(t, testDirectory+"/custom_catchpoint/myGenesisID", paths.CatchpointGenesisDir) + require.DirExists(t, paths.CatchpointGenesisDir) +} + +// TestEnsureAndResolveGenesisDirs_hierarchy confirms that when only some directories are specified, other directories defer to them +func TestEnsureAndResolveGenesisDirs_hierarchy(t *testing.T) { + partitiontest.PartitionTest(t) + + cfg := GetDefaultLocal() + testDirectory := t.TempDir() + paths, err := cfg.EnsureAndResolveGenesisDirs(testDirectory, "myGenesisID") + require.NoError(t, err) + // confirm that if only the root is specified, it is used for all directories + require.Equal(t, testDirectory+"/myGenesisID", paths.TrackerGenesisDir) + require.DirExists(t, paths.TrackerGenesisDir) + require.Equal(t, testDirectory+"/myGenesisID", paths.BlockGenesisDir) + require.DirExists(t, paths.BlockGenesisDir) + require.Equal(t, testDirectory+"/myGenesisID", paths.CrashGenesisDir) + require.DirExists(t, paths.CrashGenesisDir) + require.Equal(t, testDirectory+"/myGenesisID", 
paths.StateproofGenesisDir) + require.DirExists(t, paths.StateproofGenesisDir) + require.Equal(t, testDirectory+"/myGenesisID", paths.CatchpointGenesisDir) + require.DirExists(t, paths.CatchpointGenesisDir) + + cfg = GetDefaultLocal() + testDirectory = t.TempDir() + hot := filepath.Join(testDirectory, "hot") + cold := filepath.Join(testDirectory, "cold") + cfg.HotDataDir = hot + cfg.ColdDataDir = cold + paths, err = cfg.EnsureAndResolveGenesisDirs(testDirectory, "myGenesisID") + require.NoError(t, err) + // confirm that if hot/cold are specified, hot/cold are used for appropriate directories + require.Equal(t, hot+"/myGenesisID", paths.TrackerGenesisDir) + require.DirExists(t, paths.TrackerGenesisDir) + require.Equal(t, cold+"/myGenesisID", paths.BlockGenesisDir) + require.DirExists(t, paths.BlockGenesisDir) + require.Equal(t, cold+"/myGenesisID", paths.CrashGenesisDir) + require.DirExists(t, paths.CrashGenesisDir) + require.Equal(t, cold+"/myGenesisID", paths.StateproofGenesisDir) + require.DirExists(t, paths.StateproofGenesisDir) + require.Equal(t, cold+"/myGenesisID", paths.CatchpointGenesisDir) + require.DirExists(t, paths.CatchpointGenesisDir) +} + +// TestEnsureAndResolveGenesisDirsError confirms that if a path can't be created, an error is returned +func TestEnsureAndResolveGenesisDirsError(t *testing.T) { + partitiontest.PartitionTest(t) + + cfg := GetDefaultLocal() + + testDirectory := t.TempDir() + // insert some "Bad" path elements to see them removed when converted to absolute + cfg.TrackerDBDir = filepath.Join(testDirectory, "BAD/../custom_tracker") + cfg.BlockDBDir = filepath.Join(testDirectory, "/BAD/BAD/../../custom_block") + cfg.CrashDBDir = filepath.Join(testDirectory, "custom_crash") + cfg.StateproofDir = filepath.Join(testDirectory, "/RELATIVEPATHS/../RELATIVE/../custom_stateproof") + cfg.CatchpointDir = filepath.Join(testDirectory, "custom_catchpoint") + + // first try an error with an empty root dir + paths, err := 
cfg.EnsureAndResolveGenesisDirs("", "myGenesisID") + require.Empty(t, paths) + require.Error(t, err) + require.Contains(t, err.Error(), "rootDir is required") + + require.NoError(t, os.Chmod(testDirectory, 0200)) + + // now try an error with a root dir that can't be written to + paths, err = cfg.EnsureAndResolveGenesisDirs(testDirectory, "myGenesisID") + require.Empty(t, paths) + require.Error(t, err) + require.Contains(t, err.Error(), "permission denied") +} + +// TestResolveLogPaths confirms that log paths are resolved to the most appropriate data directory of the supplied config +func TestResolveLogPaths(t *testing.T) { + partitiontest.PartitionTest(t) + + // on default settings, the log paths should be in the root directory + cfg := GetDefaultLocal() + log, archive := cfg.ResolveLogPaths("root") + require.Equal(t, "root/node.log", log) + require.Equal(t, "root/node.archive.log", archive) + + // with supplied hot/cold data directories, they resolve to hot/cold + cfg = GetDefaultLocal() + cfg.HotDataDir = "hot" + cfg.ColdDataDir = "cold" + log, archive = cfg.ResolveLogPaths("root") + require.Equal(t, "hot/node.log", log) + require.Equal(t, "cold/node.archive.log", archive) + + // with supplied hot/cold data AND specific paths directories, they resolve to the specific paths + cfg = GetDefaultLocal() + cfg.HotDataDir = "hot" + cfg.ColdDataDir = "cold" + cfg.LogFileDir = "mycoolLogDir" + cfg.LogArchiveDir = "myCoolLogArchive" + log, archive = cfg.ResolveLogPaths("root") + require.Equal(t, "mycoolLogDir/node.log", log) + require.Equal(t, "myCoolLogArchive/node.archive.log", archive) +} diff --git a/config/localTemplate.go b/config/localTemplate.go index aa612b6fe0..4202c2648a 100644 --- a/config/localTemplate.go +++ b/config/localTemplate.go @@ -17,6 +17,7 @@ package config import ( + "fmt" "os" "path/filepath" "strings" @@ -76,7 +77,52 @@ type Local struct { BaseLoggerDebugLevel uint32 `version[0]:"1" version[1]:"4"` // if this is 0, do not produce agreement.cadaver 
CadaverSizeTarget uint64 `version[0]:"1073741824" version[24]:"0"` - CadaverDirectory string `version[27]:""` + // if this is not set, MakeService will attempt to use ColdDataDir instead + CadaverDirectory string `version[27]:""` + + // HotDataDir is an optional directory to store data that is frequently accessed by the node. + // For isolation, the node will create a subdirectory in this location, named by the genesis-id of the network. + // If not specified, the node will use the runtime supplied datadir to store this data. + // Individual resources may have their own override specified, which would override this setting for that resource. + // Setting HotDataDir to a dedicated high performance disk allows for basic disc tuning. + HotDataDir string `version[31]:""` + + // ColdDataDir is an optional directory to store data that is infrequently accessed by the node. + // For isolation, the node will create a subdirectory in this location, named by the genesis-id of the network. + // If not specified, the node will use the runtime supplied datadir. + // Individual resources may have their own override specified, which would override this setting for that resource. + // Setting ColdDataDir to a less critical or cheaper disk allows for basic disc tuning. + ColdDataDir string `version[31]:""` + + // TrackerDbDir is an optional directory to store the tracker database. + // For isolation, the node will create a subdirectory in this location, named by the genesis-id of the network. + // If not specified, the node will use the HotDataDir. + TrackerDBDir string `version[31]:""` + // BlockDBDir is an optional directory to store the block database. + // For isolation, the node will create a subdirectory in this location, named by the genesis-id of the network. + // If not specified, the node will use the ColdDataDir. 
+ BlockDBDir string `version[31]:""` + // CatchpointDir is an optional directory to store catchpoint files, + // except for the in-progress temp file, which will use the HotDataDir and is not separately configurable. + // For isolation, the node will create a subdirectory in this location, named by the genesis-id of the network. + // If not specified, the node will use the ColdDataDir. + CatchpointDir string `version[31]:""` + // StateproofDir is an optional directory to store stateproof data. + // For isolation, the node will create a subdirectory in this location, named by the genesis-id of the network. + // If not specified, the node will use the ColdDataDir. + StateproofDir string `version[31]:""` + // CrashDBDir is an optional directory to store the crash database. + // For isolation, the node will create a subdirectory in this location, named by the genesis-id of the network. + // If not specified, the node will use the ColdDataDir. + CrashDBDir string `version[31]:""` + + // LogFileDir is an optional directory to store the log, node.log + // If not specified, the node will use the HotDataDir. + // The -o command line option can be used to override this output location. + LogFileDir string `version[31]:""` + // LogArchiveDir is an optional directory to store the log archive. + // If not specified, the node will use the ColdDataDir. + LogArchiveDir string `version[31]:""` // IncomingConnectionsLimit specifies the max number of long-lived incoming // connections. 0 means no connections allowed. Must be non-negative. 
@@ -645,6 +691,150 @@ func (cfg Local) IsGossipServer() bool { return cfg.NetAddress != "" } +// ensureAbsGenesisDir will convert a path to absolute, and will attempt to make a genesis directory there +func ensureAbsGenesisDir(path string, genesisID string) (string, error) { + pathAbs, err := filepath.Abs(path) + if err != nil { + return "", err + } + genesisDir := filepath.Join(pathAbs, genesisID) + err = os.MkdirAll(genesisDir, 0700) + if err != nil && !os.IsExist(err) { + return "", err + } + return genesisDir, nil +} + +// ResolvedGenesisDirs is a collection of directories including Genesis ID +// Subdirectories for execution of a node +type ResolvedGenesisDirs struct { + RootGenesisDir string + HotGenesisDir string + ColdGenesisDir string + TrackerGenesisDir string + BlockGenesisDir string + CatchpointGenesisDir string + StateproofGenesisDir string + CrashGenesisDir string +} + +// String returns the Genesis Directory values as a string +func (rgd ResolvedGenesisDirs) String() string { + ret := "" + ret += fmt.Sprintf("RootGenesisDir: %s\n", rgd.RootGenesisDir) + ret += fmt.Sprintf("HotGenesisDir: %s\n", rgd.HotGenesisDir) + ret += fmt.Sprintf("ColdGenesisDir: %s\n", rgd.ColdGenesisDir) + ret += fmt.Sprintf("TrackerGenesisDir: %s\n", rgd.TrackerGenesisDir) + ret += fmt.Sprintf("BlockGenesisDir: %s\n", rgd.BlockGenesisDir) + ret += fmt.Sprintf("CatchpointGenesisDir: %s\n", rgd.CatchpointGenesisDir) + ret += fmt.Sprintf("StateproofGenesisDir: %s\n", rgd.StateproofGenesisDir) + ret += fmt.Sprintf("CrashGenesisDir: %s\n", rgd.CrashGenesisDir) + return ret +} + +// ResolveLogPaths will return the most appropriate location for liveLog and archive, given user config +func (cfg *Local) ResolveLogPaths(rootDir string) (liveLog, archive string) { + // the default locations of log and archive are root + liveLog = filepath.Join(rootDir, "node.log") + archive = filepath.Join(rootDir, cfg.LogArchiveName) + // if hot data dir is set, use it for the base of logs + if 
cfg.HotDataDir != "" { + liveLog = filepath.Join(cfg.HotDataDir, "node.log") + } + // if cold data dir is set, use it for the base of archives + if cfg.ColdDataDir != "" { + archive = filepath.Join(cfg.ColdDataDir, cfg.LogArchiveName) + } + // if LogFileDir is set, use it instead + if cfg.LogFileDir != "" { + liveLog = filepath.Join(cfg.LogFileDir, "node.log") + } + // if LogArchivePath is set, use it instead + if cfg.LogArchiveDir != "" { + archive = filepath.Join(cfg.LogArchiveDir, cfg.LogArchiveName) + } + return liveLog, archive +} + +// EnsureAndResolveGenesisDirs will resolve the supplied config paths to absolute paths, and will create the genesis directories of each +// returns a ResolvedGenesisDirs struct with the resolved paths for use during runtime +func (cfg *Local) EnsureAndResolveGenesisDirs(rootDir, genesisID string) (ResolvedGenesisDirs, error) { + var resolved ResolvedGenesisDirs + var err error + if rootDir != "" { + resolved.RootGenesisDir, err = ensureAbsGenesisDir(rootDir, genesisID) + if err != nil { + return ResolvedGenesisDirs{}, err + } + } else { + return ResolvedGenesisDirs{}, fmt.Errorf("rootDir is required") + } + // if HotDataDir is not set, use RootDataDir + if cfg.HotDataDir != "" { + resolved.HotGenesisDir, err = ensureAbsGenesisDir(cfg.HotDataDir, genesisID) + if err != nil { + return ResolvedGenesisDirs{}, err + } + } else { + resolved.HotGenesisDir = resolved.RootGenesisDir + } + // if ColdDataDir is not set, use RootDataDir + if cfg.ColdDataDir != "" { + resolved.ColdGenesisDir, err = ensureAbsGenesisDir(cfg.ColdDataDir, genesisID) + if err != nil { + return ResolvedGenesisDirs{}, err + } + } else { + resolved.ColdGenesisDir = resolved.RootGenesisDir + } + // if TrackerDBDir is not set, use HotDataDir + if cfg.TrackerDBDir != "" { + resolved.TrackerGenesisDir, err = ensureAbsGenesisDir(cfg.TrackerDBDir, genesisID) + if err != nil { + return ResolvedGenesisDirs{}, err + } + } else { + resolved.TrackerGenesisDir = 
resolved.HotGenesisDir + } + // if BlockDBDir is not set, use ColdDataDir + if cfg.BlockDBDir != "" { + resolved.BlockGenesisDir, err = ensureAbsGenesisDir(cfg.BlockDBDir, genesisID) + if err != nil { + return ResolvedGenesisDirs{}, err + } + } else { + resolved.BlockGenesisDir = resolved.ColdGenesisDir + } + // if CatchpointDir is not set, use ColdDataDir + if cfg.CatchpointDir != "" { + resolved.CatchpointGenesisDir, err = ensureAbsGenesisDir(cfg.CatchpointDir, genesisID) + if err != nil { + return ResolvedGenesisDirs{}, err + } + } else { + resolved.CatchpointGenesisDir = resolved.ColdGenesisDir + } + // if StateproofDir is not set, use ColdDataDir + if cfg.StateproofDir != "" { + resolved.StateproofGenesisDir, err = ensureAbsGenesisDir(cfg.StateproofDir, genesisID) + if err != nil { + return ResolvedGenesisDirs{}, err + } + } else { + resolved.StateproofGenesisDir = resolved.ColdGenesisDir + } + // if CrashDBDir is not set, use ColdDataDir + if cfg.CrashDBDir != "" { + resolved.CrashGenesisDir, err = ensureAbsGenesisDir(cfg.CrashDBDir, genesisID) + if err != nil { + return ResolvedGenesisDirs{}, err + } + } else { + resolved.CrashGenesisDir = resolved.ColdGenesisDir + } + return resolved, nil +} + // AdjustConnectionLimits updates RestConnectionsSoftLimit, RestConnectionsHardLimit, IncomingConnectionsLimit // if requiredFDs greater than maxFDs func (cfg *Local) AdjustConnectionLimits(requiredFDs, maxFDs uint64) bool { diff --git a/config/local_defaults.go b/config/local_defaults.go index d27783273e..fd0aa20521 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -29,11 +29,13 @@ var defaultLocal = Local{ AnnounceParticipationKey: true, Archival: false, BaseLoggerDebugLevel: 4, + BlockDBDir: "", BlockServiceCustomFallbackEndpoints: "", BlockServiceMemCap: 500000000, BroadcastConnectionsLimit: -1, CadaverDirectory: "", CadaverSizeTarget: 0, + CatchpointDir: "", CatchpointFileHistoryLength: 365, CatchpointInterval: 10000, CatchpointTracking: 0, 
@@ -44,8 +46,10 @@ var defaultLocal = Local{ CatchupHTTPBlockFetchTimeoutSec: 4, CatchupLedgerDownloadRetryAttempts: 50, CatchupParallelBlocks: 16, + ColdDataDir: "", ConnectionsRateLimitingCount: 60, ConnectionsRateLimitingWindowSeconds: 1, + CrashDBDir: "", DNSBootstrapID: ".algorand.network?backup=.algorand.net&dedup=.algorand-.(network|net)", DNSSecurityFlags: 1, DeadlockDetection: 0, @@ -87,12 +91,15 @@ var defaultLocal = Local{ ForceRelayMessages: false, GossipFanout: 4, HeartbeatUpdateInterval: 600, + HotDataDir: "", IncomingConnectionsLimit: 2400, IncomingMessageFilterBucketCount: 5, IncomingMessageFilterBucketSize: 512, LedgerSynchronousMode: 2, + LogArchiveDir: "", LogArchiveMaxAge: "", LogArchiveName: "node.archive.log", + LogFileDir: "", LogSizeLimit: 1073741824, MaxAPIBoxPerApplication: 100000, MaxAPIResourcesPerAccount: 100000, @@ -123,12 +130,14 @@ var defaultLocal = Local{ RestReadTimeoutSeconds: 15, RestWriteTimeoutSeconds: 120, RunHosted: false, + StateproofDir: "", StorageEngine: "sqlite", SuggestedFeeBlockHistory: 3, SuggestedFeeSlidingWindowSize: 50, TLSCertFile: "", TLSKeyFile: "", TelemetryToLog: true, + TrackerDBDir: "", TransactionSyncDataExchangeRate: 0, TransactionSyncSignificantMessageThreshold: 0, TxBacklogReservedCapacityPerPeer: 20, diff --git a/daemon/algod/server.go b/daemon/algod/server.go index 80d0afeed3..1b40e98bfb 100644 --- a/daemon/algod/server.go +++ b/daemon/algod/server.go @@ -81,8 +81,8 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes lib.GenesisJSONText = genesisText - liveLog := filepath.Join(s.RootPath, "node.log") - archive := filepath.Join(s.RootPath, cfg.LogArchiveName) + liveLog, archive := cfg.ResolveLogPaths(s.RootPath) + var maxLogAge time.Duration var err error if cfg.LogArchiveMaxAge != "" { diff --git a/data/ledger.go b/data/ledger.go index 27949168fe..141fae6cf8 100644 --- a/data/ledger.go +++ b/data/ledger.go @@ -78,8 +78,8 @@ type roundSeed struct { // LoadLedger creates 
a Ledger object to represent the ledger with the // specified database file prefix, initializing it if necessary. -func LoadLedger( - log logging.Logger, dbFilenamePrefix string, memory bool, +func LoadLedger[T string | ledger.DirsAndPrefix]( + log logging.Logger, dir T, memory bool, genesisProto protocol.ConsensusVersion, genesisBal bookkeeping.GenesisBalances, genesisID string, genesisHash crypto.Digest, blockListeners []ledgercore.BlockListener, cfg config.Local, ) (*Ledger, error) { @@ -107,9 +107,9 @@ func LoadLedger( Accounts: genesisBal.Balances, GenesisHash: genesisHash, } - l.log.Debugf("Initializing Ledger(%s)", dbFilenamePrefix) + l.log.Debugf("Initializing Ledger(%v)", dir) - ll, err := ledger.OpenLedger(log, dbFilenamePrefix, memory, genesisInitState, cfg) + ll, err := ledger.OpenLedger(log, dir, memory, genesisInitState, cfg) if err != nil { return nil, err } diff --git a/installer/config.json.example b/installer/config.json.example index 8705d06081..62cbe6427b 100644 --- a/installer/config.json.example +++ b/installer/config.json.example @@ -8,11 +8,13 @@ "AnnounceParticipationKey": true, "Archival": false, "BaseLoggerDebugLevel": 4, + "BlockDBDir": "", "BlockServiceCustomFallbackEndpoints": "", "BlockServiceMemCap": 500000000, "BroadcastConnectionsLimit": -1, "CadaverDirectory": "", "CadaverSizeTarget": 0, + "CatchpointDir": "", "CatchpointFileHistoryLength": 365, "CatchpointInterval": 10000, "CatchpointTracking": 0, @@ -23,8 +25,10 @@ "CatchupHTTPBlockFetchTimeoutSec": 4, "CatchupLedgerDownloadRetryAttempts": 50, "CatchupParallelBlocks": 16, + "ColdDataDir": "", "ConnectionsRateLimitingCount": 60, "ConnectionsRateLimitingWindowSeconds": 1, + "CrashDBDir": "", "DNSBootstrapID": ".algorand.network?backup=.algorand.net&dedup=.algorand-.(network|net)", "DNSSecurityFlags": 1, "DeadlockDetection": 0, @@ -66,12 +70,15 @@ "ForceRelayMessages": false, "GossipFanout": 4, "HeartbeatUpdateInterval": 600, + "HotDataDir": "", "IncomingConnectionsLimit": 2400, 
"IncomingMessageFilterBucketCount": 5, "IncomingMessageFilterBucketSize": 512, "LedgerSynchronousMode": 2, + "LogArchiveDir": "", "LogArchiveMaxAge": "", "LogArchiveName": "node.archive.log", + "LogFileDir": "", "LogSizeLimit": 1073741824, "MaxAPIBoxPerApplication": 100000, "MaxAPIResourcesPerAccount": 100000, @@ -102,12 +109,14 @@ "RestReadTimeoutSeconds": 15, "RestWriteTimeoutSeconds": 120, "RunHosted": false, + "StateproofDir": "", "StorageEngine": "sqlite", "SuggestedFeeBlockHistory": 3, "SuggestedFeeSlidingWindowSize": 50, "TLSCertFile": "", "TLSKeyFile": "", "TelemetryToLog": true, + "TrackerDBDir": "", "TransactionSyncDataExchangeRate": 0, "TransactionSyncSignificantMessageThreshold": 0, "TxBacklogReservedCapacityPerPeer": 20, diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go index 7e59406025..5f8e48b005 100644 --- a/ledger/catchpointtracker.go +++ b/ledger/catchpointtracker.go @@ -90,7 +90,9 @@ func catchpointStage1Decoder(r io.Reader) (io.ReadCloser, error) { } type catchpointTracker struct { - // dbDirectory is the directory where the ledger and block sql file resides as well as the parent directory for the catchup files to be generated + // tmpDir is the path to the currently building catchpoint file + tmpDir string + // dbDirectory is the path to the finished/cold data of catchpoint dbDirectory string // catchpointInterval is the configured interval at which the catchpointTracker would generate catchpoint labels and catchpoint files. 
@@ -155,8 +157,11 @@ type catchpointTracker struct { } // initialize initializes the catchpointTracker structure -func (ct *catchpointTracker) initialize(cfg config.Local, dbPathPrefix string) { - ct.dbDirectory = filepath.Dir(dbPathPrefix) +func (ct *catchpointTracker) initialize(cfg config.Local, paths DirsAndPrefix) { + // catchpoint uses the cold data directories, except for the temp file + ct.dbDirectory = paths.CatchpointGenesisDir + // the temp file uses the hot data directories + ct.tmpDir = paths.HotGenesisDir switch cfg.CatchpointTracking { case -1: @@ -281,10 +286,8 @@ func (ct *catchpointTracker) finishFirstStageAfterCrash(dbRound basics.Round) er } // First, delete the unfinished data file. - relCatchpointDataFilePath := filepath.Join( - trackerdb.CatchpointDirName, - makeCatchpointDataFilePath(dbRound)) - err = trackerdb.RemoveSingleCatchpointFileFromDisk(ct.dbDirectory, relCatchpointDataFilePath) + relCatchpointDataFilePath := filepath.Join(trackerdb.CatchpointDirName, makeCatchpointDataFilePath(dbRound)) + err = trackerdb.RemoveSingleCatchpointFileFromDisk(ct.tmpDir, relCatchpointDataFilePath) if err != nil { return err } @@ -300,9 +303,7 @@ func (ct *catchpointTracker) finishCatchpointsAfterCrash(catchpointLookback uint for _, record := range records { // First, delete the unfinished catchpoint file. 
- relCatchpointFilePath := filepath.Join( - trackerdb.CatchpointDirName, - trackerdb.MakeCatchpointFilePath(basics.Round(record.Round))) + relCatchpointFilePath := filepath.Join(trackerdb.CatchpointDirName, trackerdb.MakeCatchpointFilePath(basics.Round(record.Round))) err = trackerdb.RemoveSingleCatchpointFileFromDisk(ct.dbDirectory, relCatchpointFilePath) if err != nil { return err @@ -776,7 +777,7 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound return nil } - catchpointDataFilePath := filepath.Join(ct.dbDirectory, trackerdb.CatchpointDirName) + catchpointDataFilePath := filepath.Join(ct.tmpDir, trackerdb.CatchpointDirName) catchpointDataFilePath = filepath.Join(catchpointDataFilePath, makeCatchpointDataFilePath(accountsRound)) @@ -802,8 +803,8 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound BlockHeaderDigest: blockHash, } - relCatchpointFilePath := - filepath.Join(trackerdb.CatchpointDirName, trackerdb.MakeCatchpointFilePath(round)) + relCatchpointFilePath := filepath.Join(trackerdb.CatchpointDirName, trackerdb.MakeCatchpointFilePath(round)) + absCatchpointFilePath := filepath.Join(ct.dbDirectory, relCatchpointFilePath) err = os.MkdirAll(filepath.Dir(absCatchpointFilePath), 0700) @@ -842,6 +843,7 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound With("accountsCount", dataInfo.TotalAccounts). With("kvsCount", dataInfo.TotalKVs). With("fileSize", fileInfo.Size()). + With("filepath", relCatchpointFilePath). With("catchpointLabel", label). 
Infof("Catchpoint file was created") @@ -1149,9 +1151,8 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account startTime := time.Now() - catchpointDataFilePath := filepath.Join(ct.dbDirectory, trackerdb.CatchpointDirName) - catchpointDataFilePath = - filepath.Join(catchpointDataFilePath, makeCatchpointDataFilePath(accountsRound)) + catchpointDataFilePath := filepath.Join(ct.tmpDir, trackerdb.CatchpointDirName) + catchpointDataFilePath = filepath.Join(catchpointDataFilePath, makeCatchpointDataFilePath(accountsRound)) more := true const shortChunkExecutionDuration = 50 * time.Millisecond @@ -1399,8 +1400,7 @@ func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseS } // if the database doesn't know about that round, see if we have that file anyway: - relCatchpointFilePath := - filepath.Join(trackerdb.CatchpointDirName, trackerdb.MakeCatchpointFilePath(round)) + relCatchpointFilePath := filepath.Join(trackerdb.CatchpointDirName, trackerdb.MakeCatchpointFilePath(round)) absCatchpointFilePath := filepath.Join(ct.dbDirectory, relCatchpointFilePath) file, err := os.OpenFile(absCatchpointFilePath, os.O_RDONLY, 0666) if err == nil && file != nil { diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go index d5ed7621e8..0c6caf26f0 100644 --- a/ledger/catchpointtracker_test.go +++ b/ledger/catchpointtracker_test.go @@ -67,7 +67,13 @@ func newCatchpointTracker(tb testing.TB, l *mockLedgerForTracker, conf config.Lo ct := &catchpointTracker{} ao := &onlineAccounts{} au.initialize(conf) - ct.initialize(conf, dbPathPrefix) + paths := DirsAndPrefix{ + ResolvedGenesisDirs: config.ResolvedGenesisDirs{ + CatchpointGenesisDir: dbPathPrefix, + HotGenesisDir: dbPathPrefix, + }, + } + ct.initialize(conf, paths) ao.initialize(conf) _, err := trackerDBInitialize(l, ct.catchpointEnabled(), dbPathPrefix) require.NoError(tb, err) @@ -100,6 +106,7 @@ func TestCatchpointGetCatchpointStream(t *testing.T) { 
require.NoError(t, err) ct.dbDirectory = temporaryDirectory + ct.tmpDir = temporaryDirectory // Create the catchpoint files with dummy data for i := 0; i < filesToCreate; i++ { @@ -169,6 +176,7 @@ func TestCatchpointsDeleteStored(t *testing.T) { ct := newCatchpointTracker(t, ml, conf, ".") defer ct.close() ct.dbDirectory = temporaryDirectory + ct.tmpDir = temporaryDirectory dummyCatchpointFilesToCreate := 42 @@ -245,9 +253,16 @@ func TestCatchpointsDeleteStoredOnSchemaUpdate(t *testing.T) { ct := &catchpointTracker{} conf := config.GetDefaultLocal() conf.CatchpointInterval = 1 - ct.initialize(conf, ".") + paths := DirsAndPrefix{ + ResolvedGenesisDirs: config.ResolvedGenesisDirs{ + CatchpointGenesisDir: ".", + HotGenesisDir: ".", + }, + } + ct.initialize(conf, paths) defer ct.close() ct.dbDirectory = temporaryDirectroy + ct.tmpDir = temporaryDirectroy _, err = trackerDBInitialize(ml, true, ct.dbDirectory) require.NoError(t, err) @@ -301,9 +316,16 @@ func TestRecordCatchpointFile(t *testing.T) { conf.CatchpointFileHistoryLength = 3 conf.Archival = true - ct.initialize(conf, ".") + paths := DirsAndPrefix{ + ResolvedGenesisDirs: config.ResolvedGenesisDirs{ + CatchpointGenesisDir: ".", + HotGenesisDir: ".", + }, + } + ct.initialize(conf, paths) defer ct.close() ct.dbDirectory = temporaryDirectory + ct.tmpDir = temporaryDirectory _, err := trackerDBInitialize(ml, true, ct.dbDirectory) require.NoError(t, err) @@ -358,9 +380,17 @@ func TestCatchpointCommitErrorHandling(t *testing.T) { conf := config.GetDefaultLocal() conf.Archival = true - ct.initialize(conf, ".") + paths := DirsAndPrefix{ + ResolvedGenesisDirs: config.ResolvedGenesisDirs{ + CatchpointGenesisDir: ".", + HotGenesisDir: ".", + }, + } + ct.initialize(conf, paths) + defer ct.close() ct.dbDirectory = temporaryDirectory + ct.tmpDir = temporaryDirectory _, err := trackerDBInitialize(ml, true, ct.dbDirectory) require.NoError(t, err) @@ -439,9 +469,17 @@ func TestCatchpointFileWithLargeSpVerification(t *testing.T) 
{ conf := config.GetDefaultLocal() conf.Archival = true - ct.initialize(conf, ".") + paths := DirsAndPrefix{ + ResolvedGenesisDirs: config.ResolvedGenesisDirs{ + CatchpointGenesisDir: ".", + HotGenesisDir: ".", + }, + } + ct.initialize(conf, paths) + defer ct.close() ct.dbDirectory = temporaryDirectory + ct.tmpDir = temporaryDirectory _, err := trackerDBInitialize(ml, true, ct.dbDirectory) require.NoError(t, err) @@ -503,7 +541,13 @@ func BenchmarkLargeCatchpointDataWriting(b *testing.B) { cfg := config.GetDefaultLocal() cfg.Archival = true ct := catchpointTracker{} - ct.initialize(cfg, ".") + paths := DirsAndPrefix{ + ResolvedGenesisDirs: config.ResolvedGenesisDirs{ + CatchpointGenesisDir: ".", + HotGenesisDir: ".", + }, + } + ct.initialize(cfg, paths) temporaryDirectroy := b.TempDir() catchpointsDirectory := filepath.Join(temporaryDirectroy, trackerdb.CatchpointDirName) @@ -511,6 +555,7 @@ func BenchmarkLargeCatchpointDataWriting(b *testing.B) { require.NoError(b, err) ct.dbDirectory = temporaryDirectroy + ct.tmpDir = temporaryDirectroy err = ct.loadFromDisk(ml, 0) require.NoError(b, err) @@ -1148,6 +1193,7 @@ func TestCatchpointFirstStageInfoPruning(t *testing.T) { require.NoError(t, err) ct.dbDirectory = temporaryDirectory + ct.tmpDir = temporaryDirectory expectedNumEntries := protoParams.CatchpointLookback / cfg.CatchpointInterval @@ -1247,8 +1293,7 @@ func TestCatchpointFirstStagePersistence(t *testing.T) { cfg.CatchpointInterval = 4 cfg.CatchpointTracking = 2 cfg.MaxAcctLookback = 0 - ct := newCatchpointTracker( - t, ml, cfg, filepath.Join(tempDirectory, config.LedgerFilenamePrefix)) + ct := newCatchpointTracker(t, ml, cfg, tempDirectory) defer ct.close() // Add blocks until the first catchpoint first stage round. @@ -1299,8 +1344,7 @@ func TestCatchpointFirstStagePersistence(t *testing.T) { require.NoError(t, err) // Create a catchpoint tracker and let it restart catchpoint's first stage. 
- ct2 := newCatchpointTracker( - t, ml2, cfg, filepath.Join(tempDirectory, config.LedgerFilenamePrefix)) + ct2 := newCatchpointTracker(t, ml2, cfg, tempDirectory) defer ct2.close() // Check that the catchpoint data file was rewritten. @@ -1348,8 +1392,7 @@ func TestCatchpointSecondStagePersistence(t *testing.T) { cfg.CatchpointInterval = 4 cfg.CatchpointTracking = 2 cfg.MaxAcctLookback = 0 - ct := newCatchpointTracker( - t, ml, cfg, filepath.Join(tempDirectory, config.LedgerFilenamePrefix)) + ct := newCatchpointTracker(t, ml, cfg, tempDirectory) defer ct.close() isCatchpointRound := func(rnd basics.Round) bool { @@ -1443,8 +1486,7 @@ func TestCatchpointSecondStagePersistence(t *testing.T) { require.NoError(t, err) // Create a catchpoint tracker and let it restart catchpoint's second stage. - ct2 := newCatchpointTracker( - t, ml2, cfg, filepath.Join(tempDirectory, config.LedgerFilenamePrefix)) + ct2 := newCatchpointTracker(t, ml2, cfg, tempDirectory) defer ct2.close() // Check that the catchpoint data file was rewritten. @@ -1493,8 +1535,7 @@ func TestCatchpointSecondStageDeletesUnfinishedCatchpointRecord(t *testing.T) { cfg.CatchpointInterval = 4 cfg.CatchpointTracking = 0 cfg.MaxAcctLookback = 0 - ct := newCatchpointTracker( - t, ml, cfg, filepath.Join(tempDirectory, config.LedgerFilenamePrefix)) + ct := newCatchpointTracker(t, ml, cfg, tempDirectory) defer ct.close() secondStageRound := basics.Round(36) @@ -1526,8 +1567,7 @@ func TestCatchpointSecondStageDeletesUnfinishedCatchpointRecord(t *testing.T) { // Configure a new catchpoint tracker with catchpoints enabled. cfg.CatchpointTracking = 2 - ct2 := newCatchpointTracker( - t, ml2, cfg, filepath.Join(tempDirectory, config.LedgerFilenamePrefix)) + ct2 := newCatchpointTracker(t, ml2, cfg, tempDirectory) defer ct2.close() // Add the last block. 
@@ -1967,3 +2007,36 @@ func TestCatchpointLargeAccountCountCatchpointGeneration(t *testing.T) { // Garbage collection helps prevent trashing for next tests runtime.GC() } + +func TestMakeCatchpointFilePath(t *testing.T) { + partitiontest.PartitionTest(t) + + type testCase struct { + round int + expectedDataFilePath string + expectedCatchpointFilePath string + } + + tcs := []testCase{ + {10, "10.data", "10.catchpoint"}, + {100, "100.data", "100.catchpoint"}, + // MakeCatchpointFilePath divides the round by 256 to create subdirectories + {257, "257.data", "01/257.catchpoint"}, + {511, "511.data", "01/511.catchpoint"}, + {512, "512.data", "02/512.catchpoint"}, + // 256 * 256 = 65536 + {65536, "65536.data", "00/01/65536.catchpoint"}, + {65537, "65537.data", "00/01/65537.catchpoint"}, + // 64536576 * 3 = 193609728 + {193609727, "193609727.data", "3f/8a/0b/193609727.catchpoint"}, + {193609728, "193609728.data", "40/8a/0b/193609728.catchpoint"}, + // 256 * 256 * 256 = 16777216 + {16777216, "16777216.data", "00/00/01/16777216.catchpoint"}, + } + + for _, tc := range tcs { + require.Equal(t, tc.expectedCatchpointFilePath, trackerdb.MakeCatchpointFilePath(basics.Round(tc.round))) + require.Equal(t, tc.expectedDataFilePath, makeCatchpointDataFilePath(basics.Round(tc.round))) + } + +} diff --git a/ledger/ledger.go b/ledger/ledger.go index 337072f128..863edc3e27 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -20,7 +20,7 @@ import ( "context" "database/sql" "fmt" - "os" + "path/filepath" "time" "github.com/algorand/go-deadlock" @@ -98,17 +98,23 @@ type Ledger struct { cfg config.Local - dbPathPrefix string + dirsAndPrefix DirsAndPrefix tracer logic.EvalTracer } +// DirsAndPrefix is a struct that holds the genesis directories and the database file prefix, so ledger can construct full paths to database files +type DirsAndPrefix struct { + config.ResolvedGenesisDirs + DBFilePrefix string // the prefix of the database files, appended to genesis directories +} + +// OpenLedger
creates a Ledger object, using SQLite database filenames // based on dbPathPrefix (in-memory if dbMem is true). genesisInitState.Blocks and // genesisInitState.Accounts specify the initial blocks and accounts to use if the -// database wasn't initialized before. -func OpenLedger( - log logging.Logger, dbPathPrefix string, dbMem bool, genesisInitState ledgercore.InitState, cfg config.Local, +// database wasn't initialized before. +func OpenLedger[T string | DirsAndPrefix]( + log logging.Logger, dbPathPrefix T, dbMem bool, genesisInitState ledgercore.InitState, cfg config.Local, ) (*Ledger, error) { var err error verifiedCacheSize := cfg.VerifiedTranscationsCacheSize @@ -121,6 +127,20 @@ func OpenLedger( tracer = eval.MakeTxnGroupDeltaTracer(cfg.MaxAcctLookback) } + var dirs DirsAndPrefix + // if only a string path has been supplied for the ledger, use it for all resources + // don't set the prefix, only tests provide a string for the path, and they manage paths explicitly + if s, ok := any(dbPathPrefix).(string); ok { + dirs.HotGenesisDir = s + dirs.TrackerGenesisDir = s + dirs.ColdGenesisDir = s + dirs.BlockGenesisDir = s + dirs.CatchpointGenesisDir = s + } else if ds, ok := any(dbPathPrefix).(DirsAndPrefix); ok { + // if a DirsAndPrefix has been supplied, use it.
+ dirs = ds + } + l := &Ledger{ log: log, archival: cfg.Archival, @@ -132,7 +152,7 @@ func OpenLedger( accountsRebuildSynchronousMode: db.SynchronousMode(cfg.AccountsRebuildSynchronousMode), verifiedTxnCache: verify.MakeVerifiedTransactionCache(verifiedCacheSize), cfg: cfg, - dbPathPrefix: dbPathPrefix, + dirsAndPrefix: dirs, tracer: tracer, } @@ -142,7 +162,7 @@ func OpenLedger( } }() - l.trackerDBs, l.blockDBs, err = openLedgerDB(dbPathPrefix, dbMem, cfg, log) + l.trackerDBs, l.blockDBs, err = openLedgerDB(dirs, dbMem, cfg, log) if err != nil { err = fmt.Errorf("OpenLedger.openLedgerDB %v", err) return nil, err @@ -222,7 +242,8 @@ func (l *Ledger) reloadLedger() error { l.accts.initialize(l.cfg) l.acctsOnline.initialize(l.cfg) - l.catchpoint.initialize(l.cfg, l.dbPathPrefix) + + l.catchpoint.initialize(l.cfg, l.dirsAndPrefix) err = l.trackers.initialize(l, trackers, l.cfg) if err != nil { @@ -280,43 +301,29 @@ func (l *Ledger) verifyMatchingGenesisHash() (err error) { return } -func openLedgerDB(dbPathPrefix string, dbMem bool, cfg config.Local, log logging.Logger) (trackerDBs trackerdb.Store, blockDBs db.Pair, err error) { - // Backwards compatibility: we used to store both blocks and tracker - // state in a single SQLite db file. - if !dbMem { - commonDBFilename := dbPathPrefix + ".sqlite" - _, err = os.Stat(commonDBFilename) - if !os.IsNotExist(err) { - // before launch, we used to have both blocks and tracker - // state in a single SQLite db file. We don't have that anymore, - // and we want to fail when that's the case. - err = fmt.Errorf("a single ledger database file '%s' was detected. 
This is no longer supported by current binary", commonDBFilename) - return - } - } - +func openLedgerDB(dbPrefixes DirsAndPrefix, dbMem bool, cfg config.Local, log logging.Logger) (trackerDBs trackerdb.Store, blockDBs db.Pair, err error) { outErr := make(chan error, 2) go func() { + trackerDBPrefix := filepath.Join(dbPrefixes.ResolvedGenesisDirs.TrackerGenesisDir, dbPrefixes.DBFilePrefix) var lerr error switch cfg.StorageEngine { case "pebbledb": - dir := dbPathPrefix + "/tracker.pebble" + dir := trackerDBPrefix + "/tracker.pebble" trackerDBs, lerr = pebbledbdriver.Open(dir, dbMem, config.Consensus[protocol.ConsensusCurrentVersion], log) // anything else will initialize a sqlite engine. case "sqlite": fallthrough default: - file := dbPathPrefix + ".tracker.sqlite" - trackerDBs, lerr = sqlitedriver.Open(file, dbMem, log) + trackerDBs, lerr = sqlitedriver.Open(trackerDBPrefix+".tracker.sqlite", dbMem, log) } outErr <- lerr }() go func() { + blockDBPrefix := filepath.Join(dbPrefixes.ResolvedGenesisDirs.BlockGenesisDir, dbPrefixes.DBFilePrefix) var lerr error - blockDBFilename := dbPathPrefix + ".block.sqlite" - blockDBs, lerr = db.OpenPair(blockDBFilename, dbMem) + blockDBs, lerr = db.OpenPair(blockDBPrefix+".block.sqlite", dbMem) if lerr != nil { outErr <- lerr return diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index 5edb6d20e1..a25e92f87e 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -2334,7 +2334,19 @@ func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) { cfg.MaxAcctLookback = proto.MaxBalLookback log := logging.TestingLog(t) log.SetLevel(logging.Info) // prevent spamming with ledger.AddValidatedBlock debug message - trackerDB, blockDB, err := openLedgerDB(dbName, inMem, cfg, log) + // Set basic Directory for all resources + dirs := DirsAndPrefix{ + DBFilePrefix: "", + ResolvedGenesisDirs: config.ResolvedGenesisDirs{ + RootGenesisDir: dbName, + HotGenesisDir: dbName, + ColdGenesisDir: dbName, + TrackerGenesisDir: dbName, + 
BlockGenesisDir: dbName, + CatchpointGenesisDir: dbName, + }, + } + trackerDB, blockDB, err := openLedgerDB(dirs, inMem, cfg, log) require.NoError(t, err) defer func() { trackerDB.Close() @@ -3004,7 +3016,7 @@ func TestLedgerSPVerificationTracker(t *testing.T) { } verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofContextTargetRound), - 1, proto.StateProofInterval, false, any) + 1, proto.StateProofInterval, false, spverDBLoc) addEmptyValidatedBlock(t, l, genesisInitState.Accounts) @@ -3028,7 +3040,7 @@ func TestLedgerSPVerificationTracker(t *testing.T) { triggerTrackerFlush(t, l, genesisInitState) verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofContextTargetRound), - numOfStateProofs, proto.StateProofInterval, true, any) + numOfStateProofs, proto.StateProofInterval, true, spverDBLoc) blk := makeNewEmptyBlock(t, l, t.Name(), genesisInitState.Accounts) var stateProofReceived bookkeeping.StateProofTrackingData @@ -3054,9 +3066,9 @@ func TestLedgerSPVerificationTracker(t *testing.T) { triggerTrackerFlush(t, l, genesisInitState) verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofContextTargetRound), - 1, proto.StateProofInterval, false, any) + 1, proto.StateProofInterval, false, spverDBLoc) verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofContextTargetRound+proto.StateProofInterval), - numOfStateProofs-1, proto.StateProofInterval, true, any) + numOfStateProofs-1, proto.StateProofInterval, true, spverDBLoc) } func TestLedgerReloadStateProofVerificationTracker(t *testing.T) { @@ -3160,7 +3172,7 @@ func TestLedgerCatchpointSPVerificationTracker(t *testing.T) { numTrackedDataFirstCatchpoint := (cfg.CatchpointInterval - proto.MaxBalLookback) / proto.StateProofInterval verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofDataTargetRound), - numTrackedDataFirstCatchpoint, proto.StateProofInterval, 
true, any) + numTrackedDataFirstCatchpoint, proto.StateProofInterval, true, spverDBLoc) l.Close() l, err = OpenLedger(log, dbName, inMem, genesisInitState, cfg) @@ -3168,11 +3180,12 @@ func TestLedgerCatchpointSPVerificationTracker(t *testing.T) { defer l.Close() verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofDataTargetRound), - numTrackedDataFirstCatchpoint, proto.StateProofInterval, false, any) + numTrackedDataFirstCatchpoint, proto.StateProofInterval, false, spverDBLoc) catchpointAccessor, accessorProgress := initializeTestCatchupAccessor(t, l, uint64(len(initkeys))) - relCatchpointFilePath := filepath.Join(trackerdb.CatchpointDirName, trackerdb.MakeCatchpointFilePath(basics.Round(cfg.CatchpointInterval))) + relCatchpointFilePath := filepath.Join(dbName, trackerdb.CatchpointDirName, trackerdb.MakeCatchpointFilePath(basics.Round(cfg.CatchpointInterval))) + catchpointData := readCatchpointFile(t, relCatchpointFilePath) err = catchpointAccessor.ProcessStagingBalances(context.Background(), catchpointData[1].headerName, catchpointData[1].data, &accessorProgress) @@ -3181,7 +3194,7 @@ func TestLedgerCatchpointSPVerificationTracker(t *testing.T) { require.NoError(t, err) verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofDataTargetRound), - numTrackedDataFirstCatchpoint, proto.StateProofInterval, true, any) + numTrackedDataFirstCatchpoint, proto.StateProofInterval, true, spverDBLoc) } func TestLedgerSPTrackerAfterReplay(t *testing.T) { @@ -3218,7 +3231,7 @@ func TestLedgerSPTrackerAfterReplay(t *testing.T) { } // 1024 - verifyStateProofVerificationTracking(t, &l.spVerification, firstStateProofRound, 1, proto.StateProofInterval, true, any) + verifyStateProofVerificationTracking(t, &l.spVerification, firstStateProofRound, 1, proto.StateProofInterval, true, spverDBLoc) a.Equal(0, len(l.spVerification.pendingDeleteContexts)) // Add StateProof transaction (for round 512) and apply without 
validating, advancing the NextStateProofRound to 768 @@ -3227,7 +3240,7 @@ func TestLedgerSPTrackerAfterReplay(t *testing.T) { a.NoError(err) a.Equal(1, len(l.spVerification.pendingDeleteContexts)) // To be deleted, but not yet deleted (waiting for commit) - verifyStateProofVerificationTracking(t, &l.spVerification, firstStateProofRound, 1, proto.StateProofInterval, true, any) + verifyStateProofVerificationTracking(t, &l.spVerification, firstStateProofRound, 1, proto.StateProofInterval, true, spverDBLoc) // first ensure the block is committed into blockdb l.WaitForCommit(l.Latest()) @@ -3246,5 +3259,5 @@ func TestLedgerSPTrackerAfterReplay(t *testing.T) { a.NoError(err) a.Equal(1, len(l.spVerification.pendingDeleteContexts)) - verifyStateProofVerificationTracking(t, &l.spVerification, firstStateProofRound, 1, proto.StateProofInterval, true, any) + verifyStateProofVerificationTracking(t, &l.spVerification, firstStateProofRound, 1, proto.StateProofInterval, true, spverDBLoc) } diff --git a/ledger/spverificationtracker_test.go b/ledger/spverificationtracker_test.go index a88c05f0ed..47d85bab31 100644 --- a/ledger/spverificationtracker_test.go +++ b/ledger/spverificationtracker_test.go @@ -38,7 +38,7 @@ const unusedByStateProofTracker = basics.Round(0) type StateProofTrackingLocation uint64 const ( - any StateProofTrackingLocation = iota + spverDBLoc StateProofTrackingLocation = iota trackerDB trackerMemory ) @@ -157,7 +157,7 @@ func verifyStateProofVerificationTracking(t *testing.T, spt *spVerificationTrack for lastAttestedRound := startRound; lastAttestedRound <= finalLastAttestedRound; lastAttestedRound += basics.Round(stateProofInterval) { var err error switch trackingLocation { - case any: + case spverDBLoc: _, err = spt.LookupVerificationContext(lastAttestedRound) case trackerDB: _, err = spt.lookupContextInDB(lastAttestedRound) @@ -190,7 +190,7 @@ func TestStateProofVerificationTracker_StateProofsDisabled(t *testing.T) { mockCommit(t, spt, ml, 0, roundsAmount) - 
verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, uint64(roundsAmount)/defaultStateProofInterval, defaultStateProofInterval, false, any) + verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, uint64(roundsAmount)/defaultStateProofInterval, defaultStateProofInterval, false, spverDBLoc) } func TestStateProofVerificationTracker_StateProofsNotStuck(t *testing.T) { @@ -208,12 +208,12 @@ func TestStateProofVerificationTracker_StateProofsNotStuck(t *testing.T) { mockCommit(t, spt, ml, 0, lastBlock.block.Round()) expectedRemainingContextNum := expectedContextNum - 1 - verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, expectedRemainingContextNum, defaultStateProofInterval, false, any) + verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, expectedRemainingContextNum, defaultStateProofInterval, false, spverDBLoc) finalLastAttestedRound := defaultFirstStateProofContextRound + basics.Round(expectedRemainingContextNum*defaultStateProofInterval) // The last verification context should still be tracked since the round with the state proof transaction it is used // to verify has not yet been committed. 
- verifyStateProofVerificationTracking(t, spt, finalLastAttestedRound, 1, defaultStateProofInterval, true, any) + verifyStateProofVerificationTracking(t, spt, finalLastAttestedRound, 1, defaultStateProofInterval, true, spverDBLoc) } func TestStateProofVerificationTracker_CommitFUllDbFlush(t *testing.T) { @@ -295,12 +295,12 @@ func TestStateProofVerificationTracker_CommitFullDbPruning(t *testing.T) { mockCommit(t, spt, ml, 0, lastBlock.block.Round()) - verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, maxStateProofsToGenerate, defaultStateProofInterval, false, any) + verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, maxStateProofsToGenerate, defaultStateProofInterval, false, spverDBLoc) finalLastAttestedRound := defaultFirstStateProofContextRound + basics.Round(maxStateProofsToGenerate*defaultStateProofInterval) // The last verification context should still be tracked since the round with the state proof transaction it is used // to verify has not yet been committed. 
- verifyStateProofVerificationTracking(t, spt, finalLastAttestedRound, 1, defaultStateProofInterval, true, any) + verifyStateProofVerificationTracking(t, spt, finalLastAttestedRound, 1, defaultStateProofInterval, true, spverDBLoc) } func TestStateProofVerificationTracker_CommitPartialDbPruning(t *testing.T) { @@ -324,7 +324,7 @@ func TestStateProofVerificationTracker_CommitPartialDbPruning(t *testing.T) { mockCommit(t, spt, ml, 0, lastStuckBlock.block.Round()+basics.Round(contextToRemove)) - verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, contextToRemove, defaultStateProofInterval, false, any) + verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, contextToRemove, defaultStateProofInterval, false, spverDBLoc) verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound+basics.Round(contextToRemove*defaultStateProofInterval), contextToAdd-contextToRemove, defaultStateProofInterval, true, trackerDB) } @@ -380,10 +380,10 @@ func TestStateProofVerificationTracker_StateProofIntervalChange(t *testing.T) { newStateProofInterval, true) verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, oldIntervalContext, defaultStateProofInterval, - true, any) + true, spverDBLoc) firstNewIntervalLastAttestedRound := lastOldIntervalBlock.block.Round() + basics.Round(defaultStateProofInterval) verifyStateProofVerificationTracking(t, spt, firstNewIntervalLastAttestedRound, newIntervalContext, - newStateProofInterval, true, any) + newStateProofInterval, true, spverDBLoc) newIntervalRemovedStateProofs := newIntervalContext - (newIntervalContext / 2) // State Proofs for old blocks should be generated using the old interval. 
@@ -399,11 +399,11 @@ func TestStateProofVerificationTracker_StateProofIntervalChange(t *testing.T) { firstRemainingLastAttestedRound := firstNewIntervalLastAttestedRound + basics.Round(newIntervalRemovedStateProofs*newStateProofInterval) verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, oldIntervalContext, defaultStateProofInterval, - false, any) + false, spverDBLoc) verifyStateProofVerificationTracking(t, spt, firstNewIntervalLastAttestedRound, - newIntervalRemovedStateProofs, newStateProofInterval, false, any) + newIntervalRemovedStateProofs, newStateProofInterval, false, spverDBLoc) verifyStateProofVerificationTracking(t, spt, firstRemainingLastAttestedRound, newIntervalContext-newIntervalRemovedStateProofs, - newStateProofInterval, true, any) + newStateProofInterval, true, spverDBLoc) } func TestStateProofVerificationTracker_LookupVerificationContext(t *testing.T) { diff --git a/ledger/tracker_test.go b/ledger/tracker_test.go index 709d123d9e..730c315e80 100644 --- a/ledger/tracker_test.go +++ b/ledger/tracker_test.go @@ -69,7 +69,13 @@ func TestTrackerScheduleCommit(t *testing.T) { ct := &catchpointTracker{} ao := &onlineAccounts{} au.initialize(conf) - ct.initialize(conf, ".") + paths := DirsAndPrefix{ + ResolvedGenesisDirs: config.ResolvedGenesisDirs{ + CatchpointGenesisDir: ".", + HotGenesisDir: ".", + }, + } + ct.initialize(conf, paths) ao.initialize(conf) _, err := trackerDBInitialize(ml, false, ".") diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go index 8c4b3eaee9..16d562a554 100644 --- a/netdeploy/remote/deployedNetwork.go +++ b/netdeploy/remote/deployedNetwork.go @@ -445,7 +445,8 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g localCfg.Archival = true localCfg.CatchpointTracking = -1 localCfg.LedgerSynchronousMode = 0 - l, err := ledger.OpenLedger(log, filepath.Join(genesisFolder, "bootstrapped"), false, initState, localCfg) + prefix := 
filepath.Join(genesisFolder, "bootstrapped") + l, err := ledger.OpenLedger(log, prefix, false, initState, localCfg) if err != nil { return err } @@ -479,7 +480,8 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g l.Close() localCfg.CatchpointTracking = 0 - l, err = ledger.OpenLedger(log, genesisFolder+"/bootstrapped", false, initState, localCfg) + prefix2 := genesisFolder + "/bootstrapped" + l, err = ledger.OpenLedger(log, prefix2, false, initState, localCfg) if err != nil { return err } diff --git a/node/follower_node.go b/node/follower_node.go index e044333d42..7706a16bf9 100644 --- a/node/follower_node.go +++ b/node/follower_node.go @@ -20,8 +20,6 @@ package node import ( "context" "fmt" - "os" - "path/filepath" "time" "github.com/algorand/go-deadlock" @@ -59,7 +57,7 @@ type AlgorandFollowerNode struct { catchpointCatchupService *catchup.CatchpointCatchupService blockService *rpcs.BlockService - rootDir string + genesisDirs config.ResolvedGenesisDirs genesisID string genesisHash crypto.Digest devMode bool // is this node operates in a developer mode ? ( benign agreement, broadcasting transaction generates a new block ) @@ -80,11 +78,15 @@ type AlgorandFollowerNode struct { // MakeFollower sets up an Algorand data node func MakeFollower(log logging.Logger, rootDir string, cfg config.Local, phonebookAddresses []string, genesis bookkeeping.Genesis) (*AlgorandFollowerNode, error) { node := new(AlgorandFollowerNode) - node.rootDir = rootDir node.log = log.With("name", cfg.NetAddress) node.genesisID = genesis.ID() node.genesisHash = genesis.Hash() node.devMode = genesis.DevMode + var err error + node.genesisDirs, err = cfg.EnsureAndResolveGenesisDirs(rootDir, genesis.ID()) + if err != nil { + return nil, err + } if node.devMode { log.Warn("Follower running on a devMode network. 
Must submit txns to a different node.") @@ -102,16 +104,6 @@ func MakeFollower(log logging.Logger, rootDir string, cfg config.Local, phoneboo p2pNode.DeregisterMessageInterest(protocol.VoteBundleTag) node.net = p2pNode - // load stored data - genesisDir := filepath.Join(rootDir, genesis.ID()) - ledgerPathnamePrefix := filepath.Join(genesisDir, config.LedgerFilenamePrefix) - - // create initial ledger, if it doesn't exist - err = os.Mkdir(genesisDir, 0700) - if err != nil && !os.IsExist(err) { - log.Errorf("Unable to create genesis directory: %v", err) - return nil, err - } genalloc, err := genesis.Balances() if err != nil { log.Errorf("Cannot load genesis allocation: %v", err) @@ -120,9 +112,13 @@ func MakeFollower(log logging.Logger, rootDir string, cfg config.Local, phoneboo node.cryptoPool = execpool.MakePool(node) node.lowPriorityCryptoVerificationPool = execpool.MakeBacklog(node.cryptoPool, 2*node.cryptoPool.GetParallelism(), execpool.LowPriority, node) - node.ledger, err = data.LoadLedger(node.log, ledgerPathnamePrefix, false, genesis.Proto, genalloc, node.genesisID, node.genesisHash, []ledgercore.BlockListener{}, cfg) + ledgerPaths := ledger.DirsAndPrefix{ + DBFilePrefix: config.LedgerFilenamePrefix, + ResolvedGenesisDirs: node.genesisDirs, + } + node.ledger, err = data.LoadLedger(node.log, ledgerPaths, false, genesis.Proto, genalloc, node.genesisID, node.genesisHash, []ledgercore.BlockListener{}, cfg) if err != nil { - log.Errorf("Cannot initialize ledger (%s): %v", ledgerPathnamePrefix, err) + log.Errorf("Cannot initialize ledger (%v): %v", ledgerPaths, err) return nil, err } diff --git a/node/follower_node_test.go b/node/follower_node_test.go index 192b333be0..fdf7c3600b 100644 --- a/node/follower_node_test.go +++ b/node/follower_node_test.go @@ -18,6 +18,7 @@ package node import ( "context" + "path/filepath" "testing" "github.com/sirupsen/logrus" @@ -65,7 +66,8 @@ func setupFollowNode(t *testing.T) *AlgorandFollowerNode { cfg := config.GetDefaultLocal() 
cfg.EnableFollowMode = true genesis := followNodeDefaultGenesis() - node, err := MakeFollower(logging.Base(), t.TempDir(), cfg, []string{}, genesis) + root := t.TempDir() + node, err := MakeFollower(logging.Base(), root, cfg, []string{}, genesis) require.NoError(t, err) return node } @@ -136,7 +138,8 @@ func TestDevModeWarning(t *testing.T) { logger, hook := test.NewNullLogger() tlogger := logging.NewWrappedLogger(logger) - _, err := MakeFollower(tlogger, t.TempDir(), cfg, []string{}, genesis) + root := t.TempDir() + _, err := MakeFollower(tlogger, root, cfg, []string{}, genesis) require.NoError(t, err) // check for the warning @@ -169,3 +172,137 @@ func TestFastCatchupResume(t *testing.T) { // Verify the sync was reset. assert.Equal(t, uint64(0), node.GetSyncRound()) } + +// TestDefaultResourcePaths confirms that when no extra configuration is provided, all resources are created in the dataDir +func TestDefaultResourcePaths_Follower(t *testing.T) { + partitiontest.PartitionTest(t) + + testDirectory := t.TempDir() + + genesis := bookkeeping.Genesis{ + SchemaID: "go-test-node-genesis", + Proto: protocol.ConsensusCurrentVersion, + Network: config.Devtestnet, + FeeSink: sinkAddr.String(), + RewardsPool: poolAddr.String(), + } + + cfg := config.GetDefaultLocal() + + // the logger is set up by the server, so we don't test this here + log := logging.Base() + + n, err := MakeFollower(log, testDirectory, cfg, []string{}, genesis) + require.NoError(t, err) + + n.Start() + defer n.Stop() + + // confirm genesis dir exists in the data dir, and that resources exist in the expected locations + require.DirExists(t, filepath.Join(testDirectory, genesis.ID())) + + require.FileExists(t, filepath.Join(testDirectory, genesis.ID(), "ledger.tracker.sqlite")) + require.FileExists(t, filepath.Join(testDirectory, genesis.ID(), "ledger.block.sqlite")) +} + +// TestConfiguredDataDirs tests to see that when HotDataDir and ColdDataDir are set, underlying resources are created in the correct 
locations +// Not all resources are tested here, because not all resources use the paths provided to them immediately. For example, catchpoint only creates +// a directory when writing a catchpoint file, which is not being done here with this simple node +func TestConfiguredDataDirs_Follower(t *testing.T) { + partitiontest.PartitionTest(t) + + testDirectory := t.TempDir() + testDirHot := t.TempDir() + testDirCold := t.TempDir() + + genesis := bookkeeping.Genesis{ + SchemaID: "go-test-node-genesis", + Proto: protocol.ConsensusCurrentVersion, + Network: config.Devtestnet, + FeeSink: sinkAddr.String(), + RewardsPool: poolAddr.String(), + } + + cfg := config.GetDefaultLocal() + + cfg.HotDataDir = testDirHot + cfg.ColdDataDir = testDirCold + cfg.CatchpointTracking = 2 + cfg.CatchpointInterval = 1 + + // the logger is set up by the server, so we don't test this here + log := logging.Base() + + n, err := MakeFollower(log, testDirectory, cfg, []string{}, genesis) + require.NoError(t, err) + + n.Start() + defer n.Stop() + + // confirm hot data dir exists and contains a genesis dir + require.DirExists(t, filepath.Join(testDirHot, genesis.ID())) + + // confirm the tracker is in the genesis dir of hot data dir + require.FileExists(t, filepath.Join(testDirHot, genesis.ID(), "ledger.tracker.sqlite")) + + // confirm cold data dir exists and contains a genesis dir + require.DirExists(t, filepath.Join(testDirCold, genesis.ID())) + + // confirm the blockdb is in the genesis dir of cold data dir + require.FileExists(t, filepath.Join(testDirCold, genesis.ID(), "ledger.block.sqlite")) + +} + +// TestConfiguredResourcePaths tests to see that when individual paths are set, underlying resources are created in the correct locations +func TestConfiguredResourcePaths_Follower(t *testing.T) { + partitiontest.PartitionTest(t) + + testDirectory := t.TempDir() + testDirHot := t.TempDir() + testDirCold := t.TempDir() + + // add a path for each resource now + trackerPath := 
filepath.Join(testDirectory, "custom_tracker") + blockPath := filepath.Join(testDirectory, "custom_block") + + genesis := bookkeeping.Genesis{ + SchemaID: "go-test-node-genesis", + Proto: protocol.ConsensusCurrentVersion, + Network: config.Devtestnet, + FeeSink: sinkAddr.String(), + RewardsPool: poolAddr.String(), + } + + cfg := config.GetDefaultLocal() + + // Configure everything even though a follower node will only use Tracker and Block DBs + cfg.HotDataDir = testDirHot + cfg.ColdDataDir = testDirCold + cfg.TrackerDBDir = trackerPath + cfg.BlockDBDir = blockPath + cfg.CatchpointTracking = 2 + cfg.CatchpointInterval = 1 + + // the logger is set up by the server, so we don't test this here + log := logging.Base() + + n, err := MakeFollower(log, testDirectory, cfg, []string{}, genesis) + require.NoError(t, err) + + n.Start() + defer n.Stop() + + // confirm hot data dir exists and contains a genesis dir + require.DirExists(t, filepath.Join(testDirHot, genesis.ID())) + + // the tracker shouldn't be in the hot data dir, but rather the custom path's genesis dir + require.NoFileExists(t, filepath.Join(testDirHot, genesis.ID(), "ledger.tracker.sqlite")) + require.FileExists(t, filepath.Join(cfg.TrackerDBDir, genesis.ID(), "ledger.tracker.sqlite")) + + // confirm cold data dir exists and contains a genesis dir + require.DirExists(t, filepath.Join(testDirCold, genesis.ID())) + + // block db shouldn't be in the cold data dir, but rather the custom path's genesis dir + require.NoFileExists(t, filepath.Join(testDirCold, genesis.ID(), "ledger.block.sqlite")) + require.FileExists(t, filepath.Join(cfg.BlockDBDir, genesis.ID(), "ledger.block.sqlite")) +} diff --git a/node/node.go b/node/node.go index aa13f0683f..4c18ad1d51 100644 --- a/node/node.go +++ b/node/node.go @@ -127,7 +127,7 @@ type AlgorandFullNode struct { ledgerService *rpcs.LedgerService txPoolSyncerService *rpcs.TxSyncer - rootDir string + genesisDirs config.ResolvedGenesisDirs genesisID string genesisHash 
crypto.Digest devMode bool // is this node operating in a developer mode ? ( benign agreement, broadcasting transaction generates a new block ) @@ -177,23 +177,17 @@ type TxnWithStatus struct { // (i.e., it returns a node that participates in consensus) func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAddresses []string, genesis bookkeeping.Genesis) (*AlgorandFullNode, error) { node := new(AlgorandFullNode) - node.rootDir = rootDir node.log = log.With("name", cfg.NetAddress) node.genesisID = genesis.ID() node.genesisHash = genesis.Hash() node.devMode = genesis.DevMode node.config = cfg - - // load stored data - genesisDir := filepath.Join(rootDir, genesis.ID()) - ledgerPathnamePrefix := filepath.Join(genesisDir, config.LedgerFilenamePrefix) - - // create initial ledger, if it doesn't exist - err := os.Mkdir(genesisDir, 0700) - if err != nil && !os.IsExist(err) { - log.Errorf("Unable to create genesis directory: %v", err) + var err error + node.genesisDirs, err = cfg.EnsureAndResolveGenesisDirs(rootDir, genesis.ID()) + if err != nil { return nil, err } + genalloc, err := genesis.Balances() if err != nil { log.Errorf("Cannot load genesis allocation: %v", err) @@ -203,7 +197,8 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd // tie network, block fetcher, and agreement services together var p2pNode network.GossipNode if cfg.EnableP2P { - p2pNode, err = network.NewP2PNetwork(node.log, node.config, genesisDir, phonebookAddresses, genesis.ID(), genesis.Network) + // TODO: pass more appropriate genesisDir (hot/cold). Presently this is just used to store a peerID key. 
+ p2pNode, err = network.NewP2PNetwork(node.log, node.config, node.genesisDirs.RootGenesisDir, phonebookAddresses, genesis.ID(), genesis.Network) if err != nil { log.Errorf("could not create p2p node: %v", err) return nil, err @@ -223,9 +218,13 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd node.cryptoPool = execpool.MakePool(node) node.lowPriorityCryptoVerificationPool = execpool.MakeBacklog(node.cryptoPool, 2*node.cryptoPool.GetParallelism(), execpool.LowPriority, node) node.highPriorityCryptoVerificationPool = execpool.MakeBacklog(node.cryptoPool, 2*node.cryptoPool.GetParallelism(), execpool.HighPriority, node) - node.ledger, err = data.LoadLedger(node.log, ledgerPathnamePrefix, false, genesis.Proto, genalloc, node.genesisID, node.genesisHash, []ledgercore.BlockListener{}, cfg) + ledgerPaths := ledger.DirsAndPrefix{ + DBFilePrefix: config.LedgerFilenamePrefix, + ResolvedGenesisDirs: node.genesisDirs, + } + node.ledger, err = data.LoadLedger(node.log, ledgerPaths, false, genesis.Proto, genalloc, node.genesisID, node.genesisHash, []ledgercore.BlockListener{}, cfg) if err != nil { - log.Errorf("Cannot initialize ledger (%s): %v", ledgerPathnamePrefix, err) + log.Errorf("Cannot initialize ledger (%v): %v", ledgerPaths, err) return nil, err } @@ -256,7 +255,8 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd node.ledgerService = rpcs.MakeLedgerService(cfg, node.ledger, p2pNode, node.genesisID) rpcs.RegisterTxService(node.transactionPool, p2pNode, node.genesisID, cfg.TxPoolSize, cfg.TxSyncServeResponseSize) - crashPathname := filepath.Join(genesisDir, config.CrashFilename) + // crash data is stored in the cold data directory unless otherwise specified + crashPathname := filepath.Join(node.genesisDirs.CrashGenesisDir, config.CrashFilename) crashAccess, err := db.MakeAccessor(crashPathname, false, false) if err != nil { log.Errorf("Cannot load crash data: %v", err) @@ -271,6 +271,7 @@ func 
MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd } else { agreementClock = timers.MakeMonotonicClock[agreement.TimeoutType](time.Now()) } + agreementParameters := agreement.Parameters{ Logger: log, Accessor: crashAccess, @@ -294,7 +295,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd node.catchupService = catchup.MakeService(node.log, node.config, p2pNode, node.ledger, node.catchupBlockAuth, agreementLedger.UnmatchedPendingCertificates, node.lowPriorityCryptoVerificationPool) node.txPoolSyncerService = rpcs.MakeTxSyncer(node.transactionPool, node.net, node.txHandler.SolicitedTxHandler(), time.Duration(cfg.TxSyncIntervalSeconds)*time.Second, time.Duration(cfg.TxSyncTimeoutSeconds)*time.Second, cfg.TxSyncServeResponseSize) - registry, err := ensureParticipationDB(genesisDir, node.log) + registry, err := ensureParticipationDB(node.genesisDirs.ColdGenesisDir, node.log) if err != nil { log.Errorf("unable to initialize the participation registry database: %v", err) return nil, err @@ -327,7 +328,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd node.tracer = messagetracer.NewTracer(log).Init(cfg) gossip.SetTrace(agreementParameters.Network, node.tracer) - node.stateProofWorker = stateproof.NewWorker(genesisDir, node.log, node.accountManager, node.ledger.Ledger, node.net, node) + node.stateProofWorker = stateproof.NewWorker(node.genesisDirs.StateproofGenesisDir, node.log, node.accountManager, node.ledger.Ledger, node.net, node) return node, err } @@ -433,7 +434,7 @@ func (node *AlgorandFullNode) Stop() { // note: unlike the other two functions, this accepts a whole filename func (node *AlgorandFullNode) getExistingPartHandle(filename string) (db.Accessor, error) { - filename = filepath.Join(node.rootDir, node.genesisID, filename) + filename = filepath.Join(node.genesisDirs.RootGenesisDir, filename) _, err := os.Stat(filename) if err == nil { @@ -837,9 +838,7 @@ func (node 
*AlgorandFullNode) RemoveParticipationKey(partKeyID account.Participa return account.ErrParticipationIDNotFound } - genID := node.GenesisID() - - outDir := filepath.Join(node.rootDir, genID) + outDir := node.genesisDirs.RootGenesisDir filename := config.PartKeyFilename(partRecord.ParticipationID.String(), uint64(partRecord.FirstValid), uint64(partRecord.LastValid)) fullyQualifiedFilename := filepath.Join(outDir, filepath.Base(filename)) @@ -901,9 +900,7 @@ func createTemporaryParticipationKey(outDir string, partKeyBinary []byte) (strin // InstallParticipationKey Given a participation key binary stream install the participation key. func (node *AlgorandFullNode) InstallParticipationKey(partKeyBinary []byte) (account.ParticipationID, error) { - genID := node.GenesisID() - - outDir := filepath.Join(node.rootDir, genID) + outDir := node.genesisDirs.RootGenesisDir fullyQualifiedTempFile, err := createTemporaryParticipationKey(outDir, partKeyBinary) // We need to make sure no tempfile is created/remains if there is an error @@ -958,7 +955,7 @@ func (node *AlgorandFullNode) InstallParticipationKey(partKeyBinary []byte) (acc func (node *AlgorandFullNode) loadParticipationKeys() error { // Generate a list of all potential participation key files - genesisDir := filepath.Join(node.rootDir, node.genesisID) + genesisDir := node.genesisDirs.RootGenesisDir files, err := os.ReadDir(genesisDir) if err != nil { return fmt.Errorf("AlgorandFullNode.loadPartitipationKeys: could not read directory %v: %v", genesisDir, err) diff --git a/node/node_test.go b/node/node_test.go index 64e285eac1..c905fa78da 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -516,6 +516,165 @@ func TestMismatchingGenesisDirectoryPermissions(t *testing.T) { require.NoError(t, os.RemoveAll(testDirectroy)) } +// TestDefaultResourcePaths confirms that when no extra configuration is provided, all resources are created in the dataDir +func TestDefaultResourcePaths(t *testing.T) { + 
partitiontest.PartitionTest(t) + + testDirectory := t.TempDir() + + genesis := bookkeeping.Genesis{ + SchemaID: "gen", + Proto: protocol.ConsensusCurrentVersion, + Network: config.Devtestnet, + FeeSink: sinkAddr.String(), + RewardsPool: poolAddr.String(), + } + + cfg := config.GetDefaultLocal() + + // the logger is set up by the server, so we don't test this here + log := logging.Base() + + n, err := MakeFull(log, testDirectory, cfg, []string{}, genesis) + + require.NoError(t, err) + + n.Start() + defer n.Stop() + + // confirm genesis dir exists in the data dir, and that resources exist in the expected locations + require.DirExists(t, filepath.Join(testDirectory, genesis.ID())) + + _, err = os.Stat(filepath.Join(testDirectory, genesis.ID(), "ledger.tracker.sqlite")) + require.NoError(t, err) + _, err = os.Stat(filepath.Join(testDirectory, genesis.ID(), "stateproof.sqlite")) + require.NoError(t, err) + _, err = os.Stat(filepath.Join(testDirectory, genesis.ID(), "ledger.block.sqlite")) + require.NoError(t, err) + _, err = os.Stat(filepath.Join(testDirectory, genesis.ID(), "partregistry.sqlite")) + require.NoError(t, err) + _, err = os.Stat(filepath.Join(testDirectory, genesis.ID(), "crash.sqlite")) + require.NoError(t, err) +} + +// TestConfiguredDataDirs tests to see that when HotDataDir and ColdDataDir are set, underlying resources are created in the correct locations +// Not all resources are tested here, because not all resources use the paths provided to them immediately.
For example, catchpoint only creates +// a directory when writing a catchpoint file, which is not being done here with this simple node +func TestConfiguredDataDirs(t *testing.T) { + partitiontest.PartitionTest(t) + + testDirectory := t.TempDir() + testDirHot := t.TempDir() + testDirCold := t.TempDir() + + genesis := bookkeeping.Genesis{ + SchemaID: "go-test-node-genesis", + Proto: protocol.ConsensusCurrentVersion, + Network: config.Devtestnet, + FeeSink: sinkAddr.String(), + RewardsPool: poolAddr.String(), + } + + cfg := config.GetDefaultLocal() + + cfg.HotDataDir = testDirHot + cfg.ColdDataDir = testDirCold + cfg.CatchpointTracking = 2 + cfg.CatchpointInterval = 1 + + // the logger is set up by the server, so we don't test this here + log := logging.Base() + + n, err := MakeFull(log, testDirectory, cfg, []string{}, genesis) + require.NoError(t, err) + + n.Start() + defer n.Stop() + + // confirm hot data dir exists and contains a genesis dir + require.DirExists(t, filepath.Join(testDirHot, genesis.ID())) + + // confirm the tracker is in the genesis dir of hot data dir + require.FileExists(t, filepath.Join(testDirHot, genesis.ID(), "ledger.tracker.sqlite")) + + // confirm the stateproof db is in the genesis dir of cold data dir + require.FileExists(t, filepath.Join(testDirCold, genesis.ID(), "stateproof.sqlite")) + + // confirm cold data dir exists and contains a genesis dir + require.DirExists(t, filepath.Join(testDirCold, genesis.ID())) + + // confirm the blockdb is in the genesis dir of cold data dir + require.FileExists(t, filepath.Join(testDirCold, genesis.ID(), "ledger.block.sqlite")) + + // confirm the partregistry is in the genesis dir of cold data dir + require.FileExists(t, filepath.Join(testDirCold, genesis.ID(), "partregistry.sqlite")) + + // confirm the crash db is in the genesis dir of cold data dir + require.FileExists(t, filepath.Join(testDirCold, genesis.ID(), "crash.sqlite")) +} + +// TestConfiguredResourcePaths tests to see that when
TrackerDBDir, BlockDBDir, StateproofDir, and CrashDBDir are set, underlying resources are created in the correct locations +func TestConfiguredResourcePaths(t *testing.T) { + partitiontest.PartitionTest(t) + + testDirectory := t.TempDir() + testDirHot := t.TempDir() + testDirCold := t.TempDir() + + // add a path for each resource now + trackerPath := filepath.Join(testDirectory, "custom_tracker") + blockPath := filepath.Join(testDirectory, "custom_block") + stateproofDir := filepath.Join(testDirectory, "custom_stateproof") + crashPath := filepath.Join(testDirectory, "custom_crash") + + genesis := bookkeeping.Genesis{ + SchemaID: "go-test-node-genesis", + Proto: protocol.ConsensusCurrentVersion, + Network: config.Devtestnet, + FeeSink: sinkAddr.String(), + RewardsPool: poolAddr.String(), + } + + cfg := config.GetDefaultLocal() + + cfg.HotDataDir = testDirHot + cfg.ColdDataDir = testDirCold + cfg.TrackerDBDir = trackerPath + cfg.BlockDBDir = blockPath + cfg.StateproofDir = stateproofDir + cfg.CrashDBDir = crashPath + + // the logger is set up by the server, so we don't test this here + log := logging.Base() + + n, err := MakeFull(log, testDirectory, cfg, []string{}, genesis) + require.NoError(t, err) + + n.Start() + defer n.Stop() + + // confirm hot data dir exists and contains a genesis dir + require.DirExists(t, filepath.Join(testDirHot, genesis.ID())) + + // the tracker shouldn't be in the hot data dir, but rather the custom path's genesis dir + require.NoFileExists(t, filepath.Join(testDirHot, genesis.ID(), "ledger.tracker.sqlite")) + require.FileExists(t, filepath.Join(cfg.TrackerDBDir, genesis.ID(), "ledger.tracker.sqlite")) + + // same with stateproofs + require.NoFileExists(t, filepath.Join(testDirHot, genesis.ID(), "stateproof.sqlite")) + require.FileExists(t, filepath.Join(cfg.StateproofDir, genesis.ID(), "stateproof.sqlite")) + + // confirm cold data dir exists and contains a genesis dir + require.DirExists(t, filepath.Join(testDirCold,
genesis.ID())) + + // block db shouldn't be in the cold data dir, but rather the custom path's genesis dir + require.NoFileExists(t, filepath.Join(testDirCold, genesis.ID(), "ledger.block.sqlite")) + require.FileExists(t, filepath.Join(cfg.BlockDBDir, genesis.ID(), "ledger.block.sqlite")) + + require.NoFileExists(t, filepath.Join(testDirCold, genesis.ID(), "crash.sqlite")) + require.FileExists(t, filepath.Join(cfg.CrashDBDir, genesis.ID(), "crash.sqlite")) +} + // TestOfflineOnlineClosedBitStatus a test that validates that the correct bits are being set func TestOfflineOnlineClosedBitStatus(t *testing.T) { partitiontest.PartitionTest(t) diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go index 6b275a2489..98d86ae36d 100644 --- a/rpcs/blockService_test.go +++ b/rpcs/blockService_test.go @@ -543,8 +543,9 @@ func makeLedger(t *testing.T, namePostfix string) *data.Ledger { cfg := config.GetDefaultLocal() const inMem = true + prefix := t.Name() + namePostfix ledger, err := data.LoadLedger( - log, t.Name()+namePostfix, inMem, protocol.ConsensusCurrentVersion, genBal, "", genHash, + log, prefix, inMem, protocol.ConsensusCurrentVersion, genBal, "", genHash, nil, cfg, ) require.NoError(t, err) diff --git a/test/testdata/configs/config-v31.json b/test/testdata/configs/config-v31.json index 8705d06081..62cbe6427b 100644 --- a/test/testdata/configs/config-v31.json +++ b/test/testdata/configs/config-v31.json @@ -8,11 +8,13 @@ "AnnounceParticipationKey": true, "Archival": false, "BaseLoggerDebugLevel": 4, + "BlockDBDir": "", "BlockServiceCustomFallbackEndpoints": "", "BlockServiceMemCap": 500000000, "BroadcastConnectionsLimit": -1, "CadaverDirectory": "", "CadaverSizeTarget": 0, + "CatchpointDir": "", "CatchpointFileHistoryLength": 365, "CatchpointInterval": 10000, "CatchpointTracking": 0, @@ -23,8 +25,10 @@ "CatchupHTTPBlockFetchTimeoutSec": 4, "CatchupLedgerDownloadRetryAttempts": 50, "CatchupParallelBlocks": 16, + "ColdDataDir": "", 
"ConnectionsRateLimitingCount": 60, "ConnectionsRateLimitingWindowSeconds": 1, + "CrashDBDir": "", "DNSBootstrapID": ".algorand.network?backup=.algorand.net&dedup=.algorand-.(network|net)", "DNSSecurityFlags": 1, "DeadlockDetection": 0, @@ -66,12 +70,15 @@ "ForceRelayMessages": false, "GossipFanout": 4, "HeartbeatUpdateInterval": 600, + "HotDataDir": "", "IncomingConnectionsLimit": 2400, "IncomingMessageFilterBucketCount": 5, "IncomingMessageFilterBucketSize": 512, "LedgerSynchronousMode": 2, + "LogArchiveDir": "", "LogArchiveMaxAge": "", "LogArchiveName": "node.archive.log", + "LogFileDir": "", "LogSizeLimit": 1073741824, "MaxAPIBoxPerApplication": 100000, "MaxAPIResourcesPerAccount": 100000, @@ -102,12 +109,14 @@ "RestReadTimeoutSeconds": 15, "RestWriteTimeoutSeconds": 120, "RunHosted": false, + "StateproofDir": "", "StorageEngine": "sqlite", "SuggestedFeeBlockHistory": 3, "SuggestedFeeSlidingWindowSize": 50, "TLSCertFile": "", "TLSKeyFile": "", "TelemetryToLog": true, + "TrackerDBDir": "", "TransactionSyncDataExchangeRate": 0, "TransactionSyncSignificantMessageThreshold": 0, "TxBacklogReservedCapacityPerPeer": 20, diff --git a/tools/debug/transplanter/main.go b/tools/debug/transplanter/main.go index 1c7a3a6b41..1ee2a9c84e 100644 --- a/tools/debug/transplanter/main.go +++ b/tools/debug/transplanter/main.go @@ -381,7 +381,6 @@ func main() { l.Close() fmt.Printf("Catching up from %d to %d\n", latest, *roundStart) - followerNode, err = node.MakeFollower(log, rootPath, cfg, []string{}, genesis) if err != nil { fmt.Fprintf(os.Stderr, "Cannot init follower node: %v", err)