From d4d15624a45445102447f2636a64954cdde2caf3 Mon Sep 17 00:00:00 2001 From: Marko Date: Tue, 3 Dec 2024 17:18:19 +0100 Subject: [PATCH 1/4] refactor(store/v2)!: simplify storage (#22683) (cherry picked from commit 94cfcc11aaf543179bc91caaa601e9d80c91ecd4) # Conflicts: # runtime/v2/builder.go # server/v2/stf/branch/bench_test.go # server/v2/store/snapshot.go # store/iavl/store_test.go # store/v2/commitment/iavl/tree.go # store/v2/commitment/store.go # store/v2/commitment/store_test_suite.go # store/v2/database.go # store/v2/migration/README.md # store/v2/migration/manager.go # store/v2/migration/manager_test.go # store/v2/mock/db_mock.go # store/v2/mock/types.go # store/v2/pruning/manager.go # store/v2/pruning/manager_test.go # store/v2/root/factory.go # store/v2/root/migrate_test.go # store/v2/root/store.go # store/v2/root/store_mock_test.go # store/v2/root/store_test.go # store/v2/root/upgrade_test.go # store/v2/snapshots/helpers_test.go # store/v2/snapshots/manager.go # store/v2/snapshots/manager_test.go # store/v2/snapshots/snapshotter.go # store/v2/store.go # tests/integration/v2/auth/app_test.go --- runtime/v2/builder.go | 219 +++++ server/v2/cometbft/abci_test.go | 6 +- .../v2/cometbft/internal/mock/mock_reader.go | 8 +- .../v2/cometbft/internal/mock/mock_store.go | 24 +- server/v2/cometbft/server.go | 2 - server/v2/stf/branch/bench_test.go | 128 +++ server/v2/store/snapshot.go | 416 +++++++++ store/iavl/store_test.go | 710 +++++++++++++++ store/v2/commitment/iavl/tree.go | 201 +++++ store/v2/commitment/store.go | 571 ++++++++++++ store/v2/commitment/store_test_suite.go | 495 +++++++++++ store/v2/database.go | 63 ++ store/v2/migration/README.md | 111 +++ store/v2/migration/manager.go | 213 +++++ store/v2/migration/manager_test.go | 179 ++++ store/v2/mock/db_mock.go | 301 +++++++ store/v2/mock/types.go | 13 + store/v2/pruning/manager.go | 52 ++ store/v2/pruning/manager_test.go | 227 +++++ store/v2/root/factory.go | 131 +++ store/v2/root/migrate_test.go | 156 ++++ store/v2/root/store.go | 400 +++++++++ store/v2/root/store_mock_test.go | 103 +++ store/v2/root/store_test.go | 830 ++++++++++++++++++ store/v2/root/upgrade_test.go | 151 ++++ store/v2/snapshots/helpers_test.go | 282 ++++++ store/v2/snapshots/manager.go | 591 +++++++++++++ store/v2/snapshots/manager_test.go | 525 +++++++++++ store/v2/snapshots/snapshotter.go | 46 + store/v2/store.go | 101 +++ .../integration/accounts/base_account_test.go | 4 + tests/integration/accounts/bundler_test.go | 1 + tests/integration/v2/auth/app_test.go | 134 +++ 33 files changed, 7364 insertions(+), 30 deletions(-) create mode 100644 runtime/v2/builder.go create mode 100644 server/v2/stf/branch/bench_test.go create mode 100644 server/v2/store/snapshot.go create mode 100644 store/iavl/store_test.go create mode 100644 store/v2/commitment/iavl/tree.go create mode 100644 store/v2/commitment/store.go create mode 100644 store/v2/commitment/store_test_suite.go create mode 100644 store/v2/database.go create mode 100644 store/v2/migration/README.md create mode 100644 store/v2/migration/manager.go create mode 100644 store/v2/migration/manager_test.go create mode 100644 store/v2/mock/db_mock.go create mode 100644 store/v2/mock/types.go create mode 100644 store/v2/pruning/manager.go create mode 100644 store/v2/pruning/manager_test.go create mode 100644 store/v2/root/factory.go create mode 100644 store/v2/root/migrate_test.go create mode 100644 store/v2/root/store.go create mode 100644 store/v2/root/store_mock_test.go create mode 100644 
store/v2/root/store_test.go create mode 100644 store/v2/root/upgrade_test.go create mode 100644 store/v2/snapshots/helpers_test.go create mode 100644 store/v2/snapshots/manager.go create mode 100644 store/v2/snapshots/manager_test.go create mode 100644 store/v2/snapshots/snapshotter.go create mode 100644 store/v2/store.go create mode 100644 tests/integration/v2/auth/app_test.go diff --git a/runtime/v2/builder.go b/runtime/v2/builder.go new file mode 100644 index 000000000000..b851955943b0 --- /dev/null +++ b/runtime/v2/builder.go @@ -0,0 +1,219 @@ +package runtime + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + + "cosmossdk.io/core/appmodule" + appmodulev2 "cosmossdk.io/core/appmodule/v2" + "cosmossdk.io/core/store" + "cosmossdk.io/core/transaction" + "cosmossdk.io/runtime/v2/services" + "cosmossdk.io/server/v2/appmanager" + "cosmossdk.io/server/v2/stf" + "cosmossdk.io/server/v2/stf/branch" + "cosmossdk.io/store/v2/root" +) + +// AppBuilder is a type that is injected into a container by the runtime/v2 module +// (as *AppBuilder) which can be used to create an app which is compatible with +// the existing app.go initialization conventions. +type AppBuilder[T transaction.Tx] struct { + app *App[T] + storeBuilder root.Builder + storeConfig *root.Config + + // the following fields are used to overwrite the default + branch func(state store.ReaderMap) store.WriterMap + txValidator func(ctx context.Context, tx T) error + postTxExec func(ctx context.Context, tx T, success bool) error +} + +// RegisterModules registers the provided modules with the module manager. +// This is the primary hook for integrating with modules which are not registered using the app config. +func (a *AppBuilder[T]) RegisterModules(modules map[string]appmodulev2.AppModule) error { + for name, appModule := range modules { + // if a (legacy) module implements the HasName interface, check that the name matches + if mod, ok := appModule.(interface{ Name() string }); ok { + if name != mod.Name() { + a.app.logger.Warn(fmt.Sprintf("module name %q does not match name returned by HasName: %q", name, mod.Name())) + } + } + + if _, ok := a.app.moduleManager.modules[name]; ok { + return fmt.Errorf("module named %q already exists", name) + } + a.app.moduleManager.modules[name] = appModule + + if mod, ok := appModule.(appmodulev2.HasRegisterInterfaces); ok { + mod.RegisterInterfaces(a.app.interfaceRegistrar) + } + + if mod, ok := appModule.(appmodule.HasAminoCodec); ok { + mod.RegisterLegacyAminoCodec(a.app.amino) + } + } + + return nil +} + +// Build builds an *App instance. 
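+// Options are applied before defaults are chosen, so any hook left unset
+// falls back to its default: branch.DefaultNewWriterMap for state branching,
+// the module manager's aggregated TxValidators for transaction validation,
+// and a no-op post-tx executor.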
+func (a *AppBuilder[T]) Build(opts ...AppBuilderOption[T]) (*App[T], error) { + for _, opt := range opts { + opt(a) + } + + // default branch + if a.branch == nil { + a.branch = branch.DefaultNewWriterMap + } + + // default tx validator + if a.txValidator == nil { + a.txValidator = a.app.moduleManager.TxValidators() + } + + // default post tx exec + if a.postTxExec == nil { + a.postTxExec = func(ctx context.Context, tx T, success bool) error { + return nil + } + } + + var err error + a.app.db, err = a.storeBuilder.Build(a.app.logger, a.storeConfig) + if err != nil { + return nil, err + } + + if err = a.app.moduleManager.RegisterServices(a.app); err != nil { + return nil, err + } + + endBlocker, valUpdate := a.app.moduleManager.EndBlock() + + stf, err := stf.New[T]( + a.app.logger.With("module", "stf"), + a.app.msgRouterBuilder, + a.app.queryRouterBuilder, + a.app.moduleManager.PreBlocker(), + a.app.moduleManager.BeginBlock(), + endBlocker, + a.txValidator, + valUpdate, + a.postTxExec, + a.branch, + ) + if err != nil { + return nil, fmt.Errorf("failed to create STF: %w", err) + } + a.app.stf = stf + + a.app.AppManager = appmanager.New[T]( + appmanager.Config{ + ValidateTxGasLimit: a.app.config.GasConfig.ValidateTxGasLimit, + QueryGasLimit: a.app.config.GasConfig.QueryGasLimit, + SimulationGasLimit: a.app.config.GasConfig.SimulationGasLimit, + }, + a.app.db, + a.app.stf, + a.initGenesis, + a.exportGenesis, + ) + + return a.app, nil +} + +// initGenesis returns the app initialization genesis for modules +func (a *AppBuilder[T]) initGenesis(ctx context.Context, src io.Reader, txHandler func(json.RawMessage) error) (store.WriterMap, error) { + // this implementation assumes that the state is a JSON object + bz, err := io.ReadAll(src) + if err != nil { + return nil, fmt.Errorf("failed to read import state: %w", err) + } + + var genesisJSON map[string]json.RawMessage + if err = json.Unmarshal(bz, &genesisJSON); err != nil { + return nil, err + } + + v, zeroState, err := a.app.db.StateLatest() + if err != nil { + return nil, fmt.Errorf("unable to get latest state: %w", err) + } + if v != 0 { // TODO: genesis state may be > 0, we need to set version on store + return nil, errors.New("cannot init genesis on non-zero state") + } + genesisCtx := services.NewGenesisContext(a.branch(zeroState)) + genesisState, err := genesisCtx.Mutate(ctx, func(ctx context.Context) error { + err = a.app.moduleManager.InitGenesisJSON(ctx, genesisJSON, txHandler) + if err != nil { + return fmt.Errorf("failed to init genesis: %w", err) + } + return nil + }) + + return genesisState, err +} + +// exportGenesis returns the app export genesis logic for modules +func (a *AppBuilder[T]) exportGenesis(ctx context.Context, version uint64) ([]byte, error) { + state, err := a.app.db.StateAt(version) + if err != nil { + return nil, fmt.Errorf("unable to get state at given version: %w", err) + } + + genesisJson, err := a.app.moduleManager.ExportGenesisForModules( + ctx, + func() store.WriterMap { + return a.branch(state) + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to export genesis: %w", err) + } + + bz, err := json.Marshal(genesisJson) + if err != nil { + return nil, fmt.Errorf("failed to marshal genesis: %w", err) + } + + return bz, nil +} + +// AppBuilderOption is a function that can be passed to AppBuilder.Build to customize the resulting app. +type AppBuilderOption[T transaction.Tx] func(*AppBuilder[T]) + +// AppBuilderWithBranch sets a custom branch implementation for the app. 
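+// A hypothetical use, assuming an injected *AppBuilder[T] named builder:
+//
+//	app, err := builder.Build(AppBuilderWithBranch[T](branch.DefaultNewWriterMap))
+//
+// which is a no-op customization, since branch.DefaultNewWriterMap is already
+// the default chosen by Build.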
+func AppBuilderWithBranch[T transaction.Tx](branch func(state store.ReaderMap) store.WriterMap) AppBuilderOption[T] { + return func(a *AppBuilder[T]) { + a.branch = branch + } +} + +// AppBuilderWithTxValidator sets the tx validator for the app. +// It overrides all default tx validators defined by modules. +func AppBuilderWithTxValidator[T transaction.Tx]( + txValidators func( + ctx context.Context, tx T, + ) error, +) AppBuilderOption[T] { + return func(a *AppBuilder[T]) { + a.txValidator = txValidators + } +} + +// AppBuilderWithPostTxExec sets logic that will be executed after each transaction. +// When not provided, a no-op function will be used. +func AppBuilderWithPostTxExec[T transaction.Tx]( + postTxExec func( + ctx context.Context, tx T, success bool, + ) error, +) AppBuilderOption[T] { + return func(a *AppBuilder[T]) { + a.postTxExec = postTxExec + } +} diff --git a/server/v2/cometbft/abci_test.go b/server/v2/cometbft/abci_test.go index ab1fdc722879..c2bd81d65f23 100644 --- a/server/v2/cometbft/abci_test.go +++ b/server/v2/cometbft/abci_test.go @@ -591,7 +591,7 @@ func TestConsensus_Query(t *testing.T) { c := setUpConsensus(t, 100_000, cometmock.MockMempool[mock.Tx]{}) // Write data to state storage - err := c.store.GetStateStorage().ApplyChangeset(&store.Changeset{ + err := c.store.GetStateCommitment().WriteChangeset(&store.Changeset{ Version: 1, Changes: []store.StateChanges{ { @@ -691,9 +691,8 @@ func setUpConsensus(t *testing.T, gasLimit uint64, mempool mempool.Mempool[mock. ) require.NoError(t, err) - ss := cometmock.NewMockStorage(log.NewNopLogger(), t.TempDir()) sc := cometmock.NewMockCommiter(log.NewNopLogger(), string(actorName), "stf") - mockStore := cometmock.NewMockStore(ss, sc) + mockStore := cometmock.NewMockStore(sc) am := appmanager.New(appmanager.Config{ ValidateTxGasLimit: gasLimit, @@ -786,6 +785,7 @@ func TestOptimisticExecution(t *testing.T) { Txs: ppReq.Txs, } fbResp, err := c.FinalizeBlock(context.Background(), fbReq) + require.Nil(t, fbResp) require.Error(t, err) require.ErrorContains(t, err, "test error") // from optimisticMockFunc require.Equal(t, 1, calledTimes) diff --git a/server/v2/cometbft/internal/mock/mock_reader.go b/server/v2/cometbft/internal/mock/mock_reader.go index 9911ee55eb81..46c1d422c648 100644 --- a/server/v2/cometbft/internal/mock/mock_reader.go +++ b/server/v2/cometbft/internal/mock/mock_reader.go @@ -39,7 +39,7 @@ func NewMockReader(v uint64, rs *MockStore, actor []byte) *MockReader { } func (roa *MockReader) Has(key []byte) (bool, error) { - val, err := roa.store.GetStateStorage().Has(roa.actor, roa.version, key) + val, err := roa.store.GetStateCommitment().Has(roa.actor, roa.version, key) if err != nil { return false, err } @@ -48,7 +48,7 @@ func (roa *MockReader) Has(key []byte) (bool, error) { } func (roa *MockReader) Get(key []byte) ([]byte, error) { - result, err := roa.store.GetStateStorage().Get(roa.actor, roa.version, key) + result, err := roa.store.GetStateCommitment().Get(roa.actor, roa.version, key) if err != nil { return nil, err } @@ -57,9 +57,9 @@ func (roa *MockReader) Get(key []byte) ([]byte, error) { } func (roa *MockReader) Iterator(start, end []byte) (corestore.Iterator, error) { - return roa.store.GetStateStorage().Iterator(roa.actor, roa.version, start, end) + return roa.store.GetStateCommitment().Iterator(roa.actor, roa.version, start, end) } func (roa *MockReader) ReverseIterator(start, end []byte) (corestore.Iterator, error) { - return roa.store.GetStateStorage().ReverseIterator(roa.actor, roa.version, 
start, end) + return roa.store.GetStateCommitment().ReverseIterator(roa.actor, roa.version, start, end) } diff --git a/server/v2/cometbft/internal/mock/mock_store.go b/server/v2/cometbft/internal/mock/mock_store.go index b485a75d876b..8cb4542ac41e 100644 --- a/server/v2/cometbft/internal/mock/mock_store.go +++ b/server/v2/cometbft/internal/mock/mock_store.go @@ -11,21 +11,12 @@ import ( "cosmossdk.io/store/v2/commitment/iavl" dbm "cosmossdk.io/store/v2/db" "cosmossdk.io/store/v2/proof" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/pebbledb" ) type MockStore struct { - Storage storev2.VersionedWriter Committer storev2.Committer } -func NewMockStorage(logger log.Logger, dir string) storev2.VersionedWriter { - storageDB, _ := pebbledb.New(dir) - ss := storage.NewStorageStore(storageDB, logger) - return ss -} - func NewMockCommiter(logger log.Logger, actors ...string) storev2.Committer { treeMap := make(map[string]commitment.Tree) for _, actor := range actors { @@ -36,8 +27,8 @@ func NewMockCommiter(logger log.Logger, actors ...string) storev2.Committer { return sc } -func NewMockStore(ss storev2.VersionedWriter, sc storev2.Committer) *MockStore { - return &MockStore{Storage: ss, Committer: sc} +func NewMockStore(sc storev2.Committer) *MockStore { + return &MockStore{Committer: sc} } func (s *MockStore) GetLatestVersion() (uint64, error) { @@ -59,12 +50,7 @@ func (s *MockStore) StateLatest() (uint64, corestore.ReaderMap, error) { } func (s *MockStore) Commit(changeset *corestore.Changeset) (corestore.Hash, error) { - err := s.Storage.ApplyChangeset(changeset) - if err != nil { - return []byte{}, err - } - - err = s.Committer.WriteChangeset(changeset) + err := s.Committer.WriteChangeset(changeset) if err != nil { return []byte{}, err } @@ -81,10 +67,6 @@ func (s *MockStore) StateAt(version uint64) (corestore.ReaderMap, error) { return NewMockReaderMap(version, s), nil } -func (s *MockStore) GetStateStorage() storev2.VersionedWriter { - return s.Storage -} - func (s *MockStore) GetStateCommitment() storev2.Committer { return s.Committer } diff --git a/server/v2/cometbft/server.go b/server/v2/cometbft/server.go index 55a38b5e9646..ed0c4fba8702 100644 --- a/server/v2/cometbft/server.go +++ b/server/v2/cometbft/server.go @@ -127,7 +127,6 @@ func New[T transaction.Tx]( indexEvents[e] = struct{}{} } - ss := store.GetStateStorage().(snapshots.StorageSnapshotter) sc := store.GetStateCommitment().(snapshots.CommitSnapshotter) snapshotStore, err := GetSnapshotStore(srv.config.ConfigTomlConfig.RootDir) @@ -155,7 +154,6 @@ func New[T transaction.Tx]( snapshotStore, srv.serverOptions.SnapshotOptions(cfg), sc, - ss, nil, // extensions snapshotter registered below logger, ) diff --git a/server/v2/stf/branch/bench_test.go b/server/v2/stf/branch/bench_test.go new file mode 100644 index 000000000000..67122b59b66f --- /dev/null +++ b/server/v2/stf/branch/bench_test.go @@ -0,0 +1,128 @@ +package branch + +import ( + "encoding/binary" + "fmt" + "testing" + + "cosmossdk.io/core/store" + coretesting "cosmossdk.io/core/testing" +) + +var ( + stackSizes = []int{1, 10, 100} + elemsInStack = 10 +) + +func Benchmark_CacheStack_Set(b *testing.B) { + for _, stackSize := range stackSizes { + b.Run(fmt.Sprintf("StackSize%d", stackSize), func(b *testing.B) { + bs := makeBranchStack(b, stackSize) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + err := bs.Set([]byte{0}, []byte{0}) + if err != nil { + b.Fatal(err) + } + } + }) + } +} + +var sink any + +func Benchmark_Get(b *testing.B) { + 
for _, stackSize := range stackSizes { + b.Run(fmt.Sprintf("StackSize%d", stackSize), func(b *testing.B) { + bs := makeBranchStack(b, stackSize) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + sink, _ = bs.Get([]byte{0}) + } + }) + } + if sink == nil { + b.Fatal("benchmark did not run") + } + sink = nil +} + +func Benchmark_GetSparse(b *testing.B) { + var sink any + for _, stackSize := range stackSizes { + b.Run(fmt.Sprintf("StackSize%d", stackSize), func(b *testing.B) { + bs := makeBranchStack(b, stackSize) + keys := func() [][]byte { + var keys [][]byte + for i := 0; i < b.N; i++ { + keys = append(keys, numToBytes(i)) + } + return keys + }() + b.ResetTimer() + b.ReportAllocs() + for _, key := range keys { + sink, _ = bs.Get(key) + } + }) + } + if sink == nil { + b.Fatal("benchmark did not run") + } + sink = nil +} + +var ( + keySink any + valueSink any +) + +func Benchmark_Iterate(b *testing.B) { + for _, stackSize := range stackSizes { + b.Run(fmt.Sprintf("StackSize%d", stackSize), func(b *testing.B) { + bs := makeBranchStack(b, stackSize) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + iter, _ := bs.Iterator(nil, nil) + for iter.Valid() { + keySink = iter.Key() + valueSink = iter.Value() + iter.Next() + } + _ = iter.Close() + } + }) + } + if valueSink == nil || keySink == nil { + b.Fatal("benchmark did not run") + } + valueSink = nil + keySink = nil +} + +// makeBranchStack creates a branch stack of the given size and initializes it with unique key-value pairs. +func makeBranchStack(b *testing.B, stackSize int) Store[store.KVStore] { + b.Helper() + parent := coretesting.NewMemKV() + branch := NewStore[store.KVStore](parent) + for i := 1; i < stackSize; i++ { + branch = NewStore[store.KVStore](branch) + for j := 0; j < elemsInStack; j++ { + // create unique keys by including the branch index. + key := append(numToBytes(i), numToBytes(j)...) + value := []byte{byte(j)} + err := branch.Set(key, value) + if err != nil { + b.Fatal(err) + } + } + } + return branch +} + +func numToBytes[T ~int](n T) []byte { + return binary.BigEndian.AppendUint64(nil, uint64(n)) +} diff --git a/server/v2/store/snapshot.go b/server/v2/store/snapshot.go new file mode 100644 index 000000000000..bf9e5ddb3827 --- /dev/null +++ b/server/v2/store/snapshot.go @@ -0,0 +1,416 @@ +package store + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "strconv" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + + "cosmossdk.io/log" + serverv2 "cosmossdk.io/server/v2" + storev2 "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/snapshots" + "cosmossdk.io/store/v2/snapshots/types" +) + +const SnapshotFileName = "_snapshot" + +// ExportSnapshotCmd exports app state to snapshot store. 
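+// Hypothetical invocation (the full command path depends on how the app
+// mounts this server's CLI): `export --height 100` exports the snapshot of
+// state at height 100; with the flag omitted, the latest committed height is
+// used.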
+func (s *Server[T]) ExportSnapshotCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "export",
+		Short: "Export app state to snapshot store",
+		Args:  cobra.NoArgs,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			v := serverv2.GetViperFromCmd(cmd)
+
+			height, err := cmd.Flags().GetInt64("height")
+			if err != nil {
+				return err
+			}
+
+			logger := log.NewLogger(cmd.OutOrStdout())
+			rootStore, _, err := createRootStore(v, logger)
+			if err != nil {
+				return err
+			}
+			if height == 0 {
+				lastCommitId, err := rootStore.LastCommitID()
+				if err != nil {
+					return err
+				}
+				height = int64(lastCommitId.Version)
+			}
+
+			cmd.Printf("Exporting snapshot for height %d\n", height)
+
+			sm, err := createSnapshotsManager(cmd, v, logger, rootStore)
+			if err != nil {
+				return err
+			}
+
+			snapshot, err := sm.Create(uint64(height))
+			if err != nil {
+				return err
+			}
+
+			cmd.Printf("Snapshot created at height %d, format %d, chunks %d\n", snapshot.Height, snapshot.Format, snapshot.Chunks)
+			return nil
+		},
+	}
+
+	addSnapshotFlagsToCmd(cmd)
+	cmd.Flags().Int64("height", 0, "Height to export, default to latest state height")
+
+	return cmd
+}
+
+// RestoreSnapshotCmd returns a command to restore a snapshot
+func (s *Server[T]) RestoreSnapshotCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "restore <height> <format>",
+		Short: "Restore app state from local snapshot",
+		Long:  "Restore app state from local snapshot",
+		Args:  cobra.ExactArgs(2),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			v := serverv2.GetViperFromCmd(cmd)
+
+			height, err := strconv.ParseUint(args[0], 10, 64)
+			if err != nil {
+				return err
+			}
+			format, err := strconv.ParseUint(args[1], 10, 32)
+			if err != nil {
+				return err
+			}
+
+			logger := log.NewLogger(cmd.OutOrStdout())
+
+			rootStore, _, err := createRootStore(v, logger)
+			if err != nil {
+				return fmt.Errorf("failed to create root store: %w", err)
+			}
+			sm, err := createSnapshotsManager(cmd, v, logger, rootStore)
+			if err != nil {
+				return err
+			}
+
+			return sm.RestoreLocalSnapshot(height, uint32(format))
+		},
+	}
+
+	addSnapshotFlagsToCmd(cmd)
+
+	return cmd
+}
+
+// ListSnapshotsCmd returns the command to list local snapshots
+func (s *Server[T]) ListSnapshotsCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "list",
+		Short: "List local snapshots",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			v := serverv2.GetViperFromCmd(cmd)
+			snapshotStore, err := snapshots.NewStore(filepath.Join(v.GetString(serverv2.FlagHome), "data", "snapshots"))
+			if err != nil {
+				return err
+			}
+			snapshots, err := snapshotStore.List()
+			if err != nil {
+				return fmt.Errorf("failed to list snapshots: %w", err)
+			}
+			for _, snapshot := range snapshots {
+				cmd.Println("height:", snapshot.Height, "format:", snapshot.Format, "chunks:", snapshot.Chunks)
+			}
+
+			return nil
+		},
+	}
+
+	return cmd
+}
+
+// DeleteSnapshotCmd returns the command to delete a local snapshot
+func (s *Server[T]) DeleteSnapshotCmd() *cobra.Command {
+	return &cobra.Command{
+		Use:   "delete <height> <format>",
+		Short: "Delete a local snapshot",
+		Args:  cobra.ExactArgs(2),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			v := serverv2.GetViperFromCmd(cmd)
+
+			height, err := strconv.ParseUint(args[0], 10, 64)
+			if err != nil {
+				return err
+			}
+			format, err := strconv.ParseUint(args[1], 10, 32)
+			if err != nil {
+				return err
+			}
+
+			snapshotStore, err := snapshots.NewStore(filepath.Join(v.GetString(serverv2.FlagHome), "data", "snapshots"))
+			if err != nil {
+				return err
+			}
+
+			return snapshotStore.Delete(height, uint32(format))
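+			// Hypothetical invocation, mirroring the two positional arguments
+			// parsed above: `delete 100 1` deletes the format-1 snapshot taken
+			// at height 100.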
+		},
+	}
+}
+
+// DumpArchiveCmd returns a command to dump the snapshot in a portable archive format
+func (s *Server[T]) DumpArchiveCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "dump <height> <format>",
+		Short: "Dump the snapshot as portable archive format",
+		Args:  cobra.ExactArgs(2),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			v := serverv2.GetViperFromCmd(cmd)
+			snapshotStore, err := snapshots.NewStore(filepath.Join(v.GetString(serverv2.FlagHome), "data", "snapshots"))
+			if err != nil {
+				return err
+			}
+
+			output, err := cmd.Flags().GetString("output")
+			if err != nil {
+				return err
+			}
+
+			height, err := strconv.ParseUint(args[0], 10, 64)
+			if err != nil {
+				return err
+			}
+			format, err := strconv.ParseUint(args[1], 10, 32)
+			if err != nil {
+				return err
+			}
+
+			if output == "" {
+				output = fmt.Sprintf("%d-%d.tar.gz", height, format)
+			}
+
+			snapshot, err := snapshotStore.Get(height, uint32(format))
+			if err != nil {
+				return err
+			}
+
+			if snapshot == nil {
+				return errors.New("snapshot doesn't exist")
+			}
+
+			bz, err := snapshot.Marshal()
+			if err != nil {
+				return err
+			}
+
+			fp, err := os.Create(output)
+			if err != nil {
+				return err
+			}
+			defer fp.Close()
+
+			// since the chunk files are already compressed, we just use fastest compression here
+			gzipWriter, err := gzip.NewWriterLevel(fp, gzip.BestSpeed)
+			if err != nil {
+				return err
+			}
+			tarWriter := tar.NewWriter(gzipWriter)
+			if err := tarWriter.WriteHeader(&tar.Header{
+				Name: SnapshotFileName,
+				Mode: 0o644,
+				Size: int64(len(bz)),
+			}); err != nil {
+				return fmt.Errorf("failed to write snapshot header to tar: %w", err)
+			}
+			if _, err := tarWriter.Write(bz); err != nil {
+				return fmt.Errorf("failed to write snapshot to tar: %w", err)
+			}
+
+			for i := uint32(0); i < snapshot.Chunks; i++ {
+				path := snapshotStore.PathChunk(height, uint32(format), i)
+				tarName := strconv.FormatUint(uint64(i), 10)
+				if err := processChunk(tarWriter, path, tarName); err != nil {
+					return err
+				}
+			}
+
+			if err := tarWriter.Close(); err != nil {
+				return fmt.Errorf("failed to close tar writer: %w", err)
+			}
+
+			if err := gzipWriter.Close(); err != nil {
+				return fmt.Errorf("failed to close gzip writer: %w", err)
+			}
+
+			return fp.Close()
+		},
+	}
+
+	cmd.Flags().StringP("output", "o", "", "output file")
+
+	return cmd
+}
+
+// LoadArchiveCmd returns a command that loads a portable archive format snapshot into the snapshot store
+func (s *Server[T]) LoadArchiveCmd() *cobra.Command {
+	return &cobra.Command{
+		Use:   "load <archive-file>",
+		Short: "Load a snapshot archive file (.tar.gz) into snapshot store",
+		Args:  cobra.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			v := serverv2.GetViperFromCmd(cmd)
+			snapshotStore, err := snapshots.NewStore(filepath.Join(v.GetString(serverv2.FlagHome), "data", "snapshots"))
+			if err != nil {
+				return err
+			}
+
+			path := args[0]
+			fp, err := os.Open(path)
+			if err != nil {
+				return fmt.Errorf("failed to open archive file: %w", err)
+			}
+			reader, err := gzip.NewReader(fp)
+			if err != nil {
+				return fmt.Errorf("failed to create gzip reader: %w", err)
+			}
+
+			var snapshot types.Snapshot
+			tr := tar.NewReader(reader)
+
+			hdr, err := tr.Next()
+			if err != nil {
+				return fmt.Errorf("failed to read snapshot file header: %w", err)
+			}
+			if hdr.Name != SnapshotFileName {
+				return fmt.Errorf("invalid archive, expect file: snapshot, got: %s", hdr.Name)
+			}
+			bz, err := io.ReadAll(tr)
+			if err != nil {
+				return fmt.Errorf("failed to read snapshot file: %w", err)
+			}
+			if err := snapshot.Unmarshal(bz); err != nil {
+				return
fmt.Errorf("failed to unmarshal snapshot: %w", err) + } + + // make sure the channel is unbuffered, because the tar reader can't do concurrency + chunks := make(chan io.ReadCloser) + quitChan := make(chan *types.Snapshot) + go func() { + defer close(quitChan) + + savedSnapshot, err := snapshotStore.Save(snapshot.Height, snapshot.Format, chunks) + if err != nil { + cmd.Println("failed to save snapshot", err) + return + } + quitChan <- savedSnapshot + }() + + for i := uint32(0); i < snapshot.Chunks; i++ { + hdr, err = tr.Next() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + if hdr.Name != strconv.FormatInt(int64(i), 10) { + return fmt.Errorf("invalid archive, expect file: %d, got: %s", i, hdr.Name) + } + + bz, err := io.ReadAll(tr) + if err != nil { + return fmt.Errorf("failed to read chunk file: %w", err) + } + chunks <- io.NopCloser(bytes.NewReader(bz)) + } + close(chunks) + + savedSnapshot := <-quitChan + if savedSnapshot == nil { + return errors.New("failed to save snapshot") + } + + if !reflect.DeepEqual(&snapshot, savedSnapshot) { + _ = snapshotStore.Delete(snapshot.Height, snapshot.Format) + return errors.New("invalid archive, the saved snapshot is not equal to the original one") + } + + return nil + }, + } +} + +func createSnapshotsManager( + cmd *cobra.Command, v *viper.Viper, logger log.Logger, store storev2.Backend, +) (*snapshots.Manager, error) { + home := v.GetString(serverv2.FlagHome) + snapshotStore, err := snapshots.NewStore(filepath.Join(home, "data", "snapshots")) + if err != nil { + return nil, err + } + var interval, keepRecent uint64 + // if flag was not passed, use as 0. + if cmd.Flags().Changed(FlagKeepRecent) { + keepRecent, err = cmd.Flags().GetUint64(FlagKeepRecent) + if err != nil { + return nil, err + } + } + if cmd.Flags().Changed(FlagInterval) { + interval, err = cmd.Flags().GetUint64(FlagInterval) + if err != nil { + return nil, err + } + } + + sm := snapshots.NewManager( + snapshotStore, + snapshots.NewSnapshotOptions(interval, uint32(keepRecent)), + store.GetStateCommitment().(snapshots.CommitSnapshotter), + nil, + logger) + return sm, nil +} + +func addSnapshotFlagsToCmd(cmd *cobra.Command) { + cmd.Flags().Uint64(FlagKeepRecent, 0, "KeepRecent defines how many snapshots to keep in heights") + cmd.Flags().Uint64(FlagInterval, 0, "Interval defines at which heights the snapshot is taken") +} + +func processChunk(tarWriter *tar.Writer, path, tarName string) error { + file, err := os.Open(path) + if err != nil { + return fmt.Errorf("failed to open chunk file %s: %w", path, err) + } + defer file.Close() + + st, err := file.Stat() + if err != nil { + return fmt.Errorf("failed to stat chunk file %s: %w", path, err) + } + + if err := tarWriter.WriteHeader(&tar.Header{ + Name: tarName, + Mode: 0o644, + Size: st.Size(), + }); err != nil { + return fmt.Errorf("failed to write chunk header to tar: %w", err) + } + + if _, err := io.Copy(tarWriter, file); err != nil { + return fmt.Errorf("failed to write chunk to tar: %w", err) + } + + return nil +} diff --git a/store/iavl/store_test.go b/store/iavl/store_test.go new file mode 100644 index 000000000000..3a6050e2453e --- /dev/null +++ b/store/iavl/store_test.go @@ -0,0 +1,710 @@ +package iavl + +import ( + "bytes" + crand "crypto/rand" + "fmt" + "math" + "sort" + "testing" + + "github.com/cosmos/iavl" + "github.com/stretchr/testify/require" + + corestore "cosmossdk.io/core/store" + coretesting "cosmossdk.io/core/testing" + "cosmossdk.io/log" + "cosmossdk.io/store/cachekv" + 
"cosmossdk.io/store/internal/kv" + "cosmossdk.io/store/metrics" + "cosmossdk.io/store/types" +) + +var ( + cacheSize = 100 + treeData = map[string]string{ + "hello": "goodbye", + "aloha": "shalom", + } + nMoreData = 0 +) + +func randBytes(numBytes int) []byte { + b := make([]byte, numBytes) + _, _ = crand.Read(b) + return b +} + +// make a tree with data from above and save it +func newAlohaTree(t *testing.T, db corestore.KVStoreWithBatch) (*iavl.MutableTree, types.CommitID) { + t.Helper() + tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) + + for k, v := range treeData { + _, err := tree.Set([]byte(k), []byte(v)) + require.NoError(t, err) + } + + for i := 0; i < nMoreData; i++ { + key := randBytes(12) + value := randBytes(50) + _, err := tree.Set(key, value) + require.NoError(t, err) + } + + hash, ver, err := tree.SaveVersion() + require.Nil(t, err) + + return tree, types.CommitID{Version: ver, Hash: hash} +} + +func TestLoadStore(t *testing.T) { + db := coretesting.NewMemDB() + tree, _ := newAlohaTree(t, db) + store := UnsafeNewStore(tree) + + // Create non-pruned height H + updated, err := tree.Set([]byte("hello"), []byte("hallo")) + require.NoError(t, err) + require.True(t, updated) + hash, verH, err := tree.SaveVersion() + cIDH := types.CommitID{Version: verH, Hash: hash} + require.Nil(t, err) + + // Create pruned height Hp + updated, err = tree.Set([]byte("hello"), []byte("hola")) + require.NoError(t, err) + require.True(t, updated) + hash, verHp, err := tree.SaveVersion() + cIDHp := types.CommitID{Version: verHp, Hash: hash} + require.Nil(t, err) + + // Create current height Hc + updated, err = tree.Set([]byte("hello"), []byte("ciao")) + require.NoError(t, err) + require.True(t, updated) + hash, verHc, err := tree.SaveVersion() + cIDHc := types.CommitID{Version: verHc, Hash: hash} + require.Nil(t, err) + + // Querying an existing store at some previous non-pruned height H + hStore, err := store.GetImmutable(verH) + require.NoError(t, err) + require.Equal(t, string(hStore.Get([]byte("hello"))), "hallo") + + // Querying an existing store at some previous pruned height Hp + hpStore, err := store.GetImmutable(verHp) + require.NoError(t, err) + require.Equal(t, string(hpStore.Get([]byte("hello"))), "hola") + + // Querying an existing store at current height Hc + hcStore, err := store.GetImmutable(verHc) + require.NoError(t, err) + require.Equal(t, string(hcStore.Get([]byte("hello"))), "ciao") + + // Querying a new store at some previous non-pruned height H + newHStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDH, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) + require.NoError(t, err) + require.Equal(t, string(newHStore.Get([]byte("hello"))), "hallo") + + // Querying a new store at some previous pruned height Hp + newHpStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHp, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) + require.NoError(t, err) + require.Equal(t, string(newHpStore.Get([]byte("hello"))), "hola") + + // Querying a new store at current height H + newHcStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHc, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) + require.NoError(t, err) + require.Equal(t, string(newHcStore.Get([]byte("hello"))), "ciao") +} + +func TestGetImmutable(t *testing.T) { + db := coretesting.NewMemDB() + tree, _ := newAlohaTree(t, db) + store := UnsafeNewStore(tree) + + updated, err := tree.Set([]byte("hello"), []byte("adios")) 
+ require.NoError(t, err) + require.True(t, updated) + hash, ver, err := tree.SaveVersion() + cID := types.CommitID{Version: ver, Hash: hash} + require.Nil(t, err) + + _, err = store.GetImmutable(cID.Version + 1) + require.Error(t, err) + + newStore, err := store.GetImmutable(cID.Version - 1) + require.NoError(t, err) + require.Equal(t, newStore.Get([]byte("hello")), []byte("goodbye")) + + newStore, err = store.GetImmutable(cID.Version) + require.NoError(t, err) + require.Equal(t, newStore.Get([]byte("hello")), []byte("adios")) + + res, err := newStore.Query(&types.RequestQuery{Data: []byte("hello"), Height: cID.Version, Path: "/key", Prove: true}) + require.NoError(t, err) + require.Equal(t, res.Value, []byte("adios")) + require.NotNil(t, res.ProofOps) + + require.Panics(t, func() { newStore.Set(nil, nil) }) + require.Panics(t, func() { newStore.Delete(nil) }) + require.Panics(t, func() { newStore.Commit() }) +} + +func TestTestGetImmutableIterator(t *testing.T) { + db := coretesting.NewMemDB() + tree, cID := newAlohaTree(t, db) + store := UnsafeNewStore(tree) + + newStore, err := store.GetImmutable(cID.Version) + require.NoError(t, err) + + iter := newStore.Iterator([]byte("aloha"), []byte("hellz")) + expected := []string{"aloha", "hello"} + var i int + + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, treeData[expectedKey]) + i++ + } + + require.Equal(t, len(expected), i) +} + +func TestIAVLStoreGetSetHasDelete(t *testing.T) { + db := coretesting.NewMemDB() + tree, _ := newAlohaTree(t, db) + iavlStore := UnsafeNewStore(tree) + + key := "hello" + + exists := iavlStore.Has([]byte(key)) + require.True(t, exists) + + value := iavlStore.Get([]byte(key)) + require.EqualValues(t, value, treeData[key]) + + value2 := "notgoodbye" + iavlStore.Set([]byte(key), []byte(value2)) + + value = iavlStore.Get([]byte(key)) + require.EqualValues(t, value, value2) + + iavlStore.Delete([]byte(key)) + + exists = iavlStore.Has([]byte(key)) + require.False(t, exists) +} + +func TestIAVLStoreNoNilSet(t *testing.T) { + db := coretesting.NewMemDB() + tree, _ := newAlohaTree(t, db) + iavlStore := UnsafeNewStore(tree) + + require.Panics(t, func() { iavlStore.Set(nil, []byte("value")) }, "setting a nil key should panic") + require.Panics(t, func() { iavlStore.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") + + require.Panics(t, func() { iavlStore.Set([]byte("key"), nil) }, "setting a nil value should panic") +} + +func TestIAVLIterator(t *testing.T) { + db := coretesting.NewMemDB() + tree, _ := newAlohaTree(t, db) + iavlStore := UnsafeNewStore(tree) + iter := iavlStore.Iterator([]byte("aloha"), []byte("hellz")) + expected := []string{"aloha", "hello"} + var i int + + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, treeData[expectedKey]) + i++ + } + require.Equal(t, len(expected), i) + + iter = iavlStore.Iterator([]byte("golang"), []byte("rocks")) + expected = []string{"hello"} + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, treeData[expectedKey]) + i++ + } + require.Equal(t, len(expected), i) + + iter = iavlStore.Iterator(nil, []byte("golang")) + expected = []string{"aloha"} + for i = 0; 
iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, treeData[expectedKey]) + i++ + } + require.Equal(t, len(expected), i) + + iter = iavlStore.Iterator(nil, []byte("shalom")) + expected = []string{"aloha", "hello"} + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, treeData[expectedKey]) + i++ + } + require.Equal(t, len(expected), i) + + iter = iavlStore.Iterator(nil, nil) + expected = []string{"aloha", "hello"} + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, treeData[expectedKey]) + i++ + } + require.Equal(t, len(expected), i) + + iter = iavlStore.Iterator([]byte("golang"), nil) + expected = []string{"hello"} + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, treeData[expectedKey]) + i++ + } + require.Equal(t, len(expected), i) +} + +func TestIAVLReverseIterator(t *testing.T) { + db := coretesting.NewMemDB() + + tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) + + iavlStore := UnsafeNewStore(tree) + + iavlStore.Set([]byte{0x00}, []byte("0")) + iavlStore.Set([]byte{0x00, 0x00}, []byte("0 0")) + iavlStore.Set([]byte{0x00, 0x01}, []byte("0 1")) + iavlStore.Set([]byte{0x00, 0x02}, []byte("0 2")) + iavlStore.Set([]byte{0x01}, []byte("1")) + + testReverseIterator := func(t *testing.T, start, end []byte, expected []string) { + t.Helper() + iter := iavlStore.ReverseIterator(start, end) + var i int + for i = 0; iter.Valid(); iter.Next() { + expectedValue := expected[i] + value := iter.Value() + require.EqualValues(t, string(value), expectedValue) + i++ + } + require.Equal(t, len(expected), i) + } + + testReverseIterator(t, nil, nil, []string{"1", "0 2", "0 1", "0 0", "0"}) + testReverseIterator(t, []byte{0x00}, nil, []string{"1", "0 2", "0 1", "0 0", "0"}) + testReverseIterator(t, []byte{0x00}, []byte{0x00, 0x01}, []string{"0 0", "0"}) + testReverseIterator(t, []byte{0x00}, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"}) + testReverseIterator(t, []byte{0x00, 0x01}, []byte{0x01}, []string{"0 2", "0 1"}) + testReverseIterator(t, nil, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"}) +} + +func TestIAVLPrefixIterator(t *testing.T) { + db := coretesting.NewMemDB() + tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) + + iavlStore := UnsafeNewStore(tree) + + iavlStore.Set([]byte("test1"), []byte("test1")) + iavlStore.Set([]byte("test2"), []byte("test2")) + iavlStore.Set([]byte("test3"), []byte("test3")) + iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(0)}, []byte("test4")) + iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(1)}, []byte("test4")) + iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(255)}, []byte("test4")) + iavlStore.Set([]byte{byte(255), byte(255), byte(0)}, []byte("test4")) + iavlStore.Set([]byte{byte(255), byte(255), byte(1)}, []byte("test4")) + iavlStore.Set([]byte{byte(255), byte(255), byte(255)}, []byte("test4")) + + var i int + + iter := types.KVStorePrefixIterator(iavlStore, []byte("test")) + expected := []string{"test1", "test2", "test3"} + for i = 0; iter.Valid(); iter.Next() { + expectedKey 
:= expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, expectedKey) + i++ + } + iter.Close() + require.Equal(t, len(expected), i) + + iter = types.KVStorePrefixIterator(iavlStore, []byte{byte(55), byte(255), byte(255)}) + expected2 := [][]byte{ + {byte(55), byte(255), byte(255), byte(0)}, + {byte(55), byte(255), byte(255), byte(1)}, + {byte(55), byte(255), byte(255), byte(255)}, + } + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected2[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, []byte("test4")) + i++ + } + iter.Close() + require.Equal(t, len(expected), i) + + iter = types.KVStorePrefixIterator(iavlStore, []byte{byte(255), byte(255)}) + expected2 = [][]byte{ + {byte(255), byte(255), byte(0)}, + {byte(255), byte(255), byte(1)}, + {byte(255), byte(255), byte(255)}, + } + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected2[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, []byte("test4")) + i++ + } + iter.Close() + require.Equal(t, len(expected), i) +} + +func TestIAVLReversePrefixIterator(t *testing.T) { + db := coretesting.NewMemDB() + tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) + + iavlStore := UnsafeNewStore(tree) + + iavlStore.Set([]byte("test1"), []byte("test1")) + iavlStore.Set([]byte("test2"), []byte("test2")) + iavlStore.Set([]byte("test3"), []byte("test3")) + iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(0)}, []byte("test4")) + iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(1)}, []byte("test4")) + iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(255)}, []byte("test4")) + iavlStore.Set([]byte{byte(255), byte(255), byte(0)}, []byte("test4")) + iavlStore.Set([]byte{byte(255), byte(255), byte(1)}, []byte("test4")) + iavlStore.Set([]byte{byte(255), byte(255), byte(255)}, []byte("test4")) + + var i int + + iter := types.KVStoreReversePrefixIterator(iavlStore, []byte("test")) + expected := []string{"test3", "test2", "test1"} + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, expectedKey) + i++ + } + require.Equal(t, len(expected), i) + + iter = types.KVStoreReversePrefixIterator(iavlStore, []byte{byte(55), byte(255), byte(255)}) + expected2 := [][]byte{ + {byte(55), byte(255), byte(255), byte(255)}, + {byte(55), byte(255), byte(255), byte(1)}, + {byte(55), byte(255), byte(255), byte(0)}, + } + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected2[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, []byte("test4")) + i++ + } + require.Equal(t, len(expected), i) + + iter = types.KVStoreReversePrefixIterator(iavlStore, []byte{byte(255), byte(255)}) + expected2 = [][]byte{ + {byte(255), byte(255), byte(255)}, + {byte(255), byte(255), byte(1)}, + {byte(255), byte(255), byte(0)}, + } + for i = 0; iter.Valid(); iter.Next() { + expectedKey := expected2[i] + key, value := iter.Key(), iter.Value() + require.EqualValues(t, key, expectedKey) + require.EqualValues(t, value, []byte("test4")) + i++ + } + require.Equal(t, len(expected), i) +} + +func nextVersion(iavl *Store) { + key := []byte(fmt.Sprintf("Key for tree: %d", iavl.LastCommitID().Version)) + value := []byte(fmt.Sprintf("Value for 
tree: %d", iavl.LastCommitID().Version)) + iavl.Set(key, value) + iavl.Commit() +} + +func TestIAVLNoPrune(t *testing.T) { + db := coretesting.NewMemDB() + tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) + + iavlStore := UnsafeNewStore(tree) + nextVersion(iavlStore) + + for i := 1; i < 100; i++ { + for j := 1; j <= i; j++ { + require.True(t, iavlStore.VersionExists(int64(j)), + "Missing version %d with latest version %d. Should be storing all versions", + j, i) + } + + nextVersion(iavlStore) + } +} + +func TestIAVLStoreQuery(t *testing.T) { + db := coretesting.NewMemDB() + tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) + + iavlStore := UnsafeNewStore(tree) + + k1, v1 := []byte("key1"), []byte("val1") + k2, v2 := []byte("key2"), []byte("val2") + v3 := []byte("val3") + + ksub := []byte("key") + KVs0 := kv.Pairs{} //nolint:staticcheck // We are in store v1. + KVs1 := kv.Pairs{ //nolint:staticcheck // We are in store v1. + Pairs: []kv.Pair{ //nolint:staticcheck // We are in store v1. + {Key: k1, Value: v1}, + {Key: k2, Value: v2}, + }, + } + KVs2 := kv.Pairs{ //nolint:staticcheck // We are in store v1. + Pairs: []kv.Pair{ //nolint:staticcheck // We are in store v1. + {Key: k1, Value: v3}, + {Key: k2, Value: v2}, + }, + } + + valExpSubEmpty, err := KVs0.Marshal() + require.NoError(t, err) + + valExpSub1, err := KVs1.Marshal() + require.NoError(t, err) + + valExpSub2, err := KVs2.Marshal() + require.NoError(t, err) + + cid := iavlStore.Commit() + ver := cid.Version + query := types.RequestQuery{Path: "/key", Data: k1, Height: ver} + querySub := types.RequestQuery{Path: "/subspace", Data: ksub, Height: ver} + + // query subspace before anything set + qres, err := iavlStore.Query(&querySub) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Equal(t, valExpSubEmpty, qres.Value) + + // set data + iavlStore.Set(k1, v1) + iavlStore.Set(k2, v2) + + // set data without commit, doesn't show up + qres, err = iavlStore.Query(&query) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Nil(t, qres.Value) + + // commit it, but still don't see on old version + cid = iavlStore.Commit() + qres, err = iavlStore.Query(&query) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Nil(t, qres.Value) + + // but yes on the new version + query.Height = cid.Version + qres, err = iavlStore.Query(&query) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Equal(t, v1, qres.Value) + + // and for the subspace + qres, err = iavlStore.Query(&querySub) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Equal(t, valExpSub1, qres.Value) + + // modify + iavlStore.Set(k1, v3) + cid = iavlStore.Commit() + + // query will return old values, as height is fixed + qres, err = iavlStore.Query(&query) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Equal(t, v1, qres.Value) + + // update to latest in the query and we are happy + query.Height = cid.Version + qres, err = iavlStore.Query(&query) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Equal(t, v3, qres.Value) + query2 := types.RequestQuery{Path: "/key", Data: k2, Height: cid.Version} + + qres, err = iavlStore.Query(&query2) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Equal(t, v2, qres.Value) + // and for the subspace + qres, err = iavlStore.Query(&querySub) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + 
require.Equal(t, valExpSub2, qres.Value) + + // default (height 0) will show latest -1 + query0 := types.RequestQuery{Path: "/key", Data: k1} + qres, err = iavlStore.Query(&query0) + require.NoError(t, err) + require.Equal(t, uint32(0), qres.Code) + require.Equal(t, v1, qres.Value) +} + +func BenchmarkIAVLIteratorNext(b *testing.B) { + b.ReportAllocs() + db := coretesting.NewMemDB() + treeSize := 1000 + tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) + + for i := 0; i < treeSize; i++ { + key := randBytes(4) + value := randBytes(50) + _, err := tree.Set(key, value) + require.NoError(b, err) + } + + iavlStore := UnsafeNewStore(tree) + iterators := make([]types.Iterator, b.N/treeSize) + + for i := 0; i < len(iterators); i++ { + iterators[i] = iavlStore.Iterator([]byte{0}, []byte{255, 255, 255, 255, 255}) + } + + b.ResetTimer() + for i := 0; i < len(iterators); i++ { + iter := iterators[i] + for j := 0; j < treeSize; j++ { + iter.Next() + } + } +} + +func TestSetInitialVersion(t *testing.T) { + testCases := []struct { + name string + storeFn func(db corestore.KVStoreWithBatch) *Store + expPanic bool + }{ + { + "works with a mutable tree", + func(db corestore.KVStoreWithBatch) *Store { + tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) + store := UnsafeNewStore(tree) + + return store + }, false, + }, + { + "throws error on immutable tree", + func(db corestore.KVStoreWithBatch) *Store { + tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) + store := UnsafeNewStore(tree) + _, version, err := store.tree.SaveVersion() + require.NoError(t, err) + require.Equal(t, int64(1), version) + store, err = store.GetImmutable(1) + require.NoError(t, err) + + return store + }, true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + db := coretesting.NewMemDB() + store := tc.storeFn(db) + + if tc.expPanic { + require.Panics(t, func() { store.SetInitialVersion(5) }) + } else { + store.SetInitialVersion(5) + cid := store.Commit() + require.Equal(t, int64(5), cid.GetVersion()) + } + }) + } +} + +func TestCacheWraps(t *testing.T) { + db := coretesting.NewMemDB() + tree, _ := newAlohaTree(t, db) + store := UnsafeNewStore(tree) + + cacheWrapper := store.CacheWrap() + require.IsType(t, &cachekv.Store{}, cacheWrapper) + + cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) + require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) +} + +func TestChangeSets(t *testing.T) { + db := coretesting.NewMemDB() + treeSize := 1000 + treeVersion := int64(10) + targetVersion := int64(6) + tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger(), iavl.FlushThresholdOption(math.MaxInt)) + + for j := int64(0); j < treeVersion; j++ { + keys := [][]byte{} + for i := 0; i < treeSize; i++ { + keys = append(keys, randBytes(4)) + } + sort.Slice(keys, func(p, q int) bool { + return bytes.Compare(keys[p], keys[q]) < 0 + }) + for i := 0; i < treeSize; i++ { + key := keys[i] + value := randBytes(50) + _, err := tree.Set(key, value) + require.NoError(t, err) + } + _, _, err := tree.SaveVersion() + require.NoError(t, err) + } + + changeSets := []*iavl.ChangeSet{} + iavlStore := UnsafeNewStore(tree) + commitID := iavlStore.LastCommitID() + + require.NoError(t, iavlStore.TraverseStateChanges(targetVersion+1, treeVersion, func(v int64, cs *iavl.ChangeSet) error { + changeSets = append(changeSets, cs) + return nil + })) + require.NoError(t, iavlStore.LoadVersionForOverwriting(targetVersion)) + + for i, cs := range changeSets { + v, 
err := tree.SaveChangeSet(cs)
+		require.NoError(t, err)
+		require.Equal(t, v, targetVersion+int64(i+1))
+	}
+
+	restoreCommitID := iavlStore.LastCommitID()
+	require.Equal(t, commitID, restoreCommitID)
+}
diff --git a/store/v2/commitment/iavl/tree.go b/store/v2/commitment/iavl/tree.go
new file mode 100644
index 000000000000..4aaac08ab8bf
--- /dev/null
+++ b/store/v2/commitment/iavl/tree.go
@@ -0,0 +1,201 @@
+package iavl
+
+import (
+	"fmt"
+
+	"github.com/cosmos/iavl"
+	ics23 "github.com/cosmos/ics23/go"
+
+	"cosmossdk.io/core/log"
+	corestore "cosmossdk.io/core/store"
+	"cosmossdk.io/store/v2"
+	"cosmossdk.io/store/v2/commitment"
+)
+
+var (
+	_ commitment.Tree      = (*IavlTree)(nil)
+	_ commitment.Reader    = (*IavlTree)(nil)
+	_ store.PausablePruner = (*IavlTree)(nil)
+)
+
+// IavlTree is a wrapper around iavl.MutableTree.
+type IavlTree struct {
+	tree *iavl.MutableTree
+}
+
+// NewIavlTree creates a new IavlTree instance.
+func NewIavlTree(db corestore.KVStoreWithBatch, logger log.Logger, cfg *Config) *IavlTree {
+	tree := iavl.NewMutableTree(db, cfg.CacheSize, cfg.SkipFastStorageUpgrade, logger, iavl.AsyncPruningOption(true))
+	return &IavlTree{
+		tree: tree,
+	}
+}
+
+// Remove removes the given key from the tree.
+func (t *IavlTree) Remove(key []byte) error {
+	_, _, err := t.tree.Remove(key)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Set sets the given key-value pair in the tree.
+func (t *IavlTree) Set(key, value []byte) error {
+	_, err := t.tree.Set(key, value)
+	return err
+}
+
+// Hash returns the hash of the latest saved version of the tree.
+func (t *IavlTree) Hash() []byte {
+	return t.tree.Hash()
+}
+
+// Version returns the current version of the tree.
+func (t *IavlTree) Version() uint64 {
+	return uint64(t.tree.Version())
+}
+
+// WorkingHash returns the working hash of the tree.
+// Danger! iavl.MutableTree.WorkingHash() is a mutating operation!
+// It advances the tree version by 1.
+func (t *IavlTree) WorkingHash() []byte {
+	return t.tree.WorkingHash()
+}
+
+// LoadVersion loads the state at the given version.
+func (t *IavlTree) LoadVersion(version uint64) error {
+	_, err := t.tree.LoadVersion(int64(version))
+	return err
+}
+
+// LoadVersionForOverwriting loads the state at the given version.
+// Any versions greater than targetVersion will be deleted.
+func (t *IavlTree) LoadVersionForOverwriting(version uint64) error {
+	return t.tree.LoadVersionForOverwriting(int64(version))
+}
+
+// Commit commits the current state to the tree.
+func (t *IavlTree) Commit() ([]byte, uint64, error) {
+	hash, v, err := t.tree.SaveVersion()
+	return hash, uint64(v), err
+}
+
+// GetProof returns a proof for the given key and version.
+func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) {
+	// The mutable tree is empty at genesis and whenever a store key has been
+	// removed, while the corresponding immutable tree is not. Checking the
+	// latest version tells us whether we are at genesis or the key was removed.
+	lv, err := t.tree.GetLatestVersion()
+	if err != nil {
+		return nil, err
+	}
+	if lv == 0 {
+		return t.tree.GetProof(key)
+	}
+
+	immutableTree, err := t.tree.GetImmutable(int64(version))
+	if err != nil {
+		return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err)
+	}
+
+	return immutableTree.GetProof(key)
+}
+
+// Get implements the Reader interface.
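+// Like GetProof above, it reads from an immutable snapshot at the requested
+// version, falling back to the mutable tree only when no version has been
+// saved yet (latest version 0).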
+func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) {
+	// The mutable tree is empty at genesis and whenever a store key has been
+	// removed, while the corresponding immutable tree is not. Checking the
+	// latest version tells us whether we are at genesis or the key was removed.
+	lv, err := t.tree.GetLatestVersion()
+	if err != nil {
+		return nil, err
+	}
+	if lv == 0 {
+		return t.tree.Get(key)
+	}
+
+	immutableTree, err := t.tree.GetImmutable(int64(version))
+	if err != nil {
+		return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err)
+	}
+
+	return immutableTree.Get(key)
+}
+
+// Iterator implements the Reader interface.
+func (t *IavlTree) Iterator(version uint64, start, end []byte, ascending bool) (corestore.Iterator, error) {
+	// The mutable tree is empty at genesis and whenever a store key has been
+	// removed, while the corresponding immutable tree is not. Checking the
+	// latest version tells us whether we are at genesis or the key was removed.
+	lv, err := t.tree.GetLatestVersion()
+	if err != nil {
+		return nil, err
+	}
+	if lv == 0 {
+		return t.tree.Iterator(start, end, ascending)
+	}
+
+	immutableTree, err := t.tree.GetImmutable(int64(version))
+	if err != nil {
+		return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err)
+	}
+
+	return immutableTree.Iterator(start, end, ascending)
+}
+
+// GetLatestVersion returns the latest version of the tree.
+func (t *IavlTree) GetLatestVersion() (uint64, error) {
+	v, err := t.tree.GetLatestVersion()
+	return uint64(v), err
+}
+
+// SetInitialVersion sets the initial version of the database.
+func (t *IavlTree) SetInitialVersion(version uint64) error {
+	t.tree.SetInitialVersion(version)
+	return nil
+}
+
+// Prune prunes all versions up to and including the provided version.
+func (t *IavlTree) Prune(version uint64) error {
+	return t.tree.DeleteVersionsTo(int64(version))
+}
+
+// PausePruning pauses or resumes the pruning process.
+func (t *IavlTree) PausePruning(pause bool) {
+	if pause {
+		t.tree.SetCommitting()
+	} else {
+		t.tree.UnsetCommitting()
+	}
+}
+
+// Export returns an exporter for the tree at the given version.
+func (t *IavlTree) Export(version uint64) (commitment.Exporter, error) {
+	tree, err := t.tree.GetImmutable(int64(version))
+	if err != nil {
+		return nil, err
+	}
+	exporter, err := tree.Export()
+	if err != nil {
+		return nil, err
+	}
+
+	return &Exporter{
+		exporter: exporter,
+	}, nil
+}
+
+// Import returns an importer for restoring the tree at the given version.
+func (t *IavlTree) Import(version uint64) (commitment.Importer, error) {
+	importer, err := t.tree.Import(int64(version))
+	if err != nil {
+		return nil, err
+	}
+
+	return &Importer{
+		importer: importer,
+	}, nil
+}
+
+// Close closes the iavl tree.
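+// After Close returns, the tree must not be used for further reads or writes.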
+func (t *IavlTree) Close() error { + return t.tree.Close() +} diff --git a/store/v2/commitment/store.go b/store/v2/commitment/store.go new file mode 100644 index 000000000000..aa383b57ae56 --- /dev/null +++ b/store/v2/commitment/store.go @@ -0,0 +1,571 @@ +package commitment + +import ( + "errors" + "fmt" + "io" + "maps" + "math" + "slices" + + protoio "github.com/cosmos/gogoproto/io" + + corelog "cosmossdk.io/core/log" + corestore "cosmossdk.io/core/store" + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/internal" + "cosmossdk.io/store/v2/internal/conv" + "cosmossdk.io/store/v2/proof" + "cosmossdk.io/store/v2/snapshots" + snapshotstypes "cosmossdk.io/store/v2/snapshots/types" +) + +var ( + _ store.Committer = (*CommitStore)(nil) + _ store.UpgradeableStore = (*CommitStore)(nil) + _ snapshots.CommitSnapshotter = (*CommitStore)(nil) + _ store.PausablePruner = (*CommitStore)(nil) + + // NOTE: It is not recommended to use the CommitStore as a reader. This is only used + // during the migration process. Generally, the SC layer does not provide a reader + // in the store/v2. + _ store.VersionedReader = (*CommitStore)(nil) +) + +// MountTreeFn is a function that mounts a tree given a store key. +// It is used to lazily mount trees when needed (e.g. during upgrade or proof generation). +type MountTreeFn func(storeKey string) (Tree, error) + +// CommitStore is a wrapper around multiple Tree objects mapped by a unique store +// key. Each store key reflects dedicated and unique usage within a module. A caller +// can construct a CommitStore with one or more store keys. It is expected that a +// RootStore use a CommitStore as an abstraction to handle multiple store keys +// and trees. +type CommitStore struct { + logger corelog.Logger + metadata *MetadataStore + multiTrees map[string]Tree + // oldTrees is a map of store keys to old trees that have been deleted or renamed. + // It is used to get the proof for the old store keys. + oldTrees map[string]Tree +} + +// NewCommitStore creates a new CommitStore instance. +func NewCommitStore(trees, oldTrees map[string]Tree, db corestore.KVStoreWithBatch, logger corelog.Logger) (*CommitStore, error) { + return &CommitStore{ + logger: logger, + multiTrees: trees, + oldTrees: oldTrees, + metadata: NewMetadataStore(db), + }, nil +} + +func (c *CommitStore) WriteChangeset(cs *corestore.Changeset) error { + for _, pairs := range cs.Changes { + key := conv.UnsafeBytesToStr(pairs.Actor) + + tree, ok := c.multiTrees[key] + if !ok { + return fmt.Errorf("store key %s not found in multiTrees", key) + } + for _, kv := range pairs.StateChanges { + if kv.Remove { + if err := tree.Remove(kv.Key); err != nil { + return err + } + } else if err := tree.Set(kv.Key, kv.Value); err != nil { + return err + } + } + } + + return nil +} + +func (c *CommitStore) LoadVersion(targetVersion uint64) error { + storeKeys := make([]string, 0, len(c.multiTrees)) + for storeKey := range c.multiTrees { + storeKeys = append(storeKeys, storeKey) + } + return c.loadVersion(targetVersion, storeKeys, false) +} + +func (c *CommitStore) LoadVersionForOverwriting(targetVersion uint64) error { + storeKeys := make([]string, 0, len(c.multiTrees)) + for storeKey := range c.multiTrees { + storeKeys = append(storeKeys, storeKey) + } + + return c.loadVersion(targetVersion, storeKeys, true) +} + +// LoadVersionAndUpgrade implements store.UpgradeableStore. 
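+//
+// A hedged sketch of a typical call, mirroring the test suite (the store
+// names here are assumptions):
+//
+//	upgrades := &corestore.StoreUpgrades{
+//		Added:   []string{"newstore1"},
+//		Deleted: []string{"oldstore1"},
+//	}
+//	if err := c.LoadVersionAndUpgrade(latestVersion, upgrades); err != nil {
+//		return err
+//	}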
+func (c *CommitStore) LoadVersionAndUpgrade(targetVersion uint64, upgrades *corestore.StoreUpgrades) error {
+	// deterministic iteration order for upgrades (as the underlying store may change and
+	// upgrades make store changes where the execution order may matter)
+	storeKeys := slices.Sorted(maps.Keys(c.multiTrees))
+	removeTree := func(storeKey string) error {
+		if oldTree, ok := c.multiTrees[storeKey]; ok {
+			if err := oldTree.Close(); err != nil {
+				return err
+			}
+			delete(c.multiTrees, storeKey)
+		}
+		return nil
+	}
+
+	newStoreKeys := make([]string, 0, len(c.multiTrees))
+	removedStoreKeys := make([]string, 0)
+	for _, storeKey := range storeKeys {
+		// If it has been deleted, remove the tree.
+		if upgrades.IsDeleted(storeKey) {
+			if err := removeTree(storeKey); err != nil {
+				return err
+			}
+			removedStoreKeys = append(removedStoreKeys, storeKey)
+			continue
+		}
+
+		// If it has been added, set the initial version.
+		if upgrades.IsAdded(storeKey) {
+			if err := c.multiTrees[storeKey].SetInitialVersion(targetVersion + 1); err != nil {
+				return err
+			}
+			// This is an empty tree; there is no need to load the version.
+			continue
+		}
+
+		newStoreKeys = append(newStoreKeys, storeKey)
+	}
+
+	if err := c.metadata.flushRemovedStoreKeys(targetVersion, removedStoreKeys); err != nil {
+		return err
+	}
+
+	return c.loadVersion(targetVersion, newStoreKeys, true)
+}
+
+func (c *CommitStore) loadVersion(targetVersion uint64, storeKeys []string, overrideAfter bool) error {
+	// Rollback the metadata to the target version.
+	latestVersion, err := c.GetLatestVersion()
+	if err != nil {
+		return err
+	}
+	if targetVersion < latestVersion {
+		for version := latestVersion; version > targetVersion; version-- {
+			if err = c.metadata.deleteCommitInfo(version); err != nil {
+				return err
+			}
+		}
+		if err := c.metadata.setLatestVersion(targetVersion); err != nil {
+			return err
+		}
+	}
+
+	for _, storeKey := range storeKeys {
+		if overrideAfter {
+			if err := c.multiTrees[storeKey].LoadVersionForOverwriting(targetVersion); err != nil {
+				return err
+			}
+		} else {
+			if err := c.multiTrees[storeKey].LoadVersion(targetVersion); err != nil {
+				return err
+			}
+		}
+	}
+
+	// If the target version is greater than the latest version, it is the snapshot
+	// restore case; we should create a new commit info for the target version.
+ if targetVersion > latestVersion { + cInfo, err := c.GetCommitInfo(targetVersion) + if err != nil { + return err + } + return c.metadata.flushCommitInfo(targetVersion, cInfo) + } + + return nil +} + +func (c *CommitStore) Commit(version uint64) (*proof.CommitInfo, error) { + storeInfos := make([]proof.StoreInfo, 0, len(c.multiTrees)) + + for storeKey, tree := range c.multiTrees { + if internal.IsMemoryStoreKey(storeKey) { + continue + } + hash, cversion, err := tree.Commit() + if err != nil { + return nil, err + } + if cversion != version { + return nil, fmt.Errorf("commit version %d does not match the target version %d", cversion, version) + } + commitID := proof.CommitID{ + Version: version, + Hash: hash, + } + storeInfos = append(storeInfos, proof.StoreInfo{ + Name: []byte(storeKey), + CommitID: commitID, + }) + } + + cInfo := &proof.CommitInfo{ + Version: version, + StoreInfos: storeInfos, + } + + if err := c.metadata.flushCommitInfo(version, cInfo); err != nil { + return nil, err + } + + return cInfo, nil +} + +func (c *CommitStore) SetInitialVersion(version uint64) error { + for _, tree := range c.multiTrees { + if err := tree.SetInitialVersion(version); err != nil { + return err + } + } + + return nil +} + +// GetProof returns a proof for the given key and version. +func (c *CommitStore) GetProof(storeKey []byte, version uint64, key []byte) ([]proof.CommitmentOp, error) { + rawStoreKey := conv.UnsafeBytesToStr(storeKey) + tree, ok := c.multiTrees[rawStoreKey] + if !ok { + tree, ok = c.oldTrees[rawStoreKey] + if !ok { + return nil, fmt.Errorf("store %s not found", rawStoreKey) + } + } + + iProof, err := tree.GetProof(version, key) + if err != nil { + return nil, err + } + cInfo, err := c.metadata.GetCommitInfo(version) + if err != nil { + return nil, err + } + if cInfo == nil { + return nil, fmt.Errorf("commit info not found for version %d", version) + } + commitOp := proof.NewIAVLCommitmentOp(key, iProof) + _, storeCommitmentOp, err := cInfo.GetStoreProof(storeKey) + if err != nil { + return nil, err + } + + return []proof.CommitmentOp{commitOp, *storeCommitmentOp}, nil +} + +// getReader returns a reader for the given store key. It will return an error if the +// store key does not exist or the tree does not implement the Reader interface. +// WARNING: This function is only used during the migration process. The SC layer +// generally does not provide a reader for the CommitStore. +func (c *CommitStore) getReader(storeKey string) (Reader, error) { + var tree Tree + if storeTree, ok := c.oldTrees[storeKey]; ok { + tree = storeTree + } else if storeTree, ok := c.multiTrees[storeKey]; ok { + tree = storeTree + } else { + return nil, fmt.Errorf("store %s not found", storeKey) + } + + reader, ok := tree.(Reader) + if !ok { + return nil, fmt.Errorf("tree for store %s does not implement Reader", storeKey) + } + + return reader, nil +} + +// VersionExists implements store.VersionedReader. +func (c *CommitStore) VersionExists(version uint64) (bool, error) { + latestVersion, err := c.metadata.GetLatestVersion() + if err != nil { + return false, err + } + if latestVersion == 0 { + return version == 0, nil + } + + ci, err := c.metadata.GetCommitInfo(version) + return ci != nil, err +} + +// Get implements store.VersionedReader. 
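+//
+// Illustrative only (the store key, version, and key are assumptions):
+//
+//	bz, err := c.Get([]byte("store1"), version, []byte("key"))
+//	if err != nil {
+//		return err
+//	}
+//	_ = bz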
+func (c *CommitStore) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) { + reader, err := c.getReader(conv.UnsafeBytesToStr(storeKey)) + if err != nil { + return nil, err + } + + bz, err := reader.Get(version, key) + if err != nil { + return nil, fmt.Errorf("failed to get key %s from store %s: %w", key, storeKey, err) + } + + return bz, nil +} + +// Has implements store.VersionedReader. +func (c *CommitStore) Has(storeKey []byte, version uint64, key []byte) (bool, error) { + val, err := c.Get(storeKey, version, key) + return val != nil, err +} + +// Iterator implements store.VersionedReader. +func (c *CommitStore) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { + reader, err := c.getReader(conv.UnsafeBytesToStr(storeKey)) + if err != nil { + return nil, err + } + + return reader.Iterator(version, start, end, true) +} + +// ReverseIterator implements store.VersionedReader. +func (c *CommitStore) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { + reader, err := c.getReader(conv.UnsafeBytesToStr(storeKey)) + if err != nil { + return nil, err + } + + return reader.Iterator(version, start, end, false) +} + +// Prune implements store.Pruner. +func (c *CommitStore) Prune(version uint64) error { + // prune the metadata + for v := version; v > 0; v-- { + if err := c.metadata.deleteCommitInfo(v); err != nil { + return err + } + } + // prune the trees + for _, tree := range c.multiTrees { + if err := tree.Prune(version); err != nil { + return err + } + } + // prune the removed store keys + if err := c.pruneRemovedStoreKeys(version); err != nil { + return err + } + + return nil +} + +func (c *CommitStore) pruneRemovedStoreKeys(version uint64) error { + clearKVStore := func(storeKey []byte, version uint64) (err error) { + tree, ok := c.oldTrees[string(storeKey)] + if !ok { + return fmt.Errorf("store %s not found in oldTrees", storeKey) + } + return tree.Prune(version) + } + return c.metadata.deleteRemovedStoreKeys(version, clearKVStore) +} + +// PausePruning implements store.PausablePruner. +func (c *CommitStore) PausePruning(pause bool) { + for _, tree := range c.multiTrees { + if pruner, ok := tree.(store.PausablePruner); ok { + pruner.PausePruning(pause) + } + } +} + +// Snapshot implements snapshotstypes.CommitSnapshotter. 
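+//
+// A minimal streaming sketch mirroring the test suite (the channel buffer
+// size is an assumption):
+//
+//	chunks := make(chan io.ReadCloser, 64)
+//	writer := snapshots.NewStreamWriter(chunks)
+//	defer writer.Close()
+//	if err := c.Snapshot(version, writer); err != nil {
+//		return err
+//	}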
+func (c *CommitStore) Snapshot(version uint64, protoWriter protoio.Writer) error { + if version == 0 { + return errors.New("the snapshot version must be greater than 0") + } + + latestVersion, err := c.GetLatestVersion() + if err != nil { + return err + } + if version > latestVersion { + return fmt.Errorf("the snapshot version %d is greater than the latest version %d", version, latestVersion) + } + + for storeKey, tree := range c.multiTrees { + // TODO: check the parallelism of this loop + if err := func() error { + exporter, err := tree.Export(version) + if err != nil { + return fmt.Errorf("failed to export tree for version %d: %w", version, err) + } + defer exporter.Close() + + err = protoWriter.WriteMsg(&snapshotstypes.SnapshotItem{ + Item: &snapshotstypes.SnapshotItem_Store{ + Store: &snapshotstypes.SnapshotStoreItem{ + Name: storeKey, + }, + }, + }) + if err != nil { + return fmt.Errorf("failed to write store name: %w", err) + } + + for { + item, err := exporter.Next() + if errors.Is(err, ErrorExportDone) { + break + } else if err != nil { + return fmt.Errorf("failed to get the next export node: %w", err) + } + + if err = protoWriter.WriteMsg(&snapshotstypes.SnapshotItem{ + Item: &snapshotstypes.SnapshotItem_IAVL{ + IAVL: item, + }, + }); err != nil { + return fmt.Errorf("failed to write iavl node: %w", err) + } + } + + return nil + }(); err != nil { + return err + } + } + + return nil +} + +// Restore implements snapshotstypes.CommitSnapshotter. +func (c *CommitStore) Restore( + version uint64, + format uint32, + protoReader protoio.Reader, +) (snapshotstypes.SnapshotItem, error) { + var ( + importer Importer + snapshotItem snapshotstypes.SnapshotItem + ) + +loop: + for { + snapshotItem = snapshotstypes.SnapshotItem{} + err := protoReader.ReadMsg(&snapshotItem) + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return snapshotstypes.SnapshotItem{}, fmt.Errorf("invalid protobuf message: %w", err) + } + + switch item := snapshotItem.Item.(type) { + case *snapshotstypes.SnapshotItem_Store: + if importer != nil { + if err := importer.Commit(); err != nil { + return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to commit importer: %w", err) + } + if err := importer.Close(); err != nil { + return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to close importer: %w", err) + } + } + tree := c.multiTrees[item.Store.Name] + if tree == nil { + return snapshotstypes.SnapshotItem{}, fmt.Errorf("store %s not found", item.Store.Name) + } + importer, err = tree.Import(version) + if err != nil { + return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to import tree for version %d: %w", version, err) + } + defer importer.Close() + + case *snapshotstypes.SnapshotItem_IAVL: + if importer == nil { + return snapshotstypes.SnapshotItem{}, errors.New("received IAVL node item before store item") + } + node := item.IAVL + if node.Height > int32(math.MaxInt8) { + return snapshotstypes.SnapshotItem{}, fmt.Errorf("node height %v cannot exceed %v", + item.IAVL.Height, math.MaxInt8) + } + // Protobuf does not differentiate between []byte{} and nil, but fortunately IAVL does + // not allow nil keys nor nil values for leaf nodes, so we can always set them to empty. 
+			if node.Key == nil {
+				node.Key = []byte{}
+			}
+			if node.Height == 0 {
+				if node.Value == nil {
+					node.Value = []byte{}
+				}
+			}
+			err := importer.Add(node)
+			if err != nil {
+				return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to add node to importer: %w", err)
+			}
+		default:
+			break loop
+		}
+	}
+
+	if importer != nil {
+		if err := importer.Commit(); err != nil {
+			return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to commit importer: %w", err)
+		}
+	}
+
+	return snapshotItem, c.LoadVersion(version)
+}
+
+func (c *CommitStore) GetCommitInfo(version uint64) (*proof.CommitInfo, error) {
+	// if the commit info is already stored, return it
+	ci, err := c.metadata.GetCommitInfo(version)
+	if err != nil {
+		return nil, err
+	}
+	if ci != nil {
+		return ci, nil
+	}
+	// otherwise build the commit info from the trees
+	storeInfos := make([]proof.StoreInfo, 0, len(c.multiTrees))
+	for storeKey, tree := range c.multiTrees {
+		if internal.IsMemoryStoreKey(storeKey) {
+			continue
+		}
+		v := tree.Version()
+		if v != version {
+			return nil, fmt.Errorf("tree version %d does not match the target version %d", v, version)
+		}
+		bz := []byte(storeKey)
+		storeInfos = append(storeInfos, proof.StoreInfo{
+			Name: bz,
+			CommitID: proof.CommitID{
+				Version: v,
+				Hash:    tree.Hash(),
+			},
+		})
+	}
+
+	ci = &proof.CommitInfo{
+		Version:    version,
+		StoreInfos: storeInfos,
+	}
+	return ci, nil
+}
+
+func (c *CommitStore) GetLatestVersion() (uint64, error) {
+	return c.metadata.GetLatestVersion()
+}
+
+func (c *CommitStore) Close() error {
+	for _, tree := range c.multiTrees {
+		if err := tree.Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/store/v2/commitment/store_test_suite.go b/store/v2/commitment/store_test_suite.go
new file mode 100644
index 000000000000..b91119301c1e
--- /dev/null
+++ b/store/v2/commitment/store_test_suite.go
@@ -0,0 +1,495 @@
+package commitment
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"github.com/stretchr/testify/suite"
+
+	corelog "cosmossdk.io/core/log"
+	corestore "cosmossdk.io/core/store"
+	coretesting "cosmossdk.io/core/testing"
+	"cosmossdk.io/store/v2"
+	dbm "cosmossdk.io/store/v2/db"
+	"cosmossdk.io/store/v2/proof"
+	"cosmossdk.io/store/v2/snapshots"
+	snapshotstypes "cosmossdk.io/store/v2/snapshots/types"
+)
+
+const (
+	storeKey1 = "store1"
+	storeKey2 = "store2"
+	storeKey3 = "store3"
+)
+
+// CommitStoreTestSuite is a test suite to be used for all tree backends.
+type CommitStoreTestSuite struct {
+	suite.Suite
+
+	NewStore func(db corestore.KVStoreWithBatch, dbDir string, storeKeys, oldStoreKeys []string, logger corelog.Logger) (*CommitStore, error)
+	TreeType string
+}
+
+// TestStore_Snapshotter tests the snapshot functionality of the CommitStore.
+// This test verifies that the store can correctly create snapshots and restore from them.
+// The test follows these steps:
+//
+// 1. Setup & Data Population:
+//   - Creates a new CommitStore with two stores (store1 and store2)
+//   - Writes 10 versions of data (version 1-10)
+//   - For each version, writes 10 key-value pairs to each store
+//   - Total data: 2 stores * 10 versions * 10 pairs = 200 key-value pairs
+//   - Keys are formatted as "key-{version}-{index}"
+//   - Values are formatted as "value-{version}-{index}"
+//   - Each version is committed to get a CommitInfo
+//
+// 2.
Snapshot Creation: +// - Creates a dummy extension item for metadata testing +// - Sets up a new target store for restoration +// - Creates a channel for snapshot chunks +// - Launches a goroutine to: +// - Create a snapshot writer +// - Take a snapshot at version 10 +// - Write extension metadata +// +// 3. Snapshot Restoration: +// - Creates a snapshot reader from the chunks +// - Sets up a channel for state changes during restoration +// - Launches a goroutine to collect restored key-value pairs +// - Restores the snapshot into the target store +// - Verifies the extension metadata was preserved +// +// 4. Verification: +// - Confirms all 200 key-value pairs were restored correctly +// - Verifies the format: "{storeKey}_key-{version}-{index}" -> "value-{version}-{index}" +// - Checks that the restored store's Merkle tree hashes match the original +// - Ensures store integrity by comparing CommitInfo hashes +func (s *CommitStoreTestSuite) TestStore_Snapshotter() { + if s.TreeType == "iavlv2" { + s.T().Skip("FIXME: iavlv2 does not yet support snapshots") + } + storeKeys := []string{storeKey1, storeKey2} + commitStore, err := s.NewStore(dbm.NewMemDB(), s.T().TempDir(), storeKeys, nil, coretesting.NewNopLogger()) + s.Require().NoError(err) + + // We'll create 10 versions of data + latestVersion := uint64(10) + kvCount := 10 + var cInfo *proof.CommitInfo + + // For each version 1-10 + for i := uint64(1); i <= latestVersion; i++ { + // Create KV pairs for each store + kvPairs := make(map[string]corestore.KVPairs) + for _, storeKey := range storeKeys { + kvPairs[storeKey] = corestore.KVPairs{} + // Create 10 KV pairs for this store + for j := 0; j < kvCount; j++ { + key := []byte(fmt.Sprintf("key-%d-%d", i, j)) + value := []byte(fmt.Sprintf("value-%d-%d", i, j)) + kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value}) + } + } + // Write and commit the changes for this version + s.Require().NoError(commitStore.WriteChangeset(corestore.NewChangesetWithPairs(i, kvPairs))) + cInfo, err = commitStore.Commit(i) + s.Require().NoError(err) + } + + s.Require().Equal(len(storeKeys), len(cInfo.StoreInfos)) + + // create a snapshot + dummyExtensionItem := snapshotstypes.SnapshotItem{ + Item: &snapshotstypes.SnapshotItem_Extension{ + Extension: &snapshotstypes.SnapshotExtensionMeta{ + Name: "test", + Format: 1, + }, + }, + } + + targetStore, err := s.NewStore(dbm.NewMemDB(), s.T().TempDir(), storeKeys, nil, coretesting.NewNopLogger()) + s.Require().NoError(err) + + chunks := make(chan io.ReadCloser, kvCount*int(latestVersion)) + go func() { + streamWriter := snapshots.NewStreamWriter(chunks) + s.Require().NotNil(streamWriter) + defer streamWriter.Close() + err := commitStore.Snapshot(latestVersion, streamWriter) + s.Require().NoError(err) + // write an extension metadata + err = streamWriter.WriteMsg(&dummyExtensionItem) + s.Require().NoError(err) + }() + + streamReader, err := snapshots.NewStreamReader(chunks) + s.Require().NoError(err) + + nextItem, err := targetStore.Restore(latestVersion, snapshotstypes.CurrentFormat, streamReader) + s.Require().NoError(err) + s.Require().Equal(*dummyExtensionItem.GetExtension(), *nextItem.GetExtension()) + + // check the restored tree hash + targetCommitInfo, err := targetStore.GetCommitInfo(latestVersion) + s.Require().NoError(err) + for _, storeInfo := range targetCommitInfo.StoreInfos { + matched := false + for _, latestStoreInfo := range cInfo.StoreInfos { + if bytes.Equal(storeInfo.Name, latestStoreInfo.Name) { + 
s.Require().Equal(latestStoreInfo.GetHash(), storeInfo.GetHash()) + matched = true + } + } + s.Require().True(matched) + } +} + +func (s *CommitStoreTestSuite) TestStore_LoadVersion() { + storeKeys := []string{storeKey1, storeKey2} + mdb := dbm.NewMemDB() + dbDir := s.T().TempDir() + commitStore, err := s.NewStore(mdb, dbDir, storeKeys, nil, coretesting.NewNopLogger()) + s.Require().NoError(err) + + latestVersion := uint64(10) + kvCount := 10 + for i := uint64(1); i <= latestVersion; i++ { + kvPairs := make(map[string]corestore.KVPairs) + for _, storeKey := range storeKeys { + kvPairs[storeKey] = corestore.KVPairs{} + for j := 0; j < kvCount; j++ { + key := []byte(fmt.Sprintf("key-%d-%d", i, j)) + value := []byte(fmt.Sprintf("value-%d-%d", i, j)) + kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value}) + } + } + s.Require().NoError(commitStore.WriteChangeset(corestore.NewChangesetWithPairs(i, kvPairs))) + _, err = commitStore.Commit(i) + s.Require().NoError(err) + } + + // load the store with the latest version + targetStore, err := s.NewStore(mdb, dbDir, storeKeys, nil, coretesting.NewNopLogger()) + s.Require().NoError(err) + err = targetStore.LoadVersion(latestVersion) + s.Require().NoError(err) + // check the store + for i := uint64(1); i <= latestVersion; i++ { + commitInfo, _ := targetStore.GetCommitInfo(i) + s.Require().NotNil(commitInfo) + s.Require().Equal(i, commitInfo.Version) + } + + // rollback to a previous version + rollbackVersion := uint64(5) + rollbackStore, err := s.NewStore(mdb, dbDir, storeKeys, nil, coretesting.NewNopLogger()) + s.Require().NoError(err) + err = rollbackStore.LoadVersion(rollbackVersion) + s.Require().NoError(err) + // check the store + v, err := rollbackStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(rollbackVersion, v) + for i := uint64(1); i <= latestVersion; i++ { + commitInfo, _ := rollbackStore.GetCommitInfo(i) + if i > rollbackVersion { + s.Require().Nil(commitInfo) + } else { + s.Require().NotNil(commitInfo) + } + } +} + +func (s *CommitStoreTestSuite) TestStore_Pruning() { + storeKeys := []string{storeKey1, storeKey2} + pruneOpts := store.NewPruningOptionWithCustom(10, 5) + commitStore, err := s.NewStore(dbm.NewMemDB(), s.T().TempDir(), storeKeys, nil, coretesting.NewNopLogger()) + s.Require().NoError(err) + + latestVersion := uint64(100) + kvCount := 10 + for i := uint64(1); i <= latestVersion; i++ { + kvPairs := make(map[string]corestore.KVPairs) + for _, storeKey := range storeKeys { + kvPairs[storeKey] = corestore.KVPairs{} + for j := 0; j < kvCount; j++ { + key := []byte(fmt.Sprintf("key-%d-%d", i, j)) + value := []byte(fmt.Sprintf("value-%d-%d", i, j)) + kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value}) + } + } + s.Require().NoError(commitStore.WriteChangeset(corestore.NewChangesetWithPairs(i, kvPairs))) + + _, err = commitStore.Commit(i) + s.Require().NoError(err) + + if prune, pruneVersion := pruneOpts.ShouldPrune(i); prune { + s.Require().NoError(commitStore.Prune(pruneVersion)) + } + + } + + pruneVersion := latestVersion - pruneOpts.KeepRecent - 1 + // check the store + for i := uint64(1); i <= latestVersion; i++ { + commitInfo, _ := commitStore.GetCommitInfo(i) + if i <= pruneVersion { + s.Require().Nil(commitInfo) + } else { + s.Require().NotNil(commitInfo) + } + } +} + +func (s *CommitStoreTestSuite) TestStore_GetProof() { + storeKeys := []string{storeKey1, storeKey2} + commitStore, err := s.NewStore(dbm.NewMemDB(), s.T().TempDir(), 
storeKeys, nil, coretesting.NewNopLogger())
+	s.Require().NoError(err)
+
+	toVersion := uint64(10)
+	keyCount := 5
+
+	// commit some changes
+	for version := uint64(1); version <= toVersion; version++ {
+		cs := corestore.NewChangeset(version)
+		for _, storeKey := range storeKeys {
+			for i := 0; i < keyCount; i++ {
+				cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
+			}
+		}
+		err := commitStore.WriteChangeset(cs)
+		s.Require().NoError(err)
+		_, err = commitStore.Commit(version)
+		s.Require().NoError(err)
+	}
+
+	// get proof
+	for version := uint64(1); version <= toVersion; version++ {
+		for _, storeKey := range storeKeys {
+			for i := 0; i < keyCount; i++ {
+				_, err := commitStore.GetProof([]byte(storeKey), version, []byte(fmt.Sprintf("key-%d-%d", version, i)))
+				s.Require().NoError(err)
+			}
+		}
+	}
+
+	// prune version 1
+	s.Require().NoError(commitStore.Prune(1))
+
+	// check if proof for version 1 is pruned
+	_, err = commitStore.GetProof([]byte(storeKeys[0]), 1, []byte(fmt.Sprintf("key-%d-%d", 1, 0)))
+	s.Require().Error(err)
+	// check the commit info
+	commit, _ := commitStore.GetCommitInfo(1)
+	s.Require().Nil(commit)
+}
+
+func (s *CommitStoreTestSuite) TestStore_Get() {
+	storeKeys := []string{storeKey1, storeKey2}
+	commitStore, err := s.NewStore(dbm.NewMemDB(), s.T().TempDir(), storeKeys, nil, coretesting.NewNopLogger())
+	s.Require().NoError(err)
+
+	toVersion := uint64(10)
+	keyCount := 5
+
+	// commit some changes
+	for version := uint64(1); version <= toVersion; version++ {
+		cs := corestore.NewChangeset(version)
+		for _, storeKey := range storeKeys {
+			for i := 0; i < keyCount; i++ {
+				cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
+			}
+		}
+		err := commitStore.WriteChangeset(cs)
+		s.Require().NoError(err)
+		_, err = commitStore.Commit(version)
+		s.Require().NoError(err)
+	}
+
+	// read the stored values back
+	for version := uint64(1); version <= toVersion; version++ {
+		for _, storeKey := range storeKeys {
+			for i := 0; i < keyCount; i++ {
+				val, err := commitStore.Get([]byte(storeKey), version, []byte(fmt.Sprintf("key-%d-%d", version, i)))
+				s.Require().NoError(err)
+				s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), val)
+			}
+		}
+	}
+}
+
+func (s *CommitStoreTestSuite) TestStore_Upgrades() {
+	storeKeys := []string{storeKey1, storeKey2, storeKey3}
+	commitDB := dbm.NewMemDB()
+	commitDir := s.T().TempDir()
+	commitStore, err := s.NewStore(commitDB, commitDir, storeKeys, nil, coretesting.NewNopLogger())
+	s.Require().NoError(err)
+
+	latestVersion := uint64(10)
+	kvCount := 10
+	for i := uint64(1); i <= latestVersion; i++ {
+		kvPairs := make(map[string]corestore.KVPairs)
+		for _, storeKey := range storeKeys {
+			kvPairs[storeKey] = corestore.KVPairs{}
+			for j := 0; j < kvCount; j++ {
+				key := []byte(fmt.Sprintf("key-%d-%d", i, j))
+				value := []byte(fmt.Sprintf("value-%d-%d", i, j))
+				kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value})
+			}
+		}
+		s.Require().NoError(commitStore.WriteChangeset(corestore.NewChangesetWithPairs(i, kvPairs)))
+		_, err = commitStore.Commit(i)
+		s.Require().NoError(err)
+	}
+
+	// create a new commitment store with upgrades
+	upgrades := &corestore.StoreUpgrades{
+		Added:   []string{"newStore1", "newStore2"},
+		Deleted: []string{storeKey3},
+	}
+	newStoreKeys := []string{storeKey1, storeKey2, storeKey3, "newStore1", "newStore2"}
+	realStoreKeys :=
 []string{storeKey1, storeKey2, "newStore1", "newStore2"}
+	oldStoreKeys := []string{storeKey3}
+	commitStore, err = s.NewStore(commitDB, commitDir, newStoreKeys, oldStoreKeys, coretesting.NewNopLogger())
+	s.Require().NoError(err)
+	err = commitStore.LoadVersionAndUpgrade(latestVersion, upgrades)
+	s.Require().NoError(err)
+
+	// GetProof should work for the old stores
+	for _, storeKey := range []string{storeKey3} {
+		for i := uint64(1); i <= latestVersion; i++ {
+			for j := 0; j < kvCount; j++ {
+				proof, err := commitStore.GetProof([]byte(storeKey), i, []byte(fmt.Sprintf("key-%d-%d", i, j)))
+				s.Require().NoError(err)
+				s.Require().NotNil(proof)
+			}
+		}
+	}
+	// GetProof should fail for the new stores against the old versions
+	for _, storeKey := range []string{"newStore1", "newStore2"} {
+		for i := uint64(1); i <= latestVersion; i++ {
+			for j := 0; j < kvCount; j++ {
+				_, err := commitStore.GetProof([]byte(storeKey), i, []byte(fmt.Sprintf("key-%d-%d", i, j)))
+				s.Require().Error(err)
+			}
+		}
+	}
+
+	// apply the changeset again
+	for i := latestVersion + 1; i < latestVersion*2; i++ {
+		kvPairs := make(map[string]corestore.KVPairs)
+		for _, storeKey := range realStoreKeys {
+			kvPairs[storeKey] = corestore.KVPairs{}
+			for j := 0; j < kvCount; j++ {
+				key := []byte(fmt.Sprintf("key-%d-%d", i, j))
+				value := []byte(fmt.Sprintf("value-%d-%d", i, j))
+				kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value})
+			}
+		}
+		s.Require().NoError(commitStore.WriteChangeset(corestore.NewChangesetWithPairs(i, kvPairs)))
+		commitInfo, err := commitStore.Commit(i)
+		s.Require().NoError(err)
+		s.Require().NotNil(commitInfo)
+		s.Require().Equal(len(realStoreKeys), len(commitInfo.StoreInfos))
+		for _, storeKey := range realStoreKeys {
+			s.Require().NotNil(commitInfo.GetStoreCommitID([]byte(storeKey)))
+		}
+	}
+
+	// verify new stores
+	for _, storeKey := range []string{"newStore1", "newStore2"} {
+		for i := latestVersion + 1; i < latestVersion*2; i++ {
+			for j := 0; j < kvCount; j++ {
+				proof, err := commitStore.GetProof([]byte(storeKey), i, []byte(fmt.Sprintf("key-%d-%d", i, j)))
+				s.Require().NoError(err)
+				s.Require().NotNil(proof)
+			}
+		}
+	}
+
+	// verify existing store
+	for i := uint64(1); i < latestVersion*2; i++ {
+		for j := 0; j < kvCount; j++ {
+			prf, err := commitStore.GetProof([]byte(storeKey2), i, []byte(fmt.Sprintf("key-%d-%d", i, j)))
+			s.Require().NoError(err)
+			s.Require().NotNil(prf)
+		}
+	}
+
+	// create a new commitment store with one more upgrade
+	upgrades = &corestore.StoreUpgrades{
+		Deleted: []string{storeKey2},
+		Added:   []string{"newStore3"},
+	}
+	newRealStoreKeys := []string{storeKey1, "newStore1", "newStore2", "newStore3"}
+	oldStoreKeys = []string{storeKey2, storeKey3}
+	commitStore, err = s.NewStore(commitDB, commitDir, newRealStoreKeys, oldStoreKeys, coretesting.NewNopLogger())
+	s.Require().NoError(err)
+	err = commitStore.LoadVersionAndUpgrade(2*latestVersion-1, upgrades)
+	s.Require().NoError(err)
+
+	// apply the changeset again
+	for i := latestVersion * 2; i < latestVersion*3; i++ {
+		kvPairs := make(map[string]corestore.KVPairs)
+		for _, storeKey := range newRealStoreKeys {
+			kvPairs[storeKey] = corestore.KVPairs{}
+			for j := 0; j < kvCount; j++ {
+				key := []byte(fmt.Sprintf("key-%d-%d", i, j))
+				value := []byte(fmt.Sprintf("value-%d-%d", i, j))
+				kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value})
+			}
+		}
+		err = commitStore.WriteChangeset(corestore.NewChangesetWithPairs(i, kvPairs))
s.Require().NoError(err)
+		commitInfo, err := commitStore.Commit(i)
+		s.Require().NoError(err)
+		s.Require().NotNil(commitInfo)
+		s.Require().Equal(len(newRealStoreKeys), len(commitInfo.StoreInfos))
+		for _, storeKey := range newRealStoreKeys {
+			s.Require().NotNil(commitInfo.GetStoreCommitID([]byte(storeKey)))
+		}
+	}
+
+	// prune the old stores
+	s.Require().NoError(commitStore.Prune(latestVersion))
+	s.T().Logf("prune to version %d", latestVersion)
+	// GetProof should fail for pruned versions, for both active and removed stores
+	for _, storeKey := range []string{storeKey1, storeKey3} {
+		for i := uint64(1); i <= latestVersion; i++ {
+			for j := 0; j < kvCount; j++ {
+				_, err := commitStore.GetProof([]byte(storeKey), i, []byte(fmt.Sprintf("key-%d-%d", i, j)))
+				s.Require().Error(err)
+			}
+		}
+	}
+	s.T().Log("GetProof should still work for the newly removed store")
+	// GetProof should not fail for the newly removed store at unpruned versions
+	for i := latestVersion + 1; i < latestVersion*2; i++ {
+		for j := 0; j < kvCount; j++ {
+			proof, err := commitStore.GetProof([]byte(storeKey2), i, []byte(fmt.Sprintf("key-%d-%d", i, j)))
+			s.Require().NoError(err)
+			s.Require().NotNil(proof)
+		}
+	}
+
+	s.T().Logf("Prune to version %d", latestVersion*2)
+	s.Require().NoError(commitStore.Prune(latestVersion * 2))
+	// GetProof should fail for the newly deleted store
+	for i := uint64(1); i < latestVersion*2; i++ {
+		for j := 0; j < kvCount; j++ {
+			_, err := commitStore.GetProof([]byte(storeKey2), i, []byte(fmt.Sprintf("key-%d-%d", i, j)))
+			s.Require().Error(err)
+		}
+	}
+	s.T().Log("GetProof should work for the newly added store")
+	// GetProof should work for the newly added store
+	for i := latestVersion*2 + 1; i < latestVersion*3; i++ {
+		for j := 0; j < kvCount; j++ {
+			proof, err := commitStore.GetProof([]byte("newStore3"), i, []byte(fmt.Sprintf("key-%d-%d", i, j)))
+			s.Require().NoError(err)
+			s.Require().NotNil(proof)
+		}
+	}
+}
diff --git a/store/v2/database.go b/store/v2/database.go
new file mode 100644
index 000000000000..0e0697de57bb
--- /dev/null
+++ b/store/v2/database.go
@@ -0,0 +1,63 @@
+package store
+
+import (
+	"io"
+
+	corestore "cosmossdk.io/core/store"
+	"cosmossdk.io/store/v2/proof"
+)
+
+type VersionedReader interface {
+	Has(storeKey []byte, version uint64, key []byte) (bool, error)
+	Get(storeKey []byte, version uint64, key []byte) ([]byte, error)
+
+	GetLatestVersion() (uint64, error)
+	VersionExists(v uint64) (bool, error)
+
+	Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error)
+	ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error)
+}
+
+// UpgradableDatabase defines an API for a versioned database that allows pruning
+// deleted storeKeys.
+type UpgradableDatabase interface {
+	// PruneStoreKeys prunes all data associated with the given storeKeys whenever
+	// the given version is pruned.
+	PruneStoreKeys(storeKeys []string, version uint64) error
+}
+
+// Committer defines an API for committing state.
+type Committer interface {
+	UpgradeableStore
+	VersionedReader
+	// WriteChangeset writes the changeset to the commitment state.
+	WriteChangeset(cs *corestore.Changeset) error
+
+	// GetLatestVersion returns the latest version.
+	GetLatestVersion() (uint64, error)
+
+	// LoadVersion loads the tree at the given version.
+	LoadVersion(targetVersion uint64) error
+
+	// LoadVersionForOverwriting loads the tree at the given version.
+	// Any versions greater than targetVersion will be deleted.
+	LoadVersionForOverwriting(targetVersion uint64) error
+
+	// Commit commits the working tree to the database.
+	Commit(version uint64) (*proof.CommitInfo, error)
+
+	// GetProof returns the proof of existence or non-existence for the given key.
+	GetProof(storeKey []byte, version uint64, key []byte) ([]proof.CommitmentOp, error)
+
+	// SetInitialVersion sets the initial version of the committer.
+	SetInitialVersion(version uint64) error
+
+	// GetCommitInfo returns the CommitInfo for the given version.
+	GetCommitInfo(version uint64) (*proof.CommitInfo, error)
+
+	Get(storeKey []byte, version uint64, key []byte) ([]byte, error)
+
+	// Closer releases associated resources. It should NOT be idempotent. It must
+	// only be called once and any call after may panic.
+	io.Closer
+}
diff --git a/store/v2/migration/README.md b/store/v2/migration/README.md
new file mode 100644
index 000000000000..88b395f63f75
--- /dev/null
+++ b/store/v2/migration/README.md
@@ -0,0 +1,111 @@
+# Migration Manager
+
+The `migration` package contains the `migration.Manager`, which is responsible
+for migrating data from `store/v1` to `store/v2`. To ensure a smooth transition,
+the process is designed to **lazily** migrate data in the background without blocking
+`root.Store` operations.
+
+## Overview
+
+The migration process involves several steps:
+
+1. **Create a snapshot** of the current state while `Commit` operations continue to
+   function with `store/v1`.
+2. **Restore the snapshot** into the new StateStorage (SS) and StateCommitment (SC).
+3. **Sync recent state changes** from `store/v1` to the new SS and SC.
+4. After syncing, the `Commit` operation will be switched to the new `store/v2`.
+
+Taking a snapshot is a lightweight operation. The snapshot is not stored on disk but
+consumed by the `Restore` process, which replays state changes to the new SS and SC.
+
+> **Note:** After migration, `store/v2` does **not** support historical queries.
+If historical data access is required, a full state migration to `store/v2` is necessary.
+
+## Usage
+
+You can create a new `migration.Manager` by calling the following function:
+
+```go
+func NewManager(
+	db corestore.KVStoreWithBatch,
+	sm *snapshots.Manager,
+	sc *commitment.CommitStore,
+	logger log.Logger,
+) *Manager
+```
+
+* `sc` (Commitment Store) can be `nil`. In that case, the Manager skips the state
+  commitment migration.
+* The migration process is lazy, meaning data is migrated in the background while
+  `root.Store` remains fully operational.
+
+To initiate the migration process, call the `Start` method:
+
+```go
+func (m *Manager) Start(version uint64, chChangeset <-chan *VersionedChangeset, chDone <-chan struct{}) error
+```
+
+> **Note:** It should be called by the RootStore, running in the background.
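+
+A minimal wiring sketch (hedged: `db`, `snapshotsManager`, `commitStore`,
+`logger`, and `latestVersion` are assumed to exist in the caller; this is an
+illustration, not the canonical setup):
+
+```go
+m := migration.NewManager(db, snapshotsManager, commitStore, logger)
+
+chChangeset := make(chan *migration.VersionedChangeset, 1)
+chDone := make(chan struct{})
+
+// Run the migration in the background while the RootStore keeps committing;
+// Start blocks until the RootStore signals completion via chDone.
+go func() {
+	if err := m.Start(latestVersion, chChangeset, chDone); err != nil {
+		logger.Error("state migration failed", "err", err)
+	}
+}()
+```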
+## Migration Flow
+
+```mermaid
+sequenceDiagram
+    autonumber
+
+    participant A as RootStore
+    participant B as MigrationManager
+    participant C as SnapshotsManager
+    participant D as StateCommitment
+    participant E as StateStorage
+
+    A->>B: Start
+    loop Old Data Migration
+        B->>C: Create Snapshot
+        C->>B: Stream Snapshot
+        B->>D: State Sync (Restore)
+        B->>E: Write Changeset (Restore)
+    end
+
+    loop New Commit Data Sync
+        A->>B: Commit(Changeset)
+        B->>B: Store Changeset
+        B->>D: Commit Changeset
+        B->>E: Write Changeset
+    end
+
+    B->>A: Switch to new store/v2
+```
+
+## Key Considerations
+
+### Laziness and Background Operation
+
+The migration is performed lazily, meaning it occurs in the background without
+interrupting the current operations on `root.Store`. This allows the chain to continue
+running while data is gradually migrated to `store/v2`. State synchronization ensures
+that any new state changes during the migration are also applied to `store/v2`.
+
+However, note that there may be a performance impact depending on the size of the data
+being migrated, and it is essential to monitor the migration process in production
+environments.
+
+### Handling Failures and Rollbacks
+
+It is important to consider how the migration manager handles errors or system failures
+during the migration process:
+
+* If the migration fails, there is no impact on the existing `store/v1` operations,
+  but the migration process must be restarted from scratch.
+* In the event of a critical failure after migration, a rollback may not be possible,
+  so the `store/v1` backup should be kept for a certain period.
+
+### Impact on Historical Queries
+
+After the migration, the new `store/v2` does not support historical queries.
+This limitation should be clearly understood before starting the migration process,
+especially if the node relies on historical data for any operations.
+
+If historical queries are required, users must fully migrate all historical data to `store/v2`.
+Alternatively, keeping `store/v1` accessible for historical queries could be an option.
diff --git a/store/v2/migration/manager.go b/store/v2/migration/manager.go
new file mode 100644
index 000000000000..5365e8eb6a11
--- /dev/null
+++ b/store/v2/migration/manager.go
@@ -0,0 +1,213 @@
+package migration
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/sync/errgroup"
+
+	"cosmossdk.io/core/log"
+	corestore "cosmossdk.io/core/store"
+	"cosmossdk.io/store/v2/commitment"
+	"cosmossdk.io/store/v2/internal/encoding"
+	"cosmossdk.io/store/v2/snapshots"
+)
+
+const (
+	// defaultChannelBufferSize is the default buffer size for the migration stream.
+	defaultChannelBufferSize = 1024
+
+	migrateChangesetKeyFmt = "m/cs_%x" // m/cs_
+)
+
+// VersionedChangeset is a pair of version and Changeset.
+type VersionedChangeset struct {
+	Version   uint64
+	Changeset *corestore.Changeset
+}
+
+// Manager manages the migration of the whole state from store/v1 to store/v2.
+type Manager struct {
+	logger           log.Logger
+	snapshotsManager *snapshots.Manager
+
+	stateCommitment *commitment.CommitStore
+
+	db corestore.KVStoreWithBatch
+
+	migratedVersion atomic.Uint64
+
+	chChangeset <-chan *VersionedChangeset
+	chDone      <-chan struct{}
+}
+
+// NewManager returns a new Manager.
+//
+// NOTE: `sc` can be `nil` if you don't want to migrate the commitment.
+func NewManager(db corestore.KVStoreWithBatch, sm *snapshots.Manager, sc *commitment.CommitStore, logger log.Logger) *Manager {
+	return &Manager{
+		logger:           logger,
+		snapshotsManager: sm,
+		stateCommitment:  sc,
+		db:               db,
+	}
+}
+
+// Start starts the whole migration process.
+// It migrates the whole state at the given version to the new store/v2 (both SC and SS).
+// It also catches up the Changesets which are committed while the migration is in progress.
+// `chChangeset` is the channel to receive the committed Changesets from the RootStore.
+// `chDone` is the channel to receive the done signal from the RootStore.
+// NOTE: It should be called by the RootStore, running in the background.
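+//
+// A hedged usage sketch mirroring the tests (the buffer size and the
+// surrounding `version` and `logger` variables are assumptions):
+//
+//	chChangeset := make(chan *VersionedChangeset, 1)
+//	chDone := make(chan struct{})
+//	go func() {
+//		if err := m.Start(version, chChangeset, chDone); err != nil {
+//			logger.Error("failed to start migration", "err", err)
+//		}
+//	}()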
+func (m *Manager) Start(version uint64, chChangeset <-chan *VersionedChangeset, chDone <-chan struct{}) error {
+	m.chChangeset = chChangeset
+	m.chDone = chDone
+
+	go func() {
+		if err := m.writeChangeset(); err != nil {
+			m.logger.Error("failed to write changeset", "err", err)
+		}
+	}()
+
+	if err := m.Migrate(version); err != nil {
+		return fmt.Errorf("failed to migrate state: %w", err)
+	}
+
+	return m.Sync()
+}
+
+// GetStateCommitment returns the state commitment.
+func (m *Manager) GetStateCommitment() *commitment.CommitStore {
+	return m.stateCommitment
+}
+
+// Migrate migrates the whole state at the given height to the new store/v2.
+func (m *Manager) Migrate(height uint64) error {
+	// create the migration stream and snapshot,
+	// which acts as protoio.Reader and snapshots.WriteCloser.
+	ms := NewMigrationStream(defaultChannelBufferSize)
+	if err := m.snapshotsManager.CreateMigration(height, ms); err != nil {
+		return err
+	}
+
+	eg := new(errgroup.Group)
+	eg.Go(func() error {
+		if _, err := m.stateCommitment.Restore(height, 0, ms); err != nil {
+			return err
+		}
+		return nil
+	})
+
+	if err := eg.Wait(); err != nil {
+		return err
+	}
+
+	m.migratedVersion.Store(height)
+
+	return nil
+}
+
+// writeChangeset writes the Changeset to the db.
+func (m *Manager) writeChangeset() error {
+	for vc := range m.chChangeset {
+		cs := vc.Changeset
+		buf := make([]byte, 8)
+		binary.BigEndian.PutUint64(buf, vc.Version)
+		csKey := []byte(fmt.Sprintf(migrateChangesetKeyFmt, buf))
+		csBytes, err := encoding.MarshalChangeset(cs)
+		if err != nil {
+			return fmt.Errorf("failed to marshal changeset: %w", err)
+		}
+
+		batch := m.db.NewBatch()
+		// Invoke this code in a closure so that defer is called immediately on return,
+		// rather than at the end of the for-loop, which could leave resources lingering.
+		err = func() (err error) {
+			defer func() {
+				err = errors.Join(err, batch.Close())
+			}()
+
+			if err := batch.Set(csKey, csBytes); err != nil {
+				return fmt.Errorf("failed to write changeset to db.Batch: %w", err)
+			}
+			if err := batch.Write(); err != nil {
+				return fmt.Errorf("failed to write changeset to db: %w", err)
+			}
+			return nil
+		}()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// GetMigratedVersion returns the migrated version.
+// It is used to check the migrated version in the RootStore.
+func (m *Manager) GetMigratedVersion() uint64 {
+	return m.migratedVersion.Load()
+}
+
+// Sync catches up the Changesets which are committed while the migration is in progress.
+// It should be called after the migration is done.
+func (m *Manager) Sync() error { + version := m.GetMigratedVersion() + if version == 0 { + return errors.New("migration is not done yet") + } + version += 1 + + for { + select { + case <-m.chDone: + return nil + default: + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, version) + csKey := []byte(fmt.Sprintf(migrateChangesetKeyFmt, buf)) + csBytes, err := m.db.Get(csKey) + if err != nil { + return fmt.Errorf("failed to get changeset from db: %w", err) + } + if csBytes == nil { + // wait for the next changeset + time.Sleep(100 * time.Millisecond) + continue + } + + cs := corestore.NewChangeset(version) + if err := encoding.UnmarshalChangeset(cs, csBytes); err != nil { + return fmt.Errorf("failed to unmarshal changeset: %w", err) + } + if m.stateCommitment != nil { + if err := m.stateCommitment.WriteChangeset(cs); err != nil { + return fmt.Errorf("failed to write changeset to commitment: %w", err) + } + if _, err := m.stateCommitment.Commit(version); err != nil { + return fmt.Errorf("failed to commit changeset to commitment: %w", err) + } + } + + m.migratedVersion.Store(version) + + version += 1 + } + } +} + +// Close closes the manager. It should be called after the migration is done. +// It will close the db and notify the snapshotsManager that the migration is done. +func (m *Manager) Close() error { + if err := m.db.Close(); err != nil { + return fmt.Errorf("failed to close db: %w", err) + } + if m.stateCommitment != nil { + m.snapshotsManager.EndMigration(m.stateCommitment) + } + + return nil +} diff --git a/store/v2/migration/manager_test.go b/store/v2/migration/manager_test.go new file mode 100644 index 000000000000..103b3244b650 --- /dev/null +++ b/store/v2/migration/manager_test.go @@ -0,0 +1,179 @@ +package migration + +import ( + "encoding/binary" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + corestore "cosmossdk.io/core/store" + coretesting "cosmossdk.io/core/testing" + "cosmossdk.io/store/v2/commitment" + "cosmossdk.io/store/v2/commitment/iavl" + dbm "cosmossdk.io/store/v2/db" + "cosmossdk.io/store/v2/snapshots" +) + +var storeKeys = []string{"store1", "store2"} + +func setupMigrationManager(t *testing.T) (*Manager, *commitment.CommitStore) { + t.Helper() + + db := dbm.NewMemDB() + multiTrees := make(map[string]commitment.Tree) + for _, storeKey := range storeKeys { + prefixDB := dbm.NewPrefixDB(db, []byte(storeKey)) + multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, coretesting.NewNopLogger(), iavl.DefaultConfig()) + } + commitStore, err := commitment.NewCommitStore(multiTrees, nil, db, coretesting.NewNopLogger()) + require.NoError(t, err) + + snapshotsStore, err := snapshots.NewStore(t.TempDir()) + require.NoError(t, err) + + snapshotsManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), commitStore, nil, coretesting.NewNopLogger()) + + db1 := dbm.NewMemDB() + multiTrees1 := make(map[string]commitment.Tree) + for _, storeKey := range storeKeys { + prefixDB := dbm.NewPrefixDB(db1, []byte(storeKey)) + multiTrees1[storeKey] = iavl.NewIavlTree(prefixDB, coretesting.NewNopLogger(), iavl.DefaultConfig()) + } + + newCommitStore, err := commitment.NewCommitStore(multiTrees1, nil, db1, coretesting.NewNopLogger()) // for store/v2 + require.NoError(t, err) + + return NewManager(db, snapshotsManager, newCommitStore, coretesting.NewNopLogger()), commitStore +} + +func TestMigrateState(t *testing.T) { + m, orgCommitStore := setupMigrationManager(t) + // apply changeset + toVersion := uint64(100) + keyCount := 10 + for version 
:= uint64(1); version <= toVersion; version++ {
+		cs := corestore.NewChangeset(version)
+		for _, storeKey := range storeKeys {
+			for i := 0; i < keyCount; i++ {
+				cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
+			}
+		}
+		require.NoError(t, orgCommitStore.WriteChangeset(cs))
+		_, err := orgCommitStore.Commit(version)
+		require.NoError(t, err)
+	}
+
+	err := m.Migrate(toVersion - 1)
+	require.NoError(t, err)
+
+	// expect an error for the conflicting process, since Migrate triggers the
+	// snapshotter to create a migration, which already starts a snapshot process.
+	_, err = m.snapshotsManager.Create(toVersion - 1)
+	require.Error(t, err)
+
+	// check the migrated state
+	for version := uint64(1); version < toVersion; version++ {
+		for _, storeKey := range storeKeys {
+			for i := 0; i < keyCount; i++ {
+				val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i)))
+				require.NoError(t, err)
+				require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val)
+			}
+		}
+
+		// check the latest state
+		val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1"))
+		require.NoError(t, err)
+		require.Nil(t, val)
+		val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0"))
+		require.NoError(t, err)
+		require.Nil(t, val)
+	}
+}
+
+func TestStartMigrateState(t *testing.T) {
+	m, orgCommitStore := setupMigrationManager(t)
+
+	chDone := make(chan struct{})
+	chChangeset := make(chan *VersionedChangeset, 1)
+
+	// apply changeset
+	toVersion := uint64(10)
+	keyCount := 5
+	changesets := []corestore.Changeset{}
+
+	for version := uint64(1); version <= toVersion; version++ {
+		cs := corestore.NewChangeset(version)
+		for _, storeKey := range storeKeys {
+			for i := 0; i < keyCount; i++ {
+				cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
+			}
+		}
+		changesets = append(changesets, *cs)
+		require.NoError(t, orgCommitStore.WriteChangeset(cs))
+		_, err := orgCommitStore.Commit(version)
+		require.NoError(t, err)
+	}
+
+	// feed changesets to channel
+	go func() {
+		for version := uint64(1); version <= toVersion; version++ {
+			chChangeset <- &VersionedChangeset{
+				Version:   version,
+				Changeset: &changesets[version-1],
+			}
+		}
+	}()
+
+	// check if the migration process is complete
+	go func() {
+		for {
+			migrateVersion := m.GetMigratedVersion()
+			if migrateVersion == toVersion-1 {
+				break
+			}
+		}
+
+		chDone <- struct{}{}
+	}()
+
+	err := m.Start(toVersion-1, chChangeset, chDone)
+	require.NoError(t, err)
+
+	// expect an error for the conflicting process, since Migrate triggers the
+	// snapshotter to create a migration, which already starts a snapshot process.
+	_, err = m.snapshotsManager.Create(toVersion - 1)
+	require.Error(t, err)
+
+	if m.stateCommitment != nil {
+		// check the migrated state
+		for version := uint64(1); version < toVersion; version++ {
+			for _, storeKey := range storeKeys {
+				for i := 0; i < keyCount; i++ {
+					val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i)))
+					require.NoError(t, err)
+					require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val)
+				}
+			}
+		}
+		// check the latest state
+		val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1"))
+		require.NoError(t, err)
+		require.Nil(t, val)
+		val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0"))
+		require.NoError(t, err)
+		require.Nil(t, val)
+	}
+
+	// check that the migration manager wrote the changesets to the db
+	for version := uint64(1); version < toVersion; version++ {
+		buf := make([]byte, 8)
+		binary.BigEndian.PutUint64(buf, version)
+		csKey := []byte(fmt.Sprintf(migrateChangesetKeyFmt, buf))
+		csVal, err := m.db.Get(csKey)
+		require.NoError(t, err)
+		require.NotEmpty(t, csVal)
+	}
+}
diff --git a/store/v2/mock/db_mock.go b/store/v2/mock/db_mock.go
new file mode 100644
index 000000000000..31541c998f3b
--- /dev/null
+++ b/store/v2/mock/db_mock.go
@@ -0,0 +1,301 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: ./types.go
+//
+// Generated by this command:
+//
+//	mockgen -package mock -destination ./db_mock.go -source ./types.go
+//
+
+// Package mock is a generated GoMock package.
+package mock
+
+import (
+	reflect "reflect"
+
+	store "cosmossdk.io/core/store"
+	proof "cosmossdk.io/store/v2/proof"
+	gomock "go.uber.org/mock/gomock"
+)
+
+// MockStateCommitter is a mock of StateCommitter interface.
+type MockStateCommitter struct {
+	ctrl     *gomock.Controller
+	recorder *MockStateCommitterMockRecorder
+	isgomock struct{}
+}
+
+// MockStateCommitterMockRecorder is the mock recorder for MockStateCommitter.
+type MockStateCommitterMockRecorder struct {
+	mock *MockStateCommitter
+}
+
+// NewMockStateCommitter creates a new mock instance.
+func NewMockStateCommitter(ctrl *gomock.Controller) *MockStateCommitter {
+	mock := &MockStateCommitter{ctrl: ctrl}
+	mock.recorder = &MockStateCommitterMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockStateCommitter) EXPECT() *MockStateCommitterMockRecorder {
+	return m.recorder
+}
+
+// Close mocks base method.
+func (m *MockStateCommitter) Close() error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Close")
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// Close indicates an expected call of Close.
+func (mr *MockStateCommitterMockRecorder) Close() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockStateCommitter)(nil).Close))
+}
+
+// Commit mocks base method.
+func (m *MockStateCommitter) Commit(version uint64) (*proof.CommitInfo, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Commit", version)
+	ret0, _ := ret[0].(*proof.CommitInfo)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// Commit indicates an expected call of Commit.
+func (mr *MockStateCommitterMockRecorder) Commit(version any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockStateCommitter)(nil).Commit), version)
+}
+
+// Get mocks base method.
+func (m *MockStateCommitter) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", storeKey, version, key) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockStateCommitterMockRecorder) Get(storeKey, version, key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockStateCommitter)(nil).Get), storeKey, version, key) +} + +// GetCommitInfo mocks base method. +func (m *MockStateCommitter) GetCommitInfo(version uint64) (*proof.CommitInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCommitInfo", version) + ret0, _ := ret[0].(*proof.CommitInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCommitInfo indicates an expected call of GetCommitInfo. +func (mr *MockStateCommitterMockRecorder) GetCommitInfo(version any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCommitInfo", reflect.TypeOf((*MockStateCommitter)(nil).GetCommitInfo), version) +} + +// GetLatestVersion mocks base method. +func (m *MockStateCommitter) GetLatestVersion() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLatestVersion") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetLatestVersion indicates an expected call of GetLatestVersion. +func (mr *MockStateCommitterMockRecorder) GetLatestVersion() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLatestVersion", reflect.TypeOf((*MockStateCommitter)(nil).GetLatestVersion)) +} + +// GetProof mocks base method. +func (m *MockStateCommitter) GetProof(storeKey []byte, version uint64, key []byte) ([]proof.CommitmentOp, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProof", storeKey, version, key) + ret0, _ := ret[0].([]proof.CommitmentOp) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProof indicates an expected call of GetProof. +func (mr *MockStateCommitterMockRecorder) GetProof(storeKey, version, key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProof", reflect.TypeOf((*MockStateCommitter)(nil).GetProof), storeKey, version, key) +} + +// Has mocks base method. +func (m *MockStateCommitter) Has(storeKey []byte, version uint64, key []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Has", storeKey, version, key) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Has indicates an expected call of Has. +func (mr *MockStateCommitterMockRecorder) Has(storeKey, version, key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockStateCommitter)(nil).Has), storeKey, version, key) +} + +// Iterator mocks base method. +func (m *MockStateCommitter) Iterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Iterator", storeKey, version, start, end) + ret0, _ := ret[0].(store.Iterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Iterator indicates an expected call of Iterator. 
+func (mr *MockStateCommitterMockRecorder) Iterator(storeKey, version, start, end any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockStateCommitter)(nil).Iterator), storeKey, version, start, end) +} + +// LoadVersion mocks base method. +func (m *MockStateCommitter) LoadVersion(targetVersion uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadVersion", targetVersion) + ret0, _ := ret[0].(error) + return ret0 +} + +// LoadVersion indicates an expected call of LoadVersion. +func (mr *MockStateCommitterMockRecorder) LoadVersion(targetVersion any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadVersion", reflect.TypeOf((*MockStateCommitter)(nil).LoadVersion), targetVersion) +} + +// LoadVersionAndUpgrade mocks base method. +func (m *MockStateCommitter) LoadVersionAndUpgrade(version uint64, upgrades *store.StoreUpgrades) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadVersionAndUpgrade", version, upgrades) + ret0, _ := ret[0].(error) + return ret0 +} + +// LoadVersionAndUpgrade indicates an expected call of LoadVersionAndUpgrade. +func (mr *MockStateCommitterMockRecorder) LoadVersionAndUpgrade(version, upgrades any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadVersionAndUpgrade", reflect.TypeOf((*MockStateCommitter)(nil).LoadVersionAndUpgrade), version, upgrades) +} + +// LoadVersionForOverwriting mocks base method. +func (m *MockStateCommitter) LoadVersionForOverwriting(targetVersion uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadVersionForOverwriting", targetVersion) + ret0, _ := ret[0].(error) + return ret0 +} + +// LoadVersionForOverwriting indicates an expected call of LoadVersionForOverwriting. +func (mr *MockStateCommitterMockRecorder) LoadVersionForOverwriting(targetVersion any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadVersionForOverwriting", reflect.TypeOf((*MockStateCommitter)(nil).LoadVersionForOverwriting), targetVersion) +} + +// PausePruning mocks base method. +func (m *MockStateCommitter) PausePruning(pause bool) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PausePruning", pause) +} + +// PausePruning indicates an expected call of PausePruning. +func (mr *MockStateCommitterMockRecorder) PausePruning(pause any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PausePruning", reflect.TypeOf((*MockStateCommitter)(nil).PausePruning), pause) +} + +// Prune mocks base method. +func (m *MockStateCommitter) Prune(version uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Prune", version) + ret0, _ := ret[0].(error) + return ret0 +} + +// Prune indicates an expected call of Prune. +func (mr *MockStateCommitterMockRecorder) Prune(version any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prune", reflect.TypeOf((*MockStateCommitter)(nil).Prune), version) +} + +// PruneStoreKeys mocks base method. +func (m *MockStateCommitter) PruneStoreKeys(storeKeys []string, version uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PruneStoreKeys", storeKeys, version) + ret0, _ := ret[0].(error) + return ret0 +} + +// PruneStoreKeys indicates an expected call of PruneStoreKeys. 
+func (mr *MockStateCommitterMockRecorder) PruneStoreKeys(storeKeys, version any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneStoreKeys", reflect.TypeOf((*MockStateCommitter)(nil).PruneStoreKeys), storeKeys, version) +} + +// ReverseIterator mocks base method. +func (m *MockStateCommitter) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReverseIterator", storeKey, version, start, end) + ret0, _ := ret[0].(store.Iterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReverseIterator indicates an expected call of ReverseIterator. +func (mr *MockStateCommitterMockRecorder) ReverseIterator(storeKey, version, start, end any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockStateCommitter)(nil).ReverseIterator), storeKey, version, start, end) +} + +// SetInitialVersion mocks base method. +func (m *MockStateCommitter) SetInitialVersion(version uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetInitialVersion", version) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetInitialVersion indicates an expected call of SetInitialVersion. +func (mr *MockStateCommitterMockRecorder) SetInitialVersion(version any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetInitialVersion", reflect.TypeOf((*MockStateCommitter)(nil).SetInitialVersion), version) +} + +// VersionExists mocks base method. +func (m *MockStateCommitter) VersionExists(v uint64) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VersionExists", v) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// VersionExists indicates an expected call of VersionExists. +func (mr *MockStateCommitterMockRecorder) VersionExists(v any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VersionExists", reflect.TypeOf((*MockStateCommitter)(nil).VersionExists), v) +} + +// WriteChangeset mocks base method. +func (m *MockStateCommitter) WriteChangeset(cs *store.Changeset) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteChangeset", cs) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteChangeset indicates an expected call of WriteChangeset. +func (mr *MockStateCommitterMockRecorder) WriteChangeset(cs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChangeset", reflect.TypeOf((*MockStateCommitter)(nil).WriteChangeset), cs) +} diff --git a/store/v2/mock/types.go b/store/v2/mock/types.go new file mode 100644 index 000000000000..3c5edb372a85 --- /dev/null +++ b/store/v2/mock/types.go @@ -0,0 +1,13 @@ +package mock + +import "cosmossdk.io/store/v2" + +// StateCommitter is a mock of store.Committer +type StateCommitter interface { + store.Committer + store.Pruner + store.PausablePruner + store.UpgradeableStore + store.VersionedReader + store.UpgradableDatabase +} diff --git a/store/v2/pruning/manager.go b/store/v2/pruning/manager.go new file mode 100644 index 000000000000..e21fe1ce1952 --- /dev/null +++ b/store/v2/pruning/manager.go @@ -0,0 +1,52 @@ +package pruning + +import ( + "cosmossdk.io/store/v2" +) + +// Manager is a struct that manages the pruning of old versions of the SC and SS. +type Manager struct { + // scPruner is the pruner for the SC. 
+	scPruner store.Pruner
+	// scPruningOption are the pruning options for the SC.
+	scPruningOption *store.PruningOption
+}
+
+// NewManager creates a new Pruning Manager.
+func NewManager(scPruner store.Pruner, scPruningOption *store.PruningOption) *Manager {
+	return &Manager{
+		scPruner:        scPruner,
+		scPruningOption: scPruningOption,
+	}
+}
+
+// Prune prunes the SC to the provided version.
+//
+// NOTE: It can also be called manually, outside of the regular commit flow.
+func (m *Manager) Prune(version uint64) error {
+	// Prune the SC.
+	if m.scPruningOption != nil {
+		if prune, pruneTo := m.scPruningOption.ShouldPrune(version); prune {
+			if err := m.scPruner.Prune(pruneTo); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (m *Manager) signalPruning(pause bool) {
+	if scPausablePruner, ok := m.scPruner.(store.PausablePruner); ok {
+		scPausablePruner.PausePruning(pause)
+	}
+}
+
+func (m *Manager) PausePruning() {
+	m.signalPruning(true)
+}
+
+func (m *Manager) ResumePruning(version uint64) error {
+	m.signalPruning(false)
+	return m.Prune(version)
+}
diff --git a/store/v2/pruning/manager_test.go b/store/v2/pruning/manager_test.go
new file mode 100644
index 000000000000..d45d123a3504
--- /dev/null
+++ b/store/v2/pruning/manager_test.go
@@ -0,0 +1,227 @@
+package pruning
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	corestore "cosmossdk.io/core/store"
+	coretesting "cosmossdk.io/core/testing"
+	"cosmossdk.io/store/v2"
+	"cosmossdk.io/store/v2/commitment"
+	"cosmossdk.io/store/v2/commitment/iavl"
+	dbm "cosmossdk.io/store/v2/db"
+)
+
+var storeKeys = []string{"store1", "store2", "store3"}
+
+type PruningManagerTestSuite struct {
+	suite.Suite
+
+	manager *Manager
+	sc      *commitment.CommitStore
+}
+
+func TestPruningManagerTestSuite(t *testing.T) {
+	suite.Run(t, &PruningManagerTestSuite{})
+}
+
+func (s *PruningManagerTestSuite) SetupTest() {
+	nopLog := coretesting.NewNopLogger()
+	var err error
+
+	mdb := dbm.NewMemDB()
+	multiTrees := make(map[string]commitment.Tree)
+	for _, storeKey := range storeKeys {
+		prefixDB := dbm.NewPrefixDB(mdb, []byte(storeKey))
+		multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, nopLog, iavl.DefaultConfig())
+	}
+	s.sc, err = commitment.NewCommitStore(multiTrees, nil, mdb, nopLog)
+	s.Require().NoError(err)
+
+	scPruningOption := store.NewPruningOptionWithCustom(0, 1) // prune all
+	s.manager = NewManager(s.sc, scPruningOption)
+}
+
+func (s *PruningManagerTestSuite) TestPrune() {
+	// commit changesets with pruning
+	toVersion := uint64(100)
+	keyCount := 10
+	for version := uint64(1); version <= toVersion; version++ {
+		cs := corestore.NewChangeset(version)
+		for _, storeKey := range storeKeys {
+			for i := 0; i < keyCount; i++ {
+				cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
+			}
+		}
+		s.Require().NoError(s.sc.WriteChangeset(cs))
+		_, err := s.sc.Commit(version)
+		s.Require().NoError(err)
+
+		s.Require().NoError(s.manager.Prune(version))
+	}
+
+	// wait for the pruning to finish in the commitment store
+	checkSCPrune := func() bool {
+		count := 0
+		for _, storeKey := range storeKeys {
+			_, err := s.sc.GetProof([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", toVersion-1, 0)))
+			if err != nil {
+				count++
+			}
+		}
+
+		return count == len(storeKeys)
+	}
+	s.Require().Eventually(checkSCPrune, 10*time.Second, 1*time.Second)
+}
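+// Illustrative commit flow (editor's sketch under stated assumptions, not part
+// of the upstream change): a caller is expected to pause pruning for the
+// duration of a commit and resume it with the committed version, e.g.:
+//
+//	pm := NewManager(sc, store.NewPruningOptionWithCustom(2, 10))
+//	pm.PausePruning()
+//	_, err := sc.Commit(version)
+//	if err == nil {
+//		err = pm.ResumePruning(version) // resumes and prunes per the option
+//	}
+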
+func TestPruningOption(t *testing.T) {
+	testCases := []struct {
+		name         string
+		options      *store.PruningOption
+		version      uint64
+		pruning      bool
+		pruneVersion uint64
+	}{
+		{
+			name:         "no pruning",
+			options:      store.NewPruningOptionWithCustom(100, 0),
+			version:      100,
+			pruning:      false,
+			pruneVersion: 0,
+		},
+		{
+			name:         "prune all",
+			options:      store.NewPruningOptionWithCustom(0, 1),
+			version:      19,
+			pruning:      true,
+			pruneVersion: 18,
+		},
+		{
+			name:         "prune none",
+			options:      store.NewPruningOptionWithCustom(100, 10),
+			version:      19,
+			pruning:      false,
+			pruneVersion: 0,
+		},
+		{
+			name:         "prune some",
+			options:      store.NewPruningOptionWithCustom(10, 50),
+			version:      100,
+			pruning:      true,
+			pruneVersion: 89,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			pruning, pruneVersion := tc.options.ShouldPrune(tc.version)
+			require.Equal(t, tc.pruning, pruning)
+			require.Equal(t, tc.pruneVersion, pruneVersion)
+		})
+	}
+}
+
+func (s *PruningManagerTestSuite) TestSignalCommit() {
+	// commit version 1
+	cs := corestore.NewChangeset(1)
+	for _, storeKey := range storeKeys {
+		cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", 1, 0)), []byte(fmt.Sprintf("value-%d-%d", 1, 0)), false)
+	}
+
+	s.Require().NoError(s.sc.WriteChangeset(cs))
+	_, err := s.sc.Commit(1)
+	s.Require().NoError(err)
+
+	// commit version 2
+	for _, storeKey := range storeKeys {
+		cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", 2, 0)), []byte(fmt.Sprintf("value-%d-%d", 2, 0)), false)
+	}
+	cs.Version = 2
+
+	// signal that the commit has started
+	s.manager.PausePruning()
+
+	s.Require().NoError(s.sc.WriteChangeset(cs))
+	_, err = s.sc.Commit(2)
+	s.Require().NoError(err)
+
+	// try to prune before signaling that the commit has finished
+	s.Require().NoError(s.manager.Prune(2))
+
+	// the proof is already removed even though we have not yet signaled (via
+	// ResumePruning) that the commit has finished, since the commitInfo is
+	// removed asynchronously with the tree data
+	checkSCPrune := func() bool {
+		count := 0
+		for _, storeKey := range storeKeys {
+			_, err := s.sc.GetProof([]byte(storeKey), 1, []byte(fmt.Sprintf("key-%d-%d", 1, 0)))
+			if err != nil {
+				count++
+			}
+		}
+
+		return count == len(storeKeys)
+	}
+	s.Require().Eventually(checkSCPrune, 10*time.Second, 1*time.Second)
+
+	// data from the state commitment should not be pruned yet, since we haven't
+	// signaled that the commit process has finished
+	val, err := s.sc.Get([]byte(storeKeys[0]), 1, []byte(fmt.Sprintf("key-%d-%d", 1, 0)))
+	s.Require().NoError(err)
+	s.Require().Equal(val, []byte(fmt.Sprintf("value-%d-%d", 1, 0)))
+
+	// signal that the commit has finished; version 1 should now be pruned
+	err = s.manager.ResumePruning(2)
+	s.Require().NoError(err)
+
+	checkSCPrune = func() bool {
+		count := 0
+		for _, storeKey := range storeKeys {
+			_, err := s.sc.GetProof([]byte(storeKey), 1, []byte(fmt.Sprintf("key-%d-%d", 1, 0)))
+			if err != nil {
+				count++
+			}
+		}
+
+		return count == len(storeKeys)
+	}
+	s.Require().Eventually(checkSCPrune, 10*time.Second, 1*time.Second)
+
+	// now pause and resume pruning around each commit accordingly:
+	// commit changesets with pruning
+	toVersion := uint64(100)
+	keyCount := 10
+	for version := uint64(3); version <= toVersion; version++ {
+		cs := corestore.NewChangeset(version)
+		for _, storeKey := range storeKeys {
+			for i := 0; i < keyCount; i++ {
+				cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
+			}
+		}
+		s.manager.PausePruning()
+
+		s.Require().NoError(s.sc.WriteChangeset(cs))
+		_, err := s.sc.Commit(version)
+		s.Require().NoError(err)
+
+		err = s.manager.ResumePruning(version)
+		s.Require().NoError(err)
+	}
+
+	// wait for the pruning to finish in the commitment store
+	checkSCPrune = func() bool {
+		count := 0
+		for _, storeKey := range storeKeys {
+			_, err := s.sc.GetProof([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", toVersion-1, 0)))
+			if err != nil {
+				count++
+			}
+		}
+
+		return count == len(storeKeys)
+	}
+	s.Require().Eventually(checkSCPrune, 10*time.Second, 1*time.Second)
+}
diff --git a/store/v2/root/factory.go b/store/v2/root/factory.go
new file mode 100644
index 000000000000..36eadf2382bc
--- /dev/null
+++ b/store/v2/root/factory.go
@@ -0,0 +1,131 @@
+package root
+
+import (
+	"errors"
+	"fmt"
+
+	"cosmossdk.io/core/log"
+	corestore "cosmossdk.io/core/store"
+	"cosmossdk.io/store/v2"
+	"cosmossdk.io/store/v2/commitment"
+	"cosmossdk.io/store/v2/commitment/iavl"
+	"cosmossdk.io/store/v2/commitment/mem"
+	"cosmossdk.io/store/v2/db"
+	"cosmossdk.io/store/v2/internal"
+	"cosmossdk.io/store/v2/pruning"
+)
+
+type (
+	SCType string
+)
+
+const (
+	SCTypeIavl   SCType = "iavl"
+	SCTypeIavlV2 SCType = "iavl-v2"
+)
+
+// Options are the configuration options for the state commitment backend of a
+// root store.
+type Options struct {
+	SCType          SCType               `mapstructure:"sc-type" toml:"sc-type" comment:"State commitment database type. Currently we support: \"iavl\" and \"iavl-v2\""`
+	SCPruningOption *store.PruningOption `mapstructure:"sc-pruning-option" toml:"sc-pruning-option" comment:"Pruning options for state commitment"`
+	IavlConfig      *iavl.Config         `mapstructure:"iavl-config" toml:"iavl-config"`
+}
+
+// FactoryOptions are the options for creating a root store.
+type FactoryOptions struct {
+	Logger    log.Logger
+	RootDir   string
+	Options   Options
+	StoreKeys []string
+	SCRawDB   corestore.KVStoreWithBatch
+}
+
+// DefaultStoreOptions returns the default options for creating a root store.
+func DefaultStoreOptions() Options {
+	return Options{
+		SCType: SCTypeIavl,
+		SCPruningOption: &store.PruningOption{
+			KeepRecent: 2,
+			Interval:   100,
+		},
+		IavlConfig: &iavl.Config{
+			CacheSize:              100_000,
+			SkipFastStorageUpgrade: true,
+		},
+	}
+}
+
+// CreateRootStore is a convenience function to create a root store based on the
+// provided FactoryOptions. Strictly speaking, app developers can create the root
+// store directly by calling root.New, so this function is not necessary, but it
+// demonstrates the required steps and configuration to create a root store.
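+//
+// An illustrative wiring sketch (editor's example assuming a memdb-backed
+// setup, as in the tests; not part of the upstream change):
+//
+//	opts := &FactoryOptions{
+//		Logger:    logger,
+//		RootDir:   rootDir,
+//		Options:   DefaultStoreOptions(),
+//		StoreKeys: []string{"store1", "store2"},
+//		SCRawDB:   db.NewMemDB(),
+//	}
+//	rs, err := CreateRootStore(opts)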
+func CreateRootStore(opts *FactoryOptions) (store.RootStore, error) { + var ( + sc *commitment.CommitStore + err error + ) + + storeOpts := opts.Options + + metadata := commitment.NewMetadataStore(opts.SCRawDB) + latestVersion, err := metadata.GetLatestVersion() + if err != nil { + return nil, err + } + if len(opts.StoreKeys) == 0 { + lastCommitInfo, err := metadata.GetCommitInfo(latestVersion) + if err != nil { + return nil, err + } + if lastCommitInfo == nil { + return nil, fmt.Errorf("tried to construct a root store with no store keys specified but no commit info found for version %d", latestVersion) + } + for _, si := range lastCommitInfo.StoreInfos { + opts.StoreKeys = append(opts.StoreKeys, string(si.Name)) + } + } + removedStoreKeys, err := metadata.GetRemovedStoreKeys(latestVersion) + if err != nil { + return nil, err + } + + newTreeFn := func(key string) (commitment.Tree, error) { + if internal.IsMemoryStoreKey(key) { + return mem.New(), nil + } else { + switch storeOpts.SCType { + case SCTypeIavl: + return iavl.NewIavlTree(db.NewPrefixDB(opts.SCRawDB, []byte(key)), opts.Logger, storeOpts.IavlConfig), nil + case SCTypeIavlV2: + return nil, errors.New("iavl v2 not supported") + default: + return nil, errors.New("unsupported commitment store type") + } + } + } + + trees := make(map[string]commitment.Tree, len(opts.StoreKeys)) + for _, key := range opts.StoreKeys { + tree, err := newTreeFn(key) + if err != nil { + return nil, err + } + trees[key] = tree + } + oldTrees := make(map[string]commitment.Tree, len(opts.StoreKeys)) + for _, key := range removedStoreKeys { + tree, err := newTreeFn(string(key)) + if err != nil { + return nil, err + } + oldTrees[string(key)] = tree + } + + sc, err = commitment.NewCommitStore(trees, oldTrees, opts.SCRawDB, opts.Logger) + if err != nil { + return nil, err + } + + pm := pruning.NewManager(sc, storeOpts.SCPruningOption) + return New(opts.SCRawDB, opts.Logger, sc, pm, nil, nil) +} diff --git a/store/v2/root/migrate_test.go b/store/v2/root/migrate_test.go new file mode 100644 index 000000000000..3b431bdb24f6 --- /dev/null +++ b/store/v2/root/migrate_test.go @@ -0,0 +1,156 @@ +package root + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/suite" + + corestore "cosmossdk.io/core/store" + coretesting "cosmossdk.io/core/testing" + "cosmossdk.io/log" + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/commitment" + "cosmossdk.io/store/v2/commitment/iavl" + dbm "cosmossdk.io/store/v2/db" + "cosmossdk.io/store/v2/migration" + "cosmossdk.io/store/v2/pruning" + "cosmossdk.io/store/v2/snapshots" +) + +var storeKeys = []string{"store1", "store2", "store3"} + +type MigrateStoreTestSuite struct { + suite.Suite + + rootStore store.RootStore +} + +func TestMigrateStoreTestSuite(t *testing.T) { + suite.Run(t, &MigrateStoreTestSuite{}) +} + +func (s *MigrateStoreTestSuite) SetupTest() { + testLog := log.NewTestLogger(s.T()) + nopLog := coretesting.NewNopLogger() + + mdb := dbm.NewMemDB() + multiTrees := make(map[string]commitment.Tree) + for _, storeKey := range storeKeys { + prefixDB := dbm.NewPrefixDB(mdb, []byte(storeKey)) + multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, nopLog, iavl.DefaultConfig()) + } + orgSC, err := commitment.NewCommitStore(multiTrees, nil, mdb, testLog) + s.Require().NoError(err) + + // apply changeset against the original store + toVersion := uint64(200) + keyCount := 10 + for version := uint64(1); version <= toVersion; version++ { + cs := corestore.NewChangeset(version) + for _, storeKey := range storeKeys { + 
for i := 0; i < keyCount; i++ {
+				cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
+			}
+		}
+		s.Require().NoError(orgSC.WriteChangeset(cs))
+		_, err = orgSC.Commit(version)
+		s.Require().NoError(err)
+	}
+
+	multiTrees1 := make(map[string]commitment.Tree)
+	for _, storeKey := range storeKeys {
+		multiTrees1[storeKey] = iavl.NewIavlTree(dbm.NewMemDB(), nopLog, iavl.DefaultConfig())
+	}
+	sc, err := commitment.NewCommitStore(multiTrees1, nil, dbm.NewMemDB(), testLog)
+	s.Require().NoError(err)
+
+	snapshotsStore, err := snapshots.NewStore(s.T().TempDir())
+	s.Require().NoError(err)
+	snapshotManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), orgSC, nil, testLog)
+	migrationManager := migration.NewManager(dbm.NewMemDB(), snapshotManager, sc, testLog)
+	pm := pruning.NewManager(sc, nil)
+
+	// assume there is no separate state storage; simulate the migration process
+	s.rootStore, err = New(dbm.NewMemDB(), testLog, orgSC, pm, migrationManager, nil)
+	s.Require().NoError(err)
+}
+
+func (s *MigrateStoreTestSuite) TestMigrateState() {
+	err := s.rootStore.LoadLatestVersion()
+	s.Require().NoError(err)
+	originalLatestVersion, err := s.rootStore.GetLatestVersion()
+	s.Require().NoError(err)
+
+	// check that Query falls back to the original SC
+	for version := uint64(1); version <= originalLatestVersion; version++ {
+		for _, storeKey := range storeKeys {
+			for i := 0; i < 10; i++ {
+				res, err := s.rootStore.Query([]byte(storeKey), version, []byte(fmt.Sprintf("key-%d-%d", version, i)), true)
+				s.Require().NoError(err)
+				s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), res.Value)
+			}
+		}
+	}
+
+	// continue to apply changesets against the original store
+	latestVersion := originalLatestVersion + 1
+	keyCount := 10
+	for ; latestVersion < 2*originalLatestVersion; latestVersion++ {
+		cs := corestore.NewChangeset(latestVersion)
+		for _, storeKey := range storeKeys {
+			for i := 0; i < keyCount; i++ {
+				cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", latestVersion, i)), []byte(fmt.Sprintf("value-%d-%d", latestVersion, i)), false)
+			}
+		}
+		_, err = s.rootStore.Commit(cs)
+		s.Require().NoError(err)
+
+		// check if the migration is completed
+		ver, err := s.rootStore.GetLatestVersion()
+		s.Require().NoError(err)
+		if ver == latestVersion {
+			break
+		}
+
+		// add some delay to simulate the consensus process
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	// check that the migration was successful
+	version, err := s.rootStore.GetLatestVersion()
+	s.Require().NoError(err)
+	s.Require().Equal(latestVersion, version)
+
+	// query against the migrated store
+	for version := uint64(1); version <= latestVersion; version++ {
+		for _, storeKey := range storeKeys {
+			for i := 0; i < keyCount; i++ {
+				targetVersion := version
+				if version < originalLatestVersion {
+					targetVersion = originalLatestVersion
+				}
+				res, err := s.rootStore.Query([]byte(storeKey), targetVersion, []byte(fmt.Sprintf("key-%d-%d", version, i)), true)
+				s.Require().NoError(err)
+				s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), res.Value)
+			}
+		}
+	}
+
+	// apply changesets against the migrated store
+	for version := latestVersion + 1; version <= latestVersion+10; version++ {
+		cs := corestore.NewChangeset(version)
+		for _, storeKey := range storeKeys {
+			for i := 0; i < keyCount; i++ {
+				cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
+			}
+		}
+		_, err = s.rootStore.Commit(cs)
+		s.Require().NoError(err)
+	}
+
+	version, err = s.rootStore.GetLatestVersion()
+	s.Require().NoError(err)
+	s.Require().Equal(latestVersion+10, version)
+}
diff --git a/store/v2/root/store.go b/store/v2/root/store.go
new file mode 100644
index 000000000000..6faa51602c5b
--- /dev/null
+++ b/store/v2/root/store.go
@@ -0,0 +1,400 @@
+package root
+
+import (
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+	"time"
+
+	"golang.org/x/sync/errgroup"
+
+	corelog "cosmossdk.io/core/log"
+	corestore "cosmossdk.io/core/store"
+	"cosmossdk.io/store/v2"
+	"cosmossdk.io/store/v2/metrics"
+	"cosmossdk.io/store/v2/migration"
+	"cosmossdk.io/store/v2/proof"
+	"cosmossdk.io/store/v2/pruning"
+)
+
+var (
+	_ store.RootStore        = (*Store)(nil)
+	_ store.UpgradeableStore = (*Store)(nil)
+)
+
+// Store defines the SDK's default RootStore implementation. It contains a single
+// State Commitment (SC) backend, which may or may not support multiple store
+// keys, depending on the implementation.
+type Store struct {
+	logger corelog.Logger
+
+	// holds the db instance for closing it
+	dbCloser io.Closer
+
+	// stateCommitment reflects the state commitment (SC) backend
+	stateCommitment store.Committer
+
+	// lastCommitInfo reflects the last version/hash that has been committed
+	lastCommitInfo *proof.CommitInfo
+
+	// telemetry reflects a telemetry agent responsible for emitting metrics (if any)
+	telemetry metrics.StoreMetrics
+
+	// pruningManager reflects the pruning manager used to prune state of the SC backend
+	pruningManager *pruning.Manager
+
+	// Migration related fields
+	// migrationManager reflects the migration manager used to migrate state from v1 to v2
+	migrationManager *migration.Manager
+	// chChangeset reflects the channel used to send the changeset to the migration manager
+	chChangeset chan *migration.VersionedChangeset
+	// chDone reflects the channel used to signal the migration manager that the migration
+	// is done
+	chDone chan struct{}
+	// isMigrating reflects whether the store is currently migrating
+	isMigrating bool
+}
+
+// New creates a new root Store instance.
+//
+// NOTE: The migration manager is optional and can be nil if no migration is required.
+func New(
+	dbCloser io.Closer,
+	logger corelog.Logger,
+	sc store.Committer,
+	pm *pruning.Manager,
+	mm *migration.Manager,
+	m metrics.StoreMetrics,
+) (store.RootStore, error) {
+	return &Store{
+		dbCloser:         dbCloser,
+		logger:           logger,
+		stateCommitment:  sc,
+		pruningManager:   pm,
+		migrationManager: mm,
+		telemetry:        m,
+		isMigrating:      mm != nil,
+	}, nil
+}
+
+// Close closes the store and resets all internal fields. Note, Close() is NOT
+// idempotent and should only be called once.
+func (s *Store) Close() (err error) {
+	err = errors.Join(err, s.stateCommitment.Close())
+	err = errors.Join(err, s.dbCloser.Close())
+
+	s.stateCommitment = nil
+	s.lastCommitInfo = nil
+
+	return err
+}
+
+func (s *Store) SetMetrics(m metrics.Metrics) {
+	s.telemetry = m
+}
+
+func (s *Store) SetInitialVersion(v uint64) error {
+	return s.stateCommitment.SetInitialVersion(v)
+}
+
+// getVersionedReader returns a VersionedReader for the given version. With the
+// state storage backend removed, it returns the state commitment if the version
+// exists there, and an error otherwise.
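+//
+// From a caller's perspective, the read path looks like this (editor's sketch,
+// based on the usage in the tests; not part of the upstream change):
+//
+//	ro, err := rs.StateAt(version)          // fails if the version was pruned
+//	reader, err := ro.GetReader([]byte("store1"))
+//	value, err := reader.Get([]byte("key"))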
+func (s *Store) getVersionedReader(version uint64) (store.VersionedReader, error) { + isExist, err := s.stateCommitment.VersionExists(version) + if err != nil { + return nil, err + } + if isExist { + return s.stateCommitment, nil + } + return nil, fmt.Errorf("version %d does not exist", version) +} + +func (s *Store) StateLatest() (uint64, corestore.ReaderMap, error) { + v, err := s.GetLatestVersion() + if err != nil { + return 0, nil, err + } + vReader, err := s.getVersionedReader(v) + if err != nil { + return 0, nil, err + } + + return v, NewReaderMap(v, vReader), nil +} + +// StateAt returns a read-only view of the state at a given version. +func (s *Store) StateAt(v uint64) (corestore.ReaderMap, error) { + vReader, err := s.getVersionedReader(v) + return NewReaderMap(v, vReader), err +} + +func (s *Store) GetStateCommitment() store.Committer { + return s.stateCommitment +} + +// LastCommitID returns a CommitID based off of the latest internal CommitInfo. +// If an internal CommitInfo is not set, a new one will be returned with only the +// latest version set, which is based off of the SC view. +func (s *Store) LastCommitID() (proof.CommitID, error) { + if s.lastCommitInfo != nil { + return s.lastCommitInfo.CommitID(), nil + } + + latestVersion, err := s.stateCommitment.GetLatestVersion() + if err != nil { + return proof.CommitID{}, err + } + // if the latest version is 0, we return a CommitID with version 0 and a hash of an empty byte slice + bz := sha256.Sum256([]byte{}) + + return proof.CommitID{Version: latestVersion, Hash: bz[:]}, nil +} + +// GetLatestVersion returns the latest version based on the latest internal +// CommitInfo. An error is returned if the latest CommitInfo or version cannot +// be retrieved. +func (s *Store) GetLatestVersion() (uint64, error) { + lastCommitID, err := s.LastCommitID() + if err != nil { + return 0, err + } + + return lastCommitID.Version, nil +} + +func (s *Store) Query(storeKey []byte, version uint64, key []byte, prove bool) (store.QueryResult, error) { + if s.telemetry != nil { + now := time.Now() + defer s.telemetry.MeasureSince(now, "root_store", "query") + } + + val, err := s.stateCommitment.Get(storeKey, version, key) + if err != nil { + return store.QueryResult{}, fmt.Errorf("failed to query SC store: %w", err) + } + + result := store.QueryResult{ + Key: key, + Value: val, + Version: version, + } + + if prove { + result.ProofOps, err = s.stateCommitment.GetProof(storeKey, version, key) + if err != nil { + return store.QueryResult{}, fmt.Errorf("failed to get SC store proof: %w", err) + } + } + + return result, nil +} + +func (s *Store) LoadLatestVersion() error { + if s.telemetry != nil { + now := time.Now() + defer s.telemetry.MeasureSince(now, "root_store", "load_latest_version") + } + + lv, err := s.GetLatestVersion() + if err != nil { + return err + } + + return s.loadVersion(lv, nil, false) +} + +func (s *Store) LoadVersion(version uint64) error { + if s.telemetry != nil { + now := time.Now() + defer s.telemetry.MeasureSince(now, "root_store", "load_version") + } + + return s.loadVersion(version, nil, false) +} + +func (s *Store) LoadVersionForOverwriting(version uint64) error { + if s.telemetry != nil { + now := time.Now() + defer s.telemetry.MeasureSince(now, "root_store", "load_version_for_overwriting") + } + + return s.loadVersion(version, nil, true) +} + +// LoadVersionAndUpgrade implements the UpgradeableStore interface. +// +// NOTE: It cannot be called while the store is migrating. 
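+//
+// Illustrative use (editor's sketch; it assumes corestore.StoreUpgrades exposes
+// Added/Deleted store-key lists, which is not shown in this diff):
+//
+//	upgrades := &corestore.StoreUpgrades{
+//		Added:   []string{"newstore"},
+//		Deleted: []string{"oldstore"},
+//	}
+//	err := rs.LoadVersionAndUpgrade(version, upgrades)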
+func (s *Store) LoadVersionAndUpgrade(version uint64, upgrades *corestore.StoreUpgrades) error {
+	if upgrades == nil {
+		return errors.New("upgrades cannot be nil")
+	}
+
+	if s.telemetry != nil {
+		defer s.telemetry.MeasureSince(time.Now(), "root_store", "load_version_and_upgrade")
+	}
+
+	if s.isMigrating {
+		return errors.New("cannot upgrade while migrating")
+	}
+
+	if err := s.loadVersion(version, upgrades, true); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (s *Store) loadVersion(v uint64, upgrades *corestore.StoreUpgrades, overrideAfter bool) error {
+	s.logger.Debug("loading version", "version", v)
+
+	if upgrades == nil {
+		if !overrideAfter {
+			if err := s.stateCommitment.LoadVersion(v); err != nil {
+				return fmt.Errorf("failed to load SC version %d: %w", v, err)
+			}
+		} else {
+			if err := s.stateCommitment.LoadVersionForOverwriting(v); err != nil {
+				return fmt.Errorf("failed to load SC version %d: %w", v, err)
+			}
+		}
+	} else {
+		// if upgrades are provided, we need to load the version and apply the upgrades
+		if err := s.stateCommitment.LoadVersionAndUpgrade(v, upgrades); err != nil {
+			return fmt.Errorf("failed to load SC version with upgrades %d: %w", v, err)
+		}
+	}
+
+	// set lastCommitInfo explicitly s.t. Commit commits the correct version, i.e. v+1
+	var err error
+	s.lastCommitInfo, err = s.stateCommitment.GetCommitInfo(v)
+	if err != nil {
+		return fmt.Errorf("failed to get commit info for version %d: %w", v, err)
+	}
+
+	// if we're migrating, we need to start the migration process
+	if s.isMigrating {
+		s.startMigration()
+	}
+
+	return nil
+}
+
+// Commit commits all state changes to the underlying SC backend. It writes a
+// batch of the changeset to the SC tree, retrieves the CommitInfo from the SC
+// tree, commits the SC tree, and finally returns the hash of the CommitInfo.
+func (s *Store) Commit(cs *corestore.Changeset) ([]byte, error) {
+	if s.telemetry != nil {
+		now := time.Now()
+		defer s.telemetry.MeasureSince(now, "root_store", "commit")
+	}
+
+	if err := s.handleMigration(cs); err != nil {
+		return nil, err
+	}
+
+	// signal to the pruning manager that a new version is about to be committed;
+	// this may be required if the SC backend implementation has a background
+	// pruning process (iavl v1, for example) which must be paused during the commit
+	s.pruningManager.PausePruning()
+
+	eg := new(errgroup.Group)
+
+	// commit SC async
+	var cInfo *proof.CommitInfo
+	eg.Go(func() error {
+		if err := s.stateCommitment.WriteChangeset(cs); err != nil {
+			return fmt.Errorf("failed to write batch to SC store: %w", err)
+		}
+		var scErr error
+		cInfo, scErr = s.stateCommitment.Commit(cs.Version)
+		if scErr != nil {
+			return fmt.Errorf("failed to commit SC store: %w", scErr)
+		}
+		return nil
+	})
+
+	if err := eg.Wait(); err != nil {
+		return nil, err
+	}
+
+	if cInfo.Version != cs.Version {
+		return nil, fmt.Errorf("commit version mismatch: got %d, expected %d", cInfo.Version, cs.Version)
+	}
+	s.lastCommitInfo = cInfo
+
+	// signal to the pruning manager that the commit is done
+	if err := s.pruningManager.ResumePruning(s.lastCommitInfo.Version); err != nil {
+		s.logger.Error("failed to signal commit done to pruning manager", "err", err)
+	}
+
+	return s.lastCommitInfo.Hash(), nil
+}
+
+// startMigration starts a migration process to migrate the RootStore/v1 to the
+// store/v2 backends and initializes the channels.
+// It runs in a separate goroutine and replaces the current RootStore with the +// migrated new backends once the migration is complete. +// +// NOTE: This method should only be called once after loadVersion. +func (s *Store) startMigration() { + // buffer at most 1 changeset, if the receiver is behind attempting to buffer + // more than 1 will block. + s.chChangeset = make(chan *migration.VersionedChangeset, 1) + // it is used to signal the migration manager that the migration is done + s.chDone = make(chan struct{}) + + mtx := sync.Mutex{} + mtx.Lock() + go func() { + version := s.lastCommitInfo.Version + s.logger.Info("starting migration", "version", version) + mtx.Unlock() + if err := s.migrationManager.Start(version, s.chChangeset, s.chDone); err != nil { + s.logger.Error("failed to start migration", "err", err) + } + }() + + // wait for the migration manager to start + mtx.Lock() + defer mtx.Unlock() +} + +func (s *Store) handleMigration(cs *corestore.Changeset) error { + if s.isMigrating { + // if the migration manager has already migrated to the version, close the + // channels and replace the state commitment + if s.migrationManager.GetMigratedVersion() == s.lastCommitInfo.Version { + close(s.chDone) + close(s.chChangeset) + s.isMigrating = false + // close the old state commitment and replace it with the new one + if err := s.stateCommitment.Close(); err != nil { + return fmt.Errorf("failed to close the old SC store: %w", err) + } + newStateCommitment := s.migrationManager.GetStateCommitment() + if newStateCommitment != nil { + s.stateCommitment = newStateCommitment + } + if err := s.migrationManager.Close(); err != nil { + return fmt.Errorf("failed to close migration manager: %w", err) + } + s.logger.Info("migration completed", "version", s.lastCommitInfo.Version) + } else { + // queue the next changeset to the migration manager + s.chChangeset <- &migration.VersionedChangeset{Version: s.lastCommitInfo.Version + 1, Changeset: cs} + } + } + return nil +} + +func (s *Store) Prune(version uint64) error { + return s.pruningManager.Prune(version) +} diff --git a/store/v2/root/store_mock_test.go b/store/v2/root/store_mock_test.go new file mode 100644 index 000000000000..0ec0a31bdaf2 --- /dev/null +++ b/store/v2/root/store_mock_test.go @@ -0,0 +1,103 @@ +package root + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + corestore "cosmossdk.io/core/store" + coretesting "cosmossdk.io/core/testing" + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/metrics" + "cosmossdk.io/store/v2/mock" + "cosmossdk.io/store/v2/pruning" +) + +func newTestRootStore(sc store.Committer) *Store { + noopLog := coretesting.NewNopLogger() + pm := pruning.NewManager(sc.(store.Pruner), nil) + return &Store{ + logger: noopLog, + telemetry: metrics.Metrics{}, + stateCommitment: sc, + pruningManager: pm, + isMigrating: false, + } +} + +func TestGetLatestState(t *testing.T) { + ctrl := gomock.NewController(t) + sc := mock.NewMockStateCommitter(ctrl) + rs := newTestRootStore(sc) + + // Get the latest version + sc.EXPECT().GetLatestVersion().Return(uint64(0), errors.New("error")) + _, err := rs.GetLatestVersion() + require.Error(t, err) + sc.EXPECT().GetLatestVersion().Return(uint64(1), nil) + v, err := rs.GetLatestVersion() + require.NoError(t, err) + require.Equal(t, uint64(1), v) +} + +func TestQuery(t *testing.T) { + ctrl := gomock.NewController(t) + sc := mock.NewMockStateCommitter(ctrl) + rs := newTestRootStore(sc) + + // Query without Proof + 
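+	// (Editor's note: each EXPECT() arms exactly one call, so the first Get
+	// below returns an error and the re-armed Get returns the value.)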
sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error")) + _, err := rs.Query(nil, 0, nil, false) + require.Error(t, err) + sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) + v, err := rs.Query(nil, 0, nil, false) + require.NoError(t, err) + require.Equal(t, []byte("value"), v.Value) + + // Query with Proof + sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) + sc.EXPECT().GetProof(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error")) + _, err = rs.Query(nil, 0, nil, true) + require.Error(t, err) + + // Query with Migration + + rs.isMigrating = true + sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) + _, err = rs.Query(nil, 0, nil, false) + require.NoError(t, err) +} + +func TestLoadVersion(t *testing.T) { + ctrl := gomock.NewController(t) + sc := mock.NewMockStateCommitter(ctrl) + rs := newTestRootStore(sc) + + // LoadLatestVersion + sc.EXPECT().GetLatestVersion().Return(uint64(0), errors.New("error")) + err := rs.LoadLatestVersion() + require.Error(t, err) + sc.EXPECT().GetLatestVersion().Return(uint64(1), nil) + sc.EXPECT().LoadVersion(uint64(1)).Return(errors.New("error")) + err = rs.LoadLatestVersion() + require.Error(t, err) + + // LoadVersion + sc.EXPECT().LoadVersion(gomock.Any()).Return(nil) + sc.EXPECT().GetCommitInfo(uint64(2)).Return(nil, errors.New("error")) + err = rs.LoadVersion(uint64(2)) + require.Error(t, err) + + // LoadVersionUpgrade + v := &corestore.StoreUpgrades{} + sc.EXPECT().LoadVersionAndUpgrade(uint64(2), v).Return(errors.New("error")) + err = rs.LoadVersionAndUpgrade(uint64(2), v) + require.Error(t, err) + + // LoadVersionUpgrade with Migration + rs.isMigrating = true + err = rs.LoadVersionAndUpgrade(uint64(2), v) + require.Error(t, err) +} diff --git a/store/v2/root/store_test.go b/store/v2/root/store_test.go new file mode 100644 index 000000000000..59df4d68384d --- /dev/null +++ b/store/v2/root/store_test.go @@ -0,0 +1,830 @@ +package root + +import ( + "crypto/sha256" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/suite" + + corestore "cosmossdk.io/core/store" + coretesting "cosmossdk.io/core/testing" + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/commitment" + "cosmossdk.io/store/v2/commitment/iavl" + dbm "cosmossdk.io/store/v2/db" + "cosmossdk.io/store/v2/proof" + "cosmossdk.io/store/v2/pruning" +) + +const ( + testStoreKey = "test_store_key" + testStoreKey2 = "test_store_key2" + testStoreKey3 = "test_store_key3" +) + +var testStoreKeys = []string{testStoreKey, testStoreKey2, testStoreKey3} + +var ( + testStoreKeyBytes = []byte(testStoreKey) + testStoreKey2Bytes = []byte(testStoreKey2) + testStoreKey3Bytes = []byte(testStoreKey3) +) + +type RootStoreTestSuite struct { + suite.Suite + + rootStore store.RootStore +} + +func TestStorageTestSuite(t *testing.T) { + suite.Run(t, &RootStoreTestSuite{}) +} + +func (s *RootStoreTestSuite) SetupTest() { + noopLog := coretesting.NewNopLogger() + + tree := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig()) + tree2 := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig()) + tree3 := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig()) + sc, err := commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree, testStoreKey2: tree2, testStoreKey3: tree3}, nil, dbm.NewMemDB(), noopLog) + s.Require().NoError(err) + + pm := pruning.NewManager(sc, nil) + rs, err := New(dbm.NewMemDB(), noopLog, sc, 
pm, nil, nil) + s.Require().NoError(err) + + s.rootStore = rs +} + +func (s *RootStoreTestSuite) newStoreWithPruneConfig(config *store.PruningOption) { + noopLog := coretesting.NewNopLogger() + + mdb := dbm.NewMemDB() + multiTrees := make(map[string]commitment.Tree) + for _, storeKey := range testStoreKeys { + prefixDB := dbm.NewPrefixDB(mdb, []byte(storeKey)) + multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, noopLog, iavl.DefaultConfig()) + } + + sc, err := commitment.NewCommitStore(multiTrees, nil, dbm.NewMemDB(), noopLog) + s.Require().NoError(err) + + pm := pruning.NewManager(sc, config) + + rs, err := New(dbm.NewMemDB(), noopLog, sc, pm, nil, nil) + s.Require().NoError(err) + + s.rootStore = rs +} + +func (s *RootStoreTestSuite) newStoreWithBackendMount(sc store.Committer, pm *pruning.Manager) { + noopLog := coretesting.NewNopLogger() + + rs, err := New(dbm.NewMemDB(), noopLog, sc, pm, nil, nil) + s.Require().NoError(err) + + s.rootStore = rs +} + +func (s *RootStoreTestSuite) TearDownTest() { + err := s.rootStore.Close() + s.Require().NoError(err) +} + +func (s *RootStoreTestSuite) TestGetStateCommitment() { + s.Require().Equal(s.rootStore.GetStateCommitment(), s.rootStore.(*Store).stateCommitment) +} + +func (s *RootStoreTestSuite) TestSetInitialVersion() { + initialVersion := uint64(5) + s.Require().NoError(s.rootStore.SetInitialVersion(initialVersion)) + + // perform an initial, empty commit + cs := corestore.NewChangeset(initialVersion) + cs.Add(testStoreKeyBytes, []byte("foo"), []byte("bar"), false) + _, err := s.rootStore.Commit(corestore.NewChangeset(initialVersion)) + s.Require().NoError(err) + + // check the latest version + lVersion, err := s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(initialVersion, lVersion) + + // set the initial version again + rInitialVersion := uint64(100) + s.Require().NoError(s.rootStore.SetInitialVersion(rInitialVersion)) + + // TODO fix version munging here + // perform the commit + cs = corestore.NewChangeset(initialVersion + 1) + cs.Add(testStoreKey2Bytes, []byte("foo"), []byte("bar"), false) + _, err = s.rootStore.Commit(cs) + s.Require().NoError(err) + lVersion, err = s.rootStore.GetLatestVersion() + s.Require().NoError(err) + // SetInitialVersion only works once + s.Require().NotEqual(rInitialVersion, lVersion) + s.Require().Equal(initialVersion+1, lVersion) +} + +func (s *RootStoreTestSuite) TestQuery() { + _, err := s.rootStore.Query([]byte{}, 1, []byte("foo"), true) + s.Require().Error(err) + + // write and commit a changeset + cs := corestore.NewChangeset(1) + cs.Add(testStoreKeyBytes, []byte("foo"), []byte("bar"), false) + + commitHash, err := s.rootStore.Commit(cs) + s.Require().NoError(err) + s.Require().NotNil(commitHash) + + // ensure the proof is non-nil for the corresponding version + result, err := s.rootStore.Query([]byte(testStoreKey), 1, []byte("foo"), true) + s.Require().NoError(err) + s.Require().NotNil(result.ProofOps) + s.Require().Equal([]byte("foo"), result.ProofOps[0].Key) +} + +func (s *RootStoreTestSuite) TestGetFallback() { + sc := s.rootStore.GetStateCommitment() + + // create a changeset and commit it to SC ONLY + cs := corestore.NewChangeset(1) + cs.Add(testStoreKeyBytes, []byte("foo"), []byte("bar"), false) + + err := sc.WriteChangeset(cs) + s.Require().NoError(err) + + _, err = sc.Commit(cs.Version) + s.Require().NoError(err) + + // ensure we can query for the key, which should fallback to SC + qResult, err := s.rootStore.Query(testStoreKeyBytes, 1, []byte("foo"), false) + 
s.Require().NoError(err) + s.Require().Equal([]byte("bar"), qResult.Value) + + // non-existent key + qResult, err = s.rootStore.Query(testStoreKeyBytes, 1, []byte("non_existent_key"), false) + s.Require().NoError(err) + s.Require().Nil(qResult.Value) +} + +func (s *RootStoreTestSuite) TestQueryProof() { + cs := corestore.NewChangeset(1) + // testStoreKey + cs.Add(testStoreKeyBytes, []byte("key1"), []byte("value1"), false) + cs.Add(testStoreKeyBytes, []byte("key2"), []byte("value2"), false) + // testStoreKey2 + cs.Add(testStoreKey2Bytes, []byte("key3"), []byte("value3"), false) + // testStoreKey3 + cs.Add(testStoreKey3Bytes, []byte("key4"), []byte("value4"), false) + + // commit + _, err := s.rootStore.Commit(cs) + s.Require().NoError(err) + + // query proof for testStoreKey + result, err := s.rootStore.Query(testStoreKeyBytes, 1, []byte("key1"), true) + s.Require().NoError(err) + s.Require().NotNil(result.ProofOps) + cInfo, err := s.rootStore.GetStateCommitment().GetCommitInfo(1) + s.Require().NoError(err) + storeHash := cInfo.GetStoreCommitID(testStoreKeyBytes).Hash + treeRoots, err := result.ProofOps[0].Run([][]byte{[]byte("value1")}) + s.Require().NoError(err) + s.Require().Equal(treeRoots[0], storeHash) + expRoots, err := result.ProofOps[1].Run([][]byte{storeHash}) + s.Require().NoError(err) + s.Require().Equal(expRoots[0], cInfo.Hash()) +} + +func (s *RootStoreTestSuite) TestLoadVersion() { + // write and commit a few changesets + for v := uint64(1); v <= 5; v++ { + val := fmt.Sprintf("val%03d", v) // val001, val002, ..., val005 + + cs := corestore.NewChangeset(v) + cs.Add(testStoreKeyBytes, []byte("key"), []byte(val), false) + + commitHash, err := s.rootStore.Commit(cs) + s.Require().NoError(err) + s.Require().NotNil(commitHash) + } + + // ensure the latest version is correct + latest, err := s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(uint64(5), latest) + + // attempt to load a non-existent version + err = s.rootStore.LoadVersion(6) + s.Require().Error(err) + + // attempt to load a previously committed version + err = s.rootStore.LoadVersion(3) + s.Require().NoError(err) + + // ensure the latest version is correct + latest, err = s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(uint64(3), latest) + + // query state and ensure values returned are based on the loaded version + _, ro, err := s.rootStore.StateLatest() + s.Require().NoError(err) + + reader, err := ro.GetReader(testStoreKeyBytes) + s.Require().NoError(err) + val, err := reader.Get([]byte("key")) + s.Require().NoError(err) + s.Require().Equal([]byte("val003"), val) + + // attempt to write and commit a few changesets + for v := 4; v <= 5; v++ { + val := fmt.Sprintf("overwritten_val%03d", v) // overwritten_val004, overwritten_val005 + + cs := corestore.NewChangeset(uint64(v)) + cs.Add(testStoreKeyBytes, []byte("key"), []byte(val), false) + + _, err := s.rootStore.Commit(cs) + s.Require().Error(err) + } + + // ensure the latest version is correct + latest, err = s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(uint64(3), latest) // should have stayed at 3 after failed commits + + // query state and ensure values returned are based on the loaded version + _, ro, err = s.rootStore.StateLatest() + s.Require().NoError(err) + + reader, err = ro.GetReader(testStoreKeyBytes) + s.Require().NoError(err) + val, err = reader.Get([]byte("key")) + s.Require().NoError(err) + s.Require().Equal([]byte("val003"), val) +} + +func (s *RootStoreTestSuite) 
TestLoadVersionForOverwriting() { + // write and commit a few changesets + for v := uint64(1); v <= 5; v++ { + val := fmt.Sprintf("val%03d", v) // val001, val002, ..., val005 + + cs := corestore.NewChangeset(v) + cs.Add(testStoreKeyBytes, []byte("key"), []byte(val), false) + + commitHash, err := s.rootStore.Commit(cs) + s.Require().NoError(err) + s.Require().NotNil(commitHash) + } + + // ensure the latest version is correct + latest, err := s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(uint64(5), latest) + + // attempt to load a non-existent version + err = s.rootStore.LoadVersionForOverwriting(6) + s.Require().Error(err) + + // attempt to load a previously committed version + err = s.rootStore.LoadVersionForOverwriting(3) + s.Require().NoError(err) + + // ensure the latest version is correct + latest, err = s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(uint64(3), latest) + + // query state and ensure values returned are based on the loaded version + _, ro, err := s.rootStore.StateLatest() + s.Require().NoError(err) + + reader, err := ro.GetReader(testStoreKeyBytes) + s.Require().NoError(err) + val, err := reader.Get([]byte("key")) + s.Require().NoError(err) + s.Require().Equal([]byte("val003"), val) + + // attempt to write and commit a few changesets + for v := 4; v <= 5; v++ { + val := fmt.Sprintf("overwritten_val%03d", v) // overwritten_val004, overwritten_val005 + + cs := corestore.NewChangeset(uint64(v)) + cs.Add(testStoreKeyBytes, []byte("key"), []byte(val), false) + + commitHash, err := s.rootStore.Commit(cs) + s.Require().NoError(err) + s.Require().NotNil(commitHash) + } + + // ensure the latest version is correct + latest, err = s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(uint64(5), latest) + + // query state and ensure values returned are based on the loaded version + _, ro, err = s.rootStore.StateLatest() + s.Require().NoError(err) + + reader, err = ro.GetReader(testStoreKeyBytes) + s.Require().NoError(err) + val, err = reader.Get([]byte("key")) + s.Require().NoError(err) + s.Require().Equal([]byte("overwritten_val005"), val) +} + +func (s *RootStoreTestSuite) TestCommit() { + lv, err := s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Zero(lv) + + // perform changes + cs := corestore.NewChangeset(1) + for i := 0; i < 100; i++ { + key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 + val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 + + cs.Add(testStoreKeyBytes, []byte(key), []byte(val), false) + } + + cHash, err := s.rootStore.Commit(cs) + s.Require().NoError(err) + s.Require().NotNil(cHash) + + // ensure latest version is updated + lv, err = s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(uint64(1), lv) + + // perform reads on the updated root store + _, ro, err := s.rootStore.StateLatest() + s.Require().NoError(err) + + for i := 0; i < 100; i++ { + key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 + val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 + + reader, err := ro.GetReader(testStoreKeyBytes) + s.Require().NoError(err) + result, err := reader.Get([]byte(key)) + s.Require().NoError(err) + + s.Require().Equal([]byte(val), result) + } +} + +func (s *RootStoreTestSuite) TestStateAt() { + // write keys over multiple versions + for v := uint64(1); v <= 5; v++ { + // perform changes + cs := corestore.NewChangeset(v) + for i := 0; i < 100; i++ { + key := fmt.Sprintf("key%03d", 
i) // key000, key001, ..., key099 + val := fmt.Sprintf("val%03d_%03d", i, v) // val000_1, val001_1, ..., val099_1 + + cs.Add(testStoreKeyBytes, []byte(key), []byte(val), false) + } + + // execute Commit + cHash, err := s.rootStore.Commit(cs) + s.Require().NoError(err) + s.Require().NotNil(cHash) + } + + lv, err := s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(uint64(5), lv) + + // ensure we can read state correctly at each version + for v := uint64(1); v <= 5; v++ { + ro, err := s.rootStore.StateAt(v) + s.Require().NoError(err) + + for i := 0; i < 100; i++ { + key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 + val := fmt.Sprintf("val%03d_%03d", i, v) // val000_1, val001_1, ..., val099_1 + + reader, err := ro.GetReader(testStoreKeyBytes) + s.Require().NoError(err) + isExist, err := reader.Has([]byte(key)) + s.Require().NoError(err) + s.Require().True(isExist) + result, err := reader.Get([]byte(key)) + s.Require().NoError(err) + s.Require().Equal([]byte(val), result) + } + + // non-existent key + reader, err := ro.GetReader(testStoreKey2Bytes) + s.Require().NoError(err) + isExist, err := reader.Has([]byte("key")) + s.Require().NoError(err) + s.Require().False(isExist) + v, err := reader.Get([]byte("key")) + s.Require().NoError(err) + s.Require().Nil(v) + } +} + +func (s *RootStoreTestSuite) TestPrune() { + // perform changes + cs := corestore.NewChangeset(1) + for i := 0; i < 10; i++ { + key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 + val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 + + cs.Add(testStoreKeyBytes, []byte(key), []byte(val), false) + } + + testCases := []struct { + name string + numVersions int64 + po store.PruningOption + deleted []uint64 + saved []uint64 + }{ + {"prune nothing", 10, store.PruningOption{ + KeepRecent: 0, + Interval: 0, + }, nil, []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, + {"prune everything", 12, store.PruningOption{ + KeepRecent: 1, + Interval: 10, + }, []uint64{1, 2, 3, 4, 5, 6, 7, 8}, []uint64{9, 10, 11, 12}}, + {"prune some; no batch", 10, store.PruningOption{ + KeepRecent: 2, + Interval: 1, + }, []uint64{1, 2, 3, 4, 6, 5, 7}, []uint64{8, 9, 10}}, + {"prune some; small batch", 10, store.PruningOption{ + KeepRecent: 2, + Interval: 3, + }, []uint64{1, 2, 3, 4, 5, 6}, []uint64{7, 8, 9, 10}}, + {"prune some; large batch", 10, store.PruningOption{ + KeepRecent: 2, + Interval: 11, + }, nil, []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, + } + + for _, tc := range testCases { + + s.newStoreWithPruneConfig(&tc.po) + + // write keys over multiple versions + for i := int64(0); i < tc.numVersions; i++ { + // execute Commit + cs.Version = uint64(i + 1) + cHash, err := s.rootStore.Commit(cs) + s.Require().NoError(err) + s.Require().NotNil(cHash) + } + + for _, v := range tc.saved { + ro, err := s.rootStore.StateAt(v) + s.Require().NoError(err, "expected no error when loading height %d at test %s", v, tc.name) + + for i := 0; i < 10; i++ { + key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 + val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 + + reader, err := ro.GetReader(testStoreKeyBytes) + s.Require().NoError(err) + result, err := reader.Get([]byte(key)) + s.Require().NoError(err) + s.Require().Equal([]byte(val), result, "value should be equal for test: %s", tc.name) + } + } + + for _, v := range tc.deleted { + var err error + checkErr := func() bool { + if _, err = s.rootStore.StateAt(v); err != nil { + return true + } + return false + } + // wait for async pruning 
process to finish + s.Require().Eventually(checkErr, 2*time.Second, 100*time.Millisecond) + s.Require().Error(err, "expected error when loading height %d at test %s", v, tc.name) + } + } +} + +func (s *RootStoreTestSuite) TestMultiStore_Pruning_SameHeightsTwice() { + // perform changes + cs := corestore.NewChangeset(1) + cs.Add(testStoreKeyBytes, []byte("key"), []byte("val"), false) + + const ( + numVersions uint64 = 10 + keepRecent uint64 = 1 + interval uint64 = 10 + ) + + s.newStoreWithPruneConfig(&store.PruningOption{ + KeepRecent: keepRecent, + Interval: interval, + }) + s.Require().NoError(s.rootStore.LoadLatestVersion()) + + for i := uint64(0); i < numVersions; i++ { + // execute Commit + cs.Version = i + 1 + cHash, err := s.rootStore.Commit(cs) + s.Require().NoError(err) + s.Require().NotNil(cHash) + } + + latestVer, err := s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(numVersions, latestVer) + + for v := uint64(1); v < numVersions-keepRecent; v++ { + var err error + checkErr := func() bool { + if _, err = s.rootStore.StateAt(v); err != nil { + return true + } + return false + } + // wait for async pruning process to finish + s.Require().Eventually(checkErr, 2*time.Second, 100*time.Millisecond, "expected no error when loading height: %d", v) + } + + for v := (numVersions - keepRecent); v < numVersions; v++ { + _, err := s.rootStore.StateAt(v) + s.Require().NoError(err, "expected no error when loading height: %d", v) + } + + // Get latest + err = s.rootStore.LoadVersion(numVersions) + s.Require().NoError(err) + + // Test pruning the same heights again + cs.Version++ + _, err = s.rootStore.Commit(cs) + s.Require().NoError(err) + + // Ensure that can commit one more height with no panic + cs.Version++ + _, err = s.rootStore.Commit(cs) + s.Require().NoError(err) +} + +func (s *RootStoreTestSuite) TestMultiStore_PruningRestart() { + // perform changes + cs := corestore.NewChangeset(1) + cs.Add(testStoreKeyBytes, []byte("key"), []byte("val"), false) + + pruneOpt := &store.PruningOption{ + KeepRecent: 2, + Interval: 11, + } + + noopLog := coretesting.NewNopLogger() + + mdb1 := dbm.NewMemDB() + mdb2 := dbm.NewMemDB() + + tree := iavl.NewIavlTree(mdb1, noopLog, iavl.DefaultConfig()) + sc, err := commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree}, nil, mdb2, noopLog) + s.Require().NoError(err) + + pm := pruning.NewManager(sc, pruneOpt) + + s.newStoreWithBackendMount(sc, pm) + s.Require().NoError(s.rootStore.LoadLatestVersion()) + + // Commit enough to build up heights to prune, where on the next block we should + // batch delete. 
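+	// Editor's note (illustrative arithmetic): with KeepRecent=2 and Interval=11,
+	// ShouldPrune first fires at version 11 and returns pruneTo = 11 - 2 - 1 = 8,
+	// matching the expected actualHeightToPrune asserted below.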
+	for i := uint64(1); i <= 10; i++ {
+		// execute Commit
+		cs.Version = i
+		cHash, err := s.rootStore.Commit(cs)
+		s.Require().NoError(err)
+		s.Require().NotNil(cHash)
+	}
+
+	latestVer, err := s.rootStore.GetLatestVersion()
+	s.Require().NoError(err)
+
+	ok, actualHeightToPrune := pruneOpt.ShouldPrune(latestVer)
+	s.Require().False(ok)
+	s.Require().Equal(uint64(0), actualHeightToPrune)
+
+	tree = iavl.NewIavlTree(mdb1, noopLog, iavl.DefaultConfig())
+	sc, err = commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree}, nil, mdb2, noopLog)
+	s.Require().NoError(err)
+
+	pm = pruning.NewManager(sc, pruneOpt)
+
+	s.newStoreWithBackendMount(sc, pm)
+	err = s.rootStore.LoadLatestVersion()
+	s.Require().NoError(err)
+
+	latestVer, err = s.rootStore.GetLatestVersion()
+	s.Require().NoError(err)
+
+	ok, actualHeightToPrune = pruneOpt.ShouldPrune(latestVer)
+	s.Require().False(ok)
+	s.Require().Equal(uint64(0), actualHeightToPrune)
+
+	// commit one more block and ensure the heights have been pruned
+	// execute Commit
+	cs.Version++
+	cHash, err := s.rootStore.Commit(cs)
+	s.Require().NoError(err)
+	s.Require().NotNil(cHash)
+
+	latestVer, err = s.rootStore.GetLatestVersion()
+	s.Require().NoError(err)
+
+	ok, actualHeightToPrune = pruneOpt.ShouldPrune(latestVer)
+	s.Require().True(ok)
+	s.Require().Equal(uint64(8), actualHeightToPrune)
+
+	for v := uint64(1); v <= actualHeightToPrune; v++ {
+		checkErr := func() bool {
+			if _, err = s.rootStore.StateAt(v); err != nil {
+				return true
+			}
+			return false
+		}
+		// wait for async pruning process to finish
+		s.Require().Eventually(checkErr, 10*time.Second, 1*time.Second, "expected error when loading height: %d", v)
+	}
+}
+
+func (s *RootStoreTestSuite) TestMultiStoreRestart() {
+	noopLog := coretesting.NewNopLogger()
+
+	mdb1 := dbm.NewMemDB()
+	mdb2 := dbm.NewMemDB()
+	multiTrees := make(map[string]commitment.Tree)
+	for _, storeKey := range testStoreKeys {
+		prefixDB := dbm.NewPrefixDB(mdb1, []byte(storeKey))
+		multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, noopLog, iavl.DefaultConfig())
+	}
+
+	sc, err := commitment.NewCommitStore(multiTrees, nil, mdb2, noopLog)
+	s.Require().NoError(err)
+
+	pm := pruning.NewManager(sc, nil)
+
+	s.newStoreWithBackendMount(sc, pm)
+	s.Require().NoError(s.rootStore.LoadLatestVersion())
+
+	// perform changes
+	for i := 1; i < 3; i++ {
+		cs := corestore.NewChangeset(uint64(i))
+		key := fmt.Sprintf("key%03d", i)         // key001, key002
+		val := fmt.Sprintf("val%03d_%03d", i, 1) // val001_001, val002_001
+
+		cs.Add(testStoreKeyBytes, []byte(key), []byte(val), false)
+
+		key = fmt.Sprintf("key%03d", i)         // key001, key002
+		val = fmt.Sprintf("val%03d_%03d", i, 2) // val001_002, val002_002
+
+		cs.Add(testStoreKey2Bytes, []byte(key), []byte(val), false)
+
+		key = fmt.Sprintf("key%03d", i)         // key001, key002
+		val = fmt.Sprintf("val%03d_%03d", i, 3) // val001_003, val002_003
+
+		cs.Add(testStoreKey3Bytes, []byte(key), []byte(val), false)
+
+		// execute Commit
+		cHash, err := s.rootStore.Commit(cs)
+		s.Require().NoError(err)
+		s.Require().NotNil(cHash)
+
+		latestVer, err := s.rootStore.GetLatestVersion()
+		s.Require().NoError(err)
+		s.Require().Equal(uint64(i), latestVer)
+	}
+
+	// more changes
+	cs1 := corestore.NewChangeset(3)
+	key := fmt.Sprintf("key%03d", 3)         // key003
+	val := fmt.Sprintf("val%03d_%03d", 3, 1) // val003_001
+
+	cs1.Add(testStoreKeyBytes, []byte(key), []byte(val), false)
+
+	key = fmt.Sprintf("key%03d", 3)         // key003
+	val = fmt.Sprintf("val%03d_%03d", 3, 2) // val003_002
+
+	cs1.Add(testStoreKey2Bytes, []byte(key), []byte(val), false)
+
+	// execute Commit
+	cHash, err := s.rootStore.Commit(cs1)
+	s.Require().NoError(err)
+	s.Require().NotNil(cHash)
+
+	latestVer, err := s.rootStore.GetLatestVersion()
+	s.Require().NoError(err)
+	s.Require().Equal(uint64(3), latestVer)
+
+	cs2 := corestore.NewChangeset(4)
+	key = fmt.Sprintf("key%03d", 4)         // key004
+	val = fmt.Sprintf("val%03d_%03d", 4, 3) // val004_003
+
+	cs2.Add(testStoreKey3Bytes, []byte(key), []byte(val), false)
+
+	// execute Commit
+	cHash, err = s.rootStore.Commit(cs2)
+	s.Require().NoError(err)
+	s.Require().NotNil(cHash)
+
+	latestVer, err = s.rootStore.GetLatestVersion()
+	s.Require().NoError(err)
+	s.Require().Equal(uint64(4), latestVer)
+
+	_, ro1, err := s.rootStore.StateLatest()
+	s.Require().Nil(err)
+	reader1, err := ro1.GetReader(testStoreKeyBytes)
+	s.Require().NoError(err)
+	result1, err := reader1.Get([]byte(fmt.Sprintf("key%03d", 3)))
+	s.Require().NoError(err)
+	s.Require().Equal([]byte(fmt.Sprintf("val%03d_%03d", 3, 1)), result1, "value should be equal")
+
+	// "restart"
+	multiTrees = make(map[string]commitment.Tree)
+	for _, storeKey := range testStoreKeys {
+		prefixDB := dbm.NewPrefixDB(mdb1, []byte(storeKey))
+		multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, noopLog, iavl.DefaultConfig())
+	}
+
+	sc, err = commitment.NewCommitStore(multiTrees, nil, mdb2, noopLog)
+	s.Require().NoError(err)
+
+	pm = pruning.NewManager(sc, nil)
+
+	s.newStoreWithBackendMount(sc, pm)
+	err = s.rootStore.LoadLatestVersion()
+	s.Require().Nil(err)
+
+	latestVer, ro, err := s.rootStore.StateLatest()
+	s.Require().Nil(err)
+	s.Require().Equal(uint64(4), latestVer)
+	reader, err := ro.GetReader(testStoreKeyBytes)
+	s.Require().NoError(err)
+	result, err := reader.Get([]byte(fmt.Sprintf("key%03d", 3)))
+	s.Require().NoError(err)
+	s.Require().Equal([]byte(fmt.Sprintf("val%03d_%03d", 3, 1)), result, "value should be equal")
+
+	reader, err = ro.GetReader(testStoreKey2Bytes)
+	s.Require().NoError(err)
+	result, err = reader.Get([]byte(fmt.Sprintf("key%03d", 2)))
+	s.Require().NoError(err)
+	s.Require().Equal([]byte(fmt.Sprintf("val%03d_%03d", 2, 2)), result, "value should be equal")
+
+	reader, err = ro.GetReader(testStoreKey3Bytes)
+	s.Require().NoError(err)
+	result, err = reader.Get([]byte(fmt.Sprintf("key%03d", 4)))
+	s.Require().NoError(err)
+	s.Require().Equal([]byte(fmt.Sprintf("val%03d_%03d", 4, 3)), result, "value should be equal")
+}
+
+func (s *RootStoreTestSuite) TestHashStableWithEmptyCommitAndRestart() {
+	err := s.rootStore.LoadLatestVersion()
+	s.Require().NoError(err)
+
+	emptyHash := sha256.Sum256([]byte{})
+	appHash := emptyHash[:]
+	commitID := proof.CommitID{Hash: appHash}
+	lastCommitID, err := s.rootStore.LastCommitID()
+	s.Require().Nil(err)
+
+	// the hash of a store with no commits is the root hash of a tree with empty hashes as leaves.
+	// it should not be equal to an empty hash.
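+	// For reference: sha256 of the empty input is
+	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855;
+	// the assertion below checks that the initial root hash is not this value.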
+ s.Require().NotEqual(commitID, lastCommitID) + + cs := corestore.NewChangeset(1) + cs.Add(testStoreKeyBytes, []byte("key"), []byte("val"), false) + + cHash, err := s.rootStore.Commit(cs) + s.Require().Nil(err) + s.Require().NotNil(cHash) + latestVersion, err := s.rootStore.GetLatestVersion() + hash := cHash + s.Require().Nil(err) + s.Require().Equal(uint64(1), latestVersion) + + // make an empty commit, it should update version, but not affect hash + cHash, err = s.rootStore.Commit(corestore.NewChangeset(2)) + s.Require().Nil(err) + s.Require().NotNil(cHash) + latestVersion, err = s.rootStore.GetLatestVersion() + s.Require().Nil(err) + s.Require().Equal(uint64(2), latestVersion) + s.Require().Equal(hash, cHash) + + // reload the store + s.Require().NoError(s.rootStore.LoadLatestVersion()) + lastCommitID, err = s.rootStore.LastCommitID() + s.Require().NoError(err) + s.Require().Equal(lastCommitID.Hash, hash) +} diff --git a/store/v2/root/upgrade_test.go b/store/v2/root/upgrade_test.go new file mode 100644 index 000000000000..1bcee4149b48 --- /dev/null +++ b/store/v2/root/upgrade_test.go @@ -0,0 +1,151 @@ +package root + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/suite" + + corestore "cosmossdk.io/core/store" + coretesting "cosmossdk.io/core/testing" + "cosmossdk.io/log" + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/commitment" + "cosmossdk.io/store/v2/commitment/iavl" + dbm "cosmossdk.io/store/v2/db" + "cosmossdk.io/store/v2/pruning" +) + +type UpgradeStoreTestSuite struct { + suite.Suite + + commitDB corestore.KVStoreWithBatch + rootStore store.RootStore +} + +func TestUpgradeStoreTestSuite(t *testing.T) { + suite.Run(t, &UpgradeStoreTestSuite{}) +} + +func (s *UpgradeStoreTestSuite) SetupTest() { + testLog := log.NewTestLogger(s.T()) + nopLog := coretesting.NewNopLogger() + + s.commitDB = dbm.NewMemDB() + multiTrees := make(map[string]commitment.Tree) + newTreeFn := func(storeKey string) (commitment.Tree, error) { + prefixDB := dbm.NewPrefixDB(s.commitDB, []byte(storeKey)) + return iavl.NewIavlTree(prefixDB, nopLog, iavl.DefaultConfig()), nil + } + for _, storeKey := range storeKeys { + multiTrees[storeKey], _ = newTreeFn(storeKey) + } + + sc, err := commitment.NewCommitStore(multiTrees, nil, s.commitDB, testLog) + s.Require().NoError(err) + pm := pruning.NewManager(sc, nil) + s.rootStore, err = New(s.commitDB, testLog, sc, pm, nil, nil) + s.Require().NoError(err) + + // commit changeset + toVersion := uint64(20) + keyCount := 10 + for version := uint64(1); version <= toVersion; version++ { + cs := corestore.NewChangeset(version) + for _, storeKey := range storeKeys { + for i := 0; i < keyCount; i++ { + cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) + } + } + _, err = s.rootStore.Commit(cs) + s.Require().NoError(err) + } +} + +func (s *UpgradeStoreTestSuite) loadWithUpgrades(upgrades *corestore.StoreUpgrades) { + testLog := log.NewTestLogger(s.T()) + nopLog := coretesting.NewNopLogger() + + // create a new commitment store + multiTrees := make(map[string]commitment.Tree) + oldTrees := make(map[string]commitment.Tree) + newTreeFn := func(storeKey string) (commitment.Tree, error) { + prefixDB := dbm.NewPrefixDB(s.commitDB, []byte(storeKey)) + return iavl.NewIavlTree(prefixDB, nopLog, iavl.DefaultConfig()), nil + } + for _, storeKey := range storeKeys { + multiTrees[storeKey], _ = newTreeFn(storeKey) + } + for _, added := range upgrades.Added { + multiTrees[added], _ = newTreeFn(added) + 
} + for _, deleted := range upgrades.Deleted { + oldTrees[deleted], _ = newTreeFn(deleted) + } + + sc, err := commitment.NewCommitStore(multiTrees, oldTrees, s.commitDB, testLog) + s.Require().NoError(err) + pm := pruning.NewManager(sc, nil) + s.rootStore, err = New(s.commitDB, testLog, sc, pm, nil, nil) + s.Require().NoError(err) +} + +func (s *UpgradeStoreTestSuite) TestLoadVersionAndUpgrade() { + // upgrade store keys + upgrades := &corestore.StoreUpgrades{ + Added: []string{"newStore1", "newStore2"}, + Deleted: []string{"store3"}, + } + s.loadWithUpgrades(upgrades) + + // load the store with the upgrades + v, err := s.rootStore.GetLatestVersion() + s.Require().NoError(err) + err = s.rootStore.(store.UpgradeableStore).LoadVersionAndUpgrade(v, upgrades) + s.Require().NoError(err) + + keyCount := 10 + // check old store keys are queryable + oldStoreKeys := []string{"store1", "store2", "store3"} + for _, storeKey := range oldStoreKeys { + for version := uint64(1); version <= v; version++ { + for i := 0; i < keyCount; i++ { + proof, err := s.rootStore.Query([]byte(storeKey), version, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) + s.Require().NoError(err) + s.Require().NotNil(proof) + } + } + } + + // commit changeset + newStoreKeys := []string{"newStore1", "newStore2"} + toVersion := uint64(40) + for version := v + 1; version <= toVersion; version++ { + cs := corestore.NewChangeset(version) + for _, storeKey := range newStoreKeys { + for i := 0; i < keyCount; i++ { + cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) + } + } + _, err = s.rootStore.Commit(cs) + s.Require().NoError(err) + } + + // check new store keys are queryable + for _, storeKey := range newStoreKeys { + for version := v + 1; version <= toVersion; version++ { + for i := 0; i < keyCount; i++ { + _, err := s.rootStore.Query([]byte(storeKey), version, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) + s.Require().NoError(err) + } + } + } + + // check the original store key is queryable + for version := uint64(1); version <= toVersion; version++ { + for i := 0; i < keyCount; i++ { + _, err := s.rootStore.Query([]byte("store2"), version, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) + s.Require().NoError(err) + } + } +} diff --git a/store/v2/snapshots/helpers_test.go b/store/v2/snapshots/helpers_test.go new file mode 100644 index 000000000000..40090c896817 --- /dev/null +++ b/store/v2/snapshots/helpers_test.go @@ -0,0 +1,282 @@ +package snapshots_test + +import ( + "bufio" + "bytes" + "compress/zlib" + "crypto/sha256" + "errors" + "fmt" + "io" + "testing" + "time" + + protoio "github.com/cosmos/gogoproto/io" + "github.com/stretchr/testify/require" + + coretesting "cosmossdk.io/core/testing" + "cosmossdk.io/store/v2/snapshots" + snapshotstypes "cosmossdk.io/store/v2/snapshots/types" +) + +func checksums(slice [][]byte) [][]byte { + hasher := sha256.New() + checksums := make([][]byte, len(slice)) + for i, chunk := range slice { + hasher.Write(chunk) + checksums[i] = hasher.Sum(nil) + hasher.Reset() + } + return checksums +} + +func hash(chunks [][]byte) []byte { + hasher := sha256.New() + for _, chunk := range chunks { + hasher.Write(chunk) + } + return hasher.Sum(nil) +} + +func makeChunks(chunks [][]byte) <-chan io.ReadCloser { + ch := make(chan io.ReadCloser, len(chunks)) + for _, chunk := range chunks { + ch <- io.NopCloser(bytes.NewReader(chunk)) + } + close(ch) + return ch +} + +func readChunks(chunks <-chan io.ReadCloser) [][]byte { 
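+	// drain the channel, reading each chunk fully into memory (test helper)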
+	bodies := [][]byte{}
+	for chunk := range chunks {
+		body, err := io.ReadAll(chunk)
+		if err != nil {
+			panic(err)
+		}
+		bodies = append(bodies, body)
+	}
+	return bodies
+}
+
+// snapshotItems serializes an array of byte slices as SnapshotItem_ExtensionPayload items and returns the chunks.
+func snapshotItems(items [][]byte, ext snapshots.ExtensionSnapshotter) [][]byte {
+	// copy the same parameters from the code
+	snapshotChunkSize := uint64(10e6)
+	snapshotBufferSize := int(snapshotChunkSize)
+
+	ch := make(chan io.ReadCloser)
+	go func() {
+		chunkWriter := snapshots.NewChunkWriter(ch, snapshotChunkSize)
+		bufWriter := bufio.NewWriterSize(chunkWriter, snapshotBufferSize)
+		zWriter, _ := zlib.NewWriterLevel(bufWriter, 7)
+		protoWriter := protoio.NewDelimitedWriter(zWriter)
+		for _, item := range items {
+			_ = snapshotstypes.WriteExtensionPayload(protoWriter, item)
+		}
+		// write extension metadata
+		_ = protoWriter.WriteMsg(&snapshotstypes.SnapshotItem{
+			Item: &snapshotstypes.SnapshotItem_Extension{
+				Extension: &snapshotstypes.SnapshotExtensionMeta{
+					Name:   ext.SnapshotName(),
+					Format: ext.SnapshotFormat(),
+				},
+			},
+		})
+		_ = ext.SnapshotExtension(0, func(payload []byte) error {
+			return snapshotstypes.WriteExtensionPayload(protoWriter, payload)
+		})
+		_ = protoWriter.Close()
+		_ = bufWriter.Flush()
+		_ = chunkWriter.Close()
+	}()
+
+	var chunks [][]byte
+	for chunkBody := range ch {
+		chunk, err := io.ReadAll(chunkBody)
+		if err != nil {
+			panic(err)
+		}
+		chunks = append(chunks, chunk)
+	}
+
+	return chunks
+}
+
+type mockCommitSnapshotter struct {
+	items [][]byte
+}
+
+func (m *mockCommitSnapshotter) Restore(
+	height uint64, format uint32, protoReader protoio.Reader,
+) (snapshotstypes.SnapshotItem, error) {
+	if format == 0 {
+		return snapshotstypes.SnapshotItem{}, snapshotstypes.ErrUnknownFormat
+	}
+	if m.items != nil {
+		return snapshotstypes.SnapshotItem{}, errors.New("already has contents")
+	}
+
+	var item snapshotstypes.SnapshotItem
+	m.items = [][]byte{}
+	for {
+		item.Reset()
+		err := protoReader.ReadMsg(&item)
+		if errors.Is(err, io.EOF) {
+			break
+		} else if err != nil {
+			return snapshotstypes.SnapshotItem{}, fmt.Errorf("invalid protobuf message: %w", err)
+		}
+		payload := item.GetExtensionPayload()
+		if payload == nil {
+			break
+		}
+		m.items = append(m.items, payload.Payload)
+	}
+
+	return item, nil
+}
+
+func (m *mockCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error {
+	for _, item := range m.items {
+		if err := snapshotstypes.WriteExtensionPayload(protoWriter, item); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (m *mockCommitSnapshotter) SnapshotFormat() uint32 {
+	return snapshotstypes.CurrentFormat
+}
+
+func (m *mockCommitSnapshotter) SupportedFormats() []uint32 {
+	return []uint32{snapshotstypes.CurrentFormat}
+}
+
+type mockErrorCommitSnapshotter struct{}
+
+var _ snapshots.CommitSnapshotter = (*mockErrorCommitSnapshotter)(nil)
+
+func (m *mockErrorCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error {
+	return errors.New("mock snapshot error")
+}
+
+func (m *mockErrorCommitSnapshotter) Restore(
+	height uint64, format uint32, protoReader protoio.Reader,
+) (snapshotstypes.SnapshotItem, error) {
+	return snapshotstypes.SnapshotItem{}, errors.New("mock restore error")
+}
+
+func (m *mockErrorCommitSnapshotter) SnapshotFormat() uint32 {
+	return snapshotstypes.CurrentFormat
+}
+
+func (m *mockErrorCommitSnapshotter) SupportedFormats() []uint32 {
+	return []uint32{snapshotstypes.CurrentFormat}
+}
+
+//
setupBusyManager creates a manager with an empty store that is busy creating a snapshot at height 1. +// The snapshot will complete when the returned closer is called. +func setupBusyManager(t *testing.T) *snapshots.Manager { + t.Helper() + store, err := snapshots.NewStore(t.TempDir()) + require.NoError(t, err) + hung := newHungCommitSnapshotter() + mgr := snapshots.NewManager(store, opts, hung, nil, coretesting.NewNopLogger()) + + // Channel to ensure the test doesn't finish until the goroutine is done. + // Without this, there are intermittent test failures about + // the t.TempDir() cleanup failing due to the directory not being empty. + done := make(chan struct{}) + + go func() { + defer close(done) + _, err := mgr.Create(1) + require.NoError(t, err) + }() + time.Sleep(10 * time.Millisecond) + + t.Cleanup(func() { + <-done + }) + + t.Cleanup(hung.Close) + + return mgr +} + +// hungCommitSnapshotter can be used to test operations in progress. Call close to end the snapshot. +type hungCommitSnapshotter struct { + ch chan struct{} +} + +func newHungCommitSnapshotter() *hungCommitSnapshotter { + return &hungCommitSnapshotter{ + ch: make(chan struct{}), + } +} + +func (m *hungCommitSnapshotter) Close() { + close(m.ch) +} + +func (m *hungCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error { + <-m.ch + return nil +} + +func (m *hungCommitSnapshotter) Restore( + height uint64, format uint32, protoReader protoio.Reader, +) (snapshotstypes.SnapshotItem, error) { + panic("not implemented") +} + +type extSnapshotter struct { + state []uint64 +} + +func newExtSnapshotter(count int) *extSnapshotter { + state := make([]uint64, 0, count) + for i := 0; i < count; i++ { + state = append(state, uint64(i)) + } + return &extSnapshotter{ + state, + } +} + +func (s *extSnapshotter) SnapshotName() string { + return "mock" +} + +func (s *extSnapshotter) SnapshotFormat() uint32 { + return 1 +} + +func (s *extSnapshotter) SupportedFormats() []uint32 { + return []uint32{1} +} + +func (s *extSnapshotter) SnapshotExtension(height uint64, payloadWriter snapshots.ExtensionPayloadWriter) error { + for _, i := range s.state { + if err := payloadWriter(snapshotstypes.Uint64ToBigEndian(i)); err != nil { + return err + } + } + return nil +} + +func (s *extSnapshotter) RestoreExtension(height uint64, format uint32, payloadReader snapshots.ExtensionPayloadReader) error { + for { + payload, err := payloadReader() + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return err + } + s.state = append(s.state, snapshotstypes.BigEndianToUint64(payload)) + } + // finalize restoration + return nil +} diff --git a/store/v2/snapshots/manager.go b/store/v2/snapshots/manager.go new file mode 100644 index 000000000000..a0d7895513d8 --- /dev/null +++ b/store/v2/snapshots/manager.go @@ -0,0 +1,591 @@ +package snapshots + +import ( + "bytes" + "crypto/sha256" + "errors" + "fmt" + "io" + "math" + "os" + "sort" + "sync" + + corelog "cosmossdk.io/core/log" + errorsmod "cosmossdk.io/errors/v2" + storeerrors "cosmossdk.io/store/v2/errors" + "cosmossdk.io/store/v2/snapshots/types" +) + +// Manager manages snapshot and restore operations for an app, making sure only a single +// long-running operation is in progress at any given time, and provides convenience methods +// mirroring the ABCI interface. +// +// Although the ABCI interface (and this manager) passes chunks as byte slices, the internal +// snapshot/restore APIs use IO streams (i.e. chan io.ReadCloser), for two reasons: +// +// 1. 
In the future, ABCI should support streaming. Consider e.g. InitChain during chain
+// upgrades, which currently passes the entire chain state as an in-memory byte slice.
+// https://github.com/tendermint/tendermint/issues/5184
+//
+// 2. io.ReadCloser streams automatically propagate IO errors, and can pass arbitrary
+// errors via io.Pipe.CloseWithError().
+type Manager struct {
+	extensions map[string]ExtensionSnapshotter
+	// store is the snapshot store where all completed snapshots are persisted.
+	store *Store
+	opts  SnapshotOptions
+	// commitSnapshotter is the snapshotter for the commitment state.
+	commitSnapshotter CommitSnapshotter
+
+	logger corelog.Logger
+
+	mtx               sync.Mutex
+	operation         operation
+	chRestore         chan<- uint32
+	chRestoreDone     <-chan restoreDone
+	restoreSnapshot   *types.Snapshot
+	restoreChunkIndex uint32
+}
+
+// operation represents a Manager operation. Only one operation can be in progress at a time.
+type operation string
+
+// restoreDone represents the result of a restore operation.
+type restoreDone struct {
+	complete bool  // if true, restore completed successfully (not prematurely)
+	err      error // if non-nil, restore errored
+}
+
+const (
+	opNone     operation = ""
+	opSnapshot operation = "snapshot"
+	opPrune    operation = "prune"
+	opRestore  operation = "restore"
+
+	chunkBufferSize                 = 4
+	chunkIDBufferSize               = 1024
+	defaultStorageChannelBufferSize = 1024
+
+	snapshotMaxItemSize = int(64e6) // SDK has no key/value size limit, so we set an arbitrary limit
+)
+
+var ErrOptsZeroSnapshotInterval = errors.New("snapshot-interval must not be 0")
+
+// NewManager creates a new manager.
+func NewManager(store *Store, opts SnapshotOptions, commitSnapshotter CommitSnapshotter, extensions map[string]ExtensionSnapshotter, logger corelog.Logger) *Manager {
+	if extensions == nil {
+		extensions = map[string]ExtensionSnapshotter{}
+	}
+	return &Manager{
+		store:             store,
+		opts:              opts,
+		commitSnapshotter: commitSnapshotter,
+		extensions:        extensions,
+		logger:            logger,
+	}
+}
+
+// RegisterExtensions registers extension snapshotters with the manager.
+func (m *Manager) RegisterExtensions(extensions ...ExtensionSnapshotter) error {
+	if m.extensions == nil {
+		m.extensions = make(map[string]ExtensionSnapshotter, len(extensions))
+	}
+	for _, extension := range extensions {
+		name := extension.SnapshotName()
+		if _, ok := m.extensions[name]; ok {
+			return fmt.Errorf("duplicated snapshotter name: %s", name)
+		}
+		if !IsFormatSupported(extension, extension.SnapshotFormat()) {
+			return fmt.Errorf("snapshotter %s does not support its own snapshot format %d", name, extension.SnapshotFormat())
+		}
+		m.extensions[name] = extension
+	}
+	return nil
+}
+
+// begin starts an operation, or errors if one is in progress. It manages the mutex itself.
+func (m *Manager) begin(op operation) error {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	return m.beginLocked(op)
+}
+
+// beginLocked begins an operation while already holding the mutex.
+func (m *Manager) beginLocked(op operation) error {
+	if op == opNone {
+		return errorsmod.Wrap(storeerrors.ErrLogic, "can't begin a none operation")
+	}
+	if m.operation != opNone {
+		return errorsmod.Wrapf(storeerrors.ErrConflict, "a %v operation is in progress", m.operation)
+	}
+	m.operation = op
+	return nil
+}
+
+// end ends the current operation.
+func (m *Manager) end() {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	m.endLocked()
+}
+
+// endLocked ends the current operation while already holding the mutex.
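+//
+// Usage sketch (illustrative only; this mirrors how the exported methods on
+// Manager drive the operation lifecycle):
+//
+//	if err := m.begin(opSnapshot); err != nil {
+//		return err
+//	}
+//	defer m.end()
+//	// ... long-running snapshot work ...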
+func (m *Manager) endLocked() {
+	m.operation = opNone
+	if m.chRestore != nil {
+		close(m.chRestore)
+		m.chRestore = nil
+	}
+	m.chRestoreDone = nil
+	m.restoreSnapshot = nil
+	m.restoreChunkIndex = 0
+}
+
+// GetInterval returns the snapshot interval represented in heights.
+func (m *Manager) GetInterval() uint64 {
+	return m.opts.Interval
+}
+
+// GetKeepRecent returns the snapshot keep-recent setting represented in heights.
+func (m *Manager) GetKeepRecent() uint32 {
+	return m.opts.KeepRecent
+}
+
+// GetSnapshotBlockRetentionHeights returns the number of heights needed
+// for block retention. Blocks since the oldest available snapshot must be
+// available for state sync nodes to catch up (oldest because a node may be
+// restoring an old snapshot while a new snapshot was taken).
+func (m *Manager) GetSnapshotBlockRetentionHeights() int64 {
+	return int64(m.opts.Interval * uint64(m.opts.KeepRecent))
+}
+
+// Create creates a snapshot and returns its metadata.
+func (m *Manager) Create(height uint64) (*types.Snapshot, error) {
+	if m == nil {
+		return nil, errorsmod.Wrap(storeerrors.ErrLogic, "Snapshot Manager is nil")
+	}
+
+	err := m.begin(opSnapshot)
+	if err != nil {
+		return nil, err
+	}
+	defer m.end()
+
+	latest, err := m.store.GetLatest()
+	if err != nil {
+		return nil, errorsmod.Wrap(err, "failed to examine latest snapshot")
+	}
+	if latest != nil && latest.Height >= height {
+		return nil, errorsmod.Wrapf(storeerrors.ErrConflict,
+			"a more recent snapshot already exists at height %v", latest.Height)
+	}
+
+	// Spawn goroutine to generate snapshot chunks and pass their io.ReadClosers through a channel
+	ch := make(chan io.ReadCloser)
+	go m.createSnapshot(height, ch)
+
+	return m.store.Save(height, types.CurrentFormat, ch)
+}
+
+// createSnapshot does the heavy work of snapshotting after the request validations are done;
+// the produced chunks are written to the channel.
+func (m *Manager) createSnapshot(height uint64, ch chan<- io.ReadCloser) {
+	streamWriter := NewStreamWriter(ch)
+	if streamWriter == nil {
+		return
+	}
+	defer func() {
+		if err := streamWriter.Close(); err != nil {
+			streamWriter.CloseWithError(err)
+		}
+	}()
+
+	if err := m.commitSnapshotter.Snapshot(height, streamWriter); err != nil {
+		streamWriter.CloseWithError(err)
+		return
+	}
+	for _, name := range m.sortedExtensionNames() {
+		extension := m.extensions[name]
+		// write extension metadata
+		err := streamWriter.WriteMsg(&types.SnapshotItem{
+			Item: &types.SnapshotItem_Extension{
+				Extension: &types.SnapshotExtensionMeta{
+					Name:   name,
+					Format: extension.SnapshotFormat(),
+				},
+			},
+		})
+		if err != nil {
+			streamWriter.CloseWithError(err)
+			return
+		}
+		payloadWriter := func(payload []byte) error {
+			return types.WriteExtensionPayload(streamWriter, payload)
+		}
+		if err := extension.SnapshotExtension(height, payloadWriter); err != nil {
+			streamWriter.CloseWithError(err)
+			return
+		}
+	}
+}
+
+// CreateMigration creates a migration snapshot and writes it to the given writer.
+// It is used to migrate the state from the original store to store/v2.
+func (m *Manager) CreateMigration(height uint64, protoWriter WriteCloser) error {
+	if m == nil {
+		return errorsmod.Wrap(storeerrors.ErrLogic, "Snapshot Manager is nil")
+	}
+
+	err := m.begin(opSnapshot)
+	if err != nil {
+		return err
+	}
+	// m.end() will be called by the migration manager with EndMigration().
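+	//
+	// Expected caller sequence (a sketch, inferred from the comment above):
+	//
+	//	_ = mgr.CreateMigration(height, w) // starts streaming the state in the background
+	//	// ... the migration manager consumes the reader side of w ...
+	//	mgr.EndMigration(newCommitSnapshotter) // swaps the snapshotter and ends the operation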
+ + go func() { + if err := m.commitSnapshotter.Snapshot(height, protoWriter); err != nil { + protoWriter.CloseWithError(err) + return + } + _ = protoWriter.Close() // always return nil + }() + + return nil +} + +// EndMigration ends the migration operation. +// It will replace the current commitSnapshotter with the new one. +func (m *Manager) EndMigration(commitSnapshotter CommitSnapshotter) { + defer m.end() + m.commitSnapshotter = commitSnapshotter +} + +// List lists snapshots, mirroring ABCI ListSnapshots. It can be concurrent with other operations. +func (m *Manager) List() ([]*types.Snapshot, error) { + return m.store.List() +} + +// LoadChunk loads a chunk into a byte slice, mirroring ABCI LoadChunk. It can be called +// concurrently with other operations. If the chunk does not exist, nil is returned. +func (m *Manager) LoadChunk(height uint64, format, chunk uint32) ([]byte, error) { + reader, err := m.store.LoadChunk(height, format, chunk) + if err != nil { + return nil, err + } + if reader == nil { + return nil, nil + } + defer reader.Close() + + return io.ReadAll(reader) +} + +// Prune prunes snapshots, if no other operations are in progress. +func (m *Manager) Prune(retain uint32) (uint64, error) { + err := m.begin(opPrune) + if err != nil { + return 0, err + } + defer m.end() + return m.store.Prune(retain) +} + +// Restore begins an async snapshot restoration, mirroring ABCI OfferSnapshot. Chunks must be fed +// via RestoreChunk() until the restore is complete or a chunk fails. +func (m *Manager) Restore(snapshot types.Snapshot) error { + if snapshot.Chunks == 0 { + return errorsmod.Wrap(types.ErrInvalidMetadata, "no chunks") + } + if uint32(len(snapshot.Metadata.ChunkHashes)) != snapshot.Chunks { + return errorsmod.Wrapf(types.ErrInvalidMetadata, "snapshot has %v chunk hashes, but %v chunks", + uint32(len(snapshot.Metadata.ChunkHashes)), + snapshot.Chunks) + } + m.mtx.Lock() + defer m.mtx.Unlock() + + // check multistore supported format preemptive + if snapshot.Format != types.CurrentFormat { + return errorsmod.Wrapf(types.ErrUnknownFormat, "snapshot format %v", snapshot.Format) + } + if snapshot.Height == 0 { + return errorsmod.Wrap(storeerrors.ErrLogic, "cannot restore snapshot at height 0") + } + if snapshot.Height > uint64(math.MaxInt64) { + return errorsmod.Wrapf(types.ErrInvalidMetadata, + "snapshot height %v cannot exceed %v", snapshot.Height, int64(math.MaxInt64)) + } + + err := m.beginLocked(opRestore) + if err != nil { + return err + } + + // Start an asynchronous snapshot restoration, passing chunks and completion status via channels. 
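+	// Data flow between the channels below (derived from the code, illustrative):
+	//
+	//	RestoreChunk -> chChunkIDs -> loadChunkStream -> chChunks -> doRestoreSnapshot -> chDone
+	//
+	// RestoreChunk persists each chunk to disk and sends its index; loadChunkStream
+	// re-opens the chunk files and feeds them to the restore goroutine.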
+	chChunkIDs := make(chan uint32, chunkIDBufferSize)
+	chDone := make(chan restoreDone, 1)
+
+	dir := m.store.pathSnapshot(snapshot.Height, snapshot.Format)
+	if err := os.MkdirAll(dir, 0o750); err != nil {
+		return errorsmod.Wrapf(err, "failed to create snapshot directory %q", dir)
+	}
+
+	chChunks := m.loadChunkStream(snapshot.Height, snapshot.Format, chChunkIDs)
+
+	go func() {
+		err := m.doRestoreSnapshot(snapshot, chChunks)
+		chDone <- restoreDone{
+			complete: err == nil,
+			err:      err,
+		}
+		close(chDone)
+	}()
+
+	m.chRestore = chChunkIDs
+	m.chRestoreDone = chDone
+	m.restoreSnapshot = &snapshot
+	m.restoreChunkIndex = 0
+	return nil
+}
+
+func (m *Manager) loadChunkStream(height uint64, format uint32, chunkIDs <-chan uint32) <-chan io.ReadCloser {
+	chunks := make(chan io.ReadCloser, chunkBufferSize)
+	go func() {
+		defer close(chunks)
+
+		for chunkID := range chunkIDs {
+			chunk, err := m.store.loadChunkFile(height, format, chunkID)
+			if err != nil {
+				m.logger.Error("load chunk file failed", "height", height, "format", format, "chunk", chunkID, "err", err)
+				break
+			}
+			chunks <- chunk
+		}
+	}()
+
+	return chunks
+}
+
+// doRestoreSnapshot does the heavy work of snapshot restoration after the preliminary checks on the request have passed.
+func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io.ReadCloser) error {
+	dir := m.store.pathSnapshot(snapshot.Height, snapshot.Format)
+	if err := os.MkdirAll(dir, 0o750); err != nil {
+		return errorsmod.Wrapf(err, "failed to create snapshot directory %q", dir)
+	}
+
+	var nextItem types.SnapshotItem
+	streamReader, err := NewStreamReader(chChunks)
+	if err != nil {
+		return err
+	}
+	defer streamReader.Close()
+
+	// payloadReader reads an extension payload for the extension snapshotter; it returns `io.EOF` at extension boundaries.
+	payloadReader := func() ([]byte, error) {
+		nextItem.Reset()
+		if err := streamReader.ReadMsg(&nextItem); err != nil {
+			return nil, err
+		}
+		payload := nextItem.GetExtensionPayload()
+		if payload == nil {
+			return nil, io.EOF
+		}
+		return payload.Payload, nil
+	}
+
+	nextItem, err = m.commitSnapshotter.Restore(snapshot.Height, snapshot.Format, streamReader)
+	if err != nil {
+		return errorsmod.Wrap(err, "multistore restore")
+	}
+
+	for {
+		if nextItem.Item == nil {
+			// end of stream
+			break
+		}
+		metadata := nextItem.GetExtension()
+		if metadata == nil {
+			return errorsmod.Wrapf(storeerrors.ErrLogic, "unknown snapshot item %T", nextItem.Item)
+		}
+		extension, ok := m.extensions[metadata.Name]
+		if !ok {
+			return errorsmod.Wrapf(storeerrors.ErrLogic, "unknown extension snapshotter %s", metadata.Name)
+		}
+		if !IsFormatSupported(extension, metadata.Format) {
+			return errorsmod.Wrapf(types.ErrUnknownFormat, "format %v for extension %s", metadata.Format, metadata.Name)
+		}
+
+		if err := extension.RestoreExtension(snapshot.Height, metadata.Format, payloadReader); err != nil {
+			return errorsmod.Wrapf(err, "extension %s restore", metadata.Name)
+		}
+
+		payload := nextItem.GetExtensionPayload()
+		if payload != nil && len(payload.Payload) != 0 {
+			return fmt.Errorf("extension %s did not exhaust its payload stream", metadata.Name)
+		}
+		break
+	}
+
+	return nil
+}
+
+// RestoreChunk adds a chunk to an active snapshot restoration, mirroring ABCI ApplySnapshotChunk.
+// Chunks must be fed until the restore is complete (returning true) or a chunk errors.
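+//
+// A typical driver loop might look like (sketch; chunk delivery is the caller's concern):
+//
+//	for _, chunk := range chunks {
+//		done, err := mgr.RestoreChunk(chunk)
+//		if err != nil {
+//			return err
+//		}
+//		if done {
+//			break // restore completed
+//		}
+//	}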
+func (m *Manager) RestoreChunk(chunk []byte) (bool, error) {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	if m.operation != opRestore {
+		return false, errorsmod.Wrap(storeerrors.ErrLogic, "no restore operation in progress")
+	}
+
+	if int(m.restoreChunkIndex) >= len(m.restoreSnapshot.Metadata.ChunkHashes) {
+		return false, errorsmod.Wrap(storeerrors.ErrLogic, "received unexpected chunk")
+	}
+
+	// Check if any errors have occurred yet.
+	select {
+	case done := <-m.chRestoreDone:
+		m.endLocked()
+		if done.err != nil {
+			return false, done.err
+		}
+		return false, errorsmod.Wrap(storeerrors.ErrLogic, "restore ended unexpectedly")
+	default:
+	}
+
+	// Verify the chunk hash.
+	hash := sha256.Sum256(chunk)
+	expected := m.restoreSnapshot.Metadata.ChunkHashes[m.restoreChunkIndex]
+	if !bytes.Equal(hash[:], expected) {
+		return false, errorsmod.Wrapf(types.ErrChunkHashMismatch,
+			"expected %x, got %x", expected, hash)
+	}
+
+	if err := m.store.saveChunkContent(chunk, m.restoreChunkIndex, m.restoreSnapshot); err != nil {
+		return false, errorsmod.Wrapf(err, "save chunk content %d", m.restoreChunkIndex)
+	}
+
+	// Pass the chunk to the restore, and wait for completion if it was the final one.
+	m.chRestore <- m.restoreChunkIndex
+	m.restoreChunkIndex++
+
+	if int(m.restoreChunkIndex) >= len(m.restoreSnapshot.Metadata.ChunkHashes) {
+		close(m.chRestore)
+		m.chRestore = nil
+
+		// the chunks are all written into files, so we can save the snapshot to the db
+		// even if the restoration is not yet complete.
+		if err := m.store.saveSnapshot(m.restoreSnapshot); err != nil {
+			return false, errorsmod.Wrap(err, "save restoring snapshot")
+		}
+
+		done := <-m.chRestoreDone
+		m.endLocked()
+		if done.err != nil {
+			return false, done.err
+		}
+		if !done.complete {
+			return false, errorsmod.Wrap(storeerrors.ErrLogic, "restore ended prematurely")
+		}
+
+		return true, nil
+	}
+	return false, nil
+}
+
+// RestoreLocalSnapshot restores app state from a local snapshot.
+func (m *Manager) RestoreLocalSnapshot(height uint64, format uint32) error {
+	snapshot, ch, err := m.store.Load(height, format)
+	if err != nil {
+		return err
+	}
+
+	if snapshot == nil {
+		return fmt.Errorf("snapshot doesn't exist, height: %d, format: %d", height, format)
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	err = m.beginLocked(opRestore)
+	if err != nil {
+		return err
+	}
+	defer m.endLocked()
+
+	return m.doRestoreSnapshot(*snapshot, ch)
+}
+
+// sortedExtensionNames sorts extension names for deterministic iteration.
+func (m *Manager) sortedExtensionNames() []string {
+	names := make([]string, 0, len(m.extensions))
+	for name := range m.extensions {
+		names = append(names, name)
+	}
+
+	sort.Strings(names)
+	return names
+}
+
+// IsFormatSupported reports whether the snapshotter supports restoration from the given format.
+func IsFormatSupported(snapshotter ExtensionSnapshotter, format uint32) bool {
+	for _, i := range snapshotter.SupportedFormats() {
+		if i == format {
+			return true
+		}
+	}
+	return false
+}
+
+// SnapshotIfApplicable takes a snapshot of the current state if we are on a snapshot height.
+// It also prunes any old snapshots.
+func (m *Manager) SnapshotIfApplicable(height int64) {
+	if m == nil {
+		return
+	}
+	if !m.shouldTakeSnapshot(height) {
+		m.logger.Debug("snapshot is skipped", "height", height)
+		return
+	}
+	// start the snapshot routine in the background
+	go m.snapshot(height)
+}
+
+// shouldTakeSnapshot returns true if a snapshot should be taken at the given height.
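+//
+// For example, with Interval=1000 snapshots are taken at heights 1000, 2000, 3000, ...;
+// with Interval=0 snapshotting is disabled.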
+func (m *Manager) shouldTakeSnapshot(height int64) bool { + return m.opts.Interval > 0 && uint64(height)%m.opts.Interval == 0 +} + +func (m *Manager) snapshot(height int64) { + m.logger.Info("creating state snapshot", "height", height) + + if height <= 0 { + m.logger.Error("snapshot height must be positive", "height", height) + return + } + + snapshot, err := m.Create(uint64(height)) + if err != nil { + m.logger.Error("failed to create state snapshot", "height", height, "err", err) + return + } + + m.logger.Info("completed state snapshot", "height", height, "format", snapshot.Format) + + if m.opts.KeepRecent > 0 { + m.logger.Debug("pruning state snapshots") + + pruned, err := m.Prune(m.opts.KeepRecent) + if err != nil { + m.logger.Error("Failed to prune state snapshots", "err", err) + return + } + + m.logger.Debug("pruned state snapshots", "pruned", pruned) + } +} + +// Close the snapshot database. +func (m *Manager) Close() error { return nil } diff --git a/store/v2/snapshots/manager_test.go b/store/v2/snapshots/manager_test.go new file mode 100644 index 000000000000..e374b4c75cd0 --- /dev/null +++ b/store/v2/snapshots/manager_test.go @@ -0,0 +1,525 @@ +package snapshots_test + +import ( + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + coretesting "cosmossdk.io/core/testing" + "cosmossdk.io/store/v2/snapshots" + "cosmossdk.io/store/v2/snapshots/types" +) + +var opts = snapshots.NewSnapshotOptions(1500, 2) + +func TestManager_List(t *testing.T) { + store := setupStore(t) + commitSnapshotter := &mockCommitSnapshotter{} + manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) + + mgrList, err := manager.List() + require.NoError(t, err) + storeList, err := store.List() + require.NoError(t, err) + + require.NotEmpty(t, storeList) + assert.Equal(t, storeList, mgrList) + + // list should not block or error on busy managers + manager = setupBusyManager(t) + list, err := manager.List() + require.NoError(t, err) + assert.Equal(t, []*types.Snapshot{}, list) + + require.NoError(t, manager.Close()) +} + +func TestManager_LoadChunk(t *testing.T) { + store := setupStore(t) + manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, nil, coretesting.NewNopLogger()) + + // Existing chunk should return body + chunk, err := manager.LoadChunk(2, 1, 1) + require.NoError(t, err) + assert.Equal(t, []byte{2, 1, 1}, chunk) + + // Missing chunk should return nil + chunk, err = manager.LoadChunk(2, 1, 9) + require.NoError(t, err) + assert.Nil(t, chunk) + + // LoadChunk should not block or error on busy managers + manager = setupBusyManager(t) + chunk, err = manager.LoadChunk(2, 1, 0) + require.NoError(t, err) + assert.Nil(t, chunk) +} + +func TestManager_Take(t *testing.T) { + store := setupStore(t) + items := [][]byte{ + {1, 2, 3}, + {4, 5, 6}, + {7, 8, 9}, + } + commitSnapshotter := &mockCommitSnapshotter{ + items: items, + } + extSnapshotter := newExtSnapshotter(10) + + expectChunks := snapshotItems(items, extSnapshotter) + manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) + err := manager.RegisterExtensions(extSnapshotter) + require.NoError(t, err) + + // nil manager should return error + _, err = (*snapshots.Manager)(nil).Create(1) + require.Error(t, err) + + // creating a snapshot at a lower height than the latest should error + _, err = manager.Create(3) + require.Error(t, err) + + // creating a snapshot at a higher height should be fine, 
and should return it + snapshot, err := manager.Create(5) + require.NoError(t, err) + + assert.Equal(t, &types.Snapshot{ + Height: 5, + Format: commitSnapshotter.SnapshotFormat(), + Chunks: 1, + Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, + Metadata: types.Metadata{ + ChunkHashes: checksums(expectChunks), + }, + }, snapshot) + + storeSnapshot, chunks, err := store.Load(snapshot.Height, snapshot.Format) + require.NoError(t, err) + assert.Equal(t, snapshot, storeSnapshot) + assert.Equal(t, expectChunks, readChunks(chunks)) + + // creating a snapshot while a different snapshot is being created should error + manager = setupBusyManager(t) + _, err = manager.Create(9) + require.Error(t, err) +} + +func TestManager_Prune(t *testing.T) { + store := setupStore(t) + manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, nil, coretesting.NewNopLogger()) + + pruned, err := manager.Prune(2) + require.NoError(t, err) + assert.EqualValues(t, 1, pruned) + + list, err := manager.List() + require.NoError(t, err) + assert.Len(t, list, 3) + + // Prune should error while a snapshot is being taken + manager = setupBusyManager(t) + _, err = manager.Prune(2) + require.Error(t, err) +} + +func TestManager_Restore(t *testing.T) { + store := setupStore(t) + target := &mockCommitSnapshotter{} + extSnapshotter := newExtSnapshotter(0) + manager := snapshots.NewManager(store, opts, target, nil, coretesting.NewNopLogger()) + err := manager.RegisterExtensions(extSnapshotter) + require.NoError(t, err) + + expectItems := [][]byte{ + {1, 2, 3}, + {4, 5, 6}, + {7, 8, 9}, + } + + chunks := snapshotItems(expectItems, newExtSnapshotter(10)) + + // Restore errors on invalid format + err = manager.Restore(types.Snapshot{ + Height: 3, + Format: 0, + Hash: []byte{1, 2, 3}, + Chunks: uint32(len(chunks)), + Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, + }) + require.Error(t, err) + require.ErrorIs(t, err, types.ErrUnknownFormat) + + // Restore errors on no chunks + err = manager.Restore(types.Snapshot{Height: 3, Format: types.CurrentFormat, Hash: []byte{1, 2, 3}}) + require.Error(t, err) + + // Restore errors on chunk and chunkhashes mismatch + err = manager.Restore(types.Snapshot{ + Height: 3, + Format: types.CurrentFormat, + Hash: []byte{1, 2, 3}, + Chunks: 4, + Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, + }) + require.Error(t, err) + + // Starting a restore works + err = manager.Restore(types.Snapshot{ + Height: 3, + Format: types.CurrentFormat, + Hash: []byte{1, 2, 3}, + Chunks: 1, + Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, + }) + require.NoError(t, err) + + // While the restore is in progress, any other operations fail + _, err = manager.Create(4) + require.Error(t, err) + + _, err = manager.Prune(1) + require.Error(t, err) + + // Feeding an invalid chunk should error due to invalid checksum, but not abort restoration. 
+ _, err = manager.RestoreChunk([]byte{9, 9, 9}) + require.Error(t, err) + require.True(t, errors.Is(err, types.ErrChunkHashMismatch)) + + // Feeding the chunks should work + for i, chunk := range chunks { + done, err := manager.RestoreChunk(chunk) + require.NoError(t, err) + if i == len(chunks)-1 { + assert.True(t, done) + } else { + assert.False(t, done) + } + } + + assert.Equal(t, expectItems, target.items) + assert.Equal(t, 10, len(extSnapshotter.state)) + + // The snapshot is saved in local snapshot store + snapshots, err := store.List() + require.NoError(t, err) + snapshot := snapshots[0] + require.Equal(t, uint64(3), snapshot.Height) + require.Equal(t, types.CurrentFormat, snapshot.Format) + + // Starting a new restore should fail now, because the target already has contents. + err = manager.Restore(types.Snapshot{ + Height: 3, + Format: types.CurrentFormat, + Hash: []byte{1, 2, 3}, + Chunks: 3, + Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, + }) + require.Error(t, err) + + // But if we clear out the target we should be able to start a new restore. This time we'll + // fail it with a checksum error. That error should stop the operation, so that we can do + // a prune operation right after. + target.items = nil + err = manager.Restore(types.Snapshot{ + Height: 3, + Format: types.CurrentFormat, + Hash: []byte{1, 2, 3}, + Chunks: 1, + Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, + }) + require.NoError(t, err) + + // Feeding the chunks should work + for i, chunk := range chunks { + done, err := manager.RestoreChunk(chunk) + require.NoError(t, err) + if i == len(chunks)-1 { + assert.True(t, done) + } else { + assert.False(t, done) + } + } +} + +func TestManager_TakeError(t *testing.T) { + snapshotter := &mockErrorCommitSnapshotter{} + store, err := snapshots.NewStore(t.TempDir()) + require.NoError(t, err) + manager := snapshots.NewManager(store, opts, snapshotter, nil, coretesting.NewNopLogger()) + + _, err = manager.Create(1) + require.Error(t, err) +} + +func TestSnapshot_Take_Restore(t *testing.T) { + store := setupStore(t) + items := [][]byte{ + {1, 2, 3}, + {4, 5, 6}, + {7, 8, 9}, + } + commitSnapshotter := &mockCommitSnapshotter{ + items: items, + } + + extSnapshotter := newExtSnapshotter(10) + + expectChunks := snapshotItems(items, extSnapshotter) + manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) + err := manager.RegisterExtensions(extSnapshotter) + require.NoError(t, err) + + // creating a snapshot at a higher height should be fine, and should return it + snapshot, err := manager.Create(5) + require.NoError(t, err) + + assert.Equal(t, &types.Snapshot{ + Height: 5, + Format: commitSnapshotter.SnapshotFormat(), + Chunks: 1, + Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, + Metadata: types.Metadata{ + ChunkHashes: checksums(expectChunks), + }, + }, snapshot) + + storeSnapshot, chunks, err := store.Load(snapshot.Height, snapshot.Format) + require.NoError(t, err) + assert.Equal(t, snapshot, storeSnapshot) + assert.Equal(t, expectChunks, readChunks(chunks)) + + err = manager.Restore(*snapshot) + require.NoError(t, err) + + // Feeding the chunks should work + for i, chunk := range readChunks(chunks) { + done, err := manager.RestoreChunk(chunk) + require.NoError(t, err) + if i == len(chunks)-1 { + assert.True(t, done) + } else { + assert.False(t, done) + } + 
} + + // The snapshot is saved in local snapshot store + snapshots, err := store.List() + require.NoError(t, err) + require.Equal(t, uint64(5), snapshots[0].Height) + require.Equal(t, types.CurrentFormat, snapshots[0].Format) + + // Starting a new restore should fail now, because the target already has contents. + err = manager.Restore(*snapshot) + require.Error(t, err) + + storeSnapshot, chunks, err = store.Load(snapshot.Height, snapshot.Format) + require.NoError(t, err) + assert.Equal(t, snapshot, storeSnapshot) + assert.Equal(t, expectChunks, readChunks(chunks)) + + // Feeding the chunks should work + for i, chunk := range readChunks(chunks) { + done, err := manager.RestoreChunk(chunk) + require.NoError(t, err) + if i == len(chunks)-1 { + assert.True(t, done) + } else { + assert.False(t, done) + } + } + + assert.Equal(t, items, commitSnapshotter.items) + assert.Equal(t, 10, len(extSnapshotter.state)) + + snapshots, err = store.List() + require.NoError(t, err) + require.Equal(t, uint64(5), snapshots[0].Height) + require.Equal(t, types.CurrentFormat, snapshots[0].Format) +} + +func TestSnapshot_Take_Prune(t *testing.T) { + store := setupStore(t) + + items := [][]byte{ + {1, 2, 3}, + {4, 5, 6}, + {7, 8, 9}, + } + commitSnapshotter := &mockCommitSnapshotter{ + items: items, + } + extSnapshotter := newExtSnapshotter(10) + + expectChunks := snapshotItems(items, extSnapshotter) + manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) + err := manager.RegisterExtensions(extSnapshotter) + require.NoError(t, err) + + // creating a snapshot at height 4 + snapshot, err := manager.Create(4) + require.NoError(t, err) + + assert.Equal(t, &types.Snapshot{ + Height: 4, + Format: commitSnapshotter.SnapshotFormat(), + Chunks: 1, + Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, + Metadata: types.Metadata{ + ChunkHashes: checksums(expectChunks), + }, + }, snapshot) + + pruned, err := manager.Prune(1) + require.NoError(t, err) + assert.EqualValues(t, 4, pruned) + + // creating a snapshot at a same height 4, should be error + // since we prune all the previous snapshot except the latest at height 4 + _, err = manager.Create(4) + require.Error(t, err) + + // prune all + pruned, err = manager.Prune(0) + require.NoError(t, err) + assert.EqualValues(t, 1, pruned) + + // creating a snapshot at a same height 4, should be true since we prune all the previous snapshot + snapshot, err = manager.Create(4) + require.NoError(t, err) + + assert.Equal(t, &types.Snapshot{ + Height: 4, + Format: commitSnapshotter.SnapshotFormat(), + Chunks: 1, + Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, + Metadata: types.Metadata{ + ChunkHashes: checksums(expectChunks), + }, + }, snapshot) + + storeSnapshot, chunks, err := store.Load(snapshot.Height, snapshot.Format) + require.NoError(t, err) + assert.Equal(t, snapshot, storeSnapshot) + assert.Equal(t, expectChunks, readChunks(chunks)) + + pruned, err = manager.Prune(2) + require.NoError(t, err) + assert.EqualValues(t, 0, pruned) + + list, err := manager.List() + require.NoError(t, err) + assert.Len(t, list, 1) + + // Prune should error while a snapshot is being taken + manager = setupBusyManager(t) + _, err = manager.Prune(2) + 
require.Error(t, err)
+}
+
+func TestSnapshot_Pruning_Take_Snapshot_Parallel(t *testing.T) {
+	store := setupStore(t)
+
+	items := [][]byte{
+		{1, 2, 3},
+		{4, 5, 6},
+		{7, 8, 9},
+	}
+	commitSnapshotter := &mockCommitSnapshotter{
+		items: items,
+	}
+	extSnapshotter := newExtSnapshotter(10)
+
+	expectChunks := snapshotItems(items, extSnapshotter)
+	manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger())
+	err := manager.RegisterExtensions(extSnapshotter)
+	require.NoError(t, err)
+
+	var prunedCount uint64
+	// try to take a snapshot and prune in parallel, with the prune operation beginning first
+	go func() {
+		checkError := func() bool {
+			_, err := manager.Create(4)
+			return err != nil
+		}
+
+		require.Eventually(t, checkError, time.Millisecond*200, time.Millisecond)
+	}()
+
+	prunedCount, err = manager.Prune(1)
+	require.NoError(t, err)
+	assert.EqualValues(t, 3, prunedCount)
+
+	// creating a snapshot at the same height 4 should succeed since pruning has finished
+	snapshot, err := manager.Create(4)
+	require.NoError(t, err)
+
+	assert.Equal(t, &types.Snapshot{
+		Height: 4,
+		Format: commitSnapshotter.SnapshotFormat(),
+		Chunks: 1,
+		Hash:   []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b},
+		Metadata: types.Metadata{
+			ChunkHashes: checksums(expectChunks),
+		},
+	}, snapshot)
+
+	// try to take a snapshot and prune in parallel, with the snapshot operation beginning first
+	go func() {
+		checkError := func() bool {
+			_, err = manager.Prune(1)
+			return err != nil
+		}
+
+		require.Eventually(t, checkError, time.Millisecond*200, time.Millisecond)
+	}()
+
+	snapshot, err = manager.Create(5)
+	require.NoError(t, err)
+
+	assert.Equal(t, &types.Snapshot{
+		Height: 5,
+		Format: commitSnapshotter.SnapshotFormat(),
+		Chunks: 1,
+		Hash:   []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b},
+		Metadata: types.Metadata{
+			ChunkHashes: checksums(expectChunks),
+		},
+	}, snapshot)
+}
+
+func TestSnapshot_SnapshotIfApplicable(t *testing.T) {
+	store := setupStore(t)
+
+	items := [][]byte{
+		{1, 2, 3},
+		{4, 5, 6},
+		{7, 8, 9},
+	}
+	commitSnapshotter := &mockCommitSnapshotter{
+		items: items,
+	}
+	extSnapshotter := newExtSnapshotter(10)
+
+	snapshotOpts := snapshots.NewSnapshotOptions(1, 1)
+
+	manager := snapshots.NewManager(store, snapshotOpts, commitSnapshotter, nil, coretesting.NewNopLogger())
+	err := manager.RegisterExtensions(extSnapshotter)
+	require.NoError(t, err)
+
+	manager.SnapshotIfApplicable(4)
+
+	checkLatestHeight := func() bool {
+		latestSnapshot, _ := store.GetLatest()
+		return latestSnapshot.Height == 4
+	}
+
+	require.Eventually(t, checkLatestHeight, time.Second*10, time.Second)
+
+	pruned, err := manager.Prune(1)
+	require.NoError(t, err)
+	require.Equal(t, uint64(0), pruned)
+}
diff --git a/store/v2/snapshots/snapshotter.go b/store/v2/snapshots/snapshotter.go
new file mode 100644
index 000000000000..f3f4d33f1cf5
--- /dev/null
+++ b/store/v2/snapshots/snapshotter.go
@@ -0,0 +1,46 @@
+package snapshots
+
+import (
+	protoio "github.com/cosmos/gogoproto/io"
+
+	"cosmossdk.io/store/v2/snapshots/types"
+)
+
+// CommitSnapshotter defines an API for creating and restoring snapshots of the
+// commitment state.
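+//
+// As exercised by the test helpers, the snapshot byte stream is a sequence of
+// length-delimited SnapshotItem protobuf messages, zlib-compressed and split
+// into fixed-size chunks.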
+type CommitSnapshotter interface {
+	// Snapshot writes a snapshot of the commitment state at the given version.
+	Snapshot(version uint64, protoWriter protoio.Writer) error
+
+	// Restore restores the commitment state from the snapshot reader.
+	Restore(version uint64, format uint32, protoReader protoio.Reader) (types.SnapshotItem, error)
+}
+
+// ExtensionPayloadReader reads extension payloads;
+// it returns io.EOF when it reaches either the end of the stream or an extension boundary.
+type ExtensionPayloadReader = func() ([]byte, error)
+
+// ExtensionPayloadWriter is a helper to write extension payloads to the underlying stream.
+type ExtensionPayloadWriter = func([]byte) error
+
+// ExtensionSnapshotter is an extension Snapshotter that is appended to the snapshot stream.
+// An ExtensionSnapshotter has a unique name and manages its own internal formats.
+type ExtensionSnapshotter interface {
+	// SnapshotName returns the name of the snapshotter; it must be unique within the manager.
+	SnapshotName() string
+
+	// SnapshotFormat returns the default format the extension snapshotter uses to encode the
+	// payloads when taking a snapshot.
+	// It's defined within the extension, distinct from the global format for the whole state-sync snapshot.
+	SnapshotFormat() uint32
+
+	// SupportedFormats returns a list of formats it can restore from.
+	SupportedFormats() []uint32
+
+	// SnapshotExtension writes extension payloads into the underlying protobuf stream.
+	SnapshotExtension(height uint64, payloadWriter ExtensionPayloadWriter) error
+
+	// RestoreExtension restores an extension state snapshot;
+	// the payload reader returns `io.EOF` when it reaches the extension boundary.
+	RestoreExtension(height uint64, format uint32, payloadReader ExtensionPayloadReader) error
+}
diff --git a/store/v2/store.go b/store/v2/store.go
new file mode 100644
index 000000000000..20c6ab3c8ef2
--- /dev/null
+++ b/store/v2/store.go
@@ -0,0 +1,101 @@
+package store
+
+import (
+	"io"
+
+	corestore "cosmossdk.io/core/store"
+	"cosmossdk.io/store/v2/metrics"
+	"cosmossdk.io/store/v2/proof"
+)
+
+// RootStore defines an abstraction layer containing a State Storage (SS) engine
+// and one or more State Commitment (SC) engines.
+type RootStore interface {
+	Pruner
+	Backend
+
+	// StateLatest returns a read-only version of the RootStore at the latest
+	// height, alongside the associated version.
+	StateLatest() (uint64, corestore.ReaderMap, error)
+
+	// StateAt is analogous to StateLatest() except it returns a read-only version
+	// of the RootStore at the provided version. If such a version cannot be found,
+	// an error must be returned.
+	StateAt(version uint64) (corestore.ReaderMap, error)
+
+	// Query performs a query on the RootStore for a given store key, version (height),
+	// and key tuple. Queries should be routed to the underlying SS engine.
+	Query(storeKey []byte, version uint64, key []byte, prove bool) (QueryResult, error)
+
+	// LoadVersion loads the RootStore to the given version.
+	LoadVersion(version uint64) error
+
+	// LoadVersionForOverwriting loads the state at the given version.
+	// Any versions greater than targetVersion will be deleted.
+	LoadVersionForOverwriting(version uint64) error
+
+	// LoadLatestVersion behaves identically to LoadVersion except it loads the
+	// latest version implicitly.
+	LoadLatestVersion() error
+
+	// GetLatestVersion returns the latest version, i.e. height, committed.
+	GetLatestVersion() (uint64, error)
+
+	// SetInitialVersion sets the initial version on the RootStore.
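+	// This is typically used when a chain starts at a non-zero height, e.g.
+	// after a coordinated upgrade or when initializing from a state export.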
diff --git a/store/v2/store.go b/store/v2/store.go
new file mode 100644
index 000000000000..20c6ab3c8ef2
--- /dev/null
+++ b/store/v2/store.go
@@ -0,0 +1,101 @@
+package store
+
+import (
+	"io"
+
+	corestore "cosmossdk.io/core/store"
+	"cosmossdk.io/store/v2/metrics"
+	"cosmossdk.io/store/v2/proof"
+)
+
+// RootStore defines an abstraction layer containing a State Storage (SS) engine
+// and one or more State Commitment (SC) engines.
+type RootStore interface {
+	Pruner
+	Backend
+
+	// StateLatest returns a read-only version of the RootStore at the latest
+	// height, alongside the associated version.
+	StateLatest() (uint64, corestore.ReaderMap, error)
+
+	// StateAt is analogous to StateLatest() except it returns a read-only version
+	// of the RootStore at the provided version. If such a version cannot be found,
+	// an error must be returned.
+	StateAt(version uint64) (corestore.ReaderMap, error)
+
+	// Query performs a query on the RootStore for a given store key, version (height),
+	// and key tuple. Queries should be routed to the underlying SS engine.
+	Query(storeKey []byte, version uint64, key []byte, prove bool) (QueryResult, error)
+
+	// LoadVersion loads the RootStore to the given version.
+	LoadVersion(version uint64) error
+
+	// LoadVersionForOverwriting loads the state at the given version.
+	// Any versions greater than the given version will be deleted.
+	LoadVersionForOverwriting(version uint64) error
+
+	// LoadLatestVersion behaves identically to LoadVersion except it loads the
+	// latest version implicitly.
+	LoadLatestVersion() error
+
+	// GetLatestVersion returns the latest version, i.e. height, committed.
+	GetLatestVersion() (uint64, error)
+
+	// SetInitialVersion sets the initial version on the RootStore.
+	SetInitialVersion(v uint64) error
+
+	// Commit should be responsible for taking the provided changeset and flushing
+	// it to disk. Note, it will overwrite the changeset if WorkingHash() was called.
+	// Commit() should ensure the changeset is committed to all SC and SS backends
+	// and flushed to disk. It must return a hash of the merkle-ized committed state.
+	Commit(cs *corestore.Changeset) ([]byte, error)
+
+	// LastCommitID returns a CommitID pertaining to the last commitment.
+	LastCommitID() (proof.CommitID, error)
+
+	// SetMetrics sets the telemetry handler on the RootStore.
+	SetMetrics(m metrics.Metrics)
+
+	io.Closer
+}
+
+// Backend defines the interface for the RootStore backends.
+type Backend interface {
+	// GetStateCommitment returns the SC backend.
+	GetStateCommitment() Committer
+}
+
+// UpgradeableStore defines the interface for upgrading store keys.
+type UpgradeableStore interface {
+	// LoadVersionAndUpgrade behaves identically to LoadVersion except it also
+	// accepts a StoreUpgrades object that defines a series of transformations to
+	// apply to store keys (if any).
+	//
+	// Note, handling StoreUpgrades is optional depending on the underlying store
+	// implementation.
+	LoadVersionAndUpgrade(version uint64, upgrades *corestore.StoreUpgrades) error
+}
+
+// Pruner defines the interface for pruning old versions of the store or database.
+type Pruner interface {
+	// Prune prunes the store to the provided version.
+	Prune(version uint64) error
+}
+
+// PausablePruner extends the Pruner interface to include the API for pausing
+// the pruning process.
+type PausablePruner interface {
+	Pruner
+
+	// PausePruning pauses or resumes the pruning process, avoiding parallel
+	// writes while committing the state.
+	PausePruning(pause bool)
+}
+
+// QueryResult defines the response type for performing a query on a RootStore.
+type QueryResult struct {
+	Key      []byte
+	Value    []byte
+	Version  uint64
+	ProofOps []proof.CommitmentOp
+}
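
A short sketch of how PausablePruner is meant to be used: bracket the commit so background pruning never writes in parallel with the state flush. The combined interface and helper below are illustrative assumptions, not part of the patch.

    package example

    import (
        corestore "cosmossdk.io/core/store"
        store "cosmossdk.io/store/v2"
    )

    // pausableRootStore is satisfied by any RootStore whose pruner can be paused.
    type pausableRootStore interface {
        store.RootStore
        store.PausablePruner
    }

    // commitPaused pauses pruning for the duration of the commit, then resumes it.
    func commitPaused(rs pausableRootStore, cs *corestore.Changeset) ([]byte, error) {
        rs.PausePruning(true)
        defer rs.PausePruning(false)
        return rs.Commit(cs)
    }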
diff --git a/tests/integration/accounts/base_account_test.go b/tests/integration/accounts/base_account_test.go
index a50975b8ff79..8db4cbf1a9e3 100644
--- a/tests/integration/accounts/base_account_test.go
+++ b/tests/integration/accounts/base_account_test.go
@@ -51,12 +51,14 @@ func TestBaseAccount(t *testing.T) {
 }
 
 func sendTx(t *testing.T, ctx sdk.Context, app *simapp.SimApp, sender []byte, msg sdk.Msg) {
+	t.Helper()
 	tx := sign(t, ctx, app, sender, privKey, msg)
 	_, _, err := app.SimDeliver(app.TxEncode, tx)
 	require.NoError(t, err)
 }
 
 func sign(t *testing.T, ctx sdk.Context, app *simapp.SimApp, from sdk.AccAddress, privKey cryptotypes.PrivKey, msg sdk.Msg) sdk.Tx {
+	t.Helper()
 	r := rand.New(rand.NewSource(0))
 
 	accNum, err := app.AccountsKeeper.AccountByNumber.Get(ctx, from)
@@ -81,12 +83,14 @@ func sign(t *testing.T, ctx sdk.Context, app *simapp.SimApp, from sdk.AccAddress
 }
 
 func bechify(t *testing.T, app *simapp.SimApp, addr []byte) string {
+	t.Helper()
 	bech32, err := app.AuthKeeper.AddressCodec().BytesToString(addr)
 	require.NoError(t, err)
 	return bech32
 }
 
 func fundAccount(t *testing.T, app *simapp.SimApp, ctx sdk.Context, addr sdk.AccAddress, amt string) {
+	t.Helper()
 	require.NoError(t, testutil.FundAccount(ctx, app.BankKeeper, addr, coins(t, amt)))
 }
diff --git a/tests/integration/accounts/bundler_test.go b/tests/integration/accounts/bundler_test.go
index 1b94ddd78fa1..2fb88983ddf6 100644
--- a/tests/integration/accounts/bundler_test.go
+++ b/tests/integration/accounts/bundler_test.go
@@ -209,6 +209,7 @@ func TestMsgServer_ExecuteBundle(t *testing.T) {
 }
 
 func makeTx(t *testing.T, msg gogoproto.Message, sig []byte, xt *account_abstractionv1.TxExtension) []byte {
+	t.Helper()
 	anyMsg, err := codectypes.NewAnyWithValue(msg)
 	require.NoError(t, err)
 
diff --git a/tests/integration/v2/auth/app_test.go b/tests/integration/v2/auth/app_test.go
new file mode 100644
index 000000000000..6331492b014c
--- /dev/null
+++ b/tests/integration/v2/auth/app_test.go
@@ -0,0 +1,134 @@
+package auth
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"cosmossdk.io/core/router"
+	"cosmossdk.io/core/transaction"
+	"cosmossdk.io/depinject"
+	"cosmossdk.io/log"
+	"cosmossdk.io/runtime/v2"
+	"cosmossdk.io/runtime/v2/services"
+	"cosmossdk.io/server/v2/stf"
+	"cosmossdk.io/x/accounts"
+	basedepinject "cosmossdk.io/x/accounts/defaults/base/depinject"
+	accountsv1 "cosmossdk.io/x/accounts/v1"
+	_ "cosmossdk.io/x/bank" // import as blank for app wiring
+	bankkeeper "cosmossdk.io/x/bank/keeper"
+	banktypes "cosmossdk.io/x/bank/types"
+	_ "cosmossdk.io/x/consensus" // import as blank for app wiring
+	_ "cosmossdk.io/x/staking"   // import as blank for app wiring
+
+	"github.com/cosmos/cosmos-sdk/tests/integration/v2"
+	"github.com/cosmos/cosmos-sdk/testutil/configurator"
+	_ "github.com/cosmos/cosmos-sdk/x/auth" // import as blank for app wiring
+	authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+	_ "github.com/cosmos/cosmos-sdk/x/auth/tx/config" // import as blank for app wiring
+	_ "github.com/cosmos/cosmos-sdk/x/auth/vesting"   // import as blank for app wiring
+	_ "github.com/cosmos/cosmos-sdk/x/genutil"        // import as blank for app wiring
+)
+
+type suite struct {
+	app *integration.App
+
+	ctx context.Context
+
+	authKeeper     authkeeper.AccountKeeper
+	accountsKeeper accounts.Keeper
+	bankKeeper     bankkeeper.Keeper
+}
+
+func (s suite) mustAddr(address []byte) string {
+	str, _
:= s.authKeeper.AddressCodec().BytesToString(address) + return str +} + +func createTestSuite(t *testing.T) *suite { + t.Helper() + res := suite{} + + moduleConfigs := []configurator.ModuleOption{ + configurator.AccountsModule(), + configurator.AuthModule(), + configurator.BankModule(), + configurator.VestingModule(), + configurator.StakingModule(), + configurator.TxModule(), + configurator.ValidateModule(), + configurator.ConsensusModule(), + configurator.GenutilModule(), + } + + var err error + startupCfg := integration.DefaultStartUpConfig(t) + + msgRouterService := integration.NewRouterService() + res.registerMsgRouterService(msgRouterService) + + var routerFactory runtime.RouterServiceFactory = func(_ []byte) router.Service { + return msgRouterService + } + + queryRouterService := integration.NewRouterService() + res.registerQueryRouterService(queryRouterService) + + serviceBuilder := runtime.NewRouterBuilder(routerFactory, queryRouterService) + + startupCfg.BranchService = &integration.BranchService{} + startupCfg.RouterServiceBuilder = serviceBuilder + startupCfg.HeaderService = services.NewGenesisHeaderService(stf.HeaderService{}) + + res.app, err = integration.NewApp( + depinject.Configs(configurator.NewAppV2Config(moduleConfigs...), depinject.Provide( + // inject desired account types: + basedepinject.ProvideAccount, + + // provide base account options + basedepinject.ProvideSecp256K1PubKey, + + // provide extra accounts + ProvideMockRetroCompatAccountValid, + ProvideMockRetroCompatAccountNoInfo, + ProvideMockRetroCompatAccountNoImplement, + ), depinject.Supply(log.NewNopLogger())), + startupCfg, + &res.bankKeeper, &res.accountsKeeper, &res.authKeeper) + require.NoError(t, err) + + res.ctx = res.app.StateLatestContext(t) + + return &res +} + +func (s *suite) registerMsgRouterService(router *integration.RouterService) { + // register custom router service + bankSendHandler := func(ctx context.Context, req transaction.Msg) (transaction.Msg, error) { + msg, ok := req.(*banktypes.MsgSend) + if !ok { + return nil, integration.ErrInvalidMsgType + } + msgServer := bankkeeper.NewMsgServerImpl(s.bankKeeper) + resp, err := msgServer.Send(ctx, msg) + return resp, err + } + + router.RegisterHandler(bankSendHandler, "cosmos.bank.v1beta1.MsgSend") +} + +func (s *suite) registerQueryRouterService(router *integration.RouterService) { + // register custom router service + queryHandler := func(ctx context.Context, msg transaction.Msg) (transaction.Msg, error) { + req, ok := msg.(*accountsv1.AccountNumberRequest) + if !ok { + return nil, integration.ErrInvalidMsgType + } + qs := accounts.NewQueryServer(s.accountsKeeper) + resp, err := qs.AccountNumber(ctx, req) + return resp, err + } + + router.RegisterHandler(queryHandler, "cosmos.accounts.v1.AccountNumberRequest") +} From bbcce9206909bca5958cb0290039dd666b709ecb Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Tue, 3 Dec 2024 17:34:35 +0100 Subject: [PATCH 2/4] delete diff (todo bump pseudo version) --- runtime/v2/builder.go | 219 ------- server/v2/stf/branch/bench_test.go | 128 ---- server/v2/store/snapshot.go | 416 ------------ store/iavl/store_test.go | 710 -------------------- store/v2/commitment/iavl/tree.go | 201 ------ store/v2/commitment/store.go | 571 ---------------- store/v2/commitment/store_test_suite.go | 495 -------------- store/v2/database.go | 63 -- store/v2/migration/README.md | 111 ---- store/v2/migration/manager.go | 213 ------ store/v2/migration/manager_test.go | 179 ----- store/v2/mock/db_mock.go | 301 --------- 
store/v2/mock/types.go | 13 - store/v2/pruning/manager.go | 52 -- store/v2/pruning/manager_test.go | 227 ------- store/v2/root/factory.go | 131 ---- store/v2/root/migrate_test.go | 156 ----- store/v2/root/store.go | 400 ------------ store/v2/root/store_mock_test.go | 103 --- store/v2/root/store_test.go | 830 ------------------------ store/v2/root/upgrade_test.go | 151 ----- store/v2/snapshots/helpers_test.go | 282 -------- store/v2/snapshots/manager.go | 591 ----------------- store/v2/snapshots/manager_test.go | 525 --------------- store/v2/snapshots/snapshotter.go | 46 -- store/v2/store.go | 101 --- tests/integration/v2/auth/app_test.go | 134 ---- 27 files changed, 7349 deletions(-) delete mode 100644 runtime/v2/builder.go delete mode 100644 server/v2/stf/branch/bench_test.go delete mode 100644 server/v2/store/snapshot.go delete mode 100644 store/iavl/store_test.go delete mode 100644 store/v2/commitment/iavl/tree.go delete mode 100644 store/v2/commitment/store.go delete mode 100644 store/v2/commitment/store_test_suite.go delete mode 100644 store/v2/database.go delete mode 100644 store/v2/migration/README.md delete mode 100644 store/v2/migration/manager.go delete mode 100644 store/v2/migration/manager_test.go delete mode 100644 store/v2/mock/db_mock.go delete mode 100644 store/v2/mock/types.go delete mode 100644 store/v2/pruning/manager.go delete mode 100644 store/v2/pruning/manager_test.go delete mode 100644 store/v2/root/factory.go delete mode 100644 store/v2/root/migrate_test.go delete mode 100644 store/v2/root/store.go delete mode 100644 store/v2/root/store_mock_test.go delete mode 100644 store/v2/root/store_test.go delete mode 100644 store/v2/root/upgrade_test.go delete mode 100644 store/v2/snapshots/helpers_test.go delete mode 100644 store/v2/snapshots/manager.go delete mode 100644 store/v2/snapshots/manager_test.go delete mode 100644 store/v2/snapshots/snapshotter.go delete mode 100644 store/v2/store.go delete mode 100644 tests/integration/v2/auth/app_test.go diff --git a/runtime/v2/builder.go b/runtime/v2/builder.go deleted file mode 100644 index b851955943b0..000000000000 --- a/runtime/v2/builder.go +++ /dev/null @@ -1,219 +0,0 @@ -package runtime - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - - "cosmossdk.io/core/appmodule" - appmodulev2 "cosmossdk.io/core/appmodule/v2" - "cosmossdk.io/core/store" - "cosmossdk.io/core/transaction" - "cosmossdk.io/runtime/v2/services" - "cosmossdk.io/server/v2/appmanager" - "cosmossdk.io/server/v2/stf" - "cosmossdk.io/server/v2/stf/branch" - "cosmossdk.io/store/v2/root" -) - -// AppBuilder is a type that is injected into a container by the runtime/v2 module -// (as *AppBuilder) which can be used to create an app which is compatible with -// the existing app.go initialization conventions. -type AppBuilder[T transaction.Tx] struct { - app *App[T] - storeBuilder root.Builder - storeConfig *root.Config - - // the following fields are used to overwrite the default - branch func(state store.ReaderMap) store.WriterMap - txValidator func(ctx context.Context, tx T) error - postTxExec func(ctx context.Context, tx T, success bool) error -} - -// RegisterModules registers the provided modules with the module manager. -// This is the primary hook for integrating with modules which are not registered using the app config. 
-func (a *AppBuilder[T]) RegisterModules(modules map[string]appmodulev2.AppModule) error { - for name, appModule := range modules { - // if a (legacy) module implements the HasName interface, check that the name matches - if mod, ok := appModule.(interface{ Name() string }); ok { - if name != mod.Name() { - a.app.logger.Warn(fmt.Sprintf("module name %q does not match name returned by HasName: %q", name, mod.Name())) - } - } - - if _, ok := a.app.moduleManager.modules[name]; ok { - return fmt.Errorf("module named %q already exists", name) - } - a.app.moduleManager.modules[name] = appModule - - if mod, ok := appModule.(appmodulev2.HasRegisterInterfaces); ok { - mod.RegisterInterfaces(a.app.interfaceRegistrar) - } - - if mod, ok := appModule.(appmodule.HasAminoCodec); ok { - mod.RegisterLegacyAminoCodec(a.app.amino) - } - } - - return nil -} - -// Build builds an *App instance. -func (a *AppBuilder[T]) Build(opts ...AppBuilderOption[T]) (*App[T], error) { - for _, opt := range opts { - opt(a) - } - - // default branch - if a.branch == nil { - a.branch = branch.DefaultNewWriterMap - } - - // default tx validator - if a.txValidator == nil { - a.txValidator = a.app.moduleManager.TxValidators() - } - - // default post tx exec - if a.postTxExec == nil { - a.postTxExec = func(ctx context.Context, tx T, success bool) error { - return nil - } - } - - var err error - a.app.db, err = a.storeBuilder.Build(a.app.logger, a.storeConfig) - if err != nil { - return nil, err - } - - if err = a.app.moduleManager.RegisterServices(a.app); err != nil { - return nil, err - } - - endBlocker, valUpdate := a.app.moduleManager.EndBlock() - - stf, err := stf.New[T]( - a.app.logger.With("module", "stf"), - a.app.msgRouterBuilder, - a.app.queryRouterBuilder, - a.app.moduleManager.PreBlocker(), - a.app.moduleManager.BeginBlock(), - endBlocker, - a.txValidator, - valUpdate, - a.postTxExec, - a.branch, - ) - if err != nil { - return nil, fmt.Errorf("failed to create STF: %w", err) - } - a.app.stf = stf - - a.app.AppManager = appmanager.New[T]( - appmanager.Config{ - ValidateTxGasLimit: a.app.config.GasConfig.ValidateTxGasLimit, - QueryGasLimit: a.app.config.GasConfig.QueryGasLimit, - SimulationGasLimit: a.app.config.GasConfig.SimulationGasLimit, - }, - a.app.db, - a.app.stf, - a.initGenesis, - a.exportGenesis, - ) - - return a.app, nil -} - -// initGenesis returns the app initialization genesis for modules -func (a *AppBuilder[T]) initGenesis(ctx context.Context, src io.Reader, txHandler func(json.RawMessage) error) (store.WriterMap, error) { - // this implementation assumes that the state is a JSON object - bz, err := io.ReadAll(src) - if err != nil { - return nil, fmt.Errorf("failed to read import state: %w", err) - } - - var genesisJSON map[string]json.RawMessage - if err = json.Unmarshal(bz, &genesisJSON); err != nil { - return nil, err - } - - v, zeroState, err := a.app.db.StateLatest() - if err != nil { - return nil, fmt.Errorf("unable to get latest state: %w", err) - } - if v != 0 { // TODO: genesis state may be > 0, we need to set version on store - return nil, errors.New("cannot init genesis on non-zero state") - } - genesisCtx := services.NewGenesisContext(a.branch(zeroState)) - genesisState, err := genesisCtx.Mutate(ctx, func(ctx context.Context) error { - err = a.app.moduleManager.InitGenesisJSON(ctx, genesisJSON, txHandler) - if err != nil { - return fmt.Errorf("failed to init genesis: %w", err) - } - return nil - }) - - return genesisState, err -} - -// exportGenesis returns the app export genesis logic for 
modules -func (a *AppBuilder[T]) exportGenesis(ctx context.Context, version uint64) ([]byte, error) { - state, err := a.app.db.StateAt(version) - if err != nil { - return nil, fmt.Errorf("unable to get state at given version: %w", err) - } - - genesisJson, err := a.app.moduleManager.ExportGenesisForModules( - ctx, - func() store.WriterMap { - return a.branch(state) - }, - ) - if err != nil { - return nil, fmt.Errorf("failed to export genesis: %w", err) - } - - bz, err := json.Marshal(genesisJson) - if err != nil { - return nil, fmt.Errorf("failed to marshal genesis: %w", err) - } - - return bz, nil -} - -// AppBuilderOption is a function that can be passed to AppBuilder.Build to customize the resulting app. -type AppBuilderOption[T transaction.Tx] func(*AppBuilder[T]) - -// AppBuilderWithBranch sets a custom branch implementation for the app. -func AppBuilderWithBranch[T transaction.Tx](branch func(state store.ReaderMap) store.WriterMap) AppBuilderOption[T] { - return func(a *AppBuilder[T]) { - a.branch = branch - } -} - -// AppBuilderWithTxValidator sets the tx validator for the app. -// It overrides all default tx validators defined by modules. -func AppBuilderWithTxValidator[T transaction.Tx]( - txValidators func( - ctx context.Context, tx T, - ) error, -) AppBuilderOption[T] { - return func(a *AppBuilder[T]) { - a.txValidator = txValidators - } -} - -// AppBuilderWithPostTxExec sets logic that will be executed after each transaction. -// When not provided, a no-op function will be used. -func AppBuilderWithPostTxExec[T transaction.Tx]( - postTxExec func( - ctx context.Context, tx T, success bool, - ) error, -) AppBuilderOption[T] { - return func(a *AppBuilder[T]) { - a.postTxExec = postTxExec - } -} diff --git a/server/v2/stf/branch/bench_test.go b/server/v2/stf/branch/bench_test.go deleted file mode 100644 index 67122b59b66f..000000000000 --- a/server/v2/stf/branch/bench_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package branch - -import ( - "encoding/binary" - "fmt" - "testing" - - "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" -) - -var ( - stackSizes = []int{1, 10, 100} - elemsInStack = 10 -) - -func Benchmark_CacheStack_Set(b *testing.B) { - for _, stackSize := range stackSizes { - b.Run(fmt.Sprintf("StackSize%d", stackSize), func(b *testing.B) { - bs := makeBranchStack(b, stackSize) - b.ResetTimer() - b.ReportAllocs() - for i := 0; i < b.N; i++ { - err := bs.Set([]byte{0}, []byte{0}) - if err != nil { - b.Fatal(err) - } - } - }) - } -} - -var sink any - -func Benchmark_Get(b *testing.B) { - for _, stackSize := range stackSizes { - b.Run(fmt.Sprintf("StackSize%d", stackSize), func(b *testing.B) { - bs := makeBranchStack(b, stackSize) - b.ResetTimer() - b.ReportAllocs() - for i := 0; i < b.N; i++ { - sink, _ = bs.Get([]byte{0}) - } - }) - } - if sink == nil { - b.Fatal("benchmark did not run") - } - sink = nil -} - -func Benchmark_GetSparse(b *testing.B) { - var sink any - for _, stackSize := range stackSizes { - b.Run(fmt.Sprintf("StackSize%d", stackSize), func(b *testing.B) { - bs := makeBranchStack(b, stackSize) - keys := func() [][]byte { - var keys [][]byte - for i := 0; i < b.N; i++ { - keys = append(keys, numToBytes(i)) - } - return keys - }() - b.ResetTimer() - b.ReportAllocs() - for _, key := range keys { - sink, _ = bs.Get(key) - } - }) - } - if sink == nil { - b.Fatal("benchmark did not run") - } - sink = nil -} - -var ( - keySink any - valueSink any -) - -func Benchmark_Iterate(b *testing.B) { - for _, stackSize := range stackSizes { - 
b.Run(fmt.Sprintf("StackSize%d", stackSize), func(b *testing.B) { - bs := makeBranchStack(b, stackSize) - b.ResetTimer() - b.ReportAllocs() - for i := 0; i < b.N; i++ { - iter, _ := bs.Iterator(nil, nil) - for iter.Valid() { - keySink = iter.Key() - valueSink = iter.Value() - iter.Next() - } - _ = iter.Close() - } - }) - } - if valueSink == nil || keySink == nil { - b.Fatal("benchmark did not run") - } - valueSink = nil - keySink = nil -} - -// makeBranchStack creates a branch stack of the given size and initializes it with unique key-value pairs. -func makeBranchStack(b *testing.B, stackSize int) Store[store.KVStore] { - b.Helper() - parent := coretesting.NewMemKV() - branch := NewStore[store.KVStore](parent) - for i := 1; i < stackSize; i++ { - branch = NewStore[store.KVStore](branch) - for j := 0; j < elemsInStack; j++ { - // create unique keys by including the branch index. - key := append(numToBytes(i), numToBytes(j)...) - value := []byte{byte(j)} - err := branch.Set(key, value) - if err != nil { - b.Fatal(err) - } - } - } - return branch -} - -func numToBytes[T ~int](n T) []byte { - return binary.BigEndian.AppendUint64(nil, uint64(n)) -} diff --git a/server/v2/store/snapshot.go b/server/v2/store/snapshot.go deleted file mode 100644 index bf9e5ddb3827..000000000000 --- a/server/v2/store/snapshot.go +++ /dev/null @@ -1,416 +0,0 @@ -package store - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "reflect" - "strconv" - - "github.com/spf13/cobra" - "github.com/spf13/viper" - - "cosmossdk.io/log" - serverv2 "cosmossdk.io/server/v2" - storev2 "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/snapshots" - "cosmossdk.io/store/v2/snapshots/types" -) - -const SnapshotFileName = "_snapshot" - -// ExportSnapshotCmd exports app state to snapshot store. 
-func (s *Server[T]) ExportSnapshotCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "export", - Short: "Export app state to snapshot store", - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - v := serverv2.GetViperFromCmd(cmd) - - height, err := cmd.Flags().GetInt64("height") - if err != nil { - return err - } - - logger := log.NewLogger(cmd.OutOrStdout()) - rootStore, _, err := createRootStore(v, logger) - if err != nil { - return err - } - if height == 0 { - lastCommitId, err := rootStore.LastCommitID() - if err != nil { - return err - } - height = int64(lastCommitId.Version) - } - - cmd.Printf("Exporting snapshot for height %d\n", height) - - sm, err := createSnapshotsManager(cmd, v, logger, rootStore) - if err != nil { - return err - } - - snapshot, err := sm.Create(uint64(height)) - if err != nil { - return err - } - - cmd.Printf("Snapshot created at height %d, format %d, chunks %d\n", snapshot.Height, snapshot.Format, snapshot.Chunks) - return nil - }, - } - - addSnapshotFlagsToCmd(cmd) - cmd.Flags().Int64("height", 0, "Height to export, default to latest state height") - - return cmd -} - -// RestoreSnapshotCmd returns a command to restore a snapshot -func (s *Server[T]) RestoreSnapshotCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "restore ", - Short: "Restore app state from local snapshot", - Long: "Restore app state from local snapshot", - Args: cobra.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - v := serverv2.GetViperFromCmd(cmd) - - height, err := strconv.ParseUint(args[0], 10, 64) - if err != nil { - return err - } - format, err := strconv.ParseUint(args[1], 10, 32) - if err != nil { - return err - } - - logger := log.NewLogger(cmd.OutOrStdout()) - - rootStore, _, err := createRootStore(v, logger) - if err != nil { - return fmt.Errorf("failed to create root store: %w", err) - } - sm, err := createSnapshotsManager(cmd, v, logger, rootStore) - if err != nil { - return err - } - - return sm.RestoreLocalSnapshot(height, uint32(format)) - }, - } - - addSnapshotFlagsToCmd(cmd) - - return cmd -} - -// ListSnapshotsCmd returns the command to list local snapshots -func (s *Server[T]) ListSnapshotsCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "list", - Short: "List local snapshots", - RunE: func(cmd *cobra.Command, args []string) error { - v := serverv2.GetViperFromCmd(cmd) - snapshotStore, err := snapshots.NewStore(filepath.Join(v.GetString(serverv2.FlagHome), "data", "snapshots")) - if err != nil { - return err - } - snapshots, err := snapshotStore.List() - if err != nil { - return fmt.Errorf("failed to list snapshots: %w", err) - } - for _, snapshot := range snapshots { - cmd.Println("height:", snapshot.Height, "format:", snapshot.Format, "chunks:", snapshot.Chunks) - } - - return nil - }, - } - - return cmd -} - -// DeleteSnapshotCmd returns the command to delete a local snapshot -func (s *Server[T]) DeleteSnapshotCmd() *cobra.Command { - return &cobra.Command{ - Use: "delete ", - Short: "Delete a local snapshot", - Args: cobra.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - v := serverv2.GetViperFromCmd(cmd) - - height, err := strconv.ParseUint(args[0], 10, 64) - if err != nil { - return err - } - format, err := strconv.ParseUint(args[1], 10, 32) - if err != nil { - return err - } - - snapshotStore, err := snapshots.NewStore(filepath.Join(v.GetString(serverv2.FlagHome), "data", "snapshots")) - if err != nil { - return err - } - - return snapshotStore.Delete(height, uint32(format)) 
- }, - } -} - -// DumpArchiveCmd returns a command to dump the snapshot as portable archive format -func (s *Server[T]) DumpArchiveCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "dump ", - Short: "Dump the snapshot as portable archive format", - Args: cobra.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - v := serverv2.GetViperFromCmd(cmd) - snapshotStore, err := snapshots.NewStore(filepath.Join(v.GetString(serverv2.FlagHome), "data", "snapshots")) - if err != nil { - return err - } - - output, err := cmd.Flags().GetString("output") - if err != nil { - return err - } - - height, err := strconv.ParseUint(args[0], 10, 64) - if err != nil { - return err - } - format, err := strconv.ParseUint(args[1], 10, 32) - if err != nil { - return err - } - - if output == "" { - output = fmt.Sprintf("%d-%d.tar.gz", height, format) - } - - snapshot, err := snapshotStore.Get(height, uint32(format)) - if err != nil { - return err - } - - if snapshot == nil { - return errors.New("snapshot doesn't exist") - } - - bz, err := snapshot.Marshal() - if err != nil { - return err - } - - fp, err := os.Create(output) - if err != nil { - return err - } - defer fp.Close() - - // since the chunk files are already compressed, we just use fastest compression here - gzipWriter, err := gzip.NewWriterLevel(fp, gzip.BestSpeed) - if err != nil { - return err - } - tarWriter := tar.NewWriter(gzipWriter) - if err := tarWriter.WriteHeader(&tar.Header{ - Name: SnapshotFileName, - Mode: 0o644, - Size: int64(len(bz)), - }); err != nil { - return fmt.Errorf("failed to write snapshot header to tar: %w", err) - } - if _, err := tarWriter.Write(bz); err != nil { - return fmt.Errorf("failed to write snapshot to tar: %w", err) - } - - for i := uint32(0); i < snapshot.Chunks; i++ { - path := snapshotStore.PathChunk(height, uint32(format), i) - tarName := strconv.FormatUint(uint64(i), 10) - if err := processChunk(tarWriter, path, tarName); err != nil { - return err - } - } - - if err := tarWriter.Close(); err != nil { - return fmt.Errorf("failed to close tar writer: %w", err) - } - - if err := gzipWriter.Close(); err != nil { - return fmt.Errorf("failed to close gzip writer: %w", err) - } - - return fp.Close() - }, - } - - cmd.Flags().StringP("output", "o", "", "output file") - - return cmd -} - -// LoadArchiveCmd load a portable archive format snapshot into snapshot store -func (s *Server[T]) LoadArchiveCmd() *cobra.Command { - return &cobra.Command{ - Use: "load ", - Short: "Load a snapshot archive file (.tar.gz) into snapshot store", - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - v := serverv2.GetViperFromCmd(cmd) - snapshotStore, err := snapshots.NewStore(filepath.Join(v.GetString(serverv2.FlagHome), "data", "snapshots")) - if err != nil { - return err - } - - path := args[0] - fp, err := os.Open(path) - if err != nil { - return fmt.Errorf("failed to open archive file: %w", err) - } - reader, err := gzip.NewReader(fp) - if err != nil { - return fmt.Errorf("failed to create gzip reader: %w", err) - } - - var snapshot types.Snapshot - tr := tar.NewReader(reader) - - hdr, err := tr.Next() - if err != nil { - return fmt.Errorf("failed to read snapshot file header: %w", err) - } - if hdr.Name != SnapshotFileName { - return fmt.Errorf("invalid archive, expect file: snapshot, got: %s", hdr.Name) - } - bz, err := io.ReadAll(tr) - if err != nil { - return fmt.Errorf("failed to read snapshot file: %w", err) - } - if err := snapshot.Unmarshal(bz); err != nil { - return 
fmt.Errorf("failed to unmarshal snapshot: %w", err) - } - - // make sure the channel is unbuffered, because the tar reader can't do concurrency - chunks := make(chan io.ReadCloser) - quitChan := make(chan *types.Snapshot) - go func() { - defer close(quitChan) - - savedSnapshot, err := snapshotStore.Save(snapshot.Height, snapshot.Format, chunks) - if err != nil { - cmd.Println("failed to save snapshot", err) - return - } - quitChan <- savedSnapshot - }() - - for i := uint32(0); i < snapshot.Chunks; i++ { - hdr, err = tr.Next() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - if hdr.Name != strconv.FormatInt(int64(i), 10) { - return fmt.Errorf("invalid archive, expect file: %d, got: %s", i, hdr.Name) - } - - bz, err := io.ReadAll(tr) - if err != nil { - return fmt.Errorf("failed to read chunk file: %w", err) - } - chunks <- io.NopCloser(bytes.NewReader(bz)) - } - close(chunks) - - savedSnapshot := <-quitChan - if savedSnapshot == nil { - return errors.New("failed to save snapshot") - } - - if !reflect.DeepEqual(&snapshot, savedSnapshot) { - _ = snapshotStore.Delete(snapshot.Height, snapshot.Format) - return errors.New("invalid archive, the saved snapshot is not equal to the original one") - } - - return nil - }, - } -} - -func createSnapshotsManager( - cmd *cobra.Command, v *viper.Viper, logger log.Logger, store storev2.Backend, -) (*snapshots.Manager, error) { - home := v.GetString(serverv2.FlagHome) - snapshotStore, err := snapshots.NewStore(filepath.Join(home, "data", "snapshots")) - if err != nil { - return nil, err - } - var interval, keepRecent uint64 - // if flag was not passed, use as 0. - if cmd.Flags().Changed(FlagKeepRecent) { - keepRecent, err = cmd.Flags().GetUint64(FlagKeepRecent) - if err != nil { - return nil, err - } - } - if cmd.Flags().Changed(FlagInterval) { - interval, err = cmd.Flags().GetUint64(FlagInterval) - if err != nil { - return nil, err - } - } - - sm := snapshots.NewManager( - snapshotStore, - snapshots.NewSnapshotOptions(interval, uint32(keepRecent)), - store.GetStateCommitment().(snapshots.CommitSnapshotter), - nil, - logger) - return sm, nil -} - -func addSnapshotFlagsToCmd(cmd *cobra.Command) { - cmd.Flags().Uint64(FlagKeepRecent, 0, "KeepRecent defines how many snapshots to keep in heights") - cmd.Flags().Uint64(FlagInterval, 0, "Interval defines at which heights the snapshot is taken") -} - -func processChunk(tarWriter *tar.Writer, path, tarName string) error { - file, err := os.Open(path) - if err != nil { - return fmt.Errorf("failed to open chunk file %s: %w", path, err) - } - defer file.Close() - - st, err := file.Stat() - if err != nil { - return fmt.Errorf("failed to stat chunk file %s: %w", path, err) - } - - if err := tarWriter.WriteHeader(&tar.Header{ - Name: tarName, - Mode: 0o644, - Size: st.Size(), - }); err != nil { - return fmt.Errorf("failed to write chunk header to tar: %w", err) - } - - if _, err := io.Copy(tarWriter, file); err != nil { - return fmt.Errorf("failed to write chunk to tar: %w", err) - } - - return nil -} diff --git a/store/iavl/store_test.go b/store/iavl/store_test.go deleted file mode 100644 index 3a6050e2453e..000000000000 --- a/store/iavl/store_test.go +++ /dev/null @@ -1,710 +0,0 @@ -package iavl - -import ( - "bytes" - crand "crypto/rand" - "fmt" - "math" - "sort" - "testing" - - "github.com/cosmos/iavl" - "github.com/stretchr/testify/require" - - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/log" - "cosmossdk.io/store/cachekv" - 
"cosmossdk.io/store/internal/kv" - "cosmossdk.io/store/metrics" - "cosmossdk.io/store/types" -) - -var ( - cacheSize = 100 - treeData = map[string]string{ - "hello": "goodbye", - "aloha": "shalom", - } - nMoreData = 0 -) - -func randBytes(numBytes int) []byte { - b := make([]byte, numBytes) - _, _ = crand.Read(b) - return b -} - -// make a tree with data from above and save it -func newAlohaTree(t *testing.T, db corestore.KVStoreWithBatch) (*iavl.MutableTree, types.CommitID) { - t.Helper() - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - for k, v := range treeData { - _, err := tree.Set([]byte(k), []byte(v)) - require.NoError(t, err) - } - - for i := 0; i < nMoreData; i++ { - key := randBytes(12) - value := randBytes(50) - _, err := tree.Set(key, value) - require.NoError(t, err) - } - - hash, ver, err := tree.SaveVersion() - require.Nil(t, err) - - return tree, types.CommitID{Version: ver, Hash: hash} -} - -func TestLoadStore(t *testing.T) { - db := coretesting.NewMemDB() - tree, _ := newAlohaTree(t, db) - store := UnsafeNewStore(tree) - - // Create non-pruned height H - updated, err := tree.Set([]byte("hello"), []byte("hallo")) - require.NoError(t, err) - require.True(t, updated) - hash, verH, err := tree.SaveVersion() - cIDH := types.CommitID{Version: verH, Hash: hash} - require.Nil(t, err) - - // Create pruned height Hp - updated, err = tree.Set([]byte("hello"), []byte("hola")) - require.NoError(t, err) - require.True(t, updated) - hash, verHp, err := tree.SaveVersion() - cIDHp := types.CommitID{Version: verHp, Hash: hash} - require.Nil(t, err) - - // Create current height Hc - updated, err = tree.Set([]byte("hello"), []byte("ciao")) - require.NoError(t, err) - require.True(t, updated) - hash, verHc, err := tree.SaveVersion() - cIDHc := types.CommitID{Version: verHc, Hash: hash} - require.Nil(t, err) - - // Querying an existing store at some previous non-pruned height H - hStore, err := store.GetImmutable(verH) - require.NoError(t, err) - require.Equal(t, string(hStore.Get([]byte("hello"))), "hallo") - - // Querying an existing store at some previous pruned height Hp - hpStore, err := store.GetImmutable(verHp) - require.NoError(t, err) - require.Equal(t, string(hpStore.Get([]byte("hello"))), "hola") - - // Querying an existing store at current height Hc - hcStore, err := store.GetImmutable(verHc) - require.NoError(t, err) - require.Equal(t, string(hcStore.Get([]byte("hello"))), "ciao") - - // Querying a new store at some previous non-pruned height H - newHStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDH, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) - require.NoError(t, err) - require.Equal(t, string(newHStore.Get([]byte("hello"))), "hallo") - - // Querying a new store at some previous pruned height Hp - newHpStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHp, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) - require.NoError(t, err) - require.Equal(t, string(newHpStore.Get([]byte("hello"))), "hola") - - // Querying a new store at current height H - newHcStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHc, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) - require.NoError(t, err) - require.Equal(t, string(newHcStore.Get([]byte("hello"))), "ciao") -} - -func TestGetImmutable(t *testing.T) { - db := coretesting.NewMemDB() - tree, _ := newAlohaTree(t, db) - store := UnsafeNewStore(tree) - - updated, err := tree.Set([]byte("hello"), []byte("adios")) 
- require.NoError(t, err) - require.True(t, updated) - hash, ver, err := tree.SaveVersion() - cID := types.CommitID{Version: ver, Hash: hash} - require.Nil(t, err) - - _, err = store.GetImmutable(cID.Version + 1) - require.Error(t, err) - - newStore, err := store.GetImmutable(cID.Version - 1) - require.NoError(t, err) - require.Equal(t, newStore.Get([]byte("hello")), []byte("goodbye")) - - newStore, err = store.GetImmutable(cID.Version) - require.NoError(t, err) - require.Equal(t, newStore.Get([]byte("hello")), []byte("adios")) - - res, err := newStore.Query(&types.RequestQuery{Data: []byte("hello"), Height: cID.Version, Path: "/key", Prove: true}) - require.NoError(t, err) - require.Equal(t, res.Value, []byte("adios")) - require.NotNil(t, res.ProofOps) - - require.Panics(t, func() { newStore.Set(nil, nil) }) - require.Panics(t, func() { newStore.Delete(nil) }) - require.Panics(t, func() { newStore.Commit() }) -} - -func TestTestGetImmutableIterator(t *testing.T) { - db := coretesting.NewMemDB() - tree, cID := newAlohaTree(t, db) - store := UnsafeNewStore(tree) - - newStore, err := store.GetImmutable(cID.Version) - require.NoError(t, err) - - iter := newStore.Iterator([]byte("aloha"), []byte("hellz")) - expected := []string{"aloha", "hello"} - var i int - - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - - require.Equal(t, len(expected), i) -} - -func TestIAVLStoreGetSetHasDelete(t *testing.T) { - db := coretesting.NewMemDB() - tree, _ := newAlohaTree(t, db) - iavlStore := UnsafeNewStore(tree) - - key := "hello" - - exists := iavlStore.Has([]byte(key)) - require.True(t, exists) - - value := iavlStore.Get([]byte(key)) - require.EqualValues(t, value, treeData[key]) - - value2 := "notgoodbye" - iavlStore.Set([]byte(key), []byte(value2)) - - value = iavlStore.Get([]byte(key)) - require.EqualValues(t, value, value2) - - iavlStore.Delete([]byte(key)) - - exists = iavlStore.Has([]byte(key)) - require.False(t, exists) -} - -func TestIAVLStoreNoNilSet(t *testing.T) { - db := coretesting.NewMemDB() - tree, _ := newAlohaTree(t, db) - iavlStore := UnsafeNewStore(tree) - - require.Panics(t, func() { iavlStore.Set(nil, []byte("value")) }, "setting a nil key should panic") - require.Panics(t, func() { iavlStore.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") - - require.Panics(t, func() { iavlStore.Set([]byte("key"), nil) }, "setting a nil value should panic") -} - -func TestIAVLIterator(t *testing.T) { - db := coretesting.NewMemDB() - tree, _ := newAlohaTree(t, db) - iavlStore := UnsafeNewStore(tree) - iter := iavlStore.Iterator([]byte("aloha"), []byte("hellz")) - expected := []string{"aloha", "hello"} - var i int - - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) - - iter = iavlStore.Iterator([]byte("golang"), []byte("rocks")) - expected = []string{"hello"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) - - iter = iavlStore.Iterator(nil, []byte("golang")) - expected = []string{"aloha"} - for i = 0; 
iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) - - iter = iavlStore.Iterator(nil, []byte("shalom")) - expected = []string{"aloha", "hello"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) - - iter = iavlStore.Iterator(nil, nil) - expected = []string{"aloha", "hello"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) - - iter = iavlStore.Iterator([]byte("golang"), nil) - expected = []string{"hello"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) -} - -func TestIAVLReverseIterator(t *testing.T) { - db := coretesting.NewMemDB() - - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - iavlStore := UnsafeNewStore(tree) - - iavlStore.Set([]byte{0x00}, []byte("0")) - iavlStore.Set([]byte{0x00, 0x00}, []byte("0 0")) - iavlStore.Set([]byte{0x00, 0x01}, []byte("0 1")) - iavlStore.Set([]byte{0x00, 0x02}, []byte("0 2")) - iavlStore.Set([]byte{0x01}, []byte("1")) - - testReverseIterator := func(t *testing.T, start, end []byte, expected []string) { - t.Helper() - iter := iavlStore.ReverseIterator(start, end) - var i int - for i = 0; iter.Valid(); iter.Next() { - expectedValue := expected[i] - value := iter.Value() - require.EqualValues(t, string(value), expectedValue) - i++ - } - require.Equal(t, len(expected), i) - } - - testReverseIterator(t, nil, nil, []string{"1", "0 2", "0 1", "0 0", "0"}) - testReverseIterator(t, []byte{0x00}, nil, []string{"1", "0 2", "0 1", "0 0", "0"}) - testReverseIterator(t, []byte{0x00}, []byte{0x00, 0x01}, []string{"0 0", "0"}) - testReverseIterator(t, []byte{0x00}, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"}) - testReverseIterator(t, []byte{0x00, 0x01}, []byte{0x01}, []string{"0 2", "0 1"}) - testReverseIterator(t, nil, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"}) -} - -func TestIAVLPrefixIterator(t *testing.T) { - db := coretesting.NewMemDB() - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - iavlStore := UnsafeNewStore(tree) - - iavlStore.Set([]byte("test1"), []byte("test1")) - iavlStore.Set([]byte("test2"), []byte("test2")) - iavlStore.Set([]byte("test3"), []byte("test3")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(0)}, []byte("test4")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(1)}, []byte("test4")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(255)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(0)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(1)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(255)}, []byte("test4")) - - var i int - - iter := types.KVStorePrefixIterator(iavlStore, []byte("test")) - expected := []string{"test1", "test2", "test3"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey 
:= expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, expectedKey) - i++ - } - iter.Close() - require.Equal(t, len(expected), i) - - iter = types.KVStorePrefixIterator(iavlStore, []byte{byte(55), byte(255), byte(255)}) - expected2 := [][]byte{ - {byte(55), byte(255), byte(255), byte(0)}, - {byte(55), byte(255), byte(255), byte(1)}, - {byte(55), byte(255), byte(255), byte(255)}, - } - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected2[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, []byte("test4")) - i++ - } - iter.Close() - require.Equal(t, len(expected), i) - - iter = types.KVStorePrefixIterator(iavlStore, []byte{byte(255), byte(255)}) - expected2 = [][]byte{ - {byte(255), byte(255), byte(0)}, - {byte(255), byte(255), byte(1)}, - {byte(255), byte(255), byte(255)}, - } - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected2[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, []byte("test4")) - i++ - } - iter.Close() - require.Equal(t, len(expected), i) -} - -func TestIAVLReversePrefixIterator(t *testing.T) { - db := coretesting.NewMemDB() - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - iavlStore := UnsafeNewStore(tree) - - iavlStore.Set([]byte("test1"), []byte("test1")) - iavlStore.Set([]byte("test2"), []byte("test2")) - iavlStore.Set([]byte("test3"), []byte("test3")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(0)}, []byte("test4")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(1)}, []byte("test4")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(255)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(0)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(1)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(255)}, []byte("test4")) - - var i int - - iter := types.KVStoreReversePrefixIterator(iavlStore, []byte("test")) - expected := []string{"test3", "test2", "test1"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, expectedKey) - i++ - } - require.Equal(t, len(expected), i) - - iter = types.KVStoreReversePrefixIterator(iavlStore, []byte{byte(55), byte(255), byte(255)}) - expected2 := [][]byte{ - {byte(55), byte(255), byte(255), byte(255)}, - {byte(55), byte(255), byte(255), byte(1)}, - {byte(55), byte(255), byte(255), byte(0)}, - } - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected2[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, []byte("test4")) - i++ - } - require.Equal(t, len(expected), i) - - iter = types.KVStoreReversePrefixIterator(iavlStore, []byte{byte(255), byte(255)}) - expected2 = [][]byte{ - {byte(255), byte(255), byte(255)}, - {byte(255), byte(255), byte(1)}, - {byte(255), byte(255), byte(0)}, - } - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected2[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, []byte("test4")) - i++ - } - require.Equal(t, len(expected), i) -} - -func nextVersion(iavl *Store) { - key := []byte(fmt.Sprintf("Key for tree: %d", iavl.LastCommitID().Version)) - value := []byte(fmt.Sprintf("Value for 
tree: %d", iavl.LastCommitID().Version)) - iavl.Set(key, value) - iavl.Commit() -} - -func TestIAVLNoPrune(t *testing.T) { - db := coretesting.NewMemDB() - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - iavlStore := UnsafeNewStore(tree) - nextVersion(iavlStore) - - for i := 1; i < 100; i++ { - for j := 1; j <= i; j++ { - require.True(t, iavlStore.VersionExists(int64(j)), - "Missing version %d with latest version %d. Should be storing all versions", - j, i) - } - - nextVersion(iavlStore) - } -} - -func TestIAVLStoreQuery(t *testing.T) { - db := coretesting.NewMemDB() - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - iavlStore := UnsafeNewStore(tree) - - k1, v1 := []byte("key1"), []byte("val1") - k2, v2 := []byte("key2"), []byte("val2") - v3 := []byte("val3") - - ksub := []byte("key") - KVs0 := kv.Pairs{} //nolint:staticcheck // We are in store v1. - KVs1 := kv.Pairs{ //nolint:staticcheck // We are in store v1. - Pairs: []kv.Pair{ //nolint:staticcheck // We are in store v1. - {Key: k1, Value: v1}, - {Key: k2, Value: v2}, - }, - } - KVs2 := kv.Pairs{ //nolint:staticcheck // We are in store v1. - Pairs: []kv.Pair{ //nolint:staticcheck // We are in store v1. - {Key: k1, Value: v3}, - {Key: k2, Value: v2}, - }, - } - - valExpSubEmpty, err := KVs0.Marshal() - require.NoError(t, err) - - valExpSub1, err := KVs1.Marshal() - require.NoError(t, err) - - valExpSub2, err := KVs2.Marshal() - require.NoError(t, err) - - cid := iavlStore.Commit() - ver := cid.Version - query := types.RequestQuery{Path: "/key", Data: k1, Height: ver} - querySub := types.RequestQuery{Path: "/subspace", Data: ksub, Height: ver} - - // query subspace before anything set - qres, err := iavlStore.Query(&querySub) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, valExpSubEmpty, qres.Value) - - // set data - iavlStore.Set(k1, v1) - iavlStore.Set(k2, v2) - - // set data without commit, doesn't show up - qres, err = iavlStore.Query(&query) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Nil(t, qres.Value) - - // commit it, but still don't see on old version - cid = iavlStore.Commit() - qres, err = iavlStore.Query(&query) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Nil(t, qres.Value) - - // but yes on the new version - query.Height = cid.Version - qres, err = iavlStore.Query(&query) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, v1, qres.Value) - - // and for the subspace - qres, err = iavlStore.Query(&querySub) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, valExpSub1, qres.Value) - - // modify - iavlStore.Set(k1, v3) - cid = iavlStore.Commit() - - // query will return old values, as height is fixed - qres, err = iavlStore.Query(&query) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, v1, qres.Value) - - // update to latest in the query and we are happy - query.Height = cid.Version - qres, err = iavlStore.Query(&query) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, v3, qres.Value) - query2 := types.RequestQuery{Path: "/key", Data: k2, Height: cid.Version} - - qres, err = iavlStore.Query(&query2) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, v2, qres.Value) - // and for the subspace - qres, err = iavlStore.Query(&querySub) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - 
require.Equal(t, valExpSub2, qres.Value) - - // default (height 0) will show latest -1 - query0 := types.RequestQuery{Path: "/key", Data: k1} - qres, err = iavlStore.Query(&query0) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, v1, qres.Value) -} - -func BenchmarkIAVLIteratorNext(b *testing.B) { - b.ReportAllocs() - db := coretesting.NewMemDB() - treeSize := 1000 - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - for i := 0; i < treeSize; i++ { - key := randBytes(4) - value := randBytes(50) - _, err := tree.Set(key, value) - require.NoError(b, err) - } - - iavlStore := UnsafeNewStore(tree) - iterators := make([]types.Iterator, b.N/treeSize) - - for i := 0; i < len(iterators); i++ { - iterators[i] = iavlStore.Iterator([]byte{0}, []byte{255, 255, 255, 255, 255}) - } - - b.ResetTimer() - for i := 0; i < len(iterators); i++ { - iter := iterators[i] - for j := 0; j < treeSize; j++ { - iter.Next() - } - } -} - -func TestSetInitialVersion(t *testing.T) { - testCases := []struct { - name string - storeFn func(db corestore.KVStoreWithBatch) *Store - expPanic bool - }{ - { - "works with a mutable tree", - func(db corestore.KVStoreWithBatch) *Store { - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - store := UnsafeNewStore(tree) - - return store - }, false, - }, - { - "throws error on immutable tree", - func(db corestore.KVStoreWithBatch) *Store { - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - store := UnsafeNewStore(tree) - _, version, err := store.tree.SaveVersion() - require.NoError(t, err) - require.Equal(t, int64(1), version) - store, err = store.GetImmutable(1) - require.NoError(t, err) - - return store - }, true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - db := coretesting.NewMemDB() - store := tc.storeFn(db) - - if tc.expPanic { - require.Panics(t, func() { store.SetInitialVersion(5) }) - } else { - store.SetInitialVersion(5) - cid := store.Commit() - require.Equal(t, int64(5), cid.GetVersion()) - } - }) - } -} - -func TestCacheWraps(t *testing.T) { - db := coretesting.NewMemDB() - tree, _ := newAlohaTree(t, db) - store := UnsafeNewStore(tree) - - cacheWrapper := store.CacheWrap() - require.IsType(t, &cachekv.Store{}, cacheWrapper) - - cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) -} - -func TestChangeSets(t *testing.T) { - db := coretesting.NewMemDB() - treeSize := 1000 - treeVersion := int64(10) - targetVersion := int64(6) - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger(), iavl.FlushThresholdOption(math.MaxInt)) - - for j := int64(0); j < treeVersion; j++ { - keys := [][]byte{} - for i := 0; i < treeSize; i++ { - keys = append(keys, randBytes(4)) - } - sort.Slice(keys, func(p, q int) bool { - return bytes.Compare(keys[p], keys[q]) < 0 - }) - for i := 0; i < treeSize; i++ { - key := keys[i] - value := randBytes(50) - _, err := tree.Set(key, value) - require.NoError(t, err) - } - _, _, err := tree.SaveVersion() - require.NoError(t, err) - } - - changeSets := []*iavl.ChangeSet{} - iavlStore := UnsafeNewStore(tree) - commitID := iavlStore.LastCommitID() - - require.NoError(t, iavlStore.TraverseStateChanges(targetVersion+1, treeVersion, func(v int64, cs *iavl.ChangeSet) error { - changeSets = append(changeSets, cs) - return nil - })) - require.NoError(t, iavlStore.LoadVersionForOverwriting(targetVersion)) - - for i, cs := range changeSets { - v, 
err := tree.SaveChangeSet(cs)
-		require.NoError(t, err)
-		require.Equal(t, v, targetVersion+int64(i+1))
-	}
-
-	restoreCommitID := iavlStore.LastCommitID()
-	require.Equal(t, commitID, restoreCommitID)
-}
diff --git a/store/v2/commitment/iavl/tree.go b/store/v2/commitment/iavl/tree.go
deleted file mode 100644
index 4aaac08ab8bf..000000000000
--- a/store/v2/commitment/iavl/tree.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package iavl
-
-import (
-	"fmt"
-
-	"github.com/cosmos/iavl"
-	ics23 "github.com/cosmos/ics23/go"
-
-	"cosmossdk.io/core/log"
-	corestore "cosmossdk.io/core/store"
-	"cosmossdk.io/store/v2"
-	"cosmossdk.io/store/v2/commitment"
-)
-
-var (
-	_ commitment.Tree      = (*IavlTree)(nil)
-	_ commitment.Reader    = (*IavlTree)(nil)
-	_ store.PausablePruner = (*IavlTree)(nil)
-)
-
-// IavlTree is a wrapper around iavl.MutableTree.
-type IavlTree struct {
-	tree *iavl.MutableTree
-}
-
-// NewIavlTree creates a new IavlTree instance.
-func NewIavlTree(db corestore.KVStoreWithBatch, logger log.Logger, cfg *Config) *IavlTree {
-	tree := iavl.NewMutableTree(db, cfg.CacheSize, cfg.SkipFastStorageUpgrade, logger, iavl.AsyncPruningOption(true))
-	return &IavlTree{
-		tree: tree,
-	}
-}
-
-// Remove removes the given key from the tree.
-func (t *IavlTree) Remove(key []byte) error {
-	_, _, err := t.tree.Remove(key)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// Set sets the given key-value pair in the tree.
-func (t *IavlTree) Set(key, value []byte) error {
-	_, err := t.tree.Set(key, value)
-	return err
-}
-
-// Hash returns the hash of the latest saved version of the tree.
-func (t *IavlTree) Hash() []byte {
-	return t.tree.Hash()
-}
-
-// Version returns the current version of the tree.
-func (t *IavlTree) Version() uint64 {
-	return uint64(t.tree.Version())
-}
-
-// WorkingHash returns the working hash of the tree.
-// Danger! iavl.MutableTree.WorkingHash() is a mutating operation!
-// It advances the tree version by 1.
-func (t *IavlTree) WorkingHash() []byte {
-	return t.tree.WorkingHash()
-}
-
-// LoadVersion loads the state at the given version.
-func (t *IavlTree) LoadVersion(version uint64) error {
-	_, err := t.tree.LoadVersion(int64(version))
-	return err
-}
-
-// LoadVersionForOverwriting loads the state at the given version.
-// Any versions greater than the given version will be deleted.
-func (t *IavlTree) LoadVersionForOverwriting(version uint64) error {
-	return t.tree.LoadVersionForOverwriting(int64(version))
-}
-
-// Commit commits the current state to the tree.
-func (t *IavlTree) Commit() ([]byte, uint64, error) {
-	hash, v, err := t.tree.SaveVersion()
-	return hash, uint64(v), err
-}
-
-// GetProof returns a proof for the given key and version.
-func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) {
-	// the mutable tree is empty at genesis & when the storekey is removed,
-	// but the immutable tree is not empty when the storekey is removed;
-	// by checking the latest version we can determine if we are in genesis or have a key that has been removed
-	lv, err := t.tree.GetLatestVersion()
-	if err != nil {
-		return nil, err
-	}
-	if lv == 0 {
-		return t.tree.GetProof(key)
-	}
-
-	immutableTree, err := t.tree.GetImmutable(int64(version))
-	if err != nil {
-		return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err)
-	}
-
-	return immutableTree.GetProof(key)
-}
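
Since Get, GetProof, and Iterator all share the genesis fallback described in the comment above, a caller reading historical state pairs naturally with proof retrieval. An illustrative helper against the API being deleted here (the function name is invented):

    package example

    import (
        ics23 "github.com/cosmos/ics23/go"

        "cosmossdk.io/store/v2/commitment/iavl"
    )

    // readWithProof returns a key's value at a version together with its ICS23 proof.
    func readWithProof(t *iavl.IavlTree, version uint64, key []byte) ([]byte, *ics23.CommitmentProof, error) {
        value, err := t.Get(version, key)
        if err != nil {
            return nil, nil, err
        }
        proof, err := t.GetProof(version, key)
        if err != nil {
            return nil, nil, err
        }
        return value, proof, nil
    }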
-// Get implements the Reader interface.
-func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) {
-	// the mutable tree is empty at genesis & when the storekey is removed,
-	// but the immutable tree is not empty when the storekey is removed;
-	// by checking the latest version we can determine if we are in genesis or have a key that has been removed
-	lv, err := t.tree.GetLatestVersion()
-	if err != nil {
-		return nil, err
-	}
-	if lv == 0 {
-		return t.tree.Get(key)
-	}
-
-	immutableTree, err := t.tree.GetImmutable(int64(version))
-	if err != nil {
-		return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err)
-	}
-
-	return immutableTree.Get(key)
-}
-
-// Iterator implements the Reader interface.
-func (t *IavlTree) Iterator(version uint64, start, end []byte, ascending bool) (corestore.Iterator, error) {
-	// the mutable tree is empty at genesis & when the storekey is removed,
-	// but the immutable tree is not empty when the storekey is removed;
-	// by checking the latest version we can determine if we are in genesis or have a key that has been removed
-	lv, err := t.tree.GetLatestVersion()
-	if err != nil {
-		return nil, err
-	}
-	if lv == 0 {
-		return t.tree.Iterator(start, end, ascending)
-	}
-
-	immutableTree, err := t.tree.GetImmutable(int64(version))
-	if err != nil {
-		return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err)
-	}
-
-	return immutableTree.Iterator(start, end, ascending)
-}
-
-// GetLatestVersion returns the latest version of the tree.
-func (t *IavlTree) GetLatestVersion() (uint64, error) {
-	v, err := t.tree.GetLatestVersion()
-	return uint64(v), err
-}
-
-// SetInitialVersion sets the initial version of the database.
-func (t *IavlTree) SetInitialVersion(version uint64) error {
-	t.tree.SetInitialVersion(version)
-	return nil
-}
-
-// Prune prunes all versions up to and including the provided version.
-func (t *IavlTree) Prune(version uint64) error {
-	return t.tree.DeleteVersionsTo(int64(version))
-}
-
-// PausePruning pauses or resumes the pruning process.
-func (t *IavlTree) PausePruning(pause bool) {
-	if pause {
-		t.tree.SetCommitting()
-	} else {
-		t.tree.UnsetCommitting()
-	}
-}
-
-// Export returns an exporter for the tree at the given version.
-func (t *IavlTree) Export(version uint64) (commitment.Exporter, error) {
-	tree, err := t.tree.GetImmutable(int64(version))
-	if err != nil {
-		return nil, err
-	}
-	exporter, err := tree.Export()
-	if err != nil {
-		return nil, err
-	}
-
-	return &Exporter{
-		exporter: exporter,
-	}, nil
-}
-
-// Import returns an importer for the tree at the given version.
-func (t *IavlTree) Import(version uint64) (commitment.Importer, error) {
-	importer, err := t.tree.Import(int64(version))
-	if err != nil {
-		return nil, err
-	}
-
-	return &Importer{
-		importer: importer,
-	}, nil
-}
-
-// Close closes the iavl tree.
-func (t *IavlTree) Close() error { - return t.tree.Close() -} diff --git a/store/v2/commitment/store.go b/store/v2/commitment/store.go deleted file mode 100644 index aa383b57ae56..000000000000 --- a/store/v2/commitment/store.go +++ /dev/null @@ -1,571 +0,0 @@ -package commitment - -import ( - "errors" - "fmt" - "io" - "maps" - "math" - "slices" - - protoio "github.com/cosmos/gogoproto/io" - - corelog "cosmossdk.io/core/log" - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/internal" - "cosmossdk.io/store/v2/internal/conv" - "cosmossdk.io/store/v2/proof" - "cosmossdk.io/store/v2/snapshots" - snapshotstypes "cosmossdk.io/store/v2/snapshots/types" -) - -var ( - _ store.Committer = (*CommitStore)(nil) - _ store.UpgradeableStore = (*CommitStore)(nil) - _ snapshots.CommitSnapshotter = (*CommitStore)(nil) - _ store.PausablePruner = (*CommitStore)(nil) - - // NOTE: It is not recommended to use the CommitStore as a reader. This is only used - // during the migration process. Generally, the SC layer does not provide a reader - // in the store/v2. - _ store.VersionedReader = (*CommitStore)(nil) -) - -// MountTreeFn is a function that mounts a tree given a store key. -// It is used to lazily mount trees when needed (e.g. during upgrade or proof generation). -type MountTreeFn func(storeKey string) (Tree, error) - -// CommitStore is a wrapper around multiple Tree objects mapped by a unique store -// key. Each store key reflects dedicated and unique usage within a module. A caller -// can construct a CommitStore with one or more store keys. It is expected that a -// RootStore use a CommitStore as an abstraction to handle multiple store keys -// and trees. -type CommitStore struct { - logger corelog.Logger - metadata *MetadataStore - multiTrees map[string]Tree - // oldTrees is a map of store keys to old trees that have been deleted or renamed. - // It is used to get the proof for the old store keys. - oldTrees map[string]Tree -} - -// NewCommitStore creates a new CommitStore instance. -func NewCommitStore(trees, oldTrees map[string]Tree, db corestore.KVStoreWithBatch, logger corelog.Logger) (*CommitStore, error) { - return &CommitStore{ - logger: logger, - multiTrees: trees, - oldTrees: oldTrees, - metadata: NewMetadataStore(db), - }, nil -} - -func (c *CommitStore) WriteChangeset(cs *corestore.Changeset) error { - for _, pairs := range cs.Changes { - key := conv.UnsafeBytesToStr(pairs.Actor) - - tree, ok := c.multiTrees[key] - if !ok { - return fmt.Errorf("store key %s not found in multiTrees", key) - } - for _, kv := range pairs.StateChanges { - if kv.Remove { - if err := tree.Remove(kv.Key); err != nil { - return err - } - } else if err := tree.Set(kv.Key, kv.Value); err != nil { - return err - } - } - } - - return nil -} - -func (c *CommitStore) LoadVersion(targetVersion uint64) error { - storeKeys := make([]string, 0, len(c.multiTrees)) - for storeKey := range c.multiTrees { - storeKeys = append(storeKeys, storeKey) - } - return c.loadVersion(targetVersion, storeKeys, false) -} - -func (c *CommitStore) LoadVersionForOverwriting(targetVersion uint64) error { - storeKeys := make([]string, 0, len(c.multiTrees)) - for storeKey := range c.multiTrees { - storeKeys = append(storeKeys, storeKey) - } - - return c.loadVersion(targetVersion, storeKeys, true) -} - -// LoadVersionAndUpgrade implements store.UpgradeableStore. 
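-// A hedged sketch of applying upgrades at load time (the store names are
-// illustrative, mirroring the store_test_suite usage):
-//
-//	upgrades := &corestore.StoreUpgrades{
-//		Added:   []string{"newStore1"},
-//		Deleted: []string{"store3"},
-//	}
-//	// load committed version 10; the added store starts at version 11 and the
-//	// deleted store's tree is closed and scheduled for pruning
-//	err := commitStore.LoadVersionAndUpgrade(10, upgrades)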
-func (c *CommitStore) LoadVersionAndUpgrade(targetVersion uint64, upgrades *corestore.StoreUpgrades) error { - // deterministic iteration order for upgrades (as the underlying store may change and - // upgrades make store changes where the execution order may matter) - storeKeys := slices.Sorted(maps.Keys(c.multiTrees)) - removeTree := func(storeKey string) error { - if oldTree, ok := c.multiTrees[storeKey]; ok { - if err := oldTree.Close(); err != nil { - return err - } - delete(c.multiTrees, storeKey) - } - return nil - } - - newStoreKeys := make([]string, 0, len(c.multiTrees)) - removedStoreKeys := make([]string, 0) - for _, storeKey := range storeKeys { - // If it has been deleted, remove the tree. - if upgrades.IsDeleted(storeKey) { - if err := removeTree(storeKey); err != nil { - return err - } - removedStoreKeys = append(removedStoreKeys, storeKey) - continue - } - - // If it has been added, set the initial version. - if upgrades.IsAdded(storeKey) { - if err := c.multiTrees[storeKey].SetInitialVersion(targetVersion + 1); err != nil { - return err - } - // This is the empty tree, no need to load the version. - continue - } - - newStoreKeys = append(newStoreKeys, storeKey) - } - - if err := c.metadata.flushRemovedStoreKeys(targetVersion, removedStoreKeys); err != nil { - return err - } - - return c.loadVersion(targetVersion, newStoreKeys, true) -} - -func (c *CommitStore) loadVersion(targetVersion uint64, storeKeys []string, overrideAfter bool) error { - // Rollback the metadata to the target version. - latestVersion, err := c.GetLatestVersion() - if err != nil { - return err - } - if targetVersion < latestVersion { - for version := latestVersion; version > targetVersion; version-- { - if err = c.metadata.deleteCommitInfo(version); err != nil { - return err - } - } - if err := c.metadata.setLatestVersion(targetVersion); err != nil { - return err - } - } - - for _, storeKey := range storeKeys { - if overrideAfter { - if err := c.multiTrees[storeKey].LoadVersionForOverwriting(targetVersion); err != nil { - return err - } - } else { - if err := c.multiTrees[storeKey].LoadVersion(targetVersion); err != nil { - return err - } - } - } - - // If the target version is greater than the latest version, it is the snapshot - // restore case, we should create a new commit info for the target version. 
- if targetVersion > latestVersion { - cInfo, err := c.GetCommitInfo(targetVersion) - if err != nil { - return err - } - return c.metadata.flushCommitInfo(targetVersion, cInfo) - } - - return nil -} - -func (c *CommitStore) Commit(version uint64) (*proof.CommitInfo, error) { - storeInfos := make([]proof.StoreInfo, 0, len(c.multiTrees)) - - for storeKey, tree := range c.multiTrees { - if internal.IsMemoryStoreKey(storeKey) { - continue - } - hash, cversion, err := tree.Commit() - if err != nil { - return nil, err - } - if cversion != version { - return nil, fmt.Errorf("commit version %d does not match the target version %d", cversion, version) - } - commitID := proof.CommitID{ - Version: version, - Hash: hash, - } - storeInfos = append(storeInfos, proof.StoreInfo{ - Name: []byte(storeKey), - CommitID: commitID, - }) - } - - cInfo := &proof.CommitInfo{ - Version: version, - StoreInfos: storeInfos, - } - - if err := c.metadata.flushCommitInfo(version, cInfo); err != nil { - return nil, err - } - - return cInfo, nil -} - -func (c *CommitStore) SetInitialVersion(version uint64) error { - for _, tree := range c.multiTrees { - if err := tree.SetInitialVersion(version); err != nil { - return err - } - } - - return nil -} - -// GetProof returns a proof for the given key and version. -func (c *CommitStore) GetProof(storeKey []byte, version uint64, key []byte) ([]proof.CommitmentOp, error) { - rawStoreKey := conv.UnsafeBytesToStr(storeKey) - tree, ok := c.multiTrees[rawStoreKey] - if !ok { - tree, ok = c.oldTrees[rawStoreKey] - if !ok { - return nil, fmt.Errorf("store %s not found", rawStoreKey) - } - } - - iProof, err := tree.GetProof(version, key) - if err != nil { - return nil, err - } - cInfo, err := c.metadata.GetCommitInfo(version) - if err != nil { - return nil, err - } - if cInfo == nil { - return nil, fmt.Errorf("commit info not found for version %d", version) - } - commitOp := proof.NewIAVLCommitmentOp(key, iProof) - _, storeCommitmentOp, err := cInfo.GetStoreProof(storeKey) - if err != nil { - return nil, err - } - - return []proof.CommitmentOp{commitOp, *storeCommitmentOp}, nil -} - -// getReader returns a reader for the given store key. It will return an error if the -// store key does not exist or the tree does not implement the Reader interface. -// WARNING: This function is only used during the migration process. The SC layer -// generally does not provide a reader for the CommitStore. -func (c *CommitStore) getReader(storeKey string) (Reader, error) { - var tree Tree - if storeTree, ok := c.oldTrees[storeKey]; ok { - tree = storeTree - } else if storeTree, ok := c.multiTrees[storeKey]; ok { - tree = storeTree - } else { - return nil, fmt.Errorf("store %s not found", storeKey) - } - - reader, ok := tree.(Reader) - if !ok { - return nil, fmt.Errorf("tree for store %s does not implement Reader", storeKey) - } - - return reader, nil -} - -// VersionExists implements store.VersionedReader. -func (c *CommitStore) VersionExists(version uint64) (bool, error) { - latestVersion, err := c.metadata.GetLatestVersion() - if err != nil { - return false, err - } - if latestVersion == 0 { - return version == 0, nil - } - - ci, err := c.metadata.GetCommitInfo(version) - return ci != nil, err -} - -// Get implements store.VersionedReader. 
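-// As with getReader, this path is intended for the migration process rather
-// than for general reads; a hedged sketch (store key, version, and key are
-// illustrative):
-//
-//	val, err := commitStore.Get([]byte("store1"), 5, []byte("key-5-0"))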
-func (c *CommitStore) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) { - reader, err := c.getReader(conv.UnsafeBytesToStr(storeKey)) - if err != nil { - return nil, err - } - - bz, err := reader.Get(version, key) - if err != nil { - return nil, fmt.Errorf("failed to get key %s from store %s: %w", key, storeKey, err) - } - - return bz, nil -} - -// Has implements store.VersionedReader. -func (c *CommitStore) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - val, err := c.Get(storeKey, version, key) - return val != nil, err -} - -// Iterator implements store.VersionedReader. -func (c *CommitStore) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - reader, err := c.getReader(conv.UnsafeBytesToStr(storeKey)) - if err != nil { - return nil, err - } - - return reader.Iterator(version, start, end, true) -} - -// ReverseIterator implements store.VersionedReader. -func (c *CommitStore) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - reader, err := c.getReader(conv.UnsafeBytesToStr(storeKey)) - if err != nil { - return nil, err - } - - return reader.Iterator(version, start, end, false) -} - -// Prune implements store.Pruner. -func (c *CommitStore) Prune(version uint64) error { - // prune the metadata - for v := version; v > 0; v-- { - if err := c.metadata.deleteCommitInfo(v); err != nil { - return err - } - } - // prune the trees - for _, tree := range c.multiTrees { - if err := tree.Prune(version); err != nil { - return err - } - } - // prune the removed store keys - if err := c.pruneRemovedStoreKeys(version); err != nil { - return err - } - - return nil -} - -func (c *CommitStore) pruneRemovedStoreKeys(version uint64) error { - clearKVStore := func(storeKey []byte, version uint64) (err error) { - tree, ok := c.oldTrees[string(storeKey)] - if !ok { - return fmt.Errorf("store %s not found in oldTrees", storeKey) - } - return tree.Prune(version) - } - return c.metadata.deleteRemovedStoreKeys(version, clearKVStore) -} - -// PausePruning implements store.PausablePruner. -func (c *CommitStore) PausePruning(pause bool) { - for _, tree := range c.multiTrees { - if pruner, ok := tree.(store.PausablePruner); ok { - pruner.PausePruning(pause) - } - } -} - -// Snapshot implements snapshotstypes.CommitSnapshotter. 
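-// A hedged sketch of driving Snapshot with the stream writer used in the
-// tests (the chunk buffer size is illustrative):
-//
-//	chunks := make(chan io.ReadCloser, 32)
-//	streamWriter := snapshots.NewStreamWriter(chunks)
-//	defer streamWriter.Close()
-//	if err := commitStore.Snapshot(10, streamWriter); err != nil {
-//		return err
-//	}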
-func (c *CommitStore) Snapshot(version uint64, protoWriter protoio.Writer) error { - if version == 0 { - return errors.New("the snapshot version must be greater than 0") - } - - latestVersion, err := c.GetLatestVersion() - if err != nil { - return err - } - if version > latestVersion { - return fmt.Errorf("the snapshot version %d is greater than the latest version %d", version, latestVersion) - } - - for storeKey, tree := range c.multiTrees { - // TODO: check the parallelism of this loop - if err := func() error { - exporter, err := tree.Export(version) - if err != nil { - return fmt.Errorf("failed to export tree for version %d: %w", version, err) - } - defer exporter.Close() - - err = protoWriter.WriteMsg(&snapshotstypes.SnapshotItem{ - Item: &snapshotstypes.SnapshotItem_Store{ - Store: &snapshotstypes.SnapshotStoreItem{ - Name: storeKey, - }, - }, - }) - if err != nil { - return fmt.Errorf("failed to write store name: %w", err) - } - - for { - item, err := exporter.Next() - if errors.Is(err, ErrorExportDone) { - break - } else if err != nil { - return fmt.Errorf("failed to get the next export node: %w", err) - } - - if err = protoWriter.WriteMsg(&snapshotstypes.SnapshotItem{ - Item: &snapshotstypes.SnapshotItem_IAVL{ - IAVL: item, - }, - }); err != nil { - return fmt.Errorf("failed to write iavl node: %w", err) - } - } - - return nil - }(); err != nil { - return err - } - } - - return nil -} - -// Restore implements snapshotstypes.CommitSnapshotter. -func (c *CommitStore) Restore( - version uint64, - format uint32, - protoReader protoio.Reader, -) (snapshotstypes.SnapshotItem, error) { - var ( - importer Importer - snapshotItem snapshotstypes.SnapshotItem - ) - -loop: - for { - snapshotItem = snapshotstypes.SnapshotItem{} - err := protoReader.ReadMsg(&snapshotItem) - if errors.Is(err, io.EOF) { - break - } else if err != nil { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("invalid protobuf message: %w", err) - } - - switch item := snapshotItem.Item.(type) { - case *snapshotstypes.SnapshotItem_Store: - if importer != nil { - if err := importer.Commit(); err != nil { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to commit importer: %w", err) - } - if err := importer.Close(); err != nil { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to close importer: %w", err) - } - } - tree := c.multiTrees[item.Store.Name] - if tree == nil { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("store %s not found", item.Store.Name) - } - importer, err = tree.Import(version) - if err != nil { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to import tree for version %d: %w", version, err) - } - defer importer.Close() - - case *snapshotstypes.SnapshotItem_IAVL: - if importer == nil { - return snapshotstypes.SnapshotItem{}, errors.New("received IAVL node item before store item") - } - node := item.IAVL - if node.Height > int32(math.MaxInt8) { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("node height %v cannot exceed %v", - item.IAVL.Height, math.MaxInt8) - } - // Protobuf does not differentiate between []byte{} and nil, but fortunately IAVL does - // not allow nil keys nor nil values for leaf nodes, so we can always set them to empty. 
-			if node.Key == nil {
-				node.Key = []byte{}
-			}
-			if node.Height == 0 {
-				if node.Value == nil {
-					node.Value = []byte{}
-				}
-			}
-			err := importer.Add(node)
-			if err != nil {
-				return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to add node to importer: %w", err)
-			}
-		default:
-			break loop
-		}
-	}
-
-	if importer != nil {
-		if err := importer.Commit(); err != nil {
-			return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to commit importer: %w", err)
-		}
-	}
-
-	return snapshotItem, c.LoadVersion(version)
-}
-
-func (c *CommitStore) GetCommitInfo(version uint64) (*proof.CommitInfo, error) {
-	// if the commit info is already stored, return it
-	ci, err := c.metadata.GetCommitInfo(version)
-	if err != nil {
-		return nil, err
-	}
-	if ci != nil {
-		return ci, nil
-	}
-	// otherwise build the commit info from the trees
-	storeInfos := make([]proof.StoreInfo, 0, len(c.multiTrees))
-	for storeKey, tree := range c.multiTrees {
-		if internal.IsMemoryStoreKey(storeKey) {
-			continue
-		}
-		v := tree.Version()
-		if v != version {
-			return nil, fmt.Errorf("tree version %d does not match the target version %d", v, version)
-		}
-		bz := []byte(storeKey)
-		storeInfos = append(storeInfos, proof.StoreInfo{
-			Name: bz,
-			CommitID: proof.CommitID{
-				Version: v,
-				Hash:    tree.Hash(),
-			},
-		})
-	}
-
-	ci = &proof.CommitInfo{
-		Version:    version,
-		StoreInfos: storeInfos,
-	}
-	return ci, nil
-}
-
-func (c *CommitStore) GetLatestVersion() (uint64, error) {
-	return c.metadata.GetLatestVersion()
-}
-
-func (c *CommitStore) Close() error {
-	for _, tree := range c.multiTrees {
-		if err := tree.Close(); err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/store/v2/commitment/store_test_suite.go b/store/v2/commitment/store_test_suite.go
deleted file mode 100644
index b91119301c1e..000000000000
--- a/store/v2/commitment/store_test_suite.go
+++ /dev/null
@@ -1,495 +0,0 @@
-package commitment
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-
-	"github.com/stretchr/testify/suite"
-
-	corelog "cosmossdk.io/core/log"
-	corestore "cosmossdk.io/core/store"
-	coretesting "cosmossdk.io/core/testing"
-	"cosmossdk.io/store/v2"
-	dbm "cosmossdk.io/store/v2/db"
-	"cosmossdk.io/store/v2/proof"
-	"cosmossdk.io/store/v2/snapshots"
-	snapshotstypes "cosmossdk.io/store/v2/snapshots/types"
-)
-
-const (
-	storeKey1 = "store1"
-	storeKey2 = "store2"
-	storeKey3 = "store3"
-)
-
-// CommitStoreTestSuite is a test suite to be used for all tree backends.
-type CommitStoreTestSuite struct {
-	suite.Suite
-
-	NewStore func(db corestore.KVStoreWithBatch, dbDir string, storeKeys, oldStoreKeys []string, logger corelog.Logger) (*CommitStore, error)
-	TreeType string
-}
-
-// TestStore_Snapshotter tests the snapshot functionality of the CommitStore.
-// This test verifies that the store can correctly create snapshots and restore from them.
-// The test follows these steps:
-//
-// 1. Setup & Data Population:
-//   - Creates a new CommitStore with two stores (store1 and store2)
-//   - Writes 10 versions of data (version 1-10)
-//   - For each version, writes 10 key-value pairs to each store
-//   - Total data: 2 stores * 10 versions * 10 pairs = 200 key-value pairs
-//   - Keys are formatted as "key-{version}-{index}"
-//   - Values are formatted as "value-{version}-{index}"
-//   - Each version is committed to get a CommitInfo
-//
-// 2.
Snapshot Creation: -// - Creates a dummy extension item for metadata testing -// - Sets up a new target store for restoration -// - Creates a channel for snapshot chunks -// - Launches a goroutine to: -// - Create a snapshot writer -// - Take a snapshot at version 10 -// - Write extension metadata -// -// 3. Snapshot Restoration: -// - Creates a snapshot reader from the chunks -// - Sets up a channel for state changes during restoration -// - Launches a goroutine to collect restored key-value pairs -// - Restores the snapshot into the target store -// - Verifies the extension metadata was preserved -// -// 4. Verification: -// - Confirms all 200 key-value pairs were restored correctly -// - Verifies the format: "{storeKey}_key-{version}-{index}" -> "value-{version}-{index}" -// - Checks that the restored store's Merkle tree hashes match the original -// - Ensures store integrity by comparing CommitInfo hashes -func (s *CommitStoreTestSuite) TestStore_Snapshotter() { - if s.TreeType == "iavlv2" { - s.T().Skip("FIXME: iavlv2 does not yet support snapshots") - } - storeKeys := []string{storeKey1, storeKey2} - commitStore, err := s.NewStore(dbm.NewMemDB(), s.T().TempDir(), storeKeys, nil, coretesting.NewNopLogger()) - s.Require().NoError(err) - - // We'll create 10 versions of data - latestVersion := uint64(10) - kvCount := 10 - var cInfo *proof.CommitInfo - - // For each version 1-10 - for i := uint64(1); i <= latestVersion; i++ { - // Create KV pairs for each store - kvPairs := make(map[string]corestore.KVPairs) - for _, storeKey := range storeKeys { - kvPairs[storeKey] = corestore.KVPairs{} - // Create 10 KV pairs for this store - for j := 0; j < kvCount; j++ { - key := []byte(fmt.Sprintf("key-%d-%d", i, j)) - value := []byte(fmt.Sprintf("value-%d-%d", i, j)) - kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value}) - } - } - // Write and commit the changes for this version - s.Require().NoError(commitStore.WriteChangeset(corestore.NewChangesetWithPairs(i, kvPairs))) - cInfo, err = commitStore.Commit(i) - s.Require().NoError(err) - } - - s.Require().Equal(len(storeKeys), len(cInfo.StoreInfos)) - - // create a snapshot - dummyExtensionItem := snapshotstypes.SnapshotItem{ - Item: &snapshotstypes.SnapshotItem_Extension{ - Extension: &snapshotstypes.SnapshotExtensionMeta{ - Name: "test", - Format: 1, - }, - }, - } - - targetStore, err := s.NewStore(dbm.NewMemDB(), s.T().TempDir(), storeKeys, nil, coretesting.NewNopLogger()) - s.Require().NoError(err) - - chunks := make(chan io.ReadCloser, kvCount*int(latestVersion)) - go func() { - streamWriter := snapshots.NewStreamWriter(chunks) - s.Require().NotNil(streamWriter) - defer streamWriter.Close() - err := commitStore.Snapshot(latestVersion, streamWriter) - s.Require().NoError(err) - // write an extension metadata - err = streamWriter.WriteMsg(&dummyExtensionItem) - s.Require().NoError(err) - }() - - streamReader, err := snapshots.NewStreamReader(chunks) - s.Require().NoError(err) - - nextItem, err := targetStore.Restore(latestVersion, snapshotstypes.CurrentFormat, streamReader) - s.Require().NoError(err) - s.Require().Equal(*dummyExtensionItem.GetExtension(), *nextItem.GetExtension()) - - // check the restored tree hash - targetCommitInfo, err := targetStore.GetCommitInfo(latestVersion) - s.Require().NoError(err) - for _, storeInfo := range targetCommitInfo.StoreInfos { - matched := false - for _, latestStoreInfo := range cInfo.StoreInfos { - if bytes.Equal(storeInfo.Name, latestStoreInfo.Name) { - 
s.Require().Equal(latestStoreInfo.GetHash(), storeInfo.GetHash()) - matched = true - } - } - s.Require().True(matched) - } -} - -func (s *CommitStoreTestSuite) TestStore_LoadVersion() { - storeKeys := []string{storeKey1, storeKey2} - mdb := dbm.NewMemDB() - dbDir := s.T().TempDir() - commitStore, err := s.NewStore(mdb, dbDir, storeKeys, nil, coretesting.NewNopLogger()) - s.Require().NoError(err) - - latestVersion := uint64(10) - kvCount := 10 - for i := uint64(1); i <= latestVersion; i++ { - kvPairs := make(map[string]corestore.KVPairs) - for _, storeKey := range storeKeys { - kvPairs[storeKey] = corestore.KVPairs{} - for j := 0; j < kvCount; j++ { - key := []byte(fmt.Sprintf("key-%d-%d", i, j)) - value := []byte(fmt.Sprintf("value-%d-%d", i, j)) - kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value}) - } - } - s.Require().NoError(commitStore.WriteChangeset(corestore.NewChangesetWithPairs(i, kvPairs))) - _, err = commitStore.Commit(i) - s.Require().NoError(err) - } - - // load the store with the latest version - targetStore, err := s.NewStore(mdb, dbDir, storeKeys, nil, coretesting.NewNopLogger()) - s.Require().NoError(err) - err = targetStore.LoadVersion(latestVersion) - s.Require().NoError(err) - // check the store - for i := uint64(1); i <= latestVersion; i++ { - commitInfo, _ := targetStore.GetCommitInfo(i) - s.Require().NotNil(commitInfo) - s.Require().Equal(i, commitInfo.Version) - } - - // rollback to a previous version - rollbackVersion := uint64(5) - rollbackStore, err := s.NewStore(mdb, dbDir, storeKeys, nil, coretesting.NewNopLogger()) - s.Require().NoError(err) - err = rollbackStore.LoadVersion(rollbackVersion) - s.Require().NoError(err) - // check the store - v, err := rollbackStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(rollbackVersion, v) - for i := uint64(1); i <= latestVersion; i++ { - commitInfo, _ := rollbackStore.GetCommitInfo(i) - if i > rollbackVersion { - s.Require().Nil(commitInfo) - } else { - s.Require().NotNil(commitInfo) - } - } -} - -func (s *CommitStoreTestSuite) TestStore_Pruning() { - storeKeys := []string{storeKey1, storeKey2} - pruneOpts := store.NewPruningOptionWithCustom(10, 5) - commitStore, err := s.NewStore(dbm.NewMemDB(), s.T().TempDir(), storeKeys, nil, coretesting.NewNopLogger()) - s.Require().NoError(err) - - latestVersion := uint64(100) - kvCount := 10 - for i := uint64(1); i <= latestVersion; i++ { - kvPairs := make(map[string]corestore.KVPairs) - for _, storeKey := range storeKeys { - kvPairs[storeKey] = corestore.KVPairs{} - for j := 0; j < kvCount; j++ { - key := []byte(fmt.Sprintf("key-%d-%d", i, j)) - value := []byte(fmt.Sprintf("value-%d-%d", i, j)) - kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value}) - } - } - s.Require().NoError(commitStore.WriteChangeset(corestore.NewChangesetWithPairs(i, kvPairs))) - - _, err = commitStore.Commit(i) - s.Require().NoError(err) - - if prune, pruneVersion := pruneOpts.ShouldPrune(i); prune { - s.Require().NoError(commitStore.Prune(pruneVersion)) - } - - } - - pruneVersion := latestVersion - pruneOpts.KeepRecent - 1 - // check the store - for i := uint64(1); i <= latestVersion; i++ { - commitInfo, _ := commitStore.GetCommitInfo(i) - if i <= pruneVersion { - s.Require().Nil(commitInfo) - } else { - s.Require().NotNil(commitInfo) - } - } -} - -func (s *CommitStoreTestSuite) TestStore_GetProof() { - storeKeys := []string{storeKey1, storeKey2} - commitStore, err := s.NewStore(dbm.NewMemDB(), s.T().TempDir(), 
storeKeys, nil, coretesting.NewNopLogger()) - s.Require().NoError(err) - - toVersion := uint64(10) - keyCount := 5 - - // commit some changes - for version := uint64(1); version <= toVersion; version++ { - cs := corestore.NewChangeset(version) - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } - } - err := commitStore.WriteChangeset(cs) - s.Require().NoError(err) - _, err = commitStore.Commit(version) - s.Require().NoError(err) - } - - // get proof - for version := uint64(1); version <= toVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - _, err := commitStore.GetProof([]byte(storeKey), version, []byte(fmt.Sprintf("key-%d-%d", version, i))) - s.Require().NoError(err) - } - } - } - - // prune version 1 - s.Require().NoError(commitStore.Prune(1)) - - // check if proof for version 1 is pruned - _, err = commitStore.GetProof([]byte(storeKeys[0]), 1, []byte(fmt.Sprintf("key-%d-%d", 1, 0))) - s.Require().Error(err) - // check the commit info - commit, _ := commitStore.GetCommitInfo(1) - s.Require().Nil(commit) -} - -func (s *CommitStoreTestSuite) TestStore_Get() { - storeKeys := []string{storeKey1, storeKey2} - commitStore, err := s.NewStore(dbm.NewMemDB(), s.T().TempDir(), storeKeys, nil, coretesting.NewNopLogger()) - s.Require().NoError(err) - - toVersion := uint64(10) - keyCount := 5 - - // commit some changes - for version := uint64(1); version <= toVersion; version++ { - cs := corestore.NewChangeset(version) - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } - } - err := commitStore.WriteChangeset(cs) - s.Require().NoError(err) - _, err = commitStore.Commit(version) - s.Require().NoError(err) - } - - // get proof - for version := uint64(1); version <= toVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - val, err := commitStore.Get([]byte(storeKey), version, []byte(fmt.Sprintf("key-%d-%d", version, i))) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), val) - } - } - } -} - -func (s *CommitStoreTestSuite) TestStore_Upgrades() { - storeKeys := []string{storeKey1, storeKey2, storeKey3} - commitDB := dbm.NewMemDB() - commitDir := s.T().TempDir() - commitStore, err := s.NewStore(commitDB, commitDir, storeKeys, nil, coretesting.NewNopLogger()) - s.Require().NoError(err) - - latestVersion := uint64(10) - kvCount := 10 - for i := uint64(1); i <= latestVersion; i++ { - kvPairs := make(map[string]corestore.KVPairs) - for _, storeKey := range storeKeys { - kvPairs[storeKey] = corestore.KVPairs{} - for j := 0; j < kvCount; j++ { - key := []byte(fmt.Sprintf("key-%d-%d", i, j)) - value := []byte(fmt.Sprintf("value-%d-%d", i, j)) - kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value}) - } - } - s.Require().NoError(commitStore.WriteChangeset(corestore.NewChangesetWithPairs(i, kvPairs))) - _, err = commitStore.Commit(i) - s.Require().NoError(err) - } - - // create a new commitment store with upgrades - upgrades := &corestore.StoreUpgrades{ - Added: []string{"newStore1", "newStore2"}, - Deleted: []string{storeKey3}, - } - newStoreKeys := []string{storeKey1, storeKey2, storeKey3, "newStore1", "newStore2"} - realStoreKeys := 
[]string{storeKey1, storeKey2, "newStore1", "newStore2"}
-	oldStoreKeys := []string{storeKey3}
-	commitStore, err = s.NewStore(commitDB, commitDir, newStoreKeys, oldStoreKeys, coretesting.NewNopLogger())
-	s.Require().NoError(err)
-	err = commitStore.LoadVersionAndUpgrade(latestVersion, upgrades)
-	s.Require().NoError(err)
-
-	// GetProof should work for the old stores
-	for _, storeKey := range []string{storeKey3} {
-		for i := uint64(1); i <= latestVersion; i++ {
-			for j := 0; j < kvCount; j++ {
-				proof, err := commitStore.GetProof([]byte(storeKey), i, []byte(fmt.Sprintf("key-%d-%d", i, j)))
-				s.Require().NoError(err)
-				s.Require().NotNil(proof)
-			}
-		}
-	}
-	// GetProof should fail for the new stores against the old versions
-	for _, storeKey := range []string{"newStore1", "newStore2"} {
-		for i := uint64(1); i <= latestVersion; i++ {
-			for j := 0; j < kvCount; j++ {
-				_, err := commitStore.GetProof([]byte(storeKey), i, []byte(fmt.Sprintf("key-%d-%d", i, j)))
-				s.Require().Error(err)
-			}
-		}
-	}
-
-	// apply the changeset again
-	for i := latestVersion + 1; i < latestVersion*2; i++ {
-		kvPairs := make(map[string]corestore.KVPairs)
-		for _, storeKey := range realStoreKeys {
-			kvPairs[storeKey] = corestore.KVPairs{}
-			for j := 0; j < kvCount; j++ {
-				key := []byte(fmt.Sprintf("key-%d-%d", i, j))
-				value := []byte(fmt.Sprintf("value-%d-%d", i, j))
-				kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value})
-			}
-		}
-		s.Require().NoError(commitStore.WriteChangeset(corestore.NewChangesetWithPairs(i, kvPairs)))
-		commitInfo, err := commitStore.Commit(i)
-		s.Require().NoError(err)
-		s.Require().NotNil(commitInfo)
-		s.Require().Equal(len(realStoreKeys), len(commitInfo.StoreInfos))
-		for _, storeKey := range realStoreKeys {
-			s.Require().NotNil(commitInfo.GetStoreCommitID([]byte(storeKey)))
-		}
-	}
-
-	// verify new stores
-	for _, storeKey := range []string{"newStore1", "newStore2"} {
-		for i := latestVersion + 1; i < latestVersion*2; i++ {
-			for j := 0; j < kvCount; j++ {
-				proof, err := commitStore.GetProof([]byte(storeKey), i, []byte(fmt.Sprintf("key-%d-%d", i, j)))
-				s.Require().NoError(err)
-				s.Require().NotNil(proof)
-			}
-		}
-	}
-
-	// verify existing store
-	for i := uint64(1); i < latestVersion*2; i++ {
-		for j := 0; j < kvCount; j++ {
-			prf, err := commitStore.GetProof([]byte(storeKey2), i, []byte(fmt.Sprintf("key-%d-%d", i, j)))
-			s.Require().NoError(err)
-			s.Require().NotNil(prf)
-		}
-	}
-
-	// create a new commitment store with one more upgrade
-	upgrades = &corestore.StoreUpgrades{
-		Deleted: []string{storeKey2},
-		Added:   []string{"newStore3"},
-	}
-	newRealStoreKeys := []string{storeKey1, "newStore1", "newStore2", "newStore3"}
-	oldStoreKeys = []string{storeKey2, storeKey3}
-	commitStore, err = s.NewStore(commitDB, commitDir, newRealStoreKeys, oldStoreKeys, coretesting.NewNopLogger())
-	s.Require().NoError(err)
-	err = commitStore.LoadVersionAndUpgrade(2*latestVersion-1, upgrades)
-	s.Require().NoError(err)
-
-	// apply the changeset again
-	for i := latestVersion * 2; i < latestVersion*3; i++ {
-		kvPairs := make(map[string]corestore.KVPairs)
-		for _, storeKey := range newRealStoreKeys {
-			kvPairs[storeKey] = corestore.KVPairs{}
-			for j := 0; j < kvCount; j++ {
-				key := []byte(fmt.Sprintf("key-%d-%d", i, j))
-				value := []byte(fmt.Sprintf("value-%d-%d", i, j))
-				kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value})
-			}
-		}
-		err = commitStore.WriteChangeset(corestore.NewChangesetWithPairs(i, kvPairs))
s.Require().NoError(err) - commitInfo, err := commitStore.Commit(i) - s.Require().NoError(err) - s.Require().NotNil(commitInfo) - s.Require().Equal(len(newRealStoreKeys), len(commitInfo.StoreInfos)) - for _, storeKey := range newRealStoreKeys { - s.Require().NotNil(commitInfo.GetStoreCommitID([]byte(storeKey))) - } - } - - // prune the old stores - s.Require().NoError(commitStore.Prune(latestVersion)) - s.T().Logf("prune to version %d", latestVersion) - // GetProof should fail for the old stores - for _, storeKey := range []string{storeKey1, storeKey3} { - for i := uint64(1); i <= latestVersion; i++ { - for j := 0; j < kvCount; j++ { - _, err := commitStore.GetProof([]byte(storeKey), i, []byte(fmt.Sprintf("key-%d-%d", i, j))) - s.Require().Error(err) - } - } - } - s.T().Log("GetProof should work for the new stores") - // GetProof should not fail for the newly removed store - for i := latestVersion + 1; i < latestVersion*2; i++ { - for j := 0; j < kvCount; j++ { - proof, err := commitStore.GetProof([]byte(storeKey2), i, []byte(fmt.Sprintf("key-%d-%d", i, j))) - s.Require().NoError(err) - s.Require().NotNil(proof) - } - } - - s.T().Logf("Prune to version %d", latestVersion*2) - s.Require().NoError(commitStore.Prune(latestVersion * 2)) - // GetProof should fail for the newly deleted stores - for i := uint64(1); i < latestVersion*2; i++ { - for j := 0; j < kvCount; j++ { - _, err := commitStore.GetProof([]byte(storeKey2), i, []byte(fmt.Sprintf("key-%d-%d", i, j))) - s.Require().Error(err) - } - } - s.T().Log("GetProof should work for the new added store") - // GetProof should work for the new added store - for i := latestVersion*2 + 1; i < latestVersion*3; i++ { - for j := 0; j < kvCount; j++ { - proof, err := commitStore.GetProof([]byte("newStore3"), i, []byte(fmt.Sprintf("key-%d-%d", i, j))) - s.Require().NoError(err) - s.Require().NotNil(proof) - } - } -} diff --git a/store/v2/database.go b/store/v2/database.go deleted file mode 100644 index 0e0697de57bb..000000000000 --- a/store/v2/database.go +++ /dev/null @@ -1,63 +0,0 @@ -package store - -import ( - "io" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2/proof" -) - -type VersionedReader interface { - Has(storeKey []byte, version uint64, key []byte) (bool, error) - Get(storeKey []byte, version uint64, key []byte) ([]byte, error) - - GetLatestVersion() (uint64, error) - VersionExists(v uint64) (bool, error) - - Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) - ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) -} - -// UpgradableDatabase defines an API for a versioned database that allows pruning -// deleted storeKeys -type UpgradableDatabase interface { - // PruneStoreKeys prunes all data associated with the given storeKeys whenever - // the given version is pruned. - PruneStoreKeys(storeKeys []string, version uint64) error -} - -// Committer defines an API for committing state. -type Committer interface { - UpgradeableStore - VersionedReader - // WriteChangeset writes the changeset to the commitment state. - WriteChangeset(cs *corestore.Changeset) error - - // GetLatestVersion returns the latest version. - GetLatestVersion() (uint64, error) - - // LoadVersion loads the tree at the given version. - LoadVersion(targetVersion uint64) error - - // LoadVersionForOverwriting loads the tree at the given version. - // Any versions greater than targetVersion will be deleted. 
- LoadVersionForOverwriting(targetVersion uint64) error - - // Commit commits the working tree to the database. - Commit(version uint64) (*proof.CommitInfo, error) - - // GetProof returns the proof of existence or non-existence for the given key. - GetProof(storeKey []byte, version uint64, key []byte) ([]proof.CommitmentOp, error) - - // SetInitialVersion sets the initial version of the committer. - SetInitialVersion(version uint64) error - - // GetCommitInfo returns the CommitInfo for the given version. - GetCommitInfo(version uint64) (*proof.CommitInfo, error) - - Get(storeKey []byte, version uint64, key []byte) ([]byte, error) - - // Closer releases associated resources. It should NOT be idempotent. It must - // only be called once and any call after may panic. - io.Closer -} diff --git a/store/v2/migration/README.md b/store/v2/migration/README.md deleted file mode 100644 index 88b395f63f75..000000000000 --- a/store/v2/migration/README.md +++ /dev/null @@ -1,111 +0,0 @@ -# Migration Manager - -The `migration` package contains the `migration.Manager`, which is responsible -for migrating data from `store/v1` to `store/v2`. To ensure a smooth transition, -the process is designed to **lazily** migrate data in the background without blocking -`root.Store` operations. - -## Overview - -The migration process involves several steps: - -1. **Create a snapshot** of the current state while `Commit` operations continue to - function with `store/v1`. -2. **Restore the snapshot** into the new StateStorage (SS) and StateCommitment (SC). -3. **Sync recent state changes** from `store/v1` to the new SS and SC. -4. After syncing, the `Commit` operation will be switched to the new `store/v2`. - -Taking a snapshot is a lightweight operation. The snapshot is not stored on disk but -consumed by the `Restore` process, which replays state changes to the new SS and SC. - -> **Note:** After migration, `store/v2` does **not** support historical queries. -If historical data access is required, a full state migration to `store/v2` is necessary. - -## Usage - -You can create a new `migration.Manager` by calling the following function: - -```go -func NewManager( - db corestore.KVStoreWithBatch, - sm *snapshots.Manager, - ss *storage.StorageStore, - sc *commitment.CommitStore, - logger log.Logger -) *Manager -``` - -* `sc` (Commitment Store) can be `nil`. In that case, the Manager will migrate only - the state storage. -* The migration process is lazy, meaning data is migrated in the background while - `root.Store` remains fully operational. - -To initiate the migration process, call the `Start` method: - -```go -func (m *Manager) Start(ctx context.Context) error -``` - -> **Note:** It should be called by the RootStore, running in the background. - -## Migration Flow - -```mermaid -sequenceDiagram - autonumber - - participant A as RootStore - participant B as MigrationManager - participant C as SnapshotsManager - participant D as StateCommitment - participant E as StateStorage - - A->>B: Start - loop Old Data Migration - B->>C: Create Snapshot - C->>B: Stream Snapshot - B->>D: State Sync (Restore) - B->>E: Write Changeset (Restore) - end - - loop New Commit Data Sync - A->>B: Commit(Changeset) - B->>B: Store Changeset - B->>D: Commit Changeset - B->>E: Write Changeset - end - - B->>A: Switch to new store/v2 -``` - -## Key Considerations - -### Laziness and Background Operation - -The migration is performed lazily, meaning it occurs in the background without -interrupting the current operations on root.Store. 
This allows the chain to continue
-running while data is gradually migrated to `store/v2`. State synchronization ensures
-that any new state changes during the migration are also applied to `store/v2`.
-
-However, note that there may be a performance impact depending on the size of the data
-being migrated, and it’s essential to monitor the migration process in production
-environments.
-
-### Handling Failures and Rollbacks
-
-It is important to consider how the migration manager handles errors or system failures
-during the migration process:
-
-* If the migration fails, there is no impact on the existing `store/v1` operations,
-  but the migration process must be restarted from scratch.
-* In the event of a critical failure after migration, a rollback may not be possible,
-  so the `store/v1` backup should be kept for a certain period.
-
-### Impact on Historical Queries
-
-After the migration, the new `store/v2` does not support historical queries.
-This limitation should be clearly understood before starting the migration process,
-especially if the node relies on historical data for any operations.
-
-If historical queries are required, users must fully migrate all historical data to `store/v2`.
-Alternatively, keeping `store/v1` accessible for historical queries could be an option.
diff --git a/store/v2/migration/manager.go b/store/v2/migration/manager.go
deleted file mode 100644
index 5365e8eb6a11..000000000000
--- a/store/v2/migration/manager.go
+++ /dev/null
@@ -1,213 +0,0 @@
-package migration
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"sync/atomic"
-	"time"
-
-	"golang.org/x/sync/errgroup"
-
-	"cosmossdk.io/core/log"
-	corestore "cosmossdk.io/core/store"
-	"cosmossdk.io/store/v2/commitment"
-	"cosmossdk.io/store/v2/internal/encoding"
-	"cosmossdk.io/store/v2/snapshots"
-)
-
-const (
-	// defaultChannelBufferSize is the default buffer size for the migration stream.
-	defaultChannelBufferSize = 1024
-
-	migrateChangesetKeyFmt = "m/cs_%x" // m/cs_<version>
-)
-
-// VersionedChangeset is a pair of version and Changeset.
-type VersionedChangeset struct {
-	Version   uint64
-	Changeset *corestore.Changeset
-}
-
-// Manager manages the migration of the whole state from store/v1 to store/v2.
-type Manager struct {
-	logger           log.Logger
-	snapshotsManager *snapshots.Manager
-
-	stateCommitment *commitment.CommitStore
-
-	db corestore.KVStoreWithBatch
-
-	migratedVersion atomic.Uint64
-
-	chChangeset <-chan *VersionedChangeset
-	chDone      <-chan struct{}
-}
-
-// NewManager returns a new Manager.
-//
-// NOTE: `sc` can be `nil` if you don't want to migrate the commitment.
-func NewManager(db corestore.KVStoreWithBatch, sm *snapshots.Manager, sc *commitment.CommitStore, logger log.Logger) *Manager {
-	return &Manager{
-		logger:           logger,
-		snapshotsManager: sm,
-		stateCommitment:  sc,
-		db:               db,
-	}
-}
-
-// Start starts the whole migration process.
-// It migrates the whole state at the given version to the new store/v2 (both SC and SS).
-// It also catches up the Changesets which are committed while the migration is in progress.
-// `chChangeset` is the channel to receive the committed Changesets from the RootStore.
-// `chDone` is the channel to receive the done signal from the RootStore.
-// NOTE: It should be called by the RootStore, running in the background.
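-// A hedged wiring sketch from the caller's side, mirroring the manager tests
-// (the channel buffer size is illustrative):
-//
-//	chChangeset := make(chan *VersionedChangeset, 1)
-//	chDone := make(chan struct{})
-//	go func() {
-//		if err := m.Start(version, chChangeset, chDone); err != nil {
-//			logger.Error("migration failed", "err", err)
-//		}
-//	}()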
-func (m *Manager) Start(version uint64, chChangeset <-chan *VersionedChangeset, chDone <-chan struct{}) error { - m.chChangeset = chChangeset - m.chDone = chDone - - go func() { - if err := m.writeChangeset(); err != nil { - m.logger.Error("failed to write changeset", "err", err) - } - }() - - if err := m.Migrate(version); err != nil { - return fmt.Errorf("failed to migrate state: %w", err) - } - - return m.Sync() -} - -// GetStateCommitment returns the state commitment. -func (m *Manager) GetStateCommitment() *commitment.CommitStore { - return m.stateCommitment -} - -// Migrate migrates the whole state at the given height to the new store/v2. -func (m *Manager) Migrate(height uint64) error { - // create the migration stream and snapshot, - // which acts as protoio.Reader and snapshots.WriteCloser. - ms := NewMigrationStream(defaultChannelBufferSize) - if err := m.snapshotsManager.CreateMigration(height, ms); err != nil { - return err - } - - eg := new(errgroup.Group) - eg.Go(func() error { - if _, err := m.stateCommitment.Restore(height, 0, ms); err != nil { - return err - } - return nil - }) - - if err := eg.Wait(); err != nil { - return err - } - - m.migratedVersion.Store(height) - - return nil -} - -// writeChangeset writes the Changeset to the db. -func (m *Manager) writeChangeset() error { - for vc := range m.chChangeset { - cs := vc.Changeset - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, vc.Version) - csKey := []byte(fmt.Sprintf(migrateChangesetKeyFmt, buf)) - csBytes, err := encoding.MarshalChangeset(cs) - if err != nil { - return fmt.Errorf("failed to marshal changeset: %w", err) - } - - batch := m.db.NewBatch() - // Invoking this code in a closure so that defer is called immediately on return - // yet not in the for-loop which can leave resource lingering. - err = func() (err error) { - defer func() { - err = errors.Join(err, batch.Close()) - }() - - if err := batch.Set(csKey, csBytes); err != nil { - return fmt.Errorf("failed to write changeset to db.Batch: %w", err) - } - if err := batch.Write(); err != nil { - return fmt.Errorf("failed to write changeset to db: %w", err) - } - return nil - }() - if err != nil { - return err - } - } - - return nil -} - -// GetMigratedVersion returns the migrated version. -// It is used to check the migrated version in the RootStore. -func (m *Manager) GetMigratedVersion() uint64 { - return m.migratedVersion.Load() -} - -// Sync catches up the Changesets which are committed while the migration is in progress. -// It should be called after the migration is done. 
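-// A hedged sketch of the shutdown handshake used in the manager tests: the
-// caller waits until GetMigratedVersion() reaches the target version and then
-// signals
-//
-//	chDone <- struct{}{} // makes Sync return nil on its next iteration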
-func (m *Manager) Sync() error { - version := m.GetMigratedVersion() - if version == 0 { - return errors.New("migration is not done yet") - } - version += 1 - - for { - select { - case <-m.chDone: - return nil - default: - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, version) - csKey := []byte(fmt.Sprintf(migrateChangesetKeyFmt, buf)) - csBytes, err := m.db.Get(csKey) - if err != nil { - return fmt.Errorf("failed to get changeset from db: %w", err) - } - if csBytes == nil { - // wait for the next changeset - time.Sleep(100 * time.Millisecond) - continue - } - - cs := corestore.NewChangeset(version) - if err := encoding.UnmarshalChangeset(cs, csBytes); err != nil { - return fmt.Errorf("failed to unmarshal changeset: %w", err) - } - if m.stateCommitment != nil { - if err := m.stateCommitment.WriteChangeset(cs); err != nil { - return fmt.Errorf("failed to write changeset to commitment: %w", err) - } - if _, err := m.stateCommitment.Commit(version); err != nil { - return fmt.Errorf("failed to commit changeset to commitment: %w", err) - } - } - - m.migratedVersion.Store(version) - - version += 1 - } - } -} - -// Close closes the manager. It should be called after the migration is done. -// It will close the db and notify the snapshotsManager that the migration is done. -func (m *Manager) Close() error { - if err := m.db.Close(); err != nil { - return fmt.Errorf("failed to close db: %w", err) - } - if m.stateCommitment != nil { - m.snapshotsManager.EndMigration(m.stateCommitment) - } - - return nil -} diff --git a/store/v2/migration/manager_test.go b/store/v2/migration/manager_test.go deleted file mode 100644 index 103b3244b650..000000000000 --- a/store/v2/migration/manager_test.go +++ /dev/null @@ -1,179 +0,0 @@ -package migration - -import ( - "encoding/binary" - "fmt" - "testing" - - "github.com/stretchr/testify/require" - - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2/commitment" - "cosmossdk.io/store/v2/commitment/iavl" - dbm "cosmossdk.io/store/v2/db" - "cosmossdk.io/store/v2/snapshots" -) - -var storeKeys = []string{"store1", "store2"} - -func setupMigrationManager(t *testing.T) (*Manager, *commitment.CommitStore) { - t.Helper() - - db := dbm.NewMemDB() - multiTrees := make(map[string]commitment.Tree) - for _, storeKey := range storeKeys { - prefixDB := dbm.NewPrefixDB(db, []byte(storeKey)) - multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, coretesting.NewNopLogger(), iavl.DefaultConfig()) - } - commitStore, err := commitment.NewCommitStore(multiTrees, nil, db, coretesting.NewNopLogger()) - require.NoError(t, err) - - snapshotsStore, err := snapshots.NewStore(t.TempDir()) - require.NoError(t, err) - - snapshotsManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), commitStore, nil, coretesting.NewNopLogger()) - - db1 := dbm.NewMemDB() - multiTrees1 := make(map[string]commitment.Tree) - for _, storeKey := range storeKeys { - prefixDB := dbm.NewPrefixDB(db1, []byte(storeKey)) - multiTrees1[storeKey] = iavl.NewIavlTree(prefixDB, coretesting.NewNopLogger(), iavl.DefaultConfig()) - } - - newCommitStore, err := commitment.NewCommitStore(multiTrees1, nil, db1, coretesting.NewNopLogger()) // for store/v2 - require.NoError(t, err) - - return NewManager(db, snapshotsManager, newCommitStore, coretesting.NewNopLogger()), commitStore -} - -func TestMigrateState(t *testing.T) { - m, orgCommitStore := setupMigrationManager(t) - // apply changeset - toVersion := uint64(100) - keyCount := 10 - for 
version := uint64(1); version <= toVersion; version++ {
-		cs := corestore.NewChangeset(version)
-		for _, storeKey := range storeKeys {
-			for i := 0; i < keyCount; i++ {
-				cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
-			}
-		}
-		require.NoError(t, orgCommitStore.WriteChangeset(cs))
-		_, err := orgCommitStore.Commit(version)
-		require.NoError(t, err)
-	}
-
-	err := m.Migrate(toVersion - 1)
-	require.NoError(t, err)
-
-	// expecting an error for the conflicting process, since Migrate triggers the
-	// snapshotter to create a migration, which already starts a snapshot process.
-	_, err = m.snapshotsManager.Create(toVersion - 1)
-	require.Error(t, err)
-
-	// check the migrated state
-	for version := uint64(1); version < toVersion; version++ {
-		for _, storeKey := range storeKeys {
-			for i := 0; i < keyCount; i++ {
-				val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i)))
-				require.NoError(t, err)
-				require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val)
-			}
-		}
-
-		// check the latest state
-		val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1"))
-		require.NoError(t, err)
-		require.Nil(t, val)
-		val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0"))
-		require.NoError(t, err)
-		require.Nil(t, val)
-	}
-}
-
-func TestStartMigrateState(t *testing.T) {
-	m, orgCommitStore := setupMigrationManager(t)
-
-	chDone := make(chan struct{})
-	chChangeset := make(chan *VersionedChangeset, 1)
-
-	// apply changeset
-	toVersion := uint64(10)
-	keyCount := 5
-	changesets := []corestore.Changeset{}
-
-	for version := uint64(1); version <= toVersion; version++ {
-		cs := corestore.NewChangeset(version)
-		for _, storeKey := range storeKeys {
-			for i := 0; i < keyCount; i++ {
-				cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
-			}
-		}
-		changesets = append(changesets, *cs)
-		require.NoError(t, orgCommitStore.WriteChangeset(cs))
-		_, err := orgCommitStore.Commit(version)
-		require.NoError(t, err)
-	}
-
-	// feed changesets to channel
-	go func() {
-		for version := uint64(1); version <= toVersion; version++ {
-			chChangeset <- &VersionedChangeset{
-				Version:   version,
-				Changeset: &changesets[version-1],
-			}
-		}
-	}()
-
-	// check if migrate process complete
-	go func() {
-		for {
-			migrateVersion := m.GetMigratedVersion()
-			if migrateVersion == toVersion-1 {
-				break
-			}
-		}
-
-		chDone <- struct{}{}
-	}()
-
-	err := m.Start(toVersion-1, chChangeset, chDone)
-	require.NoError(t, err)
-
-	// expecting an error for the conflicting process, since Migrate triggers the
-	// snapshotter to create a migration, which already starts a snapshot process.
- _, err = m.snapshotsManager.Create(toVersion - 1) - require.Error(t, err) - - if m.stateCommitment != nil { - // check the migrated state - for version := uint64(1); version < toVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i))) - require.NoError(t, err) - require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val) - } - } - } - // check the latest state - val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1")) - require.NoError(t, err) - require.Nil(t, val) - val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0")) - require.NoError(t, err) - require.Nil(t, val) - } - - // check if migration db write change set to storage - for version := uint64(1); version < toVersion; version++ { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, version) - csKey := []byte(fmt.Sprintf(migrateChangesetKeyFmt, buf)) - csVal, err := m.db.Get(csKey) - require.NoError(t, err) - require.NotEmpty(t, csVal) - } -} diff --git a/store/v2/mock/db_mock.go b/store/v2/mock/db_mock.go deleted file mode 100644 index 31541c998f3b..000000000000 --- a/store/v2/mock/db_mock.go +++ /dev/null @@ -1,301 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: ./types.go -// -// Generated by this command: -// -// mockgen -package mock -destination ./db_mock.go -source ./types.go -// - -// Package mock is a generated GoMock package. -package mock - -import ( - reflect "reflect" - - store "cosmossdk.io/core/store" - proof "cosmossdk.io/store/v2/proof" - gomock "go.uber.org/mock/gomock" -) - -// MockStateCommitter is a mock of StateCommitter interface. -type MockStateCommitter struct { - ctrl *gomock.Controller - recorder *MockStateCommitterMockRecorder - isgomock struct{} -} - -// MockStateCommitterMockRecorder is the mock recorder for MockStateCommitter. -type MockStateCommitterMockRecorder struct { - mock *MockStateCommitter -} - -// NewMockStateCommitter creates a new mock instance. -func NewMockStateCommitter(ctrl *gomock.Controller) *MockStateCommitter { - mock := &MockStateCommitter{ctrl: ctrl} - mock.recorder = &MockStateCommitterMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockStateCommitter) EXPECT() *MockStateCommitterMockRecorder { - return m.recorder -} - -// Close mocks base method. -func (m *MockStateCommitter) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. -func (mr *MockStateCommitterMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockStateCommitter)(nil).Close)) -} - -// Commit mocks base method. -func (m *MockStateCommitter) Commit(version uint64) (*proof.CommitInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Commit", version) - ret0, _ := ret[0].(*proof.CommitInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Commit indicates an expected call of Commit. -func (mr *MockStateCommitterMockRecorder) Commit(version any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockStateCommitter)(nil).Commit), version) -} - -// Get mocks base method. 
-func (m *MockStateCommitter) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", storeKey, version, key) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockStateCommitterMockRecorder) Get(storeKey, version, key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockStateCommitter)(nil).Get), storeKey, version, key) -} - -// GetCommitInfo mocks base method. -func (m *MockStateCommitter) GetCommitInfo(version uint64) (*proof.CommitInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCommitInfo", version) - ret0, _ := ret[0].(*proof.CommitInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCommitInfo indicates an expected call of GetCommitInfo. -func (mr *MockStateCommitterMockRecorder) GetCommitInfo(version any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCommitInfo", reflect.TypeOf((*MockStateCommitter)(nil).GetCommitInfo), version) -} - -// GetLatestVersion mocks base method. -func (m *MockStateCommitter) GetLatestVersion() (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLatestVersion") - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetLatestVersion indicates an expected call of GetLatestVersion. -func (mr *MockStateCommitterMockRecorder) GetLatestVersion() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLatestVersion", reflect.TypeOf((*MockStateCommitter)(nil).GetLatestVersion)) -} - -// GetProof mocks base method. -func (m *MockStateCommitter) GetProof(storeKey []byte, version uint64, key []byte) ([]proof.CommitmentOp, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetProof", storeKey, version, key) - ret0, _ := ret[0].([]proof.CommitmentOp) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetProof indicates an expected call of GetProof. -func (mr *MockStateCommitterMockRecorder) GetProof(storeKey, version, key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProof", reflect.TypeOf((*MockStateCommitter)(nil).GetProof), storeKey, version, key) -} - -// Has mocks base method. -func (m *MockStateCommitter) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Has", storeKey, version, key) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Has indicates an expected call of Has. -func (mr *MockStateCommitterMockRecorder) Has(storeKey, version, key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockStateCommitter)(nil).Has), storeKey, version, key) -} - -// Iterator mocks base method. -func (m *MockStateCommitter) Iterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Iterator", storeKey, version, start, end) - ret0, _ := ret[0].(store.Iterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Iterator indicates an expected call of Iterator. 
-func (mr *MockStateCommitterMockRecorder) Iterator(storeKey, version, start, end any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockStateCommitter)(nil).Iterator), storeKey, version, start, end) -} - -// LoadVersion mocks base method. -func (m *MockStateCommitter) LoadVersion(targetVersion uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LoadVersion", targetVersion) - ret0, _ := ret[0].(error) - return ret0 -} - -// LoadVersion indicates an expected call of LoadVersion. -func (mr *MockStateCommitterMockRecorder) LoadVersion(targetVersion any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadVersion", reflect.TypeOf((*MockStateCommitter)(nil).LoadVersion), targetVersion) -} - -// LoadVersionAndUpgrade mocks base method. -func (m *MockStateCommitter) LoadVersionAndUpgrade(version uint64, upgrades *store.StoreUpgrades) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LoadVersionAndUpgrade", version, upgrades) - ret0, _ := ret[0].(error) - return ret0 -} - -// LoadVersionAndUpgrade indicates an expected call of LoadVersionAndUpgrade. -func (mr *MockStateCommitterMockRecorder) LoadVersionAndUpgrade(version, upgrades any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadVersionAndUpgrade", reflect.TypeOf((*MockStateCommitter)(nil).LoadVersionAndUpgrade), version, upgrades) -} - -// LoadVersionForOverwriting mocks base method. -func (m *MockStateCommitter) LoadVersionForOverwriting(targetVersion uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LoadVersionForOverwriting", targetVersion) - ret0, _ := ret[0].(error) - return ret0 -} - -// LoadVersionForOverwriting indicates an expected call of LoadVersionForOverwriting. -func (mr *MockStateCommitterMockRecorder) LoadVersionForOverwriting(targetVersion any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadVersionForOverwriting", reflect.TypeOf((*MockStateCommitter)(nil).LoadVersionForOverwriting), targetVersion) -} - -// PausePruning mocks base method. -func (m *MockStateCommitter) PausePruning(pause bool) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PausePruning", pause) -} - -// PausePruning indicates an expected call of PausePruning. -func (mr *MockStateCommitterMockRecorder) PausePruning(pause any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PausePruning", reflect.TypeOf((*MockStateCommitter)(nil).PausePruning), pause) -} - -// Prune mocks base method. -func (m *MockStateCommitter) Prune(version uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Prune", version) - ret0, _ := ret[0].(error) - return ret0 -} - -// Prune indicates an expected call of Prune. -func (mr *MockStateCommitterMockRecorder) Prune(version any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prune", reflect.TypeOf((*MockStateCommitter)(nil).Prune), version) -} - -// PruneStoreKeys mocks base method. -func (m *MockStateCommitter) PruneStoreKeys(storeKeys []string, version uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PruneStoreKeys", storeKeys, version) - ret0, _ := ret[0].(error) - return ret0 -} - -// PruneStoreKeys indicates an expected call of PruneStoreKeys. 
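Because the mock implements the composite mock.StateCommitter interface defined in types.go further down, a single instance satisfies every narrower store interface at once. A hypothetical helper showing why the root store tests below can build a pruning manager and a Store from one MockStateCommitter:

```go
package mock_test

import (
	"testing"

	"go.uber.org/mock/gomock"

	"cosmossdk.io/store/v2"
	"cosmossdk.io/store/v2/mock"
)

// newBackends hands the same mock out under three of the store roles that
// mock.StateCommitter (see types.go below) embeds.
func newBackends(t *testing.T) (store.Committer, store.Pruner, store.PausablePruner) {
	ctrl := gomock.NewController(t)
	sc := mock.NewMockStateCommitter(ctrl)
	return sc, sc, sc
}
```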
-func (mr *MockStateCommitterMockRecorder) PruneStoreKeys(storeKeys, version any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneStoreKeys", reflect.TypeOf((*MockStateCommitter)(nil).PruneStoreKeys), storeKeys, version) -} - -// ReverseIterator mocks base method. -func (m *MockStateCommitter) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReverseIterator", storeKey, version, start, end) - ret0, _ := ret[0].(store.Iterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReverseIterator indicates an expected call of ReverseIterator. -func (mr *MockStateCommitterMockRecorder) ReverseIterator(storeKey, version, start, end any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockStateCommitter)(nil).ReverseIterator), storeKey, version, start, end) -} - -// SetInitialVersion mocks base method. -func (m *MockStateCommitter) SetInitialVersion(version uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetInitialVersion", version) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetInitialVersion indicates an expected call of SetInitialVersion. -func (mr *MockStateCommitterMockRecorder) SetInitialVersion(version any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetInitialVersion", reflect.TypeOf((*MockStateCommitter)(nil).SetInitialVersion), version) -} - -// VersionExists mocks base method. -func (m *MockStateCommitter) VersionExists(v uint64) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "VersionExists", v) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// VersionExists indicates an expected call of VersionExists. -func (mr *MockStateCommitterMockRecorder) VersionExists(v any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VersionExists", reflect.TypeOf((*MockStateCommitter)(nil).VersionExists), v) -} - -// WriteChangeset mocks base method. -func (m *MockStateCommitter) WriteChangeset(cs *store.Changeset) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteChangeset", cs) - ret0, _ := ret[0].(error) - return ret0 -} - -// WriteChangeset indicates an expected call of WriteChangeset. -func (mr *MockStateCommitterMockRecorder) WriteChangeset(cs any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChangeset", reflect.TypeOf((*MockStateCommitter)(nil).WriteChangeset), cs) -} diff --git a/store/v2/mock/types.go b/store/v2/mock/types.go deleted file mode 100644 index 3c5edb372a85..000000000000 --- a/store/v2/mock/types.go +++ /dev/null @@ -1,13 +0,0 @@ -package mock - -import "cosmossdk.io/store/v2" - -// StateCommitter is a mock of store.Committer -type StateCommitter interface { - store.Committer - store.Pruner - store.PausablePruner - store.UpgradeableStore - store.VersionedReader - store.UpgradableDatabase -} diff --git a/store/v2/pruning/manager.go b/store/v2/pruning/manager.go deleted file mode 100644 index e21fe1ce1952..000000000000 --- a/store/v2/pruning/manager.go +++ /dev/null @@ -1,52 +0,0 @@ -package pruning - -import ( - "cosmossdk.io/store/v2" -) - -// Manager is a struct that manages the pruning of old versions of the SC and SS. -type Manager struct { - // scPruner is the pruner for the SC. 
- scPruner store.Pruner
- // scPruningOption are the pruning options for the SC.
- scPruningOption *store.PruningOption
-}
-
-// NewManager creates a new Pruning Manager.
-func NewManager(scPruner store.Pruner, scPruningOption *store.PruningOption) *Manager {
- return &Manager{
- scPruner: scPruner,
- scPruningOption: scPruningOption,
- }
-}
-
-// Prune prunes the SC to the provided version.
-//
-// NOTE: It can also be called manually from outside the store.
-func (m *Manager) Prune(version uint64) error {
- // Prune the SC.
- if m.scPruningOption != nil {
- if prune, pruneTo := m.scPruningOption.ShouldPrune(version); prune {
- if err := m.scPruner.Prune(pruneTo); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (m *Manager) signalPruning(pause bool) {
- if scPausablePruner, ok := m.scPruner.(store.PausablePruner); ok {
- scPausablePruner.PausePruning(pause)
- }
-}
-
-func (m *Manager) PausePruning() {
- m.signalPruning(true)
-}
-
-func (m *Manager) ResumePruning(version uint64) error {
- m.signalPruning(false)
- return m.Prune(version)
-}
diff --git a/store/v2/pruning/manager_test.go b/store/v2/pruning/manager_test.go
deleted file mode 100644
index d45d123a3504..000000000000
--- a/store/v2/pruning/manager_test.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package pruning
-
-import (
- "fmt"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
- "github.com/stretchr/testify/suite"
-
- corestore "cosmossdk.io/core/store"
- coretesting "cosmossdk.io/core/testing"
- "cosmossdk.io/store/v2"
- "cosmossdk.io/store/v2/commitment"
- "cosmossdk.io/store/v2/commitment/iavl"
- dbm "cosmossdk.io/store/v2/db"
-)
-
-var storeKeys = []string{"store1", "store2", "store3"}
-
-type PruningManagerTestSuite struct {
- suite.Suite
-
- manager *Manager
- sc *commitment.CommitStore
-}
-
-func TestPruningManagerTestSuite(t *testing.T) {
- suite.Run(t, &PruningManagerTestSuite{})
-}
-
-func (s *PruningManagerTestSuite) SetupTest() {
- nopLog := coretesting.NewNopLogger()
- var err error
-
- mdb := dbm.NewMemDB()
- multiTrees := make(map[string]commitment.Tree)
- for _, storeKey := range storeKeys {
- prefixDB := dbm.NewPrefixDB(mdb, []byte(storeKey))
- multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, nopLog, iavl.DefaultConfig())
- }
- s.sc, err = commitment.NewCommitStore(multiTrees, nil, mdb, nopLog)
- s.Require().NoError(err)
-
- scPruningOption := store.NewPruningOptionWithCustom(0, 1) // prune all
- s.manager = NewManager(s.sc, scPruningOption)
-}
-
-func (s *PruningManagerTestSuite) TestPrune() {
- // commit changesets with pruning
- toVersion := uint64(100)
- keyCount := 10
- for version := uint64(1); version <= toVersion; version++ {
- cs := corestore.NewChangeset(version)
- for _, storeKey := range storeKeys {
- for i := 0; i < keyCount; i++ {
- cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
- }
- }
- s.Require().NoError(s.sc.WriteChangeset(cs))
- _, err := s.sc.Commit(version)
- s.Require().NoError(err)
-
- s.Require().NoError(s.manager.Prune(version))
- }
-
- // wait for the pruning to finish in the commitment store
- checkSCPrune := func() bool {
- count := 0
- for _, storeKey := range storeKeys {
- _, err := s.sc.GetProof([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", toVersion-1, 0)))
- if err != nil {
- count++
- }
- }
-
- return count == len(storeKeys)
- }
- s.Require().Eventually(checkSCPrune, 10*time.Second, 1*time.Second)
-}
-
-func TestPruningOption(t *testing.T) {
- testCases :=
[]struct {
- name string
- options *store.PruningOption
- version uint64
- pruning bool
- pruneVersion uint64
- }{
- {
- name: "no pruning",
- options: store.NewPruningOptionWithCustom(100, 0),
- version: 100,
- pruning: false,
- pruneVersion: 0,
- },
- {
- name: "prune all",
- options: store.NewPruningOptionWithCustom(0, 1),
- version: 19,
- pruning: true,
- pruneVersion: 18,
- },
- {
- name: "prune none",
- options: store.NewPruningOptionWithCustom(100, 10),
- version: 19,
- pruning: false,
- pruneVersion: 0,
- },
- {
- name: "prune some",
- options: store.NewPruningOptionWithCustom(10, 50),
- version: 100,
- pruning: true,
- pruneVersion: 89,
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- pruning, pruneVersion := tc.options.ShouldPrune(tc.version)
- require.Equal(t, tc.pruning, pruning)
- require.Equal(t, tc.pruneVersion, pruneVersion)
- })
- }
-}
-
-func (s *PruningManagerTestSuite) TestSignalCommit() {
- // commit version 1
- cs := corestore.NewChangeset(1)
- for _, storeKey := range storeKeys {
- cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", 1, 0)), []byte(fmt.Sprintf("value-%d-%d", 1, 0)), false)
- }
-
- s.Require().NoError(s.sc.WriteChangeset(cs))
- _, err := s.sc.Commit(1)
- s.Require().NoError(err)
-
- // commit version 2
- for _, storeKey := range storeKeys {
- cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", 2, 0)), []byte(fmt.Sprintf("value-%d-%d", 2, 0)), false)
- }
- cs.Version = 2
-
- // signaling commit has started
- s.manager.PausePruning()
-
- s.Require().NoError(s.sc.WriteChangeset(cs))
- _, err = s.sc.Commit(2)
- s.Require().NoError(err)
-
- // try to prune before signaling that the commit has finished
- s.Require().NoError(s.manager.Prune(2))
-
- // the proof is removed even though SignalCommit has not yet indicated that
- // the commit process has finished, since commitInfo is removed asynchronously
- // from the tree data
- checkSCPrune := func() bool {
- count := 0
- for _, storeKey := range storeKeys {
- _, err := s.sc.GetProof([]byte(storeKey), 1, []byte(fmt.Sprintf("key-%d-%d", 1, 0)))
- if err != nil {
- count++
- }
- }
-
- return count == len(storeKeys)
- }
- s.Require().Eventually(checkSCPrune, 10*time.Second, 1*time.Second)
-
- // data from the state commitment should not be pruned since we haven't
- // signaled that the commit process has finished
- val, err := s.sc.Get([]byte(storeKeys[0]), 1, []byte(fmt.Sprintf("key-%d-%d", 1, 0)))
- s.Require().NoError(err)
- s.Require().Equal(val, []byte(fmt.Sprintf("value-%d-%d", 1, 0)))
-
- // signaling commit has finished, version 1 should be pruned
- err = s.manager.ResumePruning(2)
- s.Require().NoError(err)
-
- checkSCPrune = func() bool {
- count := 0
- for _, storeKey := range storeKeys {
- _, err := s.sc.GetProof([]byte(storeKey), 1, []byte(fmt.Sprintf("key-%d-%d", 1, 0)))
- if err != nil {
- count++
- }
- }
-
- return count == len(storeKeys)
- }
- s.Require().Eventually(checkSCPrune, 10*time.Second, 1*time.Second)
-
- // try again with the commit start and finish signals issued accordingly
- // commit changesets with pruning
- toVersion := uint64(100)
- keyCount := 10
- for version := uint64(3); version <= toVersion; version++ {
- cs := corestore.NewChangeset(version)
- for _, storeKey := range storeKeys {
- for i := 0; i < keyCount; i++ {
- cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
- }
- }
- s.manager.PausePruning()
-
- s.Require().NoError(s.sc.WriteChangeset(cs))
- _, err := s.sc.Commit(version)
- s.Require().NoError(err)
-
- err =
s.manager.ResumePruning(version) - s.Require().NoError(err) - } - - // wait for the pruning to finish in the commitment store - checkSCPrune = func() bool { - count := 0 - for _, storeKey := range storeKeys { - _, err := s.sc.GetProof([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", toVersion-1, 0))) - if err != nil { - count++ - } - } - - return count == len(storeKeys) - } - s.Require().Eventually(checkSCPrune, 10*time.Second, 1*time.Second) -} diff --git a/store/v2/root/factory.go b/store/v2/root/factory.go deleted file mode 100644 index 36eadf2382bc..000000000000 --- a/store/v2/root/factory.go +++ /dev/null @@ -1,131 +0,0 @@ -package root - -import ( - "errors" - "fmt" - - "cosmossdk.io/core/log" - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/commitment" - "cosmossdk.io/store/v2/commitment/iavl" - "cosmossdk.io/store/v2/commitment/mem" - "cosmossdk.io/store/v2/db" - "cosmossdk.io/store/v2/internal" - "cosmossdk.io/store/v2/pruning" -) - -type ( - SCType string -) - -const ( - SCTypeIavl SCType = "iavl" - SCTypeIavlV2 SCType = "iavl-v2" -) - -// Options are the options for creating a root store. -type Options struct { - SCType SCType `mapstructure:"sc-type" toml:"sc-type" comment:"State commitment database type. Currently we support: \"iavl\" and \"iavl-v2\""` - SCPruningOption *store.PruningOption `mapstructure:"sc-pruning-option" toml:"sc-pruning-option" comment:"Pruning options for state commitment"` - IavlConfig *iavl.Config `mapstructure:"iavl-config" toml:"iavl-config"` -} - -// FactoryOptions are the options for creating a root store. -type FactoryOptions struct { - Logger log.Logger - RootDir string - Options Options - StoreKeys []string - SCRawDB corestore.KVStoreWithBatch -} - -// DefaultStoreOptions returns the default options for creating a root store. -func DefaultStoreOptions() Options { - return Options{ - SCType: SCTypeIavl, - SCPruningOption: &store.PruningOption{ - KeepRecent: 2, - Interval: 100, - }, - IavlConfig: &iavl.Config{ - CacheSize: 100_000, - SkipFastStorageUpgrade: true, - }, - } -} - -// CreateRootStore is a convenience function to create a root store based on the -// provided FactoryOptions. Strictly speaking app developers can create the root -// store directly by calling root.New, so this function is not -// necessary, but demonstrates the required steps and configuration to create a root store. 
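Before the factory itself, a hedged usage sketch, not taken from the patch: the root directory and store keys are hypothetical, and the in-memory DB from store/v2/db stands in for a real backend:

```go
package demo

import (
	coretesting "cosmossdk.io/core/testing"
	"cosmossdk.io/store/v2"
	"cosmossdk.io/store/v2/db"
	"cosmossdk.io/store/v2/root"
)

// newRootStore assembles FactoryOptions and builds a root store from them.
func newRootStore() (store.RootStore, error) {
	opts := &root.FactoryOptions{
		Logger:    coretesting.NewNopLogger(),
		RootDir:   "/tmp/demo-app",              // hypothetical
		Options:   root.DefaultStoreOptions(),   // iavl SC, keep-recent 2, interval 100
		StoreKeys: []string{"store1", "store2"}, // illustrative
		SCRawDB:   db.NewMemDB(),
	}
	return root.CreateRootStore(opts)
}
```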
-func CreateRootStore(opts *FactoryOptions) (store.RootStore, error) { - var ( - sc *commitment.CommitStore - err error - ) - - storeOpts := opts.Options - - metadata := commitment.NewMetadataStore(opts.SCRawDB) - latestVersion, err := metadata.GetLatestVersion() - if err != nil { - return nil, err - } - if len(opts.StoreKeys) == 0 { - lastCommitInfo, err := metadata.GetCommitInfo(latestVersion) - if err != nil { - return nil, err - } - if lastCommitInfo == nil { - return nil, fmt.Errorf("tried to construct a root store with no store keys specified but no commit info found for version %d", latestVersion) - } - for _, si := range lastCommitInfo.StoreInfos { - opts.StoreKeys = append(opts.StoreKeys, string(si.Name)) - } - } - removedStoreKeys, err := metadata.GetRemovedStoreKeys(latestVersion) - if err != nil { - return nil, err - } - - newTreeFn := func(key string) (commitment.Tree, error) { - if internal.IsMemoryStoreKey(key) { - return mem.New(), nil - } else { - switch storeOpts.SCType { - case SCTypeIavl: - return iavl.NewIavlTree(db.NewPrefixDB(opts.SCRawDB, []byte(key)), opts.Logger, storeOpts.IavlConfig), nil - case SCTypeIavlV2: - return nil, errors.New("iavl v2 not supported") - default: - return nil, errors.New("unsupported commitment store type") - } - } - } - - trees := make(map[string]commitment.Tree, len(opts.StoreKeys)) - for _, key := range opts.StoreKeys { - tree, err := newTreeFn(key) - if err != nil { - return nil, err - } - trees[key] = tree - } - oldTrees := make(map[string]commitment.Tree, len(opts.StoreKeys)) - for _, key := range removedStoreKeys { - tree, err := newTreeFn(string(key)) - if err != nil { - return nil, err - } - oldTrees[string(key)] = tree - } - - sc, err = commitment.NewCommitStore(trees, oldTrees, opts.SCRawDB, opts.Logger) - if err != nil { - return nil, err - } - - pm := pruning.NewManager(sc, storeOpts.SCPruningOption) - return New(opts.SCRawDB, opts.Logger, sc, pm, nil, nil) -} diff --git a/store/v2/root/migrate_test.go b/store/v2/root/migrate_test.go deleted file mode 100644 index 3b431bdb24f6..000000000000 --- a/store/v2/root/migrate_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package root - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/suite" - - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/log" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/commitment" - "cosmossdk.io/store/v2/commitment/iavl" - dbm "cosmossdk.io/store/v2/db" - "cosmossdk.io/store/v2/migration" - "cosmossdk.io/store/v2/pruning" - "cosmossdk.io/store/v2/snapshots" -) - -var storeKeys = []string{"store1", "store2", "store3"} - -type MigrateStoreTestSuite struct { - suite.Suite - - rootStore store.RootStore -} - -func TestMigrateStoreTestSuite(t *testing.T) { - suite.Run(t, &MigrateStoreTestSuite{}) -} - -func (s *MigrateStoreTestSuite) SetupTest() { - testLog := log.NewTestLogger(s.T()) - nopLog := coretesting.NewNopLogger() - - mdb := dbm.NewMemDB() - multiTrees := make(map[string]commitment.Tree) - for _, storeKey := range storeKeys { - prefixDB := dbm.NewPrefixDB(mdb, []byte(storeKey)) - multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, nopLog, iavl.DefaultConfig()) - } - orgSC, err := commitment.NewCommitStore(multiTrees, nil, mdb, testLog) - s.Require().NoError(err) - - // apply changeset against the original store - toVersion := uint64(200) - keyCount := 10 - for version := uint64(1); version <= toVersion; version++ { - cs := corestore.NewChangeset(version) - for _, storeKey := range storeKeys { 
- for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } - } - s.Require().NoError(orgSC.WriteChangeset(cs)) - _, err = orgSC.Commit(version) - s.Require().NoError(err) - } - - multiTrees1 := make(map[string]commitment.Tree) - for _, storeKey := range storeKeys { - multiTrees1[storeKey] = iavl.NewIavlTree(dbm.NewMemDB(), nopLog, iavl.DefaultConfig()) - } - sc, err := commitment.NewCommitStore(multiTrees1, nil, dbm.NewMemDB(), testLog) - s.Require().NoError(err) - - snapshotsStore, err := snapshots.NewStore(s.T().TempDir()) - s.Require().NoError(err) - snapshotManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), orgSC, nil, testLog) - migrationManager := migration.NewManager(dbm.NewMemDB(), snapshotManager, sc, testLog) - pm := pruning.NewManager(sc, nil) - - // assume no storage store, simulate the migration process - s.rootStore, err = New(dbm.NewMemDB(), testLog, orgSC, pm, migrationManager, nil) - s.Require().NoError(err) -} - -func (s *MigrateStoreTestSuite) TestMigrateState() { - err := s.rootStore.LoadLatestVersion() - s.Require().NoError(err) - originalLatestVersion, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - - // check if the Query fallback to the original SC - for version := uint64(1); version <= originalLatestVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < 10; i++ { - res, err := s.rootStore.Query([]byte(storeKey), version, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), res.Value) - } - } - } - - // continue to apply changeset against the original store - latestVersion := originalLatestVersion + 1 - keyCount := 10 - for ; latestVersion < 2*originalLatestVersion; latestVersion++ { - cs := corestore.NewChangeset(latestVersion) - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", latestVersion, i)), []byte(fmt.Sprintf("value-%d-%d", latestVersion, i)), false) - } - } - _, err = s.rootStore.Commit(cs) - s.Require().NoError(err) - - // check if the migration is completed - ver, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - if ver == latestVersion { - break - } - - // add some delay to simulate the consensus process - time.Sleep(100 * time.Millisecond) - } - - // check if the migration is successful - version, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(latestVersion, version) - - // query against the migrated store - for version := uint64(1); version <= latestVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - targetVersion := version - if version < originalLatestVersion { - targetVersion = originalLatestVersion - } - res, err := s.rootStore.Query([]byte(storeKey), targetVersion, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), res.Value) - } - } - } - - // apply changeset against the migrated store - for version := latestVersion + 1; version <= latestVersion+10; version++ { - cs := corestore.NewChangeset(version) - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } 
- }
- _, err = s.rootStore.Commit(cs)
- s.Require().NoError(err)
- }
-
- version, err = s.rootStore.GetLatestVersion()
- s.Require().NoError(err)
- s.Require().Equal(latestVersion+10, version)
-}
diff --git a/store/v2/root/store.go b/store/v2/root/store.go
deleted file mode 100644
index 6faa51602c5b..000000000000
--- a/store/v2/root/store.go
+++ /dev/null
@@ -1,400 +0,0 @@
-package root
-
-import (
- "crypto/sha256"
- "errors"
- "fmt"
- "io"
- "sync"
- "time"
-
- "golang.org/x/sync/errgroup"
-
- corelog "cosmossdk.io/core/log"
- corestore "cosmossdk.io/core/store"
- "cosmossdk.io/store/v2"
- "cosmossdk.io/store/v2/metrics"
- "cosmossdk.io/store/v2/migration"
- "cosmossdk.io/store/v2/proof"
- "cosmossdk.io/store/v2/pruning"
-)
-
-var (
- _ store.RootStore = (*Store)(nil)
- _ store.UpgradeableStore = (*Store)(nil)
-)
-
-// Store defines the SDK's default RootStore implementation. It contains a single
-// State Commitment (SC) backend. The SC backend may or may not support multiple
-// store keys and is implementation dependent.
-type Store struct {
- logger corelog.Logger
-
- // holds the db instance for closing it
- dbCloser io.Closer
-
- // stateCommitment reflects the state commitment (SC) backend
- stateCommitment store.Committer
-
- // lastCommitInfo reflects the last version/hash that has been committed
- lastCommitInfo *proof.CommitInfo
-
- // telemetry reflects a telemetry agent responsible for emitting metrics (if any)
- telemetry metrics.StoreMetrics
-
- // pruningManager reflects the pruning manager used to prune state of the SC backend
- pruningManager *pruning.Manager
-
- // Migration related fields
- // migrationManager reflects the migration manager used to migrate state from v1 to v2
- migrationManager *migration.Manager
- // chChangeset reflects the channel used to send the changeset to the migration manager
- chChangeset chan *migration.VersionedChangeset
- // chDone reflects the channel used to signal the migration manager that the migration
- // is done
- chDone chan struct{}
- // isMigrating reflects whether the store is currently migrating
- isMigrating bool
-}
-
-// New creates a new root Store instance.
-//
-// NOTE: The migration manager is optional and can be nil if no migration is required.
-func New(
- dbCloser io.Closer,
- logger corelog.Logger,
- sc store.Committer,
- pm *pruning.Manager,
- mm *migration.Manager,
- m metrics.StoreMetrics,
-) (store.RootStore, error) {
- return &Store{
- dbCloser: dbCloser,
- logger: logger,
- stateCommitment: sc,
- pruningManager: pm,
- migrationManager: mm,
- telemetry: m,
- isMigrating: mm != nil,
- }, nil
-}
-
-// Close closes the store and resets all internal fields. Note, Close() is NOT
-// idempotent and should only be called once.
-func (s *Store) Close() (err error) {
- err = errors.Join(err, s.stateCommitment.Close())
- err = errors.Join(err, s.dbCloser.Close())
-
- s.stateCommitment = nil
- s.lastCommitInfo = nil
-
- return err
-}
-
-func (s *Store) SetMetrics(m metrics.Metrics) {
- s.telemetry = m
-}
-
-func (s *Store) SetInitialVersion(v uint64) error {
- return s.stateCommitment.SetInitialVersion(v)
-}
-
-// getVersionedReader returns a VersionedReader for the given version. It returns
-// the state commitment backend if the version exists in it; otherwise it
-// returns an error.
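Callers typically reach this helper through StateAt or StateLatest; a hedged sketch of reading one key at a historical version (store key and version are illustrative):

```go
package demo

import "cosmossdk.io/store/v2"

// readAtVersion resolves a historical reader via StateAt and fetches one key.
func readAtVersion(rs store.RootStore, version uint64, storeKey, key []byte) ([]byte, error) {
	ro, err := rs.StateAt(version)
	if err != nil {
		// e.g. the version was pruned or was never committed
		return nil, err
	}
	reader, err := ro.GetReader(storeKey)
	if err != nil {
		return nil, err
	}
	return reader.Get(key)
}
```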
-func (s *Store) getVersionedReader(version uint64) (store.VersionedReader, error) { - isExist, err := s.stateCommitment.VersionExists(version) - if err != nil { - return nil, err - } - if isExist { - return s.stateCommitment, nil - } - return nil, fmt.Errorf("version %d does not exist", version) -} - -func (s *Store) StateLatest() (uint64, corestore.ReaderMap, error) { - v, err := s.GetLatestVersion() - if err != nil { - return 0, nil, err - } - vReader, err := s.getVersionedReader(v) - if err != nil { - return 0, nil, err - } - - return v, NewReaderMap(v, vReader), nil -} - -// StateAt returns a read-only view of the state at a given version. -func (s *Store) StateAt(v uint64) (corestore.ReaderMap, error) { - vReader, err := s.getVersionedReader(v) - return NewReaderMap(v, vReader), err -} - -func (s *Store) GetStateCommitment() store.Committer { - return s.stateCommitment -} - -// LastCommitID returns a CommitID based off of the latest internal CommitInfo. -// If an internal CommitInfo is not set, a new one will be returned with only the -// latest version set, which is based off of the SC view. -func (s *Store) LastCommitID() (proof.CommitID, error) { - if s.lastCommitInfo != nil { - return s.lastCommitInfo.CommitID(), nil - } - - latestVersion, err := s.stateCommitment.GetLatestVersion() - if err != nil { - return proof.CommitID{}, err - } - // if the latest version is 0, we return a CommitID with version 0 and a hash of an empty byte slice - bz := sha256.Sum256([]byte{}) - - return proof.CommitID{Version: latestVersion, Hash: bz[:]}, nil -} - -// GetLatestVersion returns the latest version based on the latest internal -// CommitInfo. An error is returned if the latest CommitInfo or version cannot -// be retrieved. -func (s *Store) GetLatestVersion() (uint64, error) { - lastCommitID, err := s.LastCommitID() - if err != nil { - return 0, err - } - - return lastCommitID.Version, nil -} - -func (s *Store) Query(storeKey []byte, version uint64, key []byte, prove bool) (store.QueryResult, error) { - if s.telemetry != nil { - now := time.Now() - defer s.telemetry.MeasureSince(now, "root_store", "query") - } - - val, err := s.stateCommitment.Get(storeKey, version, key) - if err != nil { - return store.QueryResult{}, fmt.Errorf("failed to query SC store: %w", err) - } - - result := store.QueryResult{ - Key: key, - Value: val, - Version: version, - } - - if prove { - result.ProofOps, err = s.stateCommitment.GetProof(storeKey, version, key) - if err != nil { - return store.QueryResult{}, fmt.Errorf("failed to get SC store proof: %w", err) - } - } - - return result, nil -} - -func (s *Store) LoadLatestVersion() error { - if s.telemetry != nil { - now := time.Now() - defer s.telemetry.MeasureSince(now, "root_store", "load_latest_version") - } - - lv, err := s.GetLatestVersion() - if err != nil { - return err - } - - return s.loadVersion(lv, nil, false) -} - -func (s *Store) LoadVersion(version uint64) error { - if s.telemetry != nil { - now := time.Now() - defer s.telemetry.MeasureSince(now, "root_store", "load_version") - } - - return s.loadVersion(version, nil, false) -} - -func (s *Store) LoadVersionForOverwriting(version uint64) error { - if s.telemetry != nil { - now := time.Now() - defer s.telemetry.MeasureSince(now, "root_store", "load_version_for_overwriting") - } - - return s.loadVersion(version, nil, true) -} - -// LoadVersionAndUpgrade implements the UpgradeableStore interface. -// -// NOTE: It cannot be called while the store is migrating. 
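As a hedged illustration of the method below, an app might apply store-key changes at an upgrade height like this; the module store keys are hypothetical, and the Added/Deleted fields are assumed from how corestore.StoreUpgrades is used elsewhere in this patch:

```go
package demo

import (
	corestore "cosmossdk.io/core/store"
	"cosmossdk.io/store/v2"
)

// upgradeAtHeight loads `height` and adds/drops store keys in one step.
func upgradeAtHeight(rs store.UpgradeableStore, height uint64) error {
	upgrades := &corestore.StoreUpgrades{
		Added:   []string{"newmodule"}, // hypothetical
		Deleted: []string{"oldmodule"}, // hypothetical
	}
	return rs.LoadVersionAndUpgrade(height, upgrades)
}
```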
-func (s *Store) LoadVersionAndUpgrade(version uint64, upgrades *corestore.StoreUpgrades) error {
- if upgrades == nil {
- return errors.New("upgrades cannot be nil")
- }
-
- if s.telemetry != nil {
- defer s.telemetry.MeasureSince(time.Now(), "root_store", "load_version_and_upgrade")
- }
-
- if s.isMigrating {
- return errors.New("cannot upgrade while migrating")
- }
-
- if err := s.loadVersion(version, upgrades, true); err != nil {
- return err
- }
-
- return nil
-}
-
-func (s *Store) loadVersion(v uint64, upgrades *corestore.StoreUpgrades, overrideAfter bool) error {
- s.logger.Debug("loading version", "version", v)
-
- if upgrades == nil {
- if !overrideAfter {
- if err := s.stateCommitment.LoadVersion(v); err != nil {
- return fmt.Errorf("failed to load SC version %d: %w", v, err)
- }
- } else {
- if err := s.stateCommitment.LoadVersionForOverwriting(v); err != nil {
- return fmt.Errorf("failed to load SC version %d: %w", v, err)
- }
- }
- } else {
- // if upgrades are provided, we need to load the version and apply the upgrades
- if err := s.stateCommitment.LoadVersionAndUpgrade(v, upgrades); err != nil {
- return fmt.Errorf("failed to load SC version with upgrades %d: %w", v, err)
- }
- }
-
- // set lastCommitInfo explicitly s.t. Commit commits the correct version, i.e. v+1
- var err error
- s.lastCommitInfo, err = s.stateCommitment.GetCommitInfo(v)
- if err != nil {
- return fmt.Errorf("failed to get commit info for version %d: %w", v, err)
- }
-
- // if we're migrating, we need to start the migration process
- if s.isMigrating {
- s.startMigration()
- }
-
- return nil
-}
-
-// Commit commits all state changes to the underlying SC backend. It
-// writes a batch of the changeset to the SC tree, and retrieves the CommitInfo
-// from the SC tree. Finally, it commits the SC tree and returns the hash of
-// the CommitInfo.
-func (s *Store) Commit(cs *corestore.Changeset) ([]byte, error) {
- if s.telemetry != nil {
- now := time.Now()
- defer s.telemetry.MeasureSince(now, "root_store", "commit")
- }
-
- if err := s.handleMigration(cs); err != nil {
- return nil, err
- }
-
- // signal to the pruning manager that a new version is about to be committed;
- // this may be required if the SC backend implementation has a background
- // pruning process (iavl v1, for example) which must be paused during the commit
- s.pruningManager.PausePruning()
-
- eg := new(errgroup.Group)
-
- // commit SC async
- var cInfo *proof.CommitInfo
- eg.Go(func() error {
- if err := s.stateCommitment.WriteChangeset(cs); err != nil {
- return fmt.Errorf("failed to write batch to SC store: %w", err)
- }
- var scErr error
- cInfo, scErr = s.stateCommitment.Commit(cs.Version)
- if scErr != nil {
- return fmt.Errorf("failed to commit SC store: %w", scErr)
- }
- return nil
- })
-
- if err := eg.Wait(); err != nil {
- return nil, err
- }
-
- if cInfo.Version != cs.Version {
- return nil, fmt.Errorf("commit version mismatch: got %d, expected %d", cInfo.Version, cs.Version)
- }
- s.lastCommitInfo = cInfo
-
- // signal to the pruning manager that the commit is done
- if err := s.pruningManager.ResumePruning(s.lastCommitInfo.Version); err != nil {
- s.logger.Error("failed to signal commit done to pruning manager", "err", err)
- }
-
- return s.lastCommitInfo.Hash(), nil
-}
-
-// startMigration starts a migration process to migrate the RootStore/v1 to the
-// SC backend of store/v2 and initializes the channels.
-// It runs in a separate goroutine and replaces the current RootStore with the -// migrated new backends once the migration is complete. -// -// NOTE: This method should only be called once after loadVersion. -func (s *Store) startMigration() { - // buffer at most 1 changeset, if the receiver is behind attempting to buffer - // more than 1 will block. - s.chChangeset = make(chan *migration.VersionedChangeset, 1) - // it is used to signal the migration manager that the migration is done - s.chDone = make(chan struct{}) - - mtx := sync.Mutex{} - mtx.Lock() - go func() { - version := s.lastCommitInfo.Version - s.logger.Info("starting migration", "version", version) - mtx.Unlock() - if err := s.migrationManager.Start(version, s.chChangeset, s.chDone); err != nil { - s.logger.Error("failed to start migration", "err", err) - } - }() - - // wait for the migration manager to start - mtx.Lock() - defer mtx.Unlock() -} - -func (s *Store) handleMigration(cs *corestore.Changeset) error { - if s.isMigrating { - // if the migration manager has already migrated to the version, close the - // channels and replace the state commitment - if s.migrationManager.GetMigratedVersion() == s.lastCommitInfo.Version { - close(s.chDone) - close(s.chChangeset) - s.isMigrating = false - // close the old state commitment and replace it with the new one - if err := s.stateCommitment.Close(); err != nil { - return fmt.Errorf("failed to close the old SC store: %w", err) - } - newStateCommitment := s.migrationManager.GetStateCommitment() - if newStateCommitment != nil { - s.stateCommitment = newStateCommitment - } - if err := s.migrationManager.Close(); err != nil { - return fmt.Errorf("failed to close migration manager: %w", err) - } - s.logger.Info("migration completed", "version", s.lastCommitInfo.Version) - } else { - // queue the next changeset to the migration manager - s.chChangeset <- &migration.VersionedChangeset{Version: s.lastCommitInfo.Version + 1, Changeset: cs} - } - } - return nil -} - -func (s *Store) Prune(version uint64) error { - return s.pruningManager.Prune(version) -} diff --git a/store/v2/root/store_mock_test.go b/store/v2/root/store_mock_test.go deleted file mode 100644 index 0ec0a31bdaf2..000000000000 --- a/store/v2/root/store_mock_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package root - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" - - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/metrics" - "cosmossdk.io/store/v2/mock" - "cosmossdk.io/store/v2/pruning" -) - -func newTestRootStore(sc store.Committer) *Store { - noopLog := coretesting.NewNopLogger() - pm := pruning.NewManager(sc.(store.Pruner), nil) - return &Store{ - logger: noopLog, - telemetry: metrics.Metrics{}, - stateCommitment: sc, - pruningManager: pm, - isMigrating: false, - } -} - -func TestGetLatestState(t *testing.T) { - ctrl := gomock.NewController(t) - sc := mock.NewMockStateCommitter(ctrl) - rs := newTestRootStore(sc) - - // Get the latest version - sc.EXPECT().GetLatestVersion().Return(uint64(0), errors.New("error")) - _, err := rs.GetLatestVersion() - require.Error(t, err) - sc.EXPECT().GetLatestVersion().Return(uint64(1), nil) - v, err := rs.GetLatestVersion() - require.NoError(t, err) - require.Equal(t, uint64(1), v) -} - -func TestQuery(t *testing.T) { - ctrl := gomock.NewController(t) - sc := mock.NewMockStateCommitter(ctrl) - rs := newTestRootStore(sc) - - // Query without Proof - 
sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error")) - _, err := rs.Query(nil, 0, nil, false) - require.Error(t, err) - sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) - v, err := rs.Query(nil, 0, nil, false) - require.NoError(t, err) - require.Equal(t, []byte("value"), v.Value) - - // Query with Proof - sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) - sc.EXPECT().GetProof(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error")) - _, err = rs.Query(nil, 0, nil, true) - require.Error(t, err) - - // Query with Migration - - rs.isMigrating = true - sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) - _, err = rs.Query(nil, 0, nil, false) - require.NoError(t, err) -} - -func TestLoadVersion(t *testing.T) { - ctrl := gomock.NewController(t) - sc := mock.NewMockStateCommitter(ctrl) - rs := newTestRootStore(sc) - - // LoadLatestVersion - sc.EXPECT().GetLatestVersion().Return(uint64(0), errors.New("error")) - err := rs.LoadLatestVersion() - require.Error(t, err) - sc.EXPECT().GetLatestVersion().Return(uint64(1), nil) - sc.EXPECT().LoadVersion(uint64(1)).Return(errors.New("error")) - err = rs.LoadLatestVersion() - require.Error(t, err) - - // LoadVersion - sc.EXPECT().LoadVersion(gomock.Any()).Return(nil) - sc.EXPECT().GetCommitInfo(uint64(2)).Return(nil, errors.New("error")) - err = rs.LoadVersion(uint64(2)) - require.Error(t, err) - - // LoadVersionUpgrade - v := &corestore.StoreUpgrades{} - sc.EXPECT().LoadVersionAndUpgrade(uint64(2), v).Return(errors.New("error")) - err = rs.LoadVersionAndUpgrade(uint64(2), v) - require.Error(t, err) - - // LoadVersionUpgrade with Migration - rs.isMigrating = true - err = rs.LoadVersionAndUpgrade(uint64(2), v) - require.Error(t, err) -} diff --git a/store/v2/root/store_test.go b/store/v2/root/store_test.go deleted file mode 100644 index 59df4d68384d..000000000000 --- a/store/v2/root/store_test.go +++ /dev/null @@ -1,830 +0,0 @@ -package root - -import ( - "crypto/sha256" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/suite" - - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/commitment" - "cosmossdk.io/store/v2/commitment/iavl" - dbm "cosmossdk.io/store/v2/db" - "cosmossdk.io/store/v2/proof" - "cosmossdk.io/store/v2/pruning" -) - -const ( - testStoreKey = "test_store_key" - testStoreKey2 = "test_store_key2" - testStoreKey3 = "test_store_key3" -) - -var testStoreKeys = []string{testStoreKey, testStoreKey2, testStoreKey3} - -var ( - testStoreKeyBytes = []byte(testStoreKey) - testStoreKey2Bytes = []byte(testStoreKey2) - testStoreKey3Bytes = []byte(testStoreKey3) -) - -type RootStoreTestSuite struct { - suite.Suite - - rootStore store.RootStore -} - -func TestStorageTestSuite(t *testing.T) { - suite.Run(t, &RootStoreTestSuite{}) -} - -func (s *RootStoreTestSuite) SetupTest() { - noopLog := coretesting.NewNopLogger() - - tree := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig()) - tree2 := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig()) - tree3 := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig()) - sc, err := commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree, testStoreKey2: tree2, testStoreKey3: tree3}, nil, dbm.NewMemDB(), noopLog) - s.Require().NoError(err) - - pm := pruning.NewManager(sc, nil) - rs, err := New(dbm.NewMemDB(), noopLog, 
sc, pm, nil, nil) - s.Require().NoError(err) - - s.rootStore = rs -} - -func (s *RootStoreTestSuite) newStoreWithPruneConfig(config *store.PruningOption) { - noopLog := coretesting.NewNopLogger() - - mdb := dbm.NewMemDB() - multiTrees := make(map[string]commitment.Tree) - for _, storeKey := range testStoreKeys { - prefixDB := dbm.NewPrefixDB(mdb, []byte(storeKey)) - multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, noopLog, iavl.DefaultConfig()) - } - - sc, err := commitment.NewCommitStore(multiTrees, nil, dbm.NewMemDB(), noopLog) - s.Require().NoError(err) - - pm := pruning.NewManager(sc, config) - - rs, err := New(dbm.NewMemDB(), noopLog, sc, pm, nil, nil) - s.Require().NoError(err) - - s.rootStore = rs -} - -func (s *RootStoreTestSuite) newStoreWithBackendMount(sc store.Committer, pm *pruning.Manager) { - noopLog := coretesting.NewNopLogger() - - rs, err := New(dbm.NewMemDB(), noopLog, sc, pm, nil, nil) - s.Require().NoError(err) - - s.rootStore = rs -} - -func (s *RootStoreTestSuite) TearDownTest() { - err := s.rootStore.Close() - s.Require().NoError(err) -} - -func (s *RootStoreTestSuite) TestGetStateCommitment() { - s.Require().Equal(s.rootStore.GetStateCommitment(), s.rootStore.(*Store).stateCommitment) -} - -func (s *RootStoreTestSuite) TestSetInitialVersion() { - initialVersion := uint64(5) - s.Require().NoError(s.rootStore.SetInitialVersion(initialVersion)) - - // perform an initial, empty commit - cs := corestore.NewChangeset(initialVersion) - cs.Add(testStoreKeyBytes, []byte("foo"), []byte("bar"), false) - _, err := s.rootStore.Commit(corestore.NewChangeset(initialVersion)) - s.Require().NoError(err) - - // check the latest version - lVersion, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(initialVersion, lVersion) - - // set the initial version again - rInitialVersion := uint64(100) - s.Require().NoError(s.rootStore.SetInitialVersion(rInitialVersion)) - - // TODO fix version munging here - // perform the commit - cs = corestore.NewChangeset(initialVersion + 1) - cs.Add(testStoreKey2Bytes, []byte("foo"), []byte("bar"), false) - _, err = s.rootStore.Commit(cs) - s.Require().NoError(err) - lVersion, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - // SetInitialVersion only works once - s.Require().NotEqual(rInitialVersion, lVersion) - s.Require().Equal(initialVersion+1, lVersion) -} - -func (s *RootStoreTestSuite) TestQuery() { - _, err := s.rootStore.Query([]byte{}, 1, []byte("foo"), true) - s.Require().Error(err) - - // write and commit a changeset - cs := corestore.NewChangeset(1) - cs.Add(testStoreKeyBytes, []byte("foo"), []byte("bar"), false) - - commitHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(commitHash) - - // ensure the proof is non-nil for the corresponding version - result, err := s.rootStore.Query([]byte(testStoreKey), 1, []byte("foo"), true) - s.Require().NoError(err) - s.Require().NotNil(result.ProofOps) - s.Require().Equal([]byte("foo"), result.ProofOps[0].Key) -} - -func (s *RootStoreTestSuite) TestGetFallback() { - sc := s.rootStore.GetStateCommitment() - - // create a changeset and commit it to SC ONLY - cs := corestore.NewChangeset(1) - cs.Add(testStoreKeyBytes, []byte("foo"), []byte("bar"), false) - - err := sc.WriteChangeset(cs) - s.Require().NoError(err) - - _, err = sc.Commit(cs.Version) - s.Require().NoError(err) - - // ensure we can query for the key, which should fallback to SC - qResult, err := s.rootStore.Query(testStoreKeyBytes, 1, []byte("foo"), false) - 
s.Require().NoError(err) - s.Require().Equal([]byte("bar"), qResult.Value) - - // non-existent key - qResult, err = s.rootStore.Query(testStoreKeyBytes, 1, []byte("non_existent_key"), false) - s.Require().NoError(err) - s.Require().Nil(qResult.Value) -} - -func (s *RootStoreTestSuite) TestQueryProof() { - cs := corestore.NewChangeset(1) - // testStoreKey - cs.Add(testStoreKeyBytes, []byte("key1"), []byte("value1"), false) - cs.Add(testStoreKeyBytes, []byte("key2"), []byte("value2"), false) - // testStoreKey2 - cs.Add(testStoreKey2Bytes, []byte("key3"), []byte("value3"), false) - // testStoreKey3 - cs.Add(testStoreKey3Bytes, []byte("key4"), []byte("value4"), false) - - // commit - _, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - - // query proof for testStoreKey - result, err := s.rootStore.Query(testStoreKeyBytes, 1, []byte("key1"), true) - s.Require().NoError(err) - s.Require().NotNil(result.ProofOps) - cInfo, err := s.rootStore.GetStateCommitment().GetCommitInfo(1) - s.Require().NoError(err) - storeHash := cInfo.GetStoreCommitID(testStoreKeyBytes).Hash - treeRoots, err := result.ProofOps[0].Run([][]byte{[]byte("value1")}) - s.Require().NoError(err) - s.Require().Equal(treeRoots[0], storeHash) - expRoots, err := result.ProofOps[1].Run([][]byte{storeHash}) - s.Require().NoError(err) - s.Require().Equal(expRoots[0], cInfo.Hash()) -} - -func (s *RootStoreTestSuite) TestLoadVersion() { - // write and commit a few changesets - for v := uint64(1); v <= 5; v++ { - val := fmt.Sprintf("val%03d", v) // val001, val002, ..., val005 - - cs := corestore.NewChangeset(v) - cs.Add(testStoreKeyBytes, []byte("key"), []byte(val), false) - - commitHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(commitHash) - } - - // ensure the latest version is correct - latest, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(5), latest) - - // attempt to load a non-existent version - err = s.rootStore.LoadVersion(6) - s.Require().Error(err) - - // attempt to load a previously committed version - err = s.rootStore.LoadVersion(3) - s.Require().NoError(err) - - // ensure the latest version is correct - latest, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(3), latest) - - // query state and ensure values returned are based on the loaded version - _, ro, err := s.rootStore.StateLatest() - s.Require().NoError(err) - - reader, err := ro.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - val, err := reader.Get([]byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("val003"), val) - - // attempt to write and commit a few changesets - for v := 4; v <= 5; v++ { - val := fmt.Sprintf("overwritten_val%03d", v) // overwritten_val004, overwritten_val005 - - cs := corestore.NewChangeset(uint64(v)) - cs.Add(testStoreKeyBytes, []byte("key"), []byte(val), false) - - _, err := s.rootStore.Commit(cs) - s.Require().Error(err) - } - - // ensure the latest version is correct - latest, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(3), latest) // should have stayed at 3 after failed commits - - // query state and ensure values returned are based on the loaded version - _, ro, err = s.rootStore.StateLatest() - s.Require().NoError(err) - - reader, err = ro.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - val, err = reader.Get([]byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("val003"), val) -} - -func (s *RootStoreTestSuite) 
TestLoadVersionForOverwriting() { - // write and commit a few changesets - for v := uint64(1); v <= 5; v++ { - val := fmt.Sprintf("val%03d", v) // val001, val002, ..., val005 - - cs := corestore.NewChangeset(v) - cs.Add(testStoreKeyBytes, []byte("key"), []byte(val), false) - - commitHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(commitHash) - } - - // ensure the latest version is correct - latest, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(5), latest) - - // attempt to load a non-existent version - err = s.rootStore.LoadVersionForOverwriting(6) - s.Require().Error(err) - - // attempt to load a previously committed version - err = s.rootStore.LoadVersionForOverwriting(3) - s.Require().NoError(err) - - // ensure the latest version is correct - latest, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(3), latest) - - // query state and ensure values returned are based on the loaded version - _, ro, err := s.rootStore.StateLatest() - s.Require().NoError(err) - - reader, err := ro.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - val, err := reader.Get([]byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("val003"), val) - - // attempt to write and commit a few changesets - for v := 4; v <= 5; v++ { - val := fmt.Sprintf("overwritten_val%03d", v) // overwritten_val004, overwritten_val005 - - cs := corestore.NewChangeset(uint64(v)) - cs.Add(testStoreKeyBytes, []byte("key"), []byte(val), false) - - commitHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(commitHash) - } - - // ensure the latest version is correct - latest, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(5), latest) - - // query state and ensure values returned are based on the loaded version - _, ro, err = s.rootStore.StateLatest() - s.Require().NoError(err) - - reader, err = ro.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - val, err = reader.Get([]byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("overwritten_val005"), val) -} - -func (s *RootStoreTestSuite) TestCommit() { - lv, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Zero(lv) - - // perform changes - cs := corestore.NewChangeset(1) - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - cs.Add(testStoreKeyBytes, []byte(key), []byte(val), false) - } - - cHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(cHash) - - // ensure latest version is updated - lv, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(1), lv) - - // perform reads on the updated root store - _, ro, err := s.rootStore.StateLatest() - s.Require().NoError(err) - - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - reader, err := ro.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - result, err := reader.Get([]byte(key)) - s.Require().NoError(err) - - s.Require().Equal([]byte(val), result) - } -} - -func (s *RootStoreTestSuite) TestStateAt() { - // write keys over multiple versions - for v := uint64(1); v <= 5; v++ { - // perform changes - cs := corestore.NewChangeset(v) - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", 
i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d_%03d", i, v) // val000_1, val001_1, ..., val099_1 - - cs.Add(testStoreKeyBytes, []byte(key), []byte(val), false) - } - - // execute Commit - cHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(cHash) - } - - lv, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(5), lv) - - // ensure we can read state correctly at each version - for v := uint64(1); v <= 5; v++ { - ro, err := s.rootStore.StateAt(v) - s.Require().NoError(err) - - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d_%03d", i, v) // val000_1, val001_1, ..., val099_1 - - reader, err := ro.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - isExist, err := reader.Has([]byte(key)) - s.Require().NoError(err) - s.Require().True(isExist) - result, err := reader.Get([]byte(key)) - s.Require().NoError(err) - s.Require().Equal([]byte(val), result) - } - - // non-existent key - reader, err := ro.GetReader(testStoreKey2Bytes) - s.Require().NoError(err) - isExist, err := reader.Has([]byte("key")) - s.Require().NoError(err) - s.Require().False(isExist) - v, err := reader.Get([]byte("key")) - s.Require().NoError(err) - s.Require().Nil(v) - } -} - -func (s *RootStoreTestSuite) TestPrune() { - // perform changes - cs := corestore.NewChangeset(1) - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - cs.Add(testStoreKeyBytes, []byte(key), []byte(val), false) - } - - testCases := []struct { - name string - numVersions int64 - po store.PruningOption - deleted []uint64 - saved []uint64 - }{ - {"prune nothing", 10, store.PruningOption{ - KeepRecent: 0, - Interval: 0, - }, nil, []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, - {"prune everything", 12, store.PruningOption{ - KeepRecent: 1, - Interval: 10, - }, []uint64{1, 2, 3, 4, 5, 6, 7, 8}, []uint64{9, 10, 11, 12}}, - {"prune some; no batch", 10, store.PruningOption{ - KeepRecent: 2, - Interval: 1, - }, []uint64{1, 2, 3, 4, 6, 5, 7}, []uint64{8, 9, 10}}, - {"prune some; small batch", 10, store.PruningOption{ - KeepRecent: 2, - Interval: 3, - }, []uint64{1, 2, 3, 4, 5, 6}, []uint64{7, 8, 9, 10}}, - {"prune some; large batch", 10, store.PruningOption{ - KeepRecent: 2, - Interval: 11, - }, nil, []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, - } - - for _, tc := range testCases { - - s.newStoreWithPruneConfig(&tc.po) - - // write keys over multiple versions - for i := int64(0); i < tc.numVersions; i++ { - // execute Commit - cs.Version = uint64(i + 1) - cHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(cHash) - } - - for _, v := range tc.saved { - ro, err := s.rootStore.StateAt(v) - s.Require().NoError(err, "expected no error when loading height %d at test %s", v, tc.name) - - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - reader, err := ro.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - result, err := reader.Get([]byte(key)) - s.Require().NoError(err) - s.Require().Equal([]byte(val), result, "value should be equal for test: %s", tc.name) - } - } - - for _, v := range tc.deleted { - var err error - checkErr := func() bool { - if _, err = s.rootStore.StateAt(v); err != nil { - return true - } - return false - } - // wait for async pruning 
process to finish - s.Require().Eventually(checkErr, 2*time.Second, 100*time.Millisecond) - s.Require().Error(err, "expected error when loading height %d at test %s", v, tc.name) - } - } -} - -func (s *RootStoreTestSuite) TestMultiStore_Pruning_SameHeightsTwice() { - // perform changes - cs := corestore.NewChangeset(1) - cs.Add(testStoreKeyBytes, []byte("key"), []byte("val"), false) - - const ( - numVersions uint64 = 10 - keepRecent uint64 = 1 - interval uint64 = 10 - ) - - s.newStoreWithPruneConfig(&store.PruningOption{ - KeepRecent: keepRecent, - Interval: interval, - }) - s.Require().NoError(s.rootStore.LoadLatestVersion()) - - for i := uint64(0); i < numVersions; i++ { - // execute Commit - cs.Version = i + 1 - cHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(cHash) - } - - latestVer, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(numVersions, latestVer) - - for v := uint64(1); v < numVersions-keepRecent; v++ { - var err error - checkErr := func() bool { - if _, err = s.rootStore.StateAt(v); err != nil { - return true - } - return false - } - // wait for async pruning process to finish - s.Require().Eventually(checkErr, 2*time.Second, 100*time.Millisecond, "expected no error when loading height: %d", v) - } - - for v := (numVersions - keepRecent); v < numVersions; v++ { - _, err := s.rootStore.StateAt(v) - s.Require().NoError(err, "expected no error when loading height: %d", v) - } - - // Get latest - err = s.rootStore.LoadVersion(numVersions) - s.Require().NoError(err) - - // Test pruning the same heights again - cs.Version++ - _, err = s.rootStore.Commit(cs) - s.Require().NoError(err) - - // Ensure that can commit one more height with no panic - cs.Version++ - _, err = s.rootStore.Commit(cs) - s.Require().NoError(err) -} - -func (s *RootStoreTestSuite) TestMultiStore_PruningRestart() { - // perform changes - cs := corestore.NewChangeset(1) - cs.Add(testStoreKeyBytes, []byte("key"), []byte("val"), false) - - pruneOpt := &store.PruningOption{ - KeepRecent: 2, - Interval: 11, - } - - noopLog := coretesting.NewNopLogger() - - mdb1 := dbm.NewMemDB() - mdb2 := dbm.NewMemDB() - - tree := iavl.NewIavlTree(mdb1, noopLog, iavl.DefaultConfig()) - sc, err := commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree}, nil, mdb2, noopLog) - s.Require().NoError(err) - - pm := pruning.NewManager(sc, pruneOpt) - - s.newStoreWithBackendMount(sc, pm) - s.Require().NoError(s.rootStore.LoadLatestVersion()) - - // Commit enough to build up heights to prune, where on the next block we should - // batch delete. 
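The expectation in the loop below follows the arithmetic pinned down by TestPruningOption earlier in this patch: with KeepRecent=2 and Interval=11, heights 1 through 10 commit without pruning, and the 11th commit prunes everything up to 11-2-1 = 8. A small sketch of that rule, inferred from the expected values in these tests rather than quoted from the implementation:

```go
// shouldPrune mirrors the behavior the pruning-option tests observe: prune
// only when version is a positive multiple of Interval (Interval == 0 disables
// pruning), keeping the most recent KeepRecent versions plus the current one.
func shouldPrune(keepRecent, interval, version uint64) (prune bool, pruneTo uint64) {
	if interval == 0 || version <= keepRecent {
		return false, 0
	}
	if version%interval != 0 {
		return false, 0
	}
	return true, version - keepRecent - 1
}
```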
- for i := uint64(1); i <= 10; i++ { - // execute Commit - cs.Version = i - cHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(cHash) - } - - latestVer, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - - ok, actualHeightToPrune := pruneOpt.ShouldPrune(latestVer) - s.Require().False(ok) - s.Require().Equal(uint64(0), actualHeightToPrune) - - tree = iavl.NewIavlTree(mdb1, noopLog, iavl.DefaultConfig()) - sc, err = commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree}, nil, mdb2, noopLog) - s.Require().NoError(err) - - pm = pruning.NewManager(sc, pruneOpt) - - s.newStoreWithBackendMount(sc, pm) - err = s.rootStore.LoadLatestVersion() - s.Require().NoError(err) - - latestVer, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - - ok, actualHeightToPrune = pruneOpt.ShouldPrune(latestVer) - s.Require().False(ok) - s.Require().Equal(uint64(0), actualHeightToPrune) - - // commit one more block and ensure the heights have been pruned - // execute Commit - cs.Version++ - cHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(cHash) - - latestVer, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - - ok, actualHeightToPrune = pruneOpt.ShouldPrune(latestVer) - s.Require().True(ok) - s.Require().Equal(uint64(8), actualHeightToPrune) - - for v := uint64(1); v <= actualHeightToPrune; v++ { - checkErr := func() bool { - if _, err = s.rootStore.StateAt(v); err != nil { - return true - } - return false - } - // wait for async pruning process to finish - s.Require().Eventually(checkErr, 10*time.Second, 1*time.Second, "expected error when loading height: %d", v) - } -} - -func (s *RootStoreTestSuite) TestMultiStoreRestart() { - noopLog := coretesting.NewNopLogger() - - mdb1 := dbm.NewMemDB() - mdb2 := dbm.NewMemDB() - multiTrees := make(map[string]commitment.Tree) - for _, storeKey := range testStoreKeys { - prefixDB := dbm.NewPrefixDB(mdb1, []byte(storeKey)) - multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, noopLog, iavl.DefaultConfig()) - } - - sc, err := commitment.NewCommitStore(multiTrees, nil, mdb2, noopLog) - s.Require().NoError(err) - - pm := pruning.NewManager(sc, nil) - - s.newStoreWithBackendMount(sc, pm) - s.Require().NoError(s.rootStore.LoadLatestVersion()) - - // perform changes - for i := 1; i < 3; i++ { - cs := corestore.NewChangeset(uint64(i)) - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d_%03d", i, 1) // val000_1, val001_1, ..., val099_1 - - cs.Add(testStoreKeyBytes, []byte(key), []byte(val), false) - - key = fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val = fmt.Sprintf("val%03d_%03d", i, 2) // val000_1, val001_1, ..., val099_1 - - cs.Add(testStoreKey2Bytes, []byte(key), []byte(val), false) - - key = fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val = fmt.Sprintf("val%03d_%03d", i, 3) // val000_1, val001_1, ..., val099_1 - - cs.Add(testStoreKey3Bytes, []byte(key), []byte(val), false) - - // execute Commit - cHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(cHash) - - latestVer, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(i), latestVer) - } - - // more changes - cs1 := corestore.NewChangeset(3) - key := fmt.Sprintf("key%03d", 3) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d_%03d", 3, 1) // val000_1, val001_1, ..., val099_1 - - cs1.Add(testStoreKeyBytes, []byte(key), []byte(val), false) - - key 
= fmt.Sprintf("key%03d", 3) // key000, key001, ..., key099 - val = fmt.Sprintf("val%03d_%03d", 3, 2) // val000_1, val001_1, ..., val099_1 - - cs1.Add(testStoreKey2Bytes, []byte(key), []byte(val), false) - - // execute Commit - cHash, err := s.rootStore.Commit(cs1) - s.Require().NoError(err) - s.Require().NotNil(cHash) - - latestVer, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(3), latestVer) - - cs2 := corestore.NewChangeset(4) - key = fmt.Sprintf("key%03d", 4) // key000, key001, ..., key099 - val = fmt.Sprintf("val%03d_%03d", 4, 3) // val000_1, val001_1, ..., val099_1 - - cs2.Add(testStoreKey3Bytes, []byte(key), []byte(val), false) - - // execute Commit - cHash, err = s.rootStore.Commit(cs2) - s.Require().NoError(err) - s.Require().NotNil(cHash) - - latestVer, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(4), latestVer) - - _, ro1, err := s.rootStore.StateLatest() - s.Require().Nil(err) - reader1, err := ro1.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - result1, err := reader1.Get([]byte(fmt.Sprintf("key%03d", 3))) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("val%03d_%03d", 3, 1)), result1, "value should be equal") - - // "restart" - multiTrees = make(map[string]commitment.Tree) - for _, storeKey := range testStoreKeys { - prefixDB := dbm.NewPrefixDB(mdb1, []byte(storeKey)) - multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, noopLog, iavl.DefaultConfig()) - } - - sc, err = commitment.NewCommitStore(multiTrees, nil, mdb2, noopLog) - s.Require().NoError(err) - - pm = pruning.NewManager(sc, nil) - - s.newStoreWithBackendMount(sc, pm) - err = s.rootStore.LoadLatestVersion() - s.Require().Nil(err) - - latestVer, ro, err := s.rootStore.StateLatest() - s.Require().Nil(err) - s.Require().Equal(uint64(4), latestVer) - reader, err := ro.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - result, err := reader.Get([]byte(fmt.Sprintf("key%03d", 3))) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("val%03d_%03d", 3, 1)), result, "value should be equal") - - reader, err = ro.GetReader(testStoreKey2Bytes) - s.Require().NoError(err) - result, err = reader.Get([]byte(fmt.Sprintf("key%03d", 2))) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("val%03d_%03d", 2, 2)), result, "value should be equal") - - reader, err = ro.GetReader(testStoreKey3Bytes) - s.Require().NoError(err) - result, err = reader.Get([]byte(fmt.Sprintf("key%03d", 4))) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("val%03d_%03d", 4, 3)), result, "value should be equal") -} - -func (s *RootStoreTestSuite) TestHashStableWithEmptyCommitAndRestart() { - err := s.rootStore.LoadLatestVersion() - s.Require().NoError(err) - - emptyHash := sha256.Sum256([]byte{}) - appHash := emptyHash[:] - commitID := proof.CommitID{Hash: appHash} - lastCommitID, err := s.rootStore.LastCommitID() - s.Require().Nil(err) - - // the hash of a store with no commits is the root hash of a tree with empty hashes as leaves. - // it should not be equal an empty hash. 
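For reference, the "empty hash" this comment contrasts against is just SHA-256 over zero bytes, a well-known constant; a one-liner to reproduce it:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// SHA-256 over zero bytes, the constant this test distinguishes from a real root hash:
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
	emptyHash := sha256.Sum256([]byte{})
	fmt.Printf("%x\n", emptyHash[:])
}
```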
- s.Require().NotEqual(commitID, lastCommitID) - - cs := corestore.NewChangeset(1) - cs.Add(testStoreKeyBytes, []byte("key"), []byte("val"), false) - - cHash, err := s.rootStore.Commit(cs) - s.Require().Nil(err) - s.Require().NotNil(cHash) - latestVersion, err := s.rootStore.GetLatestVersion() - hash := cHash - s.Require().Nil(err) - s.Require().Equal(uint64(1), latestVersion) - - // make an empty commit, it should update version, but not affect hash - cHash, err = s.rootStore.Commit(corestore.NewChangeset(2)) - s.Require().Nil(err) - s.Require().NotNil(cHash) - latestVersion, err = s.rootStore.GetLatestVersion() - s.Require().Nil(err) - s.Require().Equal(uint64(2), latestVersion) - s.Require().Equal(hash, cHash) - - // reload the store - s.Require().NoError(s.rootStore.LoadLatestVersion()) - lastCommitID, err = s.rootStore.LastCommitID() - s.Require().NoError(err) - s.Require().Equal(lastCommitID.Hash, hash) -} diff --git a/store/v2/root/upgrade_test.go b/store/v2/root/upgrade_test.go deleted file mode 100644 index 1bcee4149b48..000000000000 --- a/store/v2/root/upgrade_test.go +++ /dev/null @@ -1,151 +0,0 @@ -package root - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/suite" - - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/log" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/commitment" - "cosmossdk.io/store/v2/commitment/iavl" - dbm "cosmossdk.io/store/v2/db" - "cosmossdk.io/store/v2/pruning" -) - -type UpgradeStoreTestSuite struct { - suite.Suite - - commitDB corestore.KVStoreWithBatch - rootStore store.RootStore -} - -func TestUpgradeStoreTestSuite(t *testing.T) { - suite.Run(t, &UpgradeStoreTestSuite{}) -} - -func (s *UpgradeStoreTestSuite) SetupTest() { - testLog := log.NewTestLogger(s.T()) - nopLog := coretesting.NewNopLogger() - - s.commitDB = dbm.NewMemDB() - multiTrees := make(map[string]commitment.Tree) - newTreeFn := func(storeKey string) (commitment.Tree, error) { - prefixDB := dbm.NewPrefixDB(s.commitDB, []byte(storeKey)) - return iavl.NewIavlTree(prefixDB, nopLog, iavl.DefaultConfig()), nil - } - for _, storeKey := range storeKeys { - multiTrees[storeKey], _ = newTreeFn(storeKey) - } - - sc, err := commitment.NewCommitStore(multiTrees, nil, s.commitDB, testLog) - s.Require().NoError(err) - pm := pruning.NewManager(sc, nil) - s.rootStore, err = New(s.commitDB, testLog, sc, pm, nil, nil) - s.Require().NoError(err) - - // commit changeset - toVersion := uint64(20) - keyCount := 10 - for version := uint64(1); version <= toVersion; version++ { - cs := corestore.NewChangeset(version) - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } - } - _, err = s.rootStore.Commit(cs) - s.Require().NoError(err) - } -} - -func (s *UpgradeStoreTestSuite) loadWithUpgrades(upgrades *corestore.StoreUpgrades) { - testLog := log.NewTestLogger(s.T()) - nopLog := coretesting.NewNopLogger() - - // create a new commitment store - multiTrees := make(map[string]commitment.Tree) - oldTrees := make(map[string]commitment.Tree) - newTreeFn := func(storeKey string) (commitment.Tree, error) { - prefixDB := dbm.NewPrefixDB(s.commitDB, []byte(storeKey)) - return iavl.NewIavlTree(prefixDB, nopLog, iavl.DefaultConfig()), nil - } - for _, storeKey := range storeKeys { - multiTrees[storeKey], _ = newTreeFn(storeKey) - } - for _, added := range upgrades.Added { - multiTrees[added], _ = 
newTreeFn(added) - } - for _, deleted := range upgrades.Deleted { - oldTrees[deleted], _ = newTreeFn(deleted) - } - - sc, err := commitment.NewCommitStore(multiTrees, oldTrees, s.commitDB, testLog) - s.Require().NoError(err) - pm := pruning.NewManager(sc, nil) - s.rootStore, err = New(s.commitDB, testLog, sc, pm, nil, nil) - s.Require().NoError(err) -} - -func (s *UpgradeStoreTestSuite) TestLoadVersionAndUpgrade() { - // upgrade store keys - upgrades := &corestore.StoreUpgrades{ - Added: []string{"newStore1", "newStore2"}, - Deleted: []string{"store3"}, - } - s.loadWithUpgrades(upgrades) - - // load the store with the upgrades - v, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - err = s.rootStore.(store.UpgradeableStore).LoadVersionAndUpgrade(v, upgrades) - s.Require().NoError(err) - - keyCount := 10 - // check old store keys are queryable - oldStoreKeys := []string{"store1", "store2", "store3"} - for _, storeKey := range oldStoreKeys { - for version := uint64(1); version <= v; version++ { - for i := 0; i < keyCount; i++ { - proof, err := s.rootStore.Query([]byte(storeKey), version, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) - s.Require().NoError(err) - s.Require().NotNil(proof) - } - } - } - - // commit changeset - newStoreKeys := []string{"newStore1", "newStore2"} - toVersion := uint64(40) - for version := v + 1; version <= toVersion; version++ { - cs := corestore.NewChangeset(version) - for _, storeKey := range newStoreKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } - } - _, err = s.rootStore.Commit(cs) - s.Require().NoError(err) - } - - // check new store keys are queryable - for _, storeKey := range newStoreKeys { - for version := v + 1; version <= toVersion; version++ { - for i := 0; i < keyCount; i++ { - _, err := s.rootStore.Query([]byte(storeKey), version, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) - s.Require().NoError(err) - } - } - } - - // check the original store key is queryable - for version := uint64(1); version <= toVersion; version++ { - for i := 0; i < keyCount; i++ { - _, err := s.rootStore.Query([]byte("store2"), version, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) - s.Require().NoError(err) - } - } -} diff --git a/store/v2/snapshots/helpers_test.go b/store/v2/snapshots/helpers_test.go deleted file mode 100644 index 40090c896817..000000000000 --- a/store/v2/snapshots/helpers_test.go +++ /dev/null @@ -1,282 +0,0 @@ -package snapshots_test - -import ( - "bufio" - "bytes" - "compress/zlib" - "crypto/sha256" - "errors" - "fmt" - "io" - "testing" - "time" - - protoio "github.com/cosmos/gogoproto/io" - "github.com/stretchr/testify/require" - - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2/snapshots" - snapshotstypes "cosmossdk.io/store/v2/snapshots/types" -) - -func checksums(slice [][]byte) [][]byte { - hasher := sha256.New() - checksums := make([][]byte, len(slice)) - for i, chunk := range slice { - hasher.Write(chunk) - checksums[i] = hasher.Sum(nil) - hasher.Reset() - } - return checksums -} - -func hash(chunks [][]byte) []byte { - hasher := sha256.New() - for _, chunk := range chunks { - hasher.Write(chunk) - } - return hasher.Sum(nil) -} - -func makeChunks(chunks [][]byte) <-chan io.ReadCloser { - ch := make(chan io.ReadCloser, len(chunks)) - for _, chunk := range chunks { - ch <- io.NopCloser(bytes.NewReader(chunk)) - } - close(ch) - return ch -} - -func readChunks(chunks <-chan 
io.ReadCloser) [][]byte { - bodies := [][]byte{} - for chunk := range chunks { - body, err := io.ReadAll(chunk) - if err != nil { - panic(err) - } - bodies = append(bodies, body) - } - return bodies -} - -// snapshotItems serialize a array of bytes as SnapshotItem_ExtensionPayload, and return the chunks. -func snapshotItems(items [][]byte, ext snapshots.ExtensionSnapshotter) [][]byte { - // copy the same parameters from the code - snapshotChunkSize := uint64(10e6) - snapshotBufferSize := int(snapshotChunkSize) - - ch := make(chan io.ReadCloser) - go func() { - chunkWriter := snapshots.NewChunkWriter(ch, snapshotChunkSize) - bufWriter := bufio.NewWriterSize(chunkWriter, snapshotBufferSize) - zWriter, _ := zlib.NewWriterLevel(bufWriter, 7) - protoWriter := protoio.NewDelimitedWriter(zWriter) - for _, item := range items { - _ = snapshotstypes.WriteExtensionPayload(protoWriter, item) - } - // write extension metadata - _ = protoWriter.WriteMsg(&snapshotstypes.SnapshotItem{ - Item: &snapshotstypes.SnapshotItem_Extension{ - Extension: &snapshotstypes.SnapshotExtensionMeta{ - Name: ext.SnapshotName(), - Format: ext.SnapshotFormat(), - }, - }, - }) - _ = ext.SnapshotExtension(0, func(payload []byte) error { - return snapshotstypes.WriteExtensionPayload(protoWriter, payload) - }) - _ = protoWriter.Close() - _ = bufWriter.Flush() - _ = chunkWriter.Close() - }() - - var chunks [][]byte - for chunkBody := range ch { - chunk, err := io.ReadAll(chunkBody) - if err != nil { - panic(err) - } - chunks = append(chunks, chunk) - } - - return chunks -} - -type mockCommitSnapshotter struct { - items [][]byte -} - -func (m *mockCommitSnapshotter) Restore( - height uint64, format uint32, protoReader protoio.Reader, -) (snapshotstypes.SnapshotItem, error) { - if format == 0 { - return snapshotstypes.SnapshotItem{}, snapshotstypes.ErrUnknownFormat - } - if m.items != nil { - return snapshotstypes.SnapshotItem{}, errors.New("already has contents") - } - - var item snapshotstypes.SnapshotItem - m.items = [][]byte{} - for { - item.Reset() - err := protoReader.ReadMsg(&item) - if errors.Is(err, io.EOF) { - break - } else if err != nil { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("invalid protobuf message: %w", err) - } - payload := item.GetExtensionPayload() - if payload == nil { - break - } - m.items = append(m.items, payload.Payload) - } - - return item, nil -} - -func (m *mockCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error { - for _, item := range m.items { - if err := snapshotstypes.WriteExtensionPayload(protoWriter, item); err != nil { - return err - } - } - return nil -} - -func (m *mockCommitSnapshotter) SnapshotFormat() uint32 { - return snapshotstypes.CurrentFormat -} - -func (m *mockCommitSnapshotter) SupportedFormats() []uint32 { - return []uint32{snapshotstypes.CurrentFormat} -} - -type mockErrorCommitSnapshotter struct{} - -var _ snapshots.CommitSnapshotter = (*mockErrorCommitSnapshotter)(nil) - -func (m *mockErrorCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error { - return errors.New("mock snapshot error") -} - -func (m *mockErrorCommitSnapshotter) Restore( - height uint64, format uint32, protoReader protoio.Reader, -) (snapshotstypes.SnapshotItem, error) { - return snapshotstypes.SnapshotItem{}, errors.New("mock restore error") -} - -func (m *mockErrorCommitSnapshotter) SnapshotFormat() uint32 { - return snapshotstypes.CurrentFormat -} - -func (m *mockErrorCommitSnapshotter) SupportedFormats() []uint32 { - return 
[]uint32{snapshotstypes.CurrentFormat} -} - -// setupBusyManager creates a manager with an empty store that is busy creating a snapshot at height 1. -// The snapshot will complete when the returned closer is called. -func setupBusyManager(t *testing.T) *snapshots.Manager { - t.Helper() - store, err := snapshots.NewStore(t.TempDir()) - require.NoError(t, err) - hung := newHungCommitSnapshotter() - mgr := snapshots.NewManager(store, opts, hung, nil, coretesting.NewNopLogger()) - - // Channel to ensure the test doesn't finish until the goroutine is done. - // Without this, there are intermittent test failures about - // the t.TempDir() cleanup failing due to the directory not being empty. - done := make(chan struct{}) - - go func() { - defer close(done) - _, err := mgr.Create(1) - require.NoError(t, err) - }() - time.Sleep(10 * time.Millisecond) - - t.Cleanup(func() { - <-done - }) - - t.Cleanup(hung.Close) - - return mgr -} - -// hungCommitSnapshotter can be used to test operations in progress. Call close to end the snapshot. -type hungCommitSnapshotter struct { - ch chan struct{} -} - -func newHungCommitSnapshotter() *hungCommitSnapshotter { - return &hungCommitSnapshotter{ - ch: make(chan struct{}), - } -} - -func (m *hungCommitSnapshotter) Close() { - close(m.ch) -} - -func (m *hungCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error { - <-m.ch - return nil -} - -func (m *hungCommitSnapshotter) Restore( - height uint64, format uint32, protoReader protoio.Reader, -) (snapshotstypes.SnapshotItem, error) { - panic("not implemented") -} - -type extSnapshotter struct { - state []uint64 -} - -func newExtSnapshotter(count int) *extSnapshotter { - state := make([]uint64, 0, count) - for i := 0; i < count; i++ { - state = append(state, uint64(i)) - } - return &extSnapshotter{ - state, - } -} - -func (s *extSnapshotter) SnapshotName() string { - return "mock" -} - -func (s *extSnapshotter) SnapshotFormat() uint32 { - return 1 -} - -func (s *extSnapshotter) SupportedFormats() []uint32 { - return []uint32{1} -} - -func (s *extSnapshotter) SnapshotExtension(height uint64, payloadWriter snapshots.ExtensionPayloadWriter) error { - for _, i := range s.state { - if err := payloadWriter(snapshotstypes.Uint64ToBigEndian(i)); err != nil { - return err - } - } - return nil -} - -func (s *extSnapshotter) RestoreExtension(height uint64, format uint32, payloadReader snapshots.ExtensionPayloadReader) error { - for { - payload, err := payloadReader() - if errors.Is(err, io.EOF) { - break - } else if err != nil { - return err - } - s.state = append(s.state, snapshotstypes.BigEndianToUint64(payload)) - } - // finalize restoration - return nil -} diff --git a/store/v2/snapshots/manager.go b/store/v2/snapshots/manager.go deleted file mode 100644 index a0d7895513d8..000000000000 --- a/store/v2/snapshots/manager.go +++ /dev/null @@ -1,591 +0,0 @@ -package snapshots - -import ( - "bytes" - "crypto/sha256" - "errors" - "fmt" - "io" - "math" - "os" - "sort" - "sync" - - corelog "cosmossdk.io/core/log" - errorsmod "cosmossdk.io/errors/v2" - storeerrors "cosmossdk.io/store/v2/errors" - "cosmossdk.io/store/v2/snapshots/types" -) - -// Manager manages snapshot and restore operations for an app, making sure only a single -// long-running operation is in progress at any given time, and provides convenience methods -// mirroring the ABCI interface. -// -// Although the ABCI interface (and this manager) passes chunks as byte slices, the internal -// snapshot/restore APIs use IO streams (i.e. 
chan io.ReadCloser), for two reasons:
-//
-// 1. In the future, ABCI should support streaming. Consider e.g. InitChain during chain
-// upgrades, which currently passes the entire chain state as an in-memory byte slice.
-// https://github.com/tendermint/tendermint/issues/5184
-//
-// 2. io.ReadCloser streams automatically propagate IO errors, and can pass arbitrary
-// errors via io.Pipe.CloseWithError().
-type Manager struct {
-	extensions map[string]ExtensionSnapshotter
-	// store is the snapshot store where all completed snapshots are persisted.
-	store *Store
-	opts SnapshotOptions
-	// commitSnapshotter is the snapshotter for the commitment state.
-	commitSnapshotter CommitSnapshotter
-
-	logger corelog.Logger
-
-	mtx sync.Mutex
-	operation operation
-	chRestore chan<- uint32
-	chRestoreDone <-chan restoreDone
-	restoreSnapshot *types.Snapshot
-	restoreChunkIndex uint32
-}
-
-// operation represents a Manager operation. Only one operation can be in progress at a time.
-type operation string
-
-// restoreDone represents the result of a restore operation.
-type restoreDone struct {
-	complete bool // if true, restore completed successfully (not prematurely)
-	err error // if non-nil, restore errored
-}
-
-const (
-	opNone operation = ""
-	opSnapshot operation = "snapshot"
-	opPrune operation = "prune"
-	opRestore operation = "restore"
-
-	chunkBufferSize = 4
-	chunkIDBufferSize = 1024
-	defaultStorageChannelBufferSize = 1024
-
-	snapshotMaxItemSize = int(64e6) // SDK has no key/value size limit, so we set an arbitrary limit
-)
-
-var ErrOptsZeroSnapshotInterval = errors.New("snapshot-interval must not be 0")
-
-// NewManager creates a new manager.
-func NewManager(store *Store, opts SnapshotOptions, commitSnapshotter CommitSnapshotter, extensions map[string]ExtensionSnapshotter, logger corelog.Logger) *Manager {
-	if extensions == nil {
-		extensions = map[string]ExtensionSnapshotter{}
-	}
-	return &Manager{
-		store: store,
-		opts: opts,
-		commitSnapshotter: commitSnapshotter,
-		extensions: extensions,
-		logger: logger,
-	}
-}
-
-// RegisterExtensions registers extension snapshotters with the manager.
-func (m *Manager) RegisterExtensions(extensions ...ExtensionSnapshotter) error {
-	if m.extensions == nil {
-		m.extensions = make(map[string]ExtensionSnapshotter, len(extensions))
-	}
-	for _, extension := range extensions {
-		name := extension.SnapshotName()
-		if _, ok := m.extensions[name]; ok {
-			return fmt.Errorf("duplicated snapshotter name: %s", name)
-		}
-		if !IsFormatSupported(extension, extension.SnapshotFormat()) {
-			return fmt.Errorf("snapshotter doesn't support its own snapshot format: %s %d", name, extension.SnapshotFormat())
-		}
-		m.extensions[name] = extension
-	}
-	return nil
-}
-
-// begin starts an operation, or errors if one is in progress. It manages the mutex itself.
-func (m *Manager) begin(op operation) error {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-	return m.beginLocked(op)
-}
-
-// beginLocked begins an operation while already holding the mutex.
-func (m *Manager) beginLocked(op operation) error {
-	if op == opNone {
-		return errorsmod.Wrap(storeerrors.ErrLogic, "can't begin a none operation")
-	}
-	if m.operation != opNone {
-		return errorsmod.Wrapf(storeerrors.ErrConflict, "a %v operation is in progress", m.operation)
-	}
-	m.operation = op
-	return nil
-}
-
-// end ends the current operation.
-func (m *Manager) end() {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-	m.endLocked()
-}
-
-// endLocked ends the current operation while already holding the mutex.
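The begin/beginLocked/end trio above, completed by endLocked just below, is a one-operation-at-a-time guard around mutex-protected state. A stripped-down sketch of the same pattern (names illustrative, error types simplified):

```go
package main

import (
	"fmt"
	"sync"
)

type opGuard struct {
	mtx sync.Mutex
	op  string // "" means no operation in progress
}

// begin claims the guard for op, or errors if another operation is running.
func (g *opGuard) begin(op string) error {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	if op == "" {
		return fmt.Errorf("can't begin a none operation")
	}
	if g.op != "" {
		return fmt.Errorf("a %s operation is in progress", g.op)
	}
	g.op = op
	return nil
}

// end releases the guard for the next operation.
func (g *opGuard) end() {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	g.op = ""
}

func main() {
	var g opGuard
	fmt.Println(g.begin("snapshot")) // <nil>
	fmt.Println(g.begin("prune"))    // a snapshot operation is in progress
	g.end()
	fmt.Println(g.begin("prune")) // <nil>
}
```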
-func (m *Manager) endLocked() {
-	m.operation = opNone
-	if m.chRestore != nil {
-		close(m.chRestore)
-		m.chRestore = nil
-	}
-	m.chRestoreDone = nil
-	m.restoreSnapshot = nil
-	m.restoreChunkIndex = 0
-}
-
-// GetInterval returns snapshot interval represented in heights.
-func (m *Manager) GetInterval() uint64 {
-	return m.opts.Interval
-}
-
-// GetKeepRecent returns snapshot keep-recent represented in heights.
-func (m *Manager) GetKeepRecent() uint32 {
-	return m.opts.KeepRecent
-}
-
-// GetSnapshotBlockRetentionHeights returns the number of heights needed
-// for block retention. Blocks since the oldest available snapshot must be
-// available for state sync nodes to catch up (oldest because a node may be
-// restoring an old snapshot while a new snapshot was taken).
-func (m *Manager) GetSnapshotBlockRetentionHeights() int64 {
-	return int64(m.opts.Interval * uint64(m.opts.KeepRecent))
-}
-
-// Create creates a snapshot and returns its metadata.
-func (m *Manager) Create(height uint64) (*types.Snapshot, error) {
-	if m == nil {
-		return nil, errorsmod.Wrap(storeerrors.ErrLogic, "Snapshot Manager is nil")
-	}
-
-	err := m.begin(opSnapshot)
-	if err != nil {
-		return nil, err
-	}
-	defer m.end()
-
-	latest, err := m.store.GetLatest()
-	if err != nil {
-		return nil, errorsmod.Wrap(err, "failed to examine latest snapshot")
-	}
-	if latest != nil && latest.Height >= height {
-		return nil, errorsmod.Wrapf(storeerrors.ErrConflict,
-			"a more recent snapshot already exists at height %v", latest.Height)
-	}
-
-	// Spawn goroutine to generate snapshot chunks and pass their io.ReadClosers through a channel
-	ch := make(chan io.ReadCloser)
-	go m.createSnapshot(height, ch)
-
-	return m.store.Save(height, types.CurrentFormat, ch)
-}
-
-// createSnapshot does the heavy work of snapshotting after the request validations are done;
-// the produced chunks are written to the channel.
-func (m *Manager) createSnapshot(height uint64, ch chan<- io.ReadCloser) {
-	streamWriter := NewStreamWriter(ch)
-	if streamWriter == nil {
-		return
-	}
-	defer func() {
-		if err := streamWriter.Close(); err != nil {
-			streamWriter.CloseWithError(err)
-		}
-	}()
-
-	if err := m.commitSnapshotter.Snapshot(height, streamWriter); err != nil {
-		streamWriter.CloseWithError(err)
-		return
-	}
-	for _, name := range m.sortedExtensionNames() {
-		extension := m.extensions[name]
-		// write extension metadata
-		err := streamWriter.WriteMsg(&types.SnapshotItem{
-			Item: &types.SnapshotItem_Extension{
-				Extension: &types.SnapshotExtensionMeta{
-					Name: name,
-					Format: extension.SnapshotFormat(),
-				},
-			},
-		})
-		if err != nil {
-			streamWriter.CloseWithError(err)
-			return
-		}
-		payloadWriter := func(payload []byte) error {
-			return types.WriteExtensionPayload(streamWriter, payload)
-		}
-		if err := extension.SnapshotExtension(height, payloadWriter); err != nil {
-			streamWriter.CloseWithError(err)
-			return
-		}
-	}
-}
-
-// CreateMigration creates a migration snapshot and writes it to the given writer.
-// It is used to migrate the state from the original store to the store/v2.
-func (m *Manager) CreateMigration(height uint64, protoWriter WriteCloser) error {
-	if m == nil {
-		return errorsmod.Wrap(storeerrors.ErrLogic, "Snapshot Manager is nil")
-	}
-
-	err := m.begin(opSnapshot)
-	if err != nil {
-		return err
-	}
-	// m.end() will be called by the migration manager with EndMigration().
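CreateMigration's goroutine below relies on the error-propagation property called out in the Manager doc comment: closing the writer side of a pipe with an error surfaces that error to the reader. A tiny standalone demonstration:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	r, w := io.Pipe()

	// A failing producer closes its end with the error...
	go func() {
		w.CloseWithError(errors.New("snapshot failed"))
	}()

	// ...and the consumer sees that exact error instead of a silent EOF.
	_, err := io.ReadAll(r)
	fmt.Println(err) // snapshot failed
}
```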
-
-	go func() {
-		if err := m.commitSnapshotter.Snapshot(height, protoWriter); err != nil {
-			protoWriter.CloseWithError(err)
-			return
-		}
-		_ = protoWriter.Close() // Close always returns nil
-	}()
-
-	return nil
-}
-
-// EndMigration ends the migration operation.
-// It will replace the current commitSnapshotter with the new one.
-func (m *Manager) EndMigration(commitSnapshotter CommitSnapshotter) {
-	defer m.end()
-	m.commitSnapshotter = commitSnapshotter
-}
-
-// List lists snapshots, mirroring ABCI ListSnapshots. It can be concurrent with other operations.
-func (m *Manager) List() ([]*types.Snapshot, error) {
-	return m.store.List()
-}
-
-// LoadChunk loads a chunk into a byte slice, mirroring ABCI LoadChunk. It can be called
-// concurrently with other operations. If the chunk does not exist, nil is returned.
-func (m *Manager) LoadChunk(height uint64, format, chunk uint32) ([]byte, error) {
-	reader, err := m.store.LoadChunk(height, format, chunk)
-	if err != nil {
-		return nil, err
-	}
-	if reader == nil {
-		return nil, nil
-	}
-	defer reader.Close()
-
-	return io.ReadAll(reader)
-}
-
-// Prune prunes snapshots, if no other operations are in progress.
-func (m *Manager) Prune(retain uint32) (uint64, error) {
-	err := m.begin(opPrune)
-	if err != nil {
-		return 0, err
-	}
-	defer m.end()
-	return m.store.Prune(retain)
-}
-
-// Restore begins an async snapshot restoration, mirroring ABCI OfferSnapshot. Chunks must be fed
-// via RestoreChunk() until the restore is complete or a chunk fails.
-func (m *Manager) Restore(snapshot types.Snapshot) error {
-	if snapshot.Chunks == 0 {
-		return errorsmod.Wrap(types.ErrInvalidMetadata, "no chunks")
-	}
-	if uint32(len(snapshot.Metadata.ChunkHashes)) != snapshot.Chunks {
-		return errorsmod.Wrapf(types.ErrInvalidMetadata, "snapshot has %v chunk hashes, but %v chunks",
-			uint32(len(snapshot.Metadata.ChunkHashes)),
-			snapshot.Chunks)
-	}
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	// preemptively check that the multistore supports the snapshot format
-	if snapshot.Format != types.CurrentFormat {
-		return errorsmod.Wrapf(types.ErrUnknownFormat, "snapshot format %v", snapshot.Format)
-	}
-	if snapshot.Height == 0 {
-		return errorsmod.Wrap(storeerrors.ErrLogic, "cannot restore snapshot at height 0")
-	}
-	if snapshot.Height > uint64(math.MaxInt64) {
-		return errorsmod.Wrapf(types.ErrInvalidMetadata,
-			"snapshot height %v cannot exceed %v", snapshot.Height, int64(math.MaxInt64))
-	}
-
-	err := m.beginLocked(opRestore)
-	if err != nil {
-		return err
-	}
-
-	// Start an asynchronous snapshot restoration, passing chunks and completion status via channels.
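The setup that follows wires three channels together: chunk IDs in, chunk readers out, and a buffered done channel for the result. A reduced sketch of the ID-to-reader stage (the load function is a stand-in for store.loadChunkFile):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// loadChunkStream mirrors the shape of the manager's helper: it consumes chunk
// IDs from one channel and emits the corresponding readers on another, closing
// the output channel when the input is exhausted or a load fails.
func loadChunkStream(chunkIDs <-chan uint32, load func(uint32) ([]byte, error)) <-chan io.ReadCloser {
	chunks := make(chan io.ReadCloser, 4)
	go func() {
		defer close(chunks)
		for id := range chunkIDs {
			data, err := load(id)
			if err != nil {
				return // the real code logs the failure before bailing out
			}
			chunks <- io.NopCloser(bytes.NewReader(data))
		}
	}()
	return chunks
}

func main() {
	ids := make(chan uint32, 2)
	ids <- 0
	ids <- 1
	close(ids)

	out := loadChunkStream(ids, func(id uint32) ([]byte, error) {
		return []byte(fmt.Sprintf("chunk-%d", id)), nil
	})
	for rc := range out {
		b, _ := io.ReadAll(rc)
		fmt.Println(string(b)) // chunk-0, then chunk-1
	}
}
```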
-	chChunkIDs := make(chan uint32, chunkIDBufferSize)
-	chDone := make(chan restoreDone, 1)
-
-	dir := m.store.pathSnapshot(snapshot.Height, snapshot.Format)
-	if err := os.MkdirAll(dir, 0o750); err != nil {
-		return errorsmod.Wrapf(err, "failed to create snapshot directory %q", dir)
-	}
-
-	chChunks := m.loadChunkStream(snapshot.Height, snapshot.Format, chChunkIDs)
-
-	go func() {
-		err := m.doRestoreSnapshot(snapshot, chChunks)
-		chDone <- restoreDone{
-			complete: err == nil,
-			err: err,
-		}
-		close(chDone)
-	}()
-
-	m.chRestore = chChunkIDs
-	m.chRestoreDone = chDone
-	m.restoreSnapshot = &snapshot
-	m.restoreChunkIndex = 0
-	return nil
-}
-
-func (m *Manager) loadChunkStream(height uint64, format uint32, chunkIDs <-chan uint32) <-chan io.ReadCloser {
-	chunks := make(chan io.ReadCloser, chunkBufferSize)
-	go func() {
-		defer close(chunks)
-
-		for chunkID := range chunkIDs {
-			chunk, err := m.store.loadChunkFile(height, format, chunkID)
-			if err != nil {
-				m.logger.Error("load chunk file failed", "height", height, "format", format, "chunk", chunkID, "err", err)
-				break
-			}
-			chunks <- chunk
-		}
-	}()
-
-	return chunks
-}
-
-// doRestoreSnapshot does the heavy work of snapshot restoration after the preliminary checks on the request have passed.
-func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io.ReadCloser) error {
-	dir := m.store.pathSnapshot(snapshot.Height, snapshot.Format)
-	if err := os.MkdirAll(dir, 0o750); err != nil {
-		return errorsmod.Wrapf(err, "failed to create snapshot directory %q", dir)
-	}
-
-	var nextItem types.SnapshotItem
-	streamReader, err := NewStreamReader(chChunks)
-	if err != nil {
-		return err
-	}
-	defer streamReader.Close()
-
-	// payloadReader reads an extension payload for the extension snapshotter; it returns `io.EOF` at extension boundaries.
-	payloadReader := func() ([]byte, error) {
-		nextItem.Reset()
-		if err := streamReader.ReadMsg(&nextItem); err != nil {
-			return nil, err
-		}
-		payload := nextItem.GetExtensionPayload()
-		if payload == nil {
-			return nil, io.EOF
-		}
-		return payload.Payload, nil
-	}
-
-	nextItem, err = m.commitSnapshotter.Restore(snapshot.Height, snapshot.Format, streamReader)
-	if err != nil {
-		return errorsmod.Wrap(err, "multistore restore")
-	}
-
-	for {
-		if nextItem.Item == nil {
-			// end of stream
-			break
-		}
-		metadata := nextItem.GetExtension()
-		if metadata == nil {
-			return errorsmod.Wrapf(storeerrors.ErrLogic, "unknown snapshot item %T", nextItem.Item)
-		}
-		extension, ok := m.extensions[metadata.Name]
-		if !ok {
-			return errorsmod.Wrapf(storeerrors.ErrLogic, "unknown extension snapshotter %s", metadata.Name)
-		}
-		if !IsFormatSupported(extension, metadata.Format) {
-			return errorsmod.Wrapf(types.ErrUnknownFormat, "format %v for extension %s", metadata.Format, metadata.Name)
-		}
-
-		if err := extension.RestoreExtension(snapshot.Height, metadata.Format, payloadReader); err != nil {
-			return errorsmod.Wrapf(err, "extension %s restore", metadata.Name)
-		}
-
-		payload := nextItem.GetExtensionPayload()
-		if payload != nil && len(payload.Payload) != 0 {
-			return fmt.Errorf("extension %s did not exhaust its payload stream", metadata.Name)
-		} else {
-			break
-		}
-	}
-
-	return nil
-}
-
-// RestoreChunk adds a chunk to an active snapshot restoration, mirroring ABCI ApplySnapshotChunk.
-// Chunks must be given until the restore is complete, returning true, or a chunk errors.
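Chunk integrity is enforced with per-chunk SHA-256 checksums against the snapshot metadata, which is the check RestoreChunk below performs before accepting a chunk. A minimal sketch of that check:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// verifyChunk compares a chunk against the hash recorded in the snapshot
// metadata, the same comparison RestoreChunk makes before accepting a chunk.
func verifyChunk(chunk, expected []byte) error {
	h := sha256.Sum256(chunk)
	if !bytes.Equal(h[:], expected) {
		return fmt.Errorf("chunk hash mismatch: expected %x, got %x", expected, h[:])
	}
	return nil
}

func main() {
	chunk := []byte{1, 2, 3}
	want := sha256.Sum256(chunk)
	fmt.Println(verifyChunk(chunk, want[:]))           // <nil>
	fmt.Println(verifyChunk([]byte{9, 9, 9}, want[:])) // chunk hash mismatch: ...
}
```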
-func (m *Manager) RestoreChunk(chunk []byte) (bool, error) {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-	if m.operation != opRestore {
-		return false, errorsmod.Wrap(storeerrors.ErrLogic, "no restore operation in progress")
-	}
-
-	if int(m.restoreChunkIndex) >= len(m.restoreSnapshot.Metadata.ChunkHashes) {
-		return false, errorsmod.Wrap(storeerrors.ErrLogic, "received unexpected chunk")
-	}
-
-	// Check if any errors have occurred yet.
-	select {
-	case done := <-m.chRestoreDone:
-		m.endLocked()
-		if done.err != nil {
-			return false, done.err
-		}
-		return false, errorsmod.Wrap(storeerrors.ErrLogic, "restore ended unexpectedly")
-	default:
-	}
-
-	// Verify the chunk hash.
-	hash := sha256.Sum256(chunk)
-	expected := m.restoreSnapshot.Metadata.ChunkHashes[m.restoreChunkIndex]
-	if !bytes.Equal(hash[:], expected) {
-		return false, errorsmod.Wrapf(types.ErrChunkHashMismatch,
-			"expected %x, got %x", expected, hash)
-	}
-
-	if err := m.store.saveChunkContent(chunk, m.restoreChunkIndex, m.restoreSnapshot); err != nil {
-		return false, errorsmod.Wrapf(err, "save chunk content %d", m.restoreChunkIndex)
-	}
-
-	// Pass the chunk to the restore, and wait for completion if it was the final one.
-	m.chRestore <- m.restoreChunkIndex
-	m.restoreChunkIndex++
-
-	if int(m.restoreChunkIndex) >= len(m.restoreSnapshot.Metadata.ChunkHashes) {
-		close(m.chRestore)
-		m.chRestore = nil
-
-		// the chunks are all written into files, so we can save the snapshot to the db
-		// even if the restoration may not have completed yet.
-		if err := m.store.saveSnapshot(m.restoreSnapshot); err != nil {
-			return false, errorsmod.Wrap(err, "save restoring snapshot")
-		}
-
-		done := <-m.chRestoreDone
-		m.endLocked()
-		if done.err != nil {
-			return false, done.err
-		}
-		if !done.complete {
-			return false, errorsmod.Wrap(storeerrors.ErrLogic, "restore ended prematurely")
-		}
-
-		return true, nil
-	}
-	return false, nil
-}
-
-// RestoreLocalSnapshot restores app state from a local snapshot.
-func (m *Manager) RestoreLocalSnapshot(height uint64, format uint32) error {
-	snapshot, ch, err := m.store.Load(height, format)
-	if err != nil {
-		return err
-	}
-
-	if snapshot == nil {
-		return fmt.Errorf("snapshot doesn't exist, height: %d, format: %d", height, format)
-	}
-
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	err = m.beginLocked(opRestore)
-	if err != nil {
-		return err
-	}
-	defer m.endLocked()
-
-	return m.doRestoreSnapshot(*snapshot, ch)
-}
-
-// sortedExtensionNames sorts extension names for deterministic iteration.
-func (m *Manager) sortedExtensionNames() []string {
-	names := make([]string, 0, len(m.extensions))
-	for name := range m.extensions {
-		names = append(names, name)
-	}
-
-	sort.Strings(names)
-	return names
-}
-
-// IsFormatSupported returns whether the snapshotter supports restoration from the given format.
-func IsFormatSupported(snapshotter ExtensionSnapshotter, format uint32) bool {
-	for _, i := range snapshotter.SupportedFormats() {
-		if i == format {
-			return true
-		}
-	}
-	return false
-}
-
-// SnapshotIfApplicable takes a snapshot of the current state if we are on a snapshot height.
-// It also prunes any old snapshots.
-func (m *Manager) SnapshotIfApplicable(height int64) {
-	if m == nil {
-		return
-	}
-	if !m.shouldTakeSnapshot(height) {
-		m.logger.Debug("snapshot is skipped", "height", height)
-		return
-	}
-	// create the snapshot in a separate goroutine
-	go m.snapshot(height)
-}
-
-// shouldTakeSnapshot returns true if a snapshot should be taken at the given height.
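The scheduling rule documented here is modular arithmetic on the configured interval, and block retention (GetSnapshotBlockRetentionHeights above) is the product of the same two knobs. A toy illustration, using the (1500, 2) options the tests below construct:

```go
package main

import "fmt"

func main() {
	const interval uint64 = 1500 // snapshot-interval, as in the tests' NewSnapshotOptions(1500, 2)
	const keepRecent uint64 = 2

	// snapshot exactly when the height lands on a multiple of the interval
	for _, h := range []uint64{1499, 1500, 3000, 3001} {
		fmt.Println(h, interval > 0 && h%interval == 0)
	}

	// block retention follows from the same knobs: interval * keep-recent
	fmt.Println("retention heights:", interval*keepRecent) // 3000
}
```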
-func (m *Manager) shouldTakeSnapshot(height int64) bool { - return m.opts.Interval > 0 && uint64(height)%m.opts.Interval == 0 -} - -func (m *Manager) snapshot(height int64) { - m.logger.Info("creating state snapshot", "height", height) - - if height <= 0 { - m.logger.Error("snapshot height must be positive", "height", height) - return - } - - snapshot, err := m.Create(uint64(height)) - if err != nil { - m.logger.Error("failed to create state snapshot", "height", height, "err", err) - return - } - - m.logger.Info("completed state snapshot", "height", height, "format", snapshot.Format) - - if m.opts.KeepRecent > 0 { - m.logger.Debug("pruning state snapshots") - - pruned, err := m.Prune(m.opts.KeepRecent) - if err != nil { - m.logger.Error("Failed to prune state snapshots", "err", err) - return - } - - m.logger.Debug("pruned state snapshots", "pruned", pruned) - } -} - -// Close the snapshot database. -func (m *Manager) Close() error { return nil } diff --git a/store/v2/snapshots/manager_test.go b/store/v2/snapshots/manager_test.go deleted file mode 100644 index e374b4c75cd0..000000000000 --- a/store/v2/snapshots/manager_test.go +++ /dev/null @@ -1,525 +0,0 @@ -package snapshots_test - -import ( - "errors" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2/snapshots" - "cosmossdk.io/store/v2/snapshots/types" -) - -var opts = snapshots.NewSnapshotOptions(1500, 2) - -func TestManager_List(t *testing.T) { - store := setupStore(t) - commitSnapshotter := &mockCommitSnapshotter{} - manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) - - mgrList, err := manager.List() - require.NoError(t, err) - storeList, err := store.List() - require.NoError(t, err) - - require.NotEmpty(t, storeList) - assert.Equal(t, storeList, mgrList) - - // list should not block or error on busy managers - manager = setupBusyManager(t) - list, err := manager.List() - require.NoError(t, err) - assert.Equal(t, []*types.Snapshot{}, list) - - require.NoError(t, manager.Close()) -} - -func TestManager_LoadChunk(t *testing.T) { - store := setupStore(t) - manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, nil, coretesting.NewNopLogger()) - - // Existing chunk should return body - chunk, err := manager.LoadChunk(2, 1, 1) - require.NoError(t, err) - assert.Equal(t, []byte{2, 1, 1}, chunk) - - // Missing chunk should return nil - chunk, err = manager.LoadChunk(2, 1, 9) - require.NoError(t, err) - assert.Nil(t, chunk) - - // LoadChunk should not block or error on busy managers - manager = setupBusyManager(t) - chunk, err = manager.LoadChunk(2, 1, 0) - require.NoError(t, err) - assert.Nil(t, chunk) -} - -func TestManager_Take(t *testing.T) { - store := setupStore(t) - items := [][]byte{ - {1, 2, 3}, - {4, 5, 6}, - {7, 8, 9}, - } - commitSnapshotter := &mockCommitSnapshotter{ - items: items, - } - extSnapshotter := newExtSnapshotter(10) - - expectChunks := snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) - err := manager.RegisterExtensions(extSnapshotter) - require.NoError(t, err) - - // nil manager should return error - _, err = (*snapshots.Manager)(nil).Create(1) - require.Error(t, err) - - // creating a snapshot at a lower height than the latest should error - _, err = manager.Create(3) - require.Error(t, err) - - // creating a snapshot at a higher height should be 
fine, and should return it - snapshot, err := manager.Create(5) - require.NoError(t, err) - - assert.Equal(t, &types.Snapshot{ - Height: 5, - Format: commitSnapshotter.SnapshotFormat(), - Chunks: 1, - Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, - Metadata: types.Metadata{ - ChunkHashes: checksums(expectChunks), - }, - }, snapshot) - - storeSnapshot, chunks, err := store.Load(snapshot.Height, snapshot.Format) - require.NoError(t, err) - assert.Equal(t, snapshot, storeSnapshot) - assert.Equal(t, expectChunks, readChunks(chunks)) - - // creating a snapshot while a different snapshot is being created should error - manager = setupBusyManager(t) - _, err = manager.Create(9) - require.Error(t, err) -} - -func TestManager_Prune(t *testing.T) { - store := setupStore(t) - manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, nil, coretesting.NewNopLogger()) - - pruned, err := manager.Prune(2) - require.NoError(t, err) - assert.EqualValues(t, 1, pruned) - - list, err := manager.List() - require.NoError(t, err) - assert.Len(t, list, 3) - - // Prune should error while a snapshot is being taken - manager = setupBusyManager(t) - _, err = manager.Prune(2) - require.Error(t, err) -} - -func TestManager_Restore(t *testing.T) { - store := setupStore(t) - target := &mockCommitSnapshotter{} - extSnapshotter := newExtSnapshotter(0) - manager := snapshots.NewManager(store, opts, target, nil, coretesting.NewNopLogger()) - err := manager.RegisterExtensions(extSnapshotter) - require.NoError(t, err) - - expectItems := [][]byte{ - {1, 2, 3}, - {4, 5, 6}, - {7, 8, 9}, - } - - chunks := snapshotItems(expectItems, newExtSnapshotter(10)) - - // Restore errors on invalid format - err = manager.Restore(types.Snapshot{ - Height: 3, - Format: 0, - Hash: []byte{1, 2, 3}, - Chunks: uint32(len(chunks)), - Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, - }) - require.Error(t, err) - require.ErrorIs(t, err, types.ErrUnknownFormat) - - // Restore errors on no chunks - err = manager.Restore(types.Snapshot{Height: 3, Format: types.CurrentFormat, Hash: []byte{1, 2, 3}}) - require.Error(t, err) - - // Restore errors on chunk and chunkhashes mismatch - err = manager.Restore(types.Snapshot{ - Height: 3, - Format: types.CurrentFormat, - Hash: []byte{1, 2, 3}, - Chunks: 4, - Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, - }) - require.Error(t, err) - - // Starting a restore works - err = manager.Restore(types.Snapshot{ - Height: 3, - Format: types.CurrentFormat, - Hash: []byte{1, 2, 3}, - Chunks: 1, - Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, - }) - require.NoError(t, err) - - // While the restore is in progress, any other operations fail - _, err = manager.Create(4) - require.Error(t, err) - - _, err = manager.Prune(1) - require.Error(t, err) - - // Feeding an invalid chunk should error due to invalid checksum, but not abort restoration. 
- _, err = manager.RestoreChunk([]byte{9, 9, 9}) - require.Error(t, err) - require.True(t, errors.Is(err, types.ErrChunkHashMismatch)) - - // Feeding the chunks should work - for i, chunk := range chunks { - done, err := manager.RestoreChunk(chunk) - require.NoError(t, err) - if i == len(chunks)-1 { - assert.True(t, done) - } else { - assert.False(t, done) - } - } - - assert.Equal(t, expectItems, target.items) - assert.Equal(t, 10, len(extSnapshotter.state)) - - // The snapshot is saved in local snapshot store - snapshots, err := store.List() - require.NoError(t, err) - snapshot := snapshots[0] - require.Equal(t, uint64(3), snapshot.Height) - require.Equal(t, types.CurrentFormat, snapshot.Format) - - // Starting a new restore should fail now, because the target already has contents. - err = manager.Restore(types.Snapshot{ - Height: 3, - Format: types.CurrentFormat, - Hash: []byte{1, 2, 3}, - Chunks: 3, - Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, - }) - require.Error(t, err) - - // But if we clear out the target we should be able to start a new restore. This time we'll - // fail it with a checksum error. That error should stop the operation, so that we can do - // a prune operation right after. - target.items = nil - err = manager.Restore(types.Snapshot{ - Height: 3, - Format: types.CurrentFormat, - Hash: []byte{1, 2, 3}, - Chunks: 1, - Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, - }) - require.NoError(t, err) - - // Feeding the chunks should work - for i, chunk := range chunks { - done, err := manager.RestoreChunk(chunk) - require.NoError(t, err) - if i == len(chunks)-1 { - assert.True(t, done) - } else { - assert.False(t, done) - } - } -} - -func TestManager_TakeError(t *testing.T) { - snapshotter := &mockErrorCommitSnapshotter{} - store, err := snapshots.NewStore(t.TempDir()) - require.NoError(t, err) - manager := snapshots.NewManager(store, opts, snapshotter, nil, coretesting.NewNopLogger()) - - _, err = manager.Create(1) - require.Error(t, err) -} - -func TestSnapshot_Take_Restore(t *testing.T) { - store := setupStore(t) - items := [][]byte{ - {1, 2, 3}, - {4, 5, 6}, - {7, 8, 9}, - } - commitSnapshotter := &mockCommitSnapshotter{ - items: items, - } - - extSnapshotter := newExtSnapshotter(10) - - expectChunks := snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) - err := manager.RegisterExtensions(extSnapshotter) - require.NoError(t, err) - - // creating a snapshot at a higher height should be fine, and should return it - snapshot, err := manager.Create(5) - require.NoError(t, err) - - assert.Equal(t, &types.Snapshot{ - Height: 5, - Format: commitSnapshotter.SnapshotFormat(), - Chunks: 1, - Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, - Metadata: types.Metadata{ - ChunkHashes: checksums(expectChunks), - }, - }, snapshot) - - storeSnapshot, chunks, err := store.Load(snapshot.Height, snapshot.Format) - require.NoError(t, err) - assert.Equal(t, snapshot, storeSnapshot) - assert.Equal(t, expectChunks, readChunks(chunks)) - - err = manager.Restore(*snapshot) - require.NoError(t, err) - - // Feeding the chunks should work - for i, chunk := range readChunks(chunks) { - done, err := manager.RestoreChunk(chunk) - require.NoError(t, err) - if i == len(chunks)-1 { - assert.True(t, done) - } else { - assert.False(t, done) - } - 
} - - // The snapshot is saved in local snapshot store - snapshots, err := store.List() - require.NoError(t, err) - require.Equal(t, uint64(5), snapshots[0].Height) - require.Equal(t, types.CurrentFormat, snapshots[0].Format) - - // Starting a new restore should fail now, because the target already has contents. - err = manager.Restore(*snapshot) - require.Error(t, err) - - storeSnapshot, chunks, err = store.Load(snapshot.Height, snapshot.Format) - require.NoError(t, err) - assert.Equal(t, snapshot, storeSnapshot) - assert.Equal(t, expectChunks, readChunks(chunks)) - - // Feeding the chunks should work - for i, chunk := range readChunks(chunks) { - done, err := manager.RestoreChunk(chunk) - require.NoError(t, err) - if i == len(chunks)-1 { - assert.True(t, done) - } else { - assert.False(t, done) - } - } - - assert.Equal(t, items, commitSnapshotter.items) - assert.Equal(t, 10, len(extSnapshotter.state)) - - snapshots, err = store.List() - require.NoError(t, err) - require.Equal(t, uint64(5), snapshots[0].Height) - require.Equal(t, types.CurrentFormat, snapshots[0].Format) -} - -func TestSnapshot_Take_Prune(t *testing.T) { - store := setupStore(t) - - items := [][]byte{ - {1, 2, 3}, - {4, 5, 6}, - {7, 8, 9}, - } - commitSnapshotter := &mockCommitSnapshotter{ - items: items, - } - extSnapshotter := newExtSnapshotter(10) - - expectChunks := snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) - err := manager.RegisterExtensions(extSnapshotter) - require.NoError(t, err) - - // creating a snapshot at height 4 - snapshot, err := manager.Create(4) - require.NoError(t, err) - - assert.Equal(t, &types.Snapshot{ - Height: 4, - Format: commitSnapshotter.SnapshotFormat(), - Chunks: 1, - Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, - Metadata: types.Metadata{ - ChunkHashes: checksums(expectChunks), - }, - }, snapshot) - - pruned, err := manager.Prune(1) - require.NoError(t, err) - assert.EqualValues(t, 4, pruned) - - // creating a snapshot at a same height 4, should be error - // since we prune all the previous snapshot except the latest at height 4 - _, err = manager.Create(4) - require.Error(t, err) - - // prune all - pruned, err = manager.Prune(0) - require.NoError(t, err) - assert.EqualValues(t, 1, pruned) - - // creating a snapshot at a same height 4, should be true since we prune all the previous snapshot - snapshot, err = manager.Create(4) - require.NoError(t, err) - - assert.Equal(t, &types.Snapshot{ - Height: 4, - Format: commitSnapshotter.SnapshotFormat(), - Chunks: 1, - Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, - Metadata: types.Metadata{ - ChunkHashes: checksums(expectChunks), - }, - }, snapshot) - - storeSnapshot, chunks, err := store.Load(snapshot.Height, snapshot.Format) - require.NoError(t, err) - assert.Equal(t, snapshot, storeSnapshot) - assert.Equal(t, expectChunks, readChunks(chunks)) - - pruned, err = manager.Prune(2) - require.NoError(t, err) - assert.EqualValues(t, 0, pruned) - - list, err := manager.List() - require.NoError(t, err) - assert.Len(t, list, 1) - - // Prune should error while a snapshot is being taken - manager = setupBusyManager(t) - _, err = manager.Prune(2) - 
require.Error(t, err) -} - -func TestSnapshot_Pruning_Take_Snapshot_Parallel(t *testing.T) { - store := setupStore(t) - - items := [][]byte{ - {1, 2, 3}, - {4, 5, 6}, - {7, 8, 9}, - } - commitSnapshotter := &mockCommitSnapshotter{ - items: items, - } - extSnapshotter := newExtSnapshotter(10) - - expectChunks := snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) - err := manager.RegisterExtensions(extSnapshotter) - require.NoError(t, err) - - var prunedCount uint64 - // try take snapshot and pruning parallel while prune operation begins first - go func() { - checkError := func() bool { - _, err := manager.Create(4) - return err != nil - } - - require.Eventually(t, checkError, time.Millisecond*200, time.Millisecond) - }() - - prunedCount, err = manager.Prune(1) - require.NoError(t, err) - assert.EqualValues(t, 3, prunedCount) - - // creating a snapshot at a same height 4, should be true since we prune has finished - snapshot, err := manager.Create(4) - require.NoError(t, err) - - assert.Equal(t, &types.Snapshot{ - Height: 4, - Format: commitSnapshotter.SnapshotFormat(), - Chunks: 1, - Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, - Metadata: types.Metadata{ - ChunkHashes: checksums(expectChunks), - }, - }, snapshot) - - // try take snapshot and pruning parallel while snapshot operation begins first - go func() { - checkError := func() bool { - _, err = manager.Prune(1) - return err != nil - } - - require.Eventually(t, checkError, time.Millisecond*200, time.Millisecond) - }() - - snapshot, err = manager.Create(5) - require.NoError(t, err) - - assert.Equal(t, &types.Snapshot{ - Height: 5, - Format: commitSnapshotter.SnapshotFormat(), - Chunks: 1, - Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, - Metadata: types.Metadata{ - ChunkHashes: checksums(expectChunks), - }, - }, snapshot) -} - -func TestSnapshot_SnapshotIfApplicable(t *testing.T) { - store := setupStore(t) - - items := [][]byte{ - {1, 2, 3}, - {4, 5, 6}, - {7, 8, 9}, - } - commitSnapshotter := &mockCommitSnapshotter{ - items: items, - } - extSnapshotter := newExtSnapshotter(10) - - snapshotOpts := snapshots.NewSnapshotOptions(1, 1) - - manager := snapshots.NewManager(store, snapshotOpts, commitSnapshotter, nil, coretesting.NewNopLogger()) - err := manager.RegisterExtensions(extSnapshotter) - require.NoError(t, err) - - manager.SnapshotIfApplicable(4) - - checkLatestHeight := func() bool { - latestSnapshot, _ := store.GetLatest() - return latestSnapshot.Height == 4 - } - - require.Eventually(t, checkLatestHeight, time.Second*10, time.Second) - - pruned, err := manager.Prune(1) - require.NoError(t, err) - require.Equal(t, uint64(0), pruned) -} diff --git a/store/v2/snapshots/snapshotter.go b/store/v2/snapshots/snapshotter.go deleted file mode 100644 index f3f4d33f1cf5..000000000000 --- a/store/v2/snapshots/snapshotter.go +++ /dev/null @@ -1,46 +0,0 @@ -package snapshots - -import ( - protoio "github.com/cosmos/gogoproto/io" - - "cosmossdk.io/store/v2/snapshots/types" -) - -// CommitSnapshotter defines an API for creating and restoring snapshots of the -// commitment state. 
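The interfaces below lean on a functional reader/writer style: an ExtensionPayloadReader yields payloads until io.EOF marks the extension boundary. A self-contained sketch of consuming such a reader (the closure here is a stand-in for the manager's payloadReader):

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	// A stand-in for an ExtensionPayloadReader: yields payloads, then io.EOF
	// once the extension boundary is reached.
	payloads := [][]byte{{1}, {2}, {3}}
	i := 0
	read := func() ([]byte, error) {
		if i >= len(payloads) {
			return nil, io.EOF
		}
		p := payloads[i]
		i++
		return p, nil
	}

	// Consumers loop until EOF, which is how RestoreExtension implementations
	// are expected to drain the reader.
	for {
		p, err := read()
		if errors.Is(err, io.EOF) {
			break
		}
		fmt.Println(p)
	}
}
```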
-type CommitSnapshotter interface {
-	// Snapshot writes a snapshot of the commitment state at the given version.
-	Snapshot(version uint64, protoWriter protoio.Writer) error
-
-	// Restore restores the commitment state from the snapshot reader.
-	Restore(version uint64, format uint32, protoReader protoio.Reader) (types.SnapshotItem, error)
-}
-
-// ExtensionPayloadReader reads extension payloads;
-// it returns io.EOF when it reaches either the end of the stream or an extension boundary.
-type ExtensionPayloadReader = func() ([]byte, error)
-
-// ExtensionPayloadWriter is a helper to write extension payloads to the underlying stream.
-type ExtensionPayloadWriter = func([]byte) error
-
-// ExtensionSnapshotter is an extension Snapshotter that is appended to the snapshot stream.
-// An ExtensionSnapshotter has a unique name and manages its own internal formats.
-type ExtensionSnapshotter interface {
-	// SnapshotName returns the name of the snapshotter; it must be unique within the manager.
-	SnapshotName() string
-
-	// SnapshotFormat returns the default format the extension snapshotter uses to encode the
-	// payloads when taking a snapshot.
-	// It's defined within the extension, different from the global format for the whole state-sync snapshot.
-	SnapshotFormat() uint32
-
-	// SupportedFormats returns a list of formats it can restore from.
-	SupportedFormats() []uint32
-
-	// SnapshotExtension writes extension payloads into the underlying protobuf stream.
-	SnapshotExtension(height uint64, payloadWriter ExtensionPayloadWriter) error
-
-	// RestoreExtension restores an extension state snapshot;
-	// the payload reader returns `io.EOF` when it reaches the extension boundary.
-	RestoreExtension(height uint64, format uint32, payloadReader ExtensionPayloadReader) error
-}
diff --git a/store/v2/store.go b/store/v2/store.go
deleted file mode 100644
index 20c6ab3c8ef2..000000000000
--- a/store/v2/store.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package store
-
-import (
-	"io"
-
-	corestore "cosmossdk.io/core/store"
-	"cosmossdk.io/store/v2/metrics"
-	"cosmossdk.io/store/v2/proof"
-)
-
-// RootStore defines an abstraction layer containing a State Storage (SS) engine
-// and one or more State Commitment (SC) engines.
-type RootStore interface {
-	Pruner
-	Backend
-
-	// StateLatest returns a read-only version of the RootStore at the latest
-	// height, alongside the associated version.
-	StateLatest() (uint64, corestore.ReaderMap, error)
-
-	// StateAt is analogous to StateLatest() except it returns a read-only version
-	// of the RootStore at the provided version. If such a version cannot be found,
-	// an error must be returned.
-	StateAt(version uint64) (corestore.ReaderMap, error)
-
-	// Query performs a query on the RootStore for a given store key, version (height),
-	// and key tuple. Queries should be routed to the underlying SS engine.
-	Query(storeKey []byte, version uint64, key []byte, prove bool) (QueryResult, error)
-
-	// LoadVersion loads the RootStore to the given version.
-	LoadVersion(version uint64) error
-
-	// LoadVersionForOverwriting loads the state at the given version.
-	// Any versions greater than targetVersion will be deleted.
-	LoadVersionForOverwriting(version uint64) error
-
-	// LoadLatestVersion behaves identically to LoadVersion except it loads the
-	// latest version implicitly.
-	LoadLatestVersion() error
-
-	// GetLatestVersion returns the latest version, i.e. height, committed.
-	GetLatestVersion() (uint64, error)
-
-	// SetInitialVersion sets the initial version on the RootStore.
- SetInitialVersion(v uint64) error - - // Commit is responsible for taking the provided changeset, committing it to all - // SC and SS backends, and flushing the result to disk. Note, it will overwrite - // the changeset if WorkingHash() was called. It must return a hash of the - // merkleized committed state. - Commit(cs *corestore.Changeset) ([]byte, error) - - // LastCommitID returns a CommitID pertaining to the last commitment. - LastCommitID() (proof.CommitID, error) - - // SetMetrics sets the telemetry handler on the RootStore. - SetMetrics(m metrics.Metrics) - - io.Closer -} - -// Backend defines the interface for the RootStore backends. -type Backend interface { - // GetStateCommitment returns the SC backend. - GetStateCommitment() Committer -} - -// UpgradeableStore defines the interface for upgrading store keys. -type UpgradeableStore interface { - // LoadVersionAndUpgrade behaves identically to LoadVersion except it also - // accepts a StoreUpgrades object that defines a series of transformations to - // apply to store keys (if any). - // - // Note, handling StoreUpgrades is optional depending on the underlying store - // implementation. - LoadVersionAndUpgrade(version uint64, upgrades *corestore.StoreUpgrades) error -} - -// Pruner defines the interface for pruning old versions of the store or database. -type Pruner interface { - // Prune prunes the store to the provided version. - Prune(version uint64) error -} - -// PausablePruner extends the Pruner interface to include the API for pausing -// the pruning process. -type PausablePruner interface { - Pruner - - // PausePruning pauses or resumes the pruning process to avoid parallel writes - // while committing the state. - PausePruning(pause bool) -} - -// QueryResult defines the response type for performing a query on a RootStore.
-type QueryResult struct { - Key []byte - Value []byte - Version uint64 - ProofOps []proof.CommitmentOp -} diff --git a/tests/integration/v2/auth/app_test.go b/tests/integration/v2/auth/app_test.go deleted file mode 100644 index 6331492b014c..000000000000 --- a/tests/integration/v2/auth/app_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package auth - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "cosmossdk.io/core/router" - "cosmossdk.io/core/transaction" - "cosmossdk.io/depinject" - "cosmossdk.io/log" - "cosmossdk.io/runtime/v2" - "cosmossdk.io/runtime/v2/services" - "cosmossdk.io/server/v2/stf" - "cosmossdk.io/x/accounts" - basedepinject "cosmossdk.io/x/accounts/defaults/base/depinject" - accountsv1 "cosmossdk.io/x/accounts/v1" - _ "cosmossdk.io/x/bank" // import as blank for app wiring - bankkeeper "cosmossdk.io/x/bank/keeper" - banktypes "cosmossdk.io/x/bank/types" - _ "cosmossdk.io/x/consensus" // import as blank for app wiring - _ "cosmossdk.io/x/staking" // import as blank for app wiring - - "github.com/cosmos/cosmos-sdk/tests/integration/v2" - "github.com/cosmos/cosmos-sdk/testutil/configurator" - _ "github.com/cosmos/cosmos-sdk/x/auth" // import as blank for app wiring - authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" - _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config" // import as blank for app wiring - _ "github.com/cosmos/cosmos-sdk/x/auth/vesting" // import as blank for app wiring - _ "github.com/cosmos/cosmos-sdk/x/genutil" // import as blank for app wiring -) - -type suite struct { - app *integration.App - - ctx context.Context - - authKeeper authkeeper.AccountKeeper - accountsKeeper accounts.Keeper - bankKeeper bankkeeper.Keeper -} - -func (s suite) mustAddr(address []byte) string { - str, _ := s.authKeeper.AddressCodec().BytesToString(address) - return str -} - -func createTestSuite(t *testing.T) *suite { - t.Helper() - res := suite{} - - moduleConfigs := []configurator.ModuleOption{ - configurator.AccountsModule(), - configurator.AuthModule(), - configurator.BankModule(), - configurator.VestingModule(), - configurator.StakingModule(), - configurator.TxModule(), - configurator.ValidateModule(), - configurator.ConsensusModule(), - configurator.GenutilModule(), - } - - var err error - startupCfg := integration.DefaultStartUpConfig(t) - - msgRouterService := integration.NewRouterService() - res.registerMsgRouterService(msgRouterService) - - var routerFactory runtime.RouterServiceFactory = func(_ []byte) router.Service { - return msgRouterService - } - - queryRouterService := integration.NewRouterService() - res.registerQueryRouterService(queryRouterService) - - serviceBuilder := runtime.NewRouterBuilder(routerFactory, queryRouterService) - - startupCfg.BranchService = &integration.BranchService{} - startupCfg.RouterServiceBuilder = serviceBuilder - startupCfg.HeaderService = services.NewGenesisHeaderService(stf.HeaderService{}) - - res.app, err = integration.NewApp( - depinject.Configs(configurator.NewAppV2Config(moduleConfigs...), depinject.Provide( - // inject desired account types: - basedepinject.ProvideAccount, - - // provide base account options - basedepinject.ProvideSecp256K1PubKey, - - // provide extra accounts - ProvideMockRetroCompatAccountValid, - ProvideMockRetroCompatAccountNoInfo, - ProvideMockRetroCompatAccountNoImplement, - ), depinject.Supply(log.NewNopLogger())), - startupCfg, - &res.bankKeeper, &res.accountsKeeper, &res.authKeeper) - require.NoError(t, err) - - res.ctx = res.app.StateLatestContext(t) - - return &res
-} - -func (s *suite) registerMsgRouterService(router *integration.RouterService) { - // register custom router service - bankSendHandler := func(ctx context.Context, req transaction.Msg) (transaction.Msg, error) { - msg, ok := req.(*banktypes.MsgSend) - if !ok { - return nil, integration.ErrInvalidMsgType - } - msgServer := bankkeeper.NewMsgServerImpl(s.bankKeeper) - resp, err := msgServer.Send(ctx, msg) - return resp, err - } - - router.RegisterHandler(bankSendHandler, "cosmos.bank.v1beta1.MsgSend") -} - -func (s *suite) registerQueryRouterService(router *integration.RouterService) { - // register custom router service - queryHandler := func(ctx context.Context, msg transaction.Msg) (transaction.Msg, error) { - req, ok := msg.(*accountsv1.AccountNumberRequest) - if !ok { - return nil, integration.ErrInvalidMsgType - } - qs := accounts.NewQueryServer(s.accountsKeeper) - resp, err := qs.AccountNumber(ctx, req) - return resp, err - } - - router.RegisterHandler(queryHandler, "cosmos.accounts.v1.AccountNumberRequest") -} From 0675deebc0968802d6e257552b104029bad68dad Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Tue, 3 Dec 2024 17:37:04 +0100 Subject: [PATCH 3/4] bump deps --- server/v2/cometbft/go.mod | 8 ++++---- server/v2/cometbft/go.sum | 16 ++++++++-------- simapp/v2/go.mod | 38 ++++++++++++++++---------------------- simapp/v2/go.sum | 20 ++++++++++---------- 4 files changed, 38 insertions(+), 44 deletions(-) diff --git a/server/v2/cometbft/go.mod b/server/v2/cometbft/go.mod index 0298fdf9a837..4c749ad58a31 100644 --- a/server/v2/cometbft/go.mod +++ b/server/v2/cometbft/go.mod @@ -20,10 +20,10 @@ require ( cosmossdk.io/errors/v2 v2.0.0-20240731132947-df72853b3ca5 cosmossdk.io/log v1.5.0 cosmossdk.io/schema v0.3.1-0.20241128094659-bd76b47e1d8b //main - cosmossdk.io/server/v2 v2.0.0-20241202115147-f350775d0ed2 // main - cosmossdk.io/server/v2/appmanager v0.0.0-20241119134933-d697a3de0f95 // main - cosmossdk.io/server/v2/stf v0.0.0-20241119134933-d697a3de0f95 // main - cosmossdk.io/store/v2 v2.0.0-20241202115147-f350775d0ed2 // main + cosmossdk.io/server/v2 v2.0.0-20241203161819-94cfcc11aaf5 // main + cosmossdk.io/server/v2/appmanager v0.0.0-20241203161819-94cfcc11aaf5 // main + cosmossdk.io/server/v2/stf v0.0.0-20241203161819-94cfcc11aaf5 // main + cosmossdk.io/store/v2 v2.0.0-20241203161819-94cfcc11aaf5 // main cosmossdk.io/x/consensus v0.0.0-00010101000000-000000000000 github.com/cometbft/cometbft v1.0.0-rc2.0.20241127125717-4ce33b646ac9 github.com/cometbft/cometbft/api v1.0.0-rc2 diff --git a/server/v2/cometbft/go.sum b/server/v2/cometbft/go.sum index 76a27890d6ba..e7059b6cb4b7 100644 --- a/server/v2/cometbft/go.sum +++ b/server/v2/cometbft/go.sum @@ -24,16 +24,16 @@ cosmossdk.io/math v1.4.0 h1:XbgExXFnXmF/CccPPEto40gOO7FpWu9yWNAZPN3nkNQ= cosmossdk.io/math v1.4.0/go.mod h1:O5PkD4apz2jZs4zqFdTr16e1dcaQCc5z6lkEnrrppuk= cosmossdk.io/schema v0.3.1-0.20241128094659-bd76b47e1d8b h1:svpFdulZRrYz+RTHu2u9CeKkMKrIHx5354vjiHerovo= cosmossdk.io/schema v0.3.1-0.20241128094659-bd76b47e1d8b/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ= -cosmossdk.io/server/v2 v2.0.0-20241202115147-f350775d0ed2 h1:PBV8S+nGjXNEW/sGUgYYwsyI315I4LtXQTrKk5g8G2A= -cosmossdk.io/server/v2 v2.0.0-20241202115147-f350775d0ed2/go.mod h1:NT1O+DPv2bWxqX8QTtvBbjXpeXFw5doT2hoRRNr8ob4= -cosmossdk.io/server/v2/appmanager v0.0.0-20241119134933-d697a3de0f95 h1:GOznErJieaI0OS0LDUsu5Vy3qPnCyjdvkncejP0Zv5s= -cosmossdk.io/server/v2/appmanager v0.0.0-20241119134933-d697a3de0f95/go.mod 
h1:elhlrldWtm+9U4PxE0G3wjz83yQwVVGVAOncXJPY1Xc= -cosmossdk.io/server/v2/stf v0.0.0-20241119134933-d697a3de0f95 h1:cK7wvmlA18AvLcaInseKTBmt5EXtLwafe7oH1rx7veU= -cosmossdk.io/server/v2/stf v0.0.0-20241119134933-d697a3de0f95/go.mod h1:4e9SzLyeGptQ3tSR6nKCNwCu7Ye4uUS2WIJih29dG2c= +cosmossdk.io/server/v2 v2.0.0-20241203161819-94cfcc11aaf5 h1:TGvCN7MYep68o1X3/mgEt+raIf5x6m9wHC3erLMWfr0= +cosmossdk.io/server/v2 v2.0.0-20241203161819-94cfcc11aaf5/go.mod h1:NT1O+DPv2bWxqX8QTtvBbjXpeXFw5doT2hoRRNr8ob4= +cosmossdk.io/server/v2/appmanager v0.0.0-20241203161819-94cfcc11aaf5 h1:uQk1cl7ZvDud6FkZgbGj4LMQ2jKZe6wfqrjSwuLDABw= +cosmossdk.io/server/v2/appmanager v0.0.0-20241203161819-94cfcc11aaf5/go.mod h1:elhlrldWtm+9U4PxE0G3wjz83yQwVVGVAOncXJPY1Xc= +cosmossdk.io/server/v2/stf v0.0.0-20241203161819-94cfcc11aaf5 h1:K5stPleqQZGWZuvPJdlMWNgddkUeT55hw1R6ZXnXEqk= +cosmossdk.io/server/v2/stf v0.0.0-20241203161819-94cfcc11aaf5/go.mod h1:4e9SzLyeGptQ3tSR6nKCNwCu7Ye4uUS2WIJih29dG2c= cosmossdk.io/store v1.0.0-rc.0.0.20241202115147-f350775d0ed2 h1:UCe04NMBR+1M5JRpZJvM+I0EZzD3zXrk9YOm2RZdKDg= cosmossdk.io/store v1.0.0-rc.0.0.20241202115147-f350775d0ed2/go.mod h1:oZBBY4BrkYnghr6MFL0MP5mGqpkPedHcWkXwXddd6tU= -cosmossdk.io/store/v2 v2.0.0-20241202115147-f350775d0ed2 h1:SmP9frnyUuBXW6rqNfPXDGaLfgN9INaHzBMZZW8ZraU= -cosmossdk.io/store/v2 v2.0.0-20241202115147-f350775d0ed2/go.mod h1:1DbksNgjyQ1XxlCYqkU82WqBGWcxmgciO5dBf6dRhKM= +cosmossdk.io/store/v2 v2.0.0-20241203161819-94cfcc11aaf5 h1:pITcGoEspRSWxCpCaAkSoMDuikoapkfo7eg1OYFE2M8= +cosmossdk.io/store/v2 v2.0.0-20241203161819-94cfcc11aaf5/go.mod h1:1DbksNgjyQ1XxlCYqkU82WqBGWcxmgciO5dBf6dRhKM= cosmossdk.io/x/tx v1.0.0-alpha.2 h1:UW80FMm7B0fiAMsrfe5+HabSJ3XBg+tQa6/GK9prqWk= cosmossdk.io/x/tx v1.0.0-alpha.2/go.mod h1:r4yTKSJ7ZCCR95YbBfY3nfvbgNw6m9F6f25efWYYQWo= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= diff --git a/simapp/v2/go.mod b/simapp/v2/go.mod index 68c5655414cd..d64009906591 100644 --- a/simapp/v2/go.mod +++ b/simapp/v2/go.mod @@ -10,12 +10,15 @@ require ( cosmossdk.io/depinject v1.1.0 cosmossdk.io/log v1.5.0 cosmossdk.io/math v1.4.0 - cosmossdk.io/runtime/v2 v2.0.0-20241119134933-d697a3de0f95 // main - cosmossdk.io/server/v2 v2.0.0-20241202115147-f350775d0ed2 // main + cosmossdk.io/runtime/v2 v2.0.0-20241203161819-94cfcc11aaf5 // main + cosmossdk.io/server/v2 v2.0.0-20241203161819-94cfcc11aaf5 // main cosmossdk.io/server/v2/cometbft v0.0.0-00010101000000-000000000000 - cosmossdk.io/store/v2 v2.0.0-20241202115147-f350775d0ed2 // main + cosmossdk.io/store/v2 v2.0.0-20241203161819-94cfcc11aaf5 // main cosmossdk.io/tools/confix v0.0.0-00010101000000-000000000000 cosmossdk.io/x/accounts v0.0.0-20240913065641-0064ccbce64e + cosmossdk.io/x/accounts/defaults/base v0.0.0-00010101000000-000000000000 + cosmossdk.io/x/accounts/defaults/lockup v0.0.0-20240417181816-5e7aae0db1f5 + cosmossdk.io/x/accounts/defaults/multisig v0.0.0-00010101000000-000000000000 cosmossdk.io/x/authz v0.0.0-00010101000000-000000000000 cosmossdk.io/x/bank v0.0.0-20240226161501-23359a0b6d91 cosmossdk.io/x/circuit v0.0.0-20230613133644-0a778132a60f @@ -34,7 +37,7 @@ require ( cosmossdk.io/x/upgrade v0.0.0-20230613133644-0a778132a60f github.com/cometbft/cometbft v1.0.0-rc2.0.20241127125717-4ce33b646ac9 // this version is not used as it is always replaced by the latest Cosmos SDK version - github.com/cosmos/cosmos-sdk v0.53.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.19.0 @@ -42,22 +45,6 @@ 
require ( google.golang.org/protobuf v1.35.2 ) -require ( - cosmossdk.io/x/accounts/defaults/base v0.0.0-00010101000000-000000000000 - cosmossdk.io/x/accounts/defaults/lockup v0.0.0-20240417181816-5e7aae0db1f5 - cosmossdk.io/x/accounts/defaults/multisig v0.0.0-00010101000000-000000000000 -) - -require ( - github.com/bytedance/sonic v1.12.4 // indirect - github.com/bytedance/sonic/loader v0.2.1 // indirect - github.com/cloudwego/base64x v0.1.4 // indirect - github.com/cloudwego/iasm v0.2.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.9 // indirect - github.com/twitchyliquid64/golang-asm v0.15.1 // indirect - golang.org/x/arch v0.12.0 // indirect -) - require ( buf.build/gen/go/cometbft/cometbft/protocolbuffers/go v1.35.2-20241120201313-68e42a58b301.1 // indirect buf.build/gen/go/cosmos/gogo-proto/protocolbuffers/go v1.35.2-20240130113600-88ef6483f90f.1 // indirect @@ -71,8 +58,8 @@ require ( cosmossdk.io/errors v1.0.1 // indirect cosmossdk.io/errors/v2 v2.0.0-20240731132947-df72853b3ca5 // indirect cosmossdk.io/schema v0.3.1-0.20241128094659-bd76b47e1d8b // indirect - cosmossdk.io/server/v2/appmanager v0.0.0-20241119134933-d697a3de0f95 // indirect; main - cosmossdk.io/server/v2/stf v0.0.0-20241119134933-d697a3de0f95 // indirect; main + cosmossdk.io/server/v2/appmanager v0.0.0-20241203161819-94cfcc11aaf5 // indirect; main + cosmossdk.io/server/v2/stf v0.0.0-20241203161819-94cfcc11aaf5 // indirect; main cosmossdk.io/store v1.1.1-0.20240909133312-50288938d1b6 // indirect; main cosmossdk.io/x/tx v1.0.0-alpha.2 // indirect; main filippo.io/edwards25519 v1.1.0 // indirect @@ -87,8 +74,12 @@ require ( github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bgentry/speakeasy v0.2.0 // indirect github.com/bits-and-blooms/bitset v1.10.0 // indirect + github.com/bytedance/sonic v1.12.4 // indirect + github.com/bytedance/sonic/loader v0.2.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chzyer/readline v1.5.1 // indirect + github.com/cloudwego/base64x v0.1.4 // indirect + github.com/cloudwego/iasm v0.2.0 // indirect github.com/cockroachdb/apd/v2 v2.0.2 // indirect github.com/cockroachdb/errors v1.11.3 // indirect github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 // indirect @@ -165,6 +156,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.9 // indirect @@ -209,6 +201,7 @@ require ( github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tendermint/go-amino v0.16.0 // indirect github.com/tidwall/btree v1.7.0 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ulikunitz/xz v0.5.12 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-go v0.14.3 // indirect @@ -222,6 +215,7 @@ require ( go.opentelemetry.io/otel/metric v1.27.0 // indirect go.opentelemetry.io/otel/trace v1.27.0 // indirect go.uber.org/multierr v1.11.0 // indirect + golang.org/x/arch v0.12.0 // indirect golang.org/x/crypto v0.29.0 // indirect golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect golang.org/x/mod v0.22.0 // indirect diff --git a/simapp/v2/go.sum b/simapp/v2/go.sum index 37323eae69c5..30cc00d2f702 100644 --- a/simapp/v2/go.sum +++ b/simapp/v2/go.sum @@ -210,20 +210,20 @@ cosmossdk.io/log v1.5.0 
h1:dVdzPJW9kMrnAYyMf1duqacoidB9uZIl+7c6z0mnq0g= cosmossdk.io/log v1.5.0/go.mod h1:Tr46PUJjiUthlwQ+hxYtUtPn4D/oCZXAkYevBeh5+FI= cosmossdk.io/math v1.4.0 h1:XbgExXFnXmF/CccPPEto40gOO7FpWu9yWNAZPN3nkNQ= cosmossdk.io/math v1.4.0/go.mod h1:O5PkD4apz2jZs4zqFdTr16e1dcaQCc5z6lkEnrrppuk= -cosmossdk.io/runtime/v2 v2.0.0-20241119134933-d697a3de0f95 h1:hYI7pvrmdkgFZJ4HVQow7ubopsTmleYet4S56tFMdI0= -cosmossdk.io/runtime/v2 v2.0.0-20241119134933-d697a3de0f95/go.mod h1:J4Wv2eOwAz8t14Ak8XBMWDoFbwqwyllMaJF91O7n/wI= +cosmossdk.io/runtime/v2 v2.0.0-20241203161819-94cfcc11aaf5 h1:g8rsvk80KaR5SNIr9cs0RnRM+yFgkKV5OjCu0ARHTJo= +cosmossdk.io/runtime/v2 v2.0.0-20241203161819-94cfcc11aaf5/go.mod h1:7DCLOq3Xzyq+DA0WoMD5HcZUuflShDmq6g2qgh2yVdI= cosmossdk.io/schema v0.3.1-0.20241128094659-bd76b47e1d8b h1:svpFdulZRrYz+RTHu2u9CeKkMKrIHx5354vjiHerovo= cosmossdk.io/schema v0.3.1-0.20241128094659-bd76b47e1d8b/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ= -cosmossdk.io/server/v2 v2.0.0-20241202115147-f350775d0ed2 h1:PBV8S+nGjXNEW/sGUgYYwsyI315I4LtXQTrKk5g8G2A= -cosmossdk.io/server/v2 v2.0.0-20241202115147-f350775d0ed2/go.mod h1:NT1O+DPv2bWxqX8QTtvBbjXpeXFw5doT2hoRRNr8ob4= -cosmossdk.io/server/v2/appmanager v0.0.0-20241119134933-d697a3de0f95 h1:GOznErJieaI0OS0LDUsu5Vy3qPnCyjdvkncejP0Zv5s= -cosmossdk.io/server/v2/appmanager v0.0.0-20241119134933-d697a3de0f95/go.mod h1:elhlrldWtm+9U4PxE0G3wjz83yQwVVGVAOncXJPY1Xc= -cosmossdk.io/server/v2/stf v0.0.0-20241119134933-d697a3de0f95 h1:cK7wvmlA18AvLcaInseKTBmt5EXtLwafe7oH1rx7veU= -cosmossdk.io/server/v2/stf v0.0.0-20241119134933-d697a3de0f95/go.mod h1:4e9SzLyeGptQ3tSR6nKCNwCu7Ye4uUS2WIJih29dG2c= +cosmossdk.io/server/v2 v2.0.0-20241203161819-94cfcc11aaf5 h1:TGvCN7MYep68o1X3/mgEt+raIf5x6m9wHC3erLMWfr0= +cosmossdk.io/server/v2 v2.0.0-20241203161819-94cfcc11aaf5/go.mod h1:NT1O+DPv2bWxqX8QTtvBbjXpeXFw5doT2hoRRNr8ob4= +cosmossdk.io/server/v2/appmanager v0.0.0-20241203161819-94cfcc11aaf5 h1:uQk1cl7ZvDud6FkZgbGj4LMQ2jKZe6wfqrjSwuLDABw= +cosmossdk.io/server/v2/appmanager v0.0.0-20241203161819-94cfcc11aaf5/go.mod h1:elhlrldWtm+9U4PxE0G3wjz83yQwVVGVAOncXJPY1Xc= +cosmossdk.io/server/v2/stf v0.0.0-20241203161819-94cfcc11aaf5 h1:K5stPleqQZGWZuvPJdlMWNgddkUeT55hw1R6ZXnXEqk= +cosmossdk.io/server/v2/stf v0.0.0-20241203161819-94cfcc11aaf5/go.mod h1:4e9SzLyeGptQ3tSR6nKCNwCu7Ye4uUS2WIJih29dG2c= cosmossdk.io/store v1.0.0-rc.0.0.20241119134933-d697a3de0f95 h1:5hIgRL6VsicdJ7FVK6AG7cSy1C8tiVbCp6W3Y+QQ5ko= cosmossdk.io/store v1.0.0-rc.0.0.20241119134933-d697a3de0f95/go.mod h1:ceNwMZIU8ZIDoeUdA9+sGxz3GVt0orEGoVpkBfa/UtU= -cosmossdk.io/store/v2 v2.0.0-20241202115147-f350775d0ed2 h1:SmP9frnyUuBXW6rqNfPXDGaLfgN9INaHzBMZZW8ZraU= -cosmossdk.io/store/v2 v2.0.0-20241202115147-f350775d0ed2/go.mod h1:1DbksNgjyQ1XxlCYqkU82WqBGWcxmgciO5dBf6dRhKM= +cosmossdk.io/store/v2 v2.0.0-20241203161819-94cfcc11aaf5 h1:pITcGoEspRSWxCpCaAkSoMDuikoapkfo7eg1OYFE2M8= +cosmossdk.io/store/v2 v2.0.0-20241203161819-94cfcc11aaf5/go.mod h1:1DbksNgjyQ1XxlCYqkU82WqBGWcxmgciO5dBf6dRhKM= cosmossdk.io/x/tx v1.0.0-alpha.2 h1:UW80FMm7B0fiAMsrfe5+HabSJ3XBg+tQa6/GK9prqWk= cosmossdk.io/x/tx v1.0.0-alpha.2/go.mod h1:r4yTKSJ7ZCCR95YbBfY3nfvbgNw6m9F6f25efWYYQWo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= From d77a45db91cabe42a0eff92142a897023f510635 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Tue, 3 Dec 2024 17:42:06 +0100 Subject: [PATCH 4/4] go mod tidy --- simapp/go.mod | 2 +- tests/go.mod | 2 +- x/circuit/go.mod | 2 +- 3 files changed, 3 insertions(+), 3 
deletions(-) diff --git a/simapp/go.mod b/simapp/go.mod index 325a17d264f3..3faff615dbd1 100644 --- a/simapp/go.mod +++ b/simapp/go.mod @@ -38,7 +38,7 @@ require ( github.com/cometbft/cometbft v1.0.0-rc2.0.20241127125717-4ce33b646ac9 github.com/cometbft/cometbft/api v1.0.0-rc2 // this version is not used as it is always replaced by the latest Cosmos SDK version - github.com/cosmos/cosmos-sdk v0.53.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.7.0 github.com/golang/mock v1.6.0 github.com/jackc/pgx/v5 v5.7.1 diff --git a/tests/go.mod b/tests/go.mod index 1735b220bff0..59138590563d 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -23,7 +23,7 @@ require ( github.com/cometbft/cometbft v1.0.0-rc2.0.20241127125717-4ce33b646ac9 github.com/cosmos/cosmos-proto v1.0.0-beta.5 // this version is not used as it is always replaced by the latest Cosmos SDK version - github.com/cosmos/cosmos-sdk v0.53.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.7.0 github.com/spf13/cobra v1.8.1 // indirect github.com/stretchr/testify v1.10.0 diff --git a/x/circuit/go.mod b/x/circuit/go.mod index 7667fc39183e..1843f8a42536 100644 --- a/x/circuit/go.mod +++ b/x/circuit/go.mod @@ -11,7 +11,7 @@ require ( cosmossdk.io/errors v1.0.1 cosmossdk.io/schema v0.3.1-0.20241128094659-bd76b47e1d8b cosmossdk.io/store v1.1.1-0.20240909133312-50288938d1b6 // main - github.com/cosmos/cosmos-sdk v0.53.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.7.0 github.com/golang/protobuf v1.5.4 github.com/grpc-ecosystem/grpc-gateway v1.16.0
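
A note on the parallel snapshot/prune tests removed above: they rely on the manager rejecting a Create while a Prune is in flight (and vice versa) rather than blocking. The following is a minimal, self-contained Go sketch of that mutual-exclusion behavior; the opManager type, its fields, and errOpInProgress are invented for illustration and are not the real store/v2/snapshots manager.

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// opManager mimics the exclusivity the deleted tests exercise: while one
// operation runs, a second concurrent operation fails fast with an error.
type opManager struct {
	mu   sync.Mutex
	busy bool
}

var errOpInProgress = errors.New("operation in progress")

// begin marks an operation as started, or fails if one is already running.
func (m *opManager) begin() error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.busy {
		return errOpInProgress
	}
	m.busy = true
	return nil
}

// end marks the running operation as finished.
func (m *opManager) end() {
	m.mu.Lock()
	m.busy = false
	m.mu.Unlock()
}

// Prune simulates a slow prune holding the exclusive slot.
func (m *opManager) Prune() error {
	if err := m.begin(); err != nil {
		return err
	}
	defer m.end()
	time.Sleep(50 * time.Millisecond) // simulate work
	return nil
}

// Create simulates taking a snapshot.
func (m *opManager) Create() error {
	if err := m.begin(); err != nil {
		return err
	}
	defer m.end()
	return nil
}

func main() {
	m := &opManager{}
	done := make(chan error, 1)
	go func() { done <- m.Prune() }()
	time.Sleep(10 * time.Millisecond)
	fmt.Println(m.Create()) // "operation in progress", as the parallel tests expect
	fmt.Println(<-done)     // nil: prune completes
	fmt.Println(m.Create()) // nil: Create succeeds once pruning has finished
}

This mirrors the shape of the tests, where require.Eventually polls until the concurrent Create or Prune starts returning an error, and the same call succeeds again after the first operation completes.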
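
Similarly, the ExtensionSnapshotter contract shown in the deleted store/v2/snapshots/snapshotter.go is small enough to sketch. Below is an illustrative, self-contained implementation: the kvExtension type, its payload slice, and the main harness are invented for this example; only the method set mirrors the interface in the diff.

package main

import (
	"errors"
	"fmt"
	"io"
)

// Local copies of the payload reader/writer aliases from the deleted file.
type (
	ExtensionPayloadReader = func() ([]byte, error)
	ExtensionPayloadWriter = func([]byte) error
)

// kvExtension is a hypothetical extension snapshotter persisting a flat list
// of opaque payloads.
type kvExtension struct {
	payloads [][]byte
}

func (e *kvExtension) SnapshotName() string       { return "kv-extension" } // must be unique within the manager
func (e *kvExtension) SnapshotFormat() uint32     { return 1 }
func (e *kvExtension) SupportedFormats() []uint32 { return []uint32{1} }

// SnapshotExtension writes each payload into the underlying stream.
func (e *kvExtension) SnapshotExtension(height uint64, w ExtensionPayloadWriter) error {
	for _, p := range e.payloads {
		if err := w(p); err != nil {
			return err
		}
	}
	return nil
}

// RestoreExtension drains the payload reader until io.EOF, which marks the
// extension boundary.
func (e *kvExtension) RestoreExtension(height uint64, format uint32, r ExtensionPayloadReader) error {
	if format != 1 {
		return errors.New("unsupported format")
	}
	e.payloads = nil
	for {
		p, err := r()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		e.payloads = append(e.payloads, p)
	}
}

func main() {
	src := &kvExtension{payloads: [][]byte{{1}, {2, 3}}}

	// Snapshot into an in-memory buffer via the writer closure.
	var buf [][]byte
	_ = src.SnapshotExtension(10, func(p []byte) error { buf = append(buf, p); return nil })

	// Restore from the buffer via the reader closure, signalling io.EOF at the boundary.
	dst := &kvExtension{}
	i := 0
	_ = dst.RestoreExtension(10, 1, func() ([]byte, error) {
		if i == len(buf) {
			return nil, io.EOF
		}
		p := buf[i]
		i++
		return p, nil
	})
	fmt.Println(len(dst.payloads)) // 2
}

In real wiring, such an extension would be registered with the snapshot manager via manager.RegisterExtensions, as the deleted tests above do with newExtSnapshotter.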