diff --git a/cmd/bee/cmd/db_test.go b/cmd/bee/cmd/db_test.go index 02dcb66b571..1bfc60b8fc2 100644 --- a/cmd/bee/cmd/db_test.go +++ b/cmd/bee/cmd/db_test.go @@ -46,7 +46,6 @@ func TestDBExportImport(t *testing.T) { if err != nil { t.Fatal(err) } - fmt.Println("put chunk: ", ch.Address().String()) chunks[ch.Address().String()] = 0 } db1.Close() @@ -115,7 +114,6 @@ func TestDBExportImportPinning(t *testing.T) { if err != nil { t.Fatal(err) } - fmt.Println("collection ", rootAddr.String(), " put chunk: ", ch.Address().String()) chunks[ch.Address().String()] = 0 } err = collection.Done(rootAddr) @@ -125,16 +123,9 @@ func TestDBExportImportPinning(t *testing.T) { pins[rootAddr.String()] = nil } - addresses, err := db1.Pins() - if err != nil { - t.Fatal(err) - } - for _, addr := range addresses { - fmt.Println("pin: ", addr.String()) - } db1.Close() - err = newCommand(t, cmd.WithArgs("db", "export", "pinning", export, "--data-dir", dir1)).Execute() + err := newCommand(t, cmd.WithArgs("db", "export", "pinning", export, "--data-dir", dir1)).Execute() if err != nil { t.Fatal(err) } @@ -150,7 +141,7 @@ func TestDBExportImportPinning(t *testing.T) { Logger: testutil.NewLogger(t), ReserveCapacity: node.ReserveCapacity, }, dir2) - addresses, err = db2.Pins() + addresses, err := db2.Pins() if err != nil { t.Fatal(err) } diff --git a/pkg/node/node.go b/pkg/node/node.go index 36cd9361f73..78bdd17b87c 100644 --- a/pkg/node/node.go +++ b/pkg/node/node.go @@ -184,8 +184,8 @@ const ( minPaymentThreshold = 2 * refreshRate // minimal accepted payment threshold of full nodes maxPaymentThreshold = 24 * refreshRate // maximal accepted payment threshold of full nodes mainnetNetworkID = uint64(1) // - ReserveCapacity = 4_194_304 // 2^22 chunks - reserveWakeUpDuration = 30 * time.Minute // time to wait before waking up reserveWorker + ReserveCapacity = 16384 // 2^14 chunks + reserveWakeUpDuration = 15 * time.Minute // time to wait before waking up reserveWorker reserveTreshold = ReserveCapacity * 5 / 10 reserveMinimumRadius = 0 ) diff --git a/pkg/node/statestore.go b/pkg/node/statestore.go index 5fdef7dc418..69df92210f8 100644 --- a/pkg/node/statestore.go +++ b/pkg/node/statestore.go @@ -33,10 +33,7 @@ func InitStateStore(logger log.Logger, dataDir string, cacheCapacity uint64) (st return nil, nil, err } - caching, err := cache.Wrap(ldb, int(cacheCapacity)) - if err != nil { - return nil, nil, err - } + caching := cache.MustWrap(ldb, int(cacheCapacity)) stateStore, err := storeadapter.NewStateStorerAdapter(caching) return stateStore, caching, err diff --git a/pkg/pullsync/pullsync.go b/pkg/pullsync/pullsync.go index 42691cbbe0c..12fba29f4c0 100644 --- a/pkg/pullsync/pullsync.go +++ b/pkg/pullsync/pullsync.go @@ -12,7 +12,6 @@ import ( "fmt" "io" "math" - "strconv" "sync/atomic" "time" @@ -27,7 +26,6 @@ import ( "github.com/ethersphere/bee/pkg/storage" storer "github.com/ethersphere/bee/pkg/storer" "github.com/ethersphere/bee/pkg/swarm" - "resenje.org/multex" "resenje.org/singleflight" ) @@ -72,7 +70,6 @@ type Syncer struct { validStamp postage.ValidStampFn intervalsSF singleflight.Group[string, *collectAddrsResult] syncInProgress atomic.Int32 - binLock *multex.Multex maxPage uint64 @@ -98,7 +95,6 @@ func New( logger: logger.WithName(loggerName).Register(), quit: make(chan struct{}), maxPage: maxPage, - binLock: multex.New(), } } @@ -261,13 +257,6 @@ func (s *Syncer) Sync(ctx context.Context, peer swarm.Address, bin uint8, start s.metrics.Delivered.Add(float64(len(chunksToPut))) s.metrics.LastReceived.WithLabelValues(fmt.Sprintf("%d", bin)).Add(float64(len(chunksToPut))) - // if we have parallel sync workers for the same bin, we need to rate limit them - // in order to not overload the storage with unnecessary requests as there is - // a chance that the same chunk is being synced by multiple workers. - key := strconv.Itoa(int(bin)) - s.binLock.Lock(key) - defer s.binLock.Unlock(key) - for _, c := range chunksToPut { if err := s.store.ReservePutter().Put(ctx, c); err != nil { // in case of these errors, no new items are added to the storage, so it diff --git a/pkg/statestore/storeadapter/migration.go b/pkg/statestore/storeadapter/migration.go index 171f31ccf57..db856e7f569 100644 --- a/pkg/statestore/storeadapter/migration.go +++ b/pkg/statestore/storeadapter/migration.go @@ -11,20 +11,20 @@ import ( "github.com/ethersphere/bee/pkg/storage/migration" ) -func allSteps() migration.Steps { +func allSteps(st storage.BatchStore) migration.Steps { return map[uint64]migration.StepFn{ - 1: epochMigration, - 2: deletePrefix("sync_interval"), - 3: deletePrefix("sync_interval"), - 4: deletePrefix("blocklist"), - 5: deletePrefix("batchstore"), - 6: deletePrefix("sync_interval"), - 7: deletePrefix("sync_interval"), + 1: epochMigration(st), + 2: deletePrefix(st, "sync_interval"), + 3: deletePrefix(st, "sync_interval"), + 4: deletePrefix(st, "blocklist"), + 5: deletePrefix(st, "batchstore"), + 6: deletePrefix(st, "sync_interval"), + 7: deletePrefix(st, "sync_interval"), } } -func deletePrefix(prefix string) migration.StepFn { - return func(s storage.BatchedStore) error { +func deletePrefix(s storage.BatchStore, prefix string) migration.StepFn { + return func() error { store := &StateStorerAdapter{s} return store.Iterate(prefix, func(key, val []byte) (stop bool, err error) { return false, store.Delete(string(key)) @@ -32,37 +32,40 @@ func deletePrefix(prefix string) migration.StepFn { } } -func epochMigration(s storage.BatchedStore) error { +func epochMigration(s storage.BatchStore) migration.StepFn { - var deleteEntries = []string{ - "statestore_schema", - "tags", - "sync_interval", - "kademlia-counters", - "addressbook", - "batch", - } + return func() error { - return s.Iterate(storage.Query{ - Factory: func() storage.Item { return &rawItem{&proxyItem{obj: []byte(nil)}} }, - }, func(res storage.Result) (stop bool, err error) { - if strings.HasPrefix(res.ID, stateStoreNamespace) { - return false, nil + var deleteEntries = []string{ + "statestore_schema", + "tags", + "sync_interval", + "kademlia-counters", + "addressbook", + "batch", } - for _, e := range deleteEntries { - if strings.HasPrefix(res.ID, e) { - _ = s.Delete(&rawItem{&proxyItem{key: res.ID}}) + + return s.Iterate(storage.Query{ + Factory: func() storage.Item { return &rawItem{&proxyItem{obj: []byte(nil)}} }, + }, func(res storage.Result) (stop bool, err error) { + if strings.HasPrefix(res.ID, stateStoreNamespace) { return false, nil } - } + for _, e := range deleteEntries { + if strings.HasPrefix(res.ID, e) { + _ = s.Delete(&rawItem{&proxyItem{key: res.ID}}) + return false, nil + } + } - item := res.Entry.(*rawItem) - item.key = res.ID - item.ns = stateStoreNamespace - if err := s.Put(item); err != nil { - return true, err - } - _ = s.Delete(&rawItem{&proxyItem{key: res.ID}}) - return false, nil - }) + item := res.Entry.(*rawItem) + item.key = res.ID + item.ns = stateStoreNamespace + if err := s.Put(item); err != nil { + return true, err + } + _ = s.Delete(&rawItem{&proxyItem{key: res.ID}}) + return false, nil + }) + } } diff --git a/pkg/statestore/storeadapter/storeadapter.go b/pkg/statestore/storeadapter/storeadapter.go index 03327e83cdd..e34ef923792 100644 --- a/pkg/statestore/storeadapter/storeadapter.go +++ b/pkg/statestore/storeadapter/storeadapter.go @@ -229,8 +229,8 @@ func (s *StateStorerAdapter) deleteKeys(keys []string) error { } // NewStateStorerAdapter creates a new StateStorerAdapter. -func NewStateStorerAdapter(storage storage.BatchedStore) (*StateStorerAdapter, error) { - err := migration.Migrate(storage, "migration", allSteps()) +func NewStateStorerAdapter(storage storage.BatchStore) (*StateStorerAdapter, error) { + err := migration.Migrate(storage, "migration", allSteps(storage)) if err != nil { return nil, err } diff --git a/pkg/storage/batch.go b/pkg/storage/batch.go index d159f7ad646..7a5bce20400 100644 --- a/pkg/storage/batch.go +++ b/pkg/storage/batch.go @@ -28,5 +28,5 @@ type Batch interface { // Batcher specifies a constructor for creating new batches. type Batcher interface { // Batch returns a new Batch. - Batch(context.Context) (Batch, error) + Batch(context.Context) Batch } diff --git a/pkg/storage/cache/cache.go b/pkg/storage/cache/cache.go index 880a408f7aa..fa1647ae673 100644 --- a/pkg/storage/cache/cache.go +++ b/pkg/storage/cache/cache.go @@ -5,8 +5,6 @@ package cache import ( - "errors" - "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage/storageutil" lru "github.com/hashicorp/golang-lru/v2" @@ -17,12 +15,12 @@ func key(key storage.Key) string { return storageutil.JoinFields(key.Namespace(), key.ID()) } -var _ storage.BatchedStore = (*Cache)(nil) +var _ storage.BatchStore = (*Cache)(nil) // Cache is a wrapper around a storage.Store that adds a layer // of in-memory caching for the Get and Has operations. type Cache struct { - storage.BatchedStore + storage.BatchStore lru *lru.Cache[string, []byte] metrics metrics @@ -31,11 +29,7 @@ type Cache struct { // Wrap adds a layer of in-memory caching to storage.Reader Get and Has operations. // It returns an error if the capacity is less than or equal to zero or if the // given store implements storage.Tx -func Wrap(store storage.BatchedStore, capacity int) (*Cache, error) { - if _, ok := store.(storage.Tx); ok { - return nil, errors.New("cache should not be used with transactions") - } - +func Wrap(store storage.BatchStore, capacity int) (*Cache, error) { lru, err := lru.New[string, []byte](capacity) if err != nil { return nil, err @@ -45,7 +39,7 @@ func Wrap(store storage.BatchedStore, capacity int) (*Cache, error) { } // MustWrap is like Wrap but panics on error. -func MustWrap(store storage.BatchedStore, capacity int) *Cache { +func MustWrap(store storage.BatchStore, capacity int) *Cache { c, err := Wrap(store, capacity) if err != nil { panic(err) @@ -72,7 +66,7 @@ func (c *Cache) Get(i storage.Item) error { return i.Unmarshal(val) } - if err := c.BatchedStore.Get(i); err != nil { + if err := c.BatchStore.Get(i); err != nil { return err } @@ -93,7 +87,7 @@ func (c *Cache) Has(k storage.Key) (bool, error) { } c.metrics.CacheMiss.Inc() - return c.BatchedStore.Has(k) + return c.BatchStore.Has(k) } // Put implements storage.Store interface. @@ -101,12 +95,12 @@ func (c *Cache) Has(k storage.Key) (bool, error) { // call to Put and Has will be able to retrieve the item from cache. func (c *Cache) Put(i storage.Item) error { c.add(i) - return c.BatchedStore.Put(i) + return c.BatchStore.Put(i) } // Delete implements storage.Store interface. // On a call it also removes the item from the cache. func (c *Cache) Delete(i storage.Item) error { _ = c.lru.Remove(key(i)) - return c.BatchedStore.Delete(i) + return c.BatchStore.Delete(i) } diff --git a/pkg/storage/chunkstore.go b/pkg/storage/chunkstore.go index aec30e6afc9..f95d351ad0f 100644 --- a/pkg/storage/chunkstore.go +++ b/pkg/storage/chunkstore.go @@ -6,8 +6,6 @@ package storage import ( "context" - "fmt" - "io" "github.com/ethersphere/bee/pkg/swarm" ) @@ -68,7 +66,6 @@ type ChunkGetterDeleter interface { } type ChunkStore interface { - io.Closer Getter Putter Deleter @@ -82,53 +79,3 @@ type ReadOnlyChunkStore interface { Getter Hasser } - -type SizeReporter interface { - Size() (uint64, error) - Capacity() uint64 -} - -// Descriptor holds information required for Pull syncing. This struct -// is provided by subscribing to pull index. -type Descriptor struct { - Address swarm.Address - BinID uint64 -} - -func (d *Descriptor) String() string { - if d == nil { - return "" - } - return fmt.Sprintf("%s bin id %v", d.Address, d.BinID) -} - -type PullSubscriber interface { - SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan Descriptor, closed <-chan struct{}, stop func()) -} - -type PushSubscriber interface { - SubscribePush(ctx context.Context) (c <-chan swarm.Chunk, stop func()) -} - -type ChunkState = int - -const ( - // ChunkSent is used by the pusher component to notify about successful push of chunk from - // the node. A chunk could be retried on failure so, this sent count is maintained to - // understand how many attempts were made by the node while pushing. The attempts are - // registered only when an actual request was sent from this node. - ChunkSent ChunkState = iota - // ChunkStored is used by the pusher component to notify that the uploader node is - // the closest node and has stored the chunk. - ChunkStored - // ChunkSynced is used by the pusher component to notify that the chunk is synced to the - // network. This is reported when a valid receipt was received after the chunk was - // pushed. - ChunkSynced - ChunkCouldNotSync -) - -// PushReporter is used to report chunk state. -type PushReporter interface { - Report(context.Context, swarm.Chunk, ChunkState) error -} diff --git a/pkg/storage/inmemchunkstore/transaction.go b/pkg/storage/inmemchunkstore/transaction.go deleted file mode 100644 index 5afaeef9386..00000000000 --- a/pkg/storage/inmemchunkstore/transaction.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2022 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package inmemchunkstore - -import ( - "context" - "errors" - "fmt" - - "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/swarm" -) - -var _ storage.TxChunkStore = (*TxChunkStore)(nil) - -// TxChunkStore is an implementation of in-memory Store -// where all Store operations are done in a transaction. -type TxChunkStore struct { - *storage.TxChunkStoreBase - - // Bookkeeping of invasive operations executed - // on the ChunkStore to support rollback functionality. - revOps storage.TxRevertOpStore[swarm.Address, swarm.Chunk] -} - -// release releases the TxStore transaction associated resources. -func (s *TxChunkStore) release() { - s.TxChunkStoreBase.ChunkStore = nil - s.revOps = nil -} - -// Put implements the Store interface. -func (s *TxChunkStore) Put(ctx context.Context, chunk swarm.Chunk) (err error) { - err = s.TxChunkStoreBase.Put(ctx, chunk) - if err == nil { - err = s.revOps.Append(&storage.TxRevertOp[swarm.Address, swarm.Chunk]{ - Origin: storage.PutOp, - ObjectID: chunk.Address().String(), - Key: chunk.Address(), - }) - } - return err -} - -// Delete implements the Store interface. -func (s *TxChunkStore) Delete(ctx context.Context, addr swarm.Address) error { - chunk, err := s.Get(ctx, addr) - if err != nil { - return err - } - err = s.TxChunkStoreBase.Delete(ctx, addr) - if err == nil { - err = s.revOps.Append(&storage.TxRevertOp[swarm.Address, swarm.Chunk]{ - Origin: storage.DeleteOp, - ObjectID: addr.String(), - Val: chunk, - }) - } - return err -} - -// Commit implements the Tx interface. -func (s *TxChunkStore) Commit() error { - defer s.release() - - if err := s.TxState.Done(); err != nil { - return err - } - if err := s.revOps.Clean(); err != nil { - return fmt.Errorf("inmemchunkstore: unable to clean revert operations: %w", err) - } - return nil -} - -// Rollback implements the Tx interface. -func (s *TxChunkStore) Rollback() error { - defer s.release() - - if err := s.TxChunkStoreBase.Rollback(); err != nil { - return fmt.Errorf("inmemchunkstore: unable to rollback: %w", err) - } - - if err := s.revOps.Revert(); err != nil { - return fmt.Errorf("inmemchunkstore: unable to revert operations: %w", err) - } - return nil -} - -// NewTx implements the TxStore interface. -func (s *TxChunkStore) NewTx(state *storage.TxState) storage.TxChunkStore { - if s.ChunkStore == nil { - panic(errors.New("inmemchunkstore: nil store")) - } - - return &TxChunkStore{ - TxChunkStoreBase: &storage.TxChunkStoreBase{ - TxState: state, - ChunkStore: s.ChunkStore, - }, - revOps: storage.NewInMemTxRevertOpStore( - map[storage.TxOpCode]storage.TxRevertFn[swarm.Address, swarm.Chunk]{ - storage.PutOp: func(address swarm.Address, _ swarm.Chunk) error { - return s.ChunkStore.Delete(context.Background(), address) - }, - storage.DeleteOp: func(_ swarm.Address, chunk swarm.Chunk) error { - return s.ChunkStore.Put(context.Background(), chunk) - }, - }, - ), - } -} - -// NewTxChunkStore returns a new TxChunkStore instance backed by the given chunk store. -func NewTxChunkStore(store storage.ChunkStore) *TxChunkStore { - return &TxChunkStore{ - TxChunkStoreBase: &storage.TxChunkStoreBase{ChunkStore: store}, - revOps: new(storage.NoOpTxRevertOpStore[swarm.Address, swarm.Chunk]), - } -} diff --git a/pkg/storage/inmemchunkstore/transaction_test.go b/pkg/storage/inmemchunkstore/transaction_test.go deleted file mode 100644 index 646298075f7..00000000000 --- a/pkg/storage/inmemchunkstore/transaction_test.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2022 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package inmemchunkstore_test - -import ( - "testing" - - "github.com/ethersphere/bee/pkg/storage/inmemchunkstore" - "github.com/ethersphere/bee/pkg/storage/storagetest" -) - -func TestTxChunkStore(t *testing.T) { - t.Parallel() - - storagetest.TestTxChunkStore(t, inmemchunkstore.NewTxChunkStore(inmemchunkstore.New())) -} diff --git a/pkg/storage/inmemstore/inmembatch.go b/pkg/storage/inmemstore/inmembatch.go index 309316e3c6f..6720037c73d 100644 --- a/pkg/storage/inmemstore/inmembatch.go +++ b/pkg/storage/inmemstore/inmembatch.go @@ -38,12 +38,12 @@ type Batch struct { } // Batch implements storage.BatchedStore interface Batch method. -func (s *Store) Batch(ctx context.Context) (storage.Batch, error) { +func (s *Store) Batch(ctx context.Context) storage.Batch { return &Batch{ ctx: ctx, ops: make(map[string]batchOp), store: s, - }, nil + } } // Put implements storage.Batch interface Put method. diff --git a/pkg/storage/inmemstore/transaction.go b/pkg/storage/inmemstore/transaction.go deleted file mode 100644 index b0d600444bc..00000000000 --- a/pkg/storage/inmemstore/transaction.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2022 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package inmemstore - -import ( - "context" - "errors" - "fmt" - "sync" - - "github.com/ethersphere/bee/pkg/storage" -) - -var ( - _ storage.TxStore = (*TxStore)(nil) - _ storage.Batcher = (*TxStore)(nil) -) - -func put( - reader storage.Reader, - writer storage.Writer, - item storage.Item, -) (*storage.TxRevertOp[storage.Key, storage.Item], error) { - prev := item.Clone() - var reverseOp *storage.TxRevertOp[storage.Key, storage.Item] - switch err := reader.Get(prev); { - case errors.Is(err, storage.ErrNotFound): - reverseOp = &storage.TxRevertOp[storage.Key, storage.Item]{ - Origin: storage.PutCreateOp, - ObjectID: item.String(), - Val: item, - } - case err != nil: - return nil, err - default: - reverseOp = &storage.TxRevertOp[storage.Key, storage.Item]{ - Origin: storage.PutUpdateOp, - ObjectID: prev.String(), - Val: prev, - } - } - - err := writer.Put(item) - if err == nil { - return reverseOp, nil - } - return nil, err -} - -func del( - reader storage.Reader, - writer storage.Writer, - item storage.Item, -) (*storage.TxRevertOp[storage.Key, storage.Item], error) { - prev := item.Clone() - var reverseOp *storage.TxRevertOp[storage.Key, storage.Item] - if err := reader.Get(prev); err == nil { - reverseOp = &storage.TxRevertOp[storage.Key, storage.Item]{ - Origin: storage.DeleteOp, - ObjectID: item.String(), - Val: prev, - } - } - - err := writer.Delete(item) - if err == nil { - return reverseOp, nil - } - return nil, err -} - -// txBatch is a batch that is used in a transaction. -type txBatch struct { - batch storage.Batch - store *TxStore - revOpsMu sync.Mutex - revOps []*storage.TxRevertOp[storage.Key, storage.Item] - onCommit func(revOps ...*storage.TxRevertOp[storage.Key, storage.Item]) error -} - -// Put implements the Batch interface. -func (b *txBatch) Put(item storage.Item) error { - if err := b.store.IsDone(); err != nil { - return err - } - - reverseOp, err := put(b.store, b.batch, item) - if err == nil && reverseOp != nil { - b.revOpsMu.Lock() - b.revOps = append(b.revOps, reverseOp) - b.revOpsMu.Unlock() - } - return err -} - -// Delete implements the Batch interface. -func (b *txBatch) Delete(item storage.Item) error { - if err := b.store.IsDone(); err != nil { - return err - } - - reverseOp, err := del(b.store, b.batch, item) - if err == nil && reverseOp != nil { - b.revOpsMu.Lock() - b.revOps = append(b.revOps, reverseOp) - b.revOpsMu.Unlock() - } - return err -} - -// Commit implements the Batch interface. -func (b *txBatch) Commit() error { - if err := b.batch.Commit(); err != nil { - return err - } - b.revOpsMu.Lock() - defer b.revOpsMu.Unlock() - defer func() { - b.revOps = nil - }() - return b.onCommit(b.revOps...) -} - -// TxStore is an implementation of in-memory Store -// where all Store operations are done in a transaction. -type TxStore struct { - *storage.TxStoreBase - - // Bookkeeping of invasive operations executed - // on the Store to support rollback functionality. - revOps storage.TxRevertOpStore[storage.Key, storage.Item] -} - -// release releases the TxStore transaction associated resources. -func (s *TxStore) release() { - s.TxStoreBase.BatchedStore = nil - s.revOps = nil -} - -// Put implements the Store interface. -func (s *TxStore) Put(item storage.Item) error { - if err := s.IsDone(); err != nil { - return err - } - - reverseOp, err := put(s.TxStoreBase, s.TxStoreBase, item) - if err == nil { - err = s.revOps.Append(reverseOp) - } - return err -} - -// Delete implements the Store interface. -func (s *TxStore) Delete(item storage.Item) error { - if err := s.IsDone(); err != nil { - return err - } - - reverseOp, err := del(s.TxStoreBase, s.TxStoreBase, item) - if err == nil { - err = s.revOps.Append(reverseOp) - } - return err -} - -// Commit implements the Tx interface. -func (s *TxStore) Commit() error { - defer s.release() - - if err := s.TxState.Done(); err != nil { - return err - } - if err := s.revOps.Clean(); err != nil { - return fmt.Errorf("inmemstore: unable to clean revert operations: %w", err) - } - return nil -} - -// Rollback implements the Tx interface. -func (s *TxStore) Rollback() error { - defer s.release() - - if err := s.TxStoreBase.Rollback(); err != nil { - return fmt.Errorf("inmemstore: unable to rollback: %w", err) - } - - if err := s.revOps.Revert(); err != nil { - return fmt.Errorf("inmemstore: unable to revert operations: %w", err) - } - return nil -} - -// Batch implements the Batcher interface. -func (s *TxStore) Batch(ctx context.Context) (storage.Batch, error) { - batch, err := s.TxStoreBase.BatchedStore.Batch(ctx) - if err != nil { - return nil, err - } - - return &txBatch{ - batch: batch, - store: s, - onCommit: func(revOps ...*storage.TxRevertOp[storage.Key, storage.Item]) error { - return s.revOps.Append(revOps...) - }, - }, nil -} - -// NewTx implements the TxStore interface. -func (s *TxStore) NewTx(state *storage.TxState) storage.TxStore { - if s.BatchedStore == nil { - panic(errors.New("inmemstore: nil store")) - } - - return &TxStore{ - TxStoreBase: &storage.TxStoreBase{ - TxState: state, - BatchedStore: s.BatchedStore, - }, - revOps: storage.NewInMemTxRevertOpStore( - map[storage.TxOpCode]storage.TxRevertFn[storage.Key, storage.Item]{ - storage.PutCreateOp: func(_ storage.Key, item storage.Item) error { - return s.BatchedStore.Delete(item) - }, - storage.PutUpdateOp: func(_ storage.Key, item storage.Item) error { - return s.BatchedStore.Put(item) - }, - storage.DeleteOp: func(_ storage.Key, item storage.Item) error { - return s.BatchedStore.Put(item) - }, - }, - ), - } -} - -// NewTxStore returns a new TxStore instance backed by the given store. -func NewTxStore(store storage.BatchedStore) *TxStore { - return &TxStore{ - TxStoreBase: &storage.TxStoreBase{BatchedStore: store}, - revOps: new(storage.NoOpTxRevertOpStore[storage.Key, storage.Item]), - } -} diff --git a/pkg/storage/inmemstore/transaction_test.go b/pkg/storage/inmemstore/transaction_test.go deleted file mode 100644 index 552088fa311..00000000000 --- a/pkg/storage/inmemstore/transaction_test.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2022 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package inmemstore_test - -import ( - "testing" - - "github.com/ethersphere/bee/pkg/storage/inmemstore" - "github.com/ethersphere/bee/pkg/storage/storagetest" -) - -func TestTxStore(t *testing.T) { - t.Parallel() - - storagetest.TestTxStore(t, inmemstore.NewTxStore(inmemstore.New())) -} diff --git a/pkg/storage/leveldbstore/batch.go b/pkg/storage/leveldbstore/batch.go index 96bbf2dbf49..392cdc6efd3 100644 --- a/pkg/storage/leveldbstore/batch.go +++ b/pkg/storage/leveldbstore/batch.go @@ -14,12 +14,12 @@ import ( ) // Batch implements storage.BatchedStore interface Batch method. -func (s *Store) Batch(ctx context.Context) (storage.Batch, error) { +func (s *Store) Batch(ctx context.Context) storage.Batch { return &Batch{ ctx: ctx, batch: new(ldb.Batch), store: s, - }, nil + } } type Batch struct { diff --git a/pkg/storage/leveldbstore/recovery.go b/pkg/storage/leveldbstore/recovery.go deleted file mode 100644 index f52701275f0..00000000000 --- a/pkg/storage/leveldbstore/recovery.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2023 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package leveldbstore - -import ( - "fmt" - - "github.com/ethersphere/bee/pkg/log" - "github.com/ethersphere/bee/pkg/storage" - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/opt" -) - -var _ storage.Item = (*pendingTx)(nil) - -// pendingTx is a storage.Item that holds a batch of operations. -type pendingTx struct { - storage.Item - - val *leveldb.Batch -} - -// Namespace implements storage.Item. -func (p *pendingTx) Namespace() string { - return "pending-indexstore-tx" -} - -// Unmarshal implements storage.Item. -func (p *pendingTx) Unmarshal(bytes []byte) error { - p.val = new(leveldb.Batch) - return p.val.Load(bytes) -} - -// Recover attempts to recover from a previous -// crash by reverting all uncommitted transactions. -func (s *TxStore) Recover() error { - logger := log.NewLogger("node").WithName("tx_store_recovery").Register() // "node" - copies the node.LoggerName in order to avoid circular import. - - batch := new(leveldb.Batch) - - logger.Info("checking for uncommitted transactions") - err := s.Iterate(storage.Query{ - Factory: func() storage.Item { return new(pendingTx) }, - ItemProperty: storage.QueryItem, - }, func(r storage.Result) (bool, error) { - logger.Info("uncommitted transaction found", "id", r.ID) - if err := r.Entry.(*pendingTx).val.Replay(batch); err != nil { - logger.Debug("unable to replay uncommitted transaction", "id", r.ID, "err", err) - return true, fmt.Errorf("unable to replay batch for %s: %w", r.ID, err) - } - batch.Delete(id(r.ID)) - return false, nil - }) - if err != nil { - return fmt.Errorf("leveldbstore: recovery: iteration failed: %w", err) - } - - if batch.Len() == 0 { - logger.Info("no uncommitted transactions found") - return nil - } - - logger.Info("reversing uncommitted transactions", "count", batch.Len()) - if err := s.BatchedStore.(*Store).db.Write(batch, &opt.WriteOptions{Sync: true}); err != nil { - return fmt.Errorf("leveldbstore: recovery: unable to write batch: %w", err) - } - logger.Info("recovery successful") - return nil -} diff --git a/pkg/storage/leveldbstore/recovery_test.go b/pkg/storage/leveldbstore/recovery_test.go deleted file mode 100644 index f3d504f6e62..00000000000 --- a/pkg/storage/leveldbstore/recovery_test.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2023 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package leveldbstore_test - -import ( - "context" - "encoding/json" - "fmt" - "slices" - "testing" - - "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storage/leveldbstore" - "github.com/ethersphere/bee/pkg/storage/storageutil" - "github.com/google/go-cmp/cmp" -) - -type obj struct { - Key string - Val []byte -} - -func (o *obj) ID() string { return o.Key } -func (_ *obj) Namespace() string { return "obj" } -func (o *obj) Marshal() ([]byte, error) { return json.Marshal(o) } -func (o *obj) Unmarshal(buf []byte) error { return json.Unmarshal(buf, o) } -func (o *obj) Clone() storage.Item { return &obj{Key: o.Key, Val: slices.Clone(o.Val)} } -func (o *obj) String() string { return storageutil.JoinFields(o.Namespace(), o.ID()) } - -func TestTxStore_Recovery(t *testing.T) { - t.Parallel() - - store, err := leveldbstore.New(t.TempDir(), nil) - if err != nil { - t.Fatal(err) - } - txStore := leveldbstore.NewTxStore(store) - t.Cleanup(func() { - if err := txStore.Close(); err != nil { - t.Fatalf("close: %v", err) - } - }) - - objects := make([]*obj, 10) - for i := range objects { - objects[i] = &obj{ - Key: fmt.Sprintf("Key-%d", i), - Val: []byte(fmt.Sprintf("value-%d", i)), - } - } - - // Sore half of the objects within a transaction and commit it. - tx := txStore.NewTx(storage.NewTxState(context.TODO())) - for i := 0; i < len(objects)/2; i++ { - if err := tx.Put(objects[i]); err != nil { - t.Fatalf("put %d: %v", i, err) - } - } - if err := tx.Commit(); err != nil { - t.Fatalf("commit: %v", err) - } - - // Delete the first stored half of the objects and store - // the other half and don't commit or revert the transaction. - tx = txStore.NewTx(storage.NewTxState(context.TODO())) - for i := 0; i < len(objects)/2; i++ { - if err := tx.Delete(objects[i]); err != nil { - t.Fatalf("put %d: %v", i, err) - } - } - for i := len(objects) / 2; i < len(objects); i++ { - if err := tx.Put(objects[i]); err != nil { - t.Fatalf("put %d: %v", i, err) - } - } - // Do not commit or rollback the transaction as - // if the process crashes and attempt to recover. - if err := txStore.Recover(); err != nil { - t.Fatalf("recover: %v", err) - } - - // Check that the store is in the state we expect. - var ( - have []*obj - want = objects[:len(objects)/2] - ) - if err := txStore.Iterate( - storage.Query{ - Factory: func() storage.Item { return new(obj) }, - ItemProperty: storage.QueryItem, - }, - func(r storage.Result) (bool, error) { - have = append(have, r.Entry.(*obj)) - return false, nil - }, - ); err != nil { - t.Fatalf("iterate: %v", err) - } - if diff := cmp.Diff(want, have); diff != "" { - t.Fatalf("recovered store data mismatch (-want +have):\n%s", diff) - } -} diff --git a/pkg/storage/leveldbstore/transaction.go b/pkg/storage/leveldbstore/transaction.go deleted file mode 100644 index 9a18b904ff2..00000000000 --- a/pkg/storage/leveldbstore/transaction.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright 2022 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package leveldbstore - -import ( - "context" - "errors" - "fmt" - "sync" - - "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storage/storageutil" - "github.com/google/uuid" - "github.com/syndtr/goleveldb/leveldb" -) - -var _ storage.TxRevertOpStore[[]byte, []byte] = (*txRevertOpStore)(nil) - -// txRevertOpStore is a storage.TxRevertOpStore that -// stores revert operations in the LevelDB instance. -type txRevertOpStore struct { - id []byte - db *leveldb.DB - batch *leveldb.Batch - batchMu sync.Mutex - revOpsFn map[storage.TxOpCode]storage.TxRevertFn[[]byte, []byte] -} - -// Append implements storage.TxRevertOpStore. -func (s *txRevertOpStore) Append(ops ...*storage.TxRevertOp[[]byte, []byte]) error { - if s == nil || len(ops) == 0 { - return nil - } - - s.batchMu.Lock() - defer s.batchMu.Unlock() - - for _, op := range ops { - if op == nil { - continue - } - var errs error - if fn, ok := s.revOpsFn[op.Origin]; !ok { - errs = errors.Join(errs, fmt.Errorf( - "revert operation %q for object %s not found", - op.Origin, - op.ObjectID, - )) - } else if err := fn(op.Key, op.Val); err != nil { - errs = errors.Join(errs, fmt.Errorf( - "revert operation %q for object %s failed: %w", - op.Origin, - op.ObjectID, - err, - )) - } - if errs != nil { - return errs - } - } - return s.db.Put(s.id, s.batch.Dump(), nil) -} - -// Revert implements storage.TxRevertOpStore. -func (s *txRevertOpStore) Revert() error { - if s == nil { - return nil - } - - s.batchMu.Lock() - defer s.batchMu.Unlock() - defer s.batch.Reset() - - s.batch.Delete(s.id) - return s.db.Write(s.batch, nil) -} - -// Clean implements storage.TxRevertOpStore. -func (s *txRevertOpStore) Clean() error { - if s == nil { - return nil - } - - return s.db.Delete(s.id, nil) -} - -// txBatch is a batch that is used in a transaction. -type txBatch struct { - batch storage.Batch - store *TxStore - revOpsMu sync.Mutex - revOps []*storage.TxRevertOp[[]byte, []byte] - onCommit func(revOps ...*storage.TxRevertOp[[]byte, []byte]) error -} - -// Put implements the Batch interface. -func (b *txBatch) Put(item storage.Item) error { - if b.store.TxState == nil { - return b.batch.Put(item) - } - if err := b.store.IsDone(); err != nil { - return err - } - - reverseOp, err := put(b.store, b.batch, item) - if err == nil && reverseOp != nil { - b.revOpsMu.Lock() - b.revOps = append(b.revOps, reverseOp) - b.revOpsMu.Unlock() - } - return err -} - -// Delete implements the Batch interface. -func (b *txBatch) Delete(item storage.Item) error { - if b.store.TxState == nil { - return b.batch.Delete(item) - } - if err := b.store.IsDone(); err != nil { - return err - } - - reverseOp, err := del(b.store, b.batch, item) - if err == nil && reverseOp != nil { - b.revOpsMu.Lock() - b.revOps = append(b.revOps, reverseOp) - b.revOpsMu.Unlock() - } - return err -} - -// Commit implements the Batch interface. -func (b *txBatch) Commit() error { - if b.store.TxState == nil { - return b.batch.Commit() - } - if err := b.batch.Commit(); err != nil { - return err - } - b.revOpsMu.Lock() - defer b.revOpsMu.Unlock() - defer func() { - b.revOps = nil - }() - return b.onCommit(b.revOps...) -} - -var ( - _ storage.TxStore = (*TxStore)(nil) - _ storage.Batcher = (*TxStore)(nil) - _ storage.Recoverer = (*TxStore)(nil) -) - -func put( - reader storage.Reader, - writer storage.Writer, - item storage.Item, -) (*storage.TxRevertOp[[]byte, []byte], error) { - prev := item.Clone() - var reverseOp *storage.TxRevertOp[[]byte, []byte] - switch err := reader.Get(prev); { - case errors.Is(err, storage.ErrNotFound): - reverseOp = &storage.TxRevertOp[[]byte, []byte]{ - Origin: storage.PutCreateOp, - ObjectID: item.String(), - Key: key(item), - } - case err != nil: - return nil, err - default: - val, err := prev.Marshal() - if err != nil { - return nil, err - } - reverseOp = &storage.TxRevertOp[[]byte, []byte]{ - Origin: storage.PutUpdateOp, - ObjectID: prev.String(), - Key: key(prev), - Val: val, - } - } - - err := writer.Put(item) - if err == nil { - return reverseOp, nil - } - return nil, err -} - -func del( - reader storage.Reader, - writer storage.Writer, - item storage.Item, -) (*storage.TxRevertOp[[]byte, []byte], error) { - prev := item.Clone() - var reverseOp *storage.TxRevertOp[[]byte, []byte] - if err := reader.Get(prev); err == nil { - val, err := prev.Marshal() - if err != nil { - return nil, err - } - reverseOp = &storage.TxRevertOp[[]byte, []byte]{ - Origin: storage.DeleteOp, - ObjectID: item.String(), - Key: key(item), - Val: val, - } - } - - err := writer.Delete(item) - if err == nil { - return reverseOp, nil - } - return nil, err -} - -// TxStore is an implementation of in-memory Store -// where all Store operations are done in a transaction. -type TxStore struct { - *storage.TxStoreBase - - // Bookkeeping of invasive operations executed - // on the Store to support rollback functionality. - revOps storage.TxRevertOpStore[[]byte, []byte] -} - -// release releases the TxStore transaction associated resources. -func (s *TxStore) release() { - s.TxStoreBase.BatchedStore = nil - s.revOps = nil -} - -// Put implements the Store interface. -func (s *TxStore) Put(item storage.Item) error { - if s.TxState == nil { - return s.TxStoreBase.Put(item) - } - if err := s.IsDone(); err != nil { - return err - } - - reverseOp, err := put(s.TxStoreBase, s.TxStoreBase, item) - if err == nil && reverseOp != nil { - err = s.revOps.Append(reverseOp) - } - return err -} - -// Delete implements the Store interface. -func (s *TxStore) Delete(item storage.Item) error { - if s.TxState == nil { - return s.TxStoreBase.Delete(item) - } - if err := s.IsDone(); err != nil { - return err - } - - reverseOp, err := del(s.TxStoreBase, s.TxStoreBase, item) - if err == nil && reverseOp != nil { - err = s.revOps.Append(reverseOp) - } - return err -} - -// Commit implements the Tx interface. -func (s *TxStore) Commit() error { - if s.TxState == nil { - return nil - } - defer s.release() - - if err := s.TxState.Done(); err != nil { - return err - } - if err := s.revOps.Clean(); err != nil { - return fmt.Errorf("leveldbstore: unable to clean revert operations: %w", err) - } - return nil -} - -// Rollback implements the Tx interface. -func (s *TxStore) Rollback() error { - if s.TxState == nil { - return nil - } - defer s.release() - - if err := s.TxStoreBase.Rollback(); err != nil { - return fmt.Errorf("leveldbstore: unable to rollback: %w", err) - } - - if err := s.revOps.Revert(); err != nil { - return fmt.Errorf("leveldbstore: unable to revert operations: %w", err) - } - return nil -} - -// Batch implements the Batcher interface. -func (s *TxStore) Batch(ctx context.Context) (storage.Batch, error) { - batch, err := s.TxStoreBase.BatchedStore.Batch(ctx) - if err != nil { - return nil, err - } - - return &txBatch{ - batch: batch, - store: s, - onCommit: func(revOps ...*storage.TxRevertOp[[]byte, []byte]) error { - return s.revOps.Append(revOps...) - }, - }, nil -} - -// pendingTxNamespace exist for cashing the namespace of pendingTx -var pendingTxNamespace = new(pendingTx).Namespace() - -// id returns the key for the stored revert operations. -func id(uuid string) []byte { - return []byte(storageutil.JoinFields(pendingTxNamespace, uuid)) -} - -// NewTx implements the TxStore interface. -func (s *TxStore) NewTx(state *storage.TxState) storage.TxStore { - if s.BatchedStore == nil { - panic(errors.New("leveldbstore: nil store")) - } - - batch := new(leveldb.Batch) - return &TxStore{ - TxStoreBase: &storage.TxStoreBase{ - TxState: state, - BatchedStore: s.BatchedStore, - }, - revOps: &txRevertOpStore{ - id: id(uuid.NewString()), - db: s.BatchedStore.(*Store).db, - batch: batch, - revOpsFn: map[storage.TxOpCode]storage.TxRevertFn[[]byte, []byte]{ - storage.PutCreateOp: func(k, _ []byte) error { - batch.Delete(k) - return nil - }, - storage.PutUpdateOp: func(k, v []byte) error { - batch.Put(k, v) - return nil - }, - storage.DeleteOp: func(k, v []byte) error { - batch.Put(k, v) - return nil - }, - }, - }, - } -} - -// NewTxStore returns a new TxStore instance backed by the given store. -func NewTxStore(store storage.BatchedStore) *TxStore { - return &TxStore{TxStoreBase: &storage.TxStoreBase{BatchedStore: store}} -} diff --git a/pkg/storage/leveldbstore/transaction_test.go b/pkg/storage/leveldbstore/transaction_test.go deleted file mode 100644 index cc18e8f8632..00000000000 --- a/pkg/storage/leveldbstore/transaction_test.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2022 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package leveldbstore_test - -import ( - "testing" - - "github.com/ethersphere/bee/pkg/storage/leveldbstore" - "github.com/ethersphere/bee/pkg/storage/storagetest" -) - -func TestTxStore(t *testing.T) { - t.Parallel() - - store, err := leveldbstore.New(t.TempDir(), nil) - if err != nil { - t.Fatal(err) - } - storagetest.TestTxStore(t, leveldbstore.NewTxStore(store)) -} diff --git a/pkg/storage/metrics.go b/pkg/storage/metrics.go index 00c90e8b361..66cef61ab87 100644 --- a/pkg/storage/metrics.go +++ b/pkg/storage/metrics.go @@ -4,332 +4,332 @@ package storage -import ( - "context" - "errors" - "time" - - m "github.com/ethersphere/bee/pkg/metrics" - "github.com/ethersphere/bee/pkg/swarm" - "github.com/prometheus/client_golang/prometheus" -) - -// metrics groups repository related prometheus counters. -type metrics struct { - TxTotalDuration prometheus.Histogram - - IndexStoreCalls prometheus.CounterVec - IndexStoreCallsDuration prometheus.HistogramVec - - ChunkStoreCalls prometheus.CounterVec - ChunkStoreCallsDuration prometheus.HistogramVec -} - -// newMetrics is a convenient constructor for creating new metrics. -func newMetrics() metrics { - const subsystem = "storage" - - return metrics{ - TxTotalDuration: prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: m.Namespace, - Subsystem: subsystem, - Name: "tx_total_duration", - Help: "Total duration of transaction.", - }, - ), - IndexStoreCalls: *prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: m.Namespace, - Subsystem: subsystem, - Name: "index_store_calls", - Help: "Number of index store method calls.", - }, - []string{"method", "status"}, - ), - IndexStoreCallsDuration: *prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: m.Namespace, - Subsystem: subsystem, - Name: "index_store_calls_duration", - Help: "Duration of index store method calls.", - }, - []string{"method"}, - ), - ChunkStoreCalls: *prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: m.Namespace, - Subsystem: subsystem, - Name: "chunk_store_calls", - Help: "Number of chunk store method calls.", - }, - []string{"method", "status"}, - ), - ChunkStoreCallsDuration: *prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: m.Namespace, - Subsystem: subsystem, - Name: "chunk_store_calls_duration", - Help: "Duration of chunk store method calls.", - }, - []string{"method"}, - ), - } -} - -var _ TxStore = (*txIndexStoreWithMetrics)(nil) - -// txIndexStoreWithMetrics wraps TxStore and adds metrics. -type txIndexStoreWithMetrics struct { - TxStore - - metrics metrics -} - -// Commit implements the TxStore interface. -func (m txIndexStoreWithMetrics) Commit() error { - dur := captureDuration(time.Now()) - err := m.TxStore.Commit() - m.metrics.IndexStoreCallsDuration.WithLabelValues("Commit").Observe(dur()) - if err == nil { - m.metrics.IndexStoreCalls.WithLabelValues("Commit", "success").Inc() - } else { - m.metrics.IndexStoreCalls.WithLabelValues("Commit", "failure").Inc() - } - return err -} - -// Rollback implements the TxStore interface. -func (m txIndexStoreWithMetrics) Rollback() error { - dur := captureDuration(time.Now()) - err := m.TxStore.Rollback() - m.metrics.IndexStoreCallsDuration.WithLabelValues("Rollback").Observe(dur()) - if err == nil { - m.metrics.IndexStoreCalls.WithLabelValues("Rollback", "success").Inc() - } else { - m.metrics.IndexStoreCalls.WithLabelValues("Rollback", "failure").Inc() - } - return err -} - -// Close implements the TxStore interface. -func (m txIndexStoreWithMetrics) Close() error { - dur := captureDuration(time.Now()) - err := m.TxStore.Close() - m.metrics.IndexStoreCallsDuration.WithLabelValues("Close").Observe(dur()) - if err == nil { - m.metrics.IndexStoreCalls.WithLabelValues("Close", "success").Inc() - } else { - m.metrics.IndexStoreCalls.WithLabelValues("Close", "failure").Inc() - } - return err -} - -// Get implements the TxStore interface. -func (m txIndexStoreWithMetrics) Get(item Item) error { - dur := captureDuration(time.Now()) - err := m.TxStore.Get(item) - m.metrics.IndexStoreCallsDuration.WithLabelValues("Get").Observe(dur()) - if err == nil || errors.Is(err, ErrNotFound) { - m.metrics.IndexStoreCalls.WithLabelValues("Get", "success").Inc() - } else { - m.metrics.IndexStoreCalls.WithLabelValues("Get", "failure").Inc() - } - return err -} - -// Has implements the TxStore interface. -func (m txIndexStoreWithMetrics) Has(key Key) (bool, error) { - dur := captureDuration(time.Now()) - has, err := m.TxStore.Has(key) - m.metrics.IndexStoreCallsDuration.WithLabelValues("Has").Observe(dur()) - if err == nil || errors.Is(err, ErrNotFound) { - m.metrics.IndexStoreCalls.WithLabelValues("Has", "success").Inc() - } else { - m.metrics.IndexStoreCalls.WithLabelValues("Has", "failure").Inc() - } - return has, err -} - -// GetSize implements the TxStore interface. -func (m txIndexStoreWithMetrics) GetSize(key Key) (int, error) { - dur := captureDuration(time.Now()) - size, err := m.TxStore.GetSize(key) - m.metrics.IndexStoreCallsDuration.WithLabelValues("GetSize").Observe(dur()) - if err == nil || errors.Is(err, ErrNotFound) { - m.metrics.IndexStoreCalls.WithLabelValues("GetSize", "success").Inc() - } else { - m.metrics.IndexStoreCalls.WithLabelValues("GetSize", "failure").Inc() - } - return size, err -} - -// Iterate implements the TxStore interface. -func (m txIndexStoreWithMetrics) Iterate(query Query, fn IterateFn) error { - dur := captureDuration(time.Now()) - err := m.TxStore.Iterate(query, fn) - m.metrics.IndexStoreCallsDuration.WithLabelValues("Iterate").Observe(dur()) - if err == nil || errors.Is(err, ErrNotFound) { - m.metrics.IndexStoreCalls.WithLabelValues("Iterate", "success").Inc() - } else { - m.metrics.IndexStoreCalls.WithLabelValues("Iterate", "failure").Inc() - } - return err -} - -// Count implements the TxStore interface. -func (m txIndexStoreWithMetrics) Count(key Key) (int, error) { - dur := captureDuration(time.Now()) - cnt, err := m.TxStore.Count(key) - m.metrics.IndexStoreCallsDuration.WithLabelValues("Count").Observe(dur()) - if err == nil || errors.Is(err, ErrNotFound) { - m.metrics.IndexStoreCalls.WithLabelValues("Count", "success").Inc() - } else { - m.metrics.IndexStoreCalls.WithLabelValues("Count", "failure").Inc() - } - return cnt, err -} - -// Put implements the TxStore interface. -func (m txIndexStoreWithMetrics) Put(item Item) error { - dur := captureDuration(time.Now()) - err := m.TxStore.Put(item) - m.metrics.IndexStoreCallsDuration.WithLabelValues("Put").Observe(dur()) - if err == nil || errors.Is(err, ErrNotFound) { - m.metrics.IndexStoreCalls.WithLabelValues("Put", "success").Inc() - } else { - m.metrics.IndexStoreCalls.WithLabelValues("Put", "failure").Inc() - } - return err -} - -// Delete implements the TxStore interface. -func (m txIndexStoreWithMetrics) Delete(item Item) error { - dur := captureDuration(time.Now()) - err := m.TxStore.Delete(item) - m.metrics.IndexStoreCallsDuration.WithLabelValues("Delete").Observe(dur()) - if err == nil || errors.Is(err, ErrNotFound) { - m.metrics.IndexStoreCalls.WithLabelValues("Delete", "success").Inc() - } else { - m.metrics.IndexStoreCalls.WithLabelValues("Delete", "failure").Inc() - } - return err -} - -var _ TxChunkStore = (*txChunkStoreWithMetrics)(nil) - -// txChunkStoreWithMetrics wraps TxChunkStore and adds metrics. -type txChunkStoreWithMetrics struct { - TxChunkStore - - metrics metrics -} - -// Commit implements the TxChunkStore interface. -func (m txChunkStoreWithMetrics) Commit() error { - dur := captureDuration(time.Now()) - err := m.TxChunkStore.Commit() - m.metrics.ChunkStoreCallsDuration.WithLabelValues("Commit").Observe(dur()) - if err == nil { - m.metrics.ChunkStoreCalls.WithLabelValues("Commit", "success").Inc() - } else { - m.metrics.ChunkStoreCalls.WithLabelValues("Commit", "failure").Inc() - } - return err -} - -// Rollback implements the TxChunkStore interface. -func (m txChunkStoreWithMetrics) Rollback() error { - dur := captureDuration(time.Now()) - err := m.TxChunkStore.Rollback() - m.metrics.ChunkStoreCallsDuration.WithLabelValues("Rollback").Observe(dur()) - if err == nil { - m.metrics.ChunkStoreCalls.WithLabelValues("Rollback", "success").Inc() - } else { - m.metrics.ChunkStoreCalls.WithLabelValues("Rollback", "failure").Inc() - } - return err -} - -// Close implements the TxChunkStore interface. -func (m txChunkStoreWithMetrics) Close() error { - dur := captureDuration(time.Now()) - err := m.TxChunkStore.Close() - m.metrics.ChunkStoreCallsDuration.WithLabelValues("Close").Observe(dur()) - if err == nil { - m.metrics.ChunkStoreCalls.WithLabelValues("Close", "success").Inc() - } else { - m.metrics.ChunkStoreCalls.WithLabelValues("Close", "failure").Inc() - } - return err -} - -// Get implements the TxChunkStore interface. -func (m txChunkStoreWithMetrics) Get(ctx context.Context, address swarm.Address) (swarm.Chunk, error) { - dur := captureDuration(time.Now()) - chunk, err := m.TxChunkStore.Get(ctx, address) - m.metrics.ChunkStoreCallsDuration.WithLabelValues("Get").Observe(dur()) - if err == nil || errors.Is(err, ErrNotFound) { - m.metrics.ChunkStoreCalls.WithLabelValues("Get", "success").Inc() - } else { - m.metrics.ChunkStoreCalls.WithLabelValues("Get", "failure").Inc() - } - return chunk, err -} - -// Put implements the TxChunkStore interface. -func (m txChunkStoreWithMetrics) Put(ctx context.Context, chunk swarm.Chunk) error { - dur := captureDuration(time.Now()) - err := m.TxChunkStore.Put(ctx, chunk) - m.metrics.ChunkStoreCallsDuration.WithLabelValues("Put").Observe(dur()) - if err == nil || errors.Is(err, ErrNotFound) { - m.metrics.ChunkStoreCalls.WithLabelValues("Put", "success").Inc() - } else { - m.metrics.ChunkStoreCalls.WithLabelValues("Put", "failure").Inc() - } - return err -} - -// Delete implements the TxChunkStore interface. -func (m txChunkStoreWithMetrics) Delete(ctx context.Context, address swarm.Address) error { - dur := captureDuration(time.Now()) - err := m.TxChunkStore.Delete(ctx, address) - m.metrics.ChunkStoreCallsDuration.WithLabelValues("Delete").Observe(dur()) - if err == nil || errors.Is(err, ErrNotFound) { - m.metrics.ChunkStoreCalls.WithLabelValues("Delete", "success").Inc() - } else { - m.metrics.ChunkStoreCalls.WithLabelValues("Delete", "failure").Inc() - } - return err -} - -// Has implements the TxChunkStore interface. -func (m txChunkStoreWithMetrics) Has(ctx context.Context, address swarm.Address) (bool, error) { - dur := captureDuration(time.Now()) - has, err := m.TxChunkStore.Has(ctx, address) - m.metrics.ChunkStoreCallsDuration.WithLabelValues("Has").Observe(dur()) - if err == nil || errors.Is(err, ErrNotFound) { - m.metrics.ChunkStoreCalls.WithLabelValues("Has", "success").Inc() - } else { - m.metrics.ChunkStoreCalls.WithLabelValues("Has", "failure").Inc() - } - return has, err -} - -// Iterate implements the TxChunkStore interface. -func (m txChunkStoreWithMetrics) Iterate(ctx context.Context, fn IterateChunkFn) error { - dur := captureDuration(time.Now()) - err := m.TxChunkStore.Iterate(ctx, fn) - m.metrics.ChunkStoreCallsDuration.WithLabelValues("Iterate").Observe(dur()) - if err == nil || errors.Is(err, ErrNotFound) { - m.metrics.ChunkStoreCalls.WithLabelValues("Iterate", "success").Inc() - } else { - m.metrics.ChunkStoreCalls.WithLabelValues("Iterate", "failure").Inc() - } - return err -} - -// captureDuration returns a function that returns the duration since the given start. -func captureDuration(start time.Time) (elapsed func() float64) { - return func() float64 { return time.Since(start).Seconds() } -} +// import ( +// "context" +// "errors" +// "time" + +// m "github.com/ethersphere/bee/pkg/metrics" +// "github.com/ethersphere/bee/pkg/swarm" +// "github.com/prometheus/client_golang/prometheus" +// ) + +// // metrics groups repository related prometheus counters. +// type metrics struct { +// TxTotalDuration prometheus.Histogram + +// IndexStoreCalls prometheus.CounterVec +// IndexStoreCallsDuration prometheus.HistogramVec + +// ChunkStoreCalls prometheus.CounterVec +// ChunkStoreCallsDuration prometheus.HistogramVec +// } + +// // newMetrics is a convenient constructor for creating new metrics. +// func newMetrics() metrics { +// const subsystem = "storage" + +// return metrics{ +// TxTotalDuration: prometheus.NewHistogram( +// prometheus.HistogramOpts{ +// Namespace: m.Namespace, +// Subsystem: subsystem, +// Name: "tx_total_duration", +// Help: "Total duration of transaction.", +// }, +// ), +// IndexStoreCalls: *prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Namespace: m.Namespace, +// Subsystem: subsystem, +// Name: "index_store_calls", +// Help: "Number of index store method calls.", +// }, +// []string{"method", "status"}, +// ), +// IndexStoreCallsDuration: *prometheus.NewHistogramVec( +// prometheus.HistogramOpts{ +// Namespace: m.Namespace, +// Subsystem: subsystem, +// Name: "index_store_calls_duration", +// Help: "Duration of index store method calls.", +// }, +// []string{"method"}, +// ), +// ChunkStoreCalls: *prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Namespace: m.Namespace, +// Subsystem: subsystem, +// Name: "chunk_store_calls", +// Help: "Number of chunk store method calls.", +// }, +// []string{"method", "status"}, +// ), +// ChunkStoreCallsDuration: *prometheus.NewHistogramVec( +// prometheus.HistogramOpts{ +// Namespace: m.Namespace, +// Subsystem: subsystem, +// Name: "chunk_store_calls_duration", +// Help: "Duration of chunk store method calls.", +// }, +// []string{"method"}, +// ), +// } +// } + +// var _ TxStore = (*txIndexStoreWithMetrics)(nil) + +// // txIndexStoreWithMetrics wraps TxStore and adds metrics. +// type txIndexStoreWithMetrics struct { +// TxStore + +// metrics metrics +// } + +// // Commit implements the TxStore interface. +// func (m txIndexStoreWithMetrics) Commit() error { +// dur := captureDuration(time.Now()) +// err := m.TxStore.Commit() +// m.metrics.IndexStoreCallsDuration.WithLabelValues("Commit").Observe(dur()) +// if err == nil { +// m.metrics.IndexStoreCalls.WithLabelValues("Commit", "success").Inc() +// } else { +// m.metrics.IndexStoreCalls.WithLabelValues("Commit", "failure").Inc() +// } +// return err +// } + +// // Rollback implements the TxStore interface. +// func (m txIndexStoreWithMetrics) Rollback() error { +// dur := captureDuration(time.Now()) +// err := m.TxStore.Rollback() +// m.metrics.IndexStoreCallsDuration.WithLabelValues("Rollback").Observe(dur()) +// if err == nil { +// m.metrics.IndexStoreCalls.WithLabelValues("Rollback", "success").Inc() +// } else { +// m.metrics.IndexStoreCalls.WithLabelValues("Rollback", "failure").Inc() +// } +// return err +// } + +// // Close implements the TxStore interface. +// func (m txIndexStoreWithMetrics) Close() error { +// dur := captureDuration(time.Now()) +// err := m.TxStore.Close() +// m.metrics.IndexStoreCallsDuration.WithLabelValues("Close").Observe(dur()) +// if err == nil { +// m.metrics.IndexStoreCalls.WithLabelValues("Close", "success").Inc() +// } else { +// m.metrics.IndexStoreCalls.WithLabelValues("Close", "failure").Inc() +// } +// return err +// } + +// // Get implements the TxStore interface. +// func (m txIndexStoreWithMetrics) Get(item Item) error { +// dur := captureDuration(time.Now()) +// err := m.TxStore.Get(item) +// m.metrics.IndexStoreCallsDuration.WithLabelValues("Get").Observe(dur()) +// if err == nil || errors.Is(err, ErrNotFound) { +// m.metrics.IndexStoreCalls.WithLabelValues("Get", "success").Inc() +// } else { +// m.metrics.IndexStoreCalls.WithLabelValues("Get", "failure").Inc() +// } +// return err +// } + +// // Has implements the TxStore interface. +// func (m txIndexStoreWithMetrics) Has(key Key) (bool, error) { +// dur := captureDuration(time.Now()) +// has, err := m.TxStore.Has(key) +// m.metrics.IndexStoreCallsDuration.WithLabelValues("Has").Observe(dur()) +// if err == nil || errors.Is(err, ErrNotFound) { +// m.metrics.IndexStoreCalls.WithLabelValues("Has", "success").Inc() +// } else { +// m.metrics.IndexStoreCalls.WithLabelValues("Has", "failure").Inc() +// } +// return has, err +// } + +// // GetSize implements the TxStore interface. +// func (m txIndexStoreWithMetrics) GetSize(key Key) (int, error) { +// dur := captureDuration(time.Now()) +// size, err := m.TxStore.GetSize(key) +// m.metrics.IndexStoreCallsDuration.WithLabelValues("GetSize").Observe(dur()) +// if err == nil || errors.Is(err, ErrNotFound) { +// m.metrics.IndexStoreCalls.WithLabelValues("GetSize", "success").Inc() +// } else { +// m.metrics.IndexStoreCalls.WithLabelValues("GetSize", "failure").Inc() +// } +// return size, err +// } + +// // Iterate implements the TxStore interface. +// func (m txIndexStoreWithMetrics) Iterate(query Query, fn IterateFn) error { +// dur := captureDuration(time.Now()) +// err := m.TxStore.Iterate(query, fn) +// m.metrics.IndexStoreCallsDuration.WithLabelValues("Iterate").Observe(dur()) +// if err == nil || errors.Is(err, ErrNotFound) { +// m.metrics.IndexStoreCalls.WithLabelValues("Iterate", "success").Inc() +// } else { +// m.metrics.IndexStoreCalls.WithLabelValues("Iterate", "failure").Inc() +// } +// return err +// } + +// // Count implements the TxStore interface. +// func (m txIndexStoreWithMetrics) Count(key Key) (int, error) { +// dur := captureDuration(time.Now()) +// cnt, err := m.TxStore.Count(key) +// m.metrics.IndexStoreCallsDuration.WithLabelValues("Count").Observe(dur()) +// if err == nil || errors.Is(err, ErrNotFound) { +// m.metrics.IndexStoreCalls.WithLabelValues("Count", "success").Inc() +// } else { +// m.metrics.IndexStoreCalls.WithLabelValues("Count", "failure").Inc() +// } +// return cnt, err +// } + +// // Put implements the TxStore interface. +// func (m txIndexStoreWithMetrics) Put(item Item) error { +// dur := captureDuration(time.Now()) +// err := m.TxStore.Put(item) +// m.metrics.IndexStoreCallsDuration.WithLabelValues("Put").Observe(dur()) +// if err == nil || errors.Is(err, ErrNotFound) { +// m.metrics.IndexStoreCalls.WithLabelValues("Put", "success").Inc() +// } else { +// m.metrics.IndexStoreCalls.WithLabelValues("Put", "failure").Inc() +// } +// return err +// } + +// // Delete implements the TxStore interface. +// func (m txIndexStoreWithMetrics) Delete(item Item) error { +// dur := captureDuration(time.Now()) +// err := m.TxStore.Delete(item) +// m.metrics.IndexStoreCallsDuration.WithLabelValues("Delete").Observe(dur()) +// if err == nil || errors.Is(err, ErrNotFound) { +// m.metrics.IndexStoreCalls.WithLabelValues("Delete", "success").Inc() +// } else { +// m.metrics.IndexStoreCalls.WithLabelValues("Delete", "failure").Inc() +// } +// return err +// } + +// var _ TxChunkStore = (*txChunkStoreWithMetrics)(nil) + +// // txChunkStoreWithMetrics wraps TxChunkStore and adds metrics. +// type txChunkStoreWithMetrics struct { +// TxChunkStore + +// metrics metrics +// } + +// // Commit implements the TxChunkStore interface. +// func (m txChunkStoreWithMetrics) Commit() error { +// dur := captureDuration(time.Now()) +// err := m.TxChunkStore.Commit() +// m.metrics.ChunkStoreCallsDuration.WithLabelValues("Commit").Observe(dur()) +// if err == nil { +// m.metrics.ChunkStoreCalls.WithLabelValues("Commit", "success").Inc() +// } else { +// m.metrics.ChunkStoreCalls.WithLabelValues("Commit", "failure").Inc() +// } +// return err +// } + +// // Rollback implements the TxChunkStore interface. +// func (m txChunkStoreWithMetrics) Rollback() error { +// dur := captureDuration(time.Now()) +// err := m.TxChunkStore.Rollback() +// m.metrics.ChunkStoreCallsDuration.WithLabelValues("Rollback").Observe(dur()) +// if err == nil { +// m.metrics.ChunkStoreCalls.WithLabelValues("Rollback", "success").Inc() +// } else { +// m.metrics.ChunkStoreCalls.WithLabelValues("Rollback", "failure").Inc() +// } +// return err +// } + +// // Close implements the TxChunkStore interface. +// // func (m txChunkStoreWithMetrics) Close() error { +// // dur := captureDuration(time.Now()) +// // err := m.TxChunkStore.Close() +// // m.metrics.ChunkStoreCallsDuration.WithLabelValues("Close").Observe(dur()) +// // if err == nil { +// // m.metrics.ChunkStoreCalls.WithLabelValues("Close", "success").Inc() +// // } else { +// // m.metrics.ChunkStoreCalls.WithLabelValues("Close", "failure").Inc() +// // } +// // return err +// // } + +// // Get implements the TxChunkStore interface. +// func (m txChunkStoreWithMetrics) Get(ctx context.Context, address swarm.Address) (swarm.Chunk, error) { +// dur := captureDuration(time.Now()) +// chunk, err := m.TxChunkStore.Get(ctx, address) +// m.metrics.ChunkStoreCallsDuration.WithLabelValues("Get").Observe(dur()) +// if err == nil || errors.Is(err, ErrNotFound) { +// m.metrics.ChunkStoreCalls.WithLabelValues("Get", "success").Inc() +// } else { +// m.metrics.ChunkStoreCalls.WithLabelValues("Get", "failure").Inc() +// } +// return chunk, err +// } + +// // Put implements the TxChunkStore interface. +// func (m txChunkStoreWithMetrics) Put(ctx context.Context, chunk swarm.Chunk) error { +// dur := captureDuration(time.Now()) +// err := m.TxChunkStore.Put(ctx, chunk) +// m.metrics.ChunkStoreCallsDuration.WithLabelValues("Put").Observe(dur()) +// if err == nil || errors.Is(err, ErrNotFound) { +// m.metrics.ChunkStoreCalls.WithLabelValues("Put", "success").Inc() +// } else { +// m.metrics.ChunkStoreCalls.WithLabelValues("Put", "failure").Inc() +// } +// return err +// } + +// // Delete implements the TxChunkStore interface. +// func (m txChunkStoreWithMetrics) Delete(ctx context.Context, address swarm.Address) error { +// dur := captureDuration(time.Now()) +// err := m.TxChunkStore.Delete(ctx, address) +// m.metrics.ChunkStoreCallsDuration.WithLabelValues("Delete").Observe(dur()) +// if err == nil || errors.Is(err, ErrNotFound) { +// m.metrics.ChunkStoreCalls.WithLabelValues("Delete", "success").Inc() +// } else { +// m.metrics.ChunkStoreCalls.WithLabelValues("Delete", "failure").Inc() +// } +// return err +// } + +// // Has implements the TxChunkStore interface. +// func (m txChunkStoreWithMetrics) Has(ctx context.Context, address swarm.Address) (bool, error) { +// dur := captureDuration(time.Now()) +// has, err := m.TxChunkStore.Has(ctx, address) +// m.metrics.ChunkStoreCallsDuration.WithLabelValues("Has").Observe(dur()) +// if err == nil || errors.Is(err, ErrNotFound) { +// m.metrics.ChunkStoreCalls.WithLabelValues("Has", "success").Inc() +// } else { +// m.metrics.ChunkStoreCalls.WithLabelValues("Has", "failure").Inc() +// } +// return has, err +// } + +// // Iterate implements the TxChunkStore interface. +// func (m txChunkStoreWithMetrics) Iterate(ctx context.Context, fn IterateChunkFn) error { +// dur := captureDuration(time.Now()) +// err := m.TxChunkStore.Iterate(ctx, fn) +// m.metrics.ChunkStoreCallsDuration.WithLabelValues("Iterate").Observe(dur()) +// if err == nil || errors.Is(err, ErrNotFound) { +// m.metrics.ChunkStoreCalls.WithLabelValues("Iterate", "success").Inc() +// } else { +// m.metrics.ChunkStoreCalls.WithLabelValues("Iterate", "failure").Inc() +// } +// return err +// } + +// // captureDuration returns a function that returns the duration since the given start. +// func captureDuration(start time.Time) (elapsed func() float64) { +// return func() float64 { return time.Since(start).Seconds() } +// } diff --git a/pkg/storage/migration/index.go b/pkg/storage/migration/index.go index bc08d6f2d2d..5365b5fe910 100644 --- a/pkg/storage/migration/index.go +++ b/pkg/storage/migration/index.go @@ -62,11 +62,11 @@ func (o *options) applyAll(opts []option) { // NewStepOnIndex creates new migration step with update and/or delete operation. // Migration will iterate on all elements selected by query and delete or update items // based on supplied callback functions. -func NewStepOnIndex(query storage.Query, opts ...option) StepFn { +func NewStepOnIndex(s storage.BatchStore, query storage.Query, opts ...option) StepFn { o := defaultOptions() o.applyAll(opts) - return func(s storage.BatchedStore) error { + return func() error { return stepOnIndex(s, query, o) } } diff --git a/pkg/storage/migration/index_test.go b/pkg/storage/migration/index_test.go index cf2e0ab8ded..a44998d08f6 100644 --- a/pkg/storage/migration/index_test.go +++ b/pkg/storage/migration/index_test.go @@ -25,6 +25,7 @@ func TestNewStepOnIndex(t *testing.T) { populateStore(t, store, populateItemsCount) stepFn := migration.NewStepOnIndex( + store, storage.Query{ Factory: newObjFactory, }, @@ -38,7 +39,7 @@ func TestNewStepOnIndex(t *testing.T) { t.Fatalf("have %d, want %d", initialCount, populateItemsCount) } - if err := stepFn(store); err != nil { + if err := stepFn(); err != nil { t.Fatalf("step migration should successed: %v", err) } @@ -59,7 +60,7 @@ func TestNewStepOnIndex(t *testing.T) { store := inmemstore.New() populateStore(t, store, populateItemsCount) - stepFn := migration.NewStepOnIndex( + stepFn := migration.NewStepOnIndex(store, storage.Query{ Factory: newObjFactory, ItemProperty: storage.QueryItem, @@ -71,7 +72,7 @@ func TestNewStepOnIndex(t *testing.T) { migration.WithOpPerBatch(3), ) - if err := stepFn(store); err != nil { + if err := stepFn(); err != nil { t.Fatalf("step migration should successed: %v", err) } @@ -86,7 +87,7 @@ func TestNewStepOnIndex(t *testing.T) { store := inmemstore.New() populateStore(t, store, populateItemsCount) - stepFn := migration.NewStepOnIndex( + stepFn := migration.NewStepOnIndex(store, storage.Query{ Factory: newObjFactory, ItemProperty: storage.QueryItem, @@ -104,7 +105,7 @@ func TestNewStepOnIndex(t *testing.T) { migration.WithOpPerBatch(3), ) - if err := stepFn(store); err != nil { + if err := stepFn(); err != nil { t.Fatalf("step migration should successed: %v", err) } @@ -119,6 +120,7 @@ func TestNewStepOnIndex(t *testing.T) { populateStore(t, store, populateItemsCount) step := migration.NewStepOnIndex( + store, storage.Query{ Factory: newObjFactory, ItemProperty: storage.QueryItem, @@ -141,7 +143,7 @@ func TestNewStepOnIndex(t *testing.T) { migration.WithOpPerBatch(3), ) - if err := step(store); err != nil { + if err := step(); err != nil { t.Fatalf("step migration should successed: %v", err) } @@ -156,6 +158,7 @@ func TestNewStepOnIndex(t *testing.T) { populateStore(t, store, populateItemsCount) step := migration.NewStepOnIndex( + store, storage.Query{ Factory: newObjFactory, ItemProperty: storage.QueryItem, @@ -168,7 +171,7 @@ func TestNewStepOnIndex(t *testing.T) { migration.WithOpPerBatch(3), ) - if err := step(store); err == nil { + if err := step(); err == nil { t.Fatalf("step migration should fail") } @@ -192,6 +195,7 @@ func TestStepIndex_BatchSize(t *testing.T) { updateItemCallMap := make(map[int]struct{}) stepFn := migration.NewStepOnIndex( + store, storage.Query{ Factory: newObjFactory, ItemProperty: storage.QueryItem, @@ -217,7 +221,7 @@ func TestStepIndex_BatchSize(t *testing.T) { migration.WithOpPerBatch(i), ) - if err := stepFn(store); err != nil { + if err := stepFn(); err != nil { t.Fatalf("step migration should successed: %v", err) } diff --git a/pkg/storage/migration/migration.go b/pkg/storage/migration/migration.go index 528831f0da8..60dc1015c96 100644 --- a/pkg/storage/migration/migration.go +++ b/pkg/storage/migration/migration.go @@ -16,7 +16,7 @@ import ( type ( // StepFn is a function that migrates the storage to the next version - StepFn func(storage.BatchedStore) error + StepFn func() error // Steps is a map of versions and their migration functions Steps = map[uint64]StepFn ) @@ -30,7 +30,7 @@ var ( // Migrate migrates the storage to the latest version. // The steps are separated by groups so different lists of steps can run individually, for example, // two groups of migrations that run before and after the storer is initialized. -func Migrate(s storage.BatchedStore, group string, sm Steps) error { +func Migrate(s storage.IndexStore, group string, sm Steps) error { if err := ValidateVersions(sm); err != nil { return err } @@ -45,7 +45,7 @@ func Migrate(s storage.BatchedStore, group string, sm Steps) error { if !ok { return nil } - err := stepFn(s) + err := stepFn() if err != nil { return err } @@ -127,7 +127,7 @@ func (s StorageVersionItem) String() string { } // Version returns the current version of the storage -func Version(s storage.Store, group string) (uint64, error) { +func Version(s storage.Reader, group string) (uint64, error) { item := StorageVersionItem{Group: group} err := s.Get(&item) if err != nil { @@ -140,7 +140,7 @@ func Version(s storage.Store, group string) (uint64, error) { } // setVersion sets the current version of the storage -func setVersion(s storage.Store, v uint64, g string) error { +func setVersion(s storage.Writer, v uint64, g string) error { return s.Put(&StorageVersionItem{Version: v, Group: g}) } diff --git a/pkg/storage/migration/migration_test.go b/pkg/storage/migration/migration_test.go index c1de785f7d3..84242348746 100644 --- a/pkg/storage/migration/migration_test.go +++ b/pkg/storage/migration/migration_test.go @@ -28,9 +28,9 @@ func TestLatestVersion(t *testing.T) { const expectedLatestVersion = 8 steps := migration.Steps{ - 8: func(s storage.BatchedStore) error { return nil }, - 7: func(s storage.BatchedStore) error { return nil }, - 6: func(s storage.BatchedStore) error { return nil }, + 8: func() error { return nil }, + 7: func() error { return nil }, + 6: func() error { return nil }, } latestVersion := migration.LatestVersion(steps) @@ -85,6 +85,8 @@ func TestValidateVersions(t *testing.T) { objT2 := &obj{id: 222, val: 2} objT3 := &obj{id: 333, val: 3} + s := inmemstore.New() + tests := []struct { name string input migration.Steps @@ -98,13 +100,13 @@ func TestValidateVersions(t *testing.T) { { name: "missing version 3", input: migration.Steps{ - 1: func(s storage.BatchedStore) error { + 1: func() error { return s.Put(objT1) }, - 2: func(s storage.BatchedStore) error { + 2: func() error { return s.Put(objT2) }, - 4: func(s storage.BatchedStore) error { + 4: func() error { return s.Put(objT3) }, }, @@ -113,13 +115,13 @@ func TestValidateVersions(t *testing.T) { { name: "not missing", input: migration.Steps{ - 1: func(s storage.BatchedStore) error { + 1: func() error { return s.Put(objT1) }, - 2: func(s storage.BatchedStore) error { + 2: func() error { return s.Put(objT2) }, - 3: func(s storage.BatchedStore) error { + 3: func() error { return s.Put(objT3) }, }, @@ -128,13 +130,13 @@ func TestValidateVersions(t *testing.T) { { name: "desc order versions", input: migration.Steps{ - 3: func(s storage.BatchedStore) error { + 3: func() error { return s.Put(objT1) }, - 2: func(s storage.BatchedStore) error { + 2: func() error { return s.Put(objT2) }, - 1: func(s storage.BatchedStore) error { + 1: func() error { return s.Put(objT3) }, }, @@ -143,13 +145,13 @@ func TestValidateVersions(t *testing.T) { { name: "desc order version missing", input: migration.Steps{ - 4: func(s storage.BatchedStore) error { + 4: func() error { return s.Put(objT1) }, - 2: func(s storage.BatchedStore) error { + 2: func() error { return s.Put(objT2) }, - 1: func(s storage.BatchedStore) error { + 1: func() error { return s.Put(objT3) }, }, @@ -176,20 +178,20 @@ func TestMigrate(t *testing.T) { t.Run("migration: 0 to 3", func(t *testing.T) { t.Parallel() + s := inmemstore.New() + steps := migration.Steps{ - 1: func(s storage.BatchedStore) error { + 1: func() error { return s.Put(objT1) }, - 2: func(s storage.BatchedStore) error { + 2: func() error { return s.Put(objT2) }, - 3: func(s storage.BatchedStore) error { + 3: func() error { return s.Put(objT3) }, } - s := inmemstore.New() - if err := migration.Migrate(s, "migration", steps); err != nil { t.Errorf("Migrate() unexpected error: %v", err) } @@ -208,20 +210,20 @@ func TestMigrate(t *testing.T) { t.Run("migration: 5 to 8", func(t *testing.T) { t.Parallel() + s := inmemstore.New() + steps := migration.Steps{ - 8: func(s storage.BatchedStore) error { + 8: func() error { return s.Put(objT1) }, - 7: func(s storage.BatchedStore) error { + 7: func() error { return s.Put(objT2) }, - 6: func(s storage.BatchedStore) error { + 6: func() error { return s.Put(objT3) }, } - s := inmemstore.New() - err := migration.SetVersion(s, 5, "migration") if err != nil { t.Errorf("SetVersion() unexpected error: %v", err) @@ -245,20 +247,20 @@ func TestMigrate(t *testing.T) { t.Run("migration: 5 to 8 with steps error", func(t *testing.T) { t.Parallel() + s := inmemstore.New() + steps := migration.Steps{ - 8: func(s storage.BatchedStore) error { + 8: func() error { return s.Put(objT1) }, - 7: func(s storage.BatchedStore) error { + 7: func() error { return errStep }, - 6: func(s storage.BatchedStore) error { + 6: func() error { return s.Put(objT3) }, } - s := inmemstore.New() - err := migration.SetVersion(s, 5, "migration") if err != nil { t.Errorf("SetVersion() unexpected error: %v", err) @@ -279,7 +281,7 @@ func TestMigrate(t *testing.T) { }) } -func assertObjectExists(t *testing.T, s storage.BatchedStore, keys ...storage.Key) { +func assertObjectExists(t *testing.T, s storage.BatchStore, keys ...storage.Key) { t.Helper() for _, key := range keys { diff --git a/pkg/storage/migration/steps_chain.go b/pkg/storage/migration/steps_chain.go index 96f8835b614..c6b9e2c901f 100644 --- a/pkg/storage/migration/steps_chain.go +++ b/pkg/storage/migration/steps_chain.go @@ -4,14 +4,12 @@ package migration -import storage "github.com/ethersphere/bee/pkg/storage" - // NewStepsChain returns new StepFn which combines all supplied StepFn // into single StepFn. func NewStepsChain(steps ...StepFn) StepFn { - return func(s storage.BatchedStore) error { + return func() error { for _, stepFn := range steps { - if err := stepFn(s); err != nil { + if err := stepFn(); err != nil { return err } } diff --git a/pkg/storage/migration/steps_chain_test.go b/pkg/storage/migration/steps_chain_test.go index 4488d6805b0..3d73b074af6 100644 --- a/pkg/storage/migration/steps_chain_test.go +++ b/pkg/storage/migration/steps_chain_test.go @@ -30,6 +30,7 @@ func TestNewStepsChain(t *testing.T) { // behavior where each should remove only one element from store if i%2 == 0 { stepFn = migration.NewStepOnIndex( + store, storage.Query{ Factory: newObjFactory, ItemProperty: storage.QueryItem, @@ -40,8 +41,8 @@ func TestNewStepsChain(t *testing.T) { }), ) } else { - stepFn = func(s storage.BatchedStore) error { - return s.Delete(&obj{id: valForRemoval}) + stepFn = func() error { + return store.Delete(&obj{id: valForRemoval}) } } @@ -49,7 +50,7 @@ func TestNewStepsChain(t *testing.T) { } stepFn := migration.NewStepsChain(stepsFn...) - if err := stepFn(store); err != nil { + if err := stepFn(); err != nil { t.Fatalf("step migration should successed: %v", err) } diff --git a/pkg/storage/repository.go b/pkg/storage/repository.go deleted file mode 100644 index f681ee76d88..00000000000 --- a/pkg/storage/repository.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2022 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package storage - -import ( - "context" - "errors" - "time" - - m "github.com/ethersphere/bee/pkg/metrics" - "github.com/ethersphere/bee/pkg/swarm" - "github.com/prometheus/client_golang/prometheus" -) - -// Repository is a collection of stores that provides a unified interface -// to access them. Access to all stores can be guarded by a transaction. -type Repository interface { - IndexStore() BatchedStore - ChunkStore() ChunkStore - - NewTx(context.Context) (repo Repository, commit func() error, rollback func() error) -} - -type repository struct { - metrics metrics - txStart time.Time - - txIndexStore TxStore - txChunkStore TxChunkStore - locker ChunkLocker -} - -// IndexStore returns Store. -func (r *repository) IndexStore() BatchedStore { - return r.txIndexStore -} - -// ChunkStore returns ChunkStore. -func (r *repository) ChunkStore() ChunkStore { - return r.txChunkStore -} - -// NewTx returns a new transaction that guards all the Repository -// stores. The transaction must be committed or rolled back. -func (r *repository) NewTx(ctx context.Context) (Repository, func() error, func() error) { - repo := &repository{ - metrics: r.metrics, - txStart: time.Now(), - txIndexStore: txIndexStoreWithMetrics{r.txIndexStore.NewTx(NewTxState(ctx)), r.metrics}, - txChunkStore: txChunkStoreWithMetrics{ - wrapSync(r.txChunkStore.NewTx(NewTxState(ctx)), r.locker), - r.metrics, - }, - } - - txs := []Tx{repo.txIndexStore, repo.txChunkStore} - - commit := func() error { - var err error - for _, tx := range txs { - err = tx.Commit() - if err != nil { - break - } - } - if !errors.Is(err, ErrTxDone) { - repo.metrics.TxTotalDuration.Observe(captureDuration(repo.txStart)()) - } - return err - } - - rollback := func() error { - var errs error - for i := len(txs) - 1; i >= 0; i-- { - if err := txs[i].Rollback(); err != nil { - errs = errors.Join(errs, err) - } - } - if !errors.Is(errs, ErrTxDone) { - repo.metrics.TxTotalDuration.Observe(captureDuration(repo.txStart)()) - } - return errs - } - - return repo, commit, rollback -} - -// Metrics returns set of prometheus collectors. -func (r *repository) Metrics() []prometheus.Collector { - return m.PrometheusCollectorsFromFields(r.metrics) -} - -type ChunkLocker func(chunk swarm.Address) func() - -// NewRepository returns a new Repository instance. -func NewRepository( - txIndexStore TxStore, - txChunkStore TxChunkStore, - locker ChunkLocker, -) Repository { - metrics := newMetrics() - return &repository{ - metrics: metrics, - txIndexStore: txIndexStoreWithMetrics{txIndexStore, metrics}, - txChunkStore: txChunkStoreWithMetrics{wrapSync(txChunkStore, locker), metrics}, - locker: locker, - } -} - -type syncChunkStore struct { - TxChunkStore - locker ChunkLocker -} - -func wrapSync(store TxChunkStore, locker ChunkLocker) TxChunkStore { - return &syncChunkStore{store, locker} -} - -func (s *syncChunkStore) Put(ctx context.Context, chunk swarm.Chunk) error { - unlock := s.locker(chunk.Address()) - defer unlock() - return s.TxChunkStore.Put(ctx, chunk) -} - -func (s *syncChunkStore) Delete(ctx context.Context, addr swarm.Address) error { - unlock := s.locker(addr) - defer unlock() - return s.TxChunkStore.Delete(ctx, addr) -} diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index f61099b4188..b76d3f9a275 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -5,9 +5,13 @@ package storage import ( + "context" "errors" "fmt" "io" + + "github.com/ethersphere/bee/pkg/sharky" + "github.com/ethersphere/bee/pkg/swarm" ) var ( @@ -185,8 +189,8 @@ type Writer interface { Delete(Item) error } -// BatchedStore is a store that supports batching of Writer method calls. -type BatchedStore interface { +// BatchStore is a store that supports batching of Writer method calls. +type BatchStore interface { Store Batcher } @@ -196,3 +200,64 @@ type BatchedStore interface { type Recoverer interface { Recover() error } + +type IndexStore interface { + Reader + Writer +} + +type Sharky interface { + Read(context.Context, sharky.Location, []byte) error + Write(context.Context, []byte) (sharky.Location, error) + Release(context.Context, sharky.Location) error +} + +type SizeReporter interface { + Size() (uint64, error) + Capacity() uint64 +} + +// Descriptor holds information required for Pull syncing. This struct +// is provided by subscribing to pull index. +type Descriptor struct { + Address swarm.Address + BinID uint64 +} + +func (d *Descriptor) String() string { + if d == nil { + return "" + } + return fmt.Sprintf("%s bin id %v", d.Address, d.BinID) +} + +type PullSubscriber interface { + SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan Descriptor, closed <-chan struct{}, stop func()) +} + +type PushSubscriber interface { + SubscribePush(ctx context.Context) (c <-chan swarm.Chunk, stop func()) +} + +type ChunkState = int + +const ( + // ChunkSent is used by the pusher component to notify about successful push of chunk from + // the node. A chunk could be retried on failure so, this sent count is maintained to + // understand how many attempts were made by the node while pushing. The attempts are + // registered only when an actual request was sent from this node. + ChunkSent ChunkState = iota + // ChunkStored is used by the pusher component to notify that the uploader node is + // the closest node and has stored the chunk. + ChunkStored + // ChunkSynced is used by the pusher component to notify that the chunk is synced to the + // network. This is reported when a valid receipt was received after the chunk was + // pushed. + ChunkSynced + ChunkCouldNotSync +) + +// PushReporter is used to report chunk state. +type PushReporter interface { + Report(context.Context, swarm.Chunk, ChunkState) error +} diff --git a/pkg/storage/storagetest/batch.go b/pkg/storage/storagetest/batch.go index b144bc04908..16fa04a2dcd 100644 --- a/pkg/storage/storagetest/batch.go +++ b/pkg/storage/storagetest/batch.go @@ -13,14 +13,11 @@ import ( "github.com/google/go-cmp/cmp" ) -func TestBatchedStore(t *testing.T, bs storage.BatchedStore) { +func TestBatchedStore(t *testing.T, bs storage.BatchStore) { item := &obj1{Id: "id", SomeInt: 1, Buf: []byte("data")} t.Run("duplicates are rejected", func(t *testing.T) { - batch, err := bs.Batch(context.Background()) - if err != nil { - t.Fatalf("Batch(...): unexpected error: %v", err) - } + batch := bs.Batch(context.Background()) if err := batch.Put(item); err != nil { t.Fatalf("Put(...): unexpected error: %v", err) @@ -34,7 +31,7 @@ func TestBatchedStore(t *testing.T, bs storage.BatchedStore) { } var cnt int - err = bs.Iterate(storage.Query{ + err := bs.Iterate(storage.Query{ Factory: func() storage.Item { return new(obj1) }, ItemProperty: storage.QueryItem, }, func(r storage.Result) (bool, error) { @@ -58,10 +55,7 @@ func TestBatchedStore(t *testing.T, bs storage.BatchedStore) { t.Fatalf("Put(...): unexpected error: %v", err) } - batch, err := bs.Batch(context.Background()) - if err != nil { - t.Fatalf("Batch(...): unexpected error: %v", err) - } + batch := bs.Batch(context.Background()) if err := batch.Put(item); err != nil { t.Fatalf("Put(...): unexpected error: %v", err) @@ -74,7 +68,7 @@ func TestBatchedStore(t *testing.T, bs storage.BatchedStore) { t.Fatalf("Commit(): unexpected error: %v", err) } - err = bs.Iterate(storage.Query{ + err := bs.Iterate(storage.Query{ Factory: func() storage.Item { return new(obj1) }, ItemProperty: storage.QueryItem, }, func(r storage.Result) (bool, error) { @@ -87,10 +81,7 @@ func TestBatchedStore(t *testing.T, bs storage.BatchedStore) { }) t.Run("batch not reusable after commit", func(t *testing.T) { - batch, err := bs.Batch(context.Background()) - if err != nil { - t.Fatalf("Batch(...): unexpected error: %v", err) - } + batch := bs.Batch(context.Background()) if err := batch.Commit(); err != nil { t.Fatalf("Commit(): unexpected error: %v", err) } @@ -102,10 +93,7 @@ func TestBatchedStore(t *testing.T, bs storage.BatchedStore) { t.Run("batch not usable with expired context", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) - batch, err := bs.Batch(ctx) - if err != nil { - t.Fatalf("Batch(...): unexpected error: %v", err) - } + batch := bs.Batch(ctx) if err := batch.Put(item); err != nil { t.Fatalf("Put(...): unexpected error: %v", err) diff --git a/pkg/storage/storagetest/benchmark.go b/pkg/storage/storagetest/benchmark.go index 09cd3241ecb..0af26254d20 100644 --- a/pkg/storage/storagetest/benchmark.go +++ b/pkg/storage/storagetest/benchmark.go @@ -356,7 +356,7 @@ func (w *batchDBWriter) commit(max int) { if w.count >= max { _ = w.batch.Commit() w.count = 0 - w.batch, _ = w.db.Batch(context.Background()) + w.batch = w.db.Batch(context.Background()) } } @@ -380,7 +380,7 @@ func (w *batchDBWriter) Delete(key []byte) { } func newBatchDBWriter(db storage.Batcher) *batchDBWriter { - batch, _ := db.Batch(context.Background()) + batch := db.Batch(context.Background()) return &batchDBWriter{ db: db, batch: batch, diff --git a/pkg/storage/storagetest/chunkstore.go b/pkg/storage/storagetest/chunkstore.go index dbd3e4d79a3..a3bcf028bd3 100644 --- a/pkg/storage/storagetest/chunkstore.go +++ b/pkg/storage/storagetest/chunkstore.go @@ -147,13 +147,6 @@ func TestChunkStore(t *testing.T, st storage.ChunkStore) { t.Fatalf("unexpected no of chunks, exp: %d, found: %d", 25, count) } }) - - t.Run("close store", func(t *testing.T) { - err := st.Close() - if err != nil { - t.Fatalf("unexpected error during close: %v", err) - } - }) } func RunChunkStoreBenchmarkTests(b *testing.B, s storage.ChunkStore) { diff --git a/pkg/storage/storagetest/storage.go b/pkg/storage/storagetest/storage.go index 55f21ad156e..3a7327949ef 100644 --- a/pkg/storage/storagetest/storage.go +++ b/pkg/storage/storagetest/storage.go @@ -851,7 +851,7 @@ func BenchmarkStore(b *testing.B, s storage.Store) { // BenchmarkBatchedStore provides a benchmark suite for the // storage.BatchedStore. Only the Write and Delete methods are tested. -func BenchmarkBatchedStore(b *testing.B, bs storage.BatchedStore) { +func BenchmarkBatchedStore(b *testing.B, bs storage.BatchStore) { b.Run("WriteInBatches", func(b *testing.B) { BenchmarkWriteInBatches(b, bs) }) @@ -946,9 +946,9 @@ func BenchmarkWriteSequential(b *testing.B, db storage.Store) { doWrite(b, db, g) } -func BenchmarkWriteInBatches(b *testing.B, bs storage.BatchedStore) { +func BenchmarkWriteInBatches(b *testing.B, bs storage.BatchStore) { g := newSequentialEntryGenerator(b.N) - batch, _ := bs.Batch(context.Background()) + batch := bs.Batch(context.Background()) resetBenchmark(b) for i := 0; i < b.N; i++ { key := g.Key(i) @@ -965,7 +965,7 @@ func BenchmarkWriteInBatches(b *testing.B, bs storage.BatchedStore) { } } -func BenchmarkWriteInFixedSizeBatches(b *testing.B, bs storage.BatchedStore) { +func BenchmarkWriteInFixedSizeBatches(b *testing.B, bs storage.BatchStore) { g := newSequentialEntryGenerator(b.N) writer := newBatchDBWriter(bs) resetBenchmark(b) @@ -1016,11 +1016,11 @@ func BenchmarkDeleteSequential(b *testing.B, db storage.Store) { doDelete(b, db, g) } -func BenchmarkDeleteInBatches(b *testing.B, bs storage.BatchedStore) { +func BenchmarkDeleteInBatches(b *testing.B, bs storage.BatchStore) { g := newSequentialEntryGenerator(b.N) doWrite(b, bs, g) resetBenchmark(b) - batch, _ := bs.Batch(context.Background()) + batch := bs.Batch(context.Background()) for i := 0; i < b.N; i++ { item := &obj1{ Id: string(g.Key(i)), @@ -1034,7 +1034,7 @@ func BenchmarkDeleteInBatches(b *testing.B, bs storage.BatchedStore) { } } -func BenchmarkDeleteInFixedSizeBatches(b *testing.B, bs storage.BatchedStore) { +func BenchmarkDeleteInFixedSizeBatches(b *testing.B, bs storage.BatchStore) { g := newSequentialEntryGenerator(b.N) doWrite(b, bs, g) resetBenchmark(b) diff --git a/pkg/storage/storagetest/transaction.go b/pkg/storage/storagetest/transaction.go deleted file mode 100644 index 1275bedb7d9..00000000000 --- a/pkg/storage/storagetest/transaction.go +++ /dev/null @@ -1,599 +0,0 @@ -// Copyright 2022 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package storagetest - -import ( - "bytes" - "context" - "errors" - "sync/atomic" - "testing" - "time" - - "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storage/storageutil" - chunktest "github.com/ethersphere/bee/pkg/storage/testing" - "github.com/ethersphere/bee/pkg/swarm" - "github.com/google/go-cmp/cmp" -) - -var _ storage.Item = (*object)(nil) - -// object is a simple struct that implements -// the storage.Item interface. -type object struct { - id string // 10 bytes. - data []byte -} - -func (o object) ID() string { return o.id } -func (object) Namespace() string { return "object" } - -func (o object) Marshal() ([]byte, error) { - buf := make([]byte, 10+len(o.data)) - copy(buf[:10], o.id) - copy(buf[10:], o.data) - return buf, nil -} - -func (o *object) Unmarshal(buf []byte) error { - if len(buf) < 10 { - return errors.New("invalid length") - } - o.id = string(buf[:10]) - o.data = make([]byte, len(buf)-10) - copy(o.data, buf[10:]) - return nil -} - -func (o *object) Clone() storage.Item { - if o == nil { - return nil - } - return &object{ - id: o.id, - data: append([]byte(nil), o.data...), - } -} - -func (o object) String() string { - return storageutil.JoinFields(o.Namespace(), o.ID()) -} - -// initStore initializes the given store with the given objects. -func initStore(t *testing.T, store storage.BatchedStore, batched bool, objects ...*object) { - t.Helper() - - var writer storage.Writer = store - - if batched { - b, err := store.Batch(context.Background()) - if err != nil { - t.Fatalf("Batch(): unexpected error: %v", err) - } - defer func() { - if err := b.Commit(); err != nil { - t.Fatalf("Commit(): unexpected error: %v", err) - } - }() - writer = b - } - - for _, o := range objects { - if err := writer.Put(o); err != nil { - t.Fatalf("Put(%q): unexpected error: %v", o.id, err) - } - } -} - -func deleteStore(t *testing.T, store storage.BatchedStore, batched bool, objects ...*object) { - t.Helper() - - var writer storage.Writer = store - - if batched { - b, err := store.Batch(context.Background()) - if err != nil { - t.Fatalf("Batch(): unexpected error: %v", err) - } - defer func() { - if err := b.Commit(); err != nil { - t.Fatalf("Commit(): unexpected error: %v", err) - } - }() - writer = b - } - - for _, o := range objects { - if err := writer.Delete(o); err != nil { - t.Fatalf("Delete(%q): unexpected error: %v", o.id, err) - } - } -} - -// checkTxStoreFinishedTxInvariants check if all the store operations behave -// as expected after the transaction has been committed or rolled back. -func checkTxStoreFinishedTxInvariants(t *testing.T, store storage.TxStore, want error) { - t.Helper() - - o007 := &object{id: "007", data: []byte("Hello, World!")} - - if have := store.Get(o007); !errors.Is(have, want) { - t.Fatalf("Get(...):\n\thave: %v\n\twant: %v", have, want) - } - - if _, have := store.Has(o007); !errors.Is(have, want) { - t.Fatalf("Has(...):\n\thave: %v\n\twant: %v", have, want) - } - - if _, have := store.GetSize(o007); !errors.Is(have, want) { - t.Fatalf("GetSize(...):\n\thave: %v\n\twant: %v", have, want) - } - - if have := store.Iterate(storage.Query{}, nil); !errors.Is(have, want) { - t.Fatalf("Iterate(...):\n\thave: %v\n\twant: %v", have, want) - } - - if _, have := store.Count(o007); !errors.Is(have, want) { - t.Fatalf("Count(...):\n\thave: %v\n\twant: %v", have, want) - } - - if have := store.Put(o007); !errors.Is(have, want) { - t.Fatalf("Put(...):\n\thave: %v\n\twant: %v", have, want) - } - - if have := store.Delete(o007); !errors.Is(have, want) { - t.Fatalf("Delete(...):\n\thave: %v\n\twant: %v", have, want) - } - - if have := store.Commit(); !errors.Is(have, want) { - t.Fatalf("Commit():\n\thave: %v\n\twant: %v", have, want) - } - - if have := store.Rollback(); !errors.Is(have, want) { - t.Fatalf("Rollback():\n\thave: %v\n\twant: %v", have, want) - } -} - -// TestTxStore provides correctness testsuite for storage.TxStore interface. -func TestTxStore(t *testing.T, store storage.TxStore) { - t.Helper() - - t.Cleanup(func() { - var closed int32 - time.AfterFunc(100*time.Millisecond, func() { - if atomic.LoadInt32(&closed) == 0 { - t.Fatal("store did not close") - } - }) - if err := store.Close(); err != nil { - t.Fatalf("Close(): unexpected error: %v", err) - } - atomic.StoreInt32(&closed, 1) - }) - - t.Run("commit empty", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - tx := store.NewTx(storage.NewTxState(ctx)) - - if err := tx.Commit(); err != nil { - t.Fatalf("Commit(): unexpected error: %v", err) - } - - checkTxStoreFinishedTxInvariants(t, tx, storage.ErrTxDone) - }) - - tCases := []struct { - name string - batched bool - }{ - {"single", false}, - {"batchd", true}, - } - - for _, tCase := range tCases { - t.Run(tCase.name, func(t *testing.T) { - t.Run("commit", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - objects := []*object{ - {id: "0001", data: []byte("data1")}, - {id: "0002", data: []byte("data2")}, - {id: "0003", data: []byte("data3")}, - } - - t.Run("add new objects", func(t *testing.T) { - tx := store.NewTx(storage.NewTxState(ctx)) - - initStore(t, tx, tCase.batched, objects...) - - if err := tx.Commit(); err != nil { - t.Fatalf("Commit(): unexpected error: %v", err) - } - - for _, o := range objects { - err := store.Get(&object{id: o.id}) - if err != nil { - t.Fatalf("Get(%q): unexpected error: %v", o.id, err) - } - } - - checkTxStoreFinishedTxInvariants(t, tx, storage.ErrTxDone) - }) - - t.Run("delete existing objects", func(t *testing.T) { - tx := store.NewTx(storage.NewTxState(ctx)) - - deleteStore(t, tx, tCase.batched, objects...) - if err := tx.Commit(); err != nil { - t.Fatalf("Commit(): unexpected error: %v", err) - } - want := storage.ErrNotFound - for _, o := range objects { - have := store.Get(&object{id: o.id}) - if !errors.Is(have, want) { - t.Fatalf("Get(%q):\n\thave: %v\n\twant: %v", o.id, want, have) - } - } - - checkTxStoreFinishedTxInvariants(t, tx, storage.ErrTxDone) - }) - }) - }) - } - - t.Run("rollback empty", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - tx := store.NewTx(storage.NewTxState(ctx)) - - if err := tx.Rollback(); err != nil { - t.Fatalf("Rollback(): unexpected error: %v", err) - } - - checkTxStoreFinishedTxInvariants(t, tx, storage.ErrTxDone) - }) - - t.Run("rollback canceled context", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - tx := store.NewTx(storage.NewTxState(ctx)) - - if err := tx.Rollback(); err != nil { - t.Fatalf("Rollback(): unexpected error: %v", err) - } - - checkTxStoreFinishedTxInvariants(t, tx, context.Canceled) - }) - - for _, tCase := range tCases { - t.Run(tCase.name, func(t *testing.T) { - t.Run("rollback added objects", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - tx := store.NewTx(storage.NewTxState(ctx)) - - objects := []*object{ - {id: "0001" + tCase.name, data: []byte("data1")}, - {id: "0002" + tCase.name, data: []byte("data2")}, - {id: "0003" + tCase.name, data: []byte("data3")}, - } - initStore(t, tx, tCase.batched, objects...) - - if err := tx.Rollback(); err != nil { - t.Fatalf("Rollback(): unexpected error: %v", err) - } - - want := storage.ErrNotFound - for _, o := range objects { - have := store.Get(&object{id: o.id}) - if !errors.Is(have, want) { - t.Fatalf("Get(%q):\n\thave: %v\n\twant: %v", o.id, have, want) - } - } - - checkTxStoreFinishedTxInvariants(t, tx, storage.ErrTxDone) - }) - - t.Run("rollback updated objects", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - tx := store.NewTx(storage.NewTxState(ctx)) - oldObjects := []*object{ - {id: "0001" + tCase.name, data: []byte("data1")}, - {id: "0002" + tCase.name, data: []byte("data2")}, - {id: "0003" + tCase.name, data: []byte("data3")}, - } - initStore(t, tx, tCase.batched, oldObjects...) - if err := tx.Commit(); err != nil { - t.Fatalf("Commit(): unexpected error: %v", err) - } - - tx = store.NewTx(storage.NewTxState(ctx)) - newObjects := []*object{ - {id: "0001" + tCase.name, data: []byte("data11")}, - {id: "0002" + tCase.name, data: []byte("data22")}, - {id: "0003" + tCase.name, data: []byte("data33")}, - } - initStore(t, tx, tCase.batched, newObjects...) - if err := tx.Rollback(); err != nil { - t.Fatalf("Rollback(): unexpected error: %v", err) - } - - for _, o := range oldObjects { - want := o - have := &object{id: o.id} - if err := store.Get(have); err != nil { - t.Fatalf("Get(%q): unexpected error: %v", o.id, err) - } - if diff := cmp.Diff(want, have, cmp.AllowUnexported(object{})); diff != "" { - t.Errorf("Get(%q): unexpected result: (-want +have):\n%s", o.id, diff) - } - } - - checkTxStoreFinishedTxInvariants(t, tx, storage.ErrTxDone) - }) - - t.Run("rollback removed objects", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - tx := store.NewTx(storage.NewTxState(ctx)) - objects := []*object{ - {id: "0001" + tCase.name, data: []byte("data1")}, - {id: "0002" + tCase.name, data: []byte("data2")}, - {id: "0003" + tCase.name, data: []byte("data3")}, - } - initStore(t, tx, tCase.batched, objects...) - if err := tx.Commit(); err != nil { - t.Fatalf("Commit(): unexpected error: %v", err) - } - - tx = store.NewTx(storage.NewTxState(ctx)) - deleteStore(t, tx, tCase.batched, objects...) - if err := tx.Rollback(); err != nil { - t.Fatalf("Rollback(): unexpected error: %v", err) - } - for _, want := range objects { - have := &object{id: want.id} - if err := store.Get(have); err != nil { - t.Errorf("Get(%q): unexpected error: %v", want.id, err) - } - if have.id != want.id { - t.Errorf("Get(%q):\n\thave: %q\n\twant: %q", want.id, have.id, want.id) - } - if !bytes.Equal(have.data, want.data) { - t.Errorf("Get(%q):\n\thave: %x\n\twant: %x", want.id, have.data, want.data) - } - } - - checkTxStoreFinishedTxInvariants(t, tx, storage.ErrTxDone) - }) - }) - } -} - -// initChunkStore initializes the given store with the given chunks. -func initChunkStore(t *testing.T, store storage.ChunkStore, chunks ...swarm.Chunk) { - t.Helper() - - ctx := context.Background() - for _, chunk := range chunks { - if err := store.Put(ctx, chunk); err != nil { - t.Fatalf("Put(%q): unexpected error: %v", chunk.Address(), err) - } - } -} - -// checkTxChunkStoreFinishedTxInvariants check if all the store operations behave -// as expected after the transaction has been committed or rolled back. -func checkTxChunkStoreFinishedTxInvariants(t *testing.T, store storage.TxChunkStore, want error) { - t.Helper() - - ctx := context.Background() - randomChunk := chunktest.GenerateTestRandomChunk() - - if chunk, have := store.Get(ctx, randomChunk.Address()); !errors.Is(have, want) || chunk != nil { - t.Fatalf("Get(...)\n\thave: %v, %v\n\twant: , %v", chunk, have, want) - } - - if have := store.Put(ctx, randomChunk); !errors.Is(have, want) { - t.Fatalf("Put(...):\n\thave: %v\n\twant: %v", have, want) - } - - if have := store.Delete(ctx, randomChunk.Address()); !errors.Is(have, want) { - t.Fatalf("Delete(...):\n\thave: %v\n\twant: %v", have, want) - } - - if _, have := store.Has(ctx, swarm.ZeroAddress); !errors.Is(have, want) { - t.Fatalf("Has(...):\n\thave: %v\n\twant: %v", have, want) - } - - if have := store.Iterate(ctx, func(_ swarm.Chunk) (stop bool, err error) { - return false, nil - }); !errors.Is(have, want) { - t.Fatalf("Iterate(...):\n\thave: %v\n\twant: %v", have, want) - } - - if have := store.Commit(); !errors.Is(have, want) { - t.Fatalf("Commit():\n\thave: %v\n\twant: %v", have, want) - } - - if have := store.Rollback(); !errors.Is(have, want) { - t.Fatalf("Rollback():\n\thave: %v\n\twant: %v", have, want) - } -} - -// TestTxChunkStore provides correctness testsuite for storage.TxChunkStore interface. -func TestTxChunkStore(t *testing.T, store storage.TxChunkStore) { - t.Helper() - - t.Cleanup(func() { - var closed int32 - time.AfterFunc(100*time.Millisecond, func() { - if atomic.LoadInt32(&closed) == 0 { - t.Fatal("store did not close") - } - }) - if err := store.Close(); err != nil { - t.Fatalf("Close(): unexpected error: %v", err) - } - atomic.StoreInt32(&closed, 1) - }) - - t.Run("commit empty", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - tx := store.NewTx(storage.NewTxState(ctx)) - - if err := tx.Commit(); err != nil { - t.Fatalf("Commit(): unexpected error: %v", err) - } - - checkTxChunkStoreFinishedTxInvariants(t, tx, storage.ErrTxDone) - }) - - t.Run("commit", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - chunks := chunktest.GenerateTestRandomChunks(3) - - t.Run("add new chunks", func(t *testing.T) { - tx := store.NewTx(storage.NewTxState(ctx)) - - initChunkStore(t, tx, chunks...) - - if err := tx.Commit(); err != nil { - t.Fatalf("Commit(): unexpected error: %v", err) - } - - for _, want := range chunks { - have, err := store.Get(context.Background(), want.Address()) - if err != nil { - t.Fatalf("Get(%q): unexpected error: %v", want.Address(), err) - } - if !have.Equal(want) { - t.Fatalf("Get(%q): \n\thave: %v\n\twant: %v", want.Address(), have, want) - } - } - - checkTxChunkStoreFinishedTxInvariants(t, tx, storage.ErrTxDone) - }) - - t.Run("delete existing chunks", func(t *testing.T) { - tx := store.NewTx(storage.NewTxState(ctx)) - - for _, chunk := range chunks { - if err := tx.Delete(context.Background(), chunk.Address()); err != nil { - t.Fatalf("Delete(%q): unexpected error: %v", chunk.Address(), err) - } - } - if err := tx.Commit(); err != nil { - t.Fatalf("Commit(): unexpected error: %v", err) - } - want := storage.ErrNotFound - for _, ch := range chunks { - chunk, have := store.Get(context.Background(), ch.Address()) - if !errors.Is(have, want) || chunk != nil { - t.Fatalf("Get(...)\n\thave: %v, %v\n\twant: , %v", chunk, have, want) - } - } - - checkTxChunkStoreFinishedTxInvariants(t, tx, storage.ErrTxDone) - }) - }) - - t.Run("rollback empty", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - tx := store.NewTx(storage.NewTxState(ctx)) - - if err := tx.Rollback(); err != nil { - t.Fatalf("Rollback(): unexpected error: %v", err) - } - - checkTxChunkStoreFinishedTxInvariants(t, tx, storage.ErrTxDone) - }) - - t.Run("rollback canceled context", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - tx := store.NewTx(storage.NewTxState(ctx)) - - if err := tx.Rollback(); err != nil { - t.Fatalf("Rollback(): unexpected error: %v", err) - } - - checkTxChunkStoreFinishedTxInvariants(t, tx, context.Canceled) - }) - - t.Run("rollback added chunks", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - tx := store.NewTx(storage.NewTxState(ctx)) - - chunks := chunktest.GenerateTestRandomChunks(3) - initChunkStore(t, tx, chunks...) - - if err := tx.Rollback(); err != nil { - t.Fatalf("Rollback(): unexpected error: %v", err) - } - - want := storage.ErrNotFound - for _, ch := range chunks { - chunk, have := store.Get(context.Background(), ch.Address()) - if !errors.Is(have, want) || chunk != nil { - t.Fatalf("Get(...)\n\thave: %v, %v\n\twant: , %v", chunk, have, want) - } - } - - checkTxChunkStoreFinishedTxInvariants(t, tx, storage.ErrTxDone) - }) - - t.Run("rollback removed chunks", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - tx := store.NewTx(storage.NewTxState(ctx)) - chunks := chunktest.GenerateTestRandomChunks(3) - initChunkStore(t, tx, chunks...) - if err := tx.Commit(); err != nil { - t.Fatalf("Commit(): unexpected error: %v", err) - } - - tx = store.NewTx(storage.NewTxState(ctx)) - for _, ch := range chunks { - if err := tx.Delete(context.Background(), ch.Address()); err != nil { - t.Fatalf("Delete(%q): unexpected error: %v", ch.Address(), err) - } - } - if err := tx.Rollback(); err != nil { - t.Fatalf("Rollback(): unexpected error: %v", err) - } - for _, want := range chunks { - have, err := store.Get(context.Background(), want.Address()) - if err != nil { - t.Fatalf("Get(%q): unexpected error: %v", want.Address(), err) - } - if !have.Equal(want) { - t.Fatalf("Get(%q): \n\thave: %v\n\twant: %v", want.Address(), have, want) - } - } - - checkTxChunkStoreFinishedTxInvariants(t, tx, storage.ErrTxDone) - }) -} diff --git a/pkg/storage/transaction.go b/pkg/storage/transaction.go deleted file mode 100644 index 1268e45da86..00000000000 --- a/pkg/storage/transaction.go +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2022 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package storage - -import ( - "context" - "errors" - "fmt" - "sync" - "sync/atomic" - - "github.com/ethersphere/bee/pkg/swarm" -) - -// ErrTxDone is returned by any operation that is performed on -// a transaction that has already been committed or rolled back. -var ErrTxDone = errors.New("storage: transaction has already been committed or rolled back") - -// Tx represents an in-progress Store transaction. -// A transaction must end with a call to Commit or Rollback. -type Tx interface { - // Commit commits the transaction. - Commit() error - - // Rollback aborts the transaction. - Rollback() error -} - -// TxStore represents a Tx Store where all -// operations are completed in a transaction. -type TxStore interface { - Tx - Store - Batcher - - NewTx(*TxState) TxStore -} - -// TxChunkStore represents a Tx ChunkStore where -// all operations are completed in a transaction. -type TxChunkStore interface { - Tx - ChunkStore - - NewTx(*TxState) TxChunkStore -} - -// TxState is a mix-in for Tx. It provides basic -// functionality for transaction state lifecycle. -type TxState struct { - // ctx lives for the life of the transaction. - ctx context.Context - - // cancel is this context cancel function - // that signals the end of this transaction. - cancel context.CancelCauseFunc -} - -// AwaitDone returns a channel that blocks until the context -// in TxState is canceled or the transaction is done. -func (tx *TxState) AwaitDone() <-chan struct{} { - if tx == nil { - c := make(chan struct{}) - close(c) - return c - } - - // Wait for either the transaction to be committed or rolled - // back, or for the associated context to be closed. - return tx.ctx.Done() -} - -// IsDone returns ErrTxDone if the transaction has already been committed -// or rolled back. If the transaction was in progress and the context was -// canceled, it returns the context.Canceled error. -func (tx *TxState) IsDone() error { - if tx == nil { - return nil - } - - return context.Cause(tx.ctx) -} - -// Done marks this transaction as complete. It returns ErrTxDone if the -// transaction has already been committed or rolled back or if the transaction -// was in progress and the context was canceled, it returns the context.Canceled -// error. -func (tx *TxState) Done() error { - if tx == nil { - return nil - } - - if tx.ctx.Err() == nil { - tx.cancel(ErrTxDone) - return nil - } - return context.Cause(tx.ctx) -} - -// NewTxState is a convenient constructor for creating instances of TxState. -func NewTxState(ctx context.Context) *TxState { - ctx, cancel := context.WithCancelCause(ctx) - return &TxState{ctx: ctx, cancel: cancel} -} - -var _ Store = (*TxStoreBase)(nil) -var _ Batcher = (*TxStoreBase)(nil) - -// TxStoreBase implements the Store interface where -// the operations are guarded by a transaction. -type TxStoreBase struct { - *TxState - BatchedStore - - rolledBack atomic.Bool -} - -// Close implements the Store interface. -// The operation is blocked until the -// transaction is not done. -func (s *TxStoreBase) Close() error { - <-s.AwaitDone() - return s.BatchedStore.Close() -} - -// Get implements the Store interface. -func (s *TxStoreBase) Get(item Item) error { - if err := s.IsDone(); err != nil { - return err - } - return s.BatchedStore.Get(item) -} - -// Has implements the Store interface. -func (s *TxStoreBase) Has(key Key) (bool, error) { - if err := s.IsDone(); err != nil { - return false, err - } - return s.BatchedStore.Has(key) -} - -// GetSize implements the Store interface. -func (s *TxStoreBase) GetSize(key Key) (int, error) { - if err := s.IsDone(); err != nil { - return 0, err - } - return s.BatchedStore.GetSize(key) -} - -// Iterate implements the Store interface. -func (s *TxStoreBase) Iterate(query Query, fn IterateFn) error { - if err := s.IsDone(); err != nil { - return err - } - return s.BatchedStore.Iterate(query, fn) -} - -// Count implements the Store interface. -func (s *TxStoreBase) Count(key Key) (int, error) { - if err := s.IsDone(); err != nil { - return 0, err - } - return s.BatchedStore.Count(key) -} - -// Put implements the Store interface. -func (s *TxStoreBase) Put(item Item) error { - if err := s.IsDone(); err != nil { - return err - } - return s.BatchedStore.Put(item) -} - -// Delete implements the Store interface. -func (s *TxStoreBase) Delete(item Item) error { - if err := s.IsDone(); err != nil { - return err - } - return s.BatchedStore.Delete(item) -} - -func (s *TxStoreBase) Batch(ctx context.Context) (Batch, error) { - if err := s.IsDone(); err != nil { - return nil, err - } - - return s.BatchedStore.Batch(ctx) -} - -// Rollback implements the TxStore interface. -func (s *TxStoreBase) Rollback() error { - if s.rolledBack.CompareAndSwap(false, true) { - if err := s.Done(); err == nil || - errors.Is(err, context.Canceled) || - errors.Is(err, context.DeadlineExceeded) { - return nil - } - } - return s.IsDone() -} - -var _ ChunkStore = (*TxChunkStoreBase)(nil) - -// TxChunkStoreBase implements the ChunkStore interface -// where the operations are guarded by a transaction. -type TxChunkStoreBase struct { - *TxState - ChunkStore - - rolledBack atomic.Bool -} - -// Close implements the ChunkStore interface. -// The operation is blocked until the -// transaction is not done. -func (s *TxChunkStoreBase) Close() error { - <-s.AwaitDone() - return s.ChunkStore.Close() -} - -// Get implements the ChunkStore interface. -func (s *TxChunkStoreBase) Get(ctx context.Context, address swarm.Address) (swarm.Chunk, error) { - if err := s.IsDone(); err != nil { - return nil, err - } - return s.ChunkStore.Get(ctx, address) -} - -// Put implements the ChunkStore interface. -func (s *TxChunkStoreBase) Put(ctx context.Context, chunk swarm.Chunk) error { - if err := s.IsDone(); err != nil { - return err - } - return s.ChunkStore.Put(ctx, chunk) -} - -// Iterate implements the ChunkStore interface. -func (s *TxChunkStoreBase) Iterate(ctx context.Context, fn IterateChunkFn) error { - if err := s.IsDone(); err != nil { - return err - } - return s.ChunkStore.Iterate(ctx, fn) -} - -// Has implements the ChunkStore interface. -func (s *TxChunkStoreBase) Has(ctx context.Context, address swarm.Address) (bool, error) { - if err := s.IsDone(); err != nil { - return false, err - } - return s.ChunkStore.Has(ctx, address) -} - -// Delete implements the ChunkStore interface. -func (s *TxChunkStoreBase) Delete(ctx context.Context, address swarm.Address) error { - if err := s.IsDone(); err != nil { - return err - } - return s.ChunkStore.Delete(ctx, address) -} - -// Rollback implements the TxChunkStore interface. -func (s *TxChunkStoreBase) Rollback() error { - if s.rolledBack.CompareAndSwap(false, true) { - if err := s.Done(); err == nil || - errors.Is(err, context.Canceled) || - errors.Is(err, context.DeadlineExceeded) { - return nil - } - } - return s.IsDone() -} - -// TxOpCode represents code for tx operations. -type TxOpCode string - -const ( - PutOp TxOpCode = "put" - PutCreateOp TxOpCode = "putCreate" - PutUpdateOp TxOpCode = "putUpdate" - DeleteOp TxOpCode = "delete" -) - -// TxRevertOp represents a reverse operation. -type TxRevertOp[K, V any] struct { - Origin TxOpCode - ObjectID string - - Key K - Val V -} - -// TxRevertFn represents a function that can be invoked -// to reverse the operation that was performed by the -// corresponding TxOpCode. -type TxRevertFn[K, V any] func(K, V) error - -// TxRevertOpStore represents a store for TxRevertOp. -type TxRevertOpStore[K, V any] interface { - // Append appends a Revert operation to the store. - Append(...*TxRevertOp[K, V]) error - // Revert executes all the revere operations - // in the store in reverse order. - Revert() error - // Clean cleans the store. - Clean() error -} - -// NoOpTxRevertOpStore is a no-op implementation of TxRevertOpStore. -type NoOpTxRevertOpStore[K, V any] struct{} - -func (s *NoOpTxRevertOpStore[K, V]) Append(...*TxRevertOp[K, V]) error { return nil } -func (s *NoOpTxRevertOpStore[K, V]) Revert() error { return nil } -func (s *NoOpTxRevertOpStore[K, V]) Clean() error { return nil } - -// InMemTxRevertOpStore is an in-memory implementation of TxRevertOpStore. -type InMemTxRevertOpStore[K, V any] struct { - revOpsFn map[TxOpCode]TxRevertFn[K, V] - - mu sync.Mutex - ops []*TxRevertOp[K, V] -} - -// Append implements TxRevertOpStore. -func (s *InMemTxRevertOpStore[K, V]) Append(ops ...*TxRevertOp[K, V]) error { - if s == nil || len(ops) == 0 { - return nil - } - - s.mu.Lock() - s.ops = append(s.ops, ops...) - s.mu.Unlock() - return nil -} - -// Revert implements TxRevertOpStore. -func (s *InMemTxRevertOpStore[K, V]) Revert() error { - if s == nil { - return nil - } - - s.mu.Lock() - defer s.mu.Unlock() - - var errs error - for i := len(s.ops) - 1; i >= 0; i-- { - op := s.ops[i] - if op == nil { - continue - } - if fn, ok := s.revOpsFn[op.Origin]; !ok { - errs = errors.Join(errs, fmt.Errorf( - "revert operation %q for object %s not found", - op.Origin, - op.ObjectID, - )) - } else if err := fn(op.Key, op.Val); err != nil { - errs = errors.Join(errs, fmt.Errorf( - "revert operation %q for object %s failed: %w", - op.Origin, - op.ObjectID, - err, - )) - } - } - s.ops = nil - return errs -} - -// Clean implements TxRevertOpStore. -func (s *InMemTxRevertOpStore[K, V]) Clean() error { - if s == nil { - return nil - } - - s.mu.Lock() - s.ops = nil - s.mu.Unlock() - return nil -} - -// NewInMemTxRevertOpStore is a convenient constructor for creating instances of -// InMemTxRevertOpStore. The revOpsFn map is used to look up the revert function -// for a given TxOpCode. -func NewInMemTxRevertOpStore[K, V any](revOpsFn map[TxOpCode]TxRevertFn[K, V]) *InMemTxRevertOpStore[K, V] { - return &InMemTxRevertOpStore[K, V]{revOpsFn: revOpsFn} -} diff --git a/pkg/storage/transaction_test.go b/pkg/storage/transaction_test.go deleted file mode 100644 index a03df20a5dc..00000000000 --- a/pkg/storage/transaction_test.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2022 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package storage_test - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/ethersphere/bee/pkg/storage" -) - -func TestTxState(t *testing.T) { - t.Parallel() - - t.Run("lifecycle-normal", func(t *testing.T) { - t.Parallel() - - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - var ( - txs = storage.NewTxState(ctx) - timeout = 100 * time.Millisecond - ) - - if err := txs.IsDone(); err != nil { - t.Fatalf("IsDone(): unexpected error: %v", err) - } - - time.AfterFunc(timeout, func() { - if err := txs.Done(); err != nil { - t.Fatalf("Done(): unexpected error: %v", err) - } - if err := txs.Done(); !errors.Is(err, storage.ErrTxDone) { - t.Fatalf("Done():\n\twant error: %v\n\thave error: %v", storage.ErrTxDone, err) - } - }) - - func() { - for timer := time.NewTimer(5 * timeout); ; { - select { - case <-txs.AwaitDone(): - if !timer.Stop() { - <-timer.C - } - return - case <-timer.C: - select { - case <-ctx.Done(): - t.Fatalf("parent context canceled") - default: - t.Fatalf("Done() did not release AwaitDone()") - } - } - } - }() - - if err := txs.IsDone(); !errors.Is(err, storage.ErrTxDone) { - t.Fatalf("IsDone(): want error %v; have %v", storage.ErrTxDone, err) - } - - select { - case <-txs.AwaitDone(): - default: - t.Error("AwaitDone() is blocking") - } - }) - - t.Run("lifecycle-done-by-parent-ctx", func(t *testing.T) { - t.Parallel() - - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - var ( - txs = storage.NewTxState(ctx) - timeout = 100 * time.Millisecond - ) - - if err := txs.IsDone(); err != nil { - t.Fatalf("IsDone(): unexpected error: %v", err) - } - - time.AfterFunc(timeout, func() { - cancel() - }) - - func() { - for timer := time.NewTimer(5 * timeout); ; { - select { - case <-txs.AwaitDone(): - if !timer.Stop() { - <-timer.C - } - return - case <-timer.C: - select { - case <-ctx.Done(): - t.Fatalf("cancelation of parent context did not release AwaitDone()") - default: - t.Fatalf("parent context not canceled") - } - } - } - }() - - if err := txs.IsDone(); !errors.Is(err, context.Canceled) { - t.Fatalf("IsDone():\n\twant error %v\n\thave %v", context.Canceled, err) - } - if err := txs.Done(); !errors.Is(err, context.Canceled) { - t.Fatalf("Done():\n\twant error: %v\n\thave error: %v", context.Canceled, err) - } - - select { - case <-txs.AwaitDone(): - default: - t.Error("AwaitDone() is blocking") - } - }) -} diff --git a/pkg/storer/cachestore.go b/pkg/storer/cachestore.go index 1e7546c7c23..73be9d058fb 100644 --- a/pkg/storer/cachestore.go +++ b/pkg/storer/cachestore.go @@ -11,7 +11,7 @@ import ( "time" storage "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storer/internal" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" ) @@ -34,10 +34,7 @@ func (db *DB) cacheWorker(ctx context.Context) { return case <-overCapTrigger: - var ( - size = db.cacheObj.Size() - capc = db.cacheObj.Capacity() - ) + size, capc := db.cacheObj.Size(), db.cacheObj.Capacity() if size <= capc { continue } @@ -45,9 +42,7 @@ func (db *DB) cacheWorker(ctx context.Context) { evict := min(10_000, (size - capc)) dur := captureDuration(time.Now()) - err := db.Execute(ctx, func(s internal.Storage) error { - return db.cacheObj.RemoveOldest(ctx, s, s.ChunkStore(), evict) - }) + err := db.cacheObj.RemoveOldest(ctx, db.storage, evict) db.metrics.MethodCallsDuration.WithLabelValues("cachestore", "RemoveOldest").Observe(dur()) if err != nil { db.metrics.MethodCalls.WithLabelValues("cachestore", "RemoveOldest", "failure").Inc() @@ -67,19 +62,18 @@ func (db *DB) cacheWorker(ctx context.Context) { func (db *DB) Lookup() storage.Getter { return getterWithMetrics{ storage.GetterFunc(func(ctx context.Context, address swarm.Address) (swarm.Chunk, error) { - txnRepo, commit, rollback := db.repo.NewTx(ctx) - ch, err := db.cacheObj.Getter(txnRepo).Get(ctx, address) + ch, err := db.cacheObj.Getter(db.storage).Get(ctx, address) switch { case err == nil: - return ch, commit() + return ch, nil case errors.Is(err, storage.ErrNotFound): // here we would ideally have nothing to do but just to return this // error to the client. The commit is mainly done to end the txn. - return nil, errors.Join(err, commit()) + return nil, err } // if we are here, it means there was some unexpected error, in which // case we need to rollback any changes that were already made. - return nil, fmt.Errorf("cache.Get: %w", errors.Join(err, rollback())) + return nil, fmt.Errorf("cache.Get: %w", err) }), db.metrics, "cachestore", @@ -91,12 +85,11 @@ func (db *DB) Cache() storage.Putter { return putterWithMetrics{ storage.PutterFunc(func(ctx context.Context, ch swarm.Chunk) error { defer db.triggerCacheEviction() - txnRepo, commit, rollback := db.repo.NewTx(ctx) - err := db.cacheObj.Putter(txnRepo).Put(ctx, ch) + err := db.cacheObj.Putter(db.storage).Put(ctx, ch) if err != nil { - return fmt.Errorf("cache.Put: %w", errors.Join(err, rollback())) + return fmt.Errorf("cache.Put: %w", err) } - return errors.Join(err, commit()) + return nil }), db.metrics, "cachestore", @@ -104,7 +97,7 @@ func (db *DB) Cache() storage.Putter { } // CacheShallowCopy creates cache entries with the expectation that the chunk already exists in the chunkstore. -func (db *DB) CacheShallowCopy(ctx context.Context, store internal.Storage, addrs ...swarm.Address) error { +func (db *DB) CacheShallowCopy(ctx context.Context, store transaction.Storage, addrs ...swarm.Address) error { defer db.triggerCacheEviction() dur := captureDuration(time.Now()) err := db.cacheObj.ShallowCopy(ctx, store, addrs...) diff --git a/pkg/storer/cachestore_test.go b/pkg/storer/cachestore_test.go index dc2264491e7..83db1b118a4 100644 --- a/pkg/storer/cachestore_test.go +++ b/pkg/storer/cachestore_test.go @@ -6,12 +6,10 @@ package storer_test import ( "context" - "errors" "testing" "time" "github.com/ethersphere/bee/pkg/spinlock" - storage "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage/storagetest" chunktesting "github.com/ethersphere/bee/pkg/storage/testing" storer "github.com/ethersphere/bee/pkg/storer" @@ -38,32 +36,9 @@ func testCacheStore(t *testing.T, newStorer func() (*storer.DB, error)) { } } }) - - t.Run("rollback", func(t *testing.T) { - want := errors.New("dummy error") - lstore.SetRepoStorePutHook(func(item storage.Item) error { - if item.Namespace() == "cacheOrderIndex" { - return want - } - return nil - }) - errChunk := chunktesting.GenerateTestRandomChunk() - have := lstore.Cache().Put(context.TODO(), errChunk) - if !errors.Is(have, want) { - t.Fatalf("unexpected error on cache put: want %v have %v", want, have) - } - haveChunk, err := lstore.Repo().ChunkStore().Has(context.TODO(), errChunk.Address()) - if err != nil { - t.Fatalf("ChunkStore.Has(...): unexpected error: %v", err) - } - if haveChunk { - t.Fatalf("unexpected chunk state: want false have %t", haveChunk) - } - }) }) t.Run("lookup", func(t *testing.T) { t.Run("commit", func(t *testing.T) { - lstore.SetRepoStorePutHook(nil) getter := lstore.Lookup() for _, ch := range chunks { have, err := getter.Get(context.TODO(), ch.Address()) @@ -75,32 +50,8 @@ func testCacheStore(t *testing.T, newStorer func() (*storer.DB, error)) { } } }) - t.Run("rollback", func(t *testing.T) { - want := errors.New("dummy error") - lstore.SetRepoStorePutHook(func(item storage.Item) error { - if item.Namespace() == "cacheOrderIndex" { - return want - } - return nil - }) - // fail access for the first 4 chunks. This will keep the order as is - // from the last test. - for idx, ch := range chunks { - if idx > 4 { - break - } - _, have := lstore.Lookup().Get(context.TODO(), ch.Address()) - if !errors.Is(have, want) { - t.Fatalf("unexpected error in cache get: want %v have %v", want, have) - } - } - }) }) t.Run("cache chunks beyond capacity", func(t *testing.T) { - lstore.SetRepoStorePutHook(nil) - // add chunks beyond capacity and verify the correct chunks are removed - // from the cache based on last access order - newChunks := chunktesting.GenerateTestRandomChunks(5) putter := lstore.Cache() for _, ch := range newChunks { diff --git a/pkg/storer/compact.go b/pkg/storer/compact.go index 3ecc623e98f..98b8d8dd058 100644 --- a/pkg/storer/compact.go +++ b/pkg/storer/compact.go @@ -62,7 +62,7 @@ func Compact(ctx context.Context, basePath string, opts *Options, validate bool) items := make([]*chunkstore.RetrievalIndexItem, 0, 1_000_000) // we deliberately choose to iterate the whole store again for each shard // so that we do not store all the items in memory (for operators with huge localstores) - _ = chunkstore.Iterate(store, func(item *chunkstore.RetrievalIndexItem) error { + _ = chunkstore.IterateItems(store, func(item *chunkstore.RetrievalIndexItem) error { if item.Location.Shard == uint8(shard) { items = append(items, item) } @@ -88,11 +88,7 @@ func Compact(ctx context.Context, basePath string, opts *Options, validate bool) start := uint32(0) end := lastUsedSlot - batch, err := store.Batch(ctx) - if err != nil { - return err - } - + batch := store.Batch(ctx) for start <= end { if slots[start] != nil { diff --git a/pkg/storer/debug.go b/pkg/storer/debug.go index f78c866c906..ada10c4c6e3 100644 --- a/pkg/storer/debug.go +++ b/pkg/storer/debug.go @@ -60,7 +60,7 @@ func (db *DB) DebugInfo(ctx context.Context) (Info, error) { ) eg.Go(func() error { return chunkstore.IterateChunkEntries( - db.repo.IndexStore(), + db.storage.ReadOnly().IndexStore(), func(_ swarm.Address, isShared bool) (bool, error) { select { case <-ctx.Done(): @@ -85,7 +85,7 @@ func (db *DB) DebugInfo(ctx context.Context) (Info, error) { ) eg.Go(func() error { return upload.IterateAll( - db.repo.IndexStore(), + db.storage.ReadOnly().IndexStore(), func(_ swarm.Address, isSynced bool) (bool, error) { select { case <-ctx.Done(): @@ -110,7 +110,7 @@ func (db *DB) DebugInfo(ctx context.Context) (Info, error) { ) eg.Go(func() error { return pinstore.IterateCollectionStats( - db.repo.IndexStore(), + db.storage.ReadOnly().IndexStore(), func(stat pinstore.CollectionStat) (bool, error) { select { case <-ctx.Done(): @@ -139,7 +139,7 @@ func (db *DB) DebugInfo(ctx context.Context) (Info, error) { reserveCapacity = db.reserve.Capacity() reserveSize = db.reserve.Size() eg.Go(func() error { - return db.reserve.IterateChunksItems(db.repo, db.reserve.Radius(), func(ci reserve.ChunkItem) (bool, error) { + return db.reserve.IterateChunksItems(db.reserve.Radius(), func(ci reserve.ChunkItem) (bool, error) { reserveSizeWithinRadius++ return false, nil }) diff --git a/pkg/storer/debug_test.go b/pkg/storer/debug_test.go index d62a38aeb55..6f5a9e098dc 100644 --- a/pkg/storer/debug_test.go +++ b/pkg/storer/debug_test.go @@ -74,7 +74,7 @@ func testDebugInfo(t *testing.T, newStorer func() (*storer.DB, swarm.Address, er }, ChunkStore: storer.ChunkStoreStat{ TotalChunks: 10, - SharedSlots: 10, + SharedSlots: 0, }, Cache: storer.CacheStat{ Capacity: 1000000, diff --git a/pkg/storer/epoch_migration.go b/pkg/storer/epoch_migration.go deleted file mode 100644 index 873160564e1..00000000000 --- a/pkg/storer/epoch_migration.go +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2023 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package storer - -import ( - "context" - "encoding/binary" - "encoding/json" - "fmt" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/ethersphere/bee/pkg/log" - "github.com/ethersphere/bee/pkg/postage" - "github.com/ethersphere/bee/pkg/sharky" - "github.com/ethersphere/bee/pkg/shed" - "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storer/internal" - "github.com/ethersphere/bee/pkg/storer/internal/chunkstore" - pinstore "github.com/ethersphere/bee/pkg/storer/internal/pinning" - "github.com/ethersphere/bee/pkg/swarm" - "github.com/ethersphere/bee/pkg/traversal" - "golang.org/x/sync/errgroup" -) - -// epochKey implements storage.Item and is used to store the epoch in the -// store. It is used to check if the epoch migration has already been -// performed. -type epochKey struct{} - -func (epochKey) Namespace() string { return "localstore" } - -func (epochKey) ID() string { return "epoch" } - -// this is a key-only item, so we don't need to marshal/unmarshal -func (epochKey) Marshal() ([]byte, error) { return nil, nil } - -func (epochKey) Unmarshal([]byte) error { return nil } - -func (epochKey) Clone() storage.Item { return epochKey{} } - -func (epochKey) String() string { return "localstore-epoch" } - -var ( - _ internal.Storage = (*putOpStorage)(nil) - _ chunkstore.Sharky = (*putOpStorage)(nil) -) - -// putOpStorage implements the internal.Storage interface which is used by -// the internal component stores to store chunks. It also implements the sharky interface -// which uses the recovery mechanism to recover chunks without moving them. -type putOpStorage struct { - chunkstore.Sharky - - store storage.BatchedStore - location sharky.Location - recovery sharkyRecover -} - -func (p *putOpStorage) IndexStore() storage.BatchedStore { return p.store } - -func (p *putOpStorage) ChunkStore() storage.ChunkStore { - return chunkstore.New(p.store, p) -} - -// Write implements the sharky.Store interface. It uses the sharky recovery mechanism -// to recover chunks without moving them. The location returned is the same as the -// one passed in. This is present in the old localstore indexes. -func (p *putOpStorage) Write(_ context.Context, _ []byte) (sharky.Location, error) { - return p.location, p.recovery.Add(p.location) -} - -type reservePutter interface { - Put(context.Context, internal.Storage, swarm.Chunk) error - Size() int -} - -type sharkyRecover interface { - Add(sharky.Location) error - Read(context.Context, sharky.Location, []byte) error -} - -// epochMigration performs the initial migration if it hasnt been done already. It -// reads the old indexes and writes them in the new format. It only migrates the -// reserve and pinned chunks. It also creates the new epoch key in the store to -// indicate that the migration has been performed. Due to a bug in the old localstore -// pinned chunks are not always present in the pinned index. So we do a best-effort -// migration of the pinning index. If the migration fails, the user can re-pin -// the chunks using the stewardship endpoint if the stamps used to upload them are -// still valid. -func epochMigration( - ctx context.Context, - path string, - stateStore storage.StateStorer, - store storage.BatchedStore, - reserve reservePutter, - recovery sharkyRecover, - logger log.Logger, -) error { - has, err := store.Has(epochKey{}) - if err != nil { - return fmt.Errorf("has epoch key: %w", err) - } - - if has { - return nil - } - - logger.Debug("started", "path", path, "start_time", time.Now()) - - dbshed, err := shed.NewDB(path, nil) - if err != nil { - return fmt.Errorf("shed.NewDB: %w", err) - } - - defer func() { - if dbshed != nil { - dbshed.Close() - } - }() - - pullIndex, retrievalDataIndex, err := initShedIndexes(dbshed, swarm.ZeroAddress) - if err != nil { - return fmt.Errorf("initShedIndexes: %w", err) - } - - chunkCount, err := retrievalDataIndex.Count() - if err != nil { - return fmt.Errorf("retrievalDataIndex count: %w", err) - } - - pullIdxCnt, _ := pullIndex.Count() - - logger.Debug("index counts", "retrieval index", chunkCount, "pull index", pullIdxCnt) - - e := &epochMigrator{ - stateStore: stateStore, - store: store, - recovery: recovery, - reserve: reserve, - pullIndex: pullIndex, - retrievalDataIndex: retrievalDataIndex, - logger: logger, - } - - if e.reserve != nil && chunkCount > 0 { - err = e.migrateReserve(ctx) - if err != nil { - return err - } - } - - if e.stateStore != nil && chunkCount > 0 { - err = e.migratePinning(ctx) - if err != nil { - return err - } - } - - dbshed.Close() - dbshed = nil - - matches, err := filepath.Glob(filepath.Join(path, "*")) - if err != nil { - return err - } - - for _, m := range matches { - if !strings.Contains(m, indexPath) && !strings.Contains(m, sharkyPath) { - err = os.Remove(m) - if err != nil { - return err - } - } - } - - return store.Put(epochKey{}) -} - -func initShedIndexes(dbshed *shed.DB, baseAddress swarm.Address) (pullIndex shed.Index, retrievalDataIndex shed.Index, err error) { - // pull index allows history and live syncing per po bin - pullIndex, err = dbshed.NewIndex("PO|BinID->Hash", shed.IndexFuncs{ - EncodeKey: func(fields shed.Item) (key []byte, err error) { - key = make([]byte, 9) - key[0] = swarm.Proximity(baseAddress.Bytes(), fields.Address) - binary.BigEndian.PutUint64(key[1:9], fields.BinID) - return key, nil - }, - DecodeKey: func(key []byte) (e shed.Item, err error) { - e.BinID = binary.BigEndian.Uint64(key[1:9]) - return e, nil - }, - EncodeValue: func(fields shed.Item) (value []byte, err error) { - value = make([]byte, 64) // 32 bytes address, 32 bytes batch id - copy(value, fields.Address) - copy(value[32:], fields.BatchID) - return value, nil - }, - DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) { - e.Address = value[:32] - e.BatchID = value[32:64] - return e, nil - }, - }) - if err != nil { - return shed.Index{}, shed.Index{}, err - } - - // Index storing actual chunk address, data and bin id. - headerSize := 16 + postage.StampSize - retrievalDataIndex, err = dbshed.NewIndex("Address->StoreTimestamp|BinID|BatchID|BatchIndex|Sig|Location", shed.IndexFuncs{ - EncodeKey: func(fields shed.Item) (key []byte, err error) { - return fields.Address, nil - }, - DecodeKey: func(key []byte) (e shed.Item, err error) { - e.Address = key - return e, nil - }, - EncodeValue: func(fields shed.Item) (value []byte, err error) { - b := make([]byte, headerSize) - binary.BigEndian.PutUint64(b[:8], fields.BinID) - binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp)) - stamp, err := postage.NewStamp(fields.BatchID, fields.Index, fields.Timestamp, fields.Sig).MarshalBinary() - if err != nil { - return nil, err - } - copy(b[16:], stamp) - value = append(b, fields.Location...) - return value, nil - }, - DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) { - e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[8:16])) - e.BinID = binary.BigEndian.Uint64(value[:8]) - stamp := new(postage.Stamp) - if err = stamp.UnmarshalBinary(value[16:headerSize]); err != nil { - return e, err - } - e.BatchID = stamp.BatchID() - e.Index = stamp.Index() - e.Timestamp = stamp.Timestamp() - e.Sig = stamp.Sig() - e.Location = value[headerSize:] - return e, nil - }, - }) - if err != nil { - return shed.Index{}, shed.Index{}, err - } - - return pullIndex, retrievalDataIndex, nil -} - -// epochMigrator is a helper struct for migrating epoch data. It is used to house -// the main logic of the migration so that it can be tested. Also it houses the -// dependencies of the migration logic. -type epochMigrator struct { - stateStore storage.StateStorer - store storage.BatchedStore - recovery sharkyRecover - reserve reservePutter - pullIndex shed.Index - retrievalDataIndex shed.Index - logger log.Logger -} - -func (e *epochMigrator) migrateReserve(ctx context.Context) error { - type putOp struct { - pIdx shed.Item - chunk swarm.Chunk - loc sharky.Location - } - - e.logger.Debug("migrating reserve contents") - - opChan := make(chan putOp, 4) - eg, egCtx := errgroup.WithContext(ctx) - - for i := 0; i < 4; i++ { - eg.Go(func() error { - for { - select { - case <-egCtx.Done(): - return egCtx.Err() - case op, more := <-opChan: - if !more { - return nil - } - pStorage := &putOpStorage{ - store: e.store, - location: op.loc, - recovery: e.recovery, - } - - err := e.reserve.Put(egCtx, pStorage, op.chunk) - if err != nil { - return err - } - } - } - }) - } - - err := func() error { - defer close(opChan) - - return e.pullIndex.Iterate(func(i shed.Item) (stop bool, err error) { - addr := swarm.NewAddress(i.Address) - - item, err := e.retrievalDataIndex.Get(i) - if err != nil { - e.logger.Debug("retrieval data index read failed", "chunk_address", addr, "error", err) - return false, nil //continue - } - - l, err := sharky.LocationFromBinary(item.Location) - if err != nil { - e.logger.Debug("location from binary failed", "chunk_address", addr, "error", err) - return false, err - } - - chData := make([]byte, l.Length) - err = e.recovery.Read(ctx, l, chData) - if err != nil { - e.logger.Debug("reading location failed", "chunk_address", addr, "error", err) - return false, nil // continue - } - - ch := swarm.NewChunk(addr, chData). - WithStamp(postage.NewStamp(item.BatchID, item.Index, item.Timestamp, item.Sig)) - - select { - case <-egCtx.Done(): - return true, egCtx.Err() - case opChan <- putOp{pIdx: i, chunk: ch, loc: l}: - } - return false, nil - }, nil) - }() - if err != nil { - return err - } - - if err := eg.Wait(); err != nil { - return err - } - - e.logger.Debug("migrating reserve contents done", "reserve_size", e.reserve.Size()) - - return nil -} - -const pinStorePrefix = "root-pin" - -func (e *epochMigrator) migratePinning(ctx context.Context) error { - pinChan := make(chan swarm.Address, 4) - eg, egCtx := errgroup.WithContext(ctx) - - pStorage := &putOpStorage{ - store: e.store, - recovery: e.recovery, - } - var mu sync.Mutex // used to protect pStorage.location - - traverser := traversal.New( - storage.GetterFunc(func(ctx context.Context, addr swarm.Address) (ch swarm.Chunk, err error) { - i := shed.Item{ - Address: addr.Bytes(), - } - item, err := e.retrievalDataIndex.Get(i) - if err != nil { - return nil, err - } - - l, err := sharky.LocationFromBinary(item.Location) - if err != nil { - return nil, err - } - - chData := make([]byte, l.Length) - err = e.recovery.Read(ctx, l, chData) - if err != nil { - return nil, err - } - - return swarm.NewChunk(addr, chData), nil - }), - ) - - e.logger.Debug("migrating pinning collections, if all the chunks in the collection" + - " are not found locally, the collection will not be migrated. Users will have to" + - " re-pin the content using the stewardship API. The migration will print out the failed" + - " collections at the end.") - - for i := 0; i < 4; i++ { - eg.Go(func() error { - for { - select { - case <-egCtx.Done(): - return egCtx.Err() - case addr, more := <-pinChan: - if !more { - return nil - } - - pinningPutter, err := pinstore.NewCollection(pStorage) - if err != nil { - return err - } - - traverserFn := func(chAddr swarm.Address) error { - item, err := e.retrievalDataIndex.Get(shed.Item{Address: chAddr.Bytes()}) - if err != nil { - return err - } - - l, err := sharky.LocationFromBinary(item.Location) - if err != nil { - return err - } - ch := swarm.NewChunk(chAddr, nil) - - mu.Lock() - pStorage.location = l - err = pinningPutter.Put(egCtx, pStorage, pStorage.IndexStore(), ch) - if err != nil { - mu.Unlock() - return err - } - mu.Unlock() - - return nil - } - - err = func() error { - if err := traverser.Traverse(egCtx, addr, traverserFn); err != nil { - return err - } - - if err := pinningPutter.Close(pStorage, pStorage.IndexStore(), addr); err != nil { - return err - } - return nil - }() - - _ = e.stateStore.Delete(fmt.Sprintf("%s-%s", pinStorePrefix, addr)) - - // do not fail the entire migration if the collection is not migrated - if err != nil { - e.logger.Debug("pinning collection migration failed", "collection_root_address", addr, "error", err) - } else { - e.logger.Debug("pinning collection migration successful", "collection_root_address", addr) - } - } - } - }) - } - - err := func() error { - defer close(pinChan) - - return e.stateStore.Iterate(pinStorePrefix, func(key, value []byte) (stop bool, err error) { - var ref swarm.Address - if err := json.Unmarshal(value, &ref); err != nil { - return true, fmt.Errorf("pinning: unmarshal pin reference: %w", err) - } - select { - case <-egCtx.Done(): - return true, egCtx.Err() - case pinChan <- ref: - } - return false, nil - }) - }() - if err != nil { - return err - } - - if err := eg.Wait(); err != nil { - return err - } - - e.logger.Debug("migrating pinning collections done") - - return nil -} diff --git a/pkg/storer/epoch_migration_test.go b/pkg/storer/epoch_migration_test.go deleted file mode 100644 index 6978cfe0536..00000000000 --- a/pkg/storer/epoch_migration_test.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2023 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package storer_test - -import ( - "bytes" - "context" - "crypto/rand" - "fmt" - "io" - "io/fs" - "os" - "path" - "path/filepath" - "strings" - "sync" - "testing" - - "github.com/ethersphere/bee/pkg/file/splitter" - "github.com/ethersphere/bee/pkg/log" - postagetesting "github.com/ethersphere/bee/pkg/postage/testing" - "github.com/ethersphere/bee/pkg/sharky" - "github.com/ethersphere/bee/pkg/shed" - mockstatestore "github.com/ethersphere/bee/pkg/statestore/mock" - storage "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storage/inmemstore" - chunktest "github.com/ethersphere/bee/pkg/storage/testing" - storer "github.com/ethersphere/bee/pkg/storer" - "github.com/ethersphere/bee/pkg/storer/internal" - pinstore "github.com/ethersphere/bee/pkg/storer/internal/pinning" - "github.com/ethersphere/bee/pkg/swarm" -) - -type dirFS struct { - basedir string -} - -func (d *dirFS) Open(path string) (fs.File, error) { - return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0644) -} - -func createOldDataDir(t *testing.T, dataPath string, baseAddress swarm.Address, stateStore storage.StateStorer) { - t.Helper() - - binIDs := map[uint8]int{} - - assignBinID := func(addr swarm.Address) int { - po := swarm.Proximity(baseAddress.Bytes(), addr.Bytes()) - if _, ok := binIDs[po]; !ok { - binIDs[po] = 1 - return 1 - } - binIDs[po]++ - return binIDs[po] - } - - err := os.Mkdir(filepath.Join(dataPath, "sharky"), 0777) - if err != nil { - t.Fatal(err) - } - - sharkyStore, err := sharky.New(&dirFS{basedir: filepath.Join(dataPath, "sharky")}, 2, swarm.SocMaxChunkSize) - if err != nil { - t.Fatal(err) - } - defer sharkyStore.Close() - - shedDB, err := shed.NewDB(dataPath, nil) - if err != nil { - t.Fatal(err) - } - defer shedDB.Close() - - pIdx, rIdx, err := storer.InitShedIndexes(shedDB, baseAddress) - if err != nil { - t.Fatal(err) - } - - reserveChunks := chunktest.GenerateTestRandomChunks(10) - - for _, c := range reserveChunks { - loc, err := sharkyStore.Write(context.Background(), c.Data()) - if err != nil { - t.Fatal(err) - } - - locBuf, err := loc.MarshalBinary() - if err != nil { - t.Fatal(err) - } - - binID := assignBinID(c.Address()) - - err = pIdx.Put(shed.Item{ - Address: c.Address().Bytes(), - BinID: uint64(binID), - BatchID: c.Stamp().BatchID(), - }) - if err != nil { - t.Fatal(err) - } - - err = rIdx.Put(shed.Item{ - Address: c.Address().Bytes(), - BinID: uint64(binID), - BatchID: c.Stamp().BatchID(), - Index: c.Stamp().Index(), - Timestamp: c.Stamp().Timestamp(), - Sig: c.Stamp().Sig(), - Location: locBuf, - }) - - if err != nil { - t.Fatal(err) - } - } - - // create a pinning collection - writer := splitter.NewSimpleSplitter( - storage.PutterFunc( - func(ctx context.Context, chunk swarm.Chunk) error { - c := chunk.WithStamp(postagetesting.MustNewStamp()) - - loc, err := sharkyStore.Write(context.Background(), c.Data()) - if err != nil { - return err - } - - locBuf, err := loc.MarshalBinary() - if err != nil { - return err - } - - return rIdx.Put(shed.Item{ - Address: c.Address().Bytes(), - BatchID: c.Stamp().BatchID(), - Index: c.Stamp().Index(), - Timestamp: c.Stamp().Timestamp(), - Sig: c.Stamp().Sig(), - Location: locBuf, - }) - }, - ), - ) - - randData := make([]byte, 4096*20) - _, err = rand.Read(randData) - if err != nil { - t.Fatal(err) - } - - root, err := writer.Split(context.Background(), io.NopCloser(bytes.NewBuffer(randData)), 4096*20, false) - if err != nil { - t.Fatal(err) - } - - err = stateStore.Put(fmt.Sprintf("root-pin-%s", root.String()), root) - if err != nil { - t.Fatal(err) - } -} - -type testSharkyRecovery struct { - *sharky.Recovery - mtx sync.Mutex - addCalls int -} - -func (t *testSharkyRecovery) Add(loc sharky.Location) error { - t.mtx.Lock() - t.addCalls++ - t.mtx.Unlock() - return t.Recovery.Add(loc) -} - -type testReservePutter struct { - mtx sync.Mutex - size int - calls int -} - -func (t *testReservePutter) Put(ctx context.Context, st internal.Storage, ch swarm.Chunk) error { - t.mtx.Lock() - t.calls++ - t.mtx.Unlock() - return st.ChunkStore().Put(ctx, ch) -} - -func (t *testReservePutter) AddSize(size int) { - t.mtx.Lock() - t.size += size - t.mtx.Unlock() -} - -func (t *testReservePutter) Size() int { - t.mtx.Lock() - defer t.mtx.Unlock() - return t.size -} - -// TestEpochMigration_FLAKY is flaky on windows. -func TestEpochMigration_FLAKY(t *testing.T) { - t.Parallel() - t.Skip("will be removed") - - var ( - dataPath = t.TempDir() - baseAddress = swarm.RandAddress(t) - stateStore = mockstatestore.NewStateStore() - reserve = &testReservePutter{} - logBytes = bytes.NewBuffer(nil) - logger = log.NewLogger("test", log.WithSink(logBytes)) - indexStore = inmemstore.New() - ) - - createOldDataDir(t, dataPath, baseAddress, stateStore) - - r, err := sharky.NewRecovery(path.Join(dataPath, "sharky"), 2, swarm.SocMaxChunkSize) - if err != nil { - t.Fatal(err) - } - - sharkyRecovery := &testSharkyRecovery{Recovery: r} - - err = storer.EpochMigration( - context.Background(), - dataPath, - stateStore, - indexStore, - reserve, - sharkyRecovery, - logger, - ) - if err != nil { - t.Fatal(err) - } - - if !strings.Contains(logBytes.String(), "migrating pinning collections done") { - t.Fatalf("expected log to contain 'migrating pinning collections done', got %s", logBytes.String()) - } - - if !strings.Contains(logBytes.String(), "migrating reserve contents done") { - t.Fatalf("expected log to contain 'migrating pinning collections done', got %s", logBytes.String()) - } - - if sharkyRecovery.addCalls != 31 { - t.Fatalf("expected 31 add calls, got %d", sharkyRecovery.addCalls) - } - - if reserve.calls != 10 { - t.Fatalf("expected 10 reserve calls, got %d", reserve.calls) - } - - if reserve.size != 10 { - t.Fatalf("expected 10 reserve size, got %d", reserve.size) - } - - pins, err := pinstore.Pins(indexStore) - if err != nil { - t.Fatal(err) - } - - if len(pins) != 1 { - t.Fatalf("expected 1 pin, got %d", len(pins)) - } - - if !strings.Contains(logBytes.String(), pins[0].String()) { - t.Fatalf("expected log to contain root pin reference, got %s", logBytes.String()) - } -} - -func TestEpochMigrationLightNode(t *testing.T) { - t.Parallel() - - var ( - dataPath = t.TempDir() - baseAddress = swarm.RandAddress(t) - stateStore = mockstatestore.NewStateStore() - reserve storer.ReservePutter - logBytes = bytes.NewBuffer(nil) - logger = log.NewLogger("test", log.WithSink(logBytes)) - indexStore = inmemstore.New() - ) - - createOldDataDir(t, dataPath, baseAddress, stateStore) - - r, err := sharky.NewRecovery(path.Join(dataPath, "sharky"), 2, swarm.SocMaxChunkSize) - if err != nil { - t.Fatal(err) - } - - sharkyRecovery := &testSharkyRecovery{Recovery: r} - - err = storer.EpochMigration( - context.Background(), - dataPath, - stateStore, - indexStore, - reserve, - sharkyRecovery, - logger, - ) - if err != nil { - t.Fatal(err) - } - - if !strings.Contains(logBytes.String(), "migrating pinning collections done") { - t.Fatalf("expected log to contain 'migrating pinning collections done', got %s", logBytes.String()) - } - - if strings.Contains(logBytes.String(), "migrating reserve contents done") { - t.Fatalf("expected log to not contain 'migrating reserve contents done', got %s", logBytes.String()) - } - - if sharkyRecovery.addCalls != 21 { - t.Fatalf("expected 31 add calls, got %d", sharkyRecovery.addCalls) - } - - pins, err := pinstore.Pins(indexStore) - if err != nil { - t.Fatal(err) - } - - if len(pins) != 1 { - t.Fatalf("expected 1 pin, got %d", len(pins)) - } - - if !strings.ContainsAny(logBytes.String(), pins[0].String()) { - t.Fatalf("expected log to contain root pin reference, got %s", logBytes.String()) - } -} diff --git a/pkg/storer/export_test.go b/pkg/storer/export_test.go index 7bdc2198e10..cdceaca2d51 100644 --- a/pkg/storer/export_test.go +++ b/pkg/storer/export_test.go @@ -5,28 +5,17 @@ package storer import ( - "context" - - storage "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storer/internal/events" "github.com/ethersphere/bee/pkg/storer/internal/reserve" -) - -var ( - InitShedIndexes = initShedIndexes - EpochMigration = epochMigration -) - -type ( - ReservePutter = reservePutter + "github.com/ethersphere/bee/pkg/storer/internal/transaction" ) func (db *DB) Reserve() *reserve.Reserve { return db.reserve } -func (db *DB) Repo() storage.Repository { - return db.repo +func (db *DB) Storage() transaction.Storage { + return db.storage } func (db *DB) Events() *events.Subscriber { @@ -37,109 +26,6 @@ func ReplaceSharkyShardLimit(val int) { sharkyNoOfShards = val } -type wrappedRepo struct { - storage.Repository - deleteHook func(storage.Item) error - putHook func(storage.Item) error -} - -func (w *wrappedRepo) IndexStore() storage.BatchedStore { - return &wrappedStore{ - BatchedStore: w.Repository.IndexStore(), - deleteHook: w.deleteHook, - putHook: w.putHook, - } -} - -type wrappedStore struct { - storage.BatchedStore - deleteHook func(storage.Item) error - putHook func(storage.Item) error -} - -func (w *wrappedStore) Put(item storage.Item) error { - if w.putHook != nil { - err := w.putHook(item) - if err != nil { - return err - } - } - return w.BatchedStore.Put(item) -} - -func (w *wrappedStore) Delete(item storage.Item) error { - if w.deleteHook != nil { - err := w.deleteHook(item) - if err != nil { - return err - } - } - return w.BatchedStore.Delete(item) -} - -func (w *wrappedStore) Batch(ctx context.Context) (storage.Batch, error) { - batch, err := w.BatchedStore.Batch(ctx) - if err != nil { - return nil, err - } - return &wrappedBatch{ - Batch: batch, - deleteHook: w.deleteHook, - putHook: w.putHook, - }, nil -} - -type wrappedBatch struct { - storage.Batch - deleteHook func(storage.Item) error - putHook func(storage.Item) error -} - -func (w *wrappedBatch) Put(item storage.Item) error { - if w.putHook != nil { - err := w.putHook(item) - if err != nil { - return err - } - } - return w.Batch.Put(item) -} - -func (w *wrappedBatch) Delete(item storage.Item) error { - if w.deleteHook != nil { - err := w.deleteHook(item) - if err != nil { - return err - } - } - return w.Batch.Delete(item) -} - -func (w *wrappedRepo) NewTx(ctx context.Context) (storage.Repository, func() error, func() error) { - repo, commit, rollback := w.Repository.NewTx(ctx) - return &wrappedRepo{ - Repository: repo, - deleteHook: w.deleteHook, - putHook: w.putHook, - }, commit, rollback -} - -func (db *DB) SetRepoStoreDeleteHook(fn func(storage.Item) error) { - if alreadyWrapped, ok := db.repo.(*wrappedRepo); ok { - db.repo = &wrappedRepo{Repository: alreadyWrapped.Repository, deleteHook: fn} - return - } - db.repo = &wrappedRepo{Repository: db.repo, deleteHook: fn} -} - -func (db *DB) SetRepoStorePutHook(fn func(storage.Item) error) { - if alreadyWrapped, ok := db.repo.(*wrappedRepo); ok { - db.repo = &wrappedRepo{Repository: alreadyWrapped.Repository, putHook: fn} - return - } - db.repo = &wrappedRepo{Repository: db.repo, putHook: fn} -} - func (db *DB) WaitForBgCacheWorkers() (unblock func()) { for i := 0; i < defaultBgCacheWorkers; i++ { db.cacheLimiter.sem <- struct{}{} diff --git a/pkg/storer/internal/cache/cache.go b/pkg/storer/internal/cache/cache.go index f4ae7e06a4e..de91fc65972 100644 --- a/pkg/storer/internal/cache/cache.go +++ b/pkg/storer/internal/cache/cache.go @@ -15,7 +15,7 @@ import ( "time" storage "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storer/internal" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" "resenje.org/multex" ) @@ -39,8 +39,9 @@ var ( // part of the reserve but are potentially useful to store for obtaining bandwidth // incentives. type Cache struct { - size atomic.Int64 - capacity int + size atomic.Int64 + capacity int + chunkLock *multex.Multex // protects storage ops at chunk level removeLock sync.RWMutex // blocks Get and Put ops while cache items are being evicted. } @@ -48,8 +49,8 @@ type Cache struct { // New creates a new Cache component with the specified capacity. The store is used // here only to read the initial state of the cache before shutdown if there was // any. -func New(ctx context.Context, store internal.Storage, capacity uint64) (*Cache, error) { - count, err := store.IndexStore().Count(&cacheEntry{}) +func New(ctx context.Context, store storage.Reader, capacity uint64) (*Cache, error) { + count, err := store.Count(&cacheEntry{}) if err != nil { return nil, fmt.Errorf("failed counting cache entries: %w", err) } @@ -71,7 +72,7 @@ func (c *Cache) Capacity() uint64 { return uint64(c.capacity) } // Putter returns a Storage.Putter instance which adds the chunk to the underlying // chunkstore and also adds a Cache entry for the chunk. -func (c *Cache) Putter(store internal.Storage) storage.Putter { +func (c *Cache) Putter(store transaction.Storage) storage.Putter { return storage.PutterFunc(func(ctx context.Context, chunk swarm.Chunk) error { c.chunkLock.Lock(chunk.Address().ByteString()) @@ -79,8 +80,11 @@ func (c *Cache) Putter(store internal.Storage) storage.Putter { c.removeLock.RLock() defer c.removeLock.RUnlock() + trx, done := store.NewTransaction(ctx) + defer done() + newEntry := &cacheEntry{Address: chunk.Address()} - found, err := store.IndexStore().Has(newEntry) + found, err := trx.IndexStore().Has(newEntry) if err != nil { return fmt.Errorf("failed checking has cache entry: %w", err) } @@ -90,18 +94,13 @@ func (c *Cache) Putter(store internal.Storage) storage.Putter { return nil } - batch, err := store.IndexStore().Batch(ctx) - if err != nil { - return fmt.Errorf("failed creating batch: %w", err) - } - newEntry.AccessTimestamp = now().UnixNano() - err = batch.Put(newEntry) + err = trx.IndexStore().Put(newEntry) if err != nil { return fmt.Errorf("failed adding cache entry: %w", err) } - err = batch.Put(&cacheOrderIndex{ + err = trx.IndexStore().Put(&cacheOrderIndex{ Address: newEntry.Address, AccessTimestamp: newEntry.AccessTimestamp, }) @@ -109,15 +108,15 @@ func (c *Cache) Putter(store internal.Storage) storage.Putter { return fmt.Errorf("failed adding cache order index: %w", err) } - if err := batch.Commit(); err != nil { - return fmt.Errorf("batch commit: %w", err) - } - - err = store.ChunkStore().Put(ctx, chunk) + err = trx.ChunkStore().Put(ctx, chunk) if err != nil { return fmt.Errorf("failed adding chunk to chunkstore: %w", err) } + if err := trx.Commit(); err != nil { + return fmt.Errorf("batch commit: %w", err) + } + c.size.Add(1) return nil @@ -128,23 +127,26 @@ func (c *Cache) Putter(store internal.Storage) storage.Putter { // part of cache it will update the cache indexes. If the operation to update the // cache indexes fail, we need to fail the operation as this should signal the user // of this getter to rollback the operation. -func (c *Cache) Getter(store internal.Storage) storage.Getter { +func (c *Cache) Getter(store transaction.Storage) storage.Getter { return storage.GetterFunc(func(ctx context.Context, address swarm.Address) (swarm.Chunk, error) { - ch, err := store.ChunkStore().Get(ctx, address) - if err != nil { - return nil, err - } - c.chunkLock.Lock(address.ByteString()) defer c.chunkLock.Unlock(address.ByteString()) c.removeLock.RLock() defer c.removeLock.RUnlock() + trx, done := store.NewTransaction(ctx) + defer done() + + ch, err := trx.ChunkStore().Get(ctx, address) + if err != nil { + return nil, err + } + // check if there is an entry in Cache. As this is the download path, we do // a best-effort operation. So in case of any error we return the chunk. entry := &cacheEntry{Address: address} - err = store.IndexStore().Get(entry) + err = trx.IndexStore().Get(entry) if err != nil { if errors.Is(err, storage.ErrNotFound) { return ch, nil @@ -152,12 +154,7 @@ func (c *Cache) Getter(store internal.Storage) storage.Getter { return nil, fmt.Errorf("unexpected error getting indexstore entry: %w", err) } - batch, err := store.IndexStore().Batch(ctx) - if err != nil { - return nil, fmt.Errorf("failed creating batch: %w", err) - } - - err = batch.Delete(&cacheOrderIndex{ + err = trx.IndexStore().Delete(&cacheOrderIndex{ Address: entry.Address, AccessTimestamp: entry.AccessTimestamp, }) @@ -166,7 +163,7 @@ func (c *Cache) Getter(store internal.Storage) storage.Getter { } entry.AccessTimestamp = now().UnixNano() - err = batch.Put(&cacheOrderIndex{ + err = trx.IndexStore().Put(&cacheOrderIndex{ Address: entry.Address, AccessTimestamp: entry.AccessTimestamp, }) @@ -174,12 +171,12 @@ func (c *Cache) Getter(store internal.Storage) storage.Getter { return nil, fmt.Errorf("failed adding cache order index: %w", err) } - err = batch.Put(entry) + err = trx.IndexStore().Put(entry) if err != nil { return nil, fmt.Errorf("failed adding cache entry: %w", err) } - err = batch.Commit() + err = trx.Commit() if err != nil { return nil, fmt.Errorf("batch commit: %w", err) } @@ -191,7 +188,7 @@ func (c *Cache) Getter(store internal.Storage) storage.Getter { // ShallowCopy creates cache entries with the expectation that the chunk already exists in the chunkstore. func (c *Cache) ShallowCopy( ctx context.Context, - store internal.Storage, + store transaction.Storage, addrs ...swarm.Address, ) (err error) { @@ -200,67 +197,67 @@ func (c *Cache) ShallowCopy( defer func() { if err != nil { - for _, addr := range addrs { - err = errors.Join(store.ChunkStore().Delete(context.Background(), addr)) - } + err = store.Run(ctx, func(s transaction.Store) error { + var rerr error + for _, addr := range addrs { + rerr = errors.Join(rerr, s.ChunkStore().Delete(context.Background(), addr)) + } + return rerr + }) } }() //consider only the amount that can fit, the rest should be deleted from the chunkstore. if len(addrs) > c.capacity { - for _, addr := range addrs[:len(addrs)-c.capacity] { - _ = store.ChunkStore().Delete(ctx, addr) - } + + _ = store.Run(ctx, func(s transaction.Store) error { + for _, addr := range addrs[:len(addrs)-c.capacity] { + _ = s.ChunkStore().Delete(ctx, addr) + } + return nil + }) addrs = addrs[len(addrs)-c.capacity:] } entriesToAdd := make([]*cacheEntry, 0, len(addrs)) - for _, addr := range addrs { - entry := &cacheEntry{Address: addr, AccessTimestamp: now().UnixNano()} - if has, err := store.IndexStore().Has(entry); err == nil && has { - continue - } - entriesToAdd = append(entriesToAdd, entry) - } - if len(entriesToAdd) == 0 { - return nil - } - - batch, err := store.IndexStore().Batch(ctx) - if err != nil { - return fmt.Errorf("failed creating batch: %w", err) - } - - for _, entry := range entriesToAdd { - err = batch.Put(entry) - if err != nil { - return fmt.Errorf("failed adding entry %s: %w", entry, err) + err = store.Run(ctx, func(s transaction.Store) error { + for _, addr := range addrs { + entry := &cacheEntry{Address: addr, AccessTimestamp: now().UnixNano()} + if has, err := s.IndexStore().Has(entry); err == nil && has { + continue + } + entriesToAdd = append(entriesToAdd, entry) } - err = batch.Put(&cacheOrderIndex{ - Address: entry.Address, - AccessTimestamp: entry.AccessTimestamp, - }) - if err != nil { - return fmt.Errorf("failed adding cache order index: %w", err) + + for _, entry := range entriesToAdd { + err = s.IndexStore().Put(entry) + if err != nil { + return fmt.Errorf("failed adding entry %s: %w", entry, err) + } + err = s.IndexStore().Put(&cacheOrderIndex{ + Address: entry.Address, + AccessTimestamp: entry.AccessTimestamp, + }) + if err != nil { + return fmt.Errorf("failed adding cache order index: %w", err) + } } - } - if err := batch.Commit(); err != nil { - return fmt.Errorf("batch commit: %w", err) + return nil + }) + if err == nil { + c.size.Add(int64(len(entriesToAdd))) } - c.size.Add(int64(len(entriesToAdd))) - - return nil + return err } // RemoveOldest removes the oldest cache entries from the store. The count // specifies the number of entries to remove. func (c *Cache) RemoveOldest( ctx context.Context, - store internal.Storage, - chStore storage.ChunkStore, + st transaction.Storage, count uint64, ) error { if count <= 0 { @@ -271,7 +268,7 @@ func (c *Cache) RemoveOldest( defer c.removeLock.Unlock() evictItems := make([]*cacheEntry, 0, count) - err := store.IndexStore().Iterate( + err := st.ReadOnly().IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return &cacheOrderIndex{} }, ItemProperty: storage.QueryItemID, @@ -302,30 +299,26 @@ func (c *Cache) RemoveOldest( end = len(evictItems) } - batch, err := store.IndexStore().Batch(ctx) - if err != nil { - return fmt.Errorf("failed creating batch: %w", err) - } - - for _, entry := range evictItems[i:end] { - err = batch.Delete(entry) - if err != nil { - return fmt.Errorf("failed deleting cache entry %s: %w", entry, err) - } - err = batch.Delete(&cacheOrderIndex{ - Address: entry.Address, - AccessTimestamp: entry.AccessTimestamp, - }) - if err != nil { - return fmt.Errorf("failed deleting cache order index %s: %w", entry.Address, err) - } - err = chStore.Delete(ctx, entry.Address) - if err != nil { - return fmt.Errorf("failed deleting chunk %s from chunkstore: %w", entry.Address, err) + err := st.Run(ctx, func(s transaction.Store) error { + for _, entry := range evictItems[i:end] { + err = s.IndexStore().Delete(entry) + if err != nil { + return fmt.Errorf("failed deleting cache entry %s: %w", entry, err) + } + err = s.IndexStore().Delete(&cacheOrderIndex{ + Address: entry.Address, + AccessTimestamp: entry.AccessTimestamp, + }) + if err != nil { + return fmt.Errorf("failed deleting cache order index %s: %w", entry.Address, err) + } + err = s.ChunkStore().Delete(ctx, entry.Address) + if err != nil { + return fmt.Errorf("failed deleting chunk %s from chunkstore: %w", entry.Address, err) + } } - } - - err = batch.Commit() + return nil + }) if err != nil { return err } diff --git a/pkg/storer/internal/cache/cache_test.go b/pkg/storer/internal/cache/cache_test.go index 6450dc71198..221bb88823d 100644 --- a/pkg/storer/internal/cache/cache_test.go +++ b/pkg/storer/internal/cache/cache_test.go @@ -19,6 +19,7 @@ import ( chunktest "github.com/ethersphere/bee/pkg/storage/testing" "github.com/ethersphere/bee/pkg/storer/internal" "github.com/ethersphere/bee/pkg/storer/internal/cache" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" "github.com/google/go-cmp/cmp" ) @@ -86,59 +87,9 @@ func TestCacheEntryItem(t *testing.T) { } } -type testStorage struct { - internal.Storage - putFn func(storage.Item) error -} - -func (t *testStorage) IndexStore() storage.BatchedStore { - return &wrappedStore{BatchedStore: t.Storage.IndexStore(), putFn: t.putFn} -} - -type wrappedStore struct { - storage.BatchedStore - putFn func(storage.Item) error -} - -func (w *wrappedStore) Put(i storage.Item) error { - if w.putFn != nil { - return w.putFn(i) - } - return w.BatchedStore.Put(i) -} - -func (w *wrappedStore) Batch(ctx context.Context) (storage.Batch, error) { - b, err := w.BatchedStore.Batch(ctx) - if err != nil { - return nil, err - } - return &wrappedBatch{Batch: b, putFn: w.putFn}, nil -} - -type wrappedBatch struct { - storage.Batch - putFn func(storage.Item) error -} - -func (w *wrappedBatch) Put(i storage.Item) error { - if w.putFn != nil { - return w.putFn(i) - } - return w.Batch.Put(i) -} - -func newTestStorage(t *testing.T) *testStorage { +func newTestStorage(t *testing.T) transaction.Storage { t.Helper() - - storg, closer := internal.NewInmemStorage() - t.Cleanup(func() { - err := closer() - if err != nil { - t.Errorf("failed closing storage: %v", err) - } - }) - - return &testStorage{Storage: storg} + return internal.NewInmemStorage() } type timeProvider struct { @@ -172,18 +123,18 @@ func TestCache(t *testing.T) { t.Parallel() st := newTestStorage(t) - c, err := cache.New(context.TODO(), st, 10) + c, err := cache.New(context.TODO(), st.ReadOnly().IndexStore(), 10) if err != nil { t.Fatal(err) } - verifyCacheState(t, st.IndexStore(), c, swarm.ZeroAddress, swarm.ZeroAddress, 0) + verifyCacheState(t, st.ReadOnly().IndexStore(), c, swarm.ZeroAddress, swarm.ZeroAddress, 0) }) t.Run("putter", func(t *testing.T) { t.Parallel() st := newTestStorage(t) - c, err := cache.New(context.TODO(), st, 10) + c, err := cache.New(context.TODO(), st.ReadOnly().IndexStore(), 10) if err != nil { t.Fatal(err) } @@ -196,18 +147,18 @@ func TestCache(t *testing.T) { if err != nil { t.Fatal(err) } - verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), uint64(idx+1)) - verifyCacheOrder(t, c, st.IndexStore(), chunks[:idx+1]...) + verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), uint64(idx+1)) + verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks[:idx+1]...) } }) t.Run("new cache retains state", func(t *testing.T) { - c2, err := cache.New(context.TODO(), st, 10) + c2, err := cache.New(context.TODO(), st.ReadOnly().IndexStore(), 10) if err != nil { t.Fatal(err) } - verifyCacheState(t, st.IndexStore(), c2, chunks[0].Address(), chunks[len(chunks)-1].Address(), uint64(len(chunks))) - verifyCacheOrder(t, c2, st.IndexStore(), chunks...) + verifyCacheState(t, st.ReadOnly().IndexStore(), c2, chunks[0].Address(), chunks[len(chunks)-1].Address(), uint64(len(chunks))) + verifyCacheOrder(t, c2, st.ReadOnly().IndexStore(), chunks...) }) }) @@ -215,7 +166,7 @@ func TestCache(t *testing.T) { t.Parallel() st := newTestStorage(t) - c, err := cache.New(context.TODO(), st, 10) + c, err := cache.New(context.TODO(), st.ReadOnly().IndexStore(), 10) if err != nil { t.Fatal(err) } @@ -237,8 +188,8 @@ func TestCache(t *testing.T) { if !readChunk.Equal(ch) { t.Fatalf("incorrect chunk: %s", ch.Address()) } - verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), uint64(idx+1)) - verifyCacheOrder(t, c, st.IndexStore(), chunks[:idx+1]...) + verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), uint64(idx+1)) + verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks[:idx+1]...) } }) @@ -256,13 +207,13 @@ func TestCache(t *testing.T) { } if idx == 0 { // once we access the first entry, the top will change - verifyCacheState(t, st.IndexStore(), c, chunks[9].Address(), chunks[idx].Address(), 10) + verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[9].Address(), chunks[idx].Address(), 10) } else { - verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), 10) + verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), 10) } newOrder = append(newOrder, chunks[idx]) } - verifyCacheOrder(t, c, st.IndexStore(), newOrder...) + verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), newOrder...) }) t.Run("not in chunkstore returns error", func(t *testing.T) { @@ -276,14 +227,17 @@ func TestCache(t *testing.T) { }) t.Run("not in cache doesnt affect state", func(t *testing.T) { - state := c.State(st.IndexStore()) + state := c.State(st.ReadOnly().IndexStore()) for i := 0; i < 5; i++ { extraChunk := chunktest.GenerateTestRandomChunk() - err := st.ChunkStore().Put(context.TODO(), extraChunk) + err := st.Run(context.Background(), func(s transaction.Store) error { + return s.ChunkStore().Put(context.TODO(), extraChunk) + }) if err != nil { t.Fatal(err) } + readChunk, err := c.Getter(st).Get(context.TODO(), extraChunk.Address()) if err != nil { t.Fatal(err) @@ -291,15 +245,17 @@ func TestCache(t *testing.T) { if !readChunk.Equal(extraChunk) { t.Fatalf("incorrect chunk: %s", extraChunk.Address()) } - verifyCacheState(t, st.IndexStore(), c, state.Head, state.Tail, state.Size) + verifyCacheState(t, st.ReadOnly().IndexStore(), c, state.Head, state.Tail, state.Size) } }) }) t.Run("handle error", func(t *testing.T) { + t.Skip("rollback tests are not needed if the transaction is tested at the high level") + t.Parallel() st := newTestStorage(t) - c, err := cache.New(context.TODO(), st, 10) + c, err := cache.New(context.TODO(), st.ReadOnly().IndexStore(), 10) if err != nil { t.Fatal(err) } @@ -314,12 +270,15 @@ func TestCache(t *testing.T) { } // return error for state update, which occurs at the end of Get/Put operations retErr := errors.New("dummy error") - st.putFn = func(i storage.Item) error { - if i.Namespace() == "cacheOrderIndex" { - return retErr - } - return st.Storage.IndexStore().Put(i) - } + // st.putFn = func(i storage.Item) error { + // if i.Namespace() == "cacheOrderIndex" { + // return retErr + // } + + // return st.Run(context.Background(), func(s transaction.Store) error { + // return s.IndexStore().Put(i) + // }) + // } // on error the cache expects the overarching transactions to clean itself up // and undo any store updates. So here we only want to ensure the state is @@ -332,7 +291,7 @@ func TestCache(t *testing.T) { } // state should be preserved on failure - verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[4].Address(), 5) + verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[4].Address(), 5) }) t.Run("get error handling", func(t *testing.T) { @@ -342,7 +301,7 @@ func TestCache(t *testing.T) { } // state should be preserved on failure - verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[4].Address(), 5) + verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[4].Address(), 5) }) }) } @@ -351,7 +310,7 @@ func TestShallowCopy(t *testing.T) { t.Parallel() st := newTestStorage(t) - c, err := cache.New(context.Background(), st, 10) + c, err := cache.New(context.Background(), st.ReadOnly().IndexStore(), 10) if err != nil { t.Fatal(err) } @@ -362,7 +321,10 @@ func TestShallowCopy(t *testing.T) { // add the chunks to chunkstore. This simulates the reserve already populating // the chunkstore with chunks. for _, ch := range chunks { - err := st.ChunkStore().Put(context.Background(), ch) + + err := st.Run(context.Background(), func(s transaction.Store) error { + return s.ChunkStore().Put(context.Background(), ch) + }) if err != nil { t.Fatal(err) } @@ -374,8 +336,8 @@ func TestShallowCopy(t *testing.T) { t.Fatal(err) } - verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[9].Address(), 10) - verifyCacheOrder(t, c, st.IndexStore(), chunks...) + verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[9].Address(), 10) + verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks...) // move again, should be no-op err = c.ShallowCopy(context.Background(), st, chunksToMove...) @@ -383,8 +345,8 @@ func TestShallowCopy(t *testing.T) { t.Fatal(err) } - verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[9].Address(), 10) - verifyCacheOrder(t, c, st.IndexStore(), chunks...) + verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[9].Address(), 10) + verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks...) chunks1 := chunktest.GenerateTestRandomChunks(10) chunksToMove1 := make([]swarm.Address, 0, 10) @@ -392,7 +354,9 @@ func TestShallowCopy(t *testing.T) { // add the chunks to chunkstore. This simulates the reserve already populating // the chunkstore with chunks. for _, ch := range chunks1 { - err := st.ChunkStore().Put(context.Background(), ch) + err := st.Run(context.Background(), func(s transaction.Store) error { + return s.ChunkStore().Put(context.Background(), ch) + }) if err != nil { t.Fatal(err) } @@ -405,22 +369,22 @@ func TestShallowCopy(t *testing.T) { t.Fatal(err) } - verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks1[9].Address(), 20) - verifyCacheOrder(t, c, st.IndexStore(), append(chunks, chunks1...)...) + verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks1[9].Address(), 20) + verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), append(chunks, chunks1...)...) - err = c.RemoveOldest(context.Background(), st, st.ChunkStore(), 10) + err = c.RemoveOldest(context.Background(), st, 10) if err != nil { t.Fatal(err) } - verifyChunksDeleted(t, st.ChunkStore(), chunks...) + verifyChunksDeleted(t, st.ReadOnly().ChunkStore(), chunks...) } func TestShallowCopyOverCap(t *testing.T) { t.Parallel() st := newTestStorage(t) - c, err := cache.New(context.Background(), st, 10) + c, err := cache.New(context.Background(), st.ReadOnly().IndexStore(), 10) if err != nil { t.Fatal(err) } @@ -431,7 +395,10 @@ func TestShallowCopyOverCap(t *testing.T) { // add the chunks to chunkstore. This simulates the reserve already populating // the chunkstore with chunks. for _, ch := range chunks { - err := st.ChunkStore().Put(context.Background(), ch) + + err := st.Run(context.Background(), func(s transaction.Store) error { + return s.ChunkStore().Put(context.Background(), ch) + }) if err != nil { t.Fatal(err) } @@ -444,20 +411,20 @@ func TestShallowCopyOverCap(t *testing.T) { t.Fatal(err) } - verifyCacheState(t, st.IndexStore(), c, chunks[5].Address(), chunks[14].Address(), 10) - verifyCacheOrder(t, c, st.IndexStore(), chunks[5:15]...) + verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[5].Address(), chunks[14].Address(), 10) + verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks[5:15]...) - err = c.RemoveOldest(context.Background(), st, st.ChunkStore(), 5) + err = c.RemoveOldest(context.Background(), st, 5) if err != nil { t.Fatal(err) } - verifyChunksDeleted(t, st.ChunkStore(), chunks[5:10]...) + verifyChunksDeleted(t, st.ReadOnly().ChunkStore(), chunks[5:10]...) } func verifyCacheState( t *testing.T, - store storage.Store, + store storage.Reader, c *cache.Cache, expStart, expEnd swarm.Address, expCount uint64, @@ -475,7 +442,7 @@ func verifyCacheState( func verifyCacheOrder( t *testing.T, c *cache.Cache, - st storage.Store, + st storage.Reader, chs ...swarm.Chunk, ) { t.Helper() @@ -504,7 +471,7 @@ func verifyCacheOrder( func verifyChunksDeleted( t *testing.T, - chStore storage.ChunkStore, + chStore storage.ReadOnlyChunkStore, chs ...swarm.Chunk, ) { t.Helper() diff --git a/pkg/storer/internal/cache/export_test.go b/pkg/storer/internal/cache/export_test.go index 770bdd64690..6a28ac16127 100644 --- a/pkg/storer/internal/cache/export_test.go +++ b/pkg/storer/internal/cache/export_test.go @@ -35,7 +35,7 @@ type CacheState struct { Size uint64 } -func (c *Cache) State(store storage.Store) CacheState { +func (c *Cache) State(store storage.Reader) CacheState { state := CacheState{} state.Size = c.Size() runner := swarm.ZeroAddress @@ -66,7 +66,7 @@ func (c *Cache) State(store storage.Store) CacheState { } func (c *Cache) IterateOldToNew( - st storage.Store, + st storage.Reader, start, end swarm.Address, iterateFn func(ch swarm.Address) (bool, error), ) error { diff --git a/pkg/storer/internal/chunkstamp/chunkstamp.go b/pkg/storer/internal/chunkstamp/chunkstamp.go index 394ab386b73..ccb938f59a6 100644 --- a/pkg/storer/internal/chunkstamp/chunkstamp.go +++ b/pkg/storer/internal/chunkstamp/chunkstamp.go @@ -179,7 +179,7 @@ func LoadWithBatchID(s storage.Reader, namespace string, addr swarm.Address, bat // Store creates new or updated an existing stamp index // record related to the given namespace and chunk. -func Store(s storage.Writer, namespace string, chunk swarm.Chunk) error { +func Store(s storage.IndexStore, namespace string, chunk swarm.Chunk) error { item := &item{ namespace: []byte(namespace), address: chunk.Address(), @@ -192,7 +192,7 @@ func Store(s storage.Writer, namespace string, chunk swarm.Chunk) error { } // DeleteAll removes all swarm.Stamp related to the given address. -func DeleteAll(s storage.Store, namespace string, addr swarm.Address) error { +func DeleteAll(s storage.IndexStore, namespace string, addr swarm.Address) error { var stamps []swarm.Stamp err := s.Iterate( storage.Query{ @@ -227,7 +227,7 @@ func DeleteAll(s storage.Store, namespace string, addr swarm.Address) error { } // Delete removes a stamp associated with an chunk and batchID. -func Delete(s storage.Store, batch storage.Writer, namespace string, addr swarm.Address, batchId []byte) error { +func Delete(s storage.IndexStore, namespace string, addr swarm.Address, batchId []byte) error { stamp, err := LoadWithBatchID(s, namespace, addr, batchId) if err != nil { if errors.Is(err, storage.ErrNotFound) { @@ -235,7 +235,7 @@ func Delete(s storage.Store, batch storage.Writer, namespace string, addr swarm. } return err } - return batch.Delete(&item{ + return s.Delete(&item{ namespace: []byte(namespace), address: addr, stamp: stamp, diff --git a/pkg/storer/internal/chunkstamp/chunkstamp_test.go b/pkg/storer/internal/chunkstamp/chunkstamp_test.go index a09b1c111ac..ff4277fed80 100644 --- a/pkg/storer/internal/chunkstamp/chunkstamp_test.go +++ b/pkg/storer/internal/chunkstamp/chunkstamp_test.go @@ -5,6 +5,7 @@ package chunkstamp_test import ( + "context" "errors" "fmt" "testing" @@ -15,6 +16,7 @@ import ( chunktest "github.com/ethersphere/bee/pkg/storage/testing" "github.com/ethersphere/bee/pkg/storer/internal" "github.com/ethersphere/bee/pkg/storer/internal/chunkstamp" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" "github.com/google/go-cmp/cmp" ) @@ -135,12 +137,7 @@ func TestChunkStampItem(t *testing.T) { func TestStoreLoadDelete(t *testing.T) { t.Parallel() - ts, closer := internal.NewInmemStorage() - t.Cleanup(func() { - if err := closer(); err != nil { - t.Errorf("failed closing the storage: %v", err) - } - }) + ts := internal.NewInmemStorage() for i, chunk := range chunktest.GenerateTestRandomChunks(10) { ns := fmt.Sprintf("namespace_%d", i) @@ -153,15 +150,17 @@ func TestStoreLoadDelete(t *testing.T) { have := want.Clone() - if err := ts.IndexStore().Get(have); !errors.Is(err, storage.ErrNotFound) { + if err := ts.ReadOnly().IndexStore().Get(have); !errors.Is(err, storage.ErrNotFound) { t.Fatalf("Get(...): unexpected error: have: %v; want: %v", err, storage.ErrNotFound) } - if err := chunkstamp.Store(ts.IndexStore(), ns, chunk); err != nil { + if err := ts.Run(context.Background(), func(s transaction.Store) error { + return chunkstamp.Store(s.IndexStore(), ns, chunk) + }); err != nil { t.Fatalf("Store(...): unexpected error: %v", err) } - if err := ts.IndexStore().Get(have); err != nil { + if err := ts.ReadOnly().IndexStore().Get(have); err != nil { t.Fatalf("Get(...): unexpected error: %v", err) } @@ -177,7 +176,7 @@ func TestStoreLoadDelete(t *testing.T) { t.Run("load stored chunk stamp", func(t *testing.T) { want := chunk.Stamp() - have, err := chunkstamp.Load(ts.IndexStore(), ns, chunk.Address()) + have, err := chunkstamp.Load(ts.ReadOnly().IndexStore(), ns, chunk.Address()) if err != nil { t.Fatalf("Load(...): unexpected error: %v", err) } @@ -190,7 +189,7 @@ func TestStoreLoadDelete(t *testing.T) { t.Run("load stored chunk stamp with batch id", func(t *testing.T) { want := chunk.Stamp() - have, err := chunkstamp.LoadWithBatchID(ts.IndexStore(), ns, chunk.Address(), chunk.Stamp().BatchID()) + have, err := chunkstamp.LoadWithBatchID(ts.ReadOnly().IndexStore(), ns, chunk.Address(), chunk.Stamp().BatchID()) if err != nil { t.Fatalf("LoadWithBatchID(...): unexpected error: %v", err) } @@ -202,16 +201,20 @@ func TestStoreLoadDelete(t *testing.T) { t.Run("delete stored stamp", func(t *testing.T) { if i%2 == 0 { - if err := chunkstamp.Delete(ts.IndexStore(), ts.IndexStore(), ns, chunk.Address(), chunk.Stamp().BatchID()); err != nil { + if err := ts.Run(context.Background(), func(s transaction.Store) error { + return chunkstamp.Delete(s.IndexStore(), ns, chunk.Address(), chunk.Stamp().BatchID()) + }); err != nil { t.Fatalf("Delete(...): unexpected error: %v", err) } } else { - if err := chunkstamp.DeleteWithStamp(ts.IndexStore(), ns, chunk.Address(), chunk.Stamp()); err != nil { + if err := ts.Run(context.Background(), func(s transaction.Store) error { + return chunkstamp.DeleteWithStamp(s.IndexStore(), ns, chunk.Address(), chunk.Stamp()) + }); err != nil { t.Fatalf("DeleteWithStamp(...): unexpected error: %v", err) } } - have, err := chunkstamp.LoadWithBatchID(ts.IndexStore(), ns, chunk.Address(), chunk.Stamp().BatchID()) + have, err := chunkstamp.LoadWithBatchID(ts.ReadOnly().IndexStore(), ns, chunk.Address(), chunk.Stamp().BatchID()) if !errors.Is(err, storage.ErrNotFound) { t.Fatalf("Load(...): unexpected error: %v", err) } @@ -221,15 +224,20 @@ func TestStoreLoadDelete(t *testing.T) { }) t.Run("delete all stored stamp index", func(t *testing.T) { - if err := chunkstamp.Store(ts.IndexStore(), ns, chunk); err != nil { + + if err := ts.Run(context.Background(), func(s transaction.Store) error { + return chunkstamp.Store(s.IndexStore(), ns, chunk) + }); err != nil { t.Fatalf("Store(...): unexpected error: %v", err) } - if err := chunkstamp.DeleteAll(ts.IndexStore(), ns, chunk.Address()); err != nil { + if err := ts.Run(context.Background(), func(s transaction.Store) error { + return chunkstamp.DeleteAll(s.IndexStore(), ns, chunk.Address()) + }); err != nil { t.Fatalf("DeleteAll(...): unexpected error: %v", err) } - have, err := chunkstamp.Load(ts.IndexStore(), ns, chunk.Address()) + have, err := chunkstamp.Load(ts.ReadOnly().IndexStore(), ns, chunk.Address()) if !errors.Is(err, storage.ErrNotFound) { t.Fatalf("Load(...): unexpected error: %v", err) } @@ -238,7 +246,7 @@ func TestStoreLoadDelete(t *testing.T) { } cnt := 0 - err = ts.IndexStore().Iterate( + err = ts.ReadOnly().IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return new(chunkstamp.Item) diff --git a/pkg/storer/internal/chunkstore/chunkstore.go b/pkg/storer/internal/chunkstore/chunkstore.go index f1af1098944..eaa1b4d789d 100644 --- a/pkg/storer/internal/chunkstore/chunkstore.go +++ b/pkg/storer/internal/chunkstore/chunkstore.go @@ -12,7 +12,7 @@ import ( "time" "github.com/ethersphere/bee/pkg/sharky" - "github.com/ethersphere/bee/pkg/storage" + storage "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage/storageutil" "github.com/ethersphere/bee/pkg/swarm" "golang.org/x/exp/slices" @@ -33,91 +33,6 @@ const RetrievalIndexItemSize = swarm.HashSize + 8 + sharky.LocationSize + 4 var _ storage.Item = (*RetrievalIndexItem)(nil) -// RetrievalIndexItem is the index which gives us the sharky location from the swarm.Address. -// The RefCnt stores the reference of each time a Put operation is issued on this Address. -type RetrievalIndexItem struct { - Address swarm.Address - Timestamp uint64 - Location sharky.Location - RefCnt uint32 -} - -func (r *RetrievalIndexItem) ID() string { return r.Address.ByteString() } - -func (RetrievalIndexItem) Namespace() string { return "retrievalIdx" } - -// Stored in bytes as: -// |--Address(32)--|--Timestamp(8)--|--Location(7)--|--RefCnt(4)--| -func (r *RetrievalIndexItem) Marshal() ([]byte, error) { - if r.Address.IsZero() { - return nil, errMarshalInvalidRetrievalIndexAddress - } - - buf := make([]byte, RetrievalIndexItemSize) - i := 0 - - locBuf, err := r.Location.MarshalBinary() - if err != nil { - return nil, errMarshalInvalidRetrievalIndexLocation - } - - copy(buf[i:swarm.HashSize], r.Address.Bytes()) - i += swarm.HashSize - - binary.LittleEndian.PutUint64(buf[i:i+8], r.Timestamp) - i += 8 - - copy(buf[i:i+sharky.LocationSize], locBuf) - i += sharky.LocationSize - - binary.LittleEndian.PutUint32(buf[i:], r.RefCnt) - - return buf, nil -} - -func (r *RetrievalIndexItem) Unmarshal(buf []byte) error { - if len(buf) != RetrievalIndexItemSize { - return errUnmarshalInvalidRetrievalIndexSize - } - - i := 0 - ni := new(RetrievalIndexItem) - - ni.Address = swarm.NewAddress(slices.Clone(buf[i : i+swarm.HashSize])) - i += swarm.HashSize - - ni.Timestamp = binary.LittleEndian.Uint64(buf[i : i+8]) - i += 8 - - loc := new(sharky.Location) - if err := loc.UnmarshalBinary(buf[i : i+sharky.LocationSize]); err != nil { - return errUnmarshalInvalidRetrievalIndexLocationBytes - } - ni.Location = *loc - i += sharky.LocationSize - - ni.RefCnt = binary.LittleEndian.Uint32(buf[i:]) - - *r = *ni - return nil -} - -func (r *RetrievalIndexItem) Clone() storage.Item { - if r == nil { - return nil - } - return &RetrievalIndexItem{ - Address: r.Address.Clone(), - Timestamp: r.Timestamp, - Location: r.Location, - RefCnt: r.RefCnt, - } -} - -func (r RetrievalIndexItem) String() string { - return storageutil.JoinFields(r.Namespace(), r.ID()) -} - // Sharky provides an abstraction for the sharky.Store operations used in the // chunkstore. This allows us to be more flexible in passing in the sharky instance // to chunkstore. For eg, check the TxChunkStore implementation in this pkg. @@ -127,19 +42,19 @@ type Sharky interface { Release(context.Context, sharky.Location) error } -type ChunkStoreWrapper struct { - store storage.Store - sharky Sharky -} - -func New(store storage.Store, sharky Sharky) *ChunkStoreWrapper { - return &ChunkStoreWrapper{store: store, sharky: sharky} +func Get(ctx context.Context, r storage.Reader, s storage.Sharky, addr swarm.Address) (swarm.Chunk, error) { + rIdx := &RetrievalIndexItem{Address: addr} + err := r.Get(rIdx) + if err != nil { + return nil, fmt.Errorf("chunk store: failed reading retrievalIndex for address %s: %w", addr, err) + } + return readChunk(ctx, r, s, rIdx) } // helper to read chunk from retrievalIndex. -func (c *ChunkStoreWrapper) readChunk(ctx context.Context, rIdx *RetrievalIndexItem) (swarm.Chunk, error) { +func readChunk(ctx context.Context, r storage.Reader, s storage.Sharky, rIdx *RetrievalIndexItem) (swarm.Chunk, error) { buf := make([]byte, rIdx.Location.Length) - err := c.sharky.Read(ctx, rIdx.Location, buf) + err := s.Read(ctx, rIdx.Location, buf) if err != nil { return nil, fmt.Errorf( "chunk store: failed reading location: %v for chunk %s from sharky: %w", @@ -150,31 +65,22 @@ func (c *ChunkStoreWrapper) readChunk(ctx context.Context, rIdx *RetrievalIndexI return swarm.NewChunk(rIdx.Address, buf), nil } -func (c *ChunkStoreWrapper) Get(ctx context.Context, addr swarm.Address) (swarm.Chunk, error) { - rIdx := &RetrievalIndexItem{Address: addr} - err := c.store.Get(rIdx) - if err != nil { - return nil, fmt.Errorf("chunk store: failed reading retrievalIndex for address %s: %w", addr, err) - } - return c.readChunk(ctx, rIdx) -} - -func (c *ChunkStoreWrapper) Has(_ context.Context, addr swarm.Address) (bool, error) { - return c.store.Has(&RetrievalIndexItem{Address: addr}) +func Has(_ context.Context, r storage.Reader, addr swarm.Address) (bool, error) { + return r.Has(&RetrievalIndexItem{Address: addr}) } -func (c *ChunkStoreWrapper) Put(ctx context.Context, ch swarm.Chunk) error { +func Put(ctx context.Context, s storage.IndexStore, sh storage.Sharky, ch swarm.Chunk) error { var ( rIdx = &RetrievalIndexItem{Address: ch.Address()} loc sharky.Location found = true ) - err := c.store.Get(rIdx) + err := s.Get(rIdx) switch { case errors.Is(err, storage.ErrNotFound): // if this is the first instance of this address, we should store the chunk // in sharky and create the new indexes. - loc, err = c.sharky.Write(ctx, ch.Data()) + loc, err = sh.Write(ctx, ch.Data()) if err != nil { return fmt.Errorf("chunk store: write to sharky failed: %w", err) } @@ -188,7 +94,7 @@ func (c *ChunkStoreWrapper) Put(ctx context.Context, ch swarm.Chunk) error { rIdx.RefCnt++ err = func() error { - err = c.store.Put(rIdx) + err = s.Put(rIdx) if err != nil { return fmt.Errorf("chunk store: failed to update retrievalIndex: %w", err) } @@ -198,16 +104,16 @@ func (c *ChunkStoreWrapper) Put(ctx context.Context, ch swarm.Chunk) error { if err != nil && !found { return errors.Join( err, - c.sharky.Release(context.Background(), loc), + sh.Release(context.Background(), loc), ) } return nil } -func (c *ChunkStoreWrapper) Delete(ctx context.Context, addr swarm.Address) error { +func Delete(ctx context.Context, s storage.IndexStore, sh storage.Sharky, addr swarm.Address) error { rIdx := &RetrievalIndexItem{Address: addr} - err := c.store.Get(rIdx) + err := s.Get(rIdx) switch { case errors.Is(err, storage.ErrNotFound): return nil @@ -218,7 +124,7 @@ func (c *ChunkStoreWrapper) Delete(ctx context.Context, addr swarm.Address) erro } if rIdx.RefCnt > 0 { // If there are more references for this we don't delete it from sharky. - err = c.store.Put(rIdx) + err = s.Put(rIdx) if err != nil { return fmt.Errorf("chunk store: failed updating retrievalIndex for address %s: %w", addr, err) } @@ -226,14 +132,14 @@ func (c *ChunkStoreWrapper) Delete(ctx context.Context, addr swarm.Address) erro } // Delete the chunk. - err = c.sharky.Release(ctx, rIdx.Location) + err = sh.Release(ctx, rIdx.Location) if err != nil { return fmt.Errorf( "chunk store: failed to release sharky slot %v for address %s: %w", rIdx.Location, rIdx.Address, err, ) } - err = c.store.Delete(rIdx) + err = s.Delete(rIdx) if err != nil { return fmt.Errorf("chunk store: failed to delete retrievalIndex for address %s: %w", addr, err) } @@ -241,13 +147,13 @@ func (c *ChunkStoreWrapper) Delete(ctx context.Context, addr swarm.Address) erro return nil } -func (c *ChunkStoreWrapper) Iterate(ctx context.Context, fn storage.IterateChunkFn) error { - return c.store.Iterate( +func Iterate(ctx context.Context, s storage.IndexStore, sh storage.Sharky, fn storage.IterateChunkFn) error { + return s.Iterate( storage.Query{ Factory: func() storage.Item { return new(RetrievalIndexItem) }, }, func(r storage.Result) (bool, error) { - ch, err := c.readChunk(ctx, r.Entry.(*RetrievalIndexItem)) + ch, err := readChunk(ctx, s, sh, r.Entry.(*RetrievalIndexItem)) if err != nil { return true, err } @@ -256,19 +162,155 @@ func (c *ChunkStoreWrapper) Iterate(ctx context.Context, fn storage.IterateChunk ) } -func (c *ChunkStoreWrapper) Close() error { - return c.store.Close() -} - -func IterateChunkEntries(st storage.Store, fn func(swarm.Address, bool) (bool, error)) error { +func IterateChunkEntries(st storage.Reader, fn func(swarm.Address, bool) (bool, error)) error { return st.Iterate( storage.Query{ Factory: func() storage.Item { return new(RetrievalIndexItem) }, }, func(r storage.Result) (bool, error) { - addr := r.Entry.(*RetrievalIndexItem).Address - isShared := r.Entry.(*RetrievalIndexItem).RefCnt > 1 + item := r.Entry.(*RetrievalIndexItem) + addr := item.Address + isShared := item.RefCnt > 1 return fn(addr, isShared) }, ) } + +type LocationResult struct { + Err error + Location sharky.Location +} + +type IterateResult struct { + Err error + Item *RetrievalIndexItem +} + +// IterateLocations iterates over entire retrieval index and plucks only sharky location. +func IterateLocations( + ctx context.Context, + st storage.Reader, + locationResultC chan<- LocationResult, +) { + go func() { + defer close(locationResultC) + + err := st.Iterate(storage.Query{ + Factory: func() storage.Item { return new(RetrievalIndexItem) }, + }, func(r storage.Result) (bool, error) { + entry := r.Entry.(*RetrievalIndexItem) + result := LocationResult{Location: entry.Location} + + select { + case <-ctx.Done(): + return true, ctx.Err() + case locationResultC <- result: + } + + return false, nil + }) + if err != nil { + result := LocationResult{Err: fmt.Errorf("iterate retrieval index error: %w", err)} + + select { + case <-ctx.Done(): + case locationResultC <- result: + } + } + }() +} + +// Iterate iterates over entire retrieval index with a call back. +func IterateItems(st storage.Store, callBackFunc func(*RetrievalIndexItem) error) error { + return st.Iterate(storage.Query{ + Factory: func() storage.Item { return new(RetrievalIndexItem) }, + }, func(r storage.Result) (bool, error) { + entry := r.Entry.(*RetrievalIndexItem) + return false, callBackFunc(entry) + }) +} + +// RetrievalIndexItem is the index which gives us the sharky location from the swarm.Address. +// The RefCnt stores the reference of each time a Put operation is issued on this Address. +type RetrievalIndexItem struct { + Address swarm.Address + Timestamp uint64 + Location sharky.Location + RefCnt uint32 +} + +func (r *RetrievalIndexItem) ID() string { return r.Address.ByteString() } + +func (RetrievalIndexItem) Namespace() string { return "retrievalIdx" } + +// Stored in bytes as: +// |--Address(32)--|--Timestamp(8)--|--Location(7)--|--RefCnt(4)--| +func (r *RetrievalIndexItem) Marshal() ([]byte, error) { + if r.Address.IsZero() { + return nil, errMarshalInvalidRetrievalIndexAddress + } + + buf := make([]byte, RetrievalIndexItemSize) + i := 0 + + locBuf, err := r.Location.MarshalBinary() + if err != nil { + return nil, errMarshalInvalidRetrievalIndexLocation + } + + copy(buf[i:swarm.HashSize], r.Address.Bytes()) + i += swarm.HashSize + + binary.LittleEndian.PutUint64(buf[i:i+8], r.Timestamp) + i += 8 + + copy(buf[i:i+sharky.LocationSize], locBuf) + i += sharky.LocationSize + + binary.LittleEndian.PutUint32(buf[i:], r.RefCnt) + + return buf, nil +} + +func (r *RetrievalIndexItem) Unmarshal(buf []byte) error { + if len(buf) != RetrievalIndexItemSize { + return errUnmarshalInvalidRetrievalIndexSize + } + + i := 0 + ni := new(RetrievalIndexItem) + + ni.Address = swarm.NewAddress(slices.Clone(buf[i : i+swarm.HashSize])) + i += swarm.HashSize + + ni.Timestamp = binary.LittleEndian.Uint64(buf[i : i+8]) + i += 8 + + loc := new(sharky.Location) + if err := loc.UnmarshalBinary(buf[i : i+sharky.LocationSize]); err != nil { + return errUnmarshalInvalidRetrievalIndexLocationBytes + } + ni.Location = *loc + i += sharky.LocationSize + + ni.RefCnt = binary.LittleEndian.Uint32(buf[i:]) + + *r = *ni + return nil +} + +func (r *RetrievalIndexItem) Clone() storage.Item { + if r == nil { + return nil + } + return &RetrievalIndexItem{ + Address: r.Address.Clone(), + Timestamp: r.Timestamp, + Location: r.Location, + RefCnt: r.RefCnt, + } +} + +func (r RetrievalIndexItem) String() string { + return storageutil.JoinFields(r.Namespace(), r.ID()) +} diff --git a/pkg/storer/internal/chunkstore/chunkstore_test.go b/pkg/storer/internal/chunkstore/chunkstore_test.go index ce135ebaceb..c4092cedbf2 100644 --- a/pkg/storer/internal/chunkstore/chunkstore_test.go +++ b/pkg/storer/internal/chunkstore/chunkstore_test.go @@ -19,8 +19,10 @@ import ( "github.com/ethersphere/bee/pkg/storage/storagetest" chunktest "github.com/ethersphere/bee/pkg/storage/testing" "github.com/ethersphere/bee/pkg/storer/internal/chunkstore" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" "github.com/spf13/afero" + "github.com/stretchr/testify/assert" ) func TestRetrievalIndexItem(t *testing.T) { @@ -117,22 +119,21 @@ func TestChunkStore(t *testing.T) { t.Fatal(err) } + st := transaction.NewStorage(sharky, store) + t.Cleanup(func() { if err := store.Close(); err != nil { t.Errorf("inmem store close failed: %v", err) } - if err := sharky.Close(); err != nil { - t.Errorf("inmem sharky close failed: %v", err) - } }) - st := chunkstore.New(store, sharky) - testChunks := chunktest.GenerateTestRandomChunks(50) t.Run("put chunks", func(t *testing.T) { for _, ch := range testChunks { - err := st.Put(context.TODO(), ch) + err := st.Run(context.Background(), func(s transaction.Store) error { + return s.ChunkStore().Put(context.TODO(), ch) + }) if err != nil { t.Fatalf("failed putting new chunk: %v", err) } @@ -143,7 +144,9 @@ func TestChunkStore(t *testing.T) { for idx, ch := range testChunks { // only put duplicates for odd numbered indexes if idx%2 != 0 { - err := st.Put(context.TODO(), ch) + err := st.Run(context.Background(), func(s transaction.Store) error { + return s.ChunkStore().Put(context.TODO(), ch) + }) if err != nil { t.Fatalf("failed putting new chunk: %v", err) } @@ -153,7 +156,7 @@ func TestChunkStore(t *testing.T) { t.Run("get chunks", func(t *testing.T) { for _, ch := range testChunks { - readCh, err := st.Get(context.TODO(), ch.Address()) + readCh, err := st.ReadOnly().ChunkStore().Get(context.TODO(), ch.Address()) if err != nil { t.Fatalf("failed getting chunk: %v", err) } @@ -165,7 +168,7 @@ func TestChunkStore(t *testing.T) { t.Run("has chunks", func(t *testing.T) { for _, ch := range testChunks { - exists, err := st.Has(context.TODO(), ch.Address()) + exists, err := st.ReadOnly().ChunkStore().Has(context.TODO(), ch.Address()) if err != nil { t.Fatalf("failed getting chunk: %v", err) } @@ -177,7 +180,7 @@ func TestChunkStore(t *testing.T) { t.Run("iterate chunks", func(t *testing.T) { count := 0 - err := st.Iterate(context.TODO(), func(_ swarm.Chunk) (bool, error) { + err := chunkstore.Iterate(context.TODO(), store, sharky, func(_ swarm.Chunk) (bool, error) { count++ return false, nil }) @@ -213,7 +216,9 @@ func TestChunkStore(t *testing.T) { for idx, ch := range testChunks { // Delete all even numbered indexes along with 0 if idx%2 == 0 { - err := st.Delete(context.TODO(), ch.Address()) + err := st.Run(context.Background(), func(s transaction.Store) error { + return s.ChunkStore().Delete(context.TODO(), ch.Address()) + }) if err != nil { t.Fatalf("failed deleting chunk: %v", err) } @@ -225,11 +230,11 @@ func TestChunkStore(t *testing.T) { for idx, ch := range testChunks { if idx%2 == 0 { // Check even numbered indexes are deleted - _, err := st.Get(context.TODO(), ch.Address()) + _, err := st.ReadOnly().ChunkStore().Get(context.TODO(), ch.Address()) if !errors.Is(err, storage.ErrNotFound) { t.Fatalf("expected storage not found error found: %v", err) } - found, err := st.Has(context.TODO(), ch.Address()) + found, err := st.ReadOnly().ChunkStore().Has(context.TODO(), ch.Address()) if err != nil { t.Fatalf("unexpected error in Has: %v", err) } @@ -238,14 +243,14 @@ func TestChunkStore(t *testing.T) { } } else { // Check rest of the entries are intact - readCh, err := st.Get(context.TODO(), ch.Address()) + readCh, err := st.ReadOnly().ChunkStore().Get(context.TODO(), ch.Address()) if err != nil { t.Fatalf("failed getting chunk: %v", err) } if !readCh.Equal(ch) { t.Fatal("read chunk doesnt match") } - exists, err := st.Has(context.TODO(), ch.Address()) + exists, err := st.ReadOnly().ChunkStore().Has(context.TODO(), ch.Address()) if err != nil { t.Fatalf("failed getting chunk: %v", err) } @@ -258,7 +263,7 @@ func TestChunkStore(t *testing.T) { t.Run("iterate chunks after delete", func(t *testing.T) { count := 0 - err := st.Iterate(context.TODO(), func(_ swarm.Chunk) (bool, error) { + err := chunkstore.Iterate(context.TODO(), store, sharky, func(_ swarm.Chunk) (bool, error) { count++ return false, nil }) @@ -274,7 +279,9 @@ func TestChunkStore(t *testing.T) { t.Run("delete duplicate chunks", func(t *testing.T) { for idx, ch := range testChunks { if idx%2 != 0 { - err := st.Delete(context.TODO(), ch.Address()) + err := st.Run(context.Background(), func(s transaction.Store) error { + return s.ChunkStore().Delete(context.TODO(), ch.Address()) + }) if err != nil { t.Fatalf("failed deleting chunk: %v", err) } @@ -285,14 +292,14 @@ func TestChunkStore(t *testing.T) { t.Run("check chunks still exists", func(t *testing.T) { for idx, ch := range testChunks { if idx%2 != 0 { - readCh, err := st.Get(context.TODO(), ch.Address()) + readCh, err := st.ReadOnly().ChunkStore().Get(context.TODO(), ch.Address()) if err != nil { t.Fatalf("failed getting chunk: %v", err) } if !readCh.Equal(ch) { t.Fatal("read chunk doesnt match") } - exists, err := st.Has(context.TODO(), ch.Address()) + exists, err := st.ReadOnly().ChunkStore().Has(context.TODO(), ch.Address()) if err != nil { t.Fatalf("failed getting chunk: %v", err) } @@ -306,7 +313,9 @@ func TestChunkStore(t *testing.T) { t.Run("delete duplicate chunks again", func(t *testing.T) { for idx, ch := range testChunks { if idx%2 != 0 { - err := st.Delete(context.TODO(), ch.Address()) + err := st.Run(context.Background(), func(s transaction.Store) error { + return s.ChunkStore().Delete(context.TODO(), ch.Address()) + }) if err != nil { t.Fatalf("failed deleting chunk: %v", err) } @@ -316,7 +325,7 @@ func TestChunkStore(t *testing.T) { t.Run("check all are deleted", func(t *testing.T) { count := 0 - err := st.Iterate(context.TODO(), func(_ swarm.Chunk) (bool, error) { + err := chunkstore.Iterate(context.TODO(), store, sharky, func(_ swarm.Chunk) (bool, error) { count++ return false, nil }) @@ -335,3 +344,99 @@ func TestChunkStore(t *testing.T) { } }) } + +// TestIterateLocations asserts that all stored chunks +// are retrievable by sharky using IterateLocations. +func TestIterateLocations(t *testing.T) { + t.Parallel() + + const chunksCount = 50 + + st := makeStorage(t) + testChunks := chunktest.GenerateTestRandomChunks(chunksCount) + ctx := context.Background() + + for _, ch := range testChunks { + assert.NoError(t, st.Run(context.Background(), func(s transaction.Store) error { return s.ChunkStore().Put(ctx, ch) })) + } + + readCount := 0 + respC := make(chan chunkstore.LocationResult, chunksCount) + chunkstore.IterateLocations(ctx, st.ReadOnly().IndexStore(), respC) + + for resp := range respC { + assert.NoError(t, resp.Err) + + buf := make([]byte, resp.Location.Length) + assert.NoError(t, st.sharky.Read(ctx, resp.Location, buf)) + + assert.True(t, swarm.ContainsChunkWithData(testChunks, buf)) + readCount++ + } + + assert.Equal(t, chunksCount, readCount) +} + +// TestIterateLocations_Stop asserts that IterateLocations will +// stop iteration when context is canceled. +func TestIterateLocations_Stop(t *testing.T) { + t.Parallel() + + const chunksCount = 50 + const stopReadAt = 10 + + st := makeStorage(t) + testChunks := chunktest.GenerateTestRandomChunks(chunksCount) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + for _, ch := range testChunks { + assert.NoError(t, st.Run(context.Background(), func(s transaction.Store) error { return s.ChunkStore().Put(ctx, ch) })) + } + + readCount := 0 + respC := make(chan chunkstore.LocationResult) + chunkstore.IterateLocations(ctx, st.ReadOnly().IndexStore(), respC) + + for resp := range respC { + if resp.Err != nil { + assert.ErrorIs(t, resp.Err, context.Canceled) + break + } + + buf := make([]byte, resp.Location.Length) + if err := st.sharky.Read(ctx, resp.Location, buf); err != nil { + assert.ErrorIs(t, err, context.Canceled) + break + } + + assert.True(t, swarm.ContainsChunkWithData(testChunks, buf)) + readCount++ + + if readCount == stopReadAt { + cancel() + } + } + + assert.InDelta(t, stopReadAt, readCount, 1) +} + +type chunkStore struct { + transaction.Storage + sharky *sharky.Store +} + +func makeStorage(t *testing.T) *chunkStore { + t.Helper() + + store := inmemstore.New() + sharky, err := sharky.New(&memFS{Fs: afero.NewMemMapFs()}, 1, swarm.SocMaxChunkSize) + assert.NoError(t, err) + + t.Cleanup(func() { + assert.NoError(t, store.Close()) + assert.NoError(t, sharky.Close()) + }) + + return &chunkStore{transaction.NewStorage(sharky, store), sharky} +} diff --git a/pkg/storer/internal/chunkstore/helpers.go b/pkg/storer/internal/chunkstore/helpers.go deleted file mode 100644 index 8e0269144eb..00000000000 --- a/pkg/storer/internal/chunkstore/helpers.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2023 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package chunkstore - -import ( - "context" - "fmt" - - "github.com/ethersphere/bee/pkg/sharky" - storage "github.com/ethersphere/bee/pkg/storage" -) - -type LocationResult struct { - Err error - Location sharky.Location -} - -type IterateResult struct { - Err error - Item *RetrievalIndexItem -} - -// IterateLocations iterates over entire retrieval index and plucks only sharky location. -func IterateLocations( - ctx context.Context, - st storage.Store, - locationResultC chan<- LocationResult, -) { - go func() { - defer close(locationResultC) - - err := st.Iterate(storage.Query{ - Factory: func() storage.Item { return new(RetrievalIndexItem) }, - }, func(r storage.Result) (bool, error) { - entry := r.Entry.(*RetrievalIndexItem) - result := LocationResult{Location: entry.Location} - - select { - case <-ctx.Done(): - return true, ctx.Err() - case locationResultC <- result: - } - - return false, nil - }) - if err != nil { - result := LocationResult{Err: fmt.Errorf("iterate retrieval index error: %w", err)} - - select { - case <-ctx.Done(): - case locationResultC <- result: - } - } - }() -} - -// Iterate iterates over entire retrieval index with a call back. -func Iterate(st storage.Store, callBackFunc func(*RetrievalIndexItem) error) error { - return st.Iterate(storage.Query{ - Factory: func() storage.Item { return new(RetrievalIndexItem) }, - }, func(r storage.Result) (bool, error) { - entry := r.Entry.(*RetrievalIndexItem) - return false, callBackFunc(entry) - }) -} diff --git a/pkg/storer/internal/chunkstore/helpers_test.go b/pkg/storer/internal/chunkstore/helpers_test.go deleted file mode 100644 index 5add6228f9e..00000000000 --- a/pkg/storer/internal/chunkstore/helpers_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2023 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package chunkstore_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/ethersphere/bee/pkg/sharky" - storage "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storage/inmemstore" - chunktest "github.com/ethersphere/bee/pkg/storage/testing" - "github.com/ethersphere/bee/pkg/storer/internal/chunkstore" - "github.com/ethersphere/bee/pkg/swarm" - "github.com/spf13/afero" -) - -// TestIterateLocations asserts that all stored chunks -// are retrievable by sharky using IterateLocations. -func TestIterateLocations(t *testing.T) { - t.Parallel() - - const chunksCount = 50 - - cs := makeChunkStore(t) - testChunks := chunktest.GenerateTestRandomChunks(chunksCount) - ctx := context.Background() - - for _, ch := range testChunks { - assert.NoError(t, cs.chunkStore.Put(ctx, ch)) - } - - readCount := 0 - respC := make(chan chunkstore.LocationResult, chunksCount) - chunkstore.IterateLocations(ctx, cs.store, respC) - - for resp := range respC { - assert.NoError(t, resp.Err) - - buf := make([]byte, resp.Location.Length) - assert.NoError(t, cs.sharky.Read(ctx, resp.Location, buf)) - - assert.True(t, swarm.ContainsChunkWithData(testChunks, buf)) - readCount++ - } - - assert.Equal(t, chunksCount, readCount) -} - -// TestIterateLocations_Stop asserts that IterateLocations will -// stop iteration when context is canceled. -func TestIterateLocations_Stop(t *testing.T) { - t.Parallel() - - const chunksCount = 50 - const stopReadAt = 10 - - cs := makeChunkStore(t) - testChunks := chunktest.GenerateTestRandomChunks(chunksCount) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - for _, ch := range testChunks { - assert.NoError(t, cs.chunkStore.Put(ctx, ch)) - } - - readCount := 0 - respC := make(chan chunkstore.LocationResult) - chunkstore.IterateLocations(ctx, cs.store, respC) - - for resp := range respC { - if resp.Err != nil { - assert.ErrorIs(t, resp.Err, context.Canceled) - break - } - - buf := make([]byte, resp.Location.Length) - if err := cs.sharky.Read(ctx, resp.Location, buf); err != nil { - assert.ErrorIs(t, err, context.Canceled) - break - } - - assert.True(t, swarm.ContainsChunkWithData(testChunks, buf)) - readCount++ - - if readCount == stopReadAt { - cancel() - } - } - - assert.InDelta(t, stopReadAt, readCount, 1) -} - -type chunkStore struct { - store storage.Store - sharky *sharky.Store - chunkStore storage.ChunkStore -} - -func makeChunkStore(t *testing.T) *chunkStore { - t.Helper() - - store := inmemstore.New() - sharky, err := sharky.New(&memFS{Fs: afero.NewMemMapFs()}, 1, swarm.SocMaxChunkSize) - assert.NoError(t, err) - - t.Cleanup(func() { - assert.NoError(t, store.Close()) - assert.NoError(t, sharky.Close()) - }) - - return &chunkStore{ - store: store, - sharky: sharky, - chunkStore: chunkstore.New(store, sharky), - } -} diff --git a/pkg/storer/internal/chunkstore/recovery.go b/pkg/storer/internal/chunkstore/recovery.go deleted file mode 100644 index fad8d47e222..00000000000 --- a/pkg/storer/internal/chunkstore/recovery.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2023 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package chunkstore - -import ( - "context" - "fmt" - "slices" - - "github.com/ethersphere/bee/pkg/log" - "github.com/ethersphere/bee/pkg/sharky" - "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storage/storageutil" - "github.com/vmihailenco/msgpack/v5" -) - -var _ storage.Item = (*pendingTx)(nil) - -// pendingTx is a storage.Item that holds a batch of operations. -type pendingTx struct { - key string - val []byte -} - -// ID implements storage.Item. -func (p *pendingTx) ID() string { - return p.key -} - -// Namespace implements storage.Item. -func (p *pendingTx) Namespace() string { - return "pending-chunkstore-tx" -} - -// Unmarshal implements storage.Item. -func (p *pendingTx) Unmarshal(bytes []byte) error { - p.val = slices.Clone(bytes) - return nil -} - -// Marshal implements storage.Item. -func (p *pendingTx) Marshal() ([]byte, error) { - return p.val, nil -} - -// Clone implements storage.Item. -func (p *pendingTx) Clone() storage.Item { - if p == nil { - return nil - } - return &pendingTx{ - key: p.key, - val: slices.Clone(p.val), - } -} - -// String implements storage.Item. -func (p *pendingTx) String() string { - return storageutil.JoinFields(p.Namespace(), p.ID()) -} - -// Recover attempts to recover from a previous crash -// by reverting all uncommitted transactions. -func (cs *TxChunkStoreWrapper) Recover() error { - logger := log.NewLogger("node").WithName("tx_chunkstore_recovery").Register() // "node" - copies the node.LoggerName in order to avoid circular import. - - if rr, ok := cs.txStore.(storage.Recoverer); ok { - if err := rr.Recover(); err != nil { - return fmt.Errorf("chunkstore: recovery: %w", err) - } - } - - var found bool - - logger.Info("checking for uncommitted transactions") - err := cs.txStore.Iterate(storage.Query{ - Factory: func() storage.Item { return new(pendingTx) }, - ItemProperty: storage.QueryItem, - }, func(r storage.Result) (bool, error) { - found = true - - item := r.Entry.(*pendingTx) - item.key = r.ID - - var locations []sharky.Location - if err := msgpack.Unmarshal(item.val, &locations); err != nil { - return true, fmt.Errorf("location unmarshal failed: %w", err) - } - - ctx := context.Background() - logger.Info("sharky unreleased location found", "count", len(locations), "id", r.ID) - for _, location := range locations { - logger.Debug("releasing location", "location", location) - if err := cs.txSharky.Sharky.Release(ctx, location); err != nil { - logger.Debug("unable to release location", "location", location, "err", err) - return true, fmt.Errorf("unable to release location %v for %s: %w", location, r.ID, err) - } - } - logger.Info("sharky unreleased location released", "id", r.ID) - - logger.Info("cleaning uncommitted transaction log", "id", r.ID) - if err := cs.txStore.Delete(r.Entry); err != nil { - logger.Debug("unable to delete unreleased location", "id", r.ID, "err", err) - return true, fmt.Errorf("unable to delete %s: %w", r.ID, err) - } - logger.Info("uncommitted transaction log cleaned", "id", r.ID) - - return false, nil - }) - if err != nil { - return fmt.Errorf("chunkstore: recovery: iteration failed: %w", err) - } - - if found { - logger.Info("recovery successful") - } else { - logger.Info("no uncommitted transactions found") - } - - return nil -} diff --git a/pkg/storer/internal/chunkstore/recovery_test.go b/pkg/storer/internal/chunkstore/recovery_test.go deleted file mode 100644 index 99c5b0f8c76..00000000000 --- a/pkg/storer/internal/chunkstore/recovery_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2023 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package chunkstore_test - -import ( - "context" - "testing" - - "slices" - - "github.com/ethersphere/bee/pkg/sharky" - "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storage/leveldbstore" - chunktest "github.com/ethersphere/bee/pkg/storage/testing" - "github.com/ethersphere/bee/pkg/storer/internal/chunkstore" - "github.com/ethersphere/bee/pkg/swarm" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/spf13/afero" -) - -func TestTxChunkStore_Recovery(t *testing.T) { - t.Parallel() - - store, err := leveldbstore.New(t.TempDir(), nil) - if err != nil { - t.Fatal(err) - } - - sharky, err := sharky.New(&memFS{Fs: afero.NewMemMapFs()}, 1, swarm.SocMaxChunkSize) - if err != nil { - t.Fatal(err) - } - txChunkStore := chunkstore.NewTxChunkStore(leveldbstore.NewTxStore(store), sharky) - t.Cleanup(func() { - if err := txChunkStore.Close(); err != nil { - t.Fatalf("close: %v", err) - } - }) - - chunks := chunktest.GenerateTestRandomChunks(10) - lessFn := func(i, j swarm.Chunk) int { return i.Address().Compare(j.Address()) } - slices.SortFunc(chunks, lessFn) - - // Sore half of the chunks within a transaction and commit it. - tx := txChunkStore.NewTx(storage.NewTxState(context.TODO())) - for i := 0; i < len(chunks)/2; i++ { - if err = tx.Put(context.TODO(), chunks[i]); err != nil { - t.Fatalf("put chunk: %v", err) - } - } - if err := tx.Commit(); err != nil { - t.Fatalf("commit: %v", err) - } - - // Delete the first stored half of the chunks and store - // the other half and don't commit or revert the transaction. - tx = txChunkStore.NewTx(storage.NewTxState(context.TODO())) - for i := 0; i < len(chunks)/2; i++ { - if err = tx.Delete(context.TODO(), chunks[i].Address()); err != nil { - t.Fatalf("put chunk: %v", err) - } - } - for i := len(chunks) / 2; i < len(chunks); i++ { - if err = tx.Put(context.TODO(), chunks[i]); err != nil { - t.Fatalf("put chunk: %v", err) - } - } - // Do not commit or rollback the transaction as - // if the process crashes and attempt to recover. - if err := txChunkStore.Recover(); err != nil { - t.Fatalf("recover: %v", err) - } - - // Check that the store is in the state we expect. - var ( - have []swarm.Chunk - want = chunks[:len(chunks)/2] - ) - if err := txChunkStore.Iterate( - context.TODO(), - func(chunk swarm.Chunk) (stop bool, err error) { - have = append(have, chunk) - return false, nil - }, - ); err != nil { - t.Fatalf("iterate: %v", err) - } - opts := cmpopts.SortSlices(func(i, j swarm.Chunk) bool { - return i.Address().Compare(j.Address()) < 0 - }) - if diff := cmp.Diff(want, have, opts); diff != "" { - t.Fatalf("recovered store data mismatch (-want +have):\n%s", diff) - } -} diff --git a/pkg/storer/internal/chunkstore/transaction.go b/pkg/storer/internal/chunkstore/transaction.go deleted file mode 100644 index 02927d6423a..00000000000 --- a/pkg/storer/internal/chunkstore/transaction.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2023 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package chunkstore - -import ( - "context" - "crypto/sha256" - "errors" - "fmt" - "sync" - - "github.com/ethersphere/bee/pkg/sharky" - "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storage/leveldbstore" - "github.com/ethersphere/bee/pkg/storage/storageutil" - "github.com/google/uuid" - "github.com/vmihailenco/msgpack/v5" -) - -// txSharky provides a simple txn functionality over the Sharky store. -// It mainly exists to support the chunk store Delete operation where -// the Release calls are postponed until Commit or Rollback is called. -type txSharky struct { - Sharky - - id []byte - store leveldbstore.Storer - - opsMu sync.Mutex - writtenLocs []sharky.Location - toReleaseLocs map[[32]byte]sharky.Location - toReleaseSums map[sharky.Location][32]byte -} - -func (t *txSharky) Write(ctx context.Context, buf []byte) (sharky.Location, error) { - var ( - sum = sha256.Sum256(buf) - loc sharky.Location - err error - ) - - t.opsMu.Lock() - defer t.opsMu.Unlock() - - // If the chunk is already written in this transaction then we return - // the location of the chunk. This is to avoid getting new location for - // the same chunk in the case Put operation is called after Delete so - // in the case of Rollback operation everything is consistent. - loc, ok := t.toReleaseLocs[sum] - if ok { - delete(t.toReleaseLocs, sum) - delete(t.toReleaseSums, loc) - return loc, nil - } - - loc, err = t.Sharky.Write(ctx, buf) - if err == nil { - t.writtenLocs = append(t.writtenLocs, loc) - - buf, err = msgpack.Marshal(t.writtenLocs) - if err == nil { - err = t.store.DB().Put(t.id, buf, nil) - } - } - return loc, err -} - -func (t *txSharky) Release(ctx context.Context, loc sharky.Location) error { - t.opsMu.Lock() - defer t.opsMu.Unlock() - - sum, ok := t.toReleaseSums[loc] - if !ok { - buf := make([]byte, loc.Length) - if err := t.Sharky.Read(ctx, loc, buf); err != nil { - return err - } - sum = sha256.Sum256(buf) - t.toReleaseSums[loc] = sum - } - t.toReleaseLocs[sum] = loc - - return nil -} - -var ( - _ storage.ChunkStore = (*TxChunkStoreWrapper)(nil) - _ storage.Recoverer = (*TxChunkStoreWrapper)(nil) -) - -type TxChunkStoreWrapper struct { - *storage.TxChunkStoreBase - - txStore storage.TxStore - txSharky *txSharky -} - -// release releases the TxChunkStoreWrapper transaction associated resources. -func (cs *TxChunkStoreWrapper) release() { - cs.TxChunkStoreBase.ChunkStore = nil - cs.txSharky.toReleaseLocs = nil - cs.txSharky.toReleaseSums = nil - cs.txSharky.writtenLocs = nil - cs.txSharky.Sharky = nil -} - -func (cs *TxChunkStoreWrapper) Commit() error { - defer cs.release() - - var errs error - if err := cs.txStore.Commit(); err != nil { - errs = errors.Join(errs, fmt.Errorf("txchunkstore: unable to commit index store transaction: %w", err)) - } - - for _, loc := range cs.txSharky.toReleaseLocs { - errs = errors.Join(errs, cs.txSharky.Sharky.Release(context.Background(), loc)) - } - - if err := cs.txSharky.store.DB().Delete(cs.txSharky.id, nil); err != nil { - errs = errors.Join(errs, fmt.Errorf("txchunkstore: unable to delete transaction: %x: %w", cs.txSharky.id, err)) - } - return errs -} - -func (cs *TxChunkStoreWrapper) Rollback() error { - defer cs.release() - - var errs error - if err := cs.txStore.Rollback(); err != nil { - errs = errors.Join(errs, fmt.Errorf("txchunkstore: unable to rollback index store transaction: %w", err)) - } - - if errs == nil { - for _, loc := range cs.txSharky.writtenLocs { - errs = errors.Join(errs, cs.txSharky.Sharky.Release(context.Background(), loc)) - } - if errs != nil { - return fmt.Errorf("txchunkstore: unable to release locations: %w", errs) - } - } - - if err := cs.txSharky.store.DB().Delete(cs.txSharky.id, nil); err != nil { - errs = errors.Join(errs, fmt.Errorf("txchunkstore: unable to delete transaction: %x: %w", cs.txSharky.id, err)) - } - return errs -} - -var pendingTxNamespace = new(pendingTx).Namespace() - -func (cs *TxChunkStoreWrapper) NewTx(state *storage.TxState) storage.TxChunkStore { - txStore := cs.txStore.NewTx(state) - txSharky := &txSharky{ - id: []byte(storageutil.JoinFields(pendingTxNamespace, uuid.NewString())), - store: cs.txStore.(*leveldbstore.TxStore).BatchedStore.(leveldbstore.Storer), // TODO: make this independent of the underlying store. - Sharky: cs.txSharky.Sharky, - toReleaseLocs: make(map[[32]byte]sharky.Location), - toReleaseSums: make(map[sharky.Location][32]byte), - } - return &TxChunkStoreWrapper{ - TxChunkStoreBase: &storage.TxChunkStoreBase{ - TxState: state, - ChunkStore: New(txStore, txSharky), - }, - txStore: txStore, - txSharky: txSharky, - } -} - -func NewTxChunkStore(txStore storage.TxStore, csSharky Sharky) *TxChunkStoreWrapper { - return &TxChunkStoreWrapper{ - TxChunkStoreBase: &storage.TxChunkStoreBase{ - ChunkStore: New(txStore, csSharky), - }, - txStore: txStore, - txSharky: &txSharky{Sharky: csSharky}, - } -} diff --git a/pkg/storer/internal/chunkstore/transaction_test.go b/pkg/storer/internal/chunkstore/transaction_test.go deleted file mode 100644 index 88dd98851ed..00000000000 --- a/pkg/storer/internal/chunkstore/transaction_test.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2023 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package chunkstore_test - -import ( - "context" - "testing" - - postagetesting "github.com/ethersphere/bee/pkg/postage/testing" - "github.com/ethersphere/bee/pkg/sharky" - "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storage/leveldbstore" - "github.com/ethersphere/bee/pkg/storage/storagetest" - chunktest "github.com/ethersphere/bee/pkg/storage/testing" - "github.com/ethersphere/bee/pkg/storer/internal/chunkstore" - "github.com/ethersphere/bee/pkg/swarm" - "github.com/spf13/afero" -) - -func TestTxChunkStore(t *testing.T) { - t.Parallel() - - store, err := leveldbstore.New(t.TempDir(), nil) - if err != nil { - t.Fatal(err) - } - - sharky, err := sharky.New(&memFS{Fs: afero.NewMemMapFs()}, 1, swarm.SocMaxChunkSize) - if err != nil { - t.Fatal(err) - } - - storagetest.TestTxChunkStore(t, chunkstore.NewTxChunkStore(leveldbstore.NewTxStore(store), sharky)) -} - -// TestMultipleStampsRefCnt tests the behaviour of ref counting along with multiple -// stamps to ensure transactions work correctly. -func TestMultipleStampsRefCnt(t *testing.T) { - t.Parallel() - - store, err := leveldbstore.New(t.TempDir(), nil) - if err != nil { - t.Fatal(err) - } - - sharky, err := sharky.New(&memFS{Fs: afero.NewMemMapFs()}, 1, swarm.SocMaxChunkSize) - if err != nil { - t.Fatal(err) - } - - chunkStore := chunkstore.NewTxChunkStore(leveldbstore.NewTxStore(store), sharky) - t.Cleanup(func() { - if err := chunkStore.Close(); err != nil { - t.Fatalf("close chunk store: %v", err) - } - }) - - var ( - chunk = chunktest.GenerateTestRandomChunk() - stamps = []swarm.Stamp{ - chunk.Stamp(), - postagetesting.MustNewStamp(), - postagetesting.MustNewStamp(), - } - ) - - verifyAllIndexes := func(t *testing.T) { - t.Helper() - - rIdx := chunkstore.RetrievalIndexItem{ - Address: chunk.Address(), - } - - has, err := store.Has(&rIdx) - if err != nil { - t.Fatal(err) - } - if !has { - t.Fatalf("retrievalIndex not found %s", chunk.Address()) - } - } - - t.Run("put with multiple stamps", func(t *testing.T) { - cs := chunkStore.NewTx(storage.NewTxState(context.TODO())) - - for _, stamp := range stamps { - err := cs.Put(context.TODO(), chunk.WithStamp(stamp)) - if err != nil { - t.Fatalf("failed to put chunk: %v", err) - } - } - - err := cs.Commit() - if err != nil { - t.Fatal(err) - } - - verifyAllIndexes(t) - }) - - t.Run("rollback delete operations", func(t *testing.T) { - t.Run("less than refCnt", func(t *testing.T) { - cs := chunkStore.NewTx(storage.NewTxState(context.TODO())) - - for i := 0; i < len(stamps)-1; i++ { - err := cs.Delete(context.TODO(), chunk.Address()) - if err != nil { - t.Fatalf("failed to delete chunk %d: %v", i, err) - } - } - - err := cs.Rollback() - if err != nil { - t.Fatal(err) - } - - verifyAllIndexes(t) - }) - - // this should remove all the stamps and hopefully bring them back - t.Run("till refCnt", func(t *testing.T) { - cs := chunkStore.NewTx(storage.NewTxState(context.TODO())) - - for i := 0; i < len(stamps); i++ { - err := cs.Delete(context.TODO(), chunk.Address()) - if err != nil { - t.Fatalf("failed to delete chunk %d: %v", i, err) - } - } - - err := cs.Rollback() - if err != nil { - t.Fatal(err) - } - - verifyAllIndexes(t) - }) - }) -} diff --git a/pkg/storer/internal/internal.go b/pkg/storer/internal/internal.go index 0433ad7a1c5..7c5c429a803 100644 --- a/pkg/storer/internal/internal.go +++ b/pkg/storer/internal/internal.go @@ -7,31 +7,20 @@ package internal import ( "bytes" "context" - "errors" "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage/inmemchunkstore" "github.com/ethersphere/bee/pkg/storage/inmemstore" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" ) -// Storage groups the storage.Store and storage.ChunkStore interfaces. -type Storage interface { - IndexStore() storage.BatchedStore - ChunkStore() storage.ChunkStore -} - // PutterCloserWithReference provides a Putter which can be closed with a root // swarm reference associated with this session. type PutterCloserWithReference interface { - Put(context.Context, Storage, storage.Writer, swarm.Chunk) error - Close(Storage, storage.Writer, swarm.Address) error - Cleanup(TxExecutor) error -} - -// TxExecutor executes a function in a transaction. -type TxExecutor interface { - Execute(context.Context, func(Storage) error) error + Put(context.Context, transaction.Store, swarm.Chunk) error + Close(storage.IndexStore, swarm.Address) error + Cleanup(transaction.Storage) error } var emptyAddr = make([]byte, swarm.HashSize) @@ -56,30 +45,49 @@ func AddressBytesOrZero(addr swarm.Address) []byte { return addr.Bytes() } -// BatchedStorage groups the Storage and TxExecutor interfaces. -type BatchedStorage interface { - Storage - TxExecutor -} - // NewInmemStorage constructs a inmem Storage implementation which can be used // for the tests in the internal packages. -func NewInmemStorage() (BatchedStorage, func() error) { - ts := &inmemRepository{ +func NewInmemStorage() transaction.Storage { + ts := &inmemStorage{ indexStore: inmemstore.New(), chunkStore: inmemchunkstore.New(), } - return ts, func() error { - return errors.Join(ts.indexStore.Close(), ts.chunkStore.Close()) - } + return ts +} + +type inmemStorage struct { + indexStore storage.IndexStore + chunkStore storage.ChunkStore +} + +func (t *inmemStorage) NewTransaction(ctx context.Context) (transaction.Transaction, func()) { + return &inmemTrx{t.indexStore, t.chunkStore}, func() {} } -type inmemRepository struct { - indexStore storage.BatchedStore +type inmemTrx struct { + indexStore storage.IndexStore chunkStore storage.ChunkStore } -func (t *inmemRepository) IndexStore() storage.BatchedStore { return t.indexStore } -func (t *inmemRepository) ChunkStore() storage.ChunkStore { return t.chunkStore } -func (t *inmemRepository) Execute(_ context.Context, f func(Storage) error) error { return f(t) } +type inmemReadOnly struct { + indexStore storage.Reader + chunkStore storage.ReadOnlyChunkStore +} + +func (t *inmemReadOnly) IndexStore() storage.Reader { return t.indexStore } +func (t *inmemReadOnly) ChunkStore() storage.ReadOnlyChunkStore { return t.chunkStore } + +func (t *inmemTrx) IndexStore() storage.IndexStore { return t.indexStore } +func (t *inmemTrx) ChunkStore() storage.ChunkStore { return t.chunkStore } +func (t *inmemTrx) Commit() error { return nil } + +func (t *inmemStorage) ReadOnly() transaction.ReadOnlyStore { + return &inmemReadOnly{t.indexStore, t.chunkStore} +} +func (t *inmemStorage) Close() error { return nil } +func (t *inmemStorage) Run(ctx context.Context, f func(s transaction.Store) error) error { + trx, done := t.NewTransaction(ctx) + defer done() + return f(trx) +} diff --git a/pkg/storer/internal/pinning/export_test.go b/pkg/storer/internal/pinning/export_test.go index 79e5864dfba..1faa8befab6 100644 --- a/pkg/storer/internal/pinning/export_test.go +++ b/pkg/storer/internal/pinning/export_test.go @@ -28,7 +28,7 @@ var ( var NewUUID = newUUID -func GetStat(st storage.Store, root swarm.Address) (CollectionStat, error) { +func GetStat(st storage.Reader, root swarm.Address) (CollectionStat, error) { collection := &pinCollectionItem{Addr: root} err := st.Get(collection) if err != nil { diff --git a/pkg/storer/internal/pinning/pinning.go b/pkg/storer/internal/pinning/pinning.go index 8fb25ba92ad..81dbd2f7f0d 100644 --- a/pkg/storer/internal/pinning/pinning.go +++ b/pkg/storer/internal/pinning/pinning.go @@ -10,12 +10,12 @@ import ( "encoding/binary" "errors" "fmt" - "sync" "github.com/ethersphere/bee/pkg/encryption" storage "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage/storageutil" "github.com/ethersphere/bee/pkg/storer/internal" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" "github.com/google/uuid" ) @@ -59,152 +59,14 @@ type CollectionStat struct { DupInCollection uint64 } -// pinCollectionSize represents the size of the pinCollectionItem -const pinCollectionItemSize = encryption.ReferenceSize + uuidSize + 8 + 8 - -var _ storage.Item = (*pinCollectionItem)(nil) - -// pinCollectionItem is the index used to describe a pinning collection. The Addr -// is the root reference of the collection and UUID is a unique UUID for this collection. -// The Address could be an encrypted swarm hash. This hash has the key to decrypt the -// collection. -type pinCollectionItem struct { - Addr swarm.Address - UUID []byte - Stat CollectionStat -} - -func (p *pinCollectionItem) ID() string { return p.Addr.ByteString() } - -func (pinCollectionItem) Namespace() string { return "pinCollectionItem" } - -func (p *pinCollectionItem) Marshal() ([]byte, error) { - if p.Addr.IsZero() { - return nil, errInvalidPinCollectionAddr - } - if len(p.UUID) == 0 { - return nil, errInvalidPinCollectionUUID - } - buf := make([]byte, pinCollectionItemSize) - copy(buf[:encryption.ReferenceSize], p.Addr.Bytes()) - off := encryption.ReferenceSize - copy(buf[off:off+uuidSize], p.UUID) - statBufOff := encryption.ReferenceSize + uuidSize - binary.LittleEndian.PutUint64(buf[statBufOff:], p.Stat.Total) - binary.LittleEndian.PutUint64(buf[statBufOff+8:], p.Stat.DupInCollection) - return buf, nil -} - -func (p *pinCollectionItem) Unmarshal(buf []byte) error { - if len(buf) != pinCollectionItemSize { - return errInvalidPinCollectionSize - } - ni := new(pinCollectionItem) - if bytes.Equal(buf[swarm.HashSize:encryption.ReferenceSize], emptyKey) { - ni.Addr = swarm.NewAddress(buf[:swarm.HashSize]).Clone() - } else { - ni.Addr = swarm.NewAddress(buf[:encryption.ReferenceSize]).Clone() - } - off := encryption.ReferenceSize - ni.UUID = append(make([]byte, 0, uuidSize), buf[off:off+uuidSize]...) - statBuf := buf[off+uuidSize:] - ni.Stat.Total = binary.LittleEndian.Uint64(statBuf[:8]) - ni.Stat.DupInCollection = binary.LittleEndian.Uint64(statBuf[8:16]) - *p = *ni - return nil -} - -func (p *pinCollectionItem) Clone() storage.Item { - if p == nil { - return nil - } - return &pinCollectionItem{ - Addr: p.Addr.Clone(), - UUID: append([]byte(nil), p.UUID...), - Stat: p.Stat, - } -} - -func (p pinCollectionItem) String() string { - return storageutil.JoinFields(p.Namespace(), p.ID()) -} - -var _ storage.Item = (*pinChunkItem)(nil) - -// pinChunkItem is the index used to represent a single chunk in the pinning -// collection. It is prefixed with the UUID of the collection. -type pinChunkItem struct { - UUID []byte - Addr swarm.Address -} - -func (p *pinChunkItem) Namespace() string { return string(p.UUID) } - -func (p *pinChunkItem) ID() string { return p.Addr.ByteString() } - -// pinChunkItem is a key-only type index. We don't need to store any value. As such -// the serialization functions would be no-ops. A Get operation on this key is not -// required as the key would constitute the item. Usually these type of indexes are -// useful for key-only iterations. -func (p *pinChunkItem) Marshal() ([]byte, error) { - return nil, nil -} - -func (p *pinChunkItem) Unmarshal(_ []byte) error { - return nil -} - -func (p *pinChunkItem) Clone() storage.Item { - if p == nil { - return nil - } - return &pinChunkItem{ - UUID: append([]byte(nil), p.UUID...), - Addr: p.Addr.Clone(), - } -} - -func (p pinChunkItem) String() string { - return storageutil.JoinFields(p.Namespace(), p.ID()) -} - -type dirtyCollection struct { - UUID []byte -} - -func (d *dirtyCollection) ID() string { return string(d.UUID) } - -func (dirtyCollection) Namespace() string { return "dirtyCollection" } - -func (d *dirtyCollection) Marshal() ([]byte, error) { - return nil, nil -} - -func (d *dirtyCollection) Unmarshal(_ []byte) error { - return nil -} - -func (d *dirtyCollection) Clone() storage.Item { - if d == nil { - return nil - } - return &dirtyCollection{ - UUID: append([]byte(nil), d.UUID...), - } -} - -func (d dirtyCollection) String() string { - return storageutil.JoinFields(d.Namespace(), d.ID()) -} - // NewCollection returns a putter wrapped around the passed storage. // The putter will add the chunk to Chunk store if it doesnt exists within this collection. // It will create a new UUID for the collection which can be used to iterate on all the chunks -// that are part of this collection. The root pin is only updated on successful close of this -// Putter. -func NewCollection(st internal.Storage) (internal.PutterCloserWithReference, error) { +// that are part of this collection. The root pin is only updated on successful close of this. +// Calls to the Putter MUST be mutex locked to prevent concurrent upload data races. +func NewCollection(st storage.IndexStore) (internal.PutterCloserWithReference, error) { newCollectionUUID := newUUID() - err := st.IndexStore().Put(&dirtyCollection{UUID: newCollectionUUID}) + err := st.Put(&dirtyCollection{UUID: newCollectionUUID}) if err != nil { return nil, err } @@ -214,14 +76,13 @@ func NewCollection(st internal.Storage) (internal.PutterCloserWithReference, err } type collectionPutter struct { - mtx sync.Mutex collection *pinCollectionItem closed bool } -func (c *collectionPutter) Put(ctx context.Context, st internal.Storage, writer storage.Writer, ch swarm.Chunk) error { - c.mtx.Lock() - defer c.mtx.Unlock() +// Put adds a chunk to the pin collection. +// The user of the putter MUST mutex lock the call to prevent data-races across multiple upload sessions. +func (c *collectionPutter) Put(ctx context.Context, st transaction.Store, ch swarm.Chunk) error { // do not allow any Puts after putter was closed if c.closed { @@ -245,7 +106,7 @@ func (c *collectionPutter) Put(ctx context.Context, st internal.Storage, writer return nil } - err = writer.Put(collectionChunk) + err = st.IndexStore().Put(collectionChunk) if err != nil { return fmt.Errorf("pin store: failed putting collection chunk: %w", err) } @@ -258,16 +119,13 @@ func (c *collectionPutter) Put(ctx context.Context, st internal.Storage, writer return nil } -func (c *collectionPutter) Close(st internal.Storage, writer storage.Writer, root swarm.Address) error { +func (c *collectionPutter) Close(st storage.IndexStore, root swarm.Address) error { if root.IsZero() { return errCollectionRootAddressIsZero } - c.mtx.Lock() - defer c.mtx.Unlock() - collection := &pinCollectionItem{Addr: root} - has, err := st.IndexStore().Has(collection) + has, err := st.Has(collection) if err != nil { return fmt.Errorf("pin store: check previous root: %w", err) @@ -280,12 +138,12 @@ func (c *collectionPutter) Close(st internal.Storage, writer storage.Writer, roo // Save the root pin reference. c.collection.Addr = root - err = writer.Put(c.collection) + err = st.Put(c.collection) if err != nil { return fmt.Errorf("pin store: failed updating collection: %w", err) } - err = writer.Delete(&dirtyCollection{UUID: c.collection.UUID}) + err = st.Delete(&dirtyCollection{UUID: c.collection.UUID}) if err != nil { return fmt.Errorf("pin store: failed deleting dirty collection: %w", err) } @@ -294,19 +152,16 @@ func (c *collectionPutter) Close(st internal.Storage, writer storage.Writer, roo return nil } -func (c *collectionPutter) Cleanup(tx internal.TxExecutor) error { - c.mtx.Lock() - defer c.mtx.Unlock() - +func (c *collectionPutter) Cleanup(st transaction.Storage) error { if c.closed { return nil } - if err := deleteCollectionChunks(context.Background(), tx, c.collection.UUID); err != nil { + if err := deleteCollectionChunks(context.Background(), st, c.collection.UUID); err != nil { return fmt.Errorf("pin store: failed deleting collection chunks: %w", err) } - err := tx.Execute(context.Background(), func(s internal.Storage) error { + err := st.Run(context.Background(), func(s transaction.Store) error { return s.IndexStore().Delete(&dirtyCollection{UUID: c.collection.UUID}) }) if err != nil { @@ -318,34 +173,33 @@ func (c *collectionPutter) Cleanup(tx internal.TxExecutor) error { } // CleanupDirty will iterate over all the dirty collections and delete them. -func CleanupDirty(tx internal.TxExecutor) error { +func CleanupDirty(st transaction.Storage) error { + dirtyCollections := make([]*dirtyCollection, 0) - err := tx.Execute(context.Background(), func(s internal.Storage) error { - return s.IndexStore().Iterate( - storage.Query{ - Factory: func() storage.Item { return new(dirtyCollection) }, - ItemProperty: storage.QueryItemID, - }, - func(r storage.Result) (bool, error) { - di := &dirtyCollection{UUID: []byte(r.ID)} - dirtyCollections = append(dirtyCollections, di) - return false, nil - }, - ) - }) + err := st.ReadOnly().IndexStore().Iterate( + storage.Query{ + Factory: func() storage.Item { return new(dirtyCollection) }, + ItemProperty: storage.QueryItemID, + }, + func(r storage.Result) (bool, error) { + di := &dirtyCollection{UUID: []byte(r.ID)} + dirtyCollections = append(dirtyCollections, di) + return false, nil + }, + ) if err != nil { return fmt.Errorf("pin store: failed iterating dirty collections: %w", err) } for _, di := range dirtyCollections { - _ = (&collectionPutter{collection: &pinCollectionItem{UUID: di.UUID}}).Cleanup(tx) + err = errors.Join(err, (&collectionPutter{collection: &pinCollectionItem{UUID: di.UUID}}).Cleanup(st)) } - return nil + return err } // HasPin function will check if the address represents a valid pin collection. -func HasPin(st storage.Store, root swarm.Address) (bool, error) { +func HasPin(st storage.Reader, root swarm.Address) (bool, error) { collection := &pinCollectionItem{Addr: root} has, err := st.Has(collection) if err != nil { @@ -355,7 +209,7 @@ func HasPin(st storage.Store, root swarm.Address) (bool, error) { } // Pins lists all the added pinning collections. -func Pins(st storage.Store) ([]swarm.Address, error) { +func Pins(st storage.Reader) ([]swarm.Address, error) { var pins []swarm.Address err := st.Iterate(storage.Query{ Factory: func() storage.Item { return new(pinCollectionItem) }, @@ -372,39 +226,34 @@ func Pins(st storage.Store) ([]swarm.Address, error) { return pins, nil } -func deleteCollectionChunks(ctx context.Context, tx internal.TxExecutor, collectionUUID []byte) error { +func deleteCollectionChunks(ctx context.Context, st transaction.Storage, collectionUUID []byte) error { chunksToDelete := make([]*pinChunkItem, 0) - err := tx.Execute(ctx, func(s internal.Storage) error { - return s.IndexStore().Iterate( - storage.Query{ - Factory: func() storage.Item { return &pinChunkItem{UUID: collectionUUID} }, - }, func(r storage.Result) (bool, error) { - addr := swarm.NewAddress([]byte(r.ID)) - chunk := &pinChunkItem{UUID: collectionUUID, Addr: addr} - chunksToDelete = append(chunksToDelete, chunk) - return false, nil - }, - ) - }) + + err := st.ReadOnly().IndexStore().Iterate( + storage.Query{ + Factory: func() storage.Item { return &pinChunkItem{UUID: collectionUUID} }, + }, func(r storage.Result) (bool, error) { + addr := swarm.NewAddress([]byte(r.ID)) + chunk := &pinChunkItem{UUID: collectionUUID, Addr: addr} + chunksToDelete = append(chunksToDelete, chunk) + return false, nil + }, + ) if err != nil { return fmt.Errorf("pin store: failed iterating collection chunks: %w", err) } batchCnt := 1000 for i := 0; i < len(chunksToDelete); i += batchCnt { - err = tx.Execute(context.Background(), func(s internal.Storage) error { - b, err := s.IndexStore().Batch(context.Background()) - if err != nil { - return err - } + err := st.Run(ctx, func(s transaction.Store) error { end := i + batchCnt if end > len(chunksToDelete) { end = len(chunksToDelete) } for _, chunk := range chunksToDelete[i:end] { - err := b.Delete(chunk) + err := s.IndexStore().Delete(chunk) if err != nil { return fmt.Errorf("pin store: failed deleting collection chunk: %w", err) } @@ -413,7 +262,7 @@ func deleteCollectionChunks(ctx context.Context, tx internal.TxExecutor, collect return fmt.Errorf("pin store: failed in tx chunk deletion: %w", err) } } - return b.Commit() + return nil }) if err != nil { return fmt.Errorf("pin store: failed tx deleting collection chunks: %w", err) @@ -422,32 +271,29 @@ func deleteCollectionChunks(ctx context.Context, tx internal.TxExecutor, collect return nil } -// DeletePin will delete the root pin and all the chunks that are part of this -// collection. -func DeletePin(ctx context.Context, tx internal.TxExecutor, root swarm.Address) error { +// DeletePin will delete the root pin and all the chunks that are part of this collection. +func DeletePin(ctx context.Context, st transaction.Storage, root swarm.Address) error { collection := &pinCollectionItem{Addr: root} - err := tx.Execute(context.Background(), func(s internal.Storage) error { - return s.IndexStore().Get(collection) - }) + + err := st.ReadOnly().IndexStore().Get(collection) if err != nil { return fmt.Errorf("pin store: failed getting collection: %w", err) } - if err := deleteCollectionChunks(ctx, tx, collection.UUID); err != nil { + if err := deleteCollectionChunks(ctx, st, collection.UUID); err != nil { return err } - err = tx.Execute(context.Background(), func(s internal.Storage) error { - return s.IndexStore().Delete(collection) + return st.Run(ctx, func(s transaction.Store) error { + err := s.IndexStore().Delete(collection) + if err != nil { + return fmt.Errorf("pin store: failed deleting root collection: %w", err) + } + return nil }) - if err != nil { - return fmt.Errorf("pin store: failed deleting root collection: %w", err) - } - - return nil } -func IterateCollection(st storage.Store, root swarm.Address, fn func(addr swarm.Address) (bool, error)) error { +func IterateCollection(st storage.Reader, root swarm.Address, fn func(addr swarm.Address) (bool, error)) error { collection := &pinCollectionItem{Addr: root} err := st.Get(collection) if err != nil { @@ -467,7 +313,7 @@ func IterateCollection(st storage.Store, root swarm.Address, fn func(addr swarm. }) } -func IterateCollectionStats(st storage.Store, iterateFn func(st CollectionStat) (bool, error)) error { +func IterateCollectionStats(st storage.Reader, iterateFn func(st CollectionStat) (bool, error)) error { return st.Iterate( storage.Query{ Factory: func() storage.Item { return new(pinCollectionItem) }, @@ -477,3 +323,141 @@ func IterateCollectionStats(st storage.Store, iterateFn func(st CollectionStat) }, ) } + +// pinCollectionSize represents the size of the pinCollectionItem +const pinCollectionItemSize = encryption.ReferenceSize + uuidSize + 8 + 8 + +var _ storage.Item = (*pinCollectionItem)(nil) + +// pinCollectionItem is the index used to describe a pinning collection. The Addr +// is the root reference of the collection and UUID is a unique UUID for this collection. +// The Address could be an encrypted swarm hash. This hash has the key to decrypt the +// collection. +type pinCollectionItem struct { + Addr swarm.Address + UUID []byte + Stat CollectionStat +} + +func (p *pinCollectionItem) ID() string { return p.Addr.ByteString() } + +func (pinCollectionItem) Namespace() string { return "pinCollectionItem" } + +func (p *pinCollectionItem) Marshal() ([]byte, error) { + if p.Addr.IsZero() { + return nil, errInvalidPinCollectionAddr + } + if len(p.UUID) == 0 { + return nil, errInvalidPinCollectionUUID + } + buf := make([]byte, pinCollectionItemSize) + copy(buf[:encryption.ReferenceSize], p.Addr.Bytes()) + off := encryption.ReferenceSize + copy(buf[off:off+uuidSize], p.UUID) + statBufOff := encryption.ReferenceSize + uuidSize + binary.LittleEndian.PutUint64(buf[statBufOff:], p.Stat.Total) + binary.LittleEndian.PutUint64(buf[statBufOff+8:], p.Stat.DupInCollection) + return buf, nil +} + +func (p *pinCollectionItem) Unmarshal(buf []byte) error { + if len(buf) != pinCollectionItemSize { + return errInvalidPinCollectionSize + } + ni := new(pinCollectionItem) + if bytes.Equal(buf[swarm.HashSize:encryption.ReferenceSize], emptyKey) { + ni.Addr = swarm.NewAddress(buf[:swarm.HashSize]).Clone() + } else { + ni.Addr = swarm.NewAddress(buf[:encryption.ReferenceSize]).Clone() + } + off := encryption.ReferenceSize + ni.UUID = append(make([]byte, 0, uuidSize), buf[off:off+uuidSize]...) + statBuf := buf[off+uuidSize:] + ni.Stat.Total = binary.LittleEndian.Uint64(statBuf[:8]) + ni.Stat.DupInCollection = binary.LittleEndian.Uint64(statBuf[8:16]) + *p = *ni + return nil +} + +func (p *pinCollectionItem) Clone() storage.Item { + if p == nil { + return nil + } + return &pinCollectionItem{ + Addr: p.Addr.Clone(), + UUID: append([]byte(nil), p.UUID...), + Stat: p.Stat, + } +} + +func (p pinCollectionItem) String() string { + return storageutil.JoinFields(p.Namespace(), p.ID()) +} + +var _ storage.Item = (*pinChunkItem)(nil) + +// pinChunkItem is the index used to represent a single chunk in the pinning +// collection. It is prefixed with the UUID of the collection. +type pinChunkItem struct { + UUID []byte + Addr swarm.Address +} + +func (p *pinChunkItem) Namespace() string { return string(p.UUID) } + +func (p *pinChunkItem) ID() string { return p.Addr.ByteString() } + +// pinChunkItem is a key-only type index. We don't need to store any value. As such +// the serialization functions would be no-ops. A Get operation on this key is not +// required as the key would constitute the item. Usually these type of indexes are +// useful for key-only iterations. +func (p *pinChunkItem) Marshal() ([]byte, error) { + return nil, nil +} + +func (p *pinChunkItem) Unmarshal(_ []byte) error { + return nil +} + +func (p *pinChunkItem) Clone() storage.Item { + if p == nil { + return nil + } + return &pinChunkItem{ + UUID: append([]byte(nil), p.UUID...), + Addr: p.Addr.Clone(), + } +} + +func (p pinChunkItem) String() string { + return storageutil.JoinFields(p.Namespace(), p.ID()) +} + +type dirtyCollection struct { + UUID []byte +} + +func (d *dirtyCollection) ID() string { return string(d.UUID) } + +func (dirtyCollection) Namespace() string { return "dirtyCollection" } + +func (d *dirtyCollection) Marshal() ([]byte, error) { + return nil, nil +} + +func (d *dirtyCollection) Unmarshal(_ []byte) error { + return nil +} + +func (d *dirtyCollection) Clone() storage.Item { + if d == nil { + return nil + } + return &dirtyCollection{ + UUID: append([]byte(nil), d.UUID...), + } +} + +func (d dirtyCollection) String() string { + return storageutil.JoinFields(d.Namespace(), d.ID()) +} diff --git a/pkg/storer/internal/pinning/pinning_test.go b/pkg/storer/internal/pinning/pinning_test.go index 017b135f6f8..57db870a118 100644 --- a/pkg/storer/internal/pinning/pinning_test.go +++ b/pkg/storer/internal/pinning/pinning_test.go @@ -16,6 +16,7 @@ import ( chunktest "github.com/ethersphere/bee/pkg/storage/testing" "github.com/ethersphere/bee/pkg/storer/internal" pinstore "github.com/ethersphere/bee/pkg/storer/internal/pinning" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" ) @@ -25,17 +26,9 @@ type pinningCollection struct { dupChunks []swarm.Chunk } -func newTestStorage(t *testing.T) internal.BatchedStorage { +func newTestStorage(t *testing.T) transaction.Storage { t.Helper() - - storg, closer := internal.NewInmemStorage() - t.Cleanup(func() { - err := closer() - if err != nil { - t.Errorf("failed closing storage: %v", err) - } - }) - + storg := internal.NewInmemStorage() return storg } @@ -75,24 +68,35 @@ func TestPinStore(t *testing.T) { t.Run("create new collections", func(t *testing.T) { for tCount, tc := range tests { t.Run(fmt.Sprintf("create collection %d", tCount), func(t *testing.T) { - putter, err := pinstore.NewCollection(st) + + var putter internal.PutterCloserWithReference + var err error + err = st.Run(context.Background(), func(s transaction.Store) error { + putter, err = pinstore.NewCollection(s.IndexStore()) + return err + }) if err != nil { t.Fatal(err) } + for _, ch := range append(tc.uniqueChunks, tc.root) { - err := putter.Put(context.Background(), st, st.IndexStore(), ch) - if err != nil { + if err := st.Run(context.Background(), func(s transaction.Store) error { + return putter.Put(context.Background(), s, ch) + }); err != nil { t.Fatal(err) } } for _, ch := range tc.dupChunks { - err := putter.Put(context.Background(), st, st.IndexStore(), ch) - if err != nil { + if err := st.Run(context.Background(), func(s transaction.Store) error { + return putter.Put(context.Background(), s, ch) + }); err != nil { t.Fatal(err) } } - err = putter.Close(st, st.IndexStore(), tc.root.Address()) - if err != nil { + + if err := st.Run(context.Background(), func(s transaction.Store) error { + return putter.Close(s.IndexStore(), tc.root.Address()) + }); err != nil { t.Fatal(err) } }) @@ -105,14 +109,14 @@ func TestPinStore(t *testing.T) { allChunks := append(tc.uniqueChunks, tc.root) allChunks = append(allChunks, tc.dupChunks...) for _, ch := range allChunks { - exists, err := st.ChunkStore().Has(context.TODO(), ch.Address()) + exists, err := st.ReadOnly().ChunkStore().Has(context.TODO(), ch.Address()) if err != nil { t.Fatal(err) } if !exists { t.Fatal("chunk should exist") } - rch, err := st.ChunkStore().Get(context.TODO(), ch.Address()) + rch, err := st.ReadOnly().ChunkStore().Get(context.TODO(), ch.Address()) if err != nil { t.Fatal(err) } @@ -125,7 +129,7 @@ func TestPinStore(t *testing.T) { }) t.Run("verify root pins", func(t *testing.T) { - pins, err := pinstore.Pins(st.IndexStore()) + pins, err := pinstore.Pins(st.ReadOnly().IndexStore()) if err != nil { t.Fatal(err) } @@ -148,7 +152,7 @@ func TestPinStore(t *testing.T) { t.Run("has pin", func(t *testing.T) { for _, tc := range tests { - found, err := pinstore.HasPin(st.IndexStore(), tc.root.Address()) + found, err := pinstore.HasPin(st.ReadOnly().IndexStore(), tc.root.Address()) if err != nil { t.Fatal(err) } @@ -161,7 +165,7 @@ func TestPinStore(t *testing.T) { t.Run("verify internal state", func(t *testing.T) { for _, tc := range tests { count := 0 - err := pinstore.IterateCollection(st.IndexStore(), tc.root.Address(), func(addr swarm.Address) (bool, error) { + err := pinstore.IterateCollection(st.ReadOnly().IndexStore(), tc.root.Address(), func(addr swarm.Address) (bool, error) { count++ return false, nil }) @@ -171,7 +175,7 @@ func TestPinStore(t *testing.T) { if count != len(tc.uniqueChunks)+2 { t.Fatalf("incorrect no of chunks in collection, expected %d found %d", len(tc.uniqueChunks)+2, count) } - stat, err := pinstore.GetStat(st.IndexStore(), tc.root.Address()) + stat, err := pinstore.GetStat(st.ReadOnly().IndexStore(), tc.root.Address()) if err != nil { t.Fatal(err) } @@ -186,7 +190,7 @@ func TestPinStore(t *testing.T) { t.Run("iterate stats", func(t *testing.T) { count, total, dup := 0, 0, 0 - err := pinstore.IterateCollectionStats(st.IndexStore(), func(stat pinstore.CollectionStat) (bool, error) { + err := pinstore.IterateCollectionStats(st.ReadOnly().IndexStore(), func(stat pinstore.CollectionStat) (bool, error) { count++ total += int(stat.Total) dup += int(stat.DupInCollection) @@ -220,7 +224,7 @@ func TestPinStore(t *testing.T) { t.Fatal(err) } - found, err := pinstore.HasPin(st.IndexStore(), tests[0].root.Address()) + found, err := pinstore.HasPin(st.ReadOnly().IndexStore(), tests[0].root.Address()) if err != nil { t.Fatal(err) } @@ -228,7 +232,7 @@ func TestPinStore(t *testing.T) { t.Fatal("expected pin to not be found") } - pins, err := pinstore.Pins(st.IndexStore()) + pins, err := pinstore.Pins(st.ReadOnly().IndexStore()) if err != nil { t.Fatal(err) } @@ -239,14 +243,14 @@ func TestPinStore(t *testing.T) { allChunks := append(tests[0].uniqueChunks, tests[0].root) allChunks = append(allChunks, tests[0].dupChunks...) for _, ch := range allChunks { - exists, err := st.ChunkStore().Has(context.TODO(), ch.Address()) + exists, err := st.ReadOnly().ChunkStore().Has(context.TODO(), ch.Address()) if err != nil { t.Fatal(err) } if exists { t.Fatal("chunk should not exist") } - _, err = st.ChunkStore().Get(context.TODO(), ch.Address()) + _, err = st.ReadOnly().ChunkStore().Get(context.TODO(), ch.Address()) if !errors.Is(err, storage.ErrNotFound) { t.Fatal(err) } @@ -255,22 +259,36 @@ func TestPinStore(t *testing.T) { t.Run("error after close", func(t *testing.T) { root := chunktest.GenerateTestRandomChunk() - putter, err := pinstore.NewCollection(st) + + var ( + putter internal.PutterCloserWithReference + err error + ) + err = st.Run(context.Background(), func(s transaction.Store) error { + putter, err = pinstore.NewCollection(s.IndexStore()) + return err + }) if err != nil { t.Fatal(err) } - err = putter.Put(context.Background(), st, st.IndexStore(), root) + err = st.Run(context.Background(), func(s transaction.Store) error { + return putter.Put(context.Background(), s, root) + }) if err != nil { t.Fatal(err) } - err = putter.Close(st, st.IndexStore(), root.Address()) + err = st.Run(context.Background(), func(s transaction.Store) error { + return putter.Close(s.IndexStore(), root.Address()) + }) if err != nil { t.Fatal(err) } - err = putter.Put(context.Background(), st, st.IndexStore(), chunktest.GenerateTestRandomChunk()) + err = st.Run(context.Background(), func(s transaction.Store) error { + return putter.Put(context.Background(), s, chunktest.GenerateTestRandomChunk()) + }) if !errors.Is(err, pinstore.ErrPutterAlreadyClosed) { t.Fatalf("unexpected error during Put, want: %v, got: %v", pinstore.ErrPutterAlreadyClosed, err) } @@ -278,22 +296,36 @@ func TestPinStore(t *testing.T) { t.Run("duplicate collection", func(t *testing.T) { root := chunktest.GenerateTestRandomChunk() - putter, err := pinstore.NewCollection(st) + + var ( + putter internal.PutterCloserWithReference + err error + ) + err = st.Run(context.Background(), func(s transaction.Store) error { + putter, err = pinstore.NewCollection(s.IndexStore()) + return err + }) if err != nil { t.Fatal(err) } - err = putter.Put(context.Background(), st, st.IndexStore(), root) + err = st.Run(context.Background(), func(s transaction.Store) error { + return putter.Put(context.Background(), s, root) + }) if err != nil { t.Fatal(err) } - err = putter.Close(st, st.IndexStore(), root.Address()) + err = st.Run(context.Background(), func(s transaction.Store) error { + return putter.Close(s.IndexStore(), root.Address()) + }) if err != nil { t.Fatal(err) } - err = putter.Close(st, st.IndexStore(), root.Address()) + err = st.Run(context.Background(), func(s transaction.Store) error { + return putter.Close(s.IndexStore(), root.Address()) + }) if err == nil || !errors.Is(err, pinstore.ErrDuplicatePinCollection) { t.Fatalf("unexpected error during CLose, want: %v, got: %v", pinstore.ErrDuplicatePinCollection, err) } @@ -301,17 +333,29 @@ func TestPinStore(t *testing.T) { t.Run("zero address close", func(t *testing.T) { root := chunktest.GenerateTestRandomChunk() - putter, err := pinstore.NewCollection(st) + + var ( + putter internal.PutterCloserWithReference + err error + ) + err = st.Run(context.Background(), func(s transaction.Store) error { + putter, err = pinstore.NewCollection(s.IndexStore()) + return err + }) if err != nil { t.Fatal(err) } - err = putter.Put(context.Background(), st, st.IndexStore(), root) + err = st.Run(context.Background(), func(s transaction.Store) error { + return putter.Put(context.Background(), s, root) + }) if err != nil { t.Fatal(err) } - err = putter.Close(st, st.IndexStore(), swarm.ZeroAddress) + err = st.Run(context.Background(), func(s transaction.Store) error { + return putter.Close(s.IndexStore(), swarm.ZeroAddress) + }) if !errors.Is(err, pinstore.ErrCollectionRootAddressIsZero) { t.Fatalf("unexpected error on close, want: %v, got: %v", pinstore.ErrCollectionRootAddressIsZero, err) } @@ -327,13 +371,22 @@ func TestCleanup(t *testing.T) { st := newTestStorage(t) chunks := chunktest.GenerateTestRandomChunks(5) - putter, err := pinstore.NewCollection(st) + var ( + putter internal.PutterCloserWithReference + err error + ) + err = st.Run(context.Background(), func(s transaction.Store) error { + putter, err = pinstore.NewCollection(s.IndexStore()) + return err + }) if err != nil { t.Fatal(err) } for _, ch := range chunks { - err = putter.Put(context.Background(), st, st.IndexStore(), ch) + err = st.Run(context.Background(), func(s transaction.Store) error { + return putter.Put(context.Background(), s, ch) + }) if err != nil { t.Fatal(err) } @@ -345,7 +398,7 @@ func TestCleanup(t *testing.T) { } for _, ch := range chunks { - exists, err := st.ChunkStore().Has(context.Background(), ch.Address()) + exists, err := st.ReadOnly().ChunkStore().Has(context.Background(), ch.Address()) if err != nil { t.Fatal(err) } @@ -361,13 +414,22 @@ func TestCleanup(t *testing.T) { st := newTestStorage(t) chunks := chunktest.GenerateTestRandomChunks(5) - putter, err := pinstore.NewCollection(st) + var ( + putter internal.PutterCloserWithReference + err error + ) + err = st.Run(context.Background(), func(s transaction.Store) error { + putter, err = pinstore.NewCollection(s.IndexStore()) + return err + }) if err != nil { t.Fatal(err) } for _, ch := range chunks { - err = putter.Put(context.Background(), st, st.IndexStore(), ch) + err = st.Run(context.Background(), func(s transaction.Store) error { + return putter.Put(context.Background(), s, ch) + }) if err != nil { t.Fatal(err) } @@ -379,7 +441,7 @@ func TestCleanup(t *testing.T) { } for _, ch := range chunks { - exists, err := st.ChunkStore().Has(context.Background(), ch.Address()) + exists, err := st.ReadOnly().ChunkStore().Has(context.Background(), ch.Address()) if err != nil { t.Fatal(err) } diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index 69b1abeccbb..1309fbed724 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -10,15 +10,16 @@ import ( "encoding/hex" "errors" "fmt" - "sync" + "sort" + "strconv" "sync/atomic" "time" "github.com/ethersphere/bee/pkg/log" "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storer/internal" "github.com/ethersphere/bee/pkg/storer/internal/chunkstamp" "github.com/ethersphere/bee/pkg/storer/internal/stampindex" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/topology" "resenje.org/multex" @@ -35,13 +36,13 @@ type Reserve struct { size atomic.Int64 radius atomic.Uint32 - binMtx sync.Mutex - mutx *multex.Multex + multx *multex.Multex + st transaction.Storage } func New( baseAddr swarm.Address, - store storage.Store, + st transaction.Storage, capacity int, radiusSetter topology.SetStorageRadiuser, logger log.Logger, @@ -49,51 +50,64 @@ func New( rs := &Reserve{ baseAddr: baseAddr, + st: st, capacity: capacity, radiusSetter: radiusSetter, logger: logger.WithName(reserveNamespace).Register(), - mutx: multex.New(), + multx: multex.New(), } - rItem := &radiusItem{} - err := store.Get(rItem) - if err != nil && !errors.Is(err, storage.ErrNotFound) { - return nil, err - } - rs.radius.Store(uint32(rItem.Radius)) + err := st.Run(context.Background(), func(s transaction.Store) error { + rItem := &radiusItem{} + err := s.IndexStore().Get(rItem) + if err != nil && !errors.Is(err, storage.ErrNotFound) { + return err + } + rs.radius.Store(uint32(rItem.Radius)) - epochItem := &EpochItem{} - err = store.Get(epochItem) - if err != nil { - if errors.Is(err, storage.ErrNotFound) { - err := store.Put(&EpochItem{Timestamp: uint64(time.Now().Unix())}) - if err != nil { - return nil, err + epochItem := &EpochItem{} + err = s.IndexStore().Get(epochItem) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + err := s.IndexStore().Put(&EpochItem{Timestamp: uint64(time.Now().Unix())}) + if err != nil { + return err + } + } else { + return err } - } else { - return nil, err } - } - size, err := store.Count(&BatchRadiusItem{}) - if err != nil { - return nil, err - } - rs.size.Store(int64(size)) + size, err := s.IndexStore().Count(&BatchRadiusItem{}) + if err != nil { + return err + } + rs.size.Store(int64(size)) + return nil + }) - return rs, nil + return rs, err } // Put stores a new chunk in the reserve and returns if the reserve size should increase. -func (r *Reserve) Put(ctx context.Context, store internal.Storage, chunk swarm.Chunk) error { - indexStore := store.IndexStore() - chunkStore := store.ChunkStore() - - unlock := r.lock(chunk.Address(), chunk.Stamp().BatchID()) - defer unlock() +func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { po := swarm.Proximity(r.baseAddr.Bytes(), chunk.Address().Bytes()) + // batchID lock, Put vs Eviction + r.multx.Lock(string(chunk.Stamp().BatchID())) + defer r.multx.Unlock(string(chunk.Stamp().BatchID())) + + // bin lock + r.multx.Lock(strconv.Itoa(int(po))) + defer r.multx.Unlock(strconv.Itoa(int(po))) + + trx, done := r.st.NewTransaction(ctx) + defer done() + + indexStore := trx.IndexStore() + chunkStore := trx.ChunkStore() + has, err := indexStore.Has(&BatchRadiusItem{ Bin: po, Address: chunk.Address(), @@ -106,18 +120,11 @@ func (r *Reserve) Put(ctx context.Context, store internal.Storage, chunk swarm.C return nil } - storeBatch, err := indexStore.Batch(ctx) - if err != nil { - return err - } - - newStampIndex := true - - item, loaded, err := stampindex.LoadOrStore(indexStore, storeBatch, reserveNamespace, chunk) + item, loadedStamp, err := stampindex.LoadOrStore(indexStore, reserveNamespace, chunk) if err != nil { return fmt.Errorf("load or store stamp index for chunk %v has fail: %w", chunk, err) } - if loaded { + if loadedStamp { prev := binary.BigEndian.Uint64(item.StampTimestamp) curr := binary.BigEndian.Uint64(chunk.Stamp().Timestamp()) if prev >= curr { @@ -129,9 +136,8 @@ func (r *Reserve) Put(ctx context.Context, store internal.Storage, chunk swarm.C // 2. Delete the old chunk's stamp data. // 3. Delete ALL old chunk related items from the reserve. // 4. Update the stamp index. - newStampIndex = false - err := r.removeChunk(ctx, store, storeBatch, item.ChunkAddress, chunk.Stamp().BatchID()) + err := r.removeChunk(ctx, trx, item.ChunkAddress, chunk.Stamp().BatchID()) if err != nil { return fmt.Errorf("failed removing older chunk: %w", err) } @@ -143,13 +149,13 @@ func (r *Reserve) Put(ctx context.Context, store internal.Storage, chunk swarm.C "batch_id", hex.EncodeToString(chunk.Stamp().BatchID()), ) - err = stampindex.Store(storeBatch, reserveNamespace, chunk) + err = stampindex.Store(indexStore, reserveNamespace, chunk) if err != nil { return fmt.Errorf("failed updating stamp index: %w", err) } } - err = chunkstamp.Store(storeBatch, reserveNamespace, chunk) + err = chunkstamp.Store(indexStore, reserveNamespace, chunk) if err != nil { return err } @@ -159,7 +165,7 @@ func (r *Reserve) Put(ctx context.Context, store internal.Storage, chunk swarm.C return err } - err = storeBatch.Put(&BatchRadiusItem{ + err = indexStore.Put(&BatchRadiusItem{ Bin: po, BinID: binID, Address: chunk.Address(), @@ -169,7 +175,7 @@ func (r *Reserve) Put(ctx context.Context, store internal.Storage, chunk swarm.C return err } - err = storeBatch.Put(&ChunkBinItem{ + err = indexStore.Put(&ChunkBinItem{ Bin: po, BinID: binID, Address: chunk.Address(), @@ -185,40 +191,40 @@ func (r *Reserve) Put(ctx context.Context, store internal.Storage, chunk swarm.C return err } - err = storeBatch.Commit() + err = trx.Commit() if err != nil { return err } - if newStampIndex { + if !loadedStamp { r.size.Add(1) } return nil } -func (r *Reserve) Has(store storage.Store, addr swarm.Address, batchID []byte) (bool, error) { +func (r *Reserve) Has(addr swarm.Address, batchID []byte) (bool, error) { item := &BatchRadiusItem{Bin: swarm.Proximity(r.baseAddr.Bytes(), addr.Bytes()), BatchID: batchID, Address: addr} - return store.Has(item) + return r.st.ReadOnly().IndexStore().Has(item) } -func (r *Reserve) Get(ctx context.Context, storage internal.Storage, addr swarm.Address, batchID []byte) (swarm.Chunk, error) { - - unlock := r.lock(addr, batchID) - defer unlock() +func (r *Reserve) Get(ctx context.Context, addr swarm.Address, batchID []byte) (swarm.Chunk, error) { + r.multx.Lock(string(batchID)) + defer r.multx.Unlock(string(batchID)) item := &BatchRadiusItem{Bin: swarm.Proximity(r.baseAddr.Bytes(), addr.Bytes()), BatchID: batchID, Address: addr} - err := storage.IndexStore().Get(item) + st := r.st.ReadOnly() + err := st.IndexStore().Get(item) if err != nil { return nil, err } - stamp, err := chunkstamp.LoadWithBatchID(storage.IndexStore(), reserveNamespace, addr, item.BatchID) + stamp, err := chunkstamp.LoadWithBatchID(st.IndexStore(), reserveNamespace, addr, item.BatchID) if err != nil { return nil, err } - ch, err := storage.ChunkStore().Get(ctx, addr) + ch, err := st.ChunkStore().Get(ctx, addr) if err != nil { return nil, err } @@ -226,101 +232,16 @@ func (r *Reserve) Get(ctx context.Context, storage internal.Storage, addr swarm. return ch.WithStamp(stamp), nil } -func (r *Reserve) IterateBin(store storage.Store, bin uint8, startBinID uint64, cb func(swarm.Address, uint64, []byte) (bool, error)) error { - err := store.Iterate(storage.Query{ - Factory: func() storage.Item { return &ChunkBinItem{} }, - Prefix: binIDToString(bin, startBinID), - PrefixAtStart: true, - }, func(res storage.Result) (bool, error) { - item := res.Entry.(*ChunkBinItem) - if item.Bin > bin { - return true, nil - } - - stop, err := cb(item.Address, item.BinID, item.BatchID) - if stop || err != nil { - return true, err - } - - return false, nil - }) - - return err -} - -func (r *Reserve) IterateChunks(store internal.Storage, startBin uint8, cb func(swarm.Chunk) (bool, error)) error { - err := store.IndexStore().Iterate(storage.Query{ - Factory: func() storage.Item { return &ChunkBinItem{} }, - Prefix: binIDToString(startBin, 0), - PrefixAtStart: true, - }, func(res storage.Result) (bool, error) { - item := res.Entry.(*ChunkBinItem) - - chunk, err := store.ChunkStore().Get(context.Background(), item.Address) - if err != nil { - return false, err - } - - stamp, err := chunkstamp.LoadWithBatchID(store.IndexStore(), reserveNamespace, item.Address, item.BatchID) - if err != nil { - return false, err - } - - stop, err := cb(chunk.WithStamp(stamp)) - if stop || err != nil { - return true, err - } - return false, nil - }) - - return err -} - -type ChunkItem struct { - ChunkAddress swarm.Address - BatchID []byte - Type swarm.ChunkType - BinID uint64 - Bin uint8 -} - -func (r *Reserve) IterateChunksItems(store internal.Storage, startBin uint8, cb func(ChunkItem) (bool, error)) error { - err := store.IndexStore().Iterate(storage.Query{ - Factory: func() storage.Item { return &ChunkBinItem{} }, - Prefix: binIDToString(startBin, 0), - PrefixAtStart: true, - }, func(res storage.Result) (bool, error) { - item := res.Entry.(*ChunkBinItem) - - chItem := ChunkItem{ - ChunkAddress: item.Address, - BatchID: item.BatchID, - Type: item.ChunkType, - BinID: item.BinID, - Bin: item.Bin, - } - - stop, err := cb(chItem) - if stop || err != nil { - return true, err - } - return false, nil - }) - - return err -} - // EvictBatchBin evicts all chunks from bins upto the bin provided. func (r *Reserve) EvictBatchBin( ctx context.Context, - txExecutor internal.TxExecutor, batchID []byte, count int, bin uint8, ) (int, error) { - unlock := r.lock(swarm.ZeroAddress, batchID) - defer unlock() + r.multx.Lock(string(batchID)) + defer r.multx.Unlock(string(batchID)) var evicted []*BatchRadiusItem @@ -328,26 +249,29 @@ func (r *Reserve) EvictBatchBin( return 0, nil } - err := txExecutor.Execute(ctx, func(store internal.Storage) error { - return store.IndexStore().Iterate(storage.Query{ - Factory: func() storage.Item { return &BatchRadiusItem{} }, - Prefix: string(batchID), - }, func(res storage.Result) (bool, error) { - batchRadius := res.Entry.(*BatchRadiusItem) - if batchRadius.Bin >= bin { - return true, nil - } - evicted = append(evicted, batchRadius) - if len(evicted) == count { - return true, nil - } - return false, nil - }) + err := r.st.ReadOnly().IndexStore().Iterate(storage.Query{ + Factory: func() storage.Item { return &BatchRadiusItem{} }, + Prefix: string(batchID), + }, func(res storage.Result) (bool, error) { + batchRadius := res.Entry.(*BatchRadiusItem) + if batchRadius.Bin >= bin { + return true, nil + } + evicted = append(evicted, batchRadius) + return false, nil }) if err != nil { return 0, err } + // evict oldest chunks first + sort.Slice(evicted, func(i, j int) bool { + return evicted[i].BinID < evicted[j].BinID + }) + + // evict only count many items + evicted = evicted[:min(len(evicted), count)] + batchCnt := 1_000 evictionCompleted := 0 defer func() { @@ -360,23 +284,12 @@ func (r *Reserve) EvictBatchBin( end = len(evicted) } - moveToCache := make([]swarm.Address, 0, end-i) - - err := txExecutor.Execute(ctx, func(store internal.Storage) error { - batch, err := store.IndexStore().Batch(ctx) - if err != nil { - return err - } - + err := r.st.Run(ctx, func(s transaction.Store) error { for _, item := range evicted[i:end] { - err = removeChunkWithItem(ctx, store, batch, item) + err = r.removeChunkWithItem(ctx, s, item) if err != nil { return err } - moveToCache = append(moveToCache, item.Address) - } - if err := batch.Commit(); err != nil { - return err } return nil }) @@ -391,8 +304,7 @@ func (r *Reserve) EvictBatchBin( func (r *Reserve) removeChunk( ctx context.Context, - store internal.Storage, - batch storage.Writer, + trx transaction.Store, chunkAddress swarm.Address, batchID []byte, ) error { @@ -401,21 +313,20 @@ func (r *Reserve) removeChunk( BatchID: batchID, Address: chunkAddress, } - err := store.IndexStore().Get(item) + err := trx.IndexStore().Get(item) if err != nil { return err } - return removeChunkWithItem(ctx, store, batch, item) + return r.removeChunkWithItem(ctx, trx, item) } -func removeChunkWithItem( +func (r *Reserve) removeChunkWithItem( ctx context.Context, - store internal.Storage, - batch storage.Writer, + trx transaction.Store, item *BatchRadiusItem, ) error { - indexStore := store.IndexStore() + indexStore := trx.IndexStore() var errs error @@ -423,38 +334,109 @@ func removeChunkWithItem( if stamp != nil { errs = errors.Join( stampindex.Delete( - batch, + trx.IndexStore(), reserveNamespace, swarm.NewChunk(item.Address, nil).WithStamp(stamp), ), - chunkstamp.DeleteWithStamp(batch, reserveNamespace, item.Address, stamp), + chunkstamp.DeleteWithStamp(trx.IndexStore(), reserveNamespace, item.Address, stamp), ) } return errors.Join(errs, - batch.Delete(item), - batch.Delete(&ChunkBinItem{Bin: item.Bin, BinID: item.BinID}), - store.ChunkStore().Delete(ctx, item.Address), + trx.IndexStore().Delete(item), + trx.IndexStore().Delete(&ChunkBinItem{Bin: item.Bin, BinID: item.BinID}), + trx.ChunkStore().Delete(ctx, item.Address), ) } -func (r *Reserve) lock(addr swarm.Address, batchID []byte) func() { - r.mutx.Lock(addr.ByteString()) - r.mutx.Lock(string(batchID)) - return func() { - r.mutx.Unlock(addr.ByteString()) - r.mutx.Unlock(string(batchID)) - } +func (r *Reserve) IterateBin(bin uint8, startBinID uint64, cb func(swarm.Address, uint64, []byte) (bool, error)) error { + err := r.st.ReadOnly().IndexStore().Iterate(storage.Query{ + Factory: func() storage.Item { return &ChunkBinItem{} }, + Prefix: binIDToString(bin, startBinID), + PrefixAtStart: true, + }, func(res storage.Result) (bool, error) { + item := res.Entry.(*ChunkBinItem) + if item.Bin > bin { + return true, nil + } + + stop, err := cb(item.Address, item.BinID, item.BatchID) + if stop || err != nil { + return true, err + } + + return false, nil + }) + + return err } -func (r *Reserve) Radius() uint8 { - return uint8(r.radius.Load()) +func (r *Reserve) IterateChunks(startBin uint8, cb func(swarm.Chunk) (bool, error)) error { + store := r.st.ReadOnly() + err := store.IndexStore().Iterate(storage.Query{ + Factory: func() storage.Item { return &ChunkBinItem{} }, + Prefix: binIDToString(startBin, 0), + PrefixAtStart: true, + }, func(res storage.Result) (bool, error) { + item := res.Entry.(*ChunkBinItem) + + chunk, err := store.ChunkStore().Get(context.Background(), item.Address) + if err != nil { + return false, err + } + + stamp, err := chunkstamp.LoadWithBatchID(store.IndexStore(), reserveNamespace, item.Address, item.BatchID) + if err != nil { + return false, err + } + + stop, err := cb(chunk.WithStamp(stamp)) + if stop || err != nil { + return true, err + } + return false, nil + }) + + return err } -func (r *Reserve) SetRadius(store storage.Store, rad uint8) error { - r.radius.Store(uint32(rad)) - r.radiusSetter.SetStorageRadius(rad) - return store.Put(&radiusItem{Radius: rad}) +type ChunkItem struct { + ChunkAddress swarm.Address + BatchID []byte + Type swarm.ChunkType + BinID uint64 + Bin uint8 +} + +func (r *Reserve) IterateChunksItems(startBin uint8, cb func(ChunkItem) (bool, error)) error { + store := r.st.ReadOnly() + err := store.IndexStore().Iterate(storage.Query{ + Factory: func() storage.Item { return &ChunkBinItem{} }, + Prefix: binIDToString(startBin, 0), + PrefixAtStart: true, + }, func(res storage.Result) (bool, error) { + item := res.Entry.(*ChunkBinItem) + + chItem := ChunkItem{ + ChunkAddress: item.Address, + BatchID: item.BatchID, + Type: item.ChunkType, + BinID: item.BinID, + Bin: item.Bin, + } + + stop, err := cb(chItem) + if stop || err != nil { + return true, err + } + return false, nil + }) + + return err +} + +func (r *Reserve) Radius() uint8 { + return uint8(r.radius.Load()) } func (r *Reserve) Size() int { @@ -476,12 +458,17 @@ func (r *Reserve) EvictionTarget() int { return int(r.size.Load()) - r.capacity } -func (r *Reserve) LastBinIDs(store storage.Store) ([]uint64, uint64, error) { - r.binMtx.Lock() - defer r.binMtx.Unlock() +func (r *Reserve) SetRadius(rad uint8) error { + r.radius.Store(uint32(rad)) + r.radiusSetter.SetStorageRadius(rad) + return r.st.Run(context.Background(), func(s transaction.Store) error { + return s.IndexStore().Put(&radiusItem{Radius: rad}) + }) +} +func (r *Reserve) LastBinIDs() ([]uint64, uint64, error) { var epoch EpochItem - err := store.Get(&epoch) + err := r.st.ReadOnly().IndexStore().Get(&epoch) if err != nil { return nil, 0, err } @@ -490,7 +477,7 @@ func (r *Reserve) LastBinIDs(store storage.Store) ([]uint64, uint64, error) { for bin := uint8(0); bin < swarm.MaxBins; bin++ { binItem := &BinItem{Bin: bin} - err := store.Get(binItem) + err := r.st.ReadOnly().IndexStore().Get(binItem) if err != nil { if errors.Is(err, storage.ErrNotFound) { ids[bin] = 0 @@ -505,11 +492,7 @@ func (r *Reserve) LastBinIDs(store storage.Store) ([]uint64, uint64, error) { return ids, epoch.Timestamp, nil } -// should be called under lock -func (r *Reserve) IncBinID(store storage.Store, bin uint8) (uint64, error) { - r.binMtx.Lock() - defer r.binMtx.Unlock() - +func (r *Reserve) IncBinID(store storage.IndexStore, bin uint8) (uint64, error) { item := &BinItem{Bin: bin} err := store.Get(item) if err != nil { diff --git a/pkg/storer/internal/reserve/reserve_test.go b/pkg/storer/internal/reserve/reserve_test.go index e2589407cfc..4220a97516e 100644 --- a/pkg/storer/internal/reserve/reserve_test.go +++ b/pkg/storer/internal/reserve/reserve_test.go @@ -22,6 +22,7 @@ import ( "github.com/ethersphere/bee/pkg/storer/internal/chunkstamp" "github.com/ethersphere/bee/pkg/storer/internal/reserve" "github.com/ethersphere/bee/pkg/storer/internal/stampindex" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" kademlia "github.com/ethersphere/bee/pkg/topology/mock" ) @@ -31,16 +32,11 @@ func TestReserve(t *testing.T) { baseAddr := swarm.RandAddress(t) - ts, closer := internal.NewInmemStorage() - t.Cleanup(func() { - if err := closer(); err != nil { - t.Errorf("failed closing the storage: %v", err) - } - }) + ts := internal.NewInmemStorage() r, err := reserve.New( baseAddr, - ts.IndexStore(), + ts, 0, kademlia.NewTopologyDriver(), log.Noop, ) @@ -51,15 +47,15 @@ func TestReserve(t *testing.T) { for b := 0; b < 2; b++ { for i := 1; i < 51; i++ { ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b) - err := r.Put(context.Background(), ts, ch) + err := r.Put(context.Background(), ch) if err != nil { t.Fatal(err) } - checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: uint8(b), BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, false) - checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: uint8(b), BinID: uint64(i)}, false) - checkChunk(t, ts, ch, false) + checkStore(t, ts.ReadOnly().IndexStore(), &reserve.BatchRadiusItem{Bin: uint8(b), BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, false) + checkStore(t, ts.ReadOnly().IndexStore(), &reserve.ChunkBinItem{Bin: uint8(b), BinID: uint64(i)}, false) + checkChunk(t, ts.ReadOnly(), ch, false) - h, err := r.Has(ts.IndexStore(), ch.Address(), ch.Stamp().BatchID()) + h, err := r.Has(ch.Address(), ch.Stamp().BatchID()) if err != nil { t.Fatal(err) } @@ -67,7 +63,7 @@ func TestReserve(t *testing.T) { t.Fatalf("expected chunk addr %s binID %d", ch.Address(), i) } - chGet, err := r.Get(context.Background(), ts, ch.Address(), ch.Stamp().BatchID()) + chGet, err := r.Get(context.Background(), ch.Address(), ch.Stamp().BatchID()) if err != nil { t.Fatal(err) } @@ -84,16 +80,11 @@ func TestReserveChunkType(t *testing.T) { ctx := context.Background() baseAddr := swarm.RandAddress(t) - ts, closer := internal.NewInmemStorage() - t.Cleanup(func() { - if err := closer(); err != nil { - t.Errorf("failed closing the storage: %v", err) - } - }) + ts := internal.NewInmemStorage() r, err := reserve.New( baseAddr, - ts.IndexStore(), + ts, 0, kademlia.NewTopologyDriver(), log.Noop, ) @@ -111,12 +102,12 @@ func TestReserveChunkType(t *testing.T) { ch = chunk.GenerateTestRandomSoChunk(t, ch) storedChunksSO++ } - if err := r.Put(ctx, ts, ch); err != nil { + if err := r.Put(ctx, ch); err != nil { t.Errorf("unexpected error: %v", err) } } - err = ts.IndexStore().Iterate(storage.Query{ + err = ts.ReadOnly().IndexStore().Iterate(storage.Query{ Factory: func() storage.Item { return &reserve.ChunkBinItem{} }, }, func(res storage.Result) (bool, error) { item := res.Entry.(*reserve.ChunkBinItem) @@ -146,16 +137,11 @@ func TestReplaceOldIndex(t *testing.T) { baseAddr := swarm.RandAddress(t) - ts, closer := internal.NewInmemStorage() - t.Cleanup(func() { - if err := closer(); err != nil { - t.Errorf("failed closing the storage: %v", err) - } - }) + ts := internal.NewInmemStorage() r, err := reserve.New( baseAddr, - ts.IndexStore(), + ts, 0, kademlia.NewTopologyDriver(), log.Noop, ) @@ -167,27 +153,27 @@ func TestReplaceOldIndex(t *testing.T) { ch1 := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(postagetesting.MustNewFields(batch.ID, 0, 0)) ch2 := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(postagetesting.MustNewFields(batch.ID, 0, 1)) - err = r.Put(context.Background(), ts, ch1) + err = r.Put(context.Background(), ch1) if err != nil { t.Fatal(err) } - err = r.Put(context.Background(), ts, ch2) + err = r.Put(context.Background(), ch2) if err != nil { t.Fatal(err) } // Chunk 1 must be gone - checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: 0, BatchID: ch1.Stamp().BatchID(), Address: ch1.Address()}, true) - checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: 0, BinID: 1}, true) - checkChunk(t, ts, ch1, true) + checkStore(t, ts.ReadOnly().IndexStore(), &reserve.BatchRadiusItem{Bin: 0, BatchID: ch1.Stamp().BatchID(), Address: ch1.Address()}, true) + checkStore(t, ts.ReadOnly().IndexStore(), &reserve.ChunkBinItem{Bin: 0, BinID: 1}, true) + checkChunk(t, ts.ReadOnly(), ch1, true) // Chunk 2 must be stored - checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: 0, BatchID: ch2.Stamp().BatchID(), Address: ch2.Address()}, false) - checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: 0, BinID: 2}, false) - checkChunk(t, ts, ch2, false) + checkStore(t, ts.ReadOnly().IndexStore(), &reserve.BatchRadiusItem{Bin: 0, BatchID: ch2.Stamp().BatchID(), Address: ch2.Address()}, false) + checkStore(t, ts.ReadOnly().IndexStore(), &reserve.ChunkBinItem{Bin: 0, BinID: 2}, false) + checkChunk(t, ts.ReadOnly(), ch2, false) - item, err := stampindex.Load(ts.IndexStore(), "reserve", ch2) + item, err := stampindex.Load(ts.ReadOnly().IndexStore(), "reserve", ch2) if err != nil { t.Fatal(err) } @@ -201,12 +187,7 @@ func TestEvict(t *testing.T) { baseAddr := swarm.RandAddress(t) - ts, closer := internal.NewInmemStorage() - t.Cleanup(func() { - if err := closer(); err != nil { - t.Errorf("failed closing the storage: %v", err) - } - }) + ts := internal.NewInmemStorage() chunksPerBatch := 50 var chunks []swarm.Chunk @@ -215,7 +196,7 @@ func TestEvict(t *testing.T) { r, err := reserve.New( baseAddr, - ts.IndexStore(), + ts, 0, kademlia.NewTopologyDriver(), log.Noop, ) @@ -227,7 +208,7 @@ func TestEvict(t *testing.T) { for b := 0; b < 3; b++ { ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b).WithStamp(postagetesting.MustNewBatchStamp(batches[b].ID)) chunks = append(chunks, ch) - err := r.Put(context.Background(), ts, ch) + err := r.Put(context.Background(), ch) if err != nil { t.Fatal(err) } @@ -236,7 +217,7 @@ func TestEvict(t *testing.T) { totalEvicted := 0 for i := 0; i < 3; i++ { - evicted, err := r.EvictBatchBin(context.Background(), ts, evictBatch.ID, math.MaxInt, uint8(i)) + evicted, err := r.EvictBatchBin(context.Background(), evictBatch.ID, math.MaxInt, uint8(i)) if err != nil { t.Fatal(err) } @@ -252,21 +233,21 @@ func TestEvict(t *testing.T) { for i, ch := range chunks { binID := i%chunksPerBatch + 1 b := swarm.Proximity(baseAddr.Bytes(), ch.Address().Bytes()) - _, err := r.Get(context.Background(), ts, ch.Address(), ch.Stamp().BatchID()) + _, err := r.Get(context.Background(), ch.Address(), ch.Stamp().BatchID()) if bytes.Equal(ch.Stamp().BatchID(), evictBatch.ID) { if !errors.Is(err, storage.ErrNotFound) { t.Fatalf("got err %v, want %v", err, storage.ErrNotFound) } - checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: b, BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, true) - checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: b, BinID: uint64(binID)}, true) - checkChunk(t, ts, ch, true) + checkStore(t, ts.ReadOnly().IndexStore(), &reserve.BatchRadiusItem{Bin: b, BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, true) + checkStore(t, ts.ReadOnly().IndexStore(), &reserve.ChunkBinItem{Bin: b, BinID: uint64(binID)}, true) + checkChunk(t, ts.ReadOnly(), ch, true) } else { if err != nil { t.Fatal(err) } - checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: b, BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, false) - checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: b, BinID: uint64(binID)}, false) - checkChunk(t, ts, ch, false) + checkStore(t, ts.ReadOnly().IndexStore(), &reserve.BatchRadiusItem{Bin: b, BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, false) + checkStore(t, ts.ReadOnly().IndexStore(), &reserve.ChunkBinItem{Bin: b, BinID: uint64(binID)}, false) + checkChunk(t, ts.ReadOnly(), ch, false) } } } @@ -276,29 +257,32 @@ func TestEvictMaxCount(t *testing.T) { baseAddr := swarm.RandAddress(t) - ts, closer := internal.NewInmemStorage() - t.Cleanup(func() { - if err := closer(); err != nil { - t.Errorf("failed closing the storage: %v", err) - } - }) + ts := internal.NewInmemStorage() - r, err := reserve.New(baseAddr, ts.IndexStore(), 0, kademlia.NewTopologyDriver(), log.Noop) + r, err := reserve.New( + baseAddr, + ts, + 0, kademlia.NewTopologyDriver(), + log.Noop, + ) if err != nil { t.Fatal(err) } + var chunks []swarm.Chunk + batch := postagetesting.MustNewBatch() for i := 0; i < 50; i++ { ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(postagetesting.MustNewBatchStamp(batch.ID)) - err := r.Put(context.Background(), ts, ch) + chunks = append(chunks, ch) + err := r.Put(context.Background(), ch) if err != nil { t.Fatal(err) } } - evicted, err := r.EvictBatchBin(context.Background(), ts, batch.ID, 10, 1) + evicted, err := r.EvictBatchBin(context.Background(), batch.ID, 10, 1) if err != nil { t.Fatal(err) } @@ -306,29 +290,32 @@ func TestEvictMaxCount(t *testing.T) { t.Fatalf("wanted evicted count 10, got %d", evicted) } - if r.Size() != 40 { - t.Fatalf("wanted size 40, got %d", r.Size()) + for i, ch := range chunks { + if i < 10 { + checkStore(t, ts.ReadOnly().IndexStore(), &reserve.BatchRadiusItem{Bin: 0, BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, true) + checkStore(t, ts.ReadOnly().IndexStore(), &reserve.ChunkBinItem{Bin: 0, BinID: uint64(i + 1)}, true) + checkChunk(t, ts.ReadOnly(), ch, true) + } else { + checkStore(t, ts.ReadOnly().IndexStore(), &reserve.BatchRadiusItem{Bin: 0, BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, false) + checkStore(t, ts.ReadOnly().IndexStore(), &reserve.ChunkBinItem{Bin: 0, BinID: uint64(i + 1)}, false) + checkChunk(t, ts.ReadOnly(), ch, false) + } } } func TestIterate(t *testing.T) { t.Parallel() - createReserve := func(t *testing.T) (*reserve.Reserve, internal.Storage) { + createReserve := func(t *testing.T) *reserve.Reserve { t.Helper() baseAddr := swarm.RandAddress(t) - ts, closer := internal.NewInmemStorage() - t.Cleanup(func() { - if err := closer(); err != nil { - t.Errorf("failed closing the storage: %v", err) - } - }) + ts := internal.NewInmemStorage() r, err := reserve.New( baseAddr, - ts.IndexStore(), + ts, 0, kademlia.NewTopologyDriver(), log.Noop, ) @@ -339,23 +326,23 @@ func TestIterate(t *testing.T) { for b := 0; b < 3; b++ { for i := 0; i < 10; i++ { ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b) - err := r.Put(context.Background(), ts, ch) + err := r.Put(context.Background(), ch) if err != nil { t.Fatal(err) } } } - return r, ts + return r } t.Run("iterate bin", func(t *testing.T) { t.Parallel() - r, ts := createReserve(t) + r := createReserve(t) var id uint64 = 1 - err := r.IterateBin(ts.IndexStore(), 1, 0, func(ch swarm.Address, binID uint64, _ []byte) (bool, error) { + err := r.IterateBin(1, 0, func(ch swarm.Address, binID uint64, _ []byte) (bool, error) { if binID != id { t.Fatalf("got %d, want %d", binID, id) } @@ -373,10 +360,10 @@ func TestIterate(t *testing.T) { t.Run("iterate chunks", func(t *testing.T) { t.Parallel() - r, ts := createReserve(t) + r := createReserve(t) count := 0 - err := r.IterateChunks(ts, 2, func(_ swarm.Chunk) (bool, error) { + err := r.IterateChunks(2, func(_ swarm.Chunk) (bool, error) { count++ return false, nil }) @@ -391,10 +378,10 @@ func TestIterate(t *testing.T) { t.Run("iterate chunk items", func(t *testing.T) { t.Parallel() - r, ts := createReserve(t) + r := createReserve(t) count := 0 - err := r.IterateChunksItems(ts, 0, func(_ reserve.ChunkItem) (bool, error) { + err := r.IterateChunksItems(0, func(_ reserve.ChunkItem) (bool, error) { count++ return false, nil }) @@ -409,9 +396,9 @@ func TestIterate(t *testing.T) { t.Run("last bin id", func(t *testing.T) { t.Parallel() - r, ts := createReserve(t) + r := createReserve(t) - ids, _, err := r.LastBinIDs(ts.IndexStore()) + ids, _, err := r.LastBinIDs() if err != nil { t.Fatal(err) } @@ -429,7 +416,7 @@ func TestIterate(t *testing.T) { }) } -func checkStore(t *testing.T, s storage.Store, k storage.Key, gone bool) { +func checkStore(t *testing.T, s storage.Reader, k storage.Key, gone bool) { t.Helper() h, err := s.Has(k) if err != nil { @@ -443,7 +430,7 @@ func checkStore(t *testing.T, s storage.Store, k storage.Key, gone bool) { } } -func checkChunk(t *testing.T, s internal.Storage, ch swarm.Chunk, gone bool) { +func checkChunk(t *testing.T, s transaction.ReadOnlyStore, ch swarm.Chunk, gone bool) { t.Helper() h, err := s.ChunkStore().Has(context.Background(), ch.Address()) if err != nil { diff --git a/pkg/storer/internal/stampindex/stampindex.go b/pkg/storer/internal/stampindex/stampindex.go index 596c2af6af0..be7485c57d9 100644 --- a/pkg/storer/internal/stampindex/stampindex.go +++ b/pkg/storer/internal/stampindex/stampindex.go @@ -150,8 +150,7 @@ func (i Item) String() string { // If the record is not found, it will try to create and save a new record and // return it. func LoadOrStore( - s storage.Reader, - w storage.Writer, + s storage.IndexStore, namespace string, chunk swarm.Chunk, ) (item *Item, loaded bool, err error) { @@ -165,7 +164,7 @@ func LoadOrStore( StampTimestamp: chunk.Stamp().Timestamp(), ChunkAddress: chunk.Address(), ChunkIsImmutable: chunk.Immutable(), - }, false, Store(w, namespace, chunk) + }, false, Store(s, namespace, chunk) } return nil, false, err } @@ -189,7 +188,7 @@ func Load(s storage.Reader, namespace string, chunk swarm.Chunk) (*Item, error) // Store creates new or updated an existing stamp index // record related to the given namespace and chunk. -func Store(s storage.Writer, namespace string, chunk swarm.Chunk) error { +func Store(s storage.IndexStore, namespace string, chunk swarm.Chunk) error { item := &Item{ namespace: []byte(namespace), batchID: chunk.Stamp().BatchID(), diff --git a/pkg/storer/internal/stampindex/stampindex_test.go b/pkg/storer/internal/stampindex/stampindex_test.go index c95cdf0b682..cb3ee47d7f4 100644 --- a/pkg/storer/internal/stampindex/stampindex_test.go +++ b/pkg/storer/internal/stampindex/stampindex_test.go @@ -5,6 +5,7 @@ package stampindex_test import ( + "context" "errors" "fmt" "testing" @@ -14,20 +15,16 @@ import ( chunktest "github.com/ethersphere/bee/pkg/storage/testing" "github.com/ethersphere/bee/pkg/storer/internal" "github.com/ethersphere/bee/pkg/storer/internal/stampindex" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" ) // newTestStorage is a helper function that creates a new storage. -func newTestStorage(t *testing.T) internal.Storage { +func newTestStorage(t *testing.T) transaction.Storage { t.Helper() - - inmemStorage, closer := internal.NewInmemStorage() - t.Cleanup(func() { - if err := closer(); err != nil { - t.Errorf("failed closing the storage: %v", err) - } - }) + inmemStorage := internal.NewInmemStorage() return inmemStorage } @@ -126,7 +123,11 @@ func TestStoreLoadDelete(t *testing.T) { ns := fmt.Sprintf("namespace_%d", i) t.Run(ns, func(t *testing.T) { t.Run("store new stamp index", func(t *testing.T) { - err := stampindex.Store(ts.IndexStore(), ns, chunk) + + err := ts.Run(context.Background(), func(s transaction.Store) error { + return stampindex.Store(s.IndexStore(), ns, chunk) + + }) if err != nil { t.Fatalf("Store(...): unexpected error: %v", err) } @@ -145,7 +146,7 @@ func TestStoreLoadDelete(t *testing.T) { chunk.Stamp().BatchID(), chunk.Stamp().Index(), ) - err = ts.IndexStore().Get(have) + err = ts.ReadOnly().IndexStore().Get(have) if err != nil { t.Fatalf("Get(...): unexpected error: %v", err) } @@ -165,7 +166,7 @@ func TestStoreLoadDelete(t *testing.T) { want.ChunkAddress = chunk.Address() want.ChunkIsImmutable = chunk.Immutable() - have, err := stampindex.Load(ts.IndexStore(), ns, chunk) + have, err := stampindex.Load(ts.ReadOnly().IndexStore(), ns, chunk) if err != nil { t.Fatalf("Load(...): unexpected error: %v", err) } @@ -176,12 +177,15 @@ func TestStoreLoadDelete(t *testing.T) { }) t.Run("delete stored stamp index", func(t *testing.T) { - err := stampindex.Delete(ts.IndexStore(), ns, chunk) + + err := ts.Run(context.Background(), func(s transaction.Store) error { + return stampindex.Delete(s.IndexStore(), ns, chunk) + }) if err != nil { t.Fatalf("Delete(...): unexpected error: %v", err) } - have, err := stampindex.Load(ts.IndexStore(), ns, chunk) + have, err := stampindex.Load(ts.ReadOnly().IndexStore(), ns, chunk) if have != nil { t.Fatalf("Load(...): unexpected item %v", have) } @@ -190,7 +194,7 @@ func TestStoreLoadDelete(t *testing.T) { } cnt := 0 - err = ts.IndexStore().Iterate( + err = ts.ReadOnly().IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return new(stampindex.Item) @@ -230,21 +234,25 @@ func TestLoadOrStore(t *testing.T) { want.ChunkAddress = chunk.Address() want.ChunkIsImmutable = chunk.Immutable() - r, w := ts.IndexStore(), ts.IndexStore() + trx, done := ts.NewTransaction(context.Background()) - have, loaded, err := stampindex.LoadOrStore(r, w, ns, chunk) + have, loaded, err := stampindex.LoadOrStore(trx.IndexStore(), ns, chunk) if err != nil { t.Fatalf("LoadOrStore(...): unexpected error: %v", err) } if loaded { t.Fatalf("LoadOrStore(...): unexpected loaded flag") } - if diff := cmp.Diff(want, have, cmp.AllowUnexported(stampindex.Item{})); diff != "" { t.Fatalf("Get(...): mismatch (-want +have):\n%s", diff) } + assert.NoError(t, trx.Commit()) + done() + + trx, done = ts.NewTransaction(context.Background()) + defer done() - have, loaded, err = stampindex.LoadOrStore(r, w, ns, chunk) + have, loaded, err = stampindex.LoadOrStore(trx.IndexStore(), ns, chunk) if err != nil { t.Fatalf("LoadOrStore(...): unexpected error: %v", err) } @@ -255,9 +263,10 @@ func TestLoadOrStore(t *testing.T) { if diff := cmp.Diff(want, have, cmp.AllowUnexported(stampindex.Item{})); diff != "" { t.Fatalf("Get(...): mismatch (-want +have):\n%s", diff) } + assert.NoError(t, trx.Commit()) cnt := 0 - err = ts.IndexStore().Iterate( + err = ts.ReadOnly().IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return new(stampindex.Item) diff --git a/pkg/storer/internal/transaction/metrics.go b/pkg/storer/internal/transaction/metrics.go new file mode 100644 index 00000000000..33ce09e21cf --- /dev/null +++ b/pkg/storer/internal/transaction/metrics.go @@ -0,0 +1,41 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package transaction + +import ( + m "github.com/ethersphere/bee/pkg/metrics" + "github.com/prometheus/client_golang/prometheus" +) + +type metrics struct { + MethodCalls *prometheus.CounterVec + MethodDuration *prometheus.HistogramVec +} + +// newMetrics is a convenient constructor for creating new metrics. +func newMetrics() metrics { + const subsystem = "transaction" + + return metrics{ + MethodCalls: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: m.Namespace, + Subsystem: subsystem, + Name: "method_calls", + Help: "The number of method calls.", + }, + []string{"method", "status"}, + ), + MethodDuration: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: m.Namespace, + Subsystem: subsystem, + Name: "method_duration", + Help: "The duration each method call took.", + }, + []string{"method", "status"}, + ), + } +} diff --git a/pkg/storer/internal/transaction/transaction.go b/pkg/storer/internal/transaction/transaction.go new file mode 100644 index 00000000000..a109bb3936a --- /dev/null +++ b/pkg/storer/internal/transaction/transaction.go @@ -0,0 +1,308 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package transaction + +import ( + "context" + "errors" + "fmt" + "time" + + m "github.com/ethersphere/bee/pkg/metrics" + "github.com/ethersphere/bee/pkg/sharky" + "github.com/ethersphere/bee/pkg/storage" + "github.com/ethersphere/bee/pkg/storer/internal/chunkstore" + "github.com/ethersphere/bee/pkg/swarm" + "github.com/prometheus/client_golang/prometheus" + "resenje.org/multex" +) + +// TODO(esad): remove contexts from sharky and any other storage call + +/* +The rules of the transction is as follows: + +-sharky_write -> write to disk, keep sharky location in memory +-sharky_release -> keep location in memory, do not release from the disk +-store write -> write to batch +-on commit -> if batch_commit succeeds, release sharky_release locations from the disk + -> if batch_commit fails or is not called, release all sharky_write location from the disk, do nothing for sharky_release +*/ + +type Transaction interface { + Store + Commit() error +} + +type Store interface { + ChunkStore() storage.ChunkStore + IndexStore() storage.IndexStore +} + +type ReadOnlyStore interface { + IndexStore() storage.Reader + ChunkStore() storage.ReadOnlyChunkStore +} + +type Storage interface { + NewTransaction(context.Context) (Transaction, func()) + ReadOnly() ReadOnlyStore + Run(context.Context, func(Store) error) error + Close() error +} + +type store struct { + sharky *sharky.Store + bstore storage.BatchStore + metrics metrics + chunkLocker *multex.Multex +} + +func NewStorage(sharky *sharky.Store, bstore storage.BatchStore) Storage { + return &store{sharky, bstore, newMetrics(), multex.New()} +} + +type transaction struct { + batch storage.Batch + indexstore *indexTrx + chunkStore *chunkStoreTrx + sharkyTrx *sharkyTrx + metrics metrics +} + +// NewTransaction returns a new storage transaction. +// Commit must be called to persist data to the disk. +// The callback function must be the final call of the transaction whether or not any errors +// were returned from the storage ops or commit. Safest option is to do a defer call immediately after +// creating the transaction. +// Calls made to the transaction are NOT thread-safe. +func (s *store) NewTransaction(ctx context.Context) (Transaction, func()) { + + b := s.bstore.Batch(ctx) + indexTrx := &indexTrx{s.bstore, b} + sharyTrx := &sharkyTrx{s.sharky, s.metrics, nil, nil} + + t := &transaction{ + batch: b, + indexstore: indexTrx, + chunkStore: &chunkStoreTrx{indexTrx, sharyTrx, s.chunkLocker, make(map[string]struct{}), s.metrics, false}, + sharkyTrx: sharyTrx, + metrics: s.metrics, + } + + return t, func() { + // for whatever reason, commit was not called + // release uncommitted but written sharky locations + // unlock the locked addresses + for _, l := range t.sharkyTrx.writtenLocs { + _ = t.sharkyTrx.sharky.Release(context.TODO(), l) + } + for addr := range t.chunkStore.lockedAddrs { + s.chunkLocker.Unlock(addr) + } + t.sharkyTrx.writtenLocs = nil + t.chunkStore.lockedAddrs = nil + } +} + +func (s *store) ReadOnly() ReadOnlyStore { + indexStore := &indexTrx{s.bstore, nil} + sharyTrx := &sharkyTrx{s.sharky, s.metrics, nil, nil} + + return &readOnly{indexStore, &chunkStoreTrx{indexStore, sharyTrx, s.chunkLocker, nil, s.metrics, true}} +} + +func (s *store) Run(ctx context.Context, f func(Store) error) error { + trx, done := s.NewTransaction(ctx) + defer done() + + err := f(trx) + if err != nil { + return err + } + return trx.Commit() +} + +// Metrics returns set of prometheus collectors. +func (s *store) Metrics() []prometheus.Collector { + return m.PrometheusCollectorsFromFields(s.metrics) +} + +func (s *store) Close() error { + return errors.Join(s.bstore.Close(), s.sharky.Close()) +} + +type readOnly struct { + indexStore *indexTrx + chunkStore *chunkStoreTrx +} + +func (t *readOnly) IndexStore() storage.Reader { + return t.indexStore +} + +func (t *readOnly) ChunkStore() storage.ReadOnlyChunkStore { + return t.chunkStore +} + +func (t *transaction) Commit() (err error) { + + defer handleMetric("commit", t.metrics)(err) + defer func() { + for addr := range t.chunkStore.lockedAddrs { + t.chunkStore.globalLocker.Unlock(addr) + } + t.chunkStore.lockedAddrs = nil + t.sharkyTrx.writtenLocs = nil + }() + + h := handleMetric("batch_commit", t.metrics) + err = t.batch.Commit() + h(err) + if err != nil { + for _, l := range t.sharkyTrx.writtenLocs { + if rerr := t.sharkyTrx.sharky.Release(context.TODO(), l); rerr != nil { + err = errors.Join(err, fmt.Errorf("failed releasing location during commit rollback %s: %w", l, rerr)) + } + } + return err + } + + for _, l := range t.sharkyTrx.releasedLocs { + h := handleMetric("sharky_release", t.metrics) + rerr := t.sharkyTrx.sharky.Release(context.TODO(), l) + h(rerr) + if rerr != nil { + err = errors.Join(err, fmt.Errorf("failed releasing location afer commit %s: %w", l, rerr)) + } + } + + return err +} + +// IndexStore gives acces to the index store of the transaction. +// Note that no writes are persisted to the disk until the commit is called. +// Reads return data from the disk and not what has been written to the transaction before the commit call. +func (t *transaction) IndexStore() storage.IndexStore { + return t.indexstore +} + +// ChunkStore gives acces to the chunkstore of the transaction. +// Note that no writes are persisted to the disk until the commit is called. +// Reads return data from the disk and not what has been written to the transaction before the commit call. +func (t *transaction) ChunkStore() storage.ChunkStore { + return t.chunkStore +} + +type chunkStoreTrx struct { + indexStore *indexTrx + sharkyTrx *sharkyTrx + globalLocker *multex.Multex + lockedAddrs map[string]struct{} + metrics metrics + readOnly bool +} + +func (c *chunkStoreTrx) Get(ctx context.Context, addr swarm.Address) (ch swarm.Chunk, err error) { + defer handleMetric("chunkstore_get", c.metrics)(err) + unlock := c.lock(addr) + defer unlock() + ch, err = chunkstore.Get(ctx, c.indexStore, c.sharkyTrx, addr) + return ch, err +} +func (c *chunkStoreTrx) Has(ctx context.Context, addr swarm.Address) (_ bool, err error) { + defer handleMetric("chunkstore_has", c.metrics)(err) + unlock := c.lock(addr) + defer unlock() + return chunkstore.Has(ctx, c.indexStore, addr) +} +func (c *chunkStoreTrx) Put(ctx context.Context, ch swarm.Chunk) (err error) { + defer handleMetric("chunkstore_put", c.metrics)(err) + unlock := c.lock(ch.Address()) + defer unlock() + return chunkstore.Put(ctx, c.indexStore, c.sharkyTrx, ch) +} +func (c *chunkStoreTrx) Delete(ctx context.Context, addr swarm.Address) (err error) { + defer handleMetric("chunkstore_delete", c.metrics)(err) + unlock := c.lock(addr) + defer unlock() + return chunkstore.Delete(ctx, c.indexStore, c.sharkyTrx, addr) +} +func (c *chunkStoreTrx) Iterate(ctx context.Context, fn storage.IterateChunkFn) (err error) { + defer handleMetric("chunkstore_iterate", c.metrics)(err) + return chunkstore.Iterate(ctx, c.indexStore, c.sharkyTrx, fn) +} + +func (c *chunkStoreTrx) lock(addr swarm.Address) func() { + // directly lock + if c.readOnly { + c.globalLocker.Lock(addr.ByteString()) + return func() { c.globalLocker.Unlock(addr.ByteString()) } + } + + // lock chunk only once in the same transaction + if _, ok := c.lockedAddrs[addr.ByteString()]; !ok { + c.globalLocker.Lock(addr.ByteString()) + c.lockedAddrs[addr.ByteString()] = struct{}{} + } + + return func() {} // unlocking the chunk will be done in the Commit() +} + +type indexTrx struct { + store storage.Reader + batch storage.Batch +} + +func (s *indexTrx) Get(i storage.Item) error { return s.store.Get(i) } +func (s *indexTrx) Has(k storage.Key) (bool, error) { return s.store.Has(k) } +func (s *indexTrx) GetSize(k storage.Key) (int, error) { return s.store.GetSize(k) } +func (s *indexTrx) Iterate(q storage.Query, f storage.IterateFn) error { + return s.store.Iterate(q, f) +} +func (s *indexTrx) Count(k storage.Key) (int, error) { return s.store.Count(k) } +func (s *indexTrx) Put(i storage.Item) error { return s.batch.Put(i) } +func (s *indexTrx) Delete(i storage.Item) error { return s.batch.Delete(i) } + +type sharkyTrx struct { + sharky *sharky.Store + metrics metrics + writtenLocs []sharky.Location + releasedLocs []sharky.Location +} + +func (s *sharkyTrx) Read(ctx context.Context, loc sharky.Location, buf []byte) (err error) { + defer handleMetric("sharky_read", s.metrics)(err) + return s.sharky.Read(ctx, loc, buf) +} + +func (s *sharkyTrx) Write(ctx context.Context, data []byte) (_ sharky.Location, err error) { + defer handleMetric("sharky_write", s.metrics)(err) + loc, err := s.sharky.Write(ctx, data) + if err != nil { + return sharky.Location{}, err + } + + s.writtenLocs = append(s.writtenLocs, loc) + return loc, nil +} + +func (s *sharkyTrx) Release(ctx context.Context, loc sharky.Location) error { + s.releasedLocs = append(s.releasedLocs, loc) + return nil +} + +func handleMetric(key string, m metrics) func(err error) { + t := time.Now() + return func(err error) { + if err != nil { + m.MethodCalls.WithLabelValues(key, "failure").Inc() + m.MethodDuration.WithLabelValues(key, "failure").Observe(float64(time.Since(t))) + } else { + m.MethodCalls.WithLabelValues(key, "success").Inc() + m.MethodDuration.WithLabelValues(key, "success").Observe(float64(time.Since(t))) + } + } +} diff --git a/pkg/storer/internal/transaction/transaction_test.go b/pkg/storer/internal/transaction/transaction_test.go new file mode 100644 index 00000000000..5c621f88492 --- /dev/null +++ b/pkg/storer/internal/transaction/transaction_test.go @@ -0,0 +1,150 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package transaction_test + +import ( + "context" + "io/fs" + "os" + "path/filepath" + "testing" + + "github.com/ethersphere/bee/pkg/sharky" + "github.com/ethersphere/bee/pkg/storage" + "github.com/ethersphere/bee/pkg/storage/leveldbstore" + test "github.com/ethersphere/bee/pkg/storage/testing" + "github.com/ethersphere/bee/pkg/storer/internal/cache" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" + "github.com/ethersphere/bee/pkg/swarm" + "github.com/stretchr/testify/assert" +) + +type dirFS struct { + basedir string +} + +func (d *dirFS) Open(path string) (fs.File, error) { + return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0644) +} + +func Test_TransactionStorage(t *testing.T) { + t.Parallel() + + sharkyStore, err := sharky.New(&dirFS{basedir: t.TempDir()}, 32, swarm.SocMaxChunkSize) + assert.NoError(t, err) + + store, err := leveldbstore.New("", nil) + assert.NoError(t, err) + + st := transaction.NewStorage(sharkyStore, store) + t.Cleanup(func() { + assert.NoError(t, st.Close()) + }) + + t.Run("put", func(t *testing.T) { + t.Parallel() + + tx, done := st.NewTransaction(context.Background()) + defer done() + + ch1 := test.GenerateTestRandomChunk() + ch2 := test.GenerateTestRandomChunk() + + assert.NoError(t, tx.IndexStore().Put(&cache.CacheEntryItem{Address: ch1.Address(), AccessTimestamp: 1})) + assert.NoError(t, tx.ChunkStore().Put(context.Background(), ch1)) + assert.NoError(t, tx.IndexStore().Put(&cache.CacheEntryItem{Address: ch2.Address(), AccessTimestamp: 1})) + assert.NoError(t, tx.ChunkStore().Put(context.Background(), ch2)) + assert.NoError(t, tx.Commit()) + + item := cache.CacheEntryItem{Address: ch1.Address()} + assert.NoError(t, st.ReadOnly().IndexStore().Get(&item)) + assert.Equal(t, item, cache.CacheEntryItem{Address: ch1.Address(), AccessTimestamp: 1}) + + ch1_get, err := st.ReadOnly().ChunkStore().Get(context.Background(), ch1.Address()) + assert.NoError(t, err) + assert.Equal(t, ch1.Data(), ch1_get.Data()) + assert.Equal(t, ch1.Address(), ch1_get.Address()) + + item = cache.CacheEntryItem{Address: ch2.Address()} + assert.NoError(t, st.ReadOnly().IndexStore().Get(&item)) + assert.Equal(t, item, cache.CacheEntryItem{Address: ch2.Address(), AccessTimestamp: 1}) + + ch2_get, err := st.ReadOnly().ChunkStore().Get(context.Background(), ch1.Address()) + assert.NoError(t, err) + assert.Equal(t, ch1.Data(), ch2_get.Data()) + assert.Equal(t, ch1.Address(), ch2_get.Address()) + }) + + t.Run("put-forget commit", func(t *testing.T) { + t.Parallel() + + tx, done := st.NewTransaction(context.Background()) + + ch1 := test.GenerateTestRandomChunk() + ch2 := test.GenerateTestRandomChunk() + + assert.NoError(t, tx.IndexStore().Put(&cache.CacheEntryItem{Address: ch1.Address(), AccessTimestamp: 1})) + assert.NoError(t, tx.ChunkStore().Put(context.Background(), ch1)) + assert.NoError(t, tx.IndexStore().Put(&cache.CacheEntryItem{Address: ch2.Address(), AccessTimestamp: 1})) + assert.NoError(t, tx.ChunkStore().Put(context.Background(), ch2)) + + done() + + assert.ErrorIs(t, st.ReadOnly().IndexStore().Get(&cache.CacheEntryItem{Address: ch1.Address()}), storage.ErrNotFound) + assert.ErrorIs(t, st.ReadOnly().IndexStore().Get(&cache.CacheEntryItem{Address: ch2.Address()}), storage.ErrNotFound) + _, err := st.ReadOnly().ChunkStore().Get(context.Background(), ch1.Address()) + assert.ErrorIs(t, err, storage.ErrNotFound) + _, err = st.ReadOnly().ChunkStore().Get(context.Background(), ch2.Address()) + assert.ErrorIs(t, err, storage.ErrNotFound) + }) + + t.Run("put-delete", func(t *testing.T) { + t.Parallel() + + ch1 := test.GenerateTestRandomChunk() + ch2 := test.GenerateTestRandomChunk() + + _ = st.Run(context.Background(), func(s transaction.Store) error { + assert.NoError(t, s.IndexStore().Put(&cache.CacheEntryItem{Address: ch1.Address(), AccessTimestamp: 1})) + assert.NoError(t, s.ChunkStore().Put(context.Background(), ch1)) + assert.NoError(t, s.IndexStore().Put(&cache.CacheEntryItem{Address: ch2.Address(), AccessTimestamp: 1})) + assert.NoError(t, s.ChunkStore().Put(context.Background(), ch2)) + return nil + }) + + item := cache.CacheEntryItem{Address: ch1.Address()} + assert.NoError(t, st.ReadOnly().IndexStore().Get(&item)) + assert.Equal(t, item, cache.CacheEntryItem{Address: ch1.Address(), AccessTimestamp: 1}) + + ch1_get, err := st.ReadOnly().ChunkStore().Get(context.Background(), ch1.Address()) + assert.NoError(t, err) + assert.Equal(t, ch1.Data(), ch1_get.Data()) + assert.Equal(t, ch1.Address(), ch1_get.Address()) + + item = cache.CacheEntryItem{Address: ch2.Address()} + assert.NoError(t, st.ReadOnly().IndexStore().Get(&item)) + assert.Equal(t, item, cache.CacheEntryItem{Address: ch2.Address(), AccessTimestamp: 1}) + + ch2_get, err := st.ReadOnly().ChunkStore().Get(context.Background(), ch1.Address()) + assert.NoError(t, err) + assert.Equal(t, ch1.Data(), ch2_get.Data()) + assert.Equal(t, ch1.Address(), ch2_get.Address()) + + _ = st.Run(context.Background(), func(s transaction.Store) error { + assert.NoError(t, s.IndexStore().Delete(&cache.CacheEntryItem{Address: ch1.Address(), AccessTimestamp: 1})) + assert.NoError(t, s.ChunkStore().Delete(context.Background(), ch1.Address())) + assert.NoError(t, s.IndexStore().Delete(&cache.CacheEntryItem{Address: ch2.Address(), AccessTimestamp: 1})) + assert.NoError(t, s.ChunkStore().Delete(context.Background(), ch2.Address())) + return nil + }) + + assert.ErrorIs(t, st.ReadOnly().IndexStore().Get(&cache.CacheEntryItem{Address: ch1.Address()}), storage.ErrNotFound) + assert.ErrorIs(t, st.ReadOnly().IndexStore().Get(&cache.CacheEntryItem{Address: ch2.Address()}), storage.ErrNotFound) + _, err = st.ReadOnly().ChunkStore().Get(context.Background(), ch1.Address()) + assert.ErrorIs(t, err, storage.ErrNotFound) + _, err = st.ReadOnly().ChunkStore().Get(context.Background(), ch2.Address()) + assert.ErrorIs(t, err, storage.ErrNotFound) + }) +} diff --git a/pkg/storer/internal/upload/uploadstore.go b/pkg/storer/internal/upload/uploadstore.go index 1126770176a..95a6b0b5bb5 100644 --- a/pkg/storer/internal/upload/uploadstore.go +++ b/pkg/storer/internal/upload/uploadstore.go @@ -18,6 +18,7 @@ import ( "github.com/ethersphere/bee/pkg/storer/internal" "github.com/ethersphere/bee/pkg/storer/internal/chunkstamp" "github.com/ethersphere/bee/pkg/storer/internal/stampindex" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" ) @@ -377,16 +378,17 @@ type uploadPutter struct { } // NewPutter returns a new chunk putter associated with the tagID. -func NewPutter(s internal.Storage, tagID uint64) (internal.PutterCloserWithReference, error) { +// Calls to the Putter must be mutex locked to prevent concurrent upload data races. +func NewPutter(s storage.IndexStore, tagID uint64) (internal.PutterCloserWithReference, error) { ti := &TagItem{TagID: tagID} - has, err := s.IndexStore().Has(ti) + has, err := s.Has(ti) if err != nil { return nil, err } if !has { return nil, fmt.Errorf("upload store: tag %d not found: %w", tagID, storage.ErrNotFound) } - err = s.IndexStore().Put(&dirtyTagItem{TagID: tagID, Started: now().UnixNano()}) + err = s.Put(&dirtyTagItem{TagID: tagID, Started: now().UnixNano()}) if err != nil { return nil, err } @@ -401,7 +403,8 @@ func NewPutter(s internal.Storage, tagID uint64) (internal.PutterCloserWithRefer // - uploadItem entry to keep track of this chunk. // - pushItem entry to make it available for PushSubscriber // - add chunk to the chunkstore till it is synced -func (u *uploadPutter) Put(ctx context.Context, s internal.Storage, writer storage.Writer, chunk swarm.Chunk) error { +// The user of the putter MUST mutex lock the call to prevent data-races across multiple upload sessions. +func (u *uploadPutter) Put(ctx context.Context, st transaction.Store, chunk swarm.Chunk) error { if u.closed { return errPutterAlreadyClosed } @@ -411,7 +414,7 @@ func (u *uploadPutter) Put(ctx context.Context, s internal.Storage, writer stora Address: chunk.Address(), BatchID: chunk.Stamp().BatchID(), } - switch exists, err := s.IndexStore().Has(ui); { + switch exists, err := st.IndexStore().Has(ui); { case err != nil: return fmt.Errorf("store has item %q call failed: %w", ui, err) case exists: @@ -421,8 +424,7 @@ func (u *uploadPutter) Put(ctx context.Context, s internal.Storage, writer stora } switch item, loaded, err := stampindex.LoadOrStore( - s.IndexStore(), - writer, + st.IndexStore(), stampIndexUploadNamespace, chunk, ); { @@ -436,7 +438,7 @@ func (u *uploadPutter) Put(ctx context.Context, s internal.Storage, writer stora if prev > curr { return errOverwriteOfNewerBatch } - err = stampindex.Store(writer, stampIndexUploadNamespace, chunk) + err = stampindex.Store(st.IndexStore(), stampIndexUploadNamespace, chunk) if err != nil { return fmt.Errorf("failed updating stamp index: %w", err) } @@ -444,18 +446,18 @@ func (u *uploadPutter) Put(ctx context.Context, s internal.Storage, writer stora u.split++ - if err := s.ChunkStore().Put(ctx, chunk); err != nil { + if err := st.ChunkStore().Put(ctx, chunk); err != nil { return fmt.Errorf("chunk store put chunk %q call failed: %w", chunk.Address(), err) } - if err := chunkstamp.Store(writer, chunkStampNamespace, chunk); err != nil { + if err := chunkstamp.Store(st.IndexStore(), chunkStampNamespace, chunk); err != nil { return fmt.Errorf("associate chunk with stamp %q call failed: %w", chunk.Address(), err) } ui.Uploaded = now().UnixNano() ui.TagID = u.tagID - if err := writer.Put(ui); err != nil { + if err := st.IndexStore().Put(ui); err != nil { return fmt.Errorf("store put item %q call failed: %w", ui, err) } @@ -465,7 +467,7 @@ func (u *uploadPutter) Put(ctx context.Context, s internal.Storage, writer stora BatchID: chunk.Stamp().BatchID(), TagID: u.tagID, } - if err := writer.Put(pi); err != nil { + if err := st.IndexStore().Put(pi); err != nil { return fmt.Errorf("store put item %q call failed: %w", pi, err) } @@ -476,13 +478,13 @@ func (u *uploadPutter) Put(ctx context.Context, s internal.Storage, writer stora // with a swarm reference. This can be useful while keeping track of uploads through // the tags. It will update the tag. This will be filled with the Split and Seen count // by the Putter. -func (u *uploadPutter) Close(s internal.Storage, writer storage.Writer, addr swarm.Address) error { +func (u *uploadPutter) Close(s storage.IndexStore, addr swarm.Address) error { if u.closed { return nil } ti := &TagItem{TagID: u.tagID} - err := s.IndexStore().Get(ti) + err := s.Get(ti) if err != nil { return fmt.Errorf("failed reading tag while closing: %w", err) } @@ -494,12 +496,12 @@ func (u *uploadPutter) Close(s internal.Storage, writer storage.Writer, addr swa ti.Address = addr.Clone() } - err = writer.Put(ti) + err = s.Put(ti) if err != nil { return fmt.Errorf("failed storing tag: %w", err) } - err = writer.Delete(&dirtyTagItem{TagID: u.tagID}) + err = s.Delete(&dirtyTagItem{TagID: u.tagID}) if err != nil { return fmt.Errorf("failed deleting dirty tag: %w", err) } @@ -509,70 +511,60 @@ func (u *uploadPutter) Close(s internal.Storage, writer storage.Writer, addr swa return nil } -func (u *uploadPutter) Cleanup(tx internal.TxExecutor) error { +func (u *uploadPutter) Cleanup(st transaction.Storage) error { if u.closed { return nil } itemsToDelete := make([]*pushItem, 0) - err := tx.Execute(context.Background(), func(s internal.Storage) error { - di := &dirtyTagItem{TagID: u.tagID} - err := s.IndexStore().Get(di) - if err != nil { - return fmt.Errorf("failed reading dirty tag while cleaning up: %w", err) - } + di := &dirtyTagItem{TagID: u.tagID} + err := st.ReadOnly().IndexStore().Get(di) + if err != nil { + return fmt.Errorf("failed reading dirty tag while cleaning up: %w", err) + } - return s.IndexStore().Iterate( - storage.Query{ - Factory: func() storage.Item { return &pushItem{} }, - PrefixAtStart: true, - Prefix: fmt.Sprintf("%d", di.Started), - }, - func(res storage.Result) (bool, error) { - pi := res.Entry.(*pushItem) - if pi.TagID == u.tagID { - itemsToDelete = append(itemsToDelete, pi) - } - return false, nil - }, - ) - }) + err = st.ReadOnly().IndexStore().Iterate( + storage.Query{ + Factory: func() storage.Item { return &pushItem{} }, + PrefixAtStart: true, + Prefix: fmt.Sprintf("%d", di.Started), + }, + func(res storage.Result) (bool, error) { + pi := res.Entry.(*pushItem) + if pi.TagID == u.tagID { + itemsToDelete = append(itemsToDelete, pi) + } + return false, nil + }, + ) if err != nil { return fmt.Errorf("failed iterating over push items: %w", err) } batchCnt := 1000 for i := 0; i < len(itemsToDelete); i += batchCnt { - err = tx.Execute(context.Background(), func(st internal.Storage) error { - - b, err := st.IndexStore().Batch(context.Background()) - if err != nil { - return err - } + _ = st.Run(context.Background(), func(s transaction.Store) error { end := i + batchCnt if end > len(itemsToDelete) { end = len(itemsToDelete) } for _, pi := range itemsToDelete[i:end] { - _ = remove(st, b, pi.Address, pi.BatchID) - _ = b.Delete(pi) + _ = remove(s, pi.Address, pi.BatchID) + _ = s.IndexStore().Delete(pi) } - return b.Commit() + return nil }) - if err != nil { - return fmt.Errorf("failed deleting push items: %w", err) - } } - return tx.Execute(context.Background(), func(tx internal.Storage) error { - return tx.IndexStore().Delete(&dirtyTagItem{TagID: u.tagID}) + return st.Run(context.Background(), func(s transaction.Store) error { + return s.IndexStore().Delete(&dirtyTagItem{TagID: u.tagID}) }) } // Remove removes all the state associated with the given address and batchID. -func remove(st internal.Storage, writer storage.Writer, address swarm.Address, batchID []byte) error { +func remove(st transaction.Store, address swarm.Address, batchID []byte) error { ui := &uploadItem{ Address: address, BatchID: batchID, @@ -583,7 +575,7 @@ func remove(st internal.Storage, writer storage.Writer, address swarm.Address, b return fmt.Errorf("failed to read uploadItem %s: %w", ui, err) } - err = writer.Delete(ui) + err = st.IndexStore().Delete(ui) if err != nil { return fmt.Errorf("failed deleting upload item: %w", err) } @@ -599,7 +591,7 @@ func remove(st internal.Storage, writer storage.Writer, address swarm.Address, b return fmt.Errorf("failed getting stamp: %w", err) } - err = chunkstamp.DeleteWithStamp(writer, chunkStampNamespace, address, stamp) + err = chunkstamp.DeleteWithStamp(st.IndexStore(), chunkStampNamespace, address, stamp) if err != nil { return fmt.Errorf("failed deleting chunk stamp %x: %w", batchID, err) } @@ -608,27 +600,25 @@ func remove(st internal.Storage, writer storage.Writer, address swarm.Address, b } // CleanupDirty does a best-effort cleanup of dirty tags. This is called on startup. -func CleanupDirty(tx internal.TxExecutor) error { +func CleanupDirty(st transaction.Storage) error { dirtyTags := make([]*dirtyTagItem, 0) - err := tx.Execute(context.Background(), func(s internal.Storage) error { - return s.IndexStore().Iterate( - storage.Query{ - Factory: func() storage.Item { return &dirtyTagItem{} }, - }, - func(res storage.Result) (bool, error) { - di := res.Entry.(*dirtyTagItem) - dirtyTags = append(dirtyTags, di) - return false, nil - }, - ) - }) + err := st.ReadOnly().IndexStore().Iterate( + storage.Query{ + Factory: func() storage.Item { return &dirtyTagItem{} }, + }, + func(res storage.Result) (bool, error) { + di := res.Entry.(*dirtyTagItem) + dirtyTags = append(dirtyTags, di) + return false, nil + }, + ) if err != nil { return fmt.Errorf("failed iterating dirty tags: %w", err) } for _, di := range dirtyTags { - _ = (&uploadPutter{tagID: di.TagID}).Cleanup(tx) + _ = (&uploadPutter{tagID: di.TagID}).Cleanup(st) } return nil @@ -637,7 +627,7 @@ func CleanupDirty(tx internal.TxExecutor) error { // Report is the implementation of the PushReporter interface. func Report( ctx context.Context, - s internal.Storage, + trx transaction.Store, chunk swarm.Chunk, state storage.ChunkState, ) error { @@ -646,7 +636,9 @@ func Report( BatchID: chunk.Stamp().BatchID(), } - err := s.IndexStore().Get(ui) + indexStore := trx.IndexStore() + + err := indexStore.Get(ui) if err != nil { return fmt.Errorf("failed to read uploadItem %s: %w", ui, err) } @@ -655,7 +647,7 @@ func Report( TagID: ui.TagID, } - err = s.IndexStore().Get(ti) + err = indexStore.Get(ti) if err != nil { return fmt.Errorf("failed getting tag: %w", err) } @@ -673,18 +665,13 @@ func Report( break } - batch, err := s.IndexStore().Batch(ctx) - if err != nil { - return err - } - - err = batch.Put(ti) + err = indexStore.Put(ti) if err != nil { return fmt.Errorf("failed updating tag: %w", err) } if state == storage.ChunkSent { - return batch.Commit() + return nil } // Once the chunk is stored/synced/failed to sync, it is deleted from the upload store as @@ -696,28 +683,28 @@ func Report( BatchID: chunk.Stamp().BatchID(), } - err = batch.Delete(pi) + err = indexStore.Delete(pi) if err != nil { return fmt.Errorf("failed deleting pushItem %s: %w", pi, err) } - err = chunkstamp.Delete(s.IndexStore(), batch, chunkStampNamespace, pi.Address, pi.BatchID) + err = chunkstamp.Delete(indexStore, chunkStampNamespace, pi.Address, pi.BatchID) if err != nil { return fmt.Errorf("failed deleting chunk stamp %x: %w", pi.BatchID, err) } - err = s.ChunkStore().Delete(ctx, chunk.Address()) + err = trx.ChunkStore().Delete(ctx, chunk.Address()) if err != nil { return fmt.Errorf("failed deleting chunk %s: %w", chunk.Address(), err) } ui.Synced = now().UnixNano() - err = batch.Put(ui) + err = indexStore.Put(ui) if err != nil { return fmt.Errorf("failed updating uploadItem %s: %w", ui, err) } - return batch.Commit() + return nil } var ( @@ -761,7 +748,7 @@ func (n nextTagID) String() string { // NextTag returns the next tag ID to be used. It reads the last used ID and // increments it by 1. This method needs to be called under lock by user as there // is no guarantee for parallel updates. -func NextTag(st storage.Store) (TagItem, error) { +func NextTag(st storage.IndexStore) (TagItem, error) { var ( tagID nextTagID tag TagItem @@ -784,7 +771,7 @@ func NextTag(st storage.Store) (TagItem, error) { } // TagInfo returns the TagItem for this particular tagID. -func TagInfo(st storage.Store, tagID uint64) (TagItem, error) { +func TagInfo(st storage.Reader, tagID uint64) (TagItem, error) { ti := TagItem{TagID: tagID} err := st.Get(&ti) if err != nil { @@ -795,7 +782,7 @@ func TagInfo(st storage.Store, tagID uint64) (TagItem, error) { } // ListAllTags returns all the TagItems in the store. -func ListAllTags(st storage.Store) ([]TagItem, error) { +func ListAllTags(st storage.Reader) ([]TagItem, error) { var tags []TagItem err := st.Iterate(storage.Query{ Factory: func() storage.Item { return new(TagItem) }, @@ -810,7 +797,7 @@ func ListAllTags(st storage.Store) ([]TagItem, error) { return tags, nil } -func Iterate(ctx context.Context, s internal.Storage, consumerFn func(chunk swarm.Chunk) (bool, error)) error { +func Iterate(ctx context.Context, s transaction.ReadOnlyStore, consumerFn func(chunk swarm.Chunk) (bool, error)) error { return s.IndexStore().Iterate(storage.Query{ Factory: func() storage.Item { return &pushItem{} }, }, func(r storage.Result) (bool, error) { @@ -841,14 +828,14 @@ func Iterate(ctx context.Context, s internal.Storage, consumerFn func(chunk swar } // DeleteTag deletes TagItem associated with the given tagID. -func DeleteTag(st storage.Store, tagID uint64) error { +func DeleteTag(st storage.Writer, tagID uint64) error { if err := st.Delete(&TagItem{TagID: tagID}); err != nil { return fmt.Errorf("uploadstore: failed to delete tag %d: %w", tagID, err) } return nil } -func IterateAll(st storage.Store, iterateFn func(addr swarm.Address, isSynced bool) (bool, error)) error { +func IterateAll(st storage.Reader, iterateFn func(addr swarm.Address, isSynced bool) (bool, error)) error { return st.Iterate( storage.Query{ Factory: func() storage.Item { return new(uploadItem) }, @@ -862,7 +849,7 @@ func IterateAll(st storage.Store, iterateFn func(addr swarm.Address, isSynced bo } // BatchIDForChunk returns the first known batchID for the given chunk address. -func BatchIDForChunk(st storage.Store, addr swarm.Address) ([]byte, error) { +func BatchIDForChunk(st storage.Reader, addr swarm.Address) ([]byte, error) { var batchID []byte err := st.Iterate( diff --git a/pkg/storer/internal/upload/uploadstore_test.go b/pkg/storer/internal/upload/uploadstore_test.go index 569b8c7999e..126289b0703 100644 --- a/pkg/storer/internal/upload/uploadstore_test.go +++ b/pkg/storer/internal/upload/uploadstore_test.go @@ -21,6 +21,7 @@ import ( "github.com/ethersphere/bee/pkg/storage/storagetest" chunktest "github.com/ethersphere/bee/pkg/storage/testing" "github.com/ethersphere/bee/pkg/storer/internal" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/storer/internal/upload" "github.com/ethersphere/bee/pkg/swarm" "github.com/google/go-cmp/cmp" @@ -430,17 +431,10 @@ func TestItemDirtyTagItem(t *testing.T) { } } -func newTestStorage(t *testing.T) internal.BatchedStorage { +func newTestStorage(t *testing.T) transaction.Storage { t.Helper() - storg, closer := internal.NewInmemStorage() - t.Cleanup(func() { - err := closer() - if err != nil { - t.Errorf("failed closing storage: %v", err) - } - }) - + storg := internal.NewInmemStorage() return storg } @@ -449,27 +443,34 @@ func TestChunkPutter(t *testing.T) { ts := newTestStorage(t) - tag, err := upload.NextTag(ts.IndexStore()) + tx, done := ts.NewTransaction(context.Background()) + defer done() + tag, err := upload.NextTag(tx.IndexStore()) if err != nil { t.Fatalf("failed creating tag: %v", err) } - putter, err := upload.NewPutter(ts, tag.TagID) + putter, err := upload.NewPutter(tx.IndexStore(), tag.TagID) if err != nil { t.Fatalf("failed creating putter: %v", err) } + _ = tx.Commit() for _, chunk := range chunktest.GenerateTestRandomChunks(10) { t.Run(fmt.Sprintf("chunk %s", chunk.Address()), func(t *testing.T) { t.Run("put new chunk", func(t *testing.T) { - err := putter.Put(context.Background(), ts, ts.IndexStore(), chunk) + err := ts.Run(context.Background(), func(s transaction.Store) error { + return putter.Put(context.Background(), s, chunk) + }) if err != nil { t.Fatalf("Put(...): unexpected error: %v", err) } }) t.Run("put existing chunk", func(t *testing.T) { - err := putter.Put(context.Background(), ts, ts.IndexStore(), chunk) + err := ts.Run(context.Background(), func(s transaction.Store) error { + return putter.Put(context.Background(), s, chunk) + }) if err != nil { t.Fatalf("Put(...): unexpected error: %v", err) } @@ -480,7 +481,7 @@ func TestChunkPutter(t *testing.T) { Address: chunk.Address(), BatchID: chunk.Stamp().BatchID(), } - err := ts.IndexStore().Get(ui) + err := ts.ReadOnly().IndexStore().Get(ui) if err != nil { t.Fatalf("Get(...): unexpected error: %v", err) } @@ -500,7 +501,7 @@ func TestChunkPutter(t *testing.T) { Address: chunk.Address(), BatchID: chunk.Stamp().BatchID(), } - err = ts.IndexStore().Get(pi) + err = ts.ReadOnly().IndexStore().Get(pi) if err != nil { t.Fatalf("Get(...): unexpected error: %v", err) } @@ -515,7 +516,7 @@ func TestChunkPutter(t *testing.T) { t.Fatalf("Get(...): unexpected UploadItem (-want +have):\n%s", diff) } - have, err := ts.ChunkStore().Get(context.Background(), chunk.Address()) + have, err := ts.ReadOnly().ChunkStore().Get(context.Background(), chunk.Address()) if err != nil { t.Fatalf("Get(...): unexpected error: %v", err) } @@ -528,12 +529,12 @@ func TestChunkPutter(t *testing.T) { t.Run("iterate all", func(t *testing.T) { count := 0 - err := upload.IterateAll(ts.IndexStore(), func(addr swarm.Address, synced bool) (bool, error) { + err := upload.IterateAll(ts.ReadOnly().IndexStore(), func(addr swarm.Address, synced bool) (bool, error) { count++ if synced { t.Fatal("expected synced to be false") } - has, err := ts.ChunkStore().Has(context.Background(), addr) + has, err := ts.ReadOnly().ChunkStore().Has(context.Background(), addr) if err != nil { t.Fatalf("unexpected error in Has(...): %v", err) } @@ -553,12 +554,19 @@ func TestChunkPutter(t *testing.T) { t.Run("close with reference", func(t *testing.T) { addr := swarm.RandAddress(t) - err := putter.Close(ts, ts.IndexStore(), addr) + err := ts.Run(context.Background(), func(s transaction.Store) error { + return putter.Close(s.IndexStore(), addr) + }) if err != nil { t.Fatalf("Close(...): unexpected error %v", err) } - ti, err := upload.TagInfo(ts.IndexStore(), tag.TagID) + var ti upload.TagItem + + err = ts.Run(context.Background(), func(s transaction.Store) error { + ti, err = upload.TagInfo(s.IndexStore(), tag.TagID) + return err + }) if err != nil { t.Fatalf("TagInfo(...): unexpected error %v", err) } @@ -576,33 +584,43 @@ func TestChunkPutter(t *testing.T) { }) t.Run("error after close", func(t *testing.T) { - err := putter.Put(context.Background(), ts, ts.IndexStore(), chunktest.GenerateTestRandomChunk()) + err := ts.Run(context.Background(), func(s transaction.Store) error { + return putter.Put(context.Background(), s, chunktest.GenerateTestRandomChunk()) + }) if !errors.Is(err, upload.ErrPutterAlreadyClosed) { t.Fatalf("unexpected error, expected: %v, got: %v", upload.ErrPutterAlreadyClosed, err) } }) t.Run("restart putter", func(t *testing.T) { - putter, err = upload.NewPutter(ts, tag.TagID) + + var putter internal.PutterCloserWithReference + + err = ts.Run(context.Background(), func(s transaction.Store) error { + putter, err = upload.NewPutter(s.IndexStore(), tag.TagID) + return err + }) if err != nil { t.Fatalf("failed creating putter: %v", err) } for _, chunk := range chunktest.GenerateTestRandomChunks(5) { - err := putter.Put(context.Background(), ts, ts.IndexStore(), chunk) - if err != nil { + if err := ts.Run(context.Background(), func(s transaction.Store) error { + return putter.Put(context.Background(), s, chunk) + }); err != nil { t.Fatalf("Put(...): unexpected error: %v", err) } } // close with different address addr := swarm.RandAddress(t) - err = putter.Close(ts, ts.IndexStore(), addr) - if err != nil { + if err := ts.Run(context.Background(), func(s transaction.Store) error { + return putter.Close(s.IndexStore(), addr) + }); err != nil { t.Fatalf("Close(...): unexpected error %v", err) } - ti, err := upload.TagInfo(ts.IndexStore(), tag.TagID) + ti, err := upload.TagInfo(ts.ReadOnly().IndexStore(), tag.TagID) if err != nil { t.Fatalf("TagInfo(...): unexpected error %v", err) } @@ -626,54 +644,62 @@ func TestChunkReporter(t *testing.T) { ts := newTestStorage(t) - tag, err := upload.NextTag(ts.IndexStore()) - if err != nil { + var ( + tag upload.TagItem + putter internal.PutterCloserWithReference + err error + ) + if err := ts.Run(context.Background(), func(s transaction.Store) error { + tag, err = upload.NextTag(s.IndexStore()) + return err + }); err != nil { t.Fatalf("failed creating tag: %v", err) } - putter, err := upload.NewPutter(ts, tag.TagID) - if err != nil { + if err := ts.Run(context.Background(), func(s transaction.Store) error { + putter, err = upload.NewPutter(s.IndexStore(), tag.TagID) + return err + }); err != nil { t.Fatalf("failed creating putter: %v", err) } for idx, chunk := range chunktest.GenerateTestRandomChunks(10) { t.Run(fmt.Sprintf("chunk %s", chunk.Address()), func(t *testing.T) { - err := putter.Put(context.Background(), ts, ts.IndexStore(), chunk) - if err != nil { + + if err := ts.Run(context.Background(), func(s transaction.Store) error { + return putter.Put(context.Background(), s, chunk) + }); err != nil { t.Fatalf("Put(...): unexpected error: %v", err) } - t.Run("mark sent", func(t *testing.T) { - err := upload.Report(context.Background(), ts, chunk, storage.ChunkSent) - if err != nil { + report := func(ch swarm.Chunk, state int) { + t.Helper() + if err := ts.Run(context.Background(), func(s transaction.Store) error { + return upload.Report(context.Background(), s, ch, state) + }); err != nil { t.Fatalf("Report(...): unexpected error: %v", err) } + } + + t.Run("mark sent", func(t *testing.T) { + report(chunk, storage.ChunkSent) }) if idx < 4 { t.Run("mark stored", func(t *testing.T) { - err := upload.Report(context.Background(), ts, chunk, storage.ChunkStored) - if err != nil { - t.Fatalf("Report(...): unexpected error: %v", err) - } + report(chunk, storage.ChunkStored) }) } if idx >= 4 && idx < 8 { t.Run("mark synced", func(t *testing.T) { - err := upload.Report(context.Background(), ts, chunk, storage.ChunkSynced) - if err != nil { - t.Fatalf("Report(...): unexpected error: %v", err) - } + report(chunk, storage.ChunkSynced) }) } if idx >= 8 { t.Run("mark could not sync", func(t *testing.T) { - err := upload.Report(context.Background(), ts, chunk, storage.ChunkCouldNotSync) - if err != nil { - t.Fatalf("Report(...): unexpected error: %v", err) - } + report(chunk, storage.ChunkCouldNotSync) }) } @@ -681,7 +707,7 @@ func TestChunkReporter(t *testing.T) { ti := &upload.TagItem{ TagID: tag.TagID, } - err := ts.IndexStore().Get(ti) + err := ts.ReadOnly().IndexStore().Get(ti) if err != nil { t.Fatalf("Get(...): unexpected error: %v", err) } @@ -711,7 +737,7 @@ func TestChunkReporter(t *testing.T) { Address: chunk.Address(), BatchID: chunk.Stamp().BatchID(), } - err = ts.IndexStore().Get(ui) + err = ts.ReadOnly().IndexStore().Get(ui) if err != nil { t.Fatalf("Get(...): unexpected error: %v", err) } @@ -732,7 +758,7 @@ func TestChunkReporter(t *testing.T) { Address: chunk.Address(), BatchID: chunk.Stamp().BatchID(), } - has, err := ts.IndexStore().Has(pi) + has, err := ts.ReadOnly().IndexStore().Has(pi) if err != nil { t.Fatalf("Has(...): unexpected error: %v", err) } @@ -740,7 +766,7 @@ func TestChunkReporter(t *testing.T) { t.Fatalf("Has(...): expected to not be found: %s", pi) } - have, err := ts.ChunkStore().Has(context.Background(), chunk.Address()) + have, err := ts.ReadOnly().ChunkStore().Has(context.Background(), chunk.Address()) if err != nil { t.Fatalf("Get(...): unexpected error: %v", err) } @@ -754,12 +780,16 @@ func TestChunkReporter(t *testing.T) { t.Run("close with reference", func(t *testing.T) { addr := swarm.RandAddress(t) - err := putter.Close(ts, ts.IndexStore(), addr) + err := ts.Run(context.Background(), func(s transaction.Store) error { return putter.Close(s.IndexStore(), addr) }) if err != nil { t.Fatalf("Close(...): unexpected error %v", err) } - ti, err := upload.TagInfo(ts.IndexStore(), tag.TagID) + var ti upload.TagItem + err = ts.Run(context.Background(), func(s transaction.Store) error { + ti, err = upload.TagInfo(s.IndexStore(), tag.TagID) + return err + }) if err != nil { t.Fatalf("TagInfo(...): unexpected error %v", err) } @@ -785,12 +815,21 @@ func TestStampIndexHandling(t *testing.T) { ts := newTestStorage(t) - tag, err := upload.NextTag(ts.IndexStore()) + var tag upload.TagItem + var err error + err = ts.Run(context.Background(), func(s transaction.Store) error { + tag, err = upload.NextTag(s.IndexStore()) + return err + }) if err != nil { t.Fatalf("failed creating tag: %v", err) } - putter, err := upload.NewPutter(ts, tag.TagID) + var putter internal.PutterCloserWithReference + err = ts.Run(context.Background(), func(s transaction.Store) error { + putter, err = upload.NewPutter(s.IndexStore(), tag.TagID) + return err + }) if err != nil { t.Fatalf("failed creating putter: %v", err) } @@ -803,14 +842,15 @@ func TestStampIndexHandling(t *testing.T) { chunk.BucketDepth(), true, ) - if err := putter.Put(context.Background(), ts, ts.IndexStore(), chunk); err != nil { + + if err := put(t, ts, putter, chunk); err != nil { t.Fatalf("Put(...): unexpected error: %v", err) } chunk2 := chunktest.GenerateTestRandomChunk().WithStamp(chunk.Stamp()) want := upload.ErrOverwriteOfImmutableBatch - have := putter.Put(context.Background(), ts, ts.IndexStore(), chunk2) + have := put(t, ts, putter, chunk2) if !errors.Is(have, want) { t.Fatalf("Put(...): unexpected error:\n\twant: %v\n\thave: %v", want, have) } @@ -818,7 +858,7 @@ func TestStampIndexHandling(t *testing.T) { t.Run("put existing index with older batch timestamp", func(t *testing.T) { chunk := chunktest.GenerateTestRandomChunk() - if err := putter.Put(context.Background(), ts, ts.IndexStore(), chunk); err != nil { + if err := put(t, ts, putter, chunk); err != nil { t.Fatalf("Put(...): unexpected error: %v", err) } @@ -836,7 +876,7 @@ func TestStampIndexHandling(t *testing.T) { chunk2 := chunktest.GenerateTestRandomChunk().WithStamp(stamp) want := upload.ErrOverwriteOfNewerBatch - have := putter.Put(context.Background(), ts, ts.IndexStore(), chunk2) + have := put(t, ts, putter, chunk2) if !errors.Is(have, want) { t.Fatalf("Put(...): unexpected error:\n\twant: %v\n\thave: %v", want, have) } @@ -844,7 +884,7 @@ func TestStampIndexHandling(t *testing.T) { t.Run("put existing chunk with newer batch timestamp", func(t *testing.T) { chunk := chunktest.GenerateTestRandomChunk() - if err := putter.Put(context.Background(), ts, ts.IndexStore(), chunk); err != nil { + if err := put(t, ts, putter, chunk); err != nil { t.Fatalf("Put(...): unexpected error: %v", err) } @@ -861,7 +901,7 @@ func TestStampIndexHandling(t *testing.T) { chunk2 := chunktest.GenerateTestRandomChunk().WithStamp(stamp) - if err := putter.Put(context.Background(), ts, ts.IndexStore(), chunk2); err != nil { + if err := put(t, ts, putter, chunk2); err != nil { t.Fatalf("Put(...): unexpected error: %v", err) } }) @@ -873,9 +913,14 @@ func TestNextTagID(t *testing.T) { ts := newTestStorage(t) for i := 1; i < 4; i++ { - tag, err := upload.NextTag(ts.IndexStore()) + var tag upload.TagItem + var err error + err = ts.Run(context.Background(), func(s transaction.Store) error { + tag, err = upload.NextTag(s.IndexStore()) + return err + }) if err != nil { - t.Fatal(err) + t.Fatalf("failed creating tag: %v", err) } if tag.TagID != uint64(i) { @@ -884,7 +929,7 @@ func TestNextTagID(t *testing.T) { } var lastTag upload.NextTagID - err := ts.IndexStore().Get(&lastTag) + err := ts.ReadOnly().IndexStore().Get(&lastTag) if err != nil { t.Fatal(err) } @@ -901,14 +946,19 @@ func TestListTags(t *testing.T) { want := make([]upload.TagItem, 10) for i := range want { - ti, err := upload.NextTag(ts.IndexStore()) + var tag upload.TagItem + var err error + err = ts.Run(context.Background(), func(s transaction.Store) error { + tag, err = upload.NextTag(s.IndexStore()) + return err + }) if err != nil { t.Fatalf("failed creating tag: %v", err) } - want[i] = ti + want[i] = tag } - have, err := upload.ListAllTags(ts.IndexStore()) + have, err := upload.ListAllTags(ts.ReadOnly().IndexStore()) if err != nil { t.Fatalf("upload.ListAllTags(): unexpected error: %v", err) } @@ -925,7 +975,7 @@ func TestIterate(t *testing.T) { ts := newTestStorage(t) t.Run("on empty storage does not call the callback fn", func(t *testing.T) { - err := upload.Iterate(context.Background(), ts, func(chunk swarm.Chunk) (bool, error) { + err := upload.Iterate(context.Background(), ts.ReadOnly(), func(chunk swarm.Chunk) (bool, error) { t.Fatal("unexpected call") return false, nil }) @@ -935,29 +985,38 @@ func TestIterate(t *testing.T) { }) t.Run("iterates chunks", func(t *testing.T) { - tag, err := upload.NextTag(ts.IndexStore()) + var tag upload.TagItem + var err error + err = ts.Run(context.Background(), func(s transaction.Store) error { + tag, err = upload.NextTag(s.IndexStore()) + return err + }) if err != nil { t.Fatalf("failed creating tag: %v", err) } - putter, err := upload.NewPutter(ts, tag.TagID) + var putter internal.PutterCloserWithReference + err = ts.Run(context.Background(), func(s transaction.Store) error { + putter, err = upload.NewPutter(s.IndexStore(), tag.TagID) + return err + }) if err != nil { t.Fatalf("failed creating putter: %v", err) } chunk1, chunk2 := chunktest.GenerateTestRandomChunk(), chunktest.GenerateTestRandomChunk() - err = putter.Put(context.Background(), ts, ts.IndexStore(), chunk1) + err = put(t, ts, putter, chunk1) if err != nil { t.Fatalf("session.Put(...): unexpected error: %v", err) } - err = putter.Put(context.Background(), ts, ts.IndexStore(), chunk2) + err = put(t, ts, putter, chunk2) if err != nil { t.Fatalf("session.Put(...): unexpected error: %v", err) } var count int - err = upload.Iterate(context.Background(), ts, func(chunk swarm.Chunk) (bool, error) { + err = upload.Iterate(context.Background(), ts.ReadOnly(), func(chunk swarm.Chunk) (bool, error) { count++ if !chunk.Equal(chunk1) && !chunk.Equal(chunk2) { return true, fmt.Errorf("unknown chunk %s", chunk.Address()) @@ -972,12 +1031,12 @@ func TestIterate(t *testing.T) { t.Fatalf("expected to iterate 0 chunks, got: %v", count) } - err = putter.Close(ts, ts.IndexStore(), swarm.ZeroAddress) + err = ts.Run(context.Background(), func(s transaction.Store) error { return putter.Close(s.IndexStore(), swarm.ZeroAddress) }) if err != nil { t.Fatalf("Close(...) error: %v", err) } - err = upload.Iterate(context.Background(), ts, func(chunk swarm.Chunk) (bool, error) { + err = upload.Iterate(context.Background(), ts.ReadOnly(), func(chunk swarm.Chunk) (bool, error) { count++ if !chunk.Equal(chunk1) && !chunk.Equal(chunk2) { return true, fmt.Errorf("unknown chunk %s", chunk.Address()) @@ -999,17 +1058,24 @@ func TestDeleteTag(t *testing.T) { ts := newTestStorage(t) - tag, err := upload.NextTag(ts.IndexStore()) + var tag upload.TagItem + var err error + err = ts.Run(context.Background(), func(s transaction.Store) error { + tag, err = upload.NextTag(s.IndexStore()) + return err + }) if err != nil { - t.Fatal("failed creating tag", err) + t.Fatalf("failed creating tag: %v", err) } - err = upload.DeleteTag(ts.IndexStore(), tag.TagID) + err = ts.Run(context.Background(), func(s transaction.Store) error { + return upload.DeleteTag(s.IndexStore(), tag.TagID) + }) if err != nil { t.Fatalf("upload.DeleteTag(): unexpected error: %v", err) } - _, err = upload.TagInfo(ts.IndexStore(), tag.TagID) + _, err = upload.TagInfo(ts.ReadOnly().IndexStore(), tag.TagID) if !errors.Is(err, storage.ErrNotFound) { t.Fatalf("want: %v; have: %v", storage.ErrNotFound, err) } @@ -1020,22 +1086,31 @@ func TestBatchIDForChunk(t *testing.T) { ts := newTestStorage(t) - tag, err := upload.NextTag(ts.IndexStore()) + var tag upload.TagItem + var err error + err = ts.Run(context.Background(), func(s transaction.Store) error { + tag, err = upload.NextTag(s.IndexStore()) + return err + }) if err != nil { t.Fatalf("failed creating tag: %v", err) } - putter, err := upload.NewPutter(ts, tag.TagID) + var putter internal.PutterCloserWithReference + err = ts.Run(context.Background(), func(s transaction.Store) error { + putter, err = upload.NewPutter(s.IndexStore(), tag.TagID) + return err + }) if err != nil { t.Fatalf("failed creating putter: %v", err) } chunk := chunktest.GenerateTestRandomChunk() - if err := putter.Put(context.Background(), ts, ts.IndexStore(), chunk); err != nil { + if err := put(t, ts, putter, chunk); err != nil { t.Fatalf("Put(...): unexpected error: %v", err) } - batchID, err := upload.BatchIDForChunk(ts.IndexStore(), chunk.Address()) + batchID, err := upload.BatchIDForChunk(ts.ReadOnly().IndexStore(), chunk.Address()) if err != nil { t.Fatalf("BatchIDForChunk(...): unexpected error: %v", err) } @@ -1052,18 +1127,28 @@ func TestCleanup(t *testing.T) { t.Parallel() ts := newTestStorage(t) - tag, err := upload.NextTag(ts.IndexStore()) + + var tag upload.TagItem + var err error + err = ts.Run(context.Background(), func(s transaction.Store) error { + tag, err = upload.NextTag(s.IndexStore()) + return err + }) if err != nil { - t.Fatal("failed creating tag", err) + t.Fatalf("failed creating tag: %v", err) } - putter, err := upload.NewPutter(ts, tag.TagID) + var putter internal.PutterCloserWithReference + err = ts.Run(context.Background(), func(s transaction.Store) error { + putter, err = upload.NewPutter(s.IndexStore(), tag.TagID) + return err + }) if err != nil { - t.Fatal("failed creating putter", err) + t.Fatalf("failed creating putter: %v", err) } chunk := chunktest.GenerateTestRandomChunk() - err = putter.Put(context.Background(), ts, ts.IndexStore(), chunk) + err = put(t, ts, putter, chunk) if err != nil { t.Fatal("session.Put(...): unexpected error", err) } @@ -1074,7 +1159,7 @@ func TestCleanup(t *testing.T) { } count := 0 - _ = upload.Iterate(context.Background(), ts, func(chunk swarm.Chunk) (bool, error) { + _ = upload.Iterate(context.Background(), ts.ReadOnly(), func(chunk swarm.Chunk) (bool, error) { count++ return false, nil }) @@ -1082,7 +1167,7 @@ func TestCleanup(t *testing.T) { t.Fatalf("expected to iterate 0 chunks, got: %v", count) } - if _, err := ts.ChunkStore().Get(context.Background(), chunk.Address()); !errors.Is(err, storage.ErrNotFound) { + if _, err := ts.ReadOnly().ChunkStore().Get(context.Background(), chunk.Address()); !errors.Is(err, storage.ErrNotFound) { t.Fatalf("expected chunk not found error, got: %v", err) } }) @@ -1091,18 +1176,28 @@ func TestCleanup(t *testing.T) { t.Parallel() ts := newTestStorage(t) - tag, err := upload.NextTag(ts.IndexStore()) + + var tag upload.TagItem + var err error + err = ts.Run(context.Background(), func(s transaction.Store) error { + tag, err = upload.NextTag(s.IndexStore()) + return err + }) if err != nil { - t.Fatal("failed creating tag", err) + t.Fatalf("failed creating tag: %v", err) } - putter, err := upload.NewPutter(ts, tag.TagID) + var putter internal.PutterCloserWithReference + err = ts.Run(context.Background(), func(s transaction.Store) error { + putter, err = upload.NewPutter(s.IndexStore(), tag.TagID) + return err + }) if err != nil { - t.Fatal("failed creating putter", err) + t.Fatalf("failed creating putter: %v", err) } chunk := chunktest.GenerateTestRandomChunk() - err = putter.Put(context.Background(), ts, ts.IndexStore(), chunk) + err = put(t, ts, putter, chunk) if err != nil { t.Fatal("session.Put(...): unexpected error", err) } @@ -1113,7 +1208,7 @@ func TestCleanup(t *testing.T) { } count := 0 - _ = upload.Iterate(context.Background(), ts, func(chunk swarm.Chunk) (bool, error) { + _ = upload.Iterate(context.Background(), ts.ReadOnly(), func(chunk swarm.Chunk) (bool, error) { count++ return false, nil }) @@ -1121,8 +1216,15 @@ func TestCleanup(t *testing.T) { t.Fatalf("expected to iterate 0 chunks, got: %v", count) } - if _, err := ts.ChunkStore().Get(context.Background(), chunk.Address()); !errors.Is(err, storage.ErrNotFound) { + if _, err := ts.ReadOnly().ChunkStore().Get(context.Background(), chunk.Address()); !errors.Is(err, storage.ErrNotFound) { t.Fatalf("expected chunk not found error, got: %v", err) } }) } + +func put(t *testing.T, ts transaction.Storage, putter internal.PutterCloserWithReference, ch swarm.Chunk) error { + t.Helper() + return ts.Run(context.Background(), func(s transaction.Store) error { + return putter.Put(context.Background(), s, ch) + }) +} diff --git a/pkg/storer/migration/all_steps.go b/pkg/storer/migration/all_steps.go index ef028659f6d..39f2b5a7438 100644 --- a/pkg/storer/migration/all_steps.go +++ b/pkg/storer/migration/all_steps.go @@ -5,28 +5,29 @@ package migration import ( - storage "github.com/ethersphere/bee/pkg/storage" + "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage/migration" "github.com/ethersphere/bee/pkg/storer/internal/reserve" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" ) // AfterInitSteps lists all migration steps for localstore IndexStore after the localstore is intiated. func AfterInitSteps( sharkyPath string, sharkyNoOfShards int, - chunkStore storage.ChunkStore, + st transaction.Storage, ) migration.Steps { return map[uint64]migration.StepFn{ 1: step_01, - 2: step_02, - 3: step_03(chunkStore, reserve.ChunkType), - 4: step_04(sharkyPath, sharkyNoOfShards), + 2: step_02(st), + 3: step_03(st, reserve.ChunkType), + 4: step_04(sharkyPath, sharkyNoOfShards, st), } } -// BeforeIinitSteps lists all migration steps for localstore IndexStore before the localstore is intiated. -func BeforeIinitSteps() migration.Steps { +// BeforeInitSteps lists all migration steps for localstore IndexStore before the localstore is intiated. +func BeforeInitSteps(st storage.BatchStore) migration.Steps { return map[uint64]migration.StepFn{ - 1: RefCountSizeInc, + 1: RefCountSizeInc(st), } } diff --git a/pkg/storer/migration/all_steps_test.go b/pkg/storer/migration/all_steps_test.go index 10a6fbce752..088a38d44b8 100644 --- a/pkg/storer/migration/all_steps_test.go +++ b/pkg/storer/migration/all_steps_test.go @@ -5,36 +5,39 @@ package migration_test import ( + "context" "testing" "github.com/stretchr/testify/assert" - "github.com/ethersphere/bee/pkg/storage/inmemchunkstore" "github.com/ethersphere/bee/pkg/storage/inmemstore" "github.com/ethersphere/bee/pkg/storage/migration" + "github.com/ethersphere/bee/pkg/storer/internal" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" localmigration "github.com/ethersphere/bee/pkg/storer/migration" ) func TestPreSteps(t *testing.T) { t.Parallel() - chStore := inmemchunkstore.New() + store := internal.NewInmemStorage() - assert.NotEmpty(t, localmigration.AfterInitSteps("", 0, chStore)) + assert.NotEmpty(t, localmigration.AfterInitSteps("", 0, store)) t.Run("version numbers", func(t *testing.T) { t.Parallel() - err := migration.ValidateVersions(localmigration.AfterInitSteps("", 0, chStore)) + err := migration.ValidateVersions(localmigration.AfterInitSteps("", 0, store)) assert.NoError(t, err) }) t.Run("zero store migration", func(t *testing.T) { t.Parallel() - store := inmemstore.New() - - err := migration.Migrate(store, "migration", localmigration.AfterInitSteps("", 4, chStore)) + store := internal.NewInmemStorage() + err := store.Run(context.Background(), func(s transaction.Store) error { + return migration.Migrate(s.IndexStore(), "migration", localmigration.AfterInitSteps("", 4, store)) + }) assert.NoError(t, err) }) } @@ -42,12 +45,14 @@ func TestPreSteps(t *testing.T) { func TestPostSteps(t *testing.T) { t.Parallel() - assert.NotEmpty(t, localmigration.BeforeIinitSteps()) + st := inmemstore.New() + + assert.NotEmpty(t, localmigration.BeforeInitSteps(st)) t.Run("version numbers", func(t *testing.T) { t.Parallel() - err := migration.ValidateVersions(localmigration.BeforeIinitSteps()) + err := migration.ValidateVersions(localmigration.BeforeInitSteps(st)) assert.NoError(t, err) }) @@ -56,7 +61,7 @@ func TestPostSteps(t *testing.T) { store := inmemstore.New() - err := migration.Migrate(store, "migration", localmigration.BeforeIinitSteps()) + err := migration.Migrate(store, "migration", localmigration.BeforeInitSteps(store)) assert.NoError(t, err) }) } diff --git a/pkg/storer/migration/refCntSize.go b/pkg/storer/migration/refCntSize.go index ccb4552b9a0..3a28d99affe 100644 --- a/pkg/storer/migration/refCntSize.go +++ b/pkg/storer/migration/refCntSize.go @@ -101,60 +101,57 @@ func (r OldRetrievalIndexItem) String() string { return storageutil.JoinFields(r.Namespace(), r.ID()) } -func RefCountSizeInc(s storage.BatchedStore) error { - - logger := log.NewLogger("migration-RefCountSizeInc", log.WithSink(os.Stdout)) - - logger.Info("starting migration of replacing chunkstore items to increase refCnt capacity") - - var itemsToDelete []*OldRetrievalIndexItem - - err := s.Iterate( - storage.Query{ - Factory: func() storage.Item { return &OldRetrievalIndexItem{} }, - }, - func(res storage.Result) (bool, error) { - item := res.Entry.(*OldRetrievalIndexItem) - itemsToDelete = append(itemsToDelete, item) - return false, nil - }, - ) - if err != nil { - return err - } - - for i := 0; i < len(itemsToDelete); i += 10000 { - end := i + 10000 - if end > len(itemsToDelete) { - end = len(itemsToDelete) - } - - b, err := s.Batch(context.Background()) +func RefCountSizeInc(s storage.BatchStore) func() error { + return func() error { + logger := log.NewLogger("migration-RefCountSizeInc", log.WithSink(os.Stdout)) + + logger.Info("starting migration of replacing chunkstore items to increase refCnt capacity") + + var itemsToDelete []*OldRetrievalIndexItem + + err := s.Iterate( + storage.Query{ + Factory: func() storage.Item { return &OldRetrievalIndexItem{} }, + }, + func(res storage.Result) (bool, error) { + item := res.Entry.(*OldRetrievalIndexItem) + itemsToDelete = append(itemsToDelete, item) + return false, nil + }, + ) if err != nil { return err } - for _, item := range itemsToDelete[i:end] { + for i := 0; i < len(itemsToDelete); i += 10000 { + end := i + 10000 + if end > len(itemsToDelete) { + end = len(itemsToDelete) + } + + b := s.Batch(context.Background()) + for _, item := range itemsToDelete[i:end] { + + //create new + err = b.Put(&chunkstore.RetrievalIndexItem{ + Address: item.Address, + Timestamp: item.Timestamp, + Location: item.Location, + RefCnt: uint32(item.RefCnt), + }) + if err != nil { + return err + } + } - //create new - err = b.Put(&chunkstore.RetrievalIndexItem{ - Address: item.Address, - Timestamp: item.Timestamp, - Location: item.Location, - RefCnt: uint32(item.RefCnt), - }) + err = b.Commit() if err != nil { return err } } - err = b.Commit() - if err != nil { - return err - } - } - - logger.Info("migration complete") + logger.Info("migration complete") - return nil + return nil + } } diff --git a/pkg/storer/migration/refCntSize_test.go b/pkg/storer/migration/refCntSize_test.go index a7869d31c31..e0941670825 100644 --- a/pkg/storer/migration/refCntSize_test.go +++ b/pkg/storer/migration/refCntSize_test.go @@ -36,7 +36,7 @@ func Test_RefCntSize(t *testing.T) { assert.NoError(t, err) } - assert.NoError(t, stepFn(store)) + assert.NoError(t, stepFn(store)()) // check if all entries are migrated. for _, entry := range oldItems { diff --git a/pkg/storer/migration/step_01.go b/pkg/storer/migration/step_01.go index cc0b4fe8544..fc290231f16 100644 --- a/pkg/storer/migration/step_01.go +++ b/pkg/storer/migration/step_01.go @@ -4,14 +4,10 @@ package migration -import ( - storage "github.com/ethersphere/bee/pkg/storage" -) - // step_01 serves as example for setting up migration step. // // In this step store is not being modified. -func step_01(s storage.BatchedStore) error { +func step_01() error { // NOOP return nil } diff --git a/pkg/storer/migration/step_01_test.go b/pkg/storer/migration/step_01_test.go index 8b89d556745..c82a12634f8 100644 --- a/pkg/storer/migration/step_01_test.go +++ b/pkg/storer/migration/step_01_test.go @@ -9,7 +9,6 @@ import ( "github.com/stretchr/testify/assert" - "github.com/ethersphere/bee/pkg/storage/inmemstore" localmigration "github.com/ethersphere/bee/pkg/storer/migration" ) @@ -17,7 +16,5 @@ func Test_Step_01(t *testing.T) { t.Parallel() stepFn := localmigration.Step_01 - store := inmemstore.New() - - assert.NoError(t, stepFn(store)) + assert.NoError(t, stepFn()) } diff --git a/pkg/storer/migration/step_02.go b/pkg/storer/migration/step_02.go index 2da6963a35f..6ef082a5ca7 100644 --- a/pkg/storer/migration/step_02.go +++ b/pkg/storer/migration/step_02.go @@ -5,42 +5,52 @@ package migration import ( + "context" "time" storage "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storer/internal/cache" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" ) // step_02 migrates the cache to the new format. // the old cacheEntry item has the same key, but the value is different. So only // a Put is needed. -func step_02(st storage.BatchedStore) error { - var entries []*cache.CacheEntryItem - err := st.Iterate( - storage.Query{ - Factory: func() storage.Item { return &cache.CacheEntryItem{} }, - ItemProperty: storage.QueryItemID, - }, - func(res storage.Result) (bool, error) { - entry := &cache.CacheEntryItem{ - Address: swarm.NewAddress([]byte(res.ID)), - AccessTimestamp: time.Now().UnixNano(), - } - entries = append(entries, entry) - return false, nil - }, - ) - if err != nil { - return err - } +func step_02(st transaction.Storage) func() error { + + return func() error { + + trx, done := st.NewTransaction(context.Background()) + defer done() - for _, entry := range entries { - err := st.Put(entry) + var entries []*cache.CacheEntryItem + err := trx.IndexStore().Iterate( + storage.Query{ + Factory: func() storage.Item { return &cache.CacheEntryItem{} }, + ItemProperty: storage.QueryItemID, + }, + func(res storage.Result) (bool, error) { + entry := &cache.CacheEntryItem{ + Address: swarm.NewAddress([]byte(res.ID)), + AccessTimestamp: time.Now().UnixNano(), + } + entries = append(entries, entry) + return false, nil + }, + ) if err != nil { return err } + + for _, entry := range entries { + err := trx.IndexStore().Put(entry) + if err != nil { + return err + } + } + + return trx.Commit() } - return nil } diff --git a/pkg/storer/migration/step_02_test.go b/pkg/storer/migration/step_02_test.go index 85180ce134b..091e17c476d 100644 --- a/pkg/storer/migration/step_02_test.go +++ b/pkg/storer/migration/step_02_test.go @@ -5,14 +5,16 @@ package migration_test import ( + "context" "crypto/rand" "testing" "github.com/stretchr/testify/assert" storage "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storage/inmemstore" + "github.com/ethersphere/bee/pkg/storer/internal" "github.com/ethersphere/bee/pkg/storer/internal/cache" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" localmigration "github.com/ethersphere/bee/pkg/storer/migration" "github.com/ethersphere/bee/pkg/swarm" ) @@ -49,23 +51,25 @@ func Test_Step_02(t *testing.T) { t.Parallel() stepFn := localmigration.Step_02 - store := inmemstore.New() + store := internal.NewInmemStorage() // simulate old cacheEntryItem with some random bytes. var addrs []*testEntry for i := 0; i < 10; i++ { entry := &testEntry{address: swarm.RandAddress(t)} addrs = append(addrs, entry) - err := store.Put(entry) + err := store.Run(context.Background(), func(s transaction.Store) error { + return s.IndexStore().Put(entry) + }) assert.NoError(t, err) } - assert.NoError(t, stepFn(store)) + assert.NoError(t, stepFn(store)()) // check if all entries are migrated. for _, entry := range addrs { cEntry := &cache.CacheEntryItem{Address: entry.address} - err := store.Get(cEntry) + err := store.ReadOnly().IndexStore().Get(cEntry) assert.NoError(t, err) assert.Equal(t, entry.address, cEntry.Address) assert.Greater(t, cEntry.AccessTimestamp, int64(0)) diff --git a/pkg/storer/migration/step_03.go b/pkg/storer/migration/step_03.go index 8f52ba11547..b88d12e7505 100644 --- a/pkg/storer/migration/step_03.go +++ b/pkg/storer/migration/step_03.go @@ -12,16 +12,17 @@ import ( "github.com/ethersphere/bee/pkg/log" storage "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storer/internal/reserve" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" ) // step_03 is a migration step that removes all BinItem entries and migrates // ChunkBinItem and BatchRadiusItem entries to use a new BinID field. func step_03( - chunkStore storage.ChunkStore, + st transaction.Storage, chunkType func(swarm.Chunk) swarm.ChunkType, -) func(st storage.BatchedStore) error { - return func(st storage.BatchedStore) error { +) func() error { + return func() error { /* STEP 1, remove all of the BinItem entires STEP 2, remove all of the ChunkBinItem entries @@ -33,17 +34,25 @@ func step_03( logger.Info("starting migration for reconstructing reserve bin IDs, do not interrupt or kill the process...") // STEP 1 - for i := uint8(0); i < swarm.MaxBins; i++ { - err := st.Delete(&reserve.BinItem{Bin: i}) - if err != nil { - return err + + err := st.Run(context.Background(), func(s transaction.Store) error { + for i := uint8(0); i < swarm.MaxBins; i++ { + err := s.IndexStore().Delete(&reserve.BinItem{Bin: i}) + if err != nil { + return err + } } + return nil + }) + if err != nil { + return err } + logger.Info("removed all bin index entries") // STEP 2 var chunkBinItems []*reserve.ChunkBinItem - err := st.Iterate( + err = st.ReadOnly().IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return &reserve.ChunkBinItem{} }, }, @@ -63,19 +72,15 @@ func step_03( end = len(chunkBinItems) } - b, err := st.Batch(context.Background()) - if err != nil { - return err - } - - for _, item := range chunkBinItems[i:end] { - err = b.Delete(item) - if err != nil { - return err + err := st.Run(context.Background(), func(s transaction.Store) error { + for _, item := range chunkBinItems[i:end] { + err := s.IndexStore().Delete(item) + if err != nil { + return err + } } - } - - err = b.Commit() + return nil + }) if err != nil { return err } @@ -85,7 +90,7 @@ func step_03( // STEP 3 var batchRadiusItems []*reserve.BatchRadiusItem - err = st.Iterate( + err = st.ReadOnly().IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return &reserve.BatchRadiusItem{} }, }, @@ -111,56 +116,57 @@ func step_03( end = len(batchRadiusItems) } - b, err := st.Batch(context.Background()) - if err != nil { - return err - } - - for _, item := range batchRadiusItems[i:end] { - chunk, err := chunkStore.Get(context.Background(), item.Address) - if err != nil && !errors.Is(err, storage.ErrNotFound) { - return err - } - hasChunkEntry := err == nil - - if !hasChunkEntry { - err = b.Delete(item) - if err != nil { + err := st.Run(context.Background(), func(s transaction.Store) error { + for _, item := range batchRadiusItems[i:end] { + chunk, err := s.ChunkStore().Get(context.Background(), item.Address) + if err != nil && !errors.Is(err, storage.ErrNotFound) { return err } - missingChunks++ - } else { - newBinID, err := rs.IncBinID(st, item.Bin) - if err != nil { - return err - } - - item.BinID = newBinID - err = b.Put(item) - if err != nil { - return err - } - - err = b.Put(&reserve.ChunkBinItem{ - BatchID: item.BatchID, - Bin: item.Bin, - Address: item.Address, - BinID: newBinID, - ChunkType: chunkType(chunk), - }) - if err != nil { - return err + hasChunkEntry := err == nil + + if !hasChunkEntry { + err = s.IndexStore().Delete(item) + if err != nil { + return err + } + missingChunks++ + } else { + + var newBinID uint64 + err = st.Run(context.Background(), func(s transaction.Store) error { + newBinID, err = rs.IncBinID(s.IndexStore(), item.Bin) + return err + }) + if err != nil { + return err + } + + item.BinID = newBinID + err = s.IndexStore().Put(item) + if err != nil { + return err + } + + err = s.IndexStore().Put(&reserve.ChunkBinItem{ + BatchID: item.BatchID, + Bin: item.Bin, + Address: item.Address, + BinID: newBinID, + ChunkType: chunkType(chunk), + }) + if err != nil { + return err + } } } - } - - err = b.Commit() + return nil + }) if err != nil { return err } } + logger.Info("migrated all chunk entries", "new_size", len(batchRadiusItems)-missingChunks, "missing_chunks", missingChunks) return nil - } } diff --git a/pkg/storer/migration/step_03_test.go b/pkg/storer/migration/step_03_test.go index 180cc16aa10..27029ecfda9 100644 --- a/pkg/storer/migration/step_03_test.go +++ b/pkg/storer/migration/step_03_test.go @@ -10,10 +10,10 @@ import ( "testing" storage "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storage/inmemchunkstore" - "github.com/ethersphere/bee/pkg/storage/inmemstore" chunktest "github.com/ethersphere/bee/pkg/storage/testing" + "github.com/ethersphere/bee/pkg/storer/internal" "github.com/ethersphere/bee/pkg/storer/internal/reserve" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" localmigration "github.com/ethersphere/bee/pkg/storer/migration" "github.com/ethersphere/bee/pkg/swarm" "github.com/stretchr/testify/assert" @@ -22,10 +22,9 @@ import ( func Test_Step_03(t *testing.T) { t.Parallel() - store := inmemstore.New() - chStore := inmemchunkstore.New() + store := internal.NewInmemStorage() baseAddr := swarm.RandAddress(t) - stepFn := localmigration.Step_03(chStore, func(_ swarm.Chunk) swarm.ChunkType { + stepFn := localmigration.Step_03(store, func(_ swarm.Chunk) swarm.ChunkType { return swarm.ChunkTypeContentAddressed }) @@ -33,7 +32,9 @@ func Test_Step_03(t *testing.T) { var chunksPerPO uint64 = 2 for i := uint8(0); i < swarm.MaxBins; i++ { - err := store.Put(&reserve.BinItem{Bin: i, BinID: 10}) + err := store.Run(context.Background(), func(s transaction.Store) error { + return s.IndexStore().Put(&reserve.BinItem{Bin: i, BinID: 10}) + }) assert.NoError(t, err) } @@ -48,7 +49,9 @@ func Test_Step_03(t *testing.T) { BatchID: ch.Stamp().BatchID(), ChunkType: swarm.ChunkTypeContentAddressed, } - err := store.Put(cb) + err := store.Run(context.Background(), func(s transaction.Store) error { + return s.IndexStore().Put(cb) + }) if err != nil { t.Fatal(err) } @@ -59,7 +62,9 @@ func Test_Step_03(t *testing.T) { Address: ch.Address(), BinID: 0, } - err = store.Put(br) + err = store.Run(context.Background(), func(s transaction.Store) error { + return s.IndexStore().Put(br) + }) if err != nil { t.Fatal(err) } @@ -69,7 +74,9 @@ func Test_Step_03(t *testing.T) { continue } - err = chStore.Put(context.Background(), ch) + err = store.Run(context.Background(), func(s transaction.Store) error { + return s.ChunkStore().Put(context.Background(), ch) + }) if err != nil { t.Fatal(err) } @@ -78,11 +85,11 @@ func Test_Step_03(t *testing.T) { } } - assert.NoError(t, stepFn(store)) + assert.NoError(t, stepFn()) binIDs := make(map[uint8][]uint64) cbCount := 0 - err := store.Iterate( + err := store.ReadOnly().IndexStore().Iterate( storage.Query{Factory: func() storage.Item { return &reserve.ChunkBinItem{} }}, func(res storage.Result) (stop bool, err error) { cb := res.Entry.(*reserve.ChunkBinItem) @@ -110,7 +117,7 @@ func Test_Step_03(t *testing.T) { } brCount := 0 - err = store.Iterate( + err = store.ReadOnly().IndexStore().Iterate( storage.Query{Factory: func() storage.Item { return &reserve.BatchRadiusItem{} }}, func(res storage.Result) (stop bool, err error) { br := res.Entry.(*reserve.BatchRadiusItem) diff --git a/pkg/storer/migration/step_04.go b/pkg/storer/migration/step_04.go index e621ae3458a..fc84bda4eb1 100644 --- a/pkg/storer/migration/step_04.go +++ b/pkg/storer/migration/step_04.go @@ -10,8 +10,8 @@ import ( "github.com/ethersphere/bee/pkg/log" "github.com/ethersphere/bee/pkg/sharky" - "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storer/internal/chunkstore" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" ) @@ -20,8 +20,9 @@ import ( func step_04( sharkyBasePath string, sharkyNoOfShards int, -) func(st storage.BatchedStore) error { - return func(st storage.BatchedStore) error { + st transaction.Storage, +) func() error { + return func() error { // for in-mem store, skip this step if sharkyBasePath == "" { return nil @@ -35,7 +36,7 @@ func step_04( } locationResultC := make(chan chunkstore.LocationResult) - chunkstore.IterateLocations(context.Background(), st, locationResultC) + chunkstore.IterateLocations(context.Background(), st.ReadOnly().IndexStore(), locationResultC) for res := range locationResultC { if res.Err != nil { diff --git a/pkg/storer/migration/step_04_test.go b/pkg/storer/migration/step_04_test.go index 6b70d69d5e7..66159585956 100644 --- a/pkg/storer/migration/step_04_test.go +++ b/pkg/storer/migration/step_04_test.go @@ -15,6 +15,7 @@ import ( "github.com/ethersphere/bee/pkg/storage/inmemstore" chunktest "github.com/ethersphere/bee/pkg/storage/testing" "github.com/ethersphere/bee/pkg/storer/internal/chunkstore" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" localmigration "github.com/ethersphere/bee/pkg/storer/migration" "github.com/ethersphere/bee/pkg/swarm" "github.com/stretchr/testify/assert" @@ -36,34 +37,40 @@ func Test_Step_04(t *testing.T) { assert.NoError(t, err) store := inmemstore.New() - chStore := chunkstore.New(store, sharkyStore) - stepFn := localmigration.Step_04(sharkyDir, 1) + + storage := transaction.NewStorage(sharkyStore, store) + + stepFn := localmigration.Step_04(sharkyDir, 1, storage) chunks := chunktest.GenerateTestRandomChunks(10) for _, ch := range chunks { - err := chStore.Put(context.Background(), ch) + err = storage.Run(context.Background(), func(s transaction.Store) error { + return s.ChunkStore().Put(context.Background(), ch) + }) assert.NoError(t, err) } for _, ch := range chunks[:2] { - err := store.Delete(&chunkstore.RetrievalIndexItem{Address: ch.Address()}) + err = storage.Run(context.Background(), func(s transaction.Store) error { + return s.IndexStore().Delete(&chunkstore.RetrievalIndexItem{Address: ch.Address()}) + }) assert.NoError(t, err) } err = sharkyStore.Close() assert.NoError(t, err) - assert.NoError(t, stepFn(store)) + assert.NoError(t, stepFn()) sharkyStore, err = sharky.New(&dirFS{basedir: sharkyDir}, 1, swarm.SocMaxChunkSize) assert.NoError(t, err) - chStore = chunkstore.New(store, sharkyStore) + store2 := transaction.NewStorage(sharkyStore, store) // check that the chunks are still there for _, ch := range chunks[2:] { - _, err := chStore.Get(context.Background(), ch.Address()) + _, err := store2.ReadOnly().ChunkStore().Get(context.Background(), ch.Address()) assert.NoError(t, err) } diff --git a/pkg/storer/netstore_test.go b/pkg/storer/netstore_test.go index b15cd2cfed8..923fb368e9b 100644 --- a/pkg/storer/netstore_test.go +++ b/pkg/storer/netstore_test.go @@ -87,7 +87,7 @@ func testNetStore(t *testing.T, newStorer func(r retrieval.Interface) (*storer.D t.Fatalf("unexpected no of pusher ops want 10 have %d", count) } - verifyChunks(t, lstore.Repo(), chunks, false) + verifyChunks(t, lstore.Storage(), chunks, false) }) t.Run("pusher error", func(t *testing.T) { @@ -145,7 +145,7 @@ func testNetStore(t *testing.T, newStorer func(r retrieval.Interface) (*storer.D t.Fatalf("session.Cleanup(): unexpected error: %v", err) } - verifyChunks(t, lstore.Repo(), chunks, false) + verifyChunks(t, lstore.Storage(), chunks, false) }) t.Run("context cancellation", func(t *testing.T) { @@ -185,7 +185,7 @@ func testNetStore(t *testing.T, newStorer func(r retrieval.Interface) (*storer.D t.Fatalf("unexpected no of pusher ops want 5 have %d", count) } - verifyChunks(t, lstore.Repo(), chunks, false) + verifyChunks(t, lstore.Storage(), chunks, false) }) t.Run("shallow receipt retry", func(t *testing.T) { @@ -230,56 +230,56 @@ func testNetStore(t *testing.T, newStorer func(r retrieval.Interface) (*storer.D t.Fatalf("unexpected no of pusher ops want 0 have %d", count) } }) - }) - - t.Run("download", func(t *testing.T) { - t.Parallel() - t.Run("with cache", func(t *testing.T) { + t.Run("download", func(t *testing.T) { t.Parallel() - chunks := chunktesting.GenerateTestRandomChunks(10) + t.Run("with cache", func(t *testing.T) { + t.Parallel() - lstore, err := newStorer(&testRetrieval{fn: func(address swarm.Address) (swarm.Chunk, error) { - for _, ch := range chunks[5:] { - if ch.Address().Equal(address) { - return ch, nil + chunks := chunktesting.GenerateTestRandomChunks(10) + + lstore, err := newStorer(&testRetrieval{fn: func(address swarm.Address) (swarm.Chunk, error) { + for _, ch := range chunks[5:] { + if ch.Address().Equal(address) { + return ch, nil + } } + return nil, storage.ErrNotFound + }}) + if err != nil { + t.Fatal(err) } - return nil, storage.ErrNotFound - }}) - if err != nil { - t.Fatal(err) - } - // Add some chunks to Cache to simulate local retrieval. - for idx, ch := range chunks { - if idx < 5 { - err := lstore.Cache().Put(context.TODO(), ch) - if err != nil { - t.Fatalf("cache.Put(...): unexpected error: %v", err) + // Add some chunks to Cache to simulate local retrieval. + for idx, ch := range chunks { + if idx < 5 { + err := lstore.Cache().Put(context.TODO(), ch) + if err != nil { + t.Fatalf("cache.Put(...): unexpected error: %v", err) + } + } else { + break } - } else { - break } - } - getter := lstore.Download(true) + getter := lstore.Download(true) - for idx, ch := range chunks { - readCh, err := getter.Get(context.TODO(), ch.Address()) - if err != nil { - t.Fatalf("download.Get(...): unexpected error: %v idx %d", err, idx) - } - if !readCh.Equal(ch) { - t.Fatalf("incorrect chunk read: address %s", readCh.Address()) + for idx, ch := range chunks { + readCh, err := getter.Get(context.TODO(), ch.Address()) + if err != nil { + t.Fatalf("download.Get(...): unexpected error: %v idx %d", err, idx) + } + if !readCh.Equal(ch) { + t.Fatalf("incorrect chunk read: address %s", readCh.Address()) + } } - } - t.Cleanup(lstore.WaitForBgCacheWorkers()) + t.Cleanup(lstore.WaitForBgCacheWorkers()) - // After download is complete all chunks should be in the local storage. - verifyChunks(t, lstore.Repo(), chunks, true) + // After download is complete all chunks should be in the local storage. + verifyChunks(t, lstore.Storage(), chunks, true) + }) }) t.Run("no cache", func(t *testing.T) { @@ -324,8 +324,8 @@ func testNetStore(t *testing.T, newStorer func(r retrieval.Interface) (*storer.D } // only the chunks that were already in cache should be present - verifyChunks(t, lstore.Repo(), chunks[:5], true) - verifyChunks(t, lstore.Repo(), chunks[5:], false) + verifyChunks(t, lstore.Storage(), chunks[:5], true) + verifyChunks(t, lstore.Storage(), chunks[5:], false) }) }) } @@ -348,17 +348,17 @@ func TestNetStore(t *testing.T) { return db, err }) }) - t.Run("disk", func(t *testing.T) { - t.Parallel() - - testNetStore(t, func(r retrieval.Interface) (*storer.DB, error) { - opts := dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second) - - db, err := diskStorer(t, opts)() - if err == nil { - db.SetRetrievalService(r) - } - return db, err - }) - }) + // t.Run("disk", func(t *testing.T) { + // t.Parallel() + + // testNetStore(t, func(r retrieval.Interface) (*storer.DB, error) { + // opts := dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second) + + // db, err := diskStorer(t, opts)() + // if err == nil { + // db.SetRetrievalService(r) + // } + // return db, err + // }) + // }) } diff --git a/pkg/storer/pinstore.go b/pkg/storer/pinstore.go index 58cc2ef2cc2..a7a01a5cf61 100644 --- a/pkg/storer/pinstore.go +++ b/pkg/storer/pinstore.go @@ -12,6 +12,7 @@ import ( storage "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storer/internal" pinstore "github.com/ethersphere/bee/pkg/storer/internal/pinning" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" ) @@ -21,8 +22,8 @@ func (db *DB) NewCollection(ctx context.Context) (PutterSession, error) { pinningPutter internal.PutterCloserWithReference err error ) - err = db.Execute(ctx, func(txnRepo internal.Storage) error { - pinningPutter, err = pinstore.NewCollection(txnRepo) + err = db.storage.Run(ctx, func(store transaction.Store) error { + pinningPutter, err = pinstore.NewCollection(store.IndexStore()) if err != nil { return fmt.Errorf("pinstore.NewCollection: %w", err) } @@ -36,8 +37,10 @@ func (db *DB) NewCollection(ctx context.Context) (PutterSession, error) { Putter: putterWithMetrics{ storage.PutterFunc( func(ctx context.Context, chunk swarm.Chunk) error { - return db.Execute(ctx, func(s internal.Storage) error { - return pinningPutter.Put(ctx, s, s.IndexStore(), chunk) + return db.storage.Run(ctx, func(s transaction.Store) error { + unlock := db.Lock(uploadsLock) + defer unlock() + return pinningPutter.Put(ctx, s, chunk) }) }, ), @@ -45,12 +48,16 @@ func (db *DB) NewCollection(ctx context.Context) (PutterSession, error) { "pinstore", }, done: func(address swarm.Address) error { - return db.Execute(ctx, func(s internal.Storage) error { - return pinningPutter.Close(s, s.IndexStore(), address) + unlock := db.Lock(uploadsLock) + defer unlock() + return db.storage.Run(ctx, func(s transaction.Store) error { + return pinningPutter.Close(s.IndexStore(), address) }) }, cleanup: func() error { - return pinningPutter.Cleanup(db) + unlock := db.Lock(uploadsLock) + defer unlock() + return pinningPutter.Cleanup(db.storage) }, }, nil } @@ -67,7 +74,10 @@ func (db *DB) DeletePin(ctx context.Context, root swarm.Address) (err error) { } }() - return pinstore.DeletePin(ctx, db, root) + unlock := db.Lock(uploadsLock) + defer unlock() + + return pinstore.DeletePin(ctx, db.storage, root) } // Pins is the implementation of the PinStore.Pins method. @@ -82,7 +92,7 @@ func (db *DB) Pins() (address []swarm.Address, err error) { } }() - return pinstore.Pins(db.repo.IndexStore()) + return pinstore.Pins(db.storage.ReadOnly().IndexStore()) } // HasPin is the implementation of the PinStore.HasPin method. @@ -97,9 +107,9 @@ func (db *DB) HasPin(root swarm.Address) (has bool, err error) { } }() - return pinstore.HasPin(db.repo.IndexStore(), root) + return pinstore.HasPin(db.storage.ReadOnly().IndexStore(), root) } func (db *DB) IteratePinCollection(root swarm.Address, iterateFn func(swarm.Address) (bool, error)) error { - return pinstore.IterateCollection(db.repo.IndexStore(), root, iterateFn) + return pinstore.IterateCollection(db.storage.ReadOnly().IndexStore(), root, iterateFn) } diff --git a/pkg/storer/pinstore_test.go b/pkg/storer/pinstore_test.go index 43676f6ce61..5af2bf9ee7e 100644 --- a/pkg/storer/pinstore_test.go +++ b/pkg/storer/pinstore_test.go @@ -6,12 +6,10 @@ package storer_test import ( "context" - "errors" "fmt" "testing" "time" - storage "github.com/ethersphere/bee/pkg/storage" chunktesting "github.com/ethersphere/bee/pkg/storage/testing" storer "github.com/ethersphere/bee/pkg/storer" "github.com/ethersphere/bee/pkg/swarm" @@ -71,7 +69,7 @@ func testPinStore(t *testing.T, newStorer func() (*storer.DB, error)) { t.Fatalf("session.Done(...): unexpected error: %v", err) } } - verifyPinCollection(t, lstore.Repo(), tc.chunks[0], tc.chunks, !tc.fail) + verifyPinCollection(t, lstore.Storage(), tc.chunks[0], tc.chunks, !tc.fail) }) } @@ -106,25 +104,7 @@ func testPinStore(t *testing.T, newStorer func() (*storer.DB, error)) { t.Fatalf("DeletePin(...): unexpected error: %v", err) } - verifyPinCollection(t, lstore.Repo(), testCases[2].chunks[0], testCases[2].chunks, false) - }) - t.Run("rollback", func(t *testing.T) { - want := errors.New("dummy error") - lstore.SetRepoStoreDeleteHook(func(item storage.Item) error { - // return error for delete of second last item in collection - // this should trigger a rollback - if item.ID() == testCases[0].chunks[8].Address().ByteString() { - return want - } - return nil - }) - - have := lstore.DeletePin(context.TODO(), testCases[0].chunks[0].Address()) - if !errors.Is(have, want) { - t.Fatalf("DeletePin(...): unexpected error: want %v have %v", want, have) - } - - verifyPinCollection(t, lstore.Repo(), testCases[0].chunks[0], testCases[0].chunks, true) + verifyPinCollection(t, lstore.Storage(), testCases[2].chunks[0], testCases[2].chunks, false) }) }) @@ -169,8 +149,8 @@ func testPinStore(t *testing.T, newStorer func() (*storer.DB, error)) { t.Fatalf("session2.Done(...): unexpected error: %v", err) } - verifyPinCollection(t, lstore.Repo(), chunks[0], chunks, true) - verifyChunkRefCount(t, lstore.Repo(), chunks) + verifyPinCollection(t, lstore.Storage(), chunks[0], chunks, true) + verifyChunkRefCount(t, lstore.Storage().ReadOnly(), chunks) }) } diff --git a/pkg/storer/reserve.go b/pkg/storer/reserve.go index 38060e842f8..57fd74d8949 100644 --- a/pkg/storer/reserve.go +++ b/pkg/storer/reserve.go @@ -18,17 +18,16 @@ import ( "github.com/ethersphere/bee/pkg/postage" "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage/storageutil" - "github.com/ethersphere/bee/pkg/storer/internal" "github.com/ethersphere/bee/pkg/storer/internal/reserve" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/swarm" ) const ( - reserveOverCapacity = "reserveOverCapacity" - reserveUnreserved = "reserveUnreserved" - reserveUpdateLockKey = "reserveUpdateLockKey" - batchExpiry = "batchExpiry" - batchExpiryDone = "batchExpiryDone" + reserveOverCapacity = "reserveOverCapacity" + reserveUnreserved = "reserveUnreserved" + batchExpiry = "batchExpiry" + batchExpiryDone = "batchExpiryDone" ) var errMaxRadius = errors.New("max radius reached") @@ -43,7 +42,6 @@ func threshold(capacity int) int { return capacity * 5 / 10 } func (db *DB) startReserveWorkers( ctx context.Context, - warmupDur, wakeUpDur time.Duration, radius func() (uint8, error), ) { ctx, cancel := context.WithCancel(ctx) @@ -58,7 +56,7 @@ func (db *DB) startReserveWorkers( go db.evictionWorker(ctx) select { - case <-time.After(warmupDur): + case <-time.After(db.opts.warmupDuration): case <-db.quit: return } @@ -69,7 +67,8 @@ func (db *DB) startReserveWorkers( return // node shutdown } - if err := db.reserve.SetRadius(db.repo.IndexStore(), r); err != nil { + err = db.reserve.SetRadius(r) + if err != nil { db.logger.Error(err, "reserve set radius") } else { db.metrics.StorageRadius.Set(float64(r)) @@ -116,20 +115,22 @@ func (db *DB) reserveSizeWithinRadiusWorker(ctx context.Context) { evictBatches := make(map[string]bool) - err := db.reserve.IterateChunksItems(db.repo, 0, func(ci reserve.ChunkItem) (bool, error) { - if ci.Bin >= radius { - count++ - } + err := db.storage.Run(ctx, func(t transaction.Store) error { + return db.reserve.IterateChunksItems(0, func(ci reserve.ChunkItem) (bool, error) { + if ci.Bin >= radius { + count++ + } - if skipInvalidCheck { - return false, nil - } + if skipInvalidCheck { + return false, nil + } - if exists, err := db.batchstore.Exists(ci.BatchID); err == nil && !exists { - missing++ - evictBatches[string(ci.BatchID)] = true - } - return false, nil + if exists, err := db.batchstore.Exists(ci.BatchID); err == nil && !exists { + missing++ + evictBatches[string(ci.BatchID)] = true + } + return false, nil + }) }) if err != nil { db.logger.Error(err, "reserve count within radius") @@ -169,10 +170,11 @@ func (db *DB) reserveSizeWithinRadiusWorker(ctx context.Context) { if count < threshold(db.reserve.Capacity()) && db.syncer.SyncRate() == 0 && radius > 0 { radius-- - err := db.reserve.SetRadius(db.repo.IndexStore(), radius) + err := db.reserve.SetRadius(radius) if err != nil { db.logger.Error(err, "reserve set radius") } + db.logger.Info("reserve radius decrease", "radius", radius) } db.metrics.StorageRadius.Set(float64(radius)) @@ -230,8 +232,8 @@ func (db *DB) evictExpiredBatches(ctx context.Context) error { if evicted > 0 { db.logger.Debug("evicted expired batch", "batch_id", hex.EncodeToString(batchID), "total_evicted", evicted) } - err = db.Execute(ctx, func(tx internal.Storage) error { - return tx.IndexStore().Delete(&expiredBatchItem{BatchID: batchID}) + err = db.storage.Run(ctx, func(st transaction.Store) error { + return st.IndexStore().Delete(&expiredBatchItem{BatchID: batchID}) }) if err != nil { return err @@ -243,7 +245,7 @@ func (db *DB) evictExpiredBatches(ctx context.Context) error { func (db *DB) getExpiredBatches() ([][]byte, error) { var batchesToEvict [][]byte - err := db.repo.IndexStore().Iterate(storage.Query{ + err := db.storage.ReadOnly().IndexStore().Iterate(storage.Query{ Factory: func() storage.Item { return new(expiredBatchItem) }, ItemProperty: storage.QueryItemID, }, func(result storage.Result) (bool, error) { @@ -256,6 +258,38 @@ func (db *DB) getExpiredBatches() ([][]byte, error) { return batchesToEvict, nil } +func (db *DB) evictBatch( + ctx context.Context, + batchID []byte, + evictCount int, + upToBin uint8, +) (evicted int, err error) { + dur := captureDuration(time.Now()) + defer func() { + db.metrics.ReserveSize.Set(float64(db.reserve.Size())) + db.metrics.MethodCallsDuration.WithLabelValues("reserve", "EvictBatch").Observe(dur()) + if err == nil { + db.metrics.MethodCalls.WithLabelValues("reserve", "EvictBatch", "success").Inc() + } else { + db.metrics.MethodCalls.WithLabelValues("reserve", "EvictBatch", "failure").Inc() + } + if upToBin == swarm.MaxBins { + db.metrics.ExpiredChunkCount.Add(float64(evicted)) + } else { + db.metrics.EvictedChunkCount.Add(float64(evicted)) + } + db.logger.Debug( + "reserve eviction", + "uptoBin", upToBin, + "evicted", evicted, + "batchID", hex.EncodeToString(batchID), + "new_size", db.reserve.Size(), + ) + }() + + return db.reserve.EvictBatchBin(ctx, batchID, evictCount, upToBin) +} + // EvictBatch evicts all chunks belonging to a batch from the reserve. func (db *DB) EvictBatch(ctx context.Context, batchID []byte) error { if db.reserve == nil { @@ -263,7 +297,7 @@ func (db *DB) EvictBatch(ctx context.Context, batchID []byte) error { return nil } - err := db.Execute(ctx, func(tx internal.Storage) error { + err := db.storage.Run(ctx, func(tx transaction.Store) error { return tx.IndexStore().Put(&expiredBatchItem{BatchID: batchID}) }) if err != nil { @@ -285,7 +319,7 @@ func (db *DB) ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte } }() - return db.reserve.Get(ctx, db.repo, addr, batchID) + return db.reserve.Get(ctx, addr, batchID) } func (db *DB) ReserveHas(addr swarm.Address, batchID []byte) (has bool, err error) { @@ -299,18 +333,17 @@ func (db *DB) ReserveHas(addr swarm.Address, batchID []byte) (has bool, err erro } }() - return db.reserve.Has(db.repo.IndexStore(), addr, batchID) + return db.reserve.Has(addr, batchID) } // ReservePutter returns a Putter for inserting chunks into the reserve. func (db *DB) ReservePutter() storage.Putter { return putterWithMetrics{ storage.PutterFunc( - func(ctx context.Context, chunk swarm.Chunk) (err error) { - err = db.Execute(ctx, func(tx internal.Storage) error { - return db.reserve.Put(ctx, tx, chunk) - }) + func(ctx context.Context, chunk swarm.Chunk) error { + err := db.reserve.Put(ctx, chunk) if err != nil { + db.logger.Debug("reserve put error", "error", err) return fmt.Errorf("reserve: putter.Put: %w", err) } db.reserveBinEvents.Trigger(string(db.po(chunk.Address()))) @@ -326,38 +359,6 @@ func (db *DB) ReservePutter() storage.Putter { } } -func (db *DB) evictBatch( - ctx context.Context, - batchID []byte, - evictCount int, - upToBin uint8, -) (evicted int, err error) { - dur := captureDuration(time.Now()) - defer func() { - db.metrics.ReserveSize.Set(float64(db.reserve.Size())) - db.metrics.MethodCallsDuration.WithLabelValues("reserve", "EvictBatch").Observe(dur()) - if err == nil { - db.metrics.MethodCalls.WithLabelValues("reserve", "EvictBatch", "success").Inc() - } else { - db.metrics.MethodCalls.WithLabelValues("reserve", "EvictBatch", "failure").Inc() - } - if upToBin == swarm.MaxBins { - db.metrics.ExpiredChunkCount.Add(float64(evicted)) - } else { - db.metrics.EvictedChunkCount.Add(float64(evicted)) - } - db.logger.Debug( - "reserve eviction", - "uptoBin", upToBin, - "evicted", evicted, - "batchID", hex.EncodeToString(batchID), - "new_size", db.reserve.Size(), - ) - }() - - return db.reserve.EvictBatchBin(ctx, db, batchID, evictCount, upToBin) -} - func (db *DB) unreserve(ctx context.Context) (err error) { dur := captureDuration(time.Now()) defer func() { @@ -421,7 +422,7 @@ func (db *DB) unreserve(ctx context.Context) (err error) { radius++ db.logger.Info("reserve radius increase", "radius", radius) - _ = db.reserve.SetRadius(db.repo.IndexStore(), radius) + _ = db.reserve.SetRadius(radius) } return errMaxRadius @@ -432,11 +433,12 @@ func (db *DB) ReserveLastBinIDs() ([]uint64, uint64, error) { if db.reserve == nil { return nil, 0, nil } - return db.reserve.LastBinIDs(db.repo.IndexStore()) + + return db.reserve.LastBinIDs() } func (db *DB) ReserveIterateChunks(cb func(swarm.Chunk) (bool, error)) error { - return db.reserve.IterateChunks(db.repo, 0, cb) + return db.reserve.IterateChunks(0, cb) } func (db *DB) StorageRadius() uint8 { @@ -483,8 +485,7 @@ func (db *DB) SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan for { - err := db.reserve.IterateBin(db.repo.IndexStore(), bin, start, func(a swarm.Address, binID uint64, batchID []byte) (bool, error) { - + err := db.reserve.IterateBin(bin, start, func(a swarm.Address, binID uint64, batchID []byte) (bool, error) { select { case out <- &BinC{Address: a, BinID: binID, BatchID: batchID}: start = binID + 1 diff --git a/pkg/storer/reserve_test.go b/pkg/storer/reserve_test.go index 26e3f3cabcf..a86f8c96612 100644 --- a/pkg/storer/reserve_test.go +++ b/pkg/storer/reserve_test.go @@ -119,14 +119,14 @@ func TestReplaceOldIndex(t *testing.T) { } // Chunk 1 must be missing - item, err := stampindex.Load(storer.Repo().IndexStore(), "reserve", ch_1) + item, err := stampindex.Load(storer.Storage().ReadOnly().IndexStore(), "reserve", ch_1) if err != nil { t.Fatal(err) } if !item.ChunkAddress.Equal(ch_2.Address()) { t.Fatalf("wanted addr %s, got %s", ch_1.Address(), item.ChunkAddress) } - _, err = chunkstamp.Load(storer.Repo().IndexStore(), "reserve", ch_1.Address()) + _, err = chunkstamp.Load(storer.Storage().ReadOnly().IndexStore(), "reserve", ch_1.Address()) if !errors.Is(err, storage.ErrNotFound) { t.Fatalf("wanted err %s, got err %s", storage.ErrNotFound, err) } @@ -658,11 +658,11 @@ func checkSaved(t *testing.T, st *storer.DB, ch swarm.Chunk, stampSaved, chunkSt if !stampSaved { stampWantedErr = storage.ErrNotFound } - _, err := stampindex.Load(st.Repo().IndexStore(), "reserve", ch) + _, err := stampindex.Load(st.Storage().ReadOnly().IndexStore(), "reserve", ch) if !errors.Is(err, stampWantedErr) { t.Fatalf("wanted err %s, got err %s", stampWantedErr, err) } - _, err = chunkstamp.Load(st.Repo().IndexStore(), "reserve", ch.Address()) + _, err = chunkstamp.Load(st.Storage().ReadOnly().IndexStore(), "reserve", ch.Address()) if !errors.Is(err, stampWantedErr) { t.Fatalf("wanted err %s, got err %s", stampWantedErr, err) } @@ -671,7 +671,7 @@ func checkSaved(t *testing.T, st *storer.DB, ch swarm.Chunk, stampSaved, chunkSt if !chunkStoreSaved { chunkStoreWantedErr = storage.ErrNotFound } - gotCh, err := st.Repo().ChunkStore().Get(context.Background(), ch.Address()) + gotCh, err := st.Storage().ReadOnly().ChunkStore().Get(context.Background(), ch.Address()) if !errors.Is(err, chunkStoreWantedErr) { t.Fatalf("wanted err %s, got err %s", chunkStoreWantedErr, err) } diff --git a/pkg/storer/sample.go b/pkg/storer/sample.go index 7fd5cc2ad4a..70f6420eb45 100644 --- a/pkg/storer/sample.go +++ b/pkg/storer/sample.go @@ -139,7 +139,7 @@ func (db *DB) ReserveSample( addStats(stats) }() - err := db.reserve.IterateChunksItems(db.repo, storageRadius, func(chi reserve.ChunkItem) (bool, error) { + err := db.reserve.IterateChunksItems(storageRadius, func(chi reserve.ChunkItem) (bool, error) { select { case chunkC <- chi: stats.TotalIterated++ @@ -264,7 +264,7 @@ func (db *DB) ReserveSample( if le(item.TransformedAddress, currentMaxAddr) || len(sampleItems) < SampleSize { start := time.Now() - stamp, err := chunkstamp.LoadWithBatchID(db.repo.IndexStore(), "reserve", item.ChunkAddress, item.Stamp.BatchID()) + stamp, err := chunkstamp.LoadWithBatchID(db.storage.ReadOnly().IndexStore(), "reserve", item.ChunkAddress, item.Stamp.BatchID()) if err != nil { stats.StampLoadFailed++ db.logger.Debug("failed loading stamp", "chunk_address", item.ChunkAddress, "error", err) diff --git a/pkg/storer/storer.go b/pkg/storer/storer.go index 12057f95c43..1b187d56406 100644 --- a/pkg/storer/storer.go +++ b/pkg/storer/storer.go @@ -27,12 +27,11 @@ import ( "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage/leveldbstore" "github.com/ethersphere/bee/pkg/storage/migration" - "github.com/ethersphere/bee/pkg/storer/internal" "github.com/ethersphere/bee/pkg/storer/internal/cache" - "github.com/ethersphere/bee/pkg/storer/internal/chunkstore" "github.com/ethersphere/bee/pkg/storer/internal/events" pinstore "github.com/ethersphere/bee/pkg/storer/internal/pinning" "github.com/ethersphere/bee/pkg/storer/internal/reserve" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/storer/internal/upload" localmigration "github.com/ethersphere/bee/pkg/storer/migration" "github.com/ethersphere/bee/pkg/swarm" @@ -212,7 +211,7 @@ func closer(closers ...io.Closer) io.Closer { }) } -func initInmemRepository(locker storage.ChunkLocker) (storage.Repository, io.Closer, error) { +func initInmemRepository() (transaction.Storage, io.Closer, error) { store, err := leveldbstore.New("", nil) if err != nil { return nil, nil, fmt.Errorf("failed creating inmem levelDB index store: %w", err) @@ -227,10 +226,7 @@ func initInmemRepository(locker storage.ChunkLocker) (storage.Repository, io.Clo return nil, nil, fmt.Errorf("failed creating inmem sharky instance: %w", err) } - txStore := leveldbstore.NewTxStore(store) - txChunkStore := chunkstore.NewTxChunkStore(txStore, sharky) - - return storage.NewRepository(txStore, txChunkStore, locker), closer(store, sharky), nil + return transaction.NewStorage(sharky, store), closer(store, sharky), nil } // loggerName is the tree path name of the logger for this package. @@ -276,15 +272,15 @@ func initStore(basePath string, opts *Options) (*leveldbstore.Store, error) { func initDiskRepository( ctx context.Context, basePath string, - locker storage.ChunkLocker, opts *Options, -) (storage.Repository, io.Closer, error) { +) (transaction.Storage, io.Closer, error) { + store, err := initStore(basePath, opts) if err != nil { return nil, nil, fmt.Errorf("failed creating levelDB index store: %w", err) } - err = migration.Migrate(store, "core-migration", localmigration.BeforeIinitSteps()) + err = migration.Migrate(store, "core-migration", localmigration.BeforeInitSteps(store)) if err != nil { return nil, nil, fmt.Errorf("failed core migration: %w", err) } @@ -356,78 +352,7 @@ func initDiskRepository( return nil, nil, fmt.Errorf("failed creating sharky instance: %w", err) } - txStore := leveldbstore.NewTxStore(store) - if err := txStore.Recover(); err != nil { - return nil, nil, fmt.Errorf("failed to recover index store: %w", err) - } - - txChunkStore := chunkstore.NewTxChunkStore(txStore, sharky) - if err := txChunkStore.Recover(); err != nil { - return nil, nil, fmt.Errorf("failed to recover chunk store: %w", err) - } - - return storage.NewRepository(txStore, txChunkStore, locker), closer(store, sharky, recoveryCloser), nil -} - -func initCache(ctx context.Context, capacity uint64, repo storage.Repository) (*cache.Cache, error) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - txnRepo, commit, rollback := repo.NewTx(ctx) - c, err := cache.New(ctx, txnRepo, capacity) - if err != nil { - return nil, fmt.Errorf("cache.New: %w", errors.Join(err, rollback())) - } - - return c, commit() -} - -type noopRadiusSetter struct{} - -func (noopRadiusSetter) SetStorageRadius(uint8) {} - -func performEpochMigration(ctx context.Context, basePath string, opts *Options) (retErr error) { - store, err := initStore(basePath, opts) - if err != nil { - return err - } - defer store.Close() - - sharkyBasePath := path.Join(basePath, sharkyPath) - var sharkyRecover *sharky.Recovery - // if this is a fresh node then perform an empty epoch migration - if _, err := os.Stat(sharkyBasePath); err == nil { - sharkyRecover, err = sharky.NewRecovery(sharkyBasePath, sharkyNoOfShards, swarm.SocMaxChunkSize) - if err != nil { - return err - } - defer sharkyRecover.Close() - } - - logger := opts.Logger.WithName("epochmigration").Register() - - var rs reservePutter - - if opts.ReserveCapacity > 0 { - rs, err = reserve.New( - opts.Address, - store, - opts.ReserveCapacity, - noopRadiusSetter{}, - logger, - ) - if err != nil { - return err - } - } - - defer func() { - if sharkyRecover != nil { - retErr = errors.Join(retErr, sharkyRecover.Save()) - } - }() - - return epochMigration(ctx, basePath, opts.StateStore, store, rs, sharkyRecover, logger) + return transaction.NewStorage(sharky, store), closer(store, sharky, recoveryCloser), nil } const lockKeyNewSession string = "new_session" @@ -482,9 +407,8 @@ type DB struct { logger log.Logger tracer *tracing.Tracer - metrics metrics - - repo storage.Repository + metrics metrics + storage transaction.Storage lock *multex.Multex cacheObj *cache.Cache retrieval retrieval.Interface @@ -516,7 +440,7 @@ type workerOpts struct { // component stores. func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) { var ( - repo storage.Repository + st transaction.Storage err error dbCloser io.Closer ) @@ -532,28 +456,13 @@ func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) { metrics := newMetrics() opts.LdbStats.CompareAndSwap(nil, &metrics.LevelDBStats) - locker := func(addr swarm.Address) func() { - lock.Lock(addr.ByteString()) - return func() { - lock.Unlock(addr.ByteString()) - } - } - if dirPath == "" { - repo, dbCloser, err = initInmemRepository(locker) + st, dbCloser, err = initInmemRepository() if err != nil { return nil, err } } else { - // only perform migration if not done already - if _, err := os.Stat(path.Join(dirPath, indexPath)); err != nil { - err = performEpochMigration(ctx, dirPath, opts) - if err != nil { - return nil, err - } - } - - repo, dbCloser, err = initDiskRepository(ctx, dirPath, locker, opts) + st, dbCloser, err = initDiskRepository(ctx, dirPath, opts) if err != nil { return nil, err } @@ -563,16 +472,19 @@ func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) { if dirPath != "" { sharkyBasePath = path.Join(dirPath, sharkyPath) } - err = migration.Migrate( - repo.IndexStore(), - "migration", - localmigration.AfterInitSteps(sharkyBasePath, sharkyNoOfShards, repo.ChunkStore()), - ) + + err = st.Run(ctx, func(s transaction.Store) error { + return migration.Migrate( + s.IndexStore(), + "migration", + localmigration.AfterInitSteps(sharkyBasePath, sharkyNoOfShards, st), + ) + }) if err != nil { return nil, err } - cacheObj, err := initCache(ctx, opts.CacheCapacity, repo) + cacheObj, err := cache.New(ctx, st.ReadOnly().IndexStore(), opts.CacheCapacity) if err != nil { return nil, err } @@ -582,10 +494,10 @@ func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) { clCtx, clCancel := context.WithCancel(ctx) db := &DB{ metrics: metrics, + storage: st, logger: logger, tracer: opts.Tracer, baseAddr: opts.Address, - repo: repo, lock: lock, cacheObj: cacheObj, retrieval: noopRetrieval{}, @@ -615,7 +527,7 @@ func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) { if opts.ReserveCapacity > 0 { rs, err := reserve.New( opts.Address, - repo.IndexStore(), + st, opts.ReserveCapacity, opts.RadiusSetter, logger, @@ -633,8 +545,8 @@ func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) { // Cleanup any dirty state in upload and pinning stores, this could happen // in case of dirty shutdowns err = errors.Join( - upload.CleanupDirty(db), - pinstore.CleanupDirty(db), + upload.CleanupDirty(db.storage), + pinstore.CleanupDirty(db.storage), ) if err != nil { return nil, err @@ -649,7 +561,7 @@ func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) { // Metrics returns set of prometheus collectors. func (db *DB) Metrics() []prometheus.Collector { collectors := m.PrometheusCollectorsFromFields(db.metrics) - if v, ok := db.repo.(m.Collector); ok { + if v, ok := db.storage.(m.Collector); ok { collectors = append(collectors, v.Metrics()...) } return collectors @@ -706,7 +618,7 @@ func (db *DB) SetRetrievalService(r retrieval.Interface) { func (db *DB) StartReserveWorker(ctx context.Context, s Syncer, radius func() (uint8, error)) { db.setSyncerOnce.Do(func() { db.syncer = s - go db.startReserveWorkers(ctx, db.opts.warmupDuration, db.opts.wakeupDuration, radius) + go db.startReserveWorkers(ctx, radius) }) } @@ -717,16 +629,18 @@ func (noopRetrieval) RetrieveChunk(_ context.Context, _ swarm.Address, _ swarm.A } func (db *DB) ChunkStore() storage.ReadOnlyChunkStore { - return db.repo.ChunkStore() + return db.storage.ReadOnly().ChunkStore() } -// Execute implements the internal.TxExecutor interface. -func (db *DB) Execute(ctx context.Context, do func(internal.Storage) error) error { - tx, commit, rollback := db.repo.NewTx(ctx) - if err := do(tx); err != nil { - return errors.Join(err, rollback()) +func (db *DB) Lock(strs ...string) func() { + for _, s := range strs { + db.lock.Lock(s) + } + return func() { + for _, s := range strs { + db.lock.Unlock(s) + } } - return commit() } type putterSession struct { diff --git a/pkg/storer/storer_test.go b/pkg/storer/storer_test.go index ce2d7eb72cf..7910ff59cbb 100644 --- a/pkg/storer/storer_test.go +++ b/pkg/storer/storer_test.go @@ -15,11 +15,12 @@ import ( "github.com/ethersphere/bee/pkg/postage" batchstore "github.com/ethersphere/bee/pkg/postage/batchstore/mock" "github.com/ethersphere/bee/pkg/storage" - "github.com/ethersphere/bee/pkg/storage/inmemchunkstore" "github.com/ethersphere/bee/pkg/storage/migration" "github.com/ethersphere/bee/pkg/storer" + "github.com/ethersphere/bee/pkg/storer/internal" cs "github.com/ethersphere/bee/pkg/storer/internal/chunkstore" pinstore "github.com/ethersphere/bee/pkg/storer/internal/pinning" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/storer/internal/upload" localmigration "github.com/ethersphere/bee/pkg/storer/migration" "github.com/ethersphere/bee/pkg/swarm" @@ -29,14 +30,14 @@ import ( func verifyChunks( t *testing.T, - repo storage.Repository, + st transaction.Storage, chunks []swarm.Chunk, has bool, ) { t.Helper() for _, ch := range chunks { - hasFound, err := repo.ChunkStore().Has(context.TODO(), ch.Address()) + hasFound, err := st.ReadOnly().ChunkStore().Has(context.TODO(), ch.Address()) if err != nil { t.Fatalf("ChunkStore.Has(...): unexpected error: %v", err) } @@ -49,13 +50,13 @@ func verifyChunks( func verifyChunkRefCount( t *testing.T, - repo storage.Repository, + st transaction.ReadOnlyStore, chunks []swarm.Chunk, ) { t.Helper() for _, ch := range chunks { - _ = repo.IndexStore().Iterate(storage.Query{ + _ = st.IndexStore().Iterate(storage.Query{ Factory: func() storage.Item { return new(cs.RetrievalIndexItem) }, }, func(r storage.Result) (bool, error) { entry := r.Entry.(*cs.RetrievalIndexItem) @@ -69,17 +70,17 @@ func verifyChunkRefCount( func verifySessionInfo( t *testing.T, - repo storage.Repository, + st transaction.Storage, sessionID uint64, chunks []swarm.Chunk, has bool, ) { t.Helper() - verifyChunks(t, repo, chunks, has) + verifyChunks(t, st, chunks, has) if has { - tagInfo, err := upload.TagInfo(repo.IndexStore(), sessionID) + tagInfo, err := upload.TagInfo(st.ReadOnly().IndexStore(), sessionID) if err != nil { t.Fatalf("upload.TagInfo(...): unexpected error: %v", err) } @@ -95,14 +96,14 @@ func verifySessionInfo( func verifyPinCollection( t *testing.T, - repo storage.Repository, + st transaction.Storage, root swarm.Chunk, chunks []swarm.Chunk, has bool, ) { t.Helper() - hasFound, err := pinstore.HasPin(repo.IndexStore(), root.Address()) + hasFound, err := pinstore.HasPin(st.ReadOnly().IndexStore(), root.Address()) if err != nil { t.Fatalf("pinstore.HasPin(...): unexpected error: %v", err) } @@ -111,7 +112,7 @@ func verifyPinCollection( t.Fatalf("unexpected pin collection state: want %t have %t", has, hasFound) } - verifyChunks(t, repo, chunks, has) + verifyChunks(t, st, chunks, has) } // TestMain exists to adjust the time.Now function to a fixed value. @@ -164,14 +165,14 @@ func TestNew(t *testing.T) { t.Parallel() lstore := makeInmemStorer(t, dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second)) - assertStorerVersion(t, lstore, "") + assertStorerVersion(t, lstore.Storage().ReadOnly().IndexStore(), "") }) t.Run("disk", func(t *testing.T) { t.Parallel() lstore := makeDiskStorer(t, dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second)) - assertStorerVersion(t, lstore, path.Join(t.TempDir(), "sharky")) + assertStorerVersion(t, lstore.Storage().ReadOnly().IndexStore(), path.Join(t.TempDir(), "sharky")) }) }) } @@ -198,16 +199,15 @@ func dbTestOps(baseAddr swarm.Address, reserveCapacity int, bs postage.Storer, r return opts } -func assertStorerVersion(t *testing.T, lstore *storer.DB, sharkyPath string) { +func assertStorerVersion(t *testing.T, r storage.Reader, sharkyPath string) { t.Helper() - current, err := migration.Version(lstore.Repo().IndexStore(), "migration") + current, err := migration.Version(r, "migration") if err != nil { t.Fatalf("migration.Version(...): unexpected error: %v", err) } - expected := migration.LatestVersion(localmigration.AfterInitSteps(sharkyPath, 4, inmemchunkstore.New())) - + expected := migration.LatestVersion(localmigration.AfterInitSteps(sharkyPath, 4, internal.NewInmemStorage())) if current != expected { t.Fatalf("storer is not migrated to latest version; got %d, expected %d", current, expected) } diff --git a/pkg/storer/subscribe_push.go b/pkg/storer/subscribe_push.go index d9c37abce28..860c20bfa42 100644 --- a/pkg/storer/subscribe_push.go +++ b/pkg/storer/subscribe_push.go @@ -37,7 +37,7 @@ func (db *DB) SubscribePush(ctx context.Context) (<-chan swarm.Chunk, func()) { var count int - err := upload.Iterate(ctx, db.repo, func(chunk swarm.Chunk) (bool, error) { + err := upload.Iterate(ctx, db.storage.ReadOnly(), func(chunk swarm.Chunk) (bool, error) { select { case chunks <- chunk: count++ diff --git a/pkg/storer/uploadstore.go b/pkg/storer/uploadstore.go index 9b5aeca4bbc..d993626a708 100644 --- a/pkg/storer/uploadstore.go +++ b/pkg/storer/uploadstore.go @@ -13,20 +13,21 @@ import ( storage "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storer/internal" pinstore "github.com/ethersphere/bee/pkg/storer/internal/pinning" + "github.com/ethersphere/bee/pkg/storer/internal/transaction" "github.com/ethersphere/bee/pkg/storer/internal/upload" "github.com/ethersphere/bee/pkg/swarm" ) -const uploadStoreKey = "uploadstore" +const uploadsLock = "pin-upload-store" // Report implements the storage.PushReporter by wrapping the internal reporter // with a transaction. func (db *DB) Report(ctx context.Context, chunk swarm.Chunk, state storage.ChunkState) error { - db.lock.Lock(uploadStoreKey) - defer db.lock.Unlock(uploadStoreKey) + unlock := db.Lock(uploadsLock) + defer unlock() - err := db.Execute(ctx, func(s internal.Storage) error { + err := db.storage.Run(ctx, func(s transaction.Store) error { return upload.Report(ctx, s, chunk, state) }) if err != nil { @@ -48,14 +49,14 @@ func (db *DB) Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession err error ) - err = db.Execute(ctx, func(txnRepo internal.Storage) error { - uploadPutter, err = upload.NewPutter(txnRepo, tagID) + err = db.storage.Run(ctx, func(s transaction.Store) error { + uploadPutter, err = upload.NewPutter(s.IndexStore(), tagID) if err != nil { return fmt.Errorf("upload.NewPutter: %w", err) } if pin { - pinningPutter, err = pinstore.NewCollection(txnRepo) + pinningPutter, err = pinstore.NewCollection(s.IndexStore()) if err != nil { return fmt.Errorf("pinstore.NewCollection: %w", err) } @@ -70,27 +71,18 @@ func (db *DB) Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession return &putterSession{ Putter: putterWithMetrics{ storage.PutterFunc(func(ctx context.Context, chunk swarm.Chunk) error { - db.lock.Lock(uploadStoreKey) - defer db.lock.Unlock(uploadStoreKey) - return db.Execute(ctx, func(s internal.Storage) error { - - b, err := s.IndexStore().Batch(ctx) - if err != nil { - return err - } - err = errors.Join( - uploadPutter.Put(ctx, s, b, chunk), + unlock := db.Lock(uploadsLock) + defer unlock() + return db.storage.Run(ctx, func(s transaction.Store) error { + return errors.Join( + uploadPutter.Put(ctx, s, chunk), func() error { if pinningPutter != nil { - return pinningPutter.Put(ctx, s, b, chunk) + return pinningPutter.Put(ctx, s, chunk) } return nil }(), ) - if err != nil { - return err - } - return b.Commit() }) }), db.metrics, @@ -98,39 +90,29 @@ func (db *DB) Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession }, done: func(address swarm.Address) error { defer db.events.Trigger(subscribePushEventKey) - db.lock.Lock(uploadStoreKey) - defer db.lock.Unlock(uploadStoreKey) - return db.Execute(ctx, func(s internal.Storage) error { - - b, err := s.IndexStore().Batch(ctx) - if err != nil { - return err - } - - err = errors.Join( - uploadPutter.Close(s, b, address), + unlock := db.Lock(uploadsLock) + defer unlock() + return db.storage.Run(ctx, func(s transaction.Store) error { + return errors.Join( + uploadPutter.Close(s.IndexStore(), address), func() error { if pinningPutter != nil { - return pinningPutter.Close(s, b, address) + return pinningPutter.Close(s.IndexStore(), address) } return nil }(), ) - if err != nil { - return err - } - return b.Commit() }) }, cleanup: func() error { defer db.events.Trigger(subscribePushEventKey) - db.lock.Lock(uploadStoreKey) - defer db.lock.Unlock(uploadStoreKey) + unlock := db.Lock(uploadsLock) + defer unlock() return errors.Join( - uploadPutter.Cleanup(db), + uploadPutter.Cleanup(db.storage), func() error { if pinningPutter != nil { - return pinningPutter.Cleanup(db) + return pinningPutter.Cleanup(db.storage) } return nil }(), @@ -141,20 +123,29 @@ func (db *DB) Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession // NewSession is the implementation of UploadStore.NewSession method. func (db *DB) NewSession() (SessionInfo, error) { - db.lock.Lock(lockKeyNewSession) - defer db.lock.Unlock(lockKeyNewSession) + unlock := db.Lock(lockKeyNewSession) + defer unlock() - return upload.NextTag(db.repo.IndexStore()) + trx, done := db.storage.NewTransaction(context.Background()) + defer done() + + info, err := upload.NextTag(trx.IndexStore()) + if err != nil { + return SessionInfo{}, err + } + return info, trx.Commit() } // Session is the implementation of the UploadStore.Session method. func (db *DB) Session(tagID uint64) (SessionInfo, error) { - return upload.TagInfo(db.repo.IndexStore(), tagID) + return upload.TagInfo(db.storage.ReadOnly().IndexStore(), tagID) } // DeleteSession is the implementation of the UploadStore.DeleteSession method. func (db *DB) DeleteSession(tagID uint64) error { - return upload.DeleteTag(db.repo.IndexStore(), tagID) + return db.storage.Run(context.Background(), func(s transaction.Store) error { + return upload.DeleteTag(s.IndexStore(), tagID) + }) } // ListSessions is the implementation of the UploadStore.ListSessions method. @@ -163,7 +154,7 @@ func (db *DB) ListSessions(offset, limit int) ([]SessionInfo, error) { limit = min(limit, maxPageSize) - tags, err := upload.ListAllTags(db.repo.IndexStore()) + tags, err := upload.ListAllTags(db.storage.ReadOnly().IndexStore()) if err != nil { return nil, err } @@ -177,5 +168,5 @@ func (db *DB) ListSessions(offset, limit int) ([]SessionInfo, error) { // BatchHint is the implementation of the UploadStore.BatchHint method. func (db *DB) BatchHint(address swarm.Address) ([]byte, error) { - return upload.BatchIDForChunk(db.repo.IndexStore(), address) + return upload.BatchIDForChunk(db.storage.ReadOnly().IndexStore(), address) } diff --git a/pkg/storer/uploadstore_test.go b/pkg/storer/uploadstore_test.go index fa267728e6e..1f15fe5401a 100644 --- a/pkg/storer/uploadstore_test.go +++ b/pkg/storer/uploadstore_test.go @@ -127,9 +127,9 @@ func testUploadStore(t *testing.T, newStorer func() (*storer.DB, error)) { t.Fatalf("session.Done(...): unexpected error: %v", err) } } - verifySessionInfo(t, lstore.Repo(), tag.TagID, tc.chunks, !tc.fail) + verifySessionInfo(t, lstore.Storage(), tag.TagID, tc.chunks, !tc.fail) if tc.pin { - verifyPinCollection(t, lstore.Repo(), tc.chunks[0], tc.chunks, !tc.fail) + verifyPinCollection(t, lstore.Storage(), tc.chunks[0], tc.chunks, !tc.fail) } }) } @@ -336,11 +336,11 @@ func TestUploadStore(t *testing.T) { return storer.New(context.Background(), "", dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second)) }) }) - t.Run("disk", func(t *testing.T) { - t.Parallel() + // t.Run("disk", func(t *testing.T) { + // t.Parallel() - testUploadStore(t, diskStorer(t, dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second))) - }) + // testUploadStore(t, diskStorer(t, dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second))) + // }) } func testReporter(t *testing.T, newStorer func() (*storer.DB, error)) { @@ -396,7 +396,7 @@ func testReporter(t *testing.T, newStorer func() (*storer.DB, error)) { t.Fatalf("unexpected tag item (-want +have):\n%s", diff) } - has, err := lstore.Repo().ChunkStore().Has(context.Background(), chunks[0].Address()) + has, err := lstore.Storage().ReadOnly().ChunkStore().Has(context.Background(), chunks[0].Address()) if err != nil { t.Fatalf("ChunkStore.Has(...): unexpected error: %v", err) } @@ -404,47 +404,6 @@ func testReporter(t *testing.T, newStorer func() (*storer.DB, error)) { t.Fatalf("expected chunk %s to not be found", chunks[0].Address()) } }) - - t.Run("rollback", func(t *testing.T) { - want := errors.New("dummy error") - lstore.SetRepoStorePutHook(func(item storage.Item) error { - if item.Namespace() == "tagItem" { - return want - } - return nil - }) - have := lstore.Report(context.Background(), chunks[1], storage.ChunkSynced) - if !errors.Is(have, want) { - t.Fatalf("unexpected error on Report: want %v have %v", want, have) - } - - wantTI := storer.SessionInfo{ - TagID: session.TagID, - Split: 0, - Seen: 0, - Sent: 0, - Synced: 1, - Stored: 0, - StartedAt: session.StartedAt, - } - - gotTI, err := lstore.Session(session.TagID) - if err != nil { - t.Fatalf("Session(...): unexpected error: %v", err) - } - - if diff := cmp.Diff(wantTI, gotTI); diff != "" { - t.Fatalf("unexpected tag item (-want +have):\n%s", diff) - } - - has, err := lstore.Repo().ChunkStore().Has(context.Background(), chunks[1].Address()) - if err != nil { - t.Fatalf("ChunkStore.Has(...): unexpected error: %v", err) - } - if !has { - t.Fatalf("expected chunk %s to be found", chunks[1].Address()) - } - }) }) } diff --git a/pkg/storer/validate.go b/pkg/storer/validate.go index 5eb5cfb5e45..2abc00ddd54 100644 --- a/pkg/storer/validate.go +++ b/pkg/storer/validate.go @@ -108,7 +108,7 @@ func validateWork(logger log.Logger, store storage.Store, readFn func(context.Co s := time.Now() - _ = chunkstore.Iterate(store, func(item *chunkstore.RetrievalIndexItem) error { + _ = chunkstore.IterateItems(store, func(item *chunkstore.RetrievalIndexItem) error { total++ return nil }) @@ -128,7 +128,7 @@ func validateWork(logger log.Logger, store storage.Store, readFn func(context.Co } count := 0 - _ = chunkstore.Iterate(store, func(item *chunkstore.RetrievalIndexItem) error { + _ = chunkstore.IterateItems(store, func(item *chunkstore.RetrievalIndexItem) error { iteratateItemsC <- item count++ if count%100_000 == 0 {