diff --git a/pkg/storer/debug.go b/pkg/storer/debug.go index 8f3685df2b7..0be502fafd8 100644 --- a/pkg/storer/debug.go +++ b/pkg/storer/debug.go @@ -60,7 +60,7 @@ func (db *DB) DebugInfo(ctx context.Context) (Info, error) { ) eg.Go(func() error { return chunkstore.IterateChunkEntries( - db.storage.ReadOnly().IndexStore(), + db.storage.IndexStore(), func(_ swarm.Address, isShared bool) (bool, error) { select { case <-ctx.Done(): @@ -84,7 +84,7 @@ func (db *DB) DebugInfo(ctx context.Context) (Info, error) { synced uint64 ) eg.Go(func() error { - return upload.IterateAllTagItems(db.storage.ReadOnly().IndexStore(), func(ti *upload.TagItem) (bool, error) { + return upload.IterateAllTagItems(db.storage.IndexStore(), func(ti *upload.TagItem) (bool, error) { select { case <-ctx.Done(): return true, ctx.Err() @@ -104,7 +104,7 @@ func (db *DB) DebugInfo(ctx context.Context) (Info, error) { ) eg.Go(func() error { return pinstore.IterateCollectionStats( - db.storage.ReadOnly().IndexStore(), + db.storage.IndexStore(), func(stat pinstore.CollectionStat) (bool, error) { select { case <-ctx.Done(): diff --git a/pkg/storer/internal/cache/cache.go b/pkg/storer/internal/cache/cache.go index 5cf57589722..958dcfe2360 100644 --- a/pkg/storer/internal/cache/cache.go +++ b/pkg/storer/internal/cache/cache.go @@ -209,7 +209,7 @@ func (c *Cache) ShallowCopy( for _, addr := range addrs { entry := &cacheEntry{Address: addr, AccessTimestamp: now().UnixNano()} - if has, err := store.ReadOnly().IndexStore().Has(entry); err == nil && has { + if has, err := store.IndexStore().Has(entry); err == nil && has { // Since the caller has previously referenced the chunk (+1 refCnt), and if the chunk is already referenced // by the cache store (+1 refCnt), then we must decrement the refCnt by one ( -1 refCnt to bring the total to +1). // See https://github.com/ethersphere/bee/issues/4530. @@ -271,7 +271,7 @@ func (c *Cache) removeOldest(ctx context.Context, st transaction.Storage, count defer c.glock.Unlock() evictItems := make([]*cacheEntry, 0, count) - err := st.ReadOnly().IndexStore().Iterate( + err := st.IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return &cacheOrderIndex{} }, ItemProperty: storage.QueryItemID, diff --git a/pkg/storer/internal/cache/cache_test.go b/pkg/storer/internal/cache/cache_test.go index b961f5e7153..bc1c129ceef 100644 --- a/pkg/storer/internal/cache/cache_test.go +++ b/pkg/storer/internal/cache/cache_test.go @@ -123,18 +123,18 @@ func TestCache(t *testing.T) { t.Parallel() st := newTestStorage(t) - c, err := cache.New(context.TODO(), st.ReadOnly().IndexStore(), 10) + c, err := cache.New(context.TODO(), st.IndexStore(), 10) if err != nil { t.Fatal(err) } - verifyCacheState(t, st.ReadOnly().IndexStore(), c, swarm.ZeroAddress, swarm.ZeroAddress, 0) + verifyCacheState(t, st.IndexStore(), c, swarm.ZeroAddress, swarm.ZeroAddress, 0) }) t.Run("putter", func(t *testing.T) { t.Parallel() st := newTestStorage(t) - c, err := cache.New(context.TODO(), st.ReadOnly().IndexStore(), 10) + c, err := cache.New(context.TODO(), st.IndexStore(), 10) if err != nil { t.Fatal(err) } @@ -147,18 +147,18 @@ func TestCache(t *testing.T) { if err != nil { t.Fatal(err) } - verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), uint64(idx+1)) - verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks[:idx+1]...) + verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), uint64(idx+1)) + verifyCacheOrder(t, c, st.IndexStore(), chunks[:idx+1]...) 
} }) t.Run("new cache retains state", func(t *testing.T) { - c2, err := cache.New(context.TODO(), st.ReadOnly().IndexStore(), 10) + c2, err := cache.New(context.TODO(), st.IndexStore(), 10) if err != nil { t.Fatal(err) } - verifyCacheState(t, st.ReadOnly().IndexStore(), c2, chunks[0].Address(), chunks[len(chunks)-1].Address(), uint64(len(chunks))) - verifyCacheOrder(t, c2, st.ReadOnly().IndexStore(), chunks...) + verifyCacheState(t, st.IndexStore(), c2, chunks[0].Address(), chunks[len(chunks)-1].Address(), uint64(len(chunks))) + verifyCacheOrder(t, c2, st.IndexStore(), chunks...) }) }) @@ -166,7 +166,7 @@ func TestCache(t *testing.T) { t.Parallel() st := newTestStorage(t) - c, err := cache.New(context.TODO(), st.ReadOnly().IndexStore(), 10) + c, err := cache.New(context.TODO(), st.IndexStore(), 10) if err != nil { t.Fatal(err) } @@ -188,8 +188,8 @@ func TestCache(t *testing.T) { if !readChunk.Equal(ch) { t.Fatalf("incorrect chunk: %s", ch.Address()) } - verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), uint64(idx+1)) - verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks[:idx+1]...) + verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), uint64(idx+1)) + verifyCacheOrder(t, c, st.IndexStore(), chunks[:idx+1]...) } }) @@ -207,13 +207,13 @@ func TestCache(t *testing.T) { } if idx == 0 { // once we access the first entry, the top will change - verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[9].Address(), chunks[idx].Address(), 10) + verifyCacheState(t, st.IndexStore(), c, chunks[9].Address(), chunks[idx].Address(), 10) } else { - verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), 10) + verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), 10) } newOrder = append(newOrder, chunks[idx]) } - verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), newOrder...) + verifyCacheOrder(t, c, st.IndexStore(), newOrder...) 
}) t.Run("not in chunkstore returns error", func(t *testing.T) { @@ -227,7 +227,7 @@ func TestCache(t *testing.T) { }) t.Run("not in cache doesnt affect state", func(t *testing.T) { - state := c.State(st.ReadOnly().IndexStore()) + state := c.State(st.IndexStore()) for i := 0; i < 5; i++ { extraChunk := chunktest.GenerateTestRandomChunk() @@ -245,7 +245,7 @@ func TestCache(t *testing.T) { if !readChunk.Equal(extraChunk) { t.Fatalf("incorrect chunk: %s", extraChunk.Address()) } - verifyCacheState(t, st.ReadOnly().IndexStore(), c, state.Head, state.Tail, state.Size) + verifyCacheState(t, st.IndexStore(), c, state.Head, state.Tail, state.Size) } }) }) @@ -255,7 +255,7 @@ func TestCache(t *testing.T) { t.Parallel() st := newTestStorage(t) - c, err := cache.New(context.TODO(), st.ReadOnly().IndexStore(), 10) + c, err := cache.New(context.TODO(), st.IndexStore(), 10) if err != nil { t.Fatal(err) } @@ -281,7 +281,7 @@ func TestCache(t *testing.T) { } // state should be preserved on failure - verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[4].Address(), 5) + verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[4].Address(), 5) }) t.Run("get error handling", func(t *testing.T) { @@ -291,7 +291,7 @@ func TestCache(t *testing.T) { } // state should be preserved on failure - verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[4].Address(), 5) + verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[4].Address(), 5) }) }) } @@ -300,7 +300,7 @@ func TestRemoveOldest(t *testing.T) { t.Parallel() st := newTestStorage(t) - c, err := cache.New(context.Background(), st.ReadOnly().IndexStore(), 10) + c, err := cache.New(context.Background(), st.IndexStore(), 10) if err != nil { t.Fatal(err) } @@ -314,24 +314,24 @@ func TestRemoveOldest(t *testing.T) { } } - verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[29].Address(), 30) - verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks...) + verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[29].Address(), 30) + verifyCacheOrder(t, c, st.IndexStore(), chunks...) err = c.RemoveOldestMaxBatch(context.Background(), st, 30, 5) if err != nil { t.Fatal(err) } - verifyCacheState(t, st.ReadOnly().IndexStore(), c, swarm.ZeroAddress, swarm.ZeroAddress, 0) + verifyCacheState(t, st.IndexStore(), c, swarm.ZeroAddress, swarm.ZeroAddress, 0) - verifyChunksDeleted(t, st.ReadOnly().ChunkStore(), chunks...) + verifyChunksDeleted(t, st.ChunkStore(), chunks...) } func TestShallowCopy(t *testing.T) { t.Parallel() st := newTestStorage(t) - c, err := cache.New(context.Background(), st.ReadOnly().IndexStore(), 10) + c, err := cache.New(context.Background(), st.IndexStore(), 10) if err != nil { t.Fatal(err) } @@ -357,8 +357,8 @@ func TestShallowCopy(t *testing.T) { t.Fatal(err) } - verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[9].Address(), 10) - verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks...) + verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[9].Address(), 10) + verifyCacheOrder(t, c, st.IndexStore(), chunks...) // move again, should be no-op err = c.ShallowCopy(context.Background(), st, chunksToMove...) @@ -366,8 +366,8 @@ func TestShallowCopy(t *testing.T) { t.Fatal(err) } - verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[9].Address(), 10) - verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks...) 
+ verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[9].Address(), 10) + verifyCacheOrder(t, c, st.IndexStore(), chunks...) chunks1 := chunktest.GenerateTestRandomChunks(10) chunksToMove1 := make([]swarm.Address, 0, 10) @@ -390,22 +390,22 @@ func TestShallowCopy(t *testing.T) { t.Fatal(err) } - verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks1[9].Address(), 20) - verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), append(chunks, chunks1...)...) + verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks1[9].Address(), 20) + verifyCacheOrder(t, c, st.IndexStore(), append(chunks, chunks1...)...) err = c.RemoveOldest(context.Background(), st, 10) if err != nil { t.Fatal(err) } - verifyChunksDeleted(t, st.ReadOnly().ChunkStore(), chunks...) + verifyChunksDeleted(t, st.ChunkStore(), chunks...) } func TestShallowCopyOverCap(t *testing.T) { t.Parallel() st := newTestStorage(t) - c, err := cache.New(context.Background(), st.ReadOnly().IndexStore(), 10) + c, err := cache.New(context.Background(), st.IndexStore(), 10) if err != nil { t.Fatal(err) } @@ -432,22 +432,22 @@ func TestShallowCopyOverCap(t *testing.T) { t.Fatal(err) } - verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[5].Address(), chunks[14].Address(), 10) - verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks[5:15]...) + verifyCacheState(t, st.IndexStore(), c, chunks[5].Address(), chunks[14].Address(), 10) + verifyCacheOrder(t, c, st.IndexStore(), chunks[5:15]...) err = c.RemoveOldest(context.Background(), st, 5) if err != nil { t.Fatal(err) } - verifyChunksDeleted(t, st.ReadOnly().ChunkStore(), chunks[5:10]...) + verifyChunksDeleted(t, st.ChunkStore(), chunks[5:10]...) } func TestShallowCopyAlreadyCached(t *testing.T) { t.Parallel() st := newTestStorage(t) - c, err := cache.New(context.Background(), st.ReadOnly().IndexStore(), 1000) + c, err := cache.New(context.Background(), st.IndexStore(), 1000) if err != nil { t.Fatal(err) } @@ -478,14 +478,14 @@ func TestShallowCopyAlreadyCached(t *testing.T) { t.Fatal(err) } - verifyChunksExist(t, st.ReadOnly().ChunkStore(), chunks...) + verifyChunksExist(t, st.ChunkStore(), chunks...) err = c.RemoveOldest(context.Background(), st, 10) if err != nil { t.Fatal(err) } - verifyChunksDeleted(t, st.ReadOnly().ChunkStore(), chunks...) + verifyChunksDeleted(t, st.ChunkStore(), chunks...) 
} func verifyCacheState( diff --git a/pkg/storer/internal/chunkstamp/chunkstamp_test.go b/pkg/storer/internal/chunkstamp/chunkstamp_test.go index ff4277fed80..dea100d6909 100644 --- a/pkg/storer/internal/chunkstamp/chunkstamp_test.go +++ b/pkg/storer/internal/chunkstamp/chunkstamp_test.go @@ -150,7 +150,7 @@ func TestStoreLoadDelete(t *testing.T) { have := want.Clone() - if err := ts.ReadOnly().IndexStore().Get(have); !errors.Is(err, storage.ErrNotFound) { + if err := ts.IndexStore().Get(have); !errors.Is(err, storage.ErrNotFound) { t.Fatalf("Get(...): unexpected error: have: %v; want: %v", err, storage.ErrNotFound) } @@ -160,7 +160,7 @@ func TestStoreLoadDelete(t *testing.T) { t.Fatalf("Store(...): unexpected error: %v", err) } - if err := ts.ReadOnly().IndexStore().Get(have); err != nil { + if err := ts.IndexStore().Get(have); err != nil { t.Fatalf("Get(...): unexpected error: %v", err) } @@ -176,7 +176,7 @@ func TestStoreLoadDelete(t *testing.T) { t.Run("load stored chunk stamp", func(t *testing.T) { want := chunk.Stamp() - have, err := chunkstamp.Load(ts.ReadOnly().IndexStore(), ns, chunk.Address()) + have, err := chunkstamp.Load(ts.IndexStore(), ns, chunk.Address()) if err != nil { t.Fatalf("Load(...): unexpected error: %v", err) } @@ -189,7 +189,7 @@ func TestStoreLoadDelete(t *testing.T) { t.Run("load stored chunk stamp with batch id", func(t *testing.T) { want := chunk.Stamp() - have, err := chunkstamp.LoadWithBatchID(ts.ReadOnly().IndexStore(), ns, chunk.Address(), chunk.Stamp().BatchID()) + have, err := chunkstamp.LoadWithBatchID(ts.IndexStore(), ns, chunk.Address(), chunk.Stamp().BatchID()) if err != nil { t.Fatalf("LoadWithBatchID(...): unexpected error: %v", err) } @@ -214,7 +214,7 @@ func TestStoreLoadDelete(t *testing.T) { } } - have, err := chunkstamp.LoadWithBatchID(ts.ReadOnly().IndexStore(), ns, chunk.Address(), chunk.Stamp().BatchID()) + have, err := chunkstamp.LoadWithBatchID(ts.IndexStore(), ns, chunk.Address(), chunk.Stamp().BatchID()) if !errors.Is(err, storage.ErrNotFound) { t.Fatalf("Load(...): unexpected error: %v", err) } @@ -237,7 +237,7 @@ func TestStoreLoadDelete(t *testing.T) { t.Fatalf("DeleteAll(...): unexpected error: %v", err) } - have, err := chunkstamp.Load(ts.ReadOnly().IndexStore(), ns, chunk.Address()) + have, err := chunkstamp.Load(ts.IndexStore(), ns, chunk.Address()) if !errors.Is(err, storage.ErrNotFound) { t.Fatalf("Load(...): unexpected error: %v", err) } @@ -246,7 +246,7 @@ func TestStoreLoadDelete(t *testing.T) { } cnt := 0 - err = ts.ReadOnly().IndexStore().Iterate( + err = ts.IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return new(chunkstamp.Item) diff --git a/pkg/storer/internal/chunkstore/chunkstore_test.go b/pkg/storer/internal/chunkstore/chunkstore_test.go index c4092cedbf2..cc0cb235614 100644 --- a/pkg/storer/internal/chunkstore/chunkstore_test.go +++ b/pkg/storer/internal/chunkstore/chunkstore_test.go @@ -156,7 +156,7 @@ func TestChunkStore(t *testing.T) { t.Run("get chunks", func(t *testing.T) { for _, ch := range testChunks { - readCh, err := st.ReadOnly().ChunkStore().Get(context.TODO(), ch.Address()) + readCh, err := st.ChunkStore().Get(context.TODO(), ch.Address()) if err != nil { t.Fatalf("failed getting chunk: %v", err) } @@ -168,7 +168,7 @@ func TestChunkStore(t *testing.T) { t.Run("has chunks", func(t *testing.T) { for _, ch := range testChunks { - exists, err := st.ReadOnly().ChunkStore().Has(context.TODO(), ch.Address()) + exists, err := st.ChunkStore().Has(context.TODO(), ch.Address()) if err != nil 
{ t.Fatalf("failed getting chunk: %v", err) } @@ -230,11 +230,11 @@ func TestChunkStore(t *testing.T) { for idx, ch := range testChunks { if idx%2 == 0 { // Check even numbered indexes are deleted - _, err := st.ReadOnly().ChunkStore().Get(context.TODO(), ch.Address()) + _, err := st.ChunkStore().Get(context.TODO(), ch.Address()) if !errors.Is(err, storage.ErrNotFound) { t.Fatalf("expected storage not found error found: %v", err) } - found, err := st.ReadOnly().ChunkStore().Has(context.TODO(), ch.Address()) + found, err := st.ChunkStore().Has(context.TODO(), ch.Address()) if err != nil { t.Fatalf("unexpected error in Has: %v", err) } @@ -243,14 +243,14 @@ func TestChunkStore(t *testing.T) { } } else { // Check rest of the entries are intact - readCh, err := st.ReadOnly().ChunkStore().Get(context.TODO(), ch.Address()) + readCh, err := st.ChunkStore().Get(context.TODO(), ch.Address()) if err != nil { t.Fatalf("failed getting chunk: %v", err) } if !readCh.Equal(ch) { t.Fatal("read chunk doesnt match") } - exists, err := st.ReadOnly().ChunkStore().Has(context.TODO(), ch.Address()) + exists, err := st.ChunkStore().Has(context.TODO(), ch.Address()) if err != nil { t.Fatalf("failed getting chunk: %v", err) } @@ -292,14 +292,14 @@ func TestChunkStore(t *testing.T) { t.Run("check chunks still exists", func(t *testing.T) { for idx, ch := range testChunks { if idx%2 != 0 { - readCh, err := st.ReadOnly().ChunkStore().Get(context.TODO(), ch.Address()) + readCh, err := st.ChunkStore().Get(context.TODO(), ch.Address()) if err != nil { t.Fatalf("failed getting chunk: %v", err) } if !readCh.Equal(ch) { t.Fatal("read chunk doesnt match") } - exists, err := st.ReadOnly().ChunkStore().Has(context.TODO(), ch.Address()) + exists, err := st.ChunkStore().Has(context.TODO(), ch.Address()) if err != nil { t.Fatalf("failed getting chunk: %v", err) } @@ -362,7 +362,7 @@ func TestIterateLocations(t *testing.T) { readCount := 0 respC := make(chan chunkstore.LocationResult, chunksCount) - chunkstore.IterateLocations(ctx, st.ReadOnly().IndexStore(), respC) + chunkstore.IterateLocations(ctx, st.IndexStore(), respC) for resp := range respC { assert.NoError(t, resp.Err) @@ -396,7 +396,7 @@ func TestIterateLocations_Stop(t *testing.T) { readCount := 0 respC := make(chan chunkstore.LocationResult) - chunkstore.IterateLocations(ctx, st.ReadOnly().IndexStore(), respC) + chunkstore.IterateLocations(ctx, st.IndexStore(), respC) for resp := range respC { if resp.Err != nil { diff --git a/pkg/storer/internal/internal.go b/pkg/storer/internal/internal.go index 7c5c429a803..12fe9f3b711 100644 --- a/pkg/storer/internal/internal.go +++ b/pkg/storer/internal/internal.go @@ -70,21 +70,13 @@ type inmemTrx struct { chunkStore storage.ChunkStore } -type inmemReadOnly struct { - indexStore storage.Reader - chunkStore storage.ReadOnlyChunkStore -} - -func (t *inmemReadOnly) IndexStore() storage.Reader { return t.indexStore } -func (t *inmemReadOnly) ChunkStore() storage.ReadOnlyChunkStore { return t.chunkStore } +func (t *inmemStorage) IndexStore() storage.Reader { return t.indexStore } +func (t *inmemStorage) ChunkStore() storage.ReadOnlyChunkStore { return t.chunkStore } func (t *inmemTrx) IndexStore() storage.IndexStore { return t.indexStore } func (t *inmemTrx) ChunkStore() storage.ChunkStore { return t.chunkStore } func (t *inmemTrx) Commit() error { return nil } -func (t *inmemStorage) ReadOnly() transaction.ReadOnlyStore { - return &inmemReadOnly{t.indexStore, t.chunkStore} -} func (t *inmemStorage) Close() error { return nil } func (t 
*inmemStorage) Run(ctx context.Context, f func(s transaction.Store) error) error { trx, done := t.NewTransaction(ctx) diff --git a/pkg/storer/internal/pinning/pinning.go b/pkg/storer/internal/pinning/pinning.go index 81dbd2f7f0d..24ed8cc0115 100644 --- a/pkg/storer/internal/pinning/pinning.go +++ b/pkg/storer/internal/pinning/pinning.go @@ -176,7 +176,7 @@ func (c *collectionPutter) Cleanup(st transaction.Storage) error { func CleanupDirty(st transaction.Storage) error { dirtyCollections := make([]*dirtyCollection, 0) - err := st.ReadOnly().IndexStore().Iterate( + err := st.IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return new(dirtyCollection) }, ItemProperty: storage.QueryItemID, @@ -229,7 +229,7 @@ func Pins(st storage.Reader) ([]swarm.Address, error) { func deleteCollectionChunks(ctx context.Context, st transaction.Storage, collectionUUID []byte) error { chunksToDelete := make([]*pinChunkItem, 0) - err := st.ReadOnly().IndexStore().Iterate( + err := st.IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return &pinChunkItem{UUID: collectionUUID} }, }, func(r storage.Result) (bool, error) { @@ -275,7 +275,7 @@ func deleteCollectionChunks(ctx context.Context, st transaction.Storage, collect func DeletePin(ctx context.Context, st transaction.Storage, root swarm.Address) error { collection := &pinCollectionItem{Addr: root} - err := st.ReadOnly().IndexStore().Get(collection) + err := st.IndexStore().Get(collection) if err != nil { return fmt.Errorf("pin store: failed getting collection: %w", err) } diff --git a/pkg/storer/internal/pinning/pinning_test.go b/pkg/storer/internal/pinning/pinning_test.go index 57db870a118..b0cefdbc82b 100644 --- a/pkg/storer/internal/pinning/pinning_test.go +++ b/pkg/storer/internal/pinning/pinning_test.go @@ -109,14 +109,14 @@ func TestPinStore(t *testing.T) { allChunks := append(tc.uniqueChunks, tc.root) allChunks = append(allChunks, tc.dupChunks...) 
for _, ch := range allChunks { - exists, err := st.ReadOnly().ChunkStore().Has(context.TODO(), ch.Address()) + exists, err := st.ChunkStore().Has(context.TODO(), ch.Address()) if err != nil { t.Fatal(err) } if !exists { t.Fatal("chunk should exist") } - rch, err := st.ReadOnly().ChunkStore().Get(context.TODO(), ch.Address()) + rch, err := st.ChunkStore().Get(context.TODO(), ch.Address()) if err != nil { t.Fatal(err) } @@ -129,7 +129,7 @@ func TestPinStore(t *testing.T) { }) t.Run("verify root pins", func(t *testing.T) { - pins, err := pinstore.Pins(st.ReadOnly().IndexStore()) + pins, err := pinstore.Pins(st.IndexStore()) if err != nil { t.Fatal(err) } @@ -152,7 +152,7 @@ func TestPinStore(t *testing.T) { t.Run("has pin", func(t *testing.T) { for _, tc := range tests { - found, err := pinstore.HasPin(st.ReadOnly().IndexStore(), tc.root.Address()) + found, err := pinstore.HasPin(st.IndexStore(), tc.root.Address()) if err != nil { t.Fatal(err) } @@ -165,7 +165,7 @@ func TestPinStore(t *testing.T) { t.Run("verify internal state", func(t *testing.T) { for _, tc := range tests { count := 0 - err := pinstore.IterateCollection(st.ReadOnly().IndexStore(), tc.root.Address(), func(addr swarm.Address) (bool, error) { + err := pinstore.IterateCollection(st.IndexStore(), tc.root.Address(), func(addr swarm.Address) (bool, error) { count++ return false, nil }) @@ -175,7 +175,7 @@ func TestPinStore(t *testing.T) { if count != len(tc.uniqueChunks)+2 { t.Fatalf("incorrect no of chunks in collection, expected %d found %d", len(tc.uniqueChunks)+2, count) } - stat, err := pinstore.GetStat(st.ReadOnly().IndexStore(), tc.root.Address()) + stat, err := pinstore.GetStat(st.IndexStore(), tc.root.Address()) if err != nil { t.Fatal(err) } @@ -190,7 +190,7 @@ func TestPinStore(t *testing.T) { t.Run("iterate stats", func(t *testing.T) { count, total, dup := 0, 0, 0 - err := pinstore.IterateCollectionStats(st.ReadOnly().IndexStore(), func(stat pinstore.CollectionStat) (bool, error) { + err := pinstore.IterateCollectionStats(st.IndexStore(), func(stat pinstore.CollectionStat) (bool, error) { count++ total += int(stat.Total) dup += int(stat.DupInCollection) @@ -224,7 +224,7 @@ func TestPinStore(t *testing.T) { t.Fatal(err) } - found, err := pinstore.HasPin(st.ReadOnly().IndexStore(), tests[0].root.Address()) + found, err := pinstore.HasPin(st.IndexStore(), tests[0].root.Address()) if err != nil { t.Fatal(err) } @@ -232,7 +232,7 @@ func TestPinStore(t *testing.T) { t.Fatal("expected pin to not be found") } - pins, err := pinstore.Pins(st.ReadOnly().IndexStore()) + pins, err := pinstore.Pins(st.IndexStore()) if err != nil { t.Fatal(err) } @@ -243,14 +243,14 @@ func TestPinStore(t *testing.T) { allChunks := append(tests[0].uniqueChunks, tests[0].root) allChunks = append(allChunks, tests[0].dupChunks...) 
for _, ch := range allChunks { - exists, err := st.ReadOnly().ChunkStore().Has(context.TODO(), ch.Address()) + exists, err := st.ChunkStore().Has(context.TODO(), ch.Address()) if err != nil { t.Fatal(err) } if exists { t.Fatal("chunk should not exist") } - _, err = st.ReadOnly().ChunkStore().Get(context.TODO(), ch.Address()) + _, err = st.ChunkStore().Get(context.TODO(), ch.Address()) if !errors.Is(err, storage.ErrNotFound) { t.Fatal(err) } @@ -398,7 +398,7 @@ func TestCleanup(t *testing.T) { } for _, ch := range chunks { - exists, err := st.ReadOnly().ChunkStore().Has(context.Background(), ch.Address()) + exists, err := st.ChunkStore().Has(context.Background(), ch.Address()) if err != nil { t.Fatal(err) } @@ -441,7 +441,7 @@ func TestCleanup(t *testing.T) { } for _, ch := range chunks { - exists, err := st.ReadOnly().ChunkStore().Has(context.Background(), ch.Address()) + exists, err := st.ChunkStore().Has(context.Background(), ch.Address()) if err != nil { t.Fatal(err) } diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index b33cb7c085b..96b498100eb 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -204,7 +204,7 @@ func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { func (r *Reserve) Has(addr swarm.Address, batchID []byte) (bool, error) { item := &BatchRadiusItem{Bin: swarm.Proximity(r.baseAddr.Bytes(), addr.Bytes()), BatchID: batchID, Address: addr} - return r.st.ReadOnly().IndexStore().Has(item) + return r.st.IndexStore().Has(item) } func (r *Reserve) Get(ctx context.Context, addr swarm.Address, batchID []byte) (swarm.Chunk, error) { @@ -212,18 +212,17 @@ func (r *Reserve) Get(ctx context.Context, addr swarm.Address, batchID []byte) ( defer r.multx.Unlock(string(batchID)) item := &BatchRadiusItem{Bin: swarm.Proximity(r.baseAddr.Bytes(), addr.Bytes()), BatchID: batchID, Address: addr} - st := r.st.ReadOnly() - err := st.IndexStore().Get(item) + err := r.st.IndexStore().Get(item) if err != nil { return nil, err } - stamp, err := chunkstamp.LoadWithBatchID(st.IndexStore(), reserveNamespace, addr, item.BatchID) + stamp, err := chunkstamp.LoadWithBatchID(r.st.IndexStore(), reserveNamespace, addr, item.BatchID) if err != nil { return nil, err } - ch, err := st.ChunkStore().Get(ctx, addr) + ch, err := r.st.ChunkStore().Get(ctx, addr) if err != nil { return nil, err } @@ -248,7 +247,7 @@ func (r *Reserve) EvictBatchBin( return 0, nil } - err := r.st.ReadOnly().IndexStore().Iterate(storage.Query{ + err := r.st.IndexStore().Iterate(storage.Query{ Factory: func() storage.Item { return &BatchRadiusItem{} }, Prefix: string(batchID), }, func(res storage.Result) (bool, error) { @@ -345,7 +344,7 @@ func (r *Reserve) removeChunkWithItem( } func (r *Reserve) IterateBin(bin uint8, startBinID uint64, cb func(swarm.Address, uint64, []byte) (bool, error)) error { - err := r.st.ReadOnly().IndexStore().Iterate(storage.Query{ + err := r.st.IndexStore().Iterate(storage.Query{ Factory: func() storage.Item { return &ChunkBinItem{} }, Prefix: binIDToString(bin, startBinID), PrefixAtStart: true, @@ -367,20 +366,19 @@ func (r *Reserve) IterateBin(bin uint8, startBinID uint64, cb func(swarm.Address } func (r *Reserve) IterateChunks(startBin uint8, cb func(swarm.Chunk) (bool, error)) error { - store := r.st.ReadOnly() - err := store.IndexStore().Iterate(storage.Query{ + err := r.st.IndexStore().Iterate(storage.Query{ Factory: func() storage.Item { return &ChunkBinItem{} }, Prefix: binIDToString(startBin, 0), 
PrefixAtStart: true, }, func(res storage.Result) (bool, error) { item := res.Entry.(*ChunkBinItem) - chunk, err := store.ChunkStore().Get(context.Background(), item.Address) + chunk, err := r.st.ChunkStore().Get(context.Background(), item.Address) if err != nil { return false, err } - stamp, err := chunkstamp.LoadWithBatchID(store.IndexStore(), reserveNamespace, item.Address, item.BatchID) + stamp, err := chunkstamp.LoadWithBatchID(r.st.IndexStore(), reserveNamespace, item.Address, item.BatchID) if err != nil { return false, err } @@ -396,8 +394,7 @@ func (r *Reserve) IterateChunks(startBin uint8, cb func(swarm.Chunk) (bool, erro } func (r *Reserve) IterateChunksItems(startBin uint8, cb func(*ChunkBinItem) (bool, error)) error { - store := r.st.ReadOnly() - err := store.IndexStore().Iterate(storage.Query{ + err := r.st.IndexStore().Iterate(storage.Query{ Factory: func() storage.Item { return &ChunkBinItem{} }, Prefix: binIDToString(startBin, 0), PrefixAtStart: true, @@ -447,7 +444,7 @@ func (r *Reserve) SetRadius(rad uint8) error { func (r *Reserve) LastBinIDs() ([]uint64, uint64, error) { var epoch EpochItem - err := r.st.ReadOnly().IndexStore().Get(&epoch) + err := r.st.IndexStore().Get(&epoch) if err != nil { return nil, 0, err } @@ -456,7 +453,7 @@ func (r *Reserve) LastBinIDs() ([]uint64, uint64, error) { for bin := uint8(0); bin < swarm.MaxBins; bin++ { binItem := &BinItem{Bin: bin} - err := r.st.ReadOnly().IndexStore().Get(binItem) + err := r.st.IndexStore().Get(binItem) if err != nil { if errors.Is(err, storage.ErrNotFound) { ids[bin] = 0 diff --git a/pkg/storer/internal/reserve/reserve_test.go b/pkg/storer/internal/reserve/reserve_test.go index 40fa1c563ab..1eb087a9b44 100644 --- a/pkg/storer/internal/reserve/reserve_test.go +++ b/pkg/storer/internal/reserve/reserve_test.go @@ -51,9 +51,9 @@ func TestReserve(t *testing.T) { if err != nil { t.Fatal(err) } - checkStore(t, ts.ReadOnly().IndexStore(), &reserve.BatchRadiusItem{Bin: uint8(b), BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, false) - checkStore(t, ts.ReadOnly().IndexStore(), &reserve.ChunkBinItem{Bin: uint8(b), BinID: uint64(i)}, false) - checkChunk(t, ts.ReadOnly(), ch, false) + checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: uint8(b), BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, false) + checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: uint8(b), BinID: uint64(i)}, false) + checkChunk(t, ts, ch, false) h, err := r.Has(ch.Address(), ch.Stamp().BatchID()) if err != nil { @@ -107,7 +107,7 @@ func TestReserveChunkType(t *testing.T) { } } - err = ts.ReadOnly().IndexStore().Iterate(storage.Query{ + err = ts.IndexStore().Iterate(storage.Query{ Factory: func() storage.Item { return &reserve.ChunkBinItem{} }, }, func(res storage.Result) (bool, error) { item := res.Entry.(*reserve.ChunkBinItem) @@ -164,16 +164,16 @@ func TestReplaceOldIndex(t *testing.T) { } // Chunk 1 must be gone - checkStore(t, ts.ReadOnly().IndexStore(), &reserve.BatchRadiusItem{Bin: 0, BatchID: ch1.Stamp().BatchID(), Address: ch1.Address()}, true) - checkStore(t, ts.ReadOnly().IndexStore(), &reserve.ChunkBinItem{Bin: 0, BinID: 1}, true) - checkChunk(t, ts.ReadOnly(), ch1, true) + checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: 0, BatchID: ch1.Stamp().BatchID(), Address: ch1.Address()}, true) + checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: 0, BinID: 1}, true) + checkChunk(t, ts, ch1, true) // Chunk 2 must be stored - checkStore(t, ts.ReadOnly().IndexStore(), &reserve.BatchRadiusItem{Bin: 0, 
BatchID: ch2.Stamp().BatchID(), Address: ch2.Address()}, false) - checkStore(t, ts.ReadOnly().IndexStore(), &reserve.ChunkBinItem{Bin: 0, BinID: 2}, false) - checkChunk(t, ts.ReadOnly(), ch2, false) + checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: 0, BatchID: ch2.Stamp().BatchID(), Address: ch2.Address()}, false) + checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: 0, BinID: 2}, false) + checkChunk(t, ts, ch2, false) - item, err := stampindex.Load(ts.ReadOnly().IndexStore(), "reserve", ch2) + item, err := stampindex.Load(ts.IndexStore(), "reserve", ch2) if err != nil { t.Fatal(err) } @@ -238,16 +238,16 @@ func TestEvict(t *testing.T) { if !errors.Is(err, storage.ErrNotFound) { t.Fatalf("got err %v, want %v", err, storage.ErrNotFound) } - checkStore(t, ts.ReadOnly().IndexStore(), &reserve.BatchRadiusItem{Bin: b, BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, true) - checkStore(t, ts.ReadOnly().IndexStore(), &reserve.ChunkBinItem{Bin: b, BinID: uint64(binID)}, true) - checkChunk(t, ts.ReadOnly(), ch, true) + checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: b, BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, true) + checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: b, BinID: uint64(binID)}, true) + checkChunk(t, ts, ch, true) } else { if err != nil { t.Fatal(err) } - checkStore(t, ts.ReadOnly().IndexStore(), &reserve.BatchRadiusItem{Bin: b, BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, false) - checkStore(t, ts.ReadOnly().IndexStore(), &reserve.ChunkBinItem{Bin: b, BinID: uint64(binID)}, false) - checkChunk(t, ts.ReadOnly(), ch, false) + checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: b, BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, false) + checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: b, BinID: uint64(binID)}, false) + checkChunk(t, ts, ch, false) } } } @@ -294,13 +294,13 @@ func TestEvictMaxCount(t *testing.T) { for i, ch := range chunks { if i < 10 { - checkStore(t, ts.ReadOnly().IndexStore(), &reserve.BatchRadiusItem{Bin: 0, BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, true) - checkStore(t, ts.ReadOnly().IndexStore(), &reserve.ChunkBinItem{Bin: 0, BinID: uint64(i + 1)}, true) - checkChunk(t, ts.ReadOnly(), ch, true) + checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: 0, BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, true) + checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: 0, BinID: uint64(i + 1)}, true) + checkChunk(t, ts, ch, true) } else { - checkStore(t, ts.ReadOnly().IndexStore(), &reserve.BatchRadiusItem{Bin: 1, BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, false) - checkStore(t, ts.ReadOnly().IndexStore(), &reserve.ChunkBinItem{Bin: 1, BinID: uint64(i - 10 + 1)}, false) - checkChunk(t, ts.ReadOnly(), ch, false) + checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: 1, BatchID: ch.Stamp().BatchID(), Address: ch.Address()}, false) + checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: 1, BinID: uint64(i - 10 + 1)}, false) + checkChunk(t, ts, ch, false) } } } diff --git a/pkg/storer/internal/stampindex/stampindex_test.go b/pkg/storer/internal/stampindex/stampindex_test.go index cb3ee47d7f4..10c7cab8488 100644 --- a/pkg/storer/internal/stampindex/stampindex_test.go +++ b/pkg/storer/internal/stampindex/stampindex_test.go @@ -146,7 +146,7 @@ func TestStoreLoadDelete(t *testing.T) { chunk.Stamp().BatchID(), chunk.Stamp().Index(), ) - err = ts.ReadOnly().IndexStore().Get(have) + err = ts.IndexStore().Get(have) if err != nil { 
t.Fatalf("Get(...): unexpected error: %v", err) } @@ -166,7 +166,7 @@ func TestStoreLoadDelete(t *testing.T) { want.ChunkAddress = chunk.Address() want.ChunkIsImmutable = chunk.Immutable() - have, err := stampindex.Load(ts.ReadOnly().IndexStore(), ns, chunk) + have, err := stampindex.Load(ts.IndexStore(), ns, chunk) if err != nil { t.Fatalf("Load(...): unexpected error: %v", err) } @@ -185,7 +185,7 @@ func TestStoreLoadDelete(t *testing.T) { t.Fatalf("Delete(...): unexpected error: %v", err) } - have, err := stampindex.Load(ts.ReadOnly().IndexStore(), ns, chunk) + have, err := stampindex.Load(ts.IndexStore(), ns, chunk) if have != nil { t.Fatalf("Load(...): unexpected item %v", have) } @@ -194,7 +194,7 @@ func TestStoreLoadDelete(t *testing.T) { } cnt := 0 - err = ts.ReadOnly().IndexStore().Iterate( + err = ts.IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return new(stampindex.Item) @@ -266,7 +266,7 @@ func TestLoadOrStore(t *testing.T) { assert.NoError(t, trx.Commit()) cnt := 0 - err = ts.ReadOnly().IndexStore().Iterate( + err = ts.IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return new(stampindex.Item) diff --git a/pkg/storer/internal/transaction/transaction.go b/pkg/storer/internal/transaction/transaction.go index 67429e2ff7c..e46701cd8da 100644 --- a/pkg/storer/internal/transaction/transaction.go +++ b/pkg/storer/internal/transaction/transaction.go @@ -47,8 +47,8 @@ type ReadOnlyStore interface { } type Storage interface { + ReadOnlyStore NewTransaction(context.Context) (Transaction, func()) - ReadOnly() ReadOnlyStore Run(context.Context, func(Store) error) error Close() error } @@ -109,11 +109,14 @@ func (s *store) NewTransaction(ctx context.Context) (Transaction, func()) { } } -func (s *store) ReadOnly() ReadOnlyStore { +func (s *store) IndexStore() storage.Reader { + return &indexTrx{s.bstore, nil, s.metrics} +} + +func (s *store) ChunkStore() storage.ReadOnlyChunkStore { indexStore := &indexTrx{s.bstore, nil, s.metrics} sharyTrx := &sharkyTrx{s.sharky, s.metrics, nil, nil} - - return &readOnly{indexStore, &chunkStoreTrx{indexStore, sharyTrx, s.chunkLocker, nil, s.metrics, true}} + return &chunkStoreTrx{indexStore, sharyTrx, s.chunkLocker, nil, s.metrics, true} } func (s *store) Run(ctx context.Context, f func(Store) error) error { @@ -136,19 +139,6 @@ func (s *store) Close() error { return errors.Join(s.bstore.Close(), s.sharky.Close()) } -type readOnly struct { - indexStore *indexTrx - chunkStore *chunkStoreTrx -} - -func (t *readOnly) IndexStore() storage.Reader { - return t.indexStore -} - -func (t *readOnly) ChunkStore() storage.ReadOnlyChunkStore { - return t.chunkStore -} - func (t *transaction) Commit() (err error) { defer func() { diff --git a/pkg/storer/internal/transaction/transaction_test.go b/pkg/storer/internal/transaction/transaction_test.go index 016a826b088..f8fdd06af5b 100644 --- a/pkg/storer/internal/transaction/transaction_test.go +++ b/pkg/storer/internal/transaction/transaction_test.go @@ -59,19 +59,19 @@ func Test_TransactionStorage(t *testing.T) { assert.NoError(t, tx.Commit()) item := cache.CacheEntryItem{Address: ch1.Address()} - assert.NoError(t, st.ReadOnly().IndexStore().Get(&item)) + assert.NoError(t, st.IndexStore().Get(&item)) assert.Equal(t, item, cache.CacheEntryItem{Address: ch1.Address(), AccessTimestamp: 1}) - ch1_get, err := st.ReadOnly().ChunkStore().Get(context.Background(), ch1.Address()) + ch1_get, err := st.ChunkStore().Get(context.Background(), ch1.Address()) assert.NoError(t, err) 
assert.Equal(t, ch1.Data(), ch1_get.Data()) assert.Equal(t, ch1.Address(), ch1_get.Address()) item = cache.CacheEntryItem{Address: ch2.Address()} - assert.NoError(t, st.ReadOnly().IndexStore().Get(&item)) + assert.NoError(t, st.IndexStore().Get(&item)) assert.Equal(t, item, cache.CacheEntryItem{Address: ch2.Address(), AccessTimestamp: 1}) - ch2_get, err := st.ReadOnly().ChunkStore().Get(context.Background(), ch1.Address()) + ch2_get, err := st.ChunkStore().Get(context.Background(), ch1.Address()) assert.NoError(t, err) assert.Equal(t, ch1.Data(), ch2_get.Data()) assert.Equal(t, ch1.Address(), ch2_get.Address()) @@ -92,11 +92,11 @@ func Test_TransactionStorage(t *testing.T) { done() - assert.ErrorIs(t, st.ReadOnly().IndexStore().Get(&cache.CacheEntryItem{Address: ch1.Address()}), storage.ErrNotFound) - assert.ErrorIs(t, st.ReadOnly().IndexStore().Get(&cache.CacheEntryItem{Address: ch2.Address()}), storage.ErrNotFound) - _, err := st.ReadOnly().ChunkStore().Get(context.Background(), ch1.Address()) + assert.ErrorIs(t, st.IndexStore().Get(&cache.CacheEntryItem{Address: ch1.Address()}), storage.ErrNotFound) + assert.ErrorIs(t, st.IndexStore().Get(&cache.CacheEntryItem{Address: ch2.Address()}), storage.ErrNotFound) + _, err := st.ChunkStore().Get(context.Background(), ch1.Address()) assert.ErrorIs(t, err, storage.ErrNotFound) - _, err = st.ReadOnly().ChunkStore().Get(context.Background(), ch2.Address()) + _, err = st.ChunkStore().Get(context.Background(), ch2.Address()) assert.ErrorIs(t, err, storage.ErrNotFound) }) @@ -115,19 +115,19 @@ func Test_TransactionStorage(t *testing.T) { }) item := cache.CacheEntryItem{Address: ch1.Address()} - assert.NoError(t, st.ReadOnly().IndexStore().Get(&item)) + assert.NoError(t, st.IndexStore().Get(&item)) assert.Equal(t, item, cache.CacheEntryItem{Address: ch1.Address(), AccessTimestamp: 1}) - ch1_get, err := st.ReadOnly().ChunkStore().Get(context.Background(), ch1.Address()) + ch1_get, err := st.ChunkStore().Get(context.Background(), ch1.Address()) assert.NoError(t, err) assert.Equal(t, ch1.Data(), ch1_get.Data()) assert.Equal(t, ch1.Address(), ch1_get.Address()) item = cache.CacheEntryItem{Address: ch2.Address()} - assert.NoError(t, st.ReadOnly().IndexStore().Get(&item)) + assert.NoError(t, st.IndexStore().Get(&item)) assert.Equal(t, item, cache.CacheEntryItem{Address: ch2.Address(), AccessTimestamp: 1}) - ch2_get, err := st.ReadOnly().ChunkStore().Get(context.Background(), ch1.Address()) + ch2_get, err := st.ChunkStore().Get(context.Background(), ch1.Address()) assert.NoError(t, err) assert.Equal(t, ch1.Data(), ch2_get.Data()) assert.Equal(t, ch1.Address(), ch2_get.Address()) @@ -140,11 +140,11 @@ func Test_TransactionStorage(t *testing.T) { return nil }) - assert.ErrorIs(t, st.ReadOnly().IndexStore().Get(&cache.CacheEntryItem{Address: ch1.Address()}), storage.ErrNotFound) - assert.ErrorIs(t, st.ReadOnly().IndexStore().Get(&cache.CacheEntryItem{Address: ch2.Address()}), storage.ErrNotFound) - _, err = st.ReadOnly().ChunkStore().Get(context.Background(), ch1.Address()) + assert.ErrorIs(t, st.IndexStore().Get(&cache.CacheEntryItem{Address: ch1.Address()}), storage.ErrNotFound) + assert.ErrorIs(t, st.IndexStore().Get(&cache.CacheEntryItem{Address: ch2.Address()}), storage.ErrNotFound) + _, err = st.ChunkStore().Get(context.Background(), ch1.Address()) assert.ErrorIs(t, err, storage.ErrNotFound) - _, err = st.ReadOnly().ChunkStore().Get(context.Background(), ch2.Address()) + _, err = st.ChunkStore().Get(context.Background(), ch2.Address()) assert.ErrorIs(t, 
err, storage.ErrNotFound) }) @@ -160,7 +160,7 @@ func Test_TransactionStorage(t *testing.T) { return nil }) - has, err := st.ReadOnly().ChunkStore().Has(context.Background(), ch1.Address()) + has, err := st.ChunkStore().Has(context.Background(), ch1.Address()) assert.NoError(t, err) if !has { t.Fatal("should have chunk") @@ -180,7 +180,7 @@ func Test_TransactionStorage(t *testing.T) { return nil }) - has, err := st.ReadOnly().ChunkStore().Has(context.Background(), ch1.Address()) + has, err := st.ChunkStore().Has(context.Background(), ch1.Address()) assert.NoError(t, err) if !has { t.Fatal("should NOT have chunk") diff --git a/pkg/storer/internal/upload/uploadstore.go b/pkg/storer/internal/upload/uploadstore.go index 33bb2289b25..4f840693c3c 100644 --- a/pkg/storer/internal/upload/uploadstore.go +++ b/pkg/storer/internal/upload/uploadstore.go @@ -500,12 +500,12 @@ func (u *uploadPutter) Cleanup(st transaction.Storage) error { itemsToDelete := make([]*pushItem, 0) di := &dirtyTagItem{TagID: u.tagID} - err := st.ReadOnly().IndexStore().Get(di) + err := st.IndexStore().Get(di) if err != nil { return fmt.Errorf("failed reading dirty tag while cleaning up: %w", err) } - err = st.ReadOnly().IndexStore().Iterate( + err = st.IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return &pushItem{} }, PrefixAtStart: true, @@ -584,7 +584,7 @@ func remove(st transaction.Store, address swarm.Address, batchID []byte) error { func CleanupDirty(st transaction.Storage) error { dirtyTags := make([]*dirtyTagItem, 0) - err := st.ReadOnly().IndexStore().Iterate( + err := st.IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return &dirtyTagItem{} }, }, diff --git a/pkg/storer/internal/upload/uploadstore_test.go b/pkg/storer/internal/upload/uploadstore_test.go index c3226bc2de2..72c90d8fa99 100644 --- a/pkg/storer/internal/upload/uploadstore_test.go +++ b/pkg/storer/internal/upload/uploadstore_test.go @@ -479,7 +479,7 @@ func TestChunkPutter(t *testing.T) { Address: chunk.Address(), BatchID: chunk.Stamp().BatchID(), } - err := ts.ReadOnly().IndexStore().Get(ui) + err := ts.IndexStore().Get(ui) if err != nil { t.Fatalf("Get(...): unexpected error: %v", err) } @@ -499,7 +499,7 @@ func TestChunkPutter(t *testing.T) { Address: chunk.Address(), BatchID: chunk.Stamp().BatchID(), } - err = ts.ReadOnly().IndexStore().Get(pi) + err = ts.IndexStore().Get(pi) if err != nil { t.Fatalf("Get(...): unexpected error: %v", err) } @@ -514,7 +514,7 @@ func TestChunkPutter(t *testing.T) { t.Fatalf("Get(...): unexpected UploadItem (-want +have):\n%s", diff) } - have, err := ts.ReadOnly().ChunkStore().Get(context.Background(), chunk.Address()) + have, err := ts.ChunkStore().Get(context.Background(), chunk.Address()) if err != nil { t.Fatalf("Get(...): unexpected error: %v", err) } @@ -527,7 +527,7 @@ func TestChunkPutter(t *testing.T) { t.Run("iterate all", func(t *testing.T) { count := 0 - err := ts.ReadOnly().IndexStore().Iterate( + err := ts.IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return new(upload.UploadItem) }, }, @@ -538,7 +538,7 @@ func TestChunkPutter(t *testing.T) { if synced { t.Fatal("expected synced to be false") } - has, err := ts.ReadOnly().ChunkStore().Has(context.Background(), address) + has, err := ts.ChunkStore().Has(context.Background(), address) if err != nil { t.Fatalf("unexpected error in Has(...): %v", err) } @@ -589,7 +589,7 @@ func TestChunkPutter(t *testing.T) { t.Run("iterate all tag items", func(t *testing.T) { var tagItemsCount, uploaded, synced 
uint64 - err := upload.IterateAllTagItems(ts.ReadOnly().IndexStore(), func(ti *upload.TagItem) (bool, error) { + err := upload.IterateAllTagItems(ts.IndexStore(), func(ti *upload.TagItem) (bool, error) { uploaded += ti.Split synced += ti.Synced tagItemsCount++ @@ -647,7 +647,7 @@ func TestChunkPutter(t *testing.T) { t.Fatalf("Close(...): unexpected error %v", err) } - ti, err := upload.TagInfo(ts.ReadOnly().IndexStore(), tag.TagID) + ti, err := upload.TagInfo(ts.IndexStore(), tag.TagID) if err != nil { t.Fatalf("TagInfo(...): unexpected error %v", err) } @@ -734,7 +734,7 @@ func TestChunkReporter(t *testing.T) { ti := &upload.TagItem{ TagID: tag.TagID, } - err := ts.ReadOnly().IndexStore().Get(ti) + err := ts.IndexStore().Get(ti) if err != nil { t.Fatalf("Get(...): unexpected error: %v", err) } @@ -764,7 +764,7 @@ func TestChunkReporter(t *testing.T) { Address: chunk.Address(), BatchID: chunk.Stamp().BatchID(), } - has, err := ts.ReadOnly().IndexStore().Has(ui) + has, err := ts.IndexStore().Has(ui) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -777,7 +777,7 @@ func TestChunkReporter(t *testing.T) { Address: chunk.Address(), BatchID: chunk.Stamp().BatchID(), } - has, err = ts.ReadOnly().IndexStore().Has(pi) + has, err = ts.IndexStore().Has(pi) if err != nil { t.Fatalf("Has(...): unexpected error: %v", err) } @@ -785,7 +785,7 @@ func TestChunkReporter(t *testing.T) { t.Fatalf("Has(...): expected to not be found: %s", pi) } - have, err := ts.ReadOnly().ChunkStore().Has(context.Background(), chunk.Address()) + have, err := ts.ChunkStore().Has(context.Background(), chunk.Address()) if err != nil { t.Fatalf("Get(...): unexpected error: %v", err) } @@ -851,7 +851,7 @@ func TestNextTagID(t *testing.T) { } var lastTag upload.NextTagID - err := ts.ReadOnly().IndexStore().Get(&lastTag) + err := ts.IndexStore().Get(&lastTag) if err != nil { t.Fatal(err) } @@ -880,7 +880,7 @@ func TestListTags(t *testing.T) { want[i] = tag } - have, err := upload.ListAllTags(ts.ReadOnly().IndexStore()) + have, err := upload.ListAllTags(ts.IndexStore()) if err != nil { t.Fatalf("upload.ListAllTags(): unexpected error: %v", err) } @@ -897,7 +897,7 @@ func TestIterate(t *testing.T) { ts := newTestStorage(t) t.Run("on empty storage does not call the callback fn", func(t *testing.T) { - err := upload.Iterate(context.Background(), ts.ReadOnly(), func(chunk swarm.Chunk) (bool, error) { + err := upload.Iterate(context.Background(), ts, func(chunk swarm.Chunk) (bool, error) { t.Fatal("unexpected call") return false, nil }) @@ -938,7 +938,7 @@ func TestIterate(t *testing.T) { var count int - err = upload.Iterate(context.Background(), ts.ReadOnly(), func(chunk swarm.Chunk) (bool, error) { + err = upload.Iterate(context.Background(), ts, func(chunk swarm.Chunk) (bool, error) { count++ if !chunk.Equal(chunk1) && !chunk.Equal(chunk2) { return true, fmt.Errorf("unknown chunk %s", chunk.Address()) @@ -958,7 +958,7 @@ func TestIterate(t *testing.T) { t.Fatalf("Close(...) 
error: %v", err) } - err = upload.Iterate(context.Background(), ts.ReadOnly(), func(chunk swarm.Chunk) (bool, error) { + err = upload.Iterate(context.Background(), ts, func(chunk swarm.Chunk) (bool, error) { count++ if !chunk.Equal(chunk1) && !chunk.Equal(chunk2) { return true, fmt.Errorf("unknown chunk %s", chunk.Address()) @@ -997,7 +997,7 @@ func TestDeleteTag(t *testing.T) { t.Fatalf("upload.DeleteTag(): unexpected error: %v", err) } - _, err = upload.TagInfo(ts.ReadOnly().IndexStore(), tag.TagID) + _, err = upload.TagInfo(ts.IndexStore(), tag.TagID) if !errors.Is(err, storage.ErrNotFound) { t.Fatalf("want: %v; have: %v", storage.ErrNotFound, err) } @@ -1032,7 +1032,7 @@ func TestBatchIDForChunk(t *testing.T) { t.Fatalf("Put(...): unexpected error: %v", err) } - batchID, err := upload.BatchIDForChunk(ts.ReadOnly().IndexStore(), chunk.Address()) + batchID, err := upload.BatchIDForChunk(ts.IndexStore(), chunk.Address()) if err != nil { t.Fatalf("BatchIDForChunk(...): unexpected error: %v", err) } @@ -1081,7 +1081,7 @@ func TestCleanup(t *testing.T) { } count := 0 - _ = upload.Iterate(context.Background(), ts.ReadOnly(), func(chunk swarm.Chunk) (bool, error) { + _ = upload.Iterate(context.Background(), ts, func(chunk swarm.Chunk) (bool, error) { count++ return false, nil }) @@ -1089,7 +1089,7 @@ func TestCleanup(t *testing.T) { t.Fatalf("expected to iterate 0 chunks, got: %v", count) } - if _, err := ts.ReadOnly().ChunkStore().Get(context.Background(), chunk.Address()); !errors.Is(err, storage.ErrNotFound) { + if _, err := ts.ChunkStore().Get(context.Background(), chunk.Address()); !errors.Is(err, storage.ErrNotFound) { t.Fatalf("expected chunk not found error, got: %v", err) } }) @@ -1130,7 +1130,7 @@ func TestCleanup(t *testing.T) { } count := 0 - _ = upload.Iterate(context.Background(), ts.ReadOnly(), func(chunk swarm.Chunk) (bool, error) { + _ = upload.Iterate(context.Background(), ts, func(chunk swarm.Chunk) (bool, error) { count++ return false, nil }) @@ -1138,7 +1138,7 @@ func TestCleanup(t *testing.T) { t.Fatalf("expected to iterate 0 chunks, got: %v", count) } - if _, err := ts.ReadOnly().ChunkStore().Get(context.Background(), chunk.Address()); !errors.Is(err, storage.ErrNotFound) { + if _, err := ts.ChunkStore().Get(context.Background(), chunk.Address()); !errors.Is(err, storage.ErrNotFound) { t.Fatalf("expected chunk not found error, got: %v", err) } }) diff --git a/pkg/storer/migration/step_02_test.go b/pkg/storer/migration/step_02_test.go index 091e17c476d..e9194371428 100644 --- a/pkg/storer/migration/step_02_test.go +++ b/pkg/storer/migration/step_02_test.go @@ -69,7 +69,7 @@ func Test_Step_02(t *testing.T) { // check if all entries are migrated. 
for _, entry := range addrs { cEntry := &cache.CacheEntryItem{Address: entry.address} - err := store.ReadOnly().IndexStore().Get(cEntry) + err := store.IndexStore().Get(cEntry) assert.NoError(t, err) assert.Equal(t, entry.address, cEntry.Address) assert.Greater(t, cEntry.AccessTimestamp, int64(0)) diff --git a/pkg/storer/migration/step_03.go b/pkg/storer/migration/step_03.go index 14b830afcc1..8cb49e6678f 100644 --- a/pkg/storer/migration/step_03.go +++ b/pkg/storer/migration/step_03.go @@ -52,7 +52,7 @@ func step_03( // STEP 2 var chunkBinItems []*reserve.ChunkBinItem - err = st.ReadOnly().IndexStore().Iterate( + err = st.IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return &reserve.ChunkBinItem{} }, }, @@ -90,7 +90,7 @@ func step_03( // STEP 3 var batchRadiusItems []*reserve.BatchRadiusItem - err = st.ReadOnly().IndexStore().Iterate( + err = st.IndexStore().Iterate( storage.Query{ Factory: func() storage.Item { return &reserve.BatchRadiusItem{} }, }, diff --git a/pkg/storer/migration/step_03_test.go b/pkg/storer/migration/step_03_test.go index 63adff56d14..650b2c5d0a4 100644 --- a/pkg/storer/migration/step_03_test.go +++ b/pkg/storer/migration/step_03_test.go @@ -89,7 +89,7 @@ func Test_Step_03(t *testing.T) { binIDs := make(map[uint8][]uint64) cbCount := 0 - err := store.ReadOnly().IndexStore().Iterate( + err := store.IndexStore().Iterate( storage.Query{Factory: func() storage.Item { return &reserve.ChunkBinItem{} }}, func(res storage.Result) (stop bool, err error) { cb := res.Entry.(*reserve.ChunkBinItem) @@ -117,7 +117,7 @@ func Test_Step_03(t *testing.T) { } brCount := 0 - err = store.ReadOnly().IndexStore().Iterate( + err = store.IndexStore().Iterate( storage.Query{Factory: func() storage.Item { return &reserve.BatchRadiusItem{} }}, func(res storage.Result) (stop bool, err error) { br := res.Entry.(*reserve.BatchRadiusItem) diff --git a/pkg/storer/migration/step_04.go b/pkg/storer/migration/step_04.go index fc84bda4eb1..68c99dbe8ee 100644 --- a/pkg/storer/migration/step_04.go +++ b/pkg/storer/migration/step_04.go @@ -36,7 +36,7 @@ func step_04( } locationResultC := make(chan chunkstore.LocationResult) - chunkstore.IterateLocations(context.Background(), st.ReadOnly().IndexStore(), locationResultC) + chunkstore.IterateLocations(context.Background(), st.IndexStore(), locationResultC) for res := range locationResultC { if res.Err != nil { diff --git a/pkg/storer/migration/step_04_test.go b/pkg/storer/migration/step_04_test.go index 2931d7f9193..791ecb34a19 100644 --- a/pkg/storer/migration/step_04_test.go +++ b/pkg/storer/migration/step_04_test.go @@ -68,7 +68,7 @@ func Test_Step_04(t *testing.T) { // check that the chunks are still there for _, ch := range chunks[2:] { - _, err := store2.ReadOnly().ChunkStore().Get(context.Background(), ch.Address()) + _, err := store2.ChunkStore().Get(context.Background(), ch.Address()) assert.NoError(t, err) } diff --git a/pkg/storer/migration/step_05.go b/pkg/storer/migration/step_05.go index 0b9f5a446c8..50d950ba0bf 100644 --- a/pkg/storer/migration/step_05.go +++ b/pkg/storer/migration/step_05.go @@ -35,7 +35,7 @@ func step_05(st transaction.Storage) error { close(errC) }() - err := upload.IterateAll(st.ReadOnly().IndexStore(), func(u storage.Item) (bool, error) { + err := upload.IterateAll(st.IndexStore(), func(u storage.Item) (bool, error) { select { case itemC <- u: case err := <-errC: diff --git a/pkg/storer/migration/step_05_test.go b/pkg/storer/migration/step_05_test.go index 32eedaaf514..f56c7a5b431 100644 --- 
a/pkg/storer/migration/step_05_test.go +++ b/pkg/storer/migration/step_05_test.go @@ -95,11 +95,11 @@ func Test_Step_05(t *testing.T) { t.Fatalf("close putter: %v", err) } - wantCount(t, store.ReadOnly().IndexStore(), 10) + wantCount(t, store.IndexStore(), 10) err = localmigration.Step_05(store) if err != nil { t.Fatalf("step 05: %v", err) } - wantCount(t, store.ReadOnly().IndexStore(), 0) + wantCount(t, store.IndexStore(), 0) } diff --git a/pkg/storer/pinstore.go b/pkg/storer/pinstore.go index a7a01a5cf61..791a1f8bdd6 100644 --- a/pkg/storer/pinstore.go +++ b/pkg/storer/pinstore.go @@ -92,7 +92,7 @@ func (db *DB) Pins() (address []swarm.Address, err error) { } }() - return pinstore.Pins(db.storage.ReadOnly().IndexStore()) + return pinstore.Pins(db.storage.IndexStore()) } // HasPin is the implementation of the PinStore.HasPin method. @@ -107,9 +107,9 @@ func (db *DB) HasPin(root swarm.Address) (has bool, err error) { } }() - return pinstore.HasPin(db.storage.ReadOnly().IndexStore(), root) + return pinstore.HasPin(db.storage.IndexStore(), root) } func (db *DB) IteratePinCollection(root swarm.Address, iterateFn func(swarm.Address) (bool, error)) error { - return pinstore.IterateCollection(db.storage.ReadOnly().IndexStore(), root, iterateFn) + return pinstore.IterateCollection(db.storage.IndexStore(), root, iterateFn) } diff --git a/pkg/storer/pinstore_test.go b/pkg/storer/pinstore_test.go index 5af2bf9ee7e..4fcfd5958e0 100644 --- a/pkg/storer/pinstore_test.go +++ b/pkg/storer/pinstore_test.go @@ -150,7 +150,7 @@ func testPinStore(t *testing.T, newStorer func() (*storer.DB, error)) { } verifyPinCollection(t, lstore.Storage(), chunks[0], chunks, true) - verifyChunkRefCount(t, lstore.Storage().ReadOnly(), chunks) + verifyChunkRefCount(t, lstore.Storage(), chunks) }) } diff --git a/pkg/storer/reserve.go b/pkg/storer/reserve.go index f56572ed2c6..71273bdf151 100644 --- a/pkg/storer/reserve.go +++ b/pkg/storer/reserve.go @@ -243,7 +243,7 @@ func (db *DB) evictExpiredBatches(ctx context.Context) error { func (db *DB) getExpiredBatches() ([][]byte, error) { var batchesToEvict [][]byte - err := db.storage.ReadOnly().IndexStore().Iterate(storage.Query{ + err := db.storage.IndexStore().Iterate(storage.Query{ Factory: func() storage.Item { return new(expiredBatchItem) }, ItemProperty: storage.QueryItemID, }, func(result storage.Result) (bool, error) { diff --git a/pkg/storer/reserve_test.go b/pkg/storer/reserve_test.go index a86f8c96612..75b3c48cb0b 100644 --- a/pkg/storer/reserve_test.go +++ b/pkg/storer/reserve_test.go @@ -119,14 +119,14 @@ func TestReplaceOldIndex(t *testing.T) { } // Chunk 1 must be missing - item, err := stampindex.Load(storer.Storage().ReadOnly().IndexStore(), "reserve", ch_1) + item, err := stampindex.Load(storer.Storage().IndexStore(), "reserve", ch_1) if err != nil { t.Fatal(err) } if !item.ChunkAddress.Equal(ch_2.Address()) { t.Fatalf("wanted addr %s, got %s", ch_1.Address(), item.ChunkAddress) } - _, err = chunkstamp.Load(storer.Storage().ReadOnly().IndexStore(), "reserve", ch_1.Address()) + _, err = chunkstamp.Load(storer.Storage().IndexStore(), "reserve", ch_1.Address()) if !errors.Is(err, storage.ErrNotFound) { t.Fatalf("wanted err %s, got err %s", storage.ErrNotFound, err) } @@ -658,11 +658,11 @@ func checkSaved(t *testing.T, st *storer.DB, ch swarm.Chunk, stampSaved, chunkSt if !stampSaved { stampWantedErr = storage.ErrNotFound } - _, err := stampindex.Load(st.Storage().ReadOnly().IndexStore(), "reserve", ch) + _, err := stampindex.Load(st.Storage().IndexStore(), 
"reserve", ch) if !errors.Is(err, stampWantedErr) { t.Fatalf("wanted err %s, got err %s", stampWantedErr, err) } - _, err = chunkstamp.Load(st.Storage().ReadOnly().IndexStore(), "reserve", ch.Address()) + _, err = chunkstamp.Load(st.Storage().IndexStore(), "reserve", ch.Address()) if !errors.Is(err, stampWantedErr) { t.Fatalf("wanted err %s, got err %s", stampWantedErr, err) } @@ -671,7 +671,7 @@ func checkSaved(t *testing.T, st *storer.DB, ch swarm.Chunk, stampSaved, chunkSt if !chunkStoreSaved { chunkStoreWantedErr = storage.ErrNotFound } - gotCh, err := st.Storage().ReadOnly().ChunkStore().Get(context.Background(), ch.Address()) + gotCh, err := st.Storage().ChunkStore().Get(context.Background(), ch.Address()) if !errors.Is(err, chunkStoreWantedErr) { t.Fatalf("wanted err %s, got err %s", chunkStoreWantedErr, err) } diff --git a/pkg/storer/sample.go b/pkg/storer/sample.go index a1f5c82c896..ff8cc476823 100644 --- a/pkg/storer/sample.go +++ b/pkg/storer/sample.go @@ -264,7 +264,7 @@ func (db *DB) ReserveSample( if le(item.TransformedAddress, currentMaxAddr) || len(sampleItems) < SampleSize { start := time.Now() - stamp, err := chunkstamp.LoadWithBatchID(db.storage.ReadOnly().IndexStore(), "reserve", item.ChunkAddress, item.Stamp.BatchID()) + stamp, err := chunkstamp.LoadWithBatchID(db.storage.IndexStore(), "reserve", item.ChunkAddress, item.Stamp.BatchID()) if err != nil { stats.StampLoadFailed++ db.logger.Debug("failed loading stamp", "chunk_address", item.ChunkAddress, "error", err) diff --git a/pkg/storer/storer.go b/pkg/storer/storer.go index 1b187d56406..8c4d0a3ef38 100644 --- a/pkg/storer/storer.go +++ b/pkg/storer/storer.go @@ -484,7 +484,7 @@ func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) { return nil, err } - cacheObj, err := cache.New(ctx, st.ReadOnly().IndexStore(), opts.CacheCapacity) + cacheObj, err := cache.New(ctx, st.IndexStore(), opts.CacheCapacity) if err != nil { return nil, err } @@ -629,7 +629,7 @@ func (noopRetrieval) RetrieveChunk(_ context.Context, _ swarm.Address, _ swarm.A } func (db *DB) ChunkStore() storage.ReadOnlyChunkStore { - return db.storage.ReadOnly().ChunkStore() + return db.storage.ChunkStore() } func (db *DB) Lock(strs ...string) func() { diff --git a/pkg/storer/storer_test.go b/pkg/storer/storer_test.go index 7910ff59cbb..a0bb47de0b6 100644 --- a/pkg/storer/storer_test.go +++ b/pkg/storer/storer_test.go @@ -37,7 +37,7 @@ func verifyChunks( t.Helper() for _, ch := range chunks { - hasFound, err := st.ReadOnly().ChunkStore().Has(context.TODO(), ch.Address()) + hasFound, err := st.ChunkStore().Has(context.TODO(), ch.Address()) if err != nil { t.Fatalf("ChunkStore.Has(...): unexpected error: %v", err) } @@ -80,7 +80,7 @@ func verifySessionInfo( verifyChunks(t, st, chunks, has) if has { - tagInfo, err := upload.TagInfo(st.ReadOnly().IndexStore(), sessionID) + tagInfo, err := upload.TagInfo(st.IndexStore(), sessionID) if err != nil { t.Fatalf("upload.TagInfo(...): unexpected error: %v", err) } @@ -103,7 +103,7 @@ func verifyPinCollection( ) { t.Helper() - hasFound, err := pinstore.HasPin(st.ReadOnly().IndexStore(), root.Address()) + hasFound, err := pinstore.HasPin(st.IndexStore(), root.Address()) if err != nil { t.Fatalf("pinstore.HasPin(...): unexpected error: %v", err) } @@ -165,14 +165,14 @@ func TestNew(t *testing.T) { t.Parallel() lstore := makeInmemStorer(t, dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second)) - assertStorerVersion(t, lstore.Storage().ReadOnly().IndexStore(), "") + assertStorerVersion(t, 
lstore.Storage().IndexStore(), "") }) t.Run("disk", func(t *testing.T) { t.Parallel() lstore := makeDiskStorer(t, dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second)) - assertStorerVersion(t, lstore.Storage().ReadOnly().IndexStore(), path.Join(t.TempDir(), "sharky")) + assertStorerVersion(t, lstore.Storage().IndexStore(), path.Join(t.TempDir(), "sharky")) }) }) } diff --git a/pkg/storer/subscribe_push.go b/pkg/storer/subscribe_push.go index 860c20bfa42..e4497b26938 100644 --- a/pkg/storer/subscribe_push.go +++ b/pkg/storer/subscribe_push.go @@ -37,7 +37,7 @@ func (db *DB) SubscribePush(ctx context.Context) (<-chan swarm.Chunk, func()) { var count int - err := upload.Iterate(ctx, db.storage.ReadOnly(), func(chunk swarm.Chunk) (bool, error) { + err := upload.Iterate(ctx, db.storage, func(chunk swarm.Chunk) (bool, error) { select { case chunks <- chunk: count++ diff --git a/pkg/storer/uploadstore.go b/pkg/storer/uploadstore.go index c80e736c80f..cbe0965f49d 100644 --- a/pkg/storer/uploadstore.go +++ b/pkg/storer/uploadstore.go @@ -142,7 +142,7 @@ func (db *DB) NewSession() (SessionInfo, error) { // Session is the implementation of the UploadStore.Session method. func (db *DB) Session(tagID uint64) (SessionInfo, error) { - return upload.TagInfo(db.storage.ReadOnly().IndexStore(), tagID) + return upload.TagInfo(db.storage.IndexStore(), tagID) } // DeleteSession is the implementation of the UploadStore.DeleteSession method. @@ -158,7 +158,7 @@ func (db *DB) ListSessions(offset, limit int) ([]SessionInfo, error) { limit = min(limit, maxPageSize) - tags, err := upload.ListAllTags(db.storage.ReadOnly().IndexStore()) + tags, err := upload.ListAllTags(db.storage.IndexStore()) if err != nil { return nil, err } @@ -172,5 +172,5 @@ func (db *DB) ListSessions(offset, limit int) ([]SessionInfo, error) { // BatchHint is the implementation of the UploadStore.BatchHint method. func (db *DB) BatchHint(address swarm.Address) ([]byte, error) { - return upload.BatchIDForChunk(db.storage.ReadOnly().IndexStore(), address) + return upload.BatchIDForChunk(db.storage.IndexStore(), address) } diff --git a/pkg/storer/uploadstore_test.go b/pkg/storer/uploadstore_test.go index 1a8a1c43e27..2b7fed9e06e 100644 --- a/pkg/storer/uploadstore_test.go +++ b/pkg/storer/uploadstore_test.go @@ -404,7 +404,7 @@ func testReporter(t *testing.T, newStorer func() (*storer.DB, error)) { t.Fatalf("unexpected tag item (-want +have):\n%s", diff) } - has, err := lstore.Storage().ReadOnly().ChunkStore().Has(context.Background(), chunks[0].Address()) + has, err := lstore.Storage().ChunkStore().Has(context.Background(), chunks[0].Address()) if err != nil { t.Fatalf("ChunkStore.Has(...): unexpected error: %v", err) }