fix: remove readonly
istae committed Feb 7, 2024
1 parent 108c933 commit a5c22a0
Showing 32 changed files with 202 additions and 223 deletions.
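
The change itself is mechanical: every read-path call site that previously went through the storage's ReadOnly() view now asks the storage for its index store or chunk store directly. The sketch below illustrates the before/after shape of the call sites; the type names and method signatures are placeholders inferred from this diff, not the actual definitions in pkg/storer, so treat it as an illustration rather than the real API.

// Illustrative sketch only: the type names and signatures below are
// placeholders inferred from the call sites in this commit, not the real
// pkg/storer definitions.
package sketch

// Minimal stand-ins for the read APIs used by the call sites.
type IndexStore interface {
	Has(item any) (bool, error)
}

type ChunkStore interface {
	Has(addr []byte) (bool, error)
}

// Before this commit: readers reached the stores through an extra
// read-only view.
type readOnlyView interface {
	IndexStore() IndexStore
	ChunkStore() ChunkStore
}

type storageBefore interface {
	ReadOnly() readOnlyView
}

// After this commit: the storage exposes the read accessors directly,
// so call sites simply drop the ReadOnly() hop.
type storageAfter interface {
	IndexStore() IndexStore
	ChunkStore() ChunkStore
}

// A call site such as DB.DebugInfo therefore changes from
//
//	chunkstore.IterateChunkEntries(db.storage.ReadOnly().IndexStore(), fn)
//
// to
//
//	chunkstore.IterateChunkEntries(db.storage.IndexStore(), fn)
func readBefore(s storageBefore) (bool, error) { return s.ReadOnly().IndexStore().Has(nil) }
func readAfter(s storageAfter) (bool, error)   { return s.IndexStore().Has(nil) }

The test helpers in the diffs below (verifyCacheState, verifyCacheOrder, verifyChunksDeleted, verifyChunksExist, and the chunkstamp loaders) follow the same pattern, taking st.IndexStore() or st.ChunkStore() where they previously took the ReadOnly() equivalents.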
6 changes: 3 additions & 3 deletions pkg/storer/debug.go
@@ -60,7 +60,7 @@ func (db *DB) DebugInfo(ctx context.Context) (Info, error) {
)
eg.Go(func() error {
return chunkstore.IterateChunkEntries(
- db.storage.ReadOnly().IndexStore(),
+ db.storage.IndexStore(),
func(_ swarm.Address, isShared bool) (bool, error) {
select {
case <-ctx.Done():
@@ -84,7 +84,7 @@ func (db *DB) DebugInfo(ctx context.Context) (Info, error) {
synced uint64
)
eg.Go(func() error {
- return upload.IterateAllTagItems(db.storage.ReadOnly().IndexStore(), func(ti *upload.TagItem) (bool, error) {
+ return upload.IterateAllTagItems(db.storage.IndexStore(), func(ti *upload.TagItem) (bool, error) {
select {
case <-ctx.Done():
return true, ctx.Err()
@@ -104,7 +104,7 @@ func (db *DB) DebugInfo(ctx context.Context) (Info, error) {
)
eg.Go(func() error {
return pinstore.IterateCollectionStats(
- db.storage.ReadOnly().IndexStore(),
+ db.storage.IndexStore(),
func(stat pinstore.CollectionStat) (bool, error) {
select {
case <-ctx.Done():
4 changes: 2 additions & 2 deletions pkg/storer/internal/cache/cache.go
@@ -209,7 +209,7 @@ func (c *Cache) ShallowCopy(

for _, addr := range addrs {
entry := &cacheEntry{Address: addr, AccessTimestamp: now().UnixNano()}
- if has, err := store.ReadOnly().IndexStore().Has(entry); err == nil && has {
+ if has, err := store.IndexStore().Has(entry); err == nil && has {
// Since the caller has previously referenced the chunk (+1 refCnt), and if the chunk is already referenced
// by the cache store (+1 refCnt), then we must decrement the refCnt by one ( -1 refCnt to bring the total to +1).
// See https://github.com/ethersphere/bee/issues/4530.
@@ -271,7 +271,7 @@ func (c *Cache) removeOldest(ctx context.Context, st transaction.Storage, count
defer c.glock.Unlock()

evictItems := make([]*cacheEntry, 0, count)
- err := st.ReadOnly().IndexStore().Iterate(
+ err := st.IndexStore().Iterate(
storage.Query{
Factory: func() storage.Item { return &cacheOrderIndex{} },
ItemProperty: storage.QueryItemID,
78 changes: 39 additions & 39 deletions pkg/storer/internal/cache/cache_test.go
@@ -123,18 +123,18 @@ func TestCache(t *testing.T) {
t.Parallel()

st := newTestStorage(t)
- c, err := cache.New(context.TODO(), st.ReadOnly().IndexStore(), 10)
+ c, err := cache.New(context.TODO(), st.IndexStore(), 10)
if err != nil {
t.Fatal(err)
}
- verifyCacheState(t, st.ReadOnly().IndexStore(), c, swarm.ZeroAddress, swarm.ZeroAddress, 0)
+ verifyCacheState(t, st.IndexStore(), c, swarm.ZeroAddress, swarm.ZeroAddress, 0)
})

t.Run("putter", func(t *testing.T) {
t.Parallel()

st := newTestStorage(t)
- c, err := cache.New(context.TODO(), st.ReadOnly().IndexStore(), 10)
+ c, err := cache.New(context.TODO(), st.IndexStore(), 10)
if err != nil {
t.Fatal(err)
}
@@ -147,26 +147,26 @@ func TestCache(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), uint64(idx+1))
- verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks[:idx+1]...)
+ verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), uint64(idx+1))
+ verifyCacheOrder(t, c, st.IndexStore(), chunks[:idx+1]...)
}
})

t.Run("new cache retains state", func(t *testing.T) {
- c2, err := cache.New(context.TODO(), st.ReadOnly().IndexStore(), 10)
+ c2, err := cache.New(context.TODO(), st.IndexStore(), 10)
if err != nil {
t.Fatal(err)
}
- verifyCacheState(t, st.ReadOnly().IndexStore(), c2, chunks[0].Address(), chunks[len(chunks)-1].Address(), uint64(len(chunks)))
- verifyCacheOrder(t, c2, st.ReadOnly().IndexStore(), chunks...)
+ verifyCacheState(t, st.IndexStore(), c2, chunks[0].Address(), chunks[len(chunks)-1].Address(), uint64(len(chunks)))
+ verifyCacheOrder(t, c2, st.IndexStore(), chunks...)
})
})

t.Run("getter", func(t *testing.T) {
t.Parallel()

st := newTestStorage(t)
- c, err := cache.New(context.TODO(), st.ReadOnly().IndexStore(), 10)
+ c, err := cache.New(context.TODO(), st.IndexStore(), 10)
if err != nil {
t.Fatal(err)
}
@@ -188,8 +188,8 @@ func TestCache(t *testing.T) {
if !readChunk.Equal(ch) {
t.Fatalf("incorrect chunk: %s", ch.Address())
}
- verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), uint64(idx+1))
- verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks[:idx+1]...)
+ verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), uint64(idx+1))
+ verifyCacheOrder(t, c, st.IndexStore(), chunks[:idx+1]...)
}
})

@@ -207,13 +207,13 @@ func TestCache(t *testing.T) {
}
if idx == 0 {
// once we access the first entry, the top will change
- verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[9].Address(), chunks[idx].Address(), 10)
+ verifyCacheState(t, st.IndexStore(), c, chunks[9].Address(), chunks[idx].Address(), 10)
} else {
- verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), 10)
+ verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[idx].Address(), 10)
}
newOrder = append(newOrder, chunks[idx])
}
- verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), newOrder...)
+ verifyCacheOrder(t, c, st.IndexStore(), newOrder...)
})

t.Run("not in chunkstore returns error", func(t *testing.T) {
@@ -227,7 +227,7 @@ func TestCache(t *testing.T) {
})

t.Run("not in cache doesnt affect state", func(t *testing.T) {
- state := c.State(st.ReadOnly().IndexStore())
+ state := c.State(st.IndexStore())

for i := 0; i < 5; i++ {
extraChunk := chunktest.GenerateTestRandomChunk()
@@ -245,7 +245,7 @@ func TestCache(t *testing.T) {
if !readChunk.Equal(extraChunk) {
t.Fatalf("incorrect chunk: %s", extraChunk.Address())
}
- verifyCacheState(t, st.ReadOnly().IndexStore(), c, state.Head, state.Tail, state.Size)
+ verifyCacheState(t, st.IndexStore(), c, state.Head, state.Tail, state.Size)
}
})
})
@@ -255,7 +255,7 @@ func TestCache(t *testing.T) {
t.Parallel()

st := newTestStorage(t)
- c, err := cache.New(context.TODO(), st.ReadOnly().IndexStore(), 10)
+ c, err := cache.New(context.TODO(), st.IndexStore(), 10)
if err != nil {
t.Fatal(err)
}
@@ -281,7 +281,7 @@ func TestCache(t *testing.T) {
}

// state should be preserved on failure
- verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[4].Address(), 5)
+ verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[4].Address(), 5)
})

t.Run("get error handling", func(t *testing.T) {
@@ -291,7 +291,7 @@ func TestCache(t *testing.T) {
}

// state should be preserved on failure
- verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[4].Address(), 5)
+ verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[4].Address(), 5)
})
})
}
@@ -300,7 +300,7 @@ func TestRemoveOldest(t *testing.T) {
t.Parallel()

st := newTestStorage(t)
- c, err := cache.New(context.Background(), st.ReadOnly().IndexStore(), 10)
+ c, err := cache.New(context.Background(), st.IndexStore(), 10)
if err != nil {
t.Fatal(err)
}
@@ -314,24 +314,24 @@ func TestRemoveOldest(t *testing.T) {
}
}

- verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[29].Address(), 30)
- verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks...)
+ verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[29].Address(), 30)
+ verifyCacheOrder(t, c, st.IndexStore(), chunks...)

err = c.RemoveOldestMaxBatch(context.Background(), st, 30, 5)
if err != nil {
t.Fatal(err)
}

- verifyCacheState(t, st.ReadOnly().IndexStore(), c, swarm.ZeroAddress, swarm.ZeroAddress, 0)
+ verifyCacheState(t, st.IndexStore(), c, swarm.ZeroAddress, swarm.ZeroAddress, 0)

- verifyChunksDeleted(t, st.ReadOnly().ChunkStore(), chunks...)
+ verifyChunksDeleted(t, st.ChunkStore(), chunks...)
}

func TestShallowCopy(t *testing.T) {
t.Parallel()

st := newTestStorage(t)
- c, err := cache.New(context.Background(), st.ReadOnly().IndexStore(), 10)
+ c, err := cache.New(context.Background(), st.IndexStore(), 10)
if err != nil {
t.Fatal(err)
}
@@ -357,17 +357,17 @@ func TestShallowCopy(t *testing.T) {
t.Fatal(err)
}

- verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[9].Address(), 10)
- verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks...)
+ verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[9].Address(), 10)
+ verifyCacheOrder(t, c, st.IndexStore(), chunks...)

// move again, should be no-op
err = c.ShallowCopy(context.Background(), st, chunksToMove...)
if err != nil {
t.Fatal(err)
}

- verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks[9].Address(), 10)
- verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks...)
+ verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks[9].Address(), 10)
+ verifyCacheOrder(t, c, st.IndexStore(), chunks...)

chunks1 := chunktest.GenerateTestRandomChunks(10)
chunksToMove1 := make([]swarm.Address, 0, 10)
@@ -390,22 +390,22 @@ func TestShallowCopy(t *testing.T) {
t.Fatal(err)
}

- verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[0].Address(), chunks1[9].Address(), 20)
- verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), append(chunks, chunks1...)...)
+ verifyCacheState(t, st.IndexStore(), c, chunks[0].Address(), chunks1[9].Address(), 20)
+ verifyCacheOrder(t, c, st.IndexStore(), append(chunks, chunks1...)...)

err = c.RemoveOldest(context.Background(), st, 10)
if err != nil {
t.Fatal(err)
}

- verifyChunksDeleted(t, st.ReadOnly().ChunkStore(), chunks...)
+ verifyChunksDeleted(t, st.ChunkStore(), chunks...)
}

func TestShallowCopyOverCap(t *testing.T) {
t.Parallel()

st := newTestStorage(t)
- c, err := cache.New(context.Background(), st.ReadOnly().IndexStore(), 10)
+ c, err := cache.New(context.Background(), st.IndexStore(), 10)
if err != nil {
t.Fatal(err)
}
@@ -432,22 +432,22 @@ func TestShallowCopyOverCap(t *testing.T) {
t.Fatal(err)
}

- verifyCacheState(t, st.ReadOnly().IndexStore(), c, chunks[5].Address(), chunks[14].Address(), 10)
- verifyCacheOrder(t, c, st.ReadOnly().IndexStore(), chunks[5:15]...)
+ verifyCacheState(t, st.IndexStore(), c, chunks[5].Address(), chunks[14].Address(), 10)
+ verifyCacheOrder(t, c, st.IndexStore(), chunks[5:15]...)

err = c.RemoveOldest(context.Background(), st, 5)
if err != nil {
t.Fatal(err)
}

- verifyChunksDeleted(t, st.ReadOnly().ChunkStore(), chunks[5:10]...)
+ verifyChunksDeleted(t, st.ChunkStore(), chunks[5:10]...)
}

func TestShallowCopyAlreadyCached(t *testing.T) {
t.Parallel()

st := newTestStorage(t)
- c, err := cache.New(context.Background(), st.ReadOnly().IndexStore(), 1000)
+ c, err := cache.New(context.Background(), st.IndexStore(), 1000)
if err != nil {
t.Fatal(err)
}
@@ -478,14 +478,14 @@ func TestShallowCopyAlreadyCached(t *testing.T) {
t.Fatal(err)
}

- verifyChunksExist(t, st.ReadOnly().ChunkStore(), chunks...)
+ verifyChunksExist(t, st.ChunkStore(), chunks...)

err = c.RemoveOldest(context.Background(), st, 10)
if err != nil {
t.Fatal(err)
}

- verifyChunksDeleted(t, st.ReadOnly().ChunkStore(), chunks...)
+ verifyChunksDeleted(t, st.ChunkStore(), chunks...)
}

func verifyCacheState(
14 changes: 7 additions & 7 deletions pkg/storer/internal/chunkstamp/chunkstamp_test.go
@@ -150,7 +150,7 @@ func TestStoreLoadDelete(t *testing.T) {

have := want.Clone()

- if err := ts.ReadOnly().IndexStore().Get(have); !errors.Is(err, storage.ErrNotFound) {
+ if err := ts.IndexStore().Get(have); !errors.Is(err, storage.ErrNotFound) {
t.Fatalf("Get(...): unexpected error: have: %v; want: %v", err, storage.ErrNotFound)
}

@@ -160,7 +160,7 @@ func TestStoreLoadDelete(t *testing.T) {
t.Fatalf("Store(...): unexpected error: %v", err)
}

- if err := ts.ReadOnly().IndexStore().Get(have); err != nil {
+ if err := ts.IndexStore().Get(have); err != nil {
t.Fatalf("Get(...): unexpected error: %v", err)
}

@@ -176,7 +176,7 @@ func TestStoreLoadDelete(t *testing.T) {
t.Run("load stored chunk stamp", func(t *testing.T) {
want := chunk.Stamp()

- have, err := chunkstamp.Load(ts.ReadOnly().IndexStore(), ns, chunk.Address())
+ have, err := chunkstamp.Load(ts.IndexStore(), ns, chunk.Address())
if err != nil {
t.Fatalf("Load(...): unexpected error: %v", err)
}
@@ -189,7 +189,7 @@ func TestStoreLoadDelete(t *testing.T) {
t.Run("load stored chunk stamp with batch id", func(t *testing.T) {
want := chunk.Stamp()

- have, err := chunkstamp.LoadWithBatchID(ts.ReadOnly().IndexStore(), ns, chunk.Address(), chunk.Stamp().BatchID())
+ have, err := chunkstamp.LoadWithBatchID(ts.IndexStore(), ns, chunk.Address(), chunk.Stamp().BatchID())
if err != nil {
t.Fatalf("LoadWithBatchID(...): unexpected error: %v", err)
}
@@ -214,7 +214,7 @@ func TestStoreLoadDelete(t *testing.T) {
}
}

- have, err := chunkstamp.LoadWithBatchID(ts.ReadOnly().IndexStore(), ns, chunk.Address(), chunk.Stamp().BatchID())
+ have, err := chunkstamp.LoadWithBatchID(ts.IndexStore(), ns, chunk.Address(), chunk.Stamp().BatchID())
if !errors.Is(err, storage.ErrNotFound) {
t.Fatalf("Load(...): unexpected error: %v", err)
}
@@ -237,7 +237,7 @@ func TestStoreLoadDelete(t *testing.T) {
t.Fatalf("DeleteAll(...): unexpected error: %v", err)
}

- have, err := chunkstamp.Load(ts.ReadOnly().IndexStore(), ns, chunk.Address())
+ have, err := chunkstamp.Load(ts.IndexStore(), ns, chunk.Address())
if !errors.Is(err, storage.ErrNotFound) {
t.Fatalf("Load(...): unexpected error: %v", err)
}
@@ -246,7 +246,7 @@ func TestStoreLoadDelete(t *testing.T) {
}

cnt := 0
- err = ts.ReadOnly().IndexStore().Iterate(
+ err = ts.IndexStore().Iterate(
storage.Query{
Factory: func() storage.Item {
return new(chunkstamp.Item)
Diffs for the remaining 28 changed files are not shown here.
