From 783d3001cc121d882adf4ebb970965519355c59f Mon Sep 17 00:00:00 2001
From: istae <14264581+istae@users.noreply.github.com>
Date: Fri, 13 Oct 2023 15:11:59 +0300
Subject: [PATCH] chore: extra unit test for compaction (#4400)

---
 pkg/storer/compact_test.go | 81 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 78 insertions(+), 3 deletions(-)

diff --git a/pkg/storer/compact_test.go b/pkg/storer/compact_test.go
index 36b03f56c72..8737654547a 100644
--- a/pkg/storer/compact_test.go
+++ b/pkg/storer/compact_test.go
@@ -7,11 +7,9 @@ package storer_test
 import (
 	"bytes"
 	"context"
-	"os"
 	"testing"
 	"time"
 
-	"github.com/ethersphere/bee/pkg/log"
 	"github.com/ethersphere/bee/pkg/postage"
 	postagetesting "github.com/ethersphere/bee/pkg/postage/testing"
 	pullerMock "github.com/ethersphere/bee/pkg/puller/mock"
@@ -24,6 +22,7 @@ import (
 // The first batch is then expired, causing free slots to accumulate in sharky.
 // Next, sharky is compacted, after which, it is tested that valid chunks can still be retrieved.
 func TestCompact(t *testing.T) {
+	t.Parallel()
 
 	baseAddr := swarm.RandAddress(t)
 	ctx := context.Background()
@@ -31,7 +30,6 @@ func TestCompact(t *testing.T) {
 	opts := dbTestOps(baseAddr, 10_000, nil, nil, time.Second)
 	opts.CacheCapacity = 0
-	opts.Logger = log.NewLogger("test", log.WithSink(os.Stdout))
 	st, err := storer.New(ctx, basePath, opts)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -113,3 +111,80 @@ func TestCompact(t *testing.T) {
 		t.Fatal(err)
 	}
 }
+
+// TestCompactNoEvictions compacts a store that has no free slots to ensure that no chunks get lost.
+func TestCompactNoEvictions(t *testing.T) {
+	t.Parallel()
+
+	baseAddr := swarm.RandAddress(t)
+	ctx := context.Background()
+	basePath := t.TempDir()
+
+	opts := dbTestOps(baseAddr, 10_000, nil, nil, time.Second)
+	opts.CacheCapacity = 0
+
+	st, err := storer.New(ctx, basePath, opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	st.StartReserveWorker(ctx, pullerMock.NewMockRateReporter(0), networkRadiusFunc(0))
+
+	var chunks []swarm.Chunk
+	batches := []*postage.Batch{postagetesting.MustNewBatch(), postagetesting.MustNewBatch(), postagetesting.MustNewBatch()}
+
+	putter := st.ReservePutter()
+
+	for b := 0; b < len(batches); b++ {
+		for i := uint64(0); i < 100; i++ {
+			ch := chunk.GenerateTestRandomChunk()
+			ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batches[b].ID))
+			chunks = append(chunks, ch)
+			err := putter.Put(ctx, ch)
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+	}
+
+	if err := st.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	err = storer.Compact(ctx, basePath, opts, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	st, err = storer.New(ctx, basePath, opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	putter = st.ReservePutter()
+	for i := uint64(0); i < 100; i++ {
+		ch := chunk.GenerateTestRandomChunk()
+		ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batches[0].ID))
+		chunks = append(chunks, ch)
+		err := putter.Put(ctx, ch)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	for _, ch := range chunks {
+		has, err := st.ReserveHas(ch.Address(), ch.Stamp().BatchID())
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if !has {
+			t.Fatal("store should have chunk")
+		}
+
+		checkSaved(t, st, ch, true, true)
+	}
+
+	if err := st.Close(); err != nil {
+		t.Fatal(err)
+	}
+}