From 9f2efe21f5f09a7a44aecfe11a76bbd0ca091aab Mon Sep 17 00:00:00 2001
From: Vlad
Date: Fri, 29 Sep 2023 15:43:16 +0800
Subject: [PATCH] add cache eviction benchmarks
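
Add two benchmarks that measure the EDS store under constant cache
eviction pressure. Both restart the store with BlockstoreCacheSize set
to 1, so the blockstore cache is smaller than the working set and must
evict on every read:

- BenchmarkCacheEviction (share/eds) reads single blocks through the
  Blockstore, evicting the cached accessor on each Get.
- BenchmarkIPLDGetterOverBusyCache (share/getters) requests more EDSes
  in parallel than the cache can hold.

They can be run with standard Go tooling; the exact flags below are a
suggestion, not part of this patch:

    go test -run='^$' -bench='BenchmarkCacheEviction$' ./share/eds/
    go test -run='^$' -bench='BenchmarkIPLDGetterOverBusyCache$' ./share/getters/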
---
 share/eds/store_test.go      | 58 +++++++++++++++++++++++++++++
 share/getters/getter_test.go | 71 ++++++++++++++++++++++++++++++++++++
 2 files changed, 129 insertions(+)

diff --git a/share/eds/store_test.go b/share/eds/store_test.go
index ce42a0e0b9..7052533555 100644
--- a/share/eds/store_test.go
+++ b/share/eds/store_test.go
@@ -18,11 +18,13 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"github.com/celestiaorg/celestia-app/pkg/da"
+	dsbadger "github.com/celestiaorg/go-ds-badger4"
 	"github.com/celestiaorg/rsmt2d"
 
 	"github.com/celestiaorg/celestia-node/share"
 	"github.com/celestiaorg/celestia-node/share/eds/cache"
 	"github.com/celestiaorg/celestia-node/share/eds/edstest"
+	"github.com/celestiaorg/celestia-node/share/ipld"
 )
 
 func TestEDSStore(t *testing.T) {
@@ -465,6 +467,62 @@ func BenchmarkStore(b *testing.B) {
 	})
 }
 
+// BenchmarkCacheEviction benchmarks the time it takes to load a block into the cache when the
+// cache size is set to 1. This forces cache eviction on every read.
+// BenchmarkCacheEviction-10/128    384    3533586 ns/op (~3ms)
+func BenchmarkCacheEviction(b *testing.B) {
+	const (
+		blocks = 4
+		size   = 128
+	)
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
+	b.Cleanup(cancel)
+
+	dir := b.TempDir()
+	ds, err := dsbadger.NewDatastore(dir, &dsbadger.DefaultOptions)
+	require.NoError(b, err)
+
+	newStore := func(params *Parameters) *Store {
+		edsStore, err := NewStore(params, dir, ds)
+		require.NoError(b, err)
+		err = edsStore.Start(ctx)
+		require.NoError(b, err)
+		return edsStore
+	}
+	edsStore := newStore(DefaultParameters())
+
+	// generate EDSs and store them
+	cids := make([]cid.Cid, blocks)
+	for i := range cids {
+		eds := edstest.RandEDS(b, size)
+		dah, err := da.NewDataAvailabilityHeader(eds)
+		require.NoError(b, err)
+		err = edsStore.Put(ctx, dah.Hash(), eds)
+		require.NoError(b, err)
+
+		// store cids for read loop later
+		cids[i] = ipld.MustCidFromNamespacedSha256(dah.RowRoots[0])
+	}
+
+	// restart store to clear cache
+	require.NoError(b, edsStore.Stop(ctx))
+
+	// set BlockstoreCacheSize to 1 to force eviction on every read
+	params := DefaultParameters()
+	params.BlockstoreCacheSize = 1
+	bstore := newStore(params).Blockstore()
+
+	// start benchmark
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		h := cids[i%blocks]
+		// every read will trigger eviction
+		_, err := bstore.Get(ctx, h)
+		require.NoError(b, err)
+	}
+}
+
 func newStore(t *testing.T) (*Store, error) {
 	t.Helper()
 
diff --git a/share/getters/getter_test.go b/share/getters/getter_test.go
index bacb0a2c39..eda28fac75 100644
--- a/share/getters/getter_test.go
+++ b/share/getters/getter_test.go
@@ -3,6 +3,7 @@ package getters
 import (
 	"context"
 	"os"
+	"sync"
 	"testing"
 	"time"
 
@@ -12,7 +13,9 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
+	"github.com/celestiaorg/celestia-app/pkg/da"
 	"github.com/celestiaorg/celestia-app/pkg/wrapper"
+	dsbadger "github.com/celestiaorg/go-ds-badger4"
 	"github.com/celestiaorg/rsmt2d"
 
 	"github.com/celestiaorg/celestia-node/share"
@@ -217,6 +220,74 @@ func TestIPLDGetter(t *testing.T) {
 	})
 }
 
+// BenchmarkIPLDGetterOverBusyCache benchmarks the performance of the IPLDGetter when the
+// cache size of the underlying blockstore is less than the number of blocks being requested in
+// parallel. This is to ensure performance doesn't degrade when the cache is being frequently
+// evicted.
+// BenchmarkIPLDGetterOverBusyCache-10/128    1    12460428417 ns/op (~12s)
+func BenchmarkIPLDGetterOverBusyCache(b *testing.B) {
+	const (
+		blocks = 10
+		size   = 128
+	)
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
+	b.Cleanup(cancel)
+
+	dir := b.TempDir()
+	ds, err := dsbadger.NewDatastore(dir, &dsbadger.DefaultOptions)
+	require.NoError(b, err)
+
+	newStore := func(params *eds.Parameters) *eds.Store {
+		edsStore, err := eds.NewStore(params, dir, ds)
+		require.NoError(b, err)
+		err = edsStore.Start(ctx)
+		require.NoError(b, err)
+		return edsStore
+	}
+	edsStore := newStore(eds.DefaultParameters())
+
+	// generate EDSs and store them
+	dahs := make([]da.DataAvailabilityHeader, blocks)
+	for i := range dahs {
+		eds := edstest.RandEDS(b, size)
+		dah, err := da.NewDataAvailabilityHeader(eds)
+		require.NoError(b, err)
+		err = edsStore.Put(ctx, dah.Hash(), eds)
+		require.NoError(b, err)
+
+		// store DAHs for read loop later
+		dahs[i] = dah
+	}
+
+	// restart store to clear cache
+	require.NoError(b, edsStore.Stop(ctx))
+
+	// set BlockstoreCacheSize to 1 to force eviction on every read
+	params := eds.DefaultParameters()
+	params.BlockstoreCacheSize = 1
+	edsStore2 := newStore(params)
+	bstore := edsStore2.Blockstore()
+	bserv := ipld.NewBlockservice(bstore, offline.Exchange(bstore))
+
+	// start client
+	getter := NewIPLDGetter(bserv)
+
+	// request blocks in parallel
+	b.ResetTimer()
+	g := sync.WaitGroup{}
+	g.Add(blocks)
+	for _, h := range dahs {
+		h := h
+		go func() {
+			defer g.Done()
+			_, err := getter.GetEDS(ctx, &h)
+			require.NoError(b, err)
+		}()
+	}
+	g.Wait()
+}
+
 func randomEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, *share.Root) {
 	eds := edstest.RandEDS(t, 4)
 	dah, err := share.NewRoot(eds)
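
--
One caveat, assuming stock go-test behavior and not part of this patch:
each benchmark runs all b.N iterations against a single 30-second
context created before b.ResetTimer. Large -benchtime values, e.g.

    go test -run='^$' -bench='BenchmarkIPLDGetterOverBusyCache$' -benchtime=5x ./share/getters/

for the ~12s/op getter benchmark, can outlive that deadline, at which
point reads fail with "context deadline exceeded" instead of measuring
eviction cost.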