From 08e457202a8a62f5258908faa764c64f3d6bb7e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Tue, 16 Jul 2024 15:36:53 +0200 Subject: [PATCH 01/54] feat: gsoc --- pkg/gsoc/gsoc.go | 92 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 pkg/gsoc/gsoc.go diff --git a/pkg/gsoc/gsoc.go b/pkg/gsoc/gsoc.go new file mode 100644 index 00000000000..9ffc9718cc4 --- /dev/null +++ b/pkg/gsoc/gsoc.go @@ -0,0 +1,92 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gsoc + +import ( + "sync" + + "github.com/ethersphere/bee/v2/pkg/pushsync" + "github.com/ethersphere/bee/v2/pkg/soc" + "github.com/ethersphere/bee/v2/pkg/swarm" +) + +type Listener interface { + Register(address [32]byte, handler handler) (cleanup func()) + Handler(c soc.SOC) +} + +type listener struct { + pusher pushsync.PushSyncer + handlers map[[32]byte][]*handler + handlersMu sync.Mutex + quit chan struct{} +} + +// New returns a new pss service. +func New() Listener { + return &listener{ + handlers: make(map[[32]byte][]*handler), + quit: make(chan struct{}), + } +} + +// Register allows the definition of a Handler func for a specific topic on the pss struct. +func (l *listener) Register(address [32]byte, handler handler) (cleanup func()) { + l.handlersMu.Lock() + defer l.handlersMu.Unlock() + + l.handlers[address] = append(l.handlers[address], &handler) + + return func() { + l.handlersMu.Lock() + defer l.handlersMu.Unlock() + + h := l.handlers[address] + for i := 0; i < len(h); i++ { + if h[i] == &handler { + l.handlers[address] = append(h[:i], h[i+1:]...) + return + } + } + } +} + +// Handler is called by push/pull sync and passes the chunk its registered handler +func (l *listener) Handler(c soc.SOC) { + addr, _ := c.Address() + h := l.getHandlers([32]byte(addr.Bytes())) + if h == nil { + return // no handler + } + + var wg sync.WaitGroup + for _, hh := range h { + wg.Add(1) + go func(hh handler) { + defer wg.Done() + hh(c.WrappedChunk().Data()[swarm.SpanSize:]) + }(*hh) + } +} + +func (p *listener) getHandlers(address [32]byte) []*handler { + p.handlersMu.Lock() + defer p.handlersMu.Unlock() + + return p.handlers[address] +} + +func (l *listener) Close() error { + close(l.quit) + l.handlersMu.Lock() + defer l.handlersMu.Unlock() + + l.handlers = make(map[[32]byte][]*handler) //unset handlers on shutdown + + return nil +} + +// handler defines code to be executed upon reception of a GSOC sub message. 
+type handler func([]byte) From de66d61e6bce073044f87257242686d211871f5a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Tue, 16 Jul 2024 15:39:10 +0200 Subject: [PATCH 02/54] feat: add gsoc listener to pull and pushsync --- pkg/node/devnode.go | 2 ++ pkg/node/node.go | 7 +++++-- pkg/pullsync/pullsync.go | 25 +++++++++++++++---------- pkg/pullsync/pullsync_test.go | 3 +++ pkg/pushsync/pushsync.go | 7 ++++++- 5 files changed, 31 insertions(+), 13 deletions(-) diff --git a/pkg/node/devnode.go b/pkg/node/devnode.go index 5439fe2d2a7..db84d2190d2 100644 --- a/pkg/node/devnode.go +++ b/pkg/node/devnode.go @@ -23,6 +23,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/bzz" "github.com/ethersphere/bee/v2/pkg/crypto" "github.com/ethersphere/bee/v2/pkg/feeds/factory" + "github.com/ethersphere/bee/v2/pkg/gsoc" "github.com/ethersphere/bee/v2/pkg/log" mockP2P "github.com/ethersphere/bee/v2/pkg/p2p/mock" mockPingPong "github.com/ethersphere/bee/v2/pkg/pingpong/mock" @@ -342,6 +343,7 @@ func NewDevBee(logger log.Logger, o *DevOptions) (b *DevBee, err error) { Storer: localStore, Resolver: mockResolver, Pss: pssService, + Gsoc: gsoc.New(), FeedFactory: mockFeeds, Post: post, AccessControl: accesscontrol, diff --git a/pkg/node/node.go b/pkg/node/node.go index a84df04eca9..6a48cac4188 100644 --- a/pkg/node/node.go +++ b/pkg/node/node.go @@ -32,6 +32,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/config" "github.com/ethersphere/bee/v2/pkg/crypto" "github.com/ethersphere/bee/v2/pkg/feeds/factory" + "github.com/ethersphere/bee/v2/pkg/gsoc" "github.com/ethersphere/bee/v2/pkg/hive" "github.com/ethersphere/bee/v2/pkg/log" "github.com/ethersphere/bee/v2/pkg/metrics" @@ -889,6 +890,7 @@ func NewBee( pricing.SetPaymentThresholdObserver(acc) pssService := pss.New(pssPrivateKey, logger) + gsocService := gsoc.New() b.pssCloser = pssService validStamp := postage.ValidStamp(batchStore) @@ -942,7 +944,7 @@ func NewBee( } } - pushSyncProtocol := pushsync.New(swarmAddress, networkID, nonce, p2ps, localStore, waitNetworkRFunc, kad, o.FullNodeMode, pssService.TryUnwrap, validStamp, logger, acc, pricer, signer, tracer, warmupTime) + pushSyncProtocol := pushsync.New(swarmAddress, nonce, p2ps, localStore, kad, o.FullNodeMode, pssService.TryUnwrap, gsocService.Handler, validStamp, logger, acc, pricer, signer, tracer, warmupTime) b.pushSyncCloser = pushSyncProtocol // set the pushSyncer in the PSS @@ -956,7 +958,7 @@ func NewBee( pusherService.AddFeed(localStore.PusherFeed()) - pullSyncProtocol := pullsync.New(p2ps, localStore, pssService.TryUnwrap, validStamp, logger, pullsync.DefaultMaxPage) + pullSyncProtocol := pullsync.New(p2ps, localStore, pssService.TryUnwrap, gsocService.Handler, validStamp, logger, pullsync.DefaultMaxPage) b.pullSyncCloser = pullSyncProtocol retrieveProtocolSpec := retrieval.Protocol() @@ -1086,6 +1088,7 @@ func NewBee( Storer: localStore, Resolver: multiResolver, Pss: pssService, + Gsoc: gsocService, FeedFactory: feedFactory, Post: post, AccessControl: accesscontrol, diff --git a/pkg/pullsync/pullsync.go b/pkg/pullsync/pullsync.go index dfc24242686..49e625fef74 100644 --- a/pkg/pullsync/pullsync.go +++ b/pkg/pullsync/pullsync.go @@ -71,6 +71,7 @@ type Syncer struct { store storer.Reserve quit chan struct{} unwrap func(swarm.Chunk) + gsocHandler func(soc.SOC) validStamp postage.ValidStampFn intervalsSF singleflight.Group[string, *collectAddrsResult] syncInProgress atomic.Int32 @@ -87,21 +88,23 @@ func New( streamer p2p.Streamer, store storer.Reserve, unwrap 
func(swarm.Chunk), + gsocHandler func(soc.SOC), validStamp postage.ValidStampFn, logger log.Logger, maxPage uint64, ) *Syncer { return &Syncer{ - streamer: streamer, - store: store, - metrics: newMetrics(), - unwrap: unwrap, - validStamp: validStamp, - logger: logger.WithName(loggerName).Register(), - quit: make(chan struct{}), - maxPage: maxPage, - limiter: ratelimit.New(handleRequestsLimitRate, int(maxPage)), + streamer: streamer, + store: store, + metrics: newMetrics(), + unwrap: unwrap, + gsocHandler: gsocHandler, + validStamp: validStamp, + logger: logger.WithName(loggerName).Register(), + quit: make(chan struct{}), + maxPage: maxPage, + limiter: ratelimit.New(handleRequestsLimitRate, int(maxPage)), } } @@ -356,7 +359,9 @@ func (s *Syncer) Sync(ctx context.Context, peer swarm.Address, bin uint8, start if cac.Valid(chunk) { go s.unwrap(chunk) - } else if !soc.Valid(chunk) { + } else if chunk, err := soc.FromChunk(chunk); err == nil { + go s.gsocHandler(*chunk) + } else { s.logger.Debug("invalid cac/soc chunk", "error", swarm.ErrInvalidChunk, "peer_address", peer, "chunk", chunk) chunkErr = errors.Join(chunkErr, swarm.ErrInvalidChunk) s.metrics.ReceivedInvalidChunk.Inc() diff --git a/pkg/pullsync/pullsync_test.go b/pkg/pullsync/pullsync_test.go index e77f54705ed..68d9e04ecbc 100644 --- a/pkg/pullsync/pullsync_test.go +++ b/pkg/pullsync/pullsync_test.go @@ -17,6 +17,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/postage" postagetesting "github.com/ethersphere/bee/v2/pkg/postage/testing" "github.com/ethersphere/bee/v2/pkg/pullsync" + "github.com/ethersphere/bee/v2/pkg/soc" "github.com/ethersphere/bee/v2/pkg/storage" testingc "github.com/ethersphere/bee/v2/pkg/storage/testing" "github.com/ethersphere/bee/v2/pkg/storer" @@ -353,10 +354,12 @@ func newPullSyncWithStamperValidator( storage := mock.NewReserve(o...) 
logger := log.Noop unwrap := func(swarm.Chunk) {} + socHandler := func(soc.SOC) {} ps := pullsync.New( s, storage, unwrap, + socHandler, validStamp, logger, maxPage, diff --git a/pkg/pushsync/pushsync.go b/pkg/pushsync/pushsync.go index c687a544727..8967a5e5855 100644 --- a/pkg/pushsync/pushsync.go +++ b/pkg/pushsync/pushsync.go @@ -85,6 +85,7 @@ type PushSync struct { store Storer topologyDriver topology.Driver unwrap func(swarm.Chunk) + gsocHandler func(soc.SOC) logger log.Logger accounting accounting.Interface pricer pricer.Interface @@ -114,6 +115,7 @@ func New( topology topology.Driver, fullNode bool, unwrap func(swarm.Chunk), + gsocHandler func(soc.SOC), validStamp postage.ValidStampFn, logger log.Logger, accounting accounting.Interface, @@ -132,6 +134,7 @@ func New( topologyDriver: topology, fullNode: fullNode, unwrap: unwrap, + gsocHandler: gsocHandler, logger: logger.WithName(loggerName).Register(), accounting: accounting, pricer: pricer, @@ -225,7 +228,9 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) if cac.Valid(chunk) { go ps.unwrap(chunk) - } else if !soc.Valid(chunk) { + } else if chunk, err := soc.FromChunk(chunk); err == nil { + go ps.gsocHandler(*chunk) + } else { return swarm.ErrInvalidChunk } From 4e5d93c272666d7c8908d4a9054d3452731316e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Tue, 16 Jul 2024 15:39:33 +0200 Subject: [PATCH 03/54] feat: gsoc subscribe api --- pkg/api/api.go | 4 ++ pkg/api/gsoc.go | 119 ++++++++++++++++++++++++++++++++++++++++++++++ pkg/api/router.go | 4 ++ 3 files changed, 127 insertions(+) create mode 100644 pkg/api/gsoc.go diff --git a/pkg/api/api.go b/pkg/api/api.go index b4c7f7ad18b..807278b0287 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -33,6 +33,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/file/pipeline" "github.com/ethersphere/bee/v2/pkg/file/pipeline/builder" "github.com/ethersphere/bee/v2/pkg/file/redundancy" + "github.com/ethersphere/bee/v2/pkg/gsoc" "github.com/ethersphere/bee/v2/pkg/jsonhttp" "github.com/ethersphere/bee/v2/pkg/log" "github.com/ethersphere/bee/v2/pkg/p2p" @@ -148,6 +149,7 @@ type Service struct { storer Storer resolver resolver.Interface pss pss.Interface + gsoc gsoc.Listener steward steward.Interface logger log.Logger loggerV1 log.Logger @@ -250,6 +252,7 @@ type ExtraOptions struct { Storer Storer Resolver resolver.Interface Pss pss.Interface + Gsoc gsoc.Listener FeedFactory feeds.Factory Post postage.Service AccessControl accesscontrol.Controller @@ -333,6 +336,7 @@ func (s *Service) Configure(signer crypto.Signer, tracer *tracing.Tracer, o Opti s.storer = e.Storer s.resolver = e.Resolver s.pss = e.Pss + s.gsoc = e.Gsoc s.feedFactory = e.FeedFactory s.post = e.Post s.accesscontrol = e.AccessControl diff --git a/pkg/api/gsoc.go b/pkg/api/gsoc.go new file mode 100644 index 00000000000..bf6c57abee6 --- /dev/null +++ b/pkg/api/gsoc.go @@ -0,0 +1,119 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package api + +import ( + "net/http" + "time" + + "github.com/ethersphere/bee/v2/pkg/jsonhttp" + "github.com/ethersphere/bee/v2/pkg/swarm" + "github.com/gorilla/mux" + "github.com/gorilla/websocket" +) + +func (s *Service) gsocWsHandler(w http.ResponseWriter, r *http.Request) { + logger := s.logger.WithName("gsoc_subscribe").Build() + + paths := struct { + address []byte `map:"address" validate:"required"` + }{} + if response := s.mapStructure(mux.Vars(r), &paths); response != nil { + response("invalid path params", logger, w) + return + } + + upgrader := websocket.Upgrader{ + ReadBufferSize: swarm.ChunkSize, + WriteBufferSize: swarm.ChunkSize, + CheckOrigin: s.checkOrigin, + } + + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + logger.Debug("upgrade failed", "error", err) + logger.Error(nil, "upgrade failed") + jsonhttp.InternalServerError(w, "upgrade failed") + return + } + + s.wsWg.Add(1) + go s.gsocListeningWs(conn, paths.address) +} + +func (s *Service) gsocListeningWs(conn *websocket.Conn, socAddress []byte) { + defer s.wsWg.Done() + + var ( + dataC = make(chan []byte) + gone = make(chan struct{}) + ticker = time.NewTicker(s.WsPingPeriod) + err error + ) + defer func() { + ticker.Stop() + _ = conn.Close() + }() + cleanup := s.gsoc.Register([32]byte(socAddress), func(m []byte) { + select { + case dataC <- m: + case <-gone: + return + case <-s.quit: + return + } + }) + + defer cleanup() + + conn.SetCloseHandler(func(code int, text string) error { + s.logger.Debug("gsoc ws: client gone", "code", code, "message", text) + close(gone) + return nil + }) + + for { + select { + case b := <-dataC: + err = conn.SetWriteDeadline(time.Now().Add(writeDeadline)) + if err != nil { + s.logger.Debug("gsoc ws: set write deadline failed", "error", err) + return + } + + err = conn.WriteMessage(websocket.BinaryMessage, b) + if err != nil { + s.logger.Debug("gsoc ws: write message failed", "error", err) + return + } + + case <-s.quit: + // shutdown + err = conn.SetWriteDeadline(time.Now().Add(writeDeadline)) + if err != nil { + s.logger.Debug("gsoc ws: set write deadline failed", "error", err) + return + } + err = conn.WriteMessage(websocket.CloseMessage, []byte{}) + if err != nil { + s.logger.Debug("gsoc ws: write close message failed", "error", err) + } + return + case <-gone: + // client gone + return + case <-ticker.C: + err = conn.SetWriteDeadline(time.Now().Add(writeDeadline)) + if err != nil { + s.logger.Debug("gsoc ws: set write deadline failed", "error", err) + return + } + if err = conn.WriteMessage(websocket.PingMessage, nil); err != nil { + // error encountered while pinging client. 
client probably gone + return + } + } + } +} diff --git a/pkg/api/router.go b/pkg/api/router.go index 000521aab67..2326eb910df 100644 --- a/pkg/api/router.go +++ b/pkg/api/router.go @@ -316,6 +316,10 @@ func (s *Service) mountAPI() { web.FinalHandlerFunc(s.pssWsHandler), )) + handle("/gsoc/subscribe/{address}", web.ChainHandlers( + web.FinalHandlerFunc(s.gsocWsHandler), + )) + handle("/tags", web.ChainHandlers( web.FinalHandler(jsonhttp.MethodHandler{ "GET": http.HandlerFunc(s.listTagsHandler), From 340c23376fa63b4029c86e623f4930f551ceea40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Wed, 17 Jul 2024 22:18:06 +0200 Subject: [PATCH 04/54] fix: gsoc address path parsing --- pkg/api/gsoc.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/api/gsoc.go b/pkg/api/gsoc.go index bf6c57abee6..0285e80597b 100644 --- a/pkg/api/gsoc.go +++ b/pkg/api/gsoc.go @@ -18,7 +18,7 @@ func (s *Service) gsocWsHandler(w http.ResponseWriter, r *http.Request) { logger := s.logger.WithName("gsoc_subscribe").Build() paths := struct { - address []byte `map:"address" validate:"required"` + Address []byte `map:"address" validate:"required"` }{} if response := s.mapStructure(mux.Vars(r), &paths); response != nil { response("invalid path params", logger, w) @@ -40,7 +40,7 @@ func (s *Service) gsocWsHandler(w http.ResponseWriter, r *http.Request) { } s.wsWg.Add(1) - go s.gsocListeningWs(conn, paths.address) + go s.gsocListeningWs(conn, paths.Address) } func (s *Service) gsocListeningWs(conn *websocket.Conn, socAddress []byte) { From 9d5b641bc614449bc02fc4fa62fdc529e2a06d7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Wed, 17 Jul 2024 22:18:45 +0200 Subject: [PATCH 05/54] test: gsoc as param for testServer --- pkg/api/api_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go index 98b35785020..f19a3646e44 100644 --- a/pkg/api/api_test.go +++ b/pkg/api/api_test.go @@ -32,6 +32,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/file/pipeline" "github.com/ethersphere/bee/v2/pkg/file/pipeline/builder" "github.com/ethersphere/bee/v2/pkg/file/redundancy" + "github.com/ethersphere/bee/v2/pkg/gsoc" "github.com/ethersphere/bee/v2/pkg/jsonhttp/jsonhttptest" "github.com/ethersphere/bee/v2/pkg/log" p2pmock "github.com/ethersphere/bee/v2/pkg/p2p/mock" @@ -93,6 +94,7 @@ type testServerOptions struct { StateStorer storage.StateStorer Resolver resolver.Interface Pss pss.Interface + Gsoc gsoc.Listener WsPath string WsPingPeriod time.Duration Logger log.Logger @@ -191,6 +193,7 @@ func newTestServer(t *testing.T, o testServerOptions) (*http.Client, *websocket. Storer: o.Storer, Resolver: o.Resolver, Pss: o.Pss, + Gsoc: o.Gsoc, FeedFactory: o.Feeds, Post: o.Post, AccessControl: o.AccessControl, From 84831ac3fed83cd3e31460e5cc01582b931ddddf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Wed, 17 Jul 2024 22:19:14 +0200 Subject: [PATCH 06/54] test: gsoc api --- pkg/api/gsoc_test.go | 171 +++++++++++++++++++++++++++++++++++++++++++ pkg/gsoc/gsoc.go | 1 + 2 files changed, 172 insertions(+) create mode 100644 pkg/api/gsoc_test.go diff --git a/pkg/api/gsoc_test.go b/pkg/api/gsoc_test.go new file mode 100644 index 00000000000..df5ca954369 --- /dev/null +++ b/pkg/api/gsoc_test.go @@ -0,0 +1,171 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package api_test + +import ( + "encoding/hex" + "fmt" + "net/url" + "strings" + "testing" + "time" + + "github.com/ethersphere/bee/v2/pkg/cac" + "github.com/ethersphere/bee/v2/pkg/crypto" + "github.com/ethersphere/bee/v2/pkg/gsoc" + "github.com/ethersphere/bee/v2/pkg/log" + mockbatchstore "github.com/ethersphere/bee/v2/pkg/postage/batchstore/mock" + "github.com/ethersphere/bee/v2/pkg/soc" + mockstorer "github.com/ethersphere/bee/v2/pkg/storer/mock" + "github.com/ethersphere/bee/v2/pkg/swarm" + "github.com/ethersphere/bee/v2/pkg/util/testutil" + "github.com/gorilla/websocket" +) + +// TestGsocWebsocketSingleHandler creates a single websocket handler on a chunk address, and receives a message +func TestGsocWebsocketSingleHandler(t *testing.T) { + t.Parallel() + + var ( + id = make([]byte, 32) + g, cl, signer, _ = newGsocTest(t, id, 0) + respC = make(chan error, 1) + payload = []byte("hello there!") + ) + + err := cl.SetReadDeadline(time.Now().Add(2 * time.Second)) + if err != nil { + t.Fatal(err) + } + cl.SetReadLimit(swarm.ChunkSize) + + ch, _ := cac.New(payload) + socCh := soc.New(id, ch) + ch, _ = socCh.Sign(signer) + socCh, _ = soc.FromChunk(ch) + g.Handler(*socCh) + + go expectMessage(t, cl, respC, payload) + if err := <-respC; err != nil { + t.Fatal(err) + } +} + +func TestGsocWebsocketMultiHandler(t *testing.T) { + t.Parallel() + + var ( + id = make([]byte, 32) + g, cl, signer, listener = newGsocTest(t, make([]byte, 32), 0) + owner, _ = signer.EthereumAddress() + chunkAddr, _ = soc.CreateAddress(id, owner.Bytes()) + u = url.URL{Scheme: "ws", Host: listener, Path: fmt.Sprintf("/gsoc/subscribe/%s", hex.EncodeToString(chunkAddr.Bytes()))} + cl2, _, err = websocket.DefaultDialer.Dial(u.String(), nil) + respC = make(chan error, 2) + ) + if err != nil { + t.Fatalf("dial: %v. url %v", err, u.String()) + } + testutil.CleanupCloser(t, cl2) + + err = cl.SetReadDeadline(time.Now().Add(2 * time.Second)) + if err != nil { + t.Fatal(err) + } + cl.SetReadLimit(swarm.ChunkSize) + + ch, _ := cac.New(payload) + socCh := soc.New(id, ch) + ch, _ = socCh.Sign(signer) + socCh, _ = soc.FromChunk(ch) + + // close the websocket before calling pss with the message + err = cl.WriteMessage(websocket.CloseMessage, []byte{}) + if err != nil { + t.Fatal(err) + } + + g.Handler(*socCh) + + go expectMessage(t, cl, respC, payload) + go expectMessage(t, cl2, respC, payload) + if err := <-respC; err != nil { + t.Fatal(err) + } + if err := <-respC; err != nil { + t.Fatal(err) + } +} + +// TestGsocPong tests that the websocket api adheres to the websocket standard +// and sends ping-pong messages to keep the connection alive. +// The test opens a websocket, keeps it alive for 500ms, then receives a pss message. 
+func TestGsocPong(t *testing.T) { + t.Parallel() + id := make([]byte, 32) + + var ( + g, cl, signer, _ = newGsocTest(t, id, 90*time.Millisecond) + + respC = make(chan error, 1) + pongWait = 1 * time.Millisecond + ) + + cl.SetReadLimit(swarm.ChunkSize) + err := cl.SetReadDeadline(time.Now().Add(pongWait)) + if err != nil { + t.Fatal(err) + } + + time.Sleep(500 * time.Millisecond) // wait to see that the websocket is kept alive + ch, _ := cac.New([]byte("hello there!")) + socCh := soc.New(id, ch) + ch, _ = socCh.Sign(signer) + socCh, _ = soc.FromChunk(ch) + + g.Handler(*socCh) + + go expectMessage(t, cl, respC, nil) + if err := <-respC; err == nil || !strings.Contains(err.Error(), "i/o timeout") { + // note: error has *websocket.netError type so we need to check error by checking message + t.Fatal("want timeout error") + } +} + +func newGsocTest(t *testing.T, socId []byte, pingPeriod time.Duration) (gsoc.Listener, *websocket.Conn, crypto.Signer, string) { + t.Helper() + if pingPeriod == 0 { + pingPeriod = 10 * time.Second + } + var ( + batchStore = mockbatchstore.New() + storer = mockstorer.New() + ) + + privKey, err := crypto.GenerateSecp256k1Key() + if err != nil { + t.Fatal(err) + } + signer := crypto.NewDefaultSigner(privKey) + owner, err := signer.EthereumAddress() + if err != nil { + t.Fatal(err) + } + chunkAddr, _ := soc.CreateAddress(socId, owner.Bytes()) + + gsoc := gsoc.New() + testutil.CleanupCloser(t, gsoc) + + _, cl, listener, _ := newTestServer(t, testServerOptions{ + Gsoc: gsoc, + WsPath: fmt.Sprintf("/gsoc/subscribe/%s", hex.EncodeToString(chunkAddr.Bytes())), + Storer: storer, + BatchStore: batchStore, + Logger: log.Noop, + WsPingPeriod: pingPeriod, + }) + + return gsoc, cl, signer, listener +} diff --git a/pkg/gsoc/gsoc.go b/pkg/gsoc/gsoc.go index 9ffc9718cc4..7fc56e8b486 100644 --- a/pkg/gsoc/gsoc.go +++ b/pkg/gsoc/gsoc.go @@ -15,6 +15,7 @@ import ( type Listener interface { Register(address [32]byte, handler handler) (cleanup func()) Handler(c soc.SOC) + Close() error } type listener struct { From bbe5d00a41b72477ebdb5480194c95df17995dc5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 18 Jul 2024 12:42:14 +0200 Subject: [PATCH 07/54] test: add empty function for soc listener in pushsync test --- pkg/pushsync/pushsync_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/pushsync/pushsync_test.go b/pkg/pushsync/pushsync_test.go index 9b1ee648d3f..cd95d8fe50b 100644 --- a/pkg/pushsync/pushsync_test.go +++ b/pkg/pushsync/pushsync_test.go @@ -25,6 +25,7 @@ import ( pricermock "github.com/ethersphere/bee/v2/pkg/pricer/mock" "github.com/ethersphere/bee/v2/pkg/pushsync" "github.com/ethersphere/bee/v2/pkg/pushsync/pb" + "github.com/ethersphere/bee/v2/pkg/soc" storage "github.com/ethersphere/bee/v2/pkg/storage" testingc "github.com/ethersphere/bee/v2/pkg/storage/testing" "github.com/ethersphere/bee/v2/pkg/swarm" @@ -802,6 +803,7 @@ func createPushSyncNodeWithAccounting( if unwrap == nil { unwrap = func(swarm.Chunk) {} } + gsocListener := func(soc.SOC) {} validStamp := func(ch swarm.Chunk) (swarm.Chunk, error) { return ch, nil @@ -809,7 +811,7 @@ radiusFunc := func() (uint8, error) { return 0, nil } - ps := pushsync.New(addr, 1, blockHash.Bytes(), recorderDisconnecter, storer, radiusFunc, mockTopology, true, unwrap, validStamp, logger, acct, mockPricer, signer, nil, -1) + ps := pushsync.New(addr, 1, blockHash.Bytes(), recorderDisconnecter, storer, radiusFunc, mockTopology, true, unwrap, 
gsocListener, validStamp, logger, acct, mockPricer, signer, nil, -1) t.Cleanup(func() { ps.Close() }) return ps, storer From d800e9abc2f4382a6189e14f213c624421a867fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 18 Jul 2024 15:37:13 +0200 Subject: [PATCH 08/54] refactor: remove unused pusher --- pkg/gsoc/gsoc.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/gsoc/gsoc.go b/pkg/gsoc/gsoc.go index 7fc56e8b486..49e08407ba4 100644 --- a/pkg/gsoc/gsoc.go +++ b/pkg/gsoc/gsoc.go @@ -7,7 +7,6 @@ package gsoc import ( "sync" - "github.com/ethersphere/bee/v2/pkg/pushsync" "github.com/ethersphere/bee/v2/pkg/soc" "github.com/ethersphere/bee/v2/pkg/swarm" ) @@ -19,7 +18,6 @@ type Listener interface { } type listener struct { - pusher pushsync.PushSyncer handlers map[[32]byte][]*handler handlersMu sync.Mutex quit chan struct{} } @@ -56,7 +54,10 @@ func (l *listener) Register(address [32]byte, handler handler) (cleanup func()) // Handler is called by push/pull sync and passes the chunk its registered handler func (l *listener) Handler(c soc.SOC) { - addr, _ := c.Address() + addr, err := c.Address() + if err != nil { + return // no handler + } h := l.getHandlers([32]byte(addr.Bytes())) if h == nil { return // no handler From 0f36ac7dfb208dc0dd502cc41aa37847cb3adcd6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 18 Jul 2024 15:54:41 +0200 Subject: [PATCH 09/54] docs: gsoc openapi --- openapi/Swarm.yaml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/openapi/Swarm.yaml b/openapi/Swarm.yaml index 588d31b59d9..b76006a65a8 100644 --- a/openapi/Swarm.yaml +++ b/openapi/Swarm.yaml @@ -816,6 +816,28 @@ paths: default: description: Default response + "/gsoc/subscribe/{address}": + get: + summary: Subscribe to GSOC payloads + tags: + - GSOC + - Subscribe + - Websocket + parameters: + - in: path + name: address + schema: + $ref: "SwarmCommon.yaml#/components/schemas/SwarmReference" + required: true + description: "Single Owner Chunk address (which may have multiple payloads)" + responses: + "200": + description: Returns a WebSocket with a subscription for incoming message data on the requested SOC address. + "500": + $ref: "SwarmCommon.yaml#/components/responses/500" + default: + description: Default response "/soc/{owner}/{id}": post: summary: Upload single owner chunk From 1b770278a5cf9ba054c58074993621e9de4d96b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 18 Jul 2024 17:24:12 +0200 Subject: [PATCH 10/54] test: unit --- pkg/gsoc/gsoc_test.go | 123 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 123 insertions(+) create mode 100644 pkg/gsoc/gsoc_test.go diff --git a/pkg/gsoc/gsoc_test.go b/pkg/gsoc/gsoc_test.go new file mode 100644 index 00000000000..89540410a7b --- /dev/null +++ b/pkg/gsoc/gsoc_test.go @@ -0,0 +1,123 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package gsoc_test + +import ( + "testing" + "time" + + "github.com/ethersphere/bee/v2/pkg/cac" + "github.com/ethersphere/bee/v2/pkg/crypto" + "github.com/ethersphere/bee/v2/pkg/gsoc" + "github.com/ethersphere/bee/v2/pkg/soc" + "github.com/ethersphere/bee/v2/pkg/util/testutil" +) + +// TestRegister verifies that handler funcs are able to be registered correctly in pss +func TestRegister(t *testing.T) { + t.Parallel() + + var ( + g = gsoc.New() + h1Calls = 0 + h2Calls = 0 + h3Calls = 0 + msgChan = make(chan struct{}) + + payload1 = []byte("Hello there!") + payload2 = []byte("General Kenobi. You are a bold one. Kill him!") + socId1 = testutil.RandBytes(t, 32) + socId2 = append([]byte{socId1[0] + 1}, socId1[1:]...) + privKey, _ = crypto.GenerateSecp256k1Key() + signer = crypto.NewDefaultSigner(privKey) + owner, _ = signer.EthereumAddress() + address1, _ = soc.CreateAddress(socId1, owner.Bytes()) + address2, _ = soc.CreateAddress(socId2, owner.Bytes()) + + h1 = func(m []byte) { + h1Calls++ + msgChan <- struct{}{} + } + + h2 = func(m []byte) { + h2Calls++ + msgChan <- struct{}{} + } + + h3 = func(m []byte) { + h3Calls++ + msgChan <- struct{}{} + } + ) + _ = g.Register([32]byte(address1.Bytes()), h1) + _ = g.Register([32]byte(address2.Bytes()), h2) + + ch1, _ := cac.New(payload1) + socCh1 := soc.New(socId1, ch1) + ch1, _ = socCh1.Sign(signer) + socCh1, _ = soc.FromChunk(ch1) + + ch2, _ := cac.New(payload2) + socCh2 := soc.New(socId2, ch2) + ch2, _ = socCh2.Sign(signer) + socCh2, _ = soc.FromChunk(ch2) + + // trigger soc upload on address1, check that only h1 is called + g.Handler(*socCh1) + + waitHandlerCallback(t, &msgChan, 1) + + ensureCalls(t, &h1Calls, 1) + ensureCalls(t, &h2Calls, 0) + + // register another handler on the first address + cleanup := g.Register([32]byte(address1.Bytes()), h3) + + g.Handler(*socCh1) + + waitHandlerCallback(t, &msgChan, 2) + + ensureCalls(t, &h1Calls, 2) + ensureCalls(t, &h2Calls, 0) + ensureCalls(t, &h3Calls, 1) + + cleanup() // remove the last handler + + g.Handler(*socCh1) + + waitHandlerCallback(t, &msgChan, 1) + + ensureCalls(t, &h1Calls, 3) + ensureCalls(t, &h2Calls, 0) + ensureCalls(t, &h3Calls, 1) + + g.Handler(*socCh2) + + waitHandlerCallback(t, &msgChan, 1) + + ensureCalls(t, &h1Calls, 3) + ensureCalls(t, &h2Calls, 1) + ensureCalls(t, &h3Calls, 1) +} + +func ensureCalls(t *testing.T, calls *int, exp int) { + t.Helper() + + if exp != *calls { + t.Fatalf("expected %d calls, found %d", exp, *calls) + } +} + +func waitHandlerCallback(t *testing.T, msgChan *chan struct{}, count int) { + t.Helper() + + for received := 0; received < count; received++ { + select { + case <-*msgChan: + case <-time.After(1 * time.Second): + t.Fatal("reached timeout while waiting for handler message") + } + } +} From d53a7761877cd555c4ea458fd787be43a7de2db7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 18 Jul 2024 17:28:17 +0200 Subject: [PATCH 11/54] docs: fix yaml indentation --- openapi/Swarm.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openapi/Swarm.yaml b/openapi/Swarm.yaml index b76006a65a8..2542fe4abef 100644 --- a/openapi/Swarm.yaml +++ b/openapi/Swarm.yaml @@ -816,7 +816,7 @@ paths: default: description: Default response - "/gsoc/subscribe/{address}": + "/gsoc/subscribe/{address}": get: summary: Subscribe to GSOC payloads tags: From 66e44b84b53d812be14176131bac2556447f789a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 25 Jul 2024 10:38:09 +0200 Subject: [PATCH 12/54] 
feat: add new error handling --- pkg/api/soc.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/api/soc.go b/pkg/api/soc.go index 9eff4bd3c99..08843e6013d 100644 --- a/pkg/api/soc.go +++ b/pkg/api/soc.go @@ -120,6 +120,8 @@ func (s *Service) socUploadHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.NotFound(w, "batch with id not found") case errors.Is(err, errInvalidPostageBatch): jsonhttp.BadRequest(w, "invalid batch id") + case errors.Is(err, errUnsupportedDevNodeOperation): + jsonhttp.NotImplemented(w, "operation is not supported in dev mode") default: jsonhttp.BadRequest(w, nil) } From e5e79c031418df70fe56a2eeb11d73246aef07f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Fri, 26 Jul 2024 15:42:44 +0200 Subject: [PATCH 13/54] feat: logger in gsoc listener --- pkg/api/gsoc_test.go | 2 +- pkg/gsoc/gsoc.go | 8 +++++++- pkg/gsoc/gsoc_test.go | 3 ++- pkg/node/devnode.go | 2 +- pkg/node/node.go | 2 +- 5 files changed, 12 insertions(+), 5 deletions(-) diff --git a/pkg/api/gsoc_test.go b/pkg/api/gsoc_test.go index df5ca954369..b4e9855b6ef 100644 --- a/pkg/api/gsoc_test.go +++ b/pkg/api/gsoc_test.go @@ -155,7 +155,7 @@ func newGsocTest(t *testing.T, socId []byte, pingPeriod time.Duration) (gsoc.Lis } chunkAddr, _ := soc.CreateAddress(socId, owner.Bytes()) - gsoc := gsoc.New() + gsoc := gsoc.New(log.NewLogger("test")) testutil.CleanupCloser(t, gsoc) _, cl, listener, _ := newTestServer(t, testServerOptions{ diff --git a/pkg/gsoc/gsoc.go b/pkg/gsoc/gsoc.go index 49e08407ba4..bea68bd9192 100644 --- a/pkg/gsoc/gsoc.go +++ b/pkg/gsoc/gsoc.go @@ -7,6 +7,7 @@ package gsoc import ( "sync" + "github.com/ethersphere/bee/v2/pkg/log" "github.com/ethersphere/bee/v2/pkg/soc" "github.com/ethersphere/bee/v2/pkg/swarm" ) @@ -21,11 +22,13 @@ type listener struct { handlers map[[32]byte][]*handler handlersMu sync.Mutex quit chan struct{} + logger log.Logger } // New returns a new pss service. 
-func New() Listener { +func New(logger log.Logger) Listener { return &listener{ + logger: logger, handlers: make(map[[32]byte][]*handler), quit: make(chan struct{}), } @@ -62,6 +65,9 @@ func (l *listener) Handler(c soc.SOC) { if h == nil { return // no handler } + l.logger.Info("new incoming GSOC message", + "GSOC Address", addr, + "wrapped chunk address", c.WrappedChunk().Address()) var wg sync.WaitGroup for _, hh := range h { diff --git a/pkg/gsoc/gsoc_test.go b/pkg/gsoc/gsoc_test.go index 89540410a7b..c4022cc79ce 100644 --- a/pkg/gsoc/gsoc_test.go +++ b/pkg/gsoc/gsoc_test.go @@ -11,6 +11,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/cac" "github.com/ethersphere/bee/v2/pkg/crypto" "github.com/ethersphere/bee/v2/pkg/gsoc" + "github.com/ethersphere/bee/v2/pkg/log" "github.com/ethersphere/bee/v2/pkg/soc" "github.com/ethersphere/bee/v2/pkg/util/testutil" ) @@ -20,7 +21,7 @@ func TestRegister(t *testing.T) { t.Parallel() var ( - g = gsoc.New() + g = gsoc.New(log.NewLogger("test")) h1Calls = 0 h2Calls = 0 h3Calls = 0 diff --git a/pkg/node/devnode.go b/pkg/node/devnode.go index db84d2190d2..68f349327ed 100644 --- a/pkg/node/devnode.go +++ b/pkg/node/devnode.go @@ -343,7 +343,7 @@ func NewDevBee(logger log.Logger, o *DevOptions) (b *DevBee, err error) { Storer: localStore, Resolver: mockResolver, Pss: pssService, - Gsoc: gsoc.New(), + Gsoc: gsoc.New(logger), FeedFactory: mockFeeds, Post: post, AccessControl: accesscontrol, diff --git a/pkg/node/node.go b/pkg/node/node.go index 6a48cac4188..d1a43fe7946 100644 --- a/pkg/node/node.go +++ b/pkg/node/node.go @@ -890,7 +890,7 @@ func NewBee( pricing.SetPaymentThresholdObserver(acc) pssService := pss.New(pssPrivateKey, logger) - gsocService := gsoc.New() + gsocService := gsoc.New(logger) b.pssCloser = pssService validStamp := postage.ValidStamp(batchStore) From 5c5bb7f9339c0f41f352f01395ef67eb17cd5a71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Fri, 6 Sep 2024 12:42:16 +0200 Subject: [PATCH 14/54] refactor: handle instead of handler --- pkg/api/gsoc_test.go | 6 +++--- pkg/gsoc/gsoc.go | 6 +++--- pkg/node/node.go | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/api/gsoc_test.go b/pkg/api/gsoc_test.go index b4e9855b6ef..72ae38cf877 100644 --- a/pkg/api/gsoc_test.go +++ b/pkg/api/gsoc_test.go @@ -45,7 +45,7 @@ func TestGsocWebsocketSingleHandler(t *testing.T) { socCh := soc.New(id, ch) ch, _ = socCh.Sign(signer) socCh, _ = soc.FromChunk(ch) - g.Handler(*socCh) + g.Handle(*socCh) go expectMessage(t, cl, respC, payload) if err := <-respC; err != nil { @@ -87,7 +87,7 @@ func TestGsocWebsocketMultiHandler(t *testing.T) { t.Fatal(err) } - g.Handler(*socCh) + g.Handle(*socCh) go expectMessage(t, cl, respC, payload) go expectMessage(t, cl2, respC, payload) @@ -125,7 +125,7 @@ func TestGsocPong(t *testing.T) { ch, _ = socCh.Sign(signer) socCh, _ = soc.FromChunk(ch) - g.Handler(*socCh) + g.Handle(*socCh) go expectMessage(t, cl, respC, nil) if err := <-respC; err == nil || !strings.Contains(err.Error(), "i/o timeout") { diff --git a/pkg/gsoc/gsoc.go b/pkg/gsoc/gsoc.go index bea68bd9192..edf7983be99 100644 --- a/pkg/gsoc/gsoc.go +++ b/pkg/gsoc/gsoc.go @@ -14,7 +14,7 @@ import ( type Listener interface { Register(address [32]byte, handler handler) (cleanup func()) - Handler(c soc.SOC) + Handle(c soc.SOC) Close() error } @@ -55,8 +55,8 @@ func (l *listener) Register(address [32]byte, handler handler) (cleanup func()) } } -// Handler is called by push/pull sync and passes the chunk its registered handler 
-func (l *listener) Handler(c soc.SOC) { +// Handle is called by push/pull sync and passes the chunk its registered handler +func (l *listener) Handle(c soc.SOC) { addr, err := c.Address() if err != nil { return // no handler diff --git a/pkg/node/node.go b/pkg/node/node.go index d1a43fe7946..9bb480e1480 100644 --- a/pkg/node/node.go +++ b/pkg/node/node.go @@ -944,7 +944,7 @@ func NewBee( } } - pushSyncProtocol := pushsync.New(swarmAddress, nonce, p2ps, localStore, kad, o.FullNodeMode, pssService.TryUnwrap, gsocService.Handler, validStamp, logger, acc, pricer, signer, tracer, warmupTime) + pushSyncProtocol := pushsync.New(swarmAddress, nonce, p2ps, localStore, kad, o.FullNodeMode, pssService.TryUnwrap, gsocService.Handle, validStamp, logger, acc, pricer, signer, tracer, warmupTime) b.pushSyncCloser = pushSyncProtocol // set the pushSyncer in the PSS @@ -958,7 +958,7 @@ func NewBee( pusherService.AddFeed(localStore.PusherFeed()) - pullSyncProtocol := pullsync.New(p2ps, localStore, pssService.TryUnwrap, gsocService.Handler, validStamp, logger, pullsync.DefaultMaxPage) + pullSyncProtocol := pullsync.New(p2ps, localStore, pssService.TryUnwrap, gsocService.Handle, validStamp, logger, pullsync.DefaultMaxPage) b.pullSyncCloser = pullSyncProtocol retrieveProtocolSpec := retrieval.Protocol() From 4f60796b2d7b2301c3d25f5f8d1df83aa58b5e16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Fri, 6 Sep 2024 12:50:35 +0200 Subject: [PATCH 15/54] docs: copypastes --- pkg/api/gsoc_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/api/gsoc_test.go b/pkg/api/gsoc_test.go index 72ae38cf877..5d0a70cc9c4 100644 --- a/pkg/api/gsoc_test.go +++ b/pkg/api/gsoc_test.go @@ -81,7 +81,7 @@ func TestGsocWebsocketMultiHandler(t *testing.T) { ch, _ = socCh.Sign(signer) socCh, _ = soc.FromChunk(ch) - // close the websocket before calling pss with the message + // close the websocket before calling GSOC with the message err = cl.WriteMessage(websocket.CloseMessage, []byte{}) if err != nil { t.Fatal(err) @@ -101,7 +101,7 @@ func TestGsocWebsocketMultiHandler(t *testing.T) { // TestGsocPong tests that the websocket api adheres to the websocket standard // and sends ping-pong messages to keep the connection alive. -// The test opens a websocket, keeps it alive for 500ms, then receives a pss message. +// The test opens a websocket, keeps it alive for 500ms, then receives a GSOC message. 
func TestGsocPong(t *testing.T) { t.Parallel() id := make([]byte, 32) From f868608b83a231a326fc08853e64bba19889f61c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Fri, 6 Sep 2024 12:51:57 +0200 Subject: [PATCH 16/54] refactor: rename register to subscribe --- pkg/api/gsoc.go | 2 +- pkg/gsoc/gsoc.go | 6 +++--- pkg/gsoc/gsoc_test.go | 14 +++++++------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/pkg/api/gsoc.go b/pkg/api/gsoc.go index 0285e80597b..ea9aad5271e 100644 --- a/pkg/api/gsoc.go +++ b/pkg/api/gsoc.go @@ -56,7 +56,7 @@ func (s *Service) gsocListeningWs(conn *websocket.Conn, socAddress []byte) { ticker.Stop() _ = conn.Close() }() - cleanup := s.gsoc.Register([32]byte(socAddress), func(m []byte) { + cleanup := s.gsoc.Subscribe([32]byte(socAddress), func(m []byte) { select { case dataC <- m: case <-gone: diff --git a/pkg/gsoc/gsoc.go b/pkg/gsoc/gsoc.go index edf7983be99..c2f88fc551c 100644 --- a/pkg/gsoc/gsoc.go +++ b/pkg/gsoc/gsoc.go @@ -13,7 +13,7 @@ import ( ) type Listener interface { - Register(address [32]byte, handler handler) (cleanup func()) + Subscribe(address [32]byte, handler handler) (cleanup func()) Handle(c soc.SOC) Close() error } @@ -34,8 +34,8 @@ func New(logger log.Logger) Listener { } } -// Register allows the definition of a Handler func for a specific topic on the pss struct. -func (l *listener) Register(address [32]byte, handler handler) (cleanup func()) { +// Subscribe allows the definition of a Handler func for a specific topic on the pss struct. +func (l *listener) Subscribe(address [32]byte, handler handler) (cleanup func()) { l.handlersMu.Lock() defer l.handlersMu.Unlock() diff --git a/pkg/gsoc/gsoc_test.go b/pkg/gsoc/gsoc_test.go index c4022cc79ce..0bab1b39bfb 100644 --- a/pkg/gsoc/gsoc_test.go +++ b/pkg/gsoc/gsoc_test.go @@ -52,8 +52,8 @@ func TestRegister(t *testing.T) { msgChan <- struct{}{} } ) - _ = g.Register([32]byte(address1.Bytes()), h1) - _ = g.Register([32]byte(address2.Bytes()), h2) + _ = g.Subscribe([32]byte(address1.Bytes()), h1) + _ = g.Subscribe([32]byte(address2.Bytes()), h2) ch1, _ := cac.New(payload1) socCh1 := soc.New(socId1, ch1) @@ -66,7 +66,7 @@ func TestRegister(t *testing.T) { socCh2, _ = soc.FromChunk(ch2) // trigger soc upload on address1, check that only h1 is called - g.Handler(*socCh1) + g.Handle(*socCh1) waitHandlerCallback(t, &msgChan, 1) @@ -74,9 +74,9 @@ func TestRegister(t *testing.T) { ensureCalls(t, &h2Calls, 0) // register another handler on the first address - cleanup := g.Register([32]byte(address1.Bytes()), h3) + cleanup := g.Subscribe([32]byte(address1.Bytes()), h3) - g.Handler(*socCh1) + g.Handle(*socCh1) waitHandlerCallback(t, &msgChan, 2) @@ -86,7 +86,7 @@ func TestRegister(t *testing.T) { cleanup() // remove the last handler - g.Handler(*socCh1) + g.Handle(*socCh1) waitHandlerCallback(t, &msgChan, 1) @@ -94,7 +94,7 @@ func TestRegister(t *testing.T) { ensureCalls(t, &h2Calls, 0) ensureCalls(t, &h3Calls, 1) - g.Handler(*socCh2) + g.Handle(*socCh2) waitHandlerCallback(t, &msgChan, 1) From 29af22d6b4825277ee76b12f70bf37320ea31b8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Fri, 6 Sep 2024 12:57:21 +0200 Subject: [PATCH 17/54] refactor: unnecessary go call on gsoc handler --- pkg/pullsync/pullsync.go | 2 +- pkg/pushsync/pushsync.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/pullsync/pullsync.go b/pkg/pullsync/pullsync.go index 49e625fef74..36a0be87ea4 100644 --- a/pkg/pullsync/pullsync.go +++ 
b/pkg/pullsync/pullsync.go @@ -360,7 +360,7 @@ func (s *Syncer) Sync(ctx context.Context, peer swarm.Address, bin uint8, start if cac.Valid(chunk) { go s.unwrap(chunk) } else if chunk, err := soc.FromChunk(chunk); err == nil { - go s.gsocHandler(*chunk) + s.gsocHandler(*chunk) } else { s.logger.Debug("invalid cac/soc chunk", "error", swarm.ErrInvalidChunk, "peer_address", peer, "chunk", chunk) chunkErr = errors.Join(chunkErr, swarm.ErrInvalidChunk) diff --git a/pkg/pushsync/pushsync.go b/pkg/pushsync/pushsync.go index 8967a5e5855..18e87496ea7 100644 --- a/pkg/pushsync/pushsync.go +++ b/pkg/pushsync/pushsync.go @@ -229,7 +229,7 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) if cac.Valid(chunk) { go ps.unwrap(chunk) } else if chunk, err := soc.FromChunk(chunk); err == nil { - go ps.gsocHandler(*chunk) + ps.gsocHandler(*chunk) } else { return swarm.ErrInvalidChunk } From f429c018105e9ffbb1ad2ff1ecf582ce634eade9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Wed, 11 Sep 2024 16:11:42 +0200 Subject: [PATCH 18/54] feat: identity address in pull sync --- pkg/pushsync/pushsync.go | 13 +++++++++---- pkg/soc/utils.go | 31 +++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 4 deletions(-) create mode 100644 pkg/soc/utils.go diff --git a/pkg/pushsync/pushsync.go b/pkg/pushsync/pushsync.go index 18e87496ea7..939b45cb980 100644 --- a/pkg/pushsync/pushsync.go +++ b/pkg/pushsync/pushsync.go @@ -362,6 +362,11 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, origin bo sentErrorsLeft = maxPushErrors } + idAddress, err := soc.IdentityAddress(ch) + if err != nil { + return nil, err + } + resultChan := make(chan receiptResult) retryC := make(chan struct{}, max(1, parallelForwards)) @@ -398,10 +403,10 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, origin bo // If no peer can be found from an origin peer, the origin peer may store the chunk. // Non-origin peers store the chunk if the chunk is within depth. // For non-origin peers, if the chunk is not within depth, they may store the chunk if they are the closest peer to the chunk. - fullSkip := append(skip.ChunkPeers(ch.Address()), ps.errSkip.ChunkPeers(ch.Address())...) + fullSkip := append(skip.ChunkPeers(idAddress), ps.errSkip.ChunkPeers(idAddress)...) peer, err := ps.closestPeer(ch.Address(), origin, fullSkip) if errors.Is(err, topology.ErrNotFound) { - if skip.PruneExpiresAfter(ch.Address(), overDraftRefresh) == 0 { //no overdraft peers, we have depleted ALL peers + if skip.PruneExpiresAfter(idAddress, overDraftRefresh) == 0 { //no overdraft peers, we have depleted ALL peers if inflight == 0 { if ps.fullNode { if cac.Valid(ch) { @@ -455,10 +460,10 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, origin bo action, err := ps.prepareCredit(ctx, peer, ch, origin) if err != nil { retry() - skip.Add(ch.Address(), peer, overDraftRefresh) + skip.Add(idAddress, peer, overDraftRefresh) continue } - skip.Forever(ch.Address(), peer) + skip.Forever(idAddress, peer) ps.metrics.TotalSendAttempts.Inc() inflight++ diff --git a/pkg/soc/utils.go b/pkg/soc/utils.go new file mode 100644 index 00000000000..f07c2f2102d --- /dev/null +++ b/pkg/soc/utils.go @@ -0,0 +1,31 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package soc + +import "github.com/ethersphere/bee/v2/pkg/swarm" + +// IdentityAddress returns the internally used address for the chunk +func IdentityAddress(chunk swarm.Chunk) (swarm.Address, error) { + // check the chunk is single owner chunk or cac + if sch, err := FromChunk(chunk); err == nil { + socAddress, err := sch.Address() + if err != nil { + return swarm.ZeroAddress, err + } + h := swarm.NewHasher() + _, err = h.Write(socAddress.Bytes()) + if err != nil { + return swarm.ZeroAddress, err + } + _, err = h.Write(sch.WrappedChunk().Address().Bytes()) + if err != nil { + return swarm.ZeroAddress, err + } + + return swarm.NewAddress(h.Sum(nil)), nil + } + + return chunk.Address(), nil +} From 1d5af443cf4b22976c4a9765c0c780ee1c31fd4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Wed, 11 Sep 2024 16:14:32 +0200 Subject: [PATCH 19/54] test: multiple payload push --- pkg/p2p/streamtest/streamtest.go | 7 +++ pkg/pushsync/pushsync_test.go | 105 +++++++++++++++++++++++++++++-- 2 files changed, 106 insertions(+), 6 deletions(-) diff --git a/pkg/p2p/streamtest/streamtest.go b/pkg/p2p/streamtest/streamtest.go index a9892687240..ae312624149 100644 --- a/pkg/p2p/streamtest/streamtest.go +++ b/pkg/p2p/streamtest/streamtest.go @@ -96,6 +96,13 @@ func New(opts ...Option) *Recorder { return r } +func (r *Recorder) Reset() { + r.recordsMu.Lock() + defer r.recordsMu.Unlock() + + r.records = make(map[string][]*Record) +} + func (r *Recorder) SetProtocols(protocols ...p2p.ProtocolSpec) { r.protocols = append(r.protocols, protocols...) } diff --git a/pkg/pushsync/pushsync_test.go b/pkg/pushsync/pushsync_test.go index cd95d8fe50b..4c127ff84a7 100644 --- a/pkg/pushsync/pushsync_test.go +++ b/pkg/pushsync/pushsync_test.go @@ -111,6 +111,81 @@ func TestPushClosest(t *testing.T) { } } +// TestSocListener listens all payload of a SOC. This triggers sending a chunk to the closest node +// and expects a receipt. The message is intercepted in the outgoing stream to check for correctness. 
+func TestSocListener(t *testing.T) { + t.Parallel() + // chunk data to upload + privKey, err := crypto.DecodeSecp256k1PrivateKey(swarm.MustParseHexAddress("b0baf37700000000000000000000000000000000000000000000000000000000").Bytes()) + if err != nil { + t.Fatal(err) + } + signer := crypto.NewDefaultSigner(privKey) + chunk1 := testingc.FixtureChunk("7000") + chunk2 := testingc.FixtureChunk("0033") + id := make([]byte, swarm.HashSize) + s1 := soc.New(id, chunk1) + s2 := soc.New(id, chunk2) + sch1, err := s1.Sign(signer) + if err != nil { + t.Fatal(err) + } + sch1 = sch1.WithStamp(chunk1.Stamp()) + sch2, err := s2.Sign(signer) + if err != nil { + t.Fatal(err) + } + sch2 = sch2.WithStamp(chunk2.Stamp()) + + // create a pivot node and a mocked closest node + pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000 + closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") // binary 0110 -> po 1 + + // peer is the node responding to the chunk receipt message + // mock should return ErrWantSelf since there's no one to forward to + psPeer, _, _ := createGsocPushSyncNode(t, closestPeer, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf)) + + recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()), streamtest.WithBaseAddr(pivotNode)) + + // pivot node needs the streamer since the chunk is intercepted by + // the chunk worker, then gets sent by opening a new stream + psPivot, _, _ := createGsocPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer)) + + // Trigger the sending of chunk to the closest node + receipt, err := psPivot.PushChunkToClosest(context.Background(), sch1) + if err != nil { + t.Fatal(err) + } + + if !sch1.Address().Equal(receipt.Address) { + t.Fatal("invalid receipt") + } + + // this intercepts the outgoing delivery message + waitOnRecordAndTest(t, closestPeer, recorder, sch1.Address(), sch1.Data()) + + // this intercepts the incoming receipt message + waitOnRecordAndTest(t, closestPeer, recorder, sch1.Address(), nil) + + recorder.Reset() + + // Trigger the sending of chunk to the closest node + receipt, err = psPivot.PushChunkToClosest(context.Background(), sch2) + if err != nil { + t.Fatal(err) + } + + if !sch2.Address().Equal(receipt.Address) { + t.Fatal("invalid receipt") + } + + // this intercepts the outgoing delivery message + waitOnRecordAndTest(t, closestPeer, recorder, sch2.Address(), sch2.Data()) + + // this intercepts the incoming receipt message + waitOnRecordAndTest(t, closestPeer, recorder, sch2.Address(), nil) +} + // TestShallowReceipt forces the peer to send back a shallow receipt to a pushsync request. In return, the origin node returns the error along with the received receipt. 
func TestShallowReceipt(t *testing.T) { t.Parallel() @@ -378,7 +453,7 @@ func TestPushChunkToClosestErrorAttemptRetry(t *testing.T) { }), ) - psPivot, pivotStorer := createPushSyncNodeWithAccounting(t, pivotNode, defaultPrices, recorder, nil, defaultSigner(chunk), pivotAccounting, log.Noop, mock.WithPeers(peer1, peer2, peer3, peer4)) + psPivot, pivotStorer := createPushSyncNodeWithAccounting(t, pivotNode, defaultPrices, recorder, nil, defaultSigner(chunk), pivotAccounting, log.Noop, func(soc.SOC) {}, mock.WithPeers(peer1, peer2, peer3, peer4)) // Trigger the sending of chunk to the closest node receipt, err := psPivot.PushChunkToClosest(context.Background(), chunk) @@ -555,15 +630,15 @@ func TestPropagateErrMsg(t *testing.T) { captureLogger := log.NewLogger("test", log.WithSink(buf)) // Create the closest peer - psClosestPeer, _ := createPushSyncNodeWithAccounting(t, closestPeer, defaultPrices, nil, nil, faultySigner, accountingmock.NewAccounting(), log.Noop, mock.WithClosestPeerErr(topology.ErrWantSelf)) + psClosestPeer, _ := createPushSyncNodeWithAccounting(t, closestPeer, defaultPrices, nil, nil, faultySigner, accountingmock.NewAccounting(), log.Noop, func(soc.SOC) {}, mock.WithClosestPeerErr(topology.ErrWantSelf)) // creating the pivot peer - psPivot, _ := createPushSyncNodeWithAccounting(t, pivotPeer, defaultPrices, nil, nil, defaultSigner(chunk), accountingmock.NewAccounting(), log.Noop, mock.WithPeers(closestPeer)) + psPivot, _ := createPushSyncNodeWithAccounting(t, pivotPeer, defaultPrices, nil, nil, defaultSigner(chunk), accountingmock.NewAccounting(), log.Noop, func(soc.SOC) {}, mock.WithPeers(closestPeer)) combinedRecorder := streamtest.New(streamtest.WithProtocols(psPivot.Protocol(), psClosestPeer.Protocol()), streamtest.WithBaseAddr(triggerPeer)) // Creating the trigger peer - psTriggerPeer, _ := createPushSyncNodeWithAccounting(t, triggerPeer, defaultPrices, combinedRecorder, nil, defaultSigner(chunk), accountingmock.NewAccounting(), captureLogger, mock.WithPeers(pivotPeer)) + psTriggerPeer, _ := createPushSyncNodeWithAccounting(t, triggerPeer, defaultPrices, combinedRecorder, nil, defaultSigner(chunk), accountingmock.NewAccounting(), captureLogger, func(soc.SOC) {}, mock.WithPeers(pivotPeer)) _, err := psTriggerPeer.PushChunkToClosest(context.Background(), chunk) if err == nil { @@ -739,7 +814,22 @@ func createPushSyncNode( ) (*pushsync.PushSync, *testStorer, accounting.Interface) { t.Helper() mockAccounting := accountingmock.NewAccounting() - ps, mstorer := createPushSyncNodeWithAccounting(t, addr, prices, recorder, unwrap, signer, mockAccounting, log.Noop, mockOpts...) + ps, mstorer := createPushSyncNodeWithAccounting(t, addr, prices, recorder, unwrap, signer, mockAccounting, log.Noop, func(soc.SOC) {}, mockOpts...) + return ps, mstorer, mockAccounting +} + +func createGsocPushSyncNode( + t *testing.T, + addr swarm.Address, + prices pricerParameters, + recorder *streamtest.Recorder, + gsocListener func(soc.SOC), + signer crypto.Signer, + mockOpts ...mock.Option, +) (*pushsync.PushSync, *testStorer, accounting.Interface) { + t.Helper() + mockAccounting := accountingmock.NewAccounting() + ps, mstorer := createPushSyncNodeWithAccounting(t, addr, prices, recorder, nil, signer, mockAccounting, log.Noop, gsocListener, mockOpts...) 
return ps, mstorer, mockAccounting } @@ -788,6 +878,7 @@ func createPushSyncNodeWithAccounting( signer crypto.Signer, acct accounting.Interface, logger log.Logger, + gsocListener func(soc.SOC), mockOpts ...mock.Option, ) (*pushsync.PushSync, *testStorer) { t.Helper() @@ -803,7 +894,9 @@ func createPushSyncNodeWithAccounting( if unwrap == nil { unwrap = func(swarm.Chunk) {} } - gsocListener := func(soc.SOC) {} + if gsocListener == nil { + gsocListener = func(soc.SOC) {} + } validStamp := func(ch swarm.Chunk) (swarm.Chunk, error) { return ch, nil From 385a5293f3cb08c876319a34892d553cf3c52c22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Wed, 11 Sep 2024 16:25:00 +0200 Subject: [PATCH 20/54] test: gsoc listener --- pkg/pushsync/pushsync_test.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/pushsync/pushsync_test.go b/pkg/pushsync/pushsync_test.go index 4c127ff84a7..bea49d652e5 100644 --- a/pkg/pushsync/pushsync_test.go +++ b/pkg/pushsync/pushsync_test.go @@ -136,6 +136,12 @@ func TestSocListener(t *testing.T) { t.Fatal(err) } sch2 = sch2.WithStamp(chunk2.Stamp()) + expectedPayload := chunk1.Data() + gsocListener := func(soc soc.SOC) { + if !bytes.Equal(soc.WrappedChunk().Data(), expectedPayload) { + t.Fatalf("unexpected SOC payload on GSOC listener. got %s, want %s", soc.WrappedChunk().Data(), expectedPayload) + } + } // create a pivot node and a mocked closest node pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000 @@ -149,7 +155,7 @@ func TestSocListener(t *testing.T) { // pivot node needs the streamer since the chunk is intercepted by // the chunk worker, then gets sent by opening a new stream - psPivot, _, _ := createGsocPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer)) + psPivot, _, _ := createGsocPushSyncNode(t, pivotNode, defaultPrices, recorder, gsocListener, defaultSigner, mock.WithClosestPeer(closestPeer)) // Trigger the sending of chunk to the closest node receipt, err := psPivot.PushChunkToClosest(context.Background(), sch1) @@ -168,6 +174,7 @@ func TestSocListener(t *testing.T) { waitOnRecordAndTest(t, closestPeer, recorder, sch1.Address(), nil) recorder.Reset() + expectedPayload = chunk2.Data() // Trigger the sending of chunk to the closest node receipt, err = psPivot.PushChunkToClosest(context.Background(), sch2) From 6e28bb2c5c9d7236beb3e85330f8e067067023f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Wed, 11 Sep 2024 17:09:33 +0200 Subject: [PATCH 21/54] fix: param mismatch after rebasing --- pkg/node/node.go | 2 +- pkg/pushsync/pushsync_test.go | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/pkg/node/node.go b/pkg/node/node.go index 9bb480e1480..fbe7ffaccc5 100644 --- a/pkg/node/node.go +++ b/pkg/node/node.go @@ -944,7 +944,7 @@ func NewBee( } } - pushSyncProtocol := pushsync.New(swarmAddress, nonce, p2ps, localStore, kad, o.FullNodeMode, pssService.TryUnwrap, gsocService.Handle, validStamp, logger, acc, pricer, signer, tracer, warmupTime) + pushSyncProtocol := pushsync.New(swarmAddress, networkID, nonce, p2ps, localStore, waitNetworkRFunc, kad, o.FullNodeMode, pssService.TryUnwrap, gsocService.Handle, validStamp, logger, acc, pricer, signer, tracer, warmupTime) b.pushSyncCloser = pushSyncProtocol // set the pushSyncer in the PSS diff --git a/pkg/pushsync/pushsync_test.go b/pkg/pushsync/pushsync_test.go index 
bea49d652e5..118b9d57257 100644 --- a/pkg/pushsync/pushsync_test.go +++ b/pkg/pushsync/pushsync_test.go @@ -115,6 +115,10 @@ func TestPushClosest(t *testing.T) { // and expects a receipt. The message is intercepted in the outgoing stream to check for correctness. func TestSocListener(t *testing.T) { t.Parallel() + defaultSigner := cryptomock.New(cryptomock.WithSignFunc(func([]byte) ([]byte, error) { + return nil, nil + })) + // chunk data to upload privKey, err := crypto.DecodeSecp256k1PrivateKey(swarm.MustParseHexAddress("b0baf37700000000000000000000000000000000000000000000000000000000").Bytes()) if err != nil { @@ -870,7 +874,7 @@ func createPushSyncNodeWithRadius( radiusFunc := func() (uint8, error) { return radius, nil } - ps := pushsync.New(addr, 1, blockHash.Bytes(), recorderDisconnecter, storer, radiusFunc, mockTopology, true, unwrap, validStamp, log.Noop, accountingmock.NewAccounting(), mockPricer, signer, nil, -1) + ps := pushsync.New(addr, 1, blockHash.Bytes(), recorderDisconnecter, storer, radiusFunc, mockTopology, true, unwrap, func(soc.SOC) {}, validStamp, log.Noop, accountingmock.NewAccounting(), mockPricer, signer, nil, -1) t.Cleanup(func() { ps.Close() }) return ps, storer From 83c9309296457247cdd628f19a8beec1ec2b5f9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 12 Sep 2024 14:17:29 +0200 Subject: [PATCH 22/54] fix: idAddress in pushsync where it is needed --- pkg/pushsync/pushsync.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/pushsync/pushsync.go b/pkg/pushsync/pushsync.go index 939b45cb980..aa0d29bb638 100644 --- a/pkg/pushsync/pushsync.go +++ b/pkg/pushsync/pushsync.go @@ -443,7 +443,7 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, origin bo // all future requests should land directly into the neighborhood if neighborsOnly && peerPO < rad { - skip.Forever(ch.Address(), peer) + skip.Forever(idAddress, peer) continue } @@ -481,7 +481,7 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, origin bo case err == nil: return result.receipt, nil case errors.Is(err, ErrShallowReceipt): - ps.errSkip.Add(ch.Address(), result.peer, skiplistDur) + ps.errSkip.Add(idAddress, result.peer, skiplistDur) return result.receipt, err } } @@ -490,7 +490,7 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, origin bo ps.logger.Debug("could not push to peer", "chunk_address", ch.Address(), "peer_address", result.peer, "error", result.err) sentErrorsLeft-- - ps.errSkip.Add(ch.Address(), result.peer, skiplistDur) + ps.errSkip.Add(idAddress, result.peer, skiplistDur) retry() } From 615955bec2b5a6ae12446efda071180304b16484 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 12 Sep 2024 14:18:24 +0200 Subject: [PATCH 23/54] test: working signature for pushsync --- pkg/pushsync/pushsync_test.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pkg/pushsync/pushsync_test.go b/pkg/pushsync/pushsync_test.go index 118b9d57257..73296496753 100644 --- a/pkg/pushsync/pushsync_test.go +++ b/pkg/pushsync/pushsync_test.go @@ -115,8 +115,12 @@ func TestPushClosest(t *testing.T) { // and expects a receipt. The message is intercepted in the outgoing stream to check for correctness. 
func TestSocListener(t *testing.T) { t.Parallel() - defaultSigner := cryptomock.New(cryptomock.WithSignFunc(func([]byte) ([]byte, error) { - return nil, nil + defaultSigner := cryptomock.New(cryptomock.WithSignFunc(func(addr []byte) ([]byte, error) { + key, _ := crypto.GenerateSecp256k1Key() + signer := crypto.NewDefaultSigner(key) + signature, _ := signer.Sign(addr) + + return signature, nil })) // chunk data to upload @@ -149,7 +153,7 @@ func TestSocListener(t *testing.T) { // create a pivot node and a mocked closest node pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000 - closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") // binary 0110 -> po 1 + closestPeer := swarm.MustParseHexAddress("8000000000000000000000000000000000000000000000000000000000000000") // binary 1000 -> po 1 // peer is the node responding to the chunk receipt message // mock should return ErrWantSelf since there's no one to forward to From eefbfd75c55fd41d1484fdec4a5ab77ae1182d19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Wed, 25 Sep 2024 19:37:33 +0200 Subject: [PATCH 24/54] refactor: log id_address on push failure --- pkg/pushsync/pushsync.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pkg/pushsync/pushsync.go b/pkg/pushsync/pushsync.go index aa0d29bb638..22348100dae 100644 --- a/pkg/pushsync/pushsync.go +++ b/pkg/pushsync/pushsync.go @@ -324,7 +324,6 @@ func (ps *PushSync) PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Re Nonce: r.Nonce, }, err } - if err != nil { return nil, err } @@ -471,7 +470,6 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, origin bo go ps.push(ctx, resultChan, peer, ch, action) case result := <-resultChan: - inflight-- ps.measurePushPeer(result.pushTime, result.err) @@ -487,7 +485,7 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, origin bo } ps.metrics.TotalFailedSendAttempts.Inc() - ps.logger.Debug("could not push to peer", "chunk_address", ch.Address(), "peer_address", result.peer, "error", result.err) + ps.logger.Debug("could not push to peer", "chunk_address", ch.Address(), "id_address", idAddress, "peer_address", result.peer, "error", result.err) sentErrorsLeft-- ps.errSkip.Add(idAddress, result.peer, skiplistDur) retry() } From 18702c69c7bc41987881b17aecee9185c112109e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Wed, 25 Sep 2024 19:43:14 +0200 Subject: [PATCH 25/54] feat: id address usage on pusher and its inflight handling --- pkg/pusher/inflight.go | 25 +++++++++++++++---------- pkg/pusher/pusher.go | 31 +++++++++++++++++++++---------- 2 files changed, 36 insertions(+), 20 deletions(-) diff --git a/pkg/pusher/inflight.go b/pkg/pusher/inflight.go index 788872d2652..99ec53c96ff 100644 --- a/pkg/pusher/inflight.go +++ b/pkg/pusher/inflight.go @@ -12,28 +12,33 @@ import ( type inflight struct { mtx sync.Mutex - inflight map[string]struct{} + inflight map[[64]byte]struct{} } func newInflight() *inflight { return &inflight{ - inflight: make(map[string]struct{}), + inflight: make(map[[64]byte]struct{}), } } -func (i *inflight) delete(ch swarm.Chunk) { - key := ch.Address().ByteString() + string(ch.Stamp().BatchID()) +func (i *inflight) delete(idAddress swarm.Address, batchID []byte) { + var key [64]byte + copy(key[:32], idAddress.Bytes()) + copy(key[32:], batchID) + i.mtx.Lock() delete(i.inflight, key) i.mtx.Unlock() }
-func (i *inflight) set(ch swarm.Chunk) bool { +func (i *inflight) set(idAddress swarm.Address, batchID []byte) bool { + var key [64]byte + copy(key[:32], idAddress.Bytes()) + copy(key[32:], batchID) i.mtx.Lock() defer i.mtx.Unlock() - key := ch.Address().ByteString() + string(ch.Stamp().BatchID()) if _, ok := i.inflight[key]; ok { return true } @@ -50,16 +55,16 @@ type attempts struct { // try to log a chunk sync attempt. returns false when // maximum amount of attempts have been reached. -func (a *attempts) try(ch swarm.Address) bool { +func (a *attempts) try(idAddress swarm.Address) bool { a.mtx.Lock() defer a.mtx.Unlock() - key := ch.ByteString() + key := idAddress.ByteString() a.attempts[key]++ return a.attempts[key] < a.retryCount } -func (a *attempts) delete(ch swarm.Address) { +func (a *attempts) delete(idAddress swarm.Address) { a.mtx.Lock() - delete(a.attempts, ch.ByteString()) + delete(a.attempts, idAddress.ByteString()) a.mtx.Unlock() } diff --git a/pkg/pusher/pusher.go b/pkg/pusher/pusher.go index 7defcfb37cd..0945bdee05e 100644 --- a/pkg/pusher/pusher.go +++ b/pkg/pusher/pusher.go @@ -18,6 +18,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/log" "github.com/ethersphere/bee/v2/pkg/postage" "github.com/ethersphere/bee/v2/pkg/pushsync" + "github.com/ethersphere/bee/v2/pkg/soc" storage "github.com/ethersphere/bee/v2/pkg/storage" "github.com/ethersphere/bee/v2/pkg/swarm" "github.com/ethersphere/bee/v2/pkg/topology" @@ -214,7 +215,11 @@ func (s *Service) chunksWorker(warmupTime time.Duration) { for { select { case op := <-cc: - if s.inflight.set(op.Chunk) { + idAddress, err := soc.IdentityAddress(op.Chunk) + if err != nil { + op.Err <- err + } + if s.inflight.set(idAddress, op.Chunk.Stamp().BatchID()) { if op.Direct { select { case op.Err <- nil: @@ -240,8 +245,12 @@ func (s *Service) chunksWorker(warmupTime time.Duration) { func (s *Service) pushDeferred(ctx context.Context, logger log.Logger, op *Op) (bool, error) { loggerV1 := logger.V(1).Build() + idAddress, err := soc.IdentityAddress(op.Chunk) + if err != nil { + return true, err + } - defer s.inflight.delete(op.Chunk) + defer s.inflight.delete(idAddress, op.Chunk.Stamp().BatchID()) if _, err := s.validStamp(op.Chunk); err != nil { loggerV1.Warning( @@ -254,7 +263,7 @@ func (s *Service) pushDeferred(ctx context.Context, logger log.Logger, op *Op) ( return false, errors.Join(err, s.storer.Report(ctx, op.Chunk, storage.ChunkCouldNotSync)) } - switch receipt, err := s.pushSyncer.PushChunkToClosest(ctx, op.Chunk); { + switch _, err := s.pushSyncer.PushChunkToClosest(ctx, op.Chunk); { case errors.Is(err, topology.ErrWantSelf): // store the chunk loggerV1.Debug("chunk stays here, i'm the closest node", "chunk_address", op.Chunk.Address()) @@ -269,7 +278,7 @@ func (s *Service) pushDeferred(ctx context.Context, logger log.Logger, op *Op) ( return true, err } case errors.Is(err, pushsync.ErrShallowReceipt): - if retry := s.shallowReceipt(receipt); retry { + if retry := s.shallowReceipt(idAddress); retry { return true, err } if err := s.storer.Report(ctx, op.Chunk, storage.ChunkSynced); err != nil { @@ -291,11 +300,13 @@ func (s *Service) pushDeferred(ctx context.Context, logger log.Logger, op *Op) ( func (s *Service) pushDirect(ctx context.Context, logger log.Logger, op *Op) error { loggerV1 := logger.V(1).Build() - - var err error + idAddress, err := soc.IdentityAddress(op.Chunk) + if err != nil { + return err + } defer func() { - s.inflight.delete(op.Chunk) + s.inflight.delete(idAddress, op.Chunk.Stamp().BatchID()) select { case 
op.Err <- err: default: @@ -329,11 +340,11 @@ func (s *Service) pushDirect(ctx context.Context, logger log.Logger, op *Op) err return err } -func (s *Service) shallowReceipt(receipt *pushsync.Receipt) bool { - if s.attempts.try(receipt.Address) { +func (s *Service) shallowReceipt(idAddress swarm.Address) bool { + if s.attempts.try(idAddress) { return true } - s.attempts.delete(receipt.Address) + s.attempts.delete(idAddress) return false } From 7d42efd42d2855edc202a51950279e02c53acb79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Wed, 25 Sep 2024 19:44:22 +0200 Subject: [PATCH 26/54] fix: remove unnecessary stamp higher condition --- pkg/storer/internal/reserve/reserve.go | 44 ++++++++++---------------- 1 file changed, 16 insertions(+), 28 deletions(-) diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index 4e7bfed506a..bb0c83d4dc9 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -141,38 +141,26 @@ func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { // same chunk address, same batch if sameAddressOldStamp != nil { - sameAddressOldStampIndex, err := stampindex.Load(s.IndexStore(), reserveScope, sameAddressOldStamp) - if err != nil { - return err - } - prev := binary.BigEndian.Uint64(sameAddressOldStampIndex.StampTimestamp) - curr := binary.BigEndian.Uint64(chunk.Stamp().Timestamp()) - if prev >= curr { - return fmt.Errorf("overwrite same chunk. prev %d cur %d batch %s: %w", prev, curr, hex.EncodeToString(chunk.Stamp().BatchID()), storage.ErrOverwriteNewerChunk) - } - // index collision with another chunk - if loadedStampIndex { - prev := binary.BigEndian.Uint64(oldStampIndex.StampTimestamp) - if prev >= curr { - return fmt.Errorf("overwrite same chunk. 
prev %d cur %d batch %s: %w", prev, curr, hex.EncodeToString(chunk.Stamp().BatchID()), storage.ErrOverwriteNewerChunk) - } - if !chunk.Address().Equal(oldStampIndex.ChunkAddress) { - r.logger.Debug( - "replacing chunk stamp index", - "old_chunk", oldStampIndex.ChunkAddress, - "new_chunk", chunk.Address(), - "batch_id", hex.EncodeToString(chunk.Stamp().BatchID()), - ) - // remove index items and chunk data - err = r.removeChunk(ctx, s, oldStampIndex.ChunkAddress, oldStampIndex.BatchID, oldStampIndex.StampHash) - if err != nil { - return fmt.Errorf("failed removing older chunk %s: %w", oldStampIndex.ChunkAddress, err) - } - shouldDecrReserveSize = true + if loadedStampIndex && !chunk.Address().Equal(oldStampIndex.ChunkAddress) { + r.logger.Debug( + "replacing chunk stamp index", + "old_chunk", oldStampIndex.ChunkAddress, + "new_chunk", chunk.Address(), + "batch_id", hex.EncodeToString(chunk.Stamp().BatchID()), + ) + // remove index items and chunk data + err = r.removeChunk(ctx, s, oldStampIndex.ChunkAddress, oldStampIndex.BatchID, oldStampIndex.StampHash) + if err != nil { + return fmt.Errorf("failed removing older chunk %s: %w", oldStampIndex.ChunkAddress, err) } + shouldDecrReserveSize = true } + sameAddressOldStampIndex, err := stampindex.Load(s.IndexStore(), reserveScope, sameAddressOldStamp) + if err != nil { + return err + } oldBatchRadiusItem := &BatchRadiusItem{ Bin: bin, Address: chunk.Address(), From b69dcc78c86deb1b30ebf33289d30f02365ccf9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 26 Sep 2024 13:37:13 +0200 Subject: [PATCH 27/54] fix: reserve put --- pkg/storer/internal/reserve/reserve.go | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index bb0c83d4dc9..8480f2b1c9b 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -5,6 +5,7 @@ package reserve import ( + "bytes" "context" "encoding/binary" "encoding/hex" @@ -139,10 +140,24 @@ func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { return err } - // same chunk address, same batch - if sameAddressOldStamp != nil { + // same chunk address, same batch, same index + if sameAddressOldStamp != nil && bytes.Equal(sameAddressOldStamp.Index(), chunk.Stamp().Index()) { + sameAddressOldStampIndex, err := stampindex.Load(s.IndexStore(), reserveScope, sameAddressOldStamp) + if err != nil { + return err + } + prev := binary.BigEndian.Uint64(sameAddressOldStampIndex.StampTimestamp) + curr := binary.BigEndian.Uint64(chunk.Stamp().Timestamp()) + if prev >= curr { + return fmt.Errorf("overwrite same chunk. prev %d cur %d batch %s: %w", prev, curr, hex.EncodeToString(chunk.Stamp().BatchID()), storage.ErrOverwriteNewerChunk) + } + // index collision with another chunk if loadedStampIndex && !chunk.Address().Equal(oldStampIndex.ChunkAddress) { + prev := binary.BigEndian.Uint64(oldStampIndex.StampTimestamp) + if prev >= curr { + return fmt.Errorf("overwrite same chunk. 
prev %d cur %d batch %s: %w", prev, curr, hex.EncodeToString(chunk.Stamp().BatchID()), storage.ErrOverwriteNewerChunk) + } r.logger.Debug( "replacing chunk stamp index", "old_chunk", oldStampIndex.ChunkAddress, @@ -157,10 +172,6 @@ func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { shouldDecrReserveSize = true } - sameAddressOldStampIndex, err := stampindex.Load(s.IndexStore(), reserveScope, sameAddressOldStamp) - if err != nil { - return err - } oldBatchRadiusItem := &BatchRadiusItem{ Bin: bin, Address: chunk.Address(), From 4c40e9a307eb13e92d4e97554c1332301de44c0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Mon, 30 Sep 2024 11:26:56 +0200 Subject: [PATCH 28/54] fix: important reserve changes same ps index --- pkg/storer/internal/reserve/reserve.go | 132 +++++++++----------- pkg/storer/internal/reserve/reserve_test.go | 45 +++++-- 2 files changed, 92 insertions(+), 85 deletions(-) diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index 8480f2b1c9b..e2ca74a381f 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -140,99 +140,83 @@ func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { return err } - // same chunk address, same batch, same index - if sameAddressOldStamp != nil && bytes.Equal(sameAddressOldStamp.Index(), chunk.Stamp().Index()) { + // same chunk address, same batch + if sameAddressOldStamp != nil { sameAddressOldStampIndex, err := stampindex.Load(s.IndexStore(), reserveScope, sameAddressOldStamp) if err != nil { return err } - prev := binary.BigEndian.Uint64(sameAddressOldStampIndex.StampTimestamp) - curr := binary.BigEndian.Uint64(chunk.Stamp().Timestamp()) - if prev >= curr { - return fmt.Errorf("overwrite same chunk. prev %d cur %d batch %s: %w", prev, curr, hex.EncodeToString(chunk.Stamp().BatchID()), storage.ErrOverwriteNewerChunk) - } - // index collision with another chunk - if loadedStampIndex && !chunk.Address().Equal(oldStampIndex.ChunkAddress) { - prev := binary.BigEndian.Uint64(oldStampIndex.StampTimestamp) + // same index + if bytes.Equal(sameAddressOldStamp.Index(), chunk.Stamp().Index()) { + prev := binary.BigEndian.Uint64(sameAddressOldStampIndex.StampTimestamp) + curr := binary.BigEndian.Uint64(chunk.Stamp().Timestamp()) if prev >= curr { return fmt.Errorf("overwrite same chunk. 
prev %d cur %d batch %s: %w", prev, curr, hex.EncodeToString(chunk.Stamp().BatchID()), storage.ErrOverwriteNewerChunk) } - r.logger.Debug( - "replacing chunk stamp index", - "old_chunk", oldStampIndex.ChunkAddress, - "new_chunk", chunk.Address(), - "batch_id", hex.EncodeToString(chunk.Stamp().BatchID()), - ) - // remove index items and chunk data - err = r.removeChunk(ctx, s, oldStampIndex.ChunkAddress, oldStampIndex.BatchID, oldStampIndex.StampHash) + + oldBatchRadiusItem := &BatchRadiusItem{ + Bin: bin, + Address: chunk.Address(), + BatchID: sameAddressOldStampIndex.BatchID, + StampHash: sameAddressOldStampIndex.StampHash, + } + // load item to get the binID + err = s.IndexStore().Get(oldBatchRadiusItem) if err != nil { - return fmt.Errorf("failed removing older chunk %s: %w", oldStampIndex.ChunkAddress, err) + return err } - shouldDecrReserveSize = true - } - oldBatchRadiusItem := &BatchRadiusItem{ - Bin: bin, - Address: chunk.Address(), - BatchID: sameAddressOldStampIndex.BatchID, - StampHash: sameAddressOldStampIndex.StampHash, - } - // load item to get the binID - err = s.IndexStore().Get(oldBatchRadiusItem) - if err != nil { - return err - } + // delete old chunk index items + err = errors.Join( + s.IndexStore().Delete(oldBatchRadiusItem), + s.IndexStore().Delete(&ChunkBinItem{Bin: oldBatchRadiusItem.Bin, BinID: oldBatchRadiusItem.BinID}), + stampindex.Delete(s.IndexStore(), reserveScope, sameAddressOldStamp), + chunkstamp.DeleteWithStamp(s.IndexStore(), reserveScope, oldBatchRadiusItem.Address, sameAddressOldStamp), + ) + if err != nil { + return err + } - // delete old chunk index items - err = errors.Join( - s.IndexStore().Delete(oldBatchRadiusItem), - s.IndexStore().Delete(&ChunkBinItem{Bin: oldBatchRadiusItem.Bin, BinID: oldBatchRadiusItem.BinID}), - stampindex.Delete(s.IndexStore(), reserveScope, sameAddressOldStamp), - chunkstamp.DeleteWithStamp(s.IndexStore(), reserveScope, oldBatchRadiusItem.Address, sameAddressOldStamp), - ) - if err != nil { - return err - } + binID, err := r.IncBinID(s.IndexStore(), bin) + if err != nil { + return err + } - binID, err := r.IncBinID(s.IndexStore(), bin) - if err != nil { - return err - } + err = errors.Join( + stampindex.Store(s.IndexStore(), reserveScope, chunk), + chunkstamp.Store(s.IndexStore(), reserveScope, chunk), + s.IndexStore().Put(&BatchRadiusItem{ + Bin: bin, + BinID: binID, + Address: chunk.Address(), + BatchID: chunk.Stamp().BatchID(), + StampHash: stampHash, + }), + s.IndexStore().Put(&ChunkBinItem{ + Bin: bin, + BinID: binID, + Address: chunk.Address(), + BatchID: chunk.Stamp().BatchID(), + ChunkType: chunkType, + StampHash: stampHash, + }), + ) + if err != nil { + return err + } - err = errors.Join( - stampindex.Store(s.IndexStore(), reserveScope, chunk), - chunkstamp.Store(s.IndexStore(), reserveScope, chunk), - s.IndexStore().Put(&BatchRadiusItem{ - Bin: bin, - BinID: binID, - Address: chunk.Address(), - BatchID: chunk.Stamp().BatchID(), - StampHash: stampHash, - }), - s.IndexStore().Put(&ChunkBinItem{ - Bin: bin, - BinID: binID, - Address: chunk.Address(), - BatchID: chunk.Stamp().BatchID(), - ChunkType: chunkType, - StampHash: stampHash, - }), - ) - if err != nil { - return err - } + if chunkType != swarm.ChunkTypeSingleOwner { + return nil + } - if chunkType != swarm.ChunkTypeSingleOwner { - return nil + r.logger.Debug("replacing soc in chunkstore", "address", chunk.Address()) + return s.ChunkStore().Replace(ctx, chunk) } - - r.logger.Debug("replacing soc in chunkstore", "address", chunk.Address()) - return 
s.ChunkStore().Replace(ctx, chunk) } // different address, same batch, index collision - if loadedStampIndex { + if loadedStampIndex && !chunk.Address().Equal(oldStampIndex.ChunkAddress) { prev := binary.BigEndian.Uint64(oldStampIndex.StampTimestamp) curr := binary.BigEndian.Uint64(chunk.Stamp().Timestamp()) if prev >= curr { diff --git a/pkg/storer/internal/reserve/reserve_test.go b/pkg/storer/internal/reserve/reserve_test.go index ad7dd13d019..d06bb86ca46 100644 --- a/pkg/storer/internal/reserve/reserve_test.go +++ b/pkg/storer/internal/reserve/reserve_test.go @@ -197,12 +197,14 @@ func TestSameChunkAddress(t *testing.T) { bin := swarm.Proximity(baseAddr.Bytes(), ch1.Address().Bytes()) binBinIDs[bin] += 1 err = r.Put(ctx, ch2) - if !errors.Is(err, storage.ErrOverwriteNewerChunk) { - t.Fatal("expected error") + if err != nil { + t.Fatal(err) } + bin2 := swarm.Proximity(baseAddr.Bytes(), ch2.Address().Bytes()) + binBinIDs[bin2] += 1 size2 := r.Size() - if size2-size1 != 1 { - t.Fatalf("expected reserve size to increase by 1, got %d", size2-size1) + if size2-size1 != 2 { + t.Fatalf("expected reserve size to increase by 2, got %d", size2-size1) } }) @@ -269,11 +271,20 @@ func TestSameChunkAddress(t *testing.T) { s2 := soctesting.GenerateMockSocWithSigner(t, []byte("update"), signer) ch2 := s2.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 1, 6)) bin := swarm.Proximity(baseAddr.Bytes(), ch1.Address().Bytes()) + err := r.Put(ctx, ch1) + if err != nil { + t.Fatal(err) + } + err = r.Put(ctx, ch2) + if err != nil { + t.Fatal(err) + } binBinIDs[bin] += 2 - replace(t, ch1, ch2, binBinIDs[bin]-1, binBinIDs[bin]) + checkChunkInIndexStore(t, ts.IndexStore(), bin, binBinIDs[bin]-1, ch1) + checkChunkInIndexStore(t, ts.IndexStore(), bin, binBinIDs[bin], ch2) size2 := r.Size() - if size2-size1 != 1 { - t.Fatalf("expected reserve size to increase by 1, got %d", size2-size1) + if size2-size1 != 2 { + t.Fatalf("expected reserve size to increase by 2, got %d", size2-size1) } }) @@ -435,16 +446,17 @@ func TestSameChunkAddress(t *testing.T) { ch3BinID := binBinIDs[bin2] checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: bin1, BatchID: ch1.Stamp().BatchID(), Address: ch1.Address(), StampHash: ch1StampHash}, true) - checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: bin2, BatchID: ch2.Stamp().BatchID(), Address: ch2.Address(), StampHash: ch2StampHash}, true) + // different index, same batch + checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: bin2, BatchID: ch2.Stamp().BatchID(), Address: ch2.Address(), StampHash: ch2StampHash}, false) checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: bin2, BatchID: ch3.Stamp().BatchID(), Address: ch3.Address(), StampHash: ch3StampHash}, false) checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: bin1, BinID: ch1BinID, StampHash: ch1StampHash}, true) - checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: bin2, BinID: ch2BinID, StampHash: ch2StampHash}, true) + checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: bin2, BinID: ch2BinID, StampHash: ch2StampHash}, false) checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: bin2, BinID: ch3BinID, StampHash: ch3StampHash}, false) size2 := r.Size() - // (ch1 + ch2) == 2 and then ch3 reduces reserve size by 1 - if size2-size1 != 1 { + // (ch1 + ch2) == 2 + if size2-size1 != 2 { t.Fatalf("expected reserve size to increase by 1, got %d", size2-size1) } }) @@ -908,3 +920,14 @@ func getSigner(t *testing.T) crypto.Signer { } return crypto.NewDefaultSigner(privKey) } + 
+func checkChunkInIndexStore(t *testing.T, s storage.Reader, bin uint8, binId uint64, ch swarm.Chunk) { + t.Helper() + stampHash, err := ch.Stamp().Hash() + if err != nil { + t.Fatal(err) + } + + checkStore(t, s, &reserve.BatchRadiusItem{Bin: bin, BatchID: ch.Stamp().BatchID(), Address: ch.Address(), StampHash: stampHash}, false) + checkStore(t, s, &reserve.ChunkBinItem{Bin: bin, BinID: binId, StampHash: stampHash}, false) +} From e6e98f70453ffb9ff6a9f20ae8fedbdb688a45e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Mon, 30 Sep 2024 15:54:34 +0200 Subject: [PATCH 29/54] fix: lock by batch id and stamp --- pkg/storer/internal/reserve/reserve.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index e2ca74a381f..a79462ccd3a 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -102,8 +102,9 @@ func New( func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { // batchID lock, Put vs Eviction - r.multx.Lock(string(chunk.Stamp().BatchID())) - defer r.multx.Unlock(string(chunk.Stamp().BatchID())) + lockId := lockId(chunk.Stamp()) + r.multx.Lock(lockId) + defer r.multx.Unlock(lockId) stampHash, err := chunk.Stamp().Hash() if err != nil { @@ -641,3 +642,7 @@ func (r *Reserve) IncBinID(store storage.IndexStore, bin uint8) (uint64, error) return item.BinID, store.Put(item) } + +func lockId(stamp swarm.Stamp) string { + return string(stamp.BatchID()) + string(stamp.Index()) +} From a3faffdb6ef604c92e0af51f15eff4632146099b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Tue, 1 Oct 2024 16:19:28 +0200 Subject: [PATCH 30/54] fix: new multex to lock reserve put --- pkg/storer/internal/reserve/reserve.go | 30 +++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index a79462ccd3a..fda425591e8 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -25,11 +25,27 @@ import ( "github.com/ethersphere/bee/v2/pkg/swarm" "github.com/ethersphere/bee/v2/pkg/topology" "golang.org/x/sync/errgroup" - "resenje.org/multex" ) const reserveScope = "reserve" +type multexLock struct { + mul map[string]struct{} + mu chan struct{} +} + +func (m *multexLock) Lock(id string) { + m.mu <- struct{}{} + m.mul[id] = struct{}{} + <-m.mu +} + +func (m *multexLock) Unlock(id string) { + m.mu <- struct{}{} + delete(m.mul, id) + <-m.mu +} + type Reserve struct { baseAddr swarm.Address radiusSetter topology.SetStorageRadiuser @@ -39,7 +55,7 @@ type Reserve struct { size atomic.Int64 radius atomic.Uint32 - multx *multex.Multex + multx multexLock st transaction.Storage } @@ -57,7 +73,7 @@ func New( capacity: capacity, radiusSetter: radiusSetter, logger: logger.WithName(reserveScope).Register(), - multx: multex.New(), + multx: multexLock{mul: make(map[string]struct{}), mu: make(chan struct{}, 1)}, } err := st.Run(context.Background(), func(s transaction.Store) error { @@ -101,6 +117,11 @@ func New( // if the new chunk has a higher stamp timestamp (regardless of batch type and chunk type, eg CAC & SOC). 
func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { + chunkType := storage.ChunkType(chunk) + if chunkType == swarm.ChunkTypeUnspecified { + return errors.New("chunk type unspecified") + } + // batchID lock, Put vs Eviction lockId := lockId(chunk.Stamp()) r.multx.Lock(lockId) @@ -120,7 +141,6 @@ func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { return nil } - chunkType := storage.ChunkType(chunk) bin := swarm.Proximity(r.baseAddr.Bytes(), chunk.Address().Bytes()) // bin lock @@ -644,5 +664,5 @@ func (r *Reserve) IncBinID(store storage.IndexStore, bin uint8) (uint64, error) } func lockId(stamp swarm.Stamp) string { - return string(stamp.BatchID()) + string(stamp.Index()) + return fmt.Sprintf("%x-%x", stamp.BatchID(), stamp.Index()) } From 450d9731890dfa1b085127943a16576205242045 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Tue, 1 Oct 2024 16:41:31 +0200 Subject: [PATCH 31/54] fix: remove ChunkTypeUnspecified check --- pkg/storer/internal/reserve/reserve.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index fda425591e8..aca61a48a53 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -118,9 +118,6 @@ func New( func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { chunkType := storage.ChunkType(chunk) - if chunkType == swarm.ChunkTypeUnspecified { - return errors.New("chunk type unspecified") - } // batchID lock, Put vs Eviction lockId := lockId(chunk.Stamp()) From 68f97ce7d61f33591f7bad268e83686ee1616f9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Tue, 15 Oct 2024 09:20:41 +0200 Subject: [PATCH 32/54] feat: postage stamping for gsoc --- pkg/api/api.go | 8 +++++++- pkg/api/envelope.go | 2 +- pkg/postage/mock/stamper.go | 7 ++++++- pkg/postage/stamp_test.go | 8 +++++++- pkg/postage/stamper.go | 18 ++++++++++++++---- pkg/postage/stamper_test.go | 16 ++++++++++------ pkg/pss/pss.go | 2 +- pkg/pss/pss_test.go | 13 ++++++++++--- pkg/steward/steward.go | 2 +- 9 files changed, 57 insertions(+), 19 deletions(-) diff --git a/pkg/api/api.go b/pkg/api/api.go index bced503484c..74911b83912 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -48,6 +48,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/settlement/swap" "github.com/ethersphere/bee/v2/pkg/settlement/swap/chequebook" "github.com/ethersphere/bee/v2/pkg/settlement/swap/erc20" + "github.com/ethersphere/bee/v2/pkg/soc" "github.com/ethersphere/bee/v2/pkg/status" "github.com/ethersphere/bee/v2/pkg/steward" storage "github.com/ethersphere/bee/v2/pkg/storage" @@ -685,7 +686,12 @@ type putterSessionWrapper struct { } func (p *putterSessionWrapper) Put(ctx context.Context, chunk swarm.Chunk) error { - stamp, err := p.stamper.Stamp(chunk.Address()) + idAddress, err := soc.IdentityAddress(chunk) + if err != nil { + return err + } + + stamp, err := p.stamper.Stamp(chunk.Address(), idAddress) if err != nil { return err } diff --git a/pkg/api/envelope.go b/pkg/api/envelope.go index 6a99b98f61c..ea12283ac4d 100644 --- a/pkg/api/envelope.go +++ b/pkg/api/envelope.go @@ -59,7 +59,7 @@ func (s *Service) envelopePostHandler(w http.ResponseWriter, r *http.Request) { return } - stamp, err := stamper.Stamp(paths.Address) + stamp, err := stamper.Stamp(paths.Address, paths.Address) if err != nil { logger.Debug("split write all failed", "error", err) logger.Error(nil, "split write all failed") diff --git 
a/pkg/postage/mock/stamper.go b/pkg/postage/mock/stamper.go index f95700eb5e5..9fbc5268b1c 100644 --- a/pkg/postage/mock/stamper.go +++ b/pkg/postage/mock/stamper.go @@ -17,6 +17,11 @@ func NewStamper() postage.Stamper { } // Stamp implements the Stamper interface. It returns an empty postage stamp. -func (mockStamper) Stamp(_ swarm.Address) (*postage.Stamp, error) { +func (mockStamper) Stamp(_, _ swarm.Address) (*postage.Stamp, error) { return &postage.Stamp{}, nil } + +// BatchId implements the Stamper interface. It returns a nil batch ID. +func (mockStamper) BatchId() []byte { + return nil +} diff --git a/pkg/postage/stamp_test.go b/pkg/postage/stamp_test.go index 8704cf91b65..b1547b6581c 100644 --- a/pkg/postage/stamp_test.go +++ b/pkg/postage/stamp_test.go @@ -14,6 +14,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/postage" "github.com/ethersphere/bee/v2/pkg/postage/batchstore/mock" postagetesting "github.com/ethersphere/bee/v2/pkg/postage/testing" + "github.com/ethersphere/bee/v2/pkg/soc" "github.com/ethersphere/bee/v2/pkg/storage/inmemstore" chunktesting "github.com/ethersphere/bee/v2/pkg/storage/testing" ) @@ -103,7 +104,12 @@ func TestValidStamp(t *testing.T) { // stamp on execution ch := chunktesting.GenerateTestRandomChunk() - st, err := stamper.Stamp(ch.Address()) + idAddress, err := soc.IdentityAddress(ch) + if err != nil { + t.Fatal(err) + } + + st, err := stamper.Stamp(ch.Address(), idAddress) if err != nil { t.Fatal(err) } diff --git a/pkg/postage/stamper.go b/pkg/postage/stamper.go index d4f3b8187fa..101d6f78673 100644 --- a/pkg/postage/stamper.go +++ b/pkg/postage/stamper.go @@ -21,7 +21,8 @@ var ( // Stamper can issue stamps from the given address of chunk. type Stamper interface { - Stamp(swarm.Address) (*Stamp, error) + Stamp(idAddr, addr swarm.Address) (*Stamp, error) + BatchId() []byte } // stamper connects a stampissuer with a signer. @@ -39,13 +40,13 @@ func NewStamper(store storage.Store, issuer *StampIssuer, signer crypto.Signer) // Stamp takes chunk, see if the chunk can be included in the batch and // signs it with the owner of the batch of this Stamp issuer.
-func (st *stamper) Stamp(addr swarm.Address) (*Stamp, error) { +func (st *stamper) Stamp(addr, idAddr swarm.Address) (*Stamp, error) { st.issuer.mtx.Lock() defer st.issuer.mtx.Unlock() item := &StampItem{ BatchID: st.issuer.data.BatchID, - chunkAddress: addr, + chunkAddress: idAddr, } switch err := st.store.Get(item); { case err == nil: @@ -81,6 +82,11 @@ func (st *stamper) Stamp(addr swarm.Address) (*Stamp, error) { return NewStamp(st.issuer.data.BatchID, item.BatchIndex, item.BatchTimestamp, sig), nil } +// BatchId gives back batch id of stamper +func (st *stamper) BatchId() []byte { + return st.issuer.data.BatchID +} + type presignedStamper struct { stamp *Stamp owner []byte @@ -90,7 +96,7 @@ func NewPresignedStamper(stamp *Stamp, owner []byte) Stamper { return &presignedStamper{stamp, owner} } -func (st *presignedStamper) Stamp(addr swarm.Address) (*Stamp, error) { +func (st *presignedStamper) Stamp(addr, _ swarm.Address) (*Stamp, error) { // check stored stamp is against the chunk address // Recover the public key from the signature signerAddr, err := RecoverBatchOwner(addr, st.stamp) @@ -104,3 +110,7 @@ func (st *presignedStamper) Stamp(addr swarm.Address) (*Stamp, error) { return st.stamp, nil } + +func (st *presignedStamper) BatchId() []byte { + return st.stamp.BatchID() +} diff --git a/pkg/postage/stamper_test.go b/pkg/postage/stamper_test.go index a1c589b145c..4069a6daac6 100644 --- a/pkg/postage/stamper_test.go +++ b/pkg/postage/stamper_test.go @@ -33,7 +33,7 @@ func TestStamperStamping(t *testing.T) { t.Helper() chunkAddr := swarm.RandAddress(t) - stamp, err := stamper.Stamp(chunkAddr) + stamp, err := stamper.Stamp(chunkAddr, chunkAddr) if err != nil { t.Fatal(err) } @@ -71,12 +71,14 @@ func TestStamperStamping(t *testing.T) { // issue another 15 // collision depth is 8, committed batch depth is 12, bucket volume 2^4 for i := 0; i < 14; i++ { - _, err = stamper.Stamp(swarm.RandAddressAt(t, chunkAddr, 8)) + randAddr := swarm.RandAddressAt(t, chunkAddr, 8) + _, err = stamper.Stamp(randAddr, randAddr) if err != nil { t.Fatalf("error adding stamp at step %d: %v", i, err) } } - stamp, err := stamper.Stamp(swarm.RandAddressAt(t, chunkAddr, 8)) + randAddr := swarm.RandAddressAt(t, chunkAddr, 8) + stamp, err := stamper.Stamp(randAddr, randAddr) if err != nil { t.Fatalf("error adding last stamp: %v", err) } @@ -95,13 +97,15 @@ func TestStamperStamping(t *testing.T) { // issue another 15 // collision depth is 8, committed batch depth is 12, bucket volume 2^4 for i := 0; i < 15; i++ { - _, err = stamper.Stamp(swarm.RandAddressAt(t, chunkAddr, 8)) + randAddr := swarm.RandAddressAt(t, chunkAddr, 8) + _, err = stamper.Stamp(randAddr, randAddr) if err != nil { t.Fatalf("error adding stamp at step %d: %v", i, err) } } + randAddr := swarm.RandAddressAt(t, chunkAddr, 8) // the bucket should now be full, not allowing a stamp for the pivot chunk - if _, err = stamper.Stamp(swarm.RandAddressAt(t, chunkAddr, 8)); !errors.Is(err, postage.ErrBucketFull) { + if _, err = stamper.Stamp(randAddr, randAddr); !errors.Is(err, postage.ErrBucketFull) { t.Fatalf("expected ErrBucketFull, got %v", err) } }) @@ -117,7 +121,7 @@ func TestStamperStamping(t *testing.T) { WithBatchIndex(index) testSt := &testStore{Store: inmemstore.New(), stampItem: testItem} stamper := postage.NewStamper(testSt, st, signer) - stamp, err := stamper.Stamp(chunkAddr) + stamp, err := stamper.Stamp(chunkAddr, chunkAddr) if err != nil { t.Fatal(err) } diff --git a/pkg/pss/pss.go b/pkg/pss/pss.go index 3a09db0b633..454f5960129 100644 --- 
a/pkg/pss/pss.go +++ b/pkg/pss/pss.go @@ -99,7 +99,7 @@ func (p *pss) Send(ctx context.Context, topic Topic, payload []byte, stamper pos return err } - stamp, err := stamper.Stamp(tc.Address()) + stamp, err := stamper.Stamp(tc.Address(), tc.Address()) if err != nil { return err } diff --git a/pkg/pss/pss_test.go b/pkg/pss/pss_test.go index 1d04237bbaa..685adb59fb9 100644 --- a/pkg/pss/pss_test.go +++ b/pkg/pss/pss_test.go @@ -236,8 +236,15 @@ func ensureCalls(t *testing.T, calls *int, exp int) { } } -type stamper struct{} +type stamper struct { + stamp *postage.Stamp +} + +func (s *stamper) Stamp(_, _ swarm.Address) (*postage.Stamp, error) { + stamp := postagetesting.MustNewStamp() + return stamp, nil +} -func (s *stamper) Stamp(_ swarm.Address) (*postage.Stamp, error) { - return postagetesting.MustNewStamp(), nil +func (s *stamper) BatchId() []byte { + return s.stamp.BatchID() } diff --git a/pkg/steward/steward.go b/pkg/steward/steward.go index 9726ed1baa3..f318711d8ff 100644 --- a/pkg/steward/steward.go +++ b/pkg/steward/steward.go @@ -61,7 +61,7 @@ func (s *steward) Reupload(ctx context.Context, root swarm.Address, stamper post return err } - stamp, err := stamper.Stamp(c.Address()) + stamp, err := stamper.Stamp(c.Address(), c.Address()) if err != nil { return fmt.Errorf("stamping chunk %s: %w", c.Address(), err) } From 074602b3561d8dd14f53ba9e4e50164d5f10405b Mon Sep 17 00:00:00 2001 From: Acha Bill <57879913+acha-bill@users.noreply.github.com> Date: Wed, 30 Oct 2024 13:13:25 -0400 Subject: [PATCH 33/54] fix: use resenje multex (#4883) --- pkg/storer/internal/reserve/reserve.go | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index 45af6919a4c..25aa8d119a6 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -25,27 +25,11 @@ import ( "github.com/ethersphere/bee/v2/pkg/swarm" "github.com/ethersphere/bee/v2/pkg/topology" "golang.org/x/sync/errgroup" + "resenje.org/multex" ) const reserveScope = "reserve" -type multexLock struct { - mul map[string]struct{} - mu chan struct{} -} - -func (m *multexLock) Lock(id string) { - m.mu <- struct{}{} - m.mul[id] = struct{}{} - <-m.mu -} - -func (m *multexLock) Unlock(id string) { - m.mu <- struct{}{} - delete(m.mul, id) - <-m.mu -} - type Reserve struct { baseAddr swarm.Address radiusSetter topology.SetStorageRadiuser @@ -55,7 +39,7 @@ type Reserve struct { size atomic.Int64 radius atomic.Uint32 - multx multexLock + multx *multex.Multex st transaction.Storage } @@ -73,7 +57,7 @@ func New( capacity: capacity, radiusSetter: radiusSetter, logger: logger.WithName(reserveScope).Register(), - multx: multexLock{mul: make(map[string]struct{}), mu: make(chan struct{}, 1)}, + multx: multex.New(), } err := st.Run(context.Background(), func(s transaction.Store) error { From cbec2171626a299bdd882f442089936a691e7bc1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 31 Oct 2024 11:39:22 +0100 Subject: [PATCH 34/54] refactor: remove waitgroup in hook function calls --- pkg/gsoc/gsoc.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/pkg/gsoc/gsoc.go b/pkg/gsoc/gsoc.go index c2f88fc551c..e0d4484d9b7 100644 --- a/pkg/gsoc/gsoc.go +++ b/pkg/gsoc/gsoc.go @@ -25,7 +25,7 @@ type listener struct { logger log.Logger } -// New returns a new pss service. +// New returns a new GSOC listener service. 
func New(logger log.Logger) Listener { return &listener{ logger: logger, @@ -34,7 +34,7 @@ func New(logger log.Logger) Listener { } } -// Subscribe allows the definition of a Handler func for a specific topic on the pss struct. +// Subscribe allows the definition of a Handler func on a specific GSOC address. func (l *listener) Subscribe(address [32]byte, handler handler) (cleanup func()) { l.handlersMu.Lock() defer l.handlersMu.Unlock() @@ -69,11 +69,8 @@ func (l *listener) Handle(c soc.SOC) { "GSOC Address", addr, "wrapped chunk address", c.WrappedChunk().Address()) - var wg sync.WaitGroup for _, hh := range h { - wg.Add(1) go func(hh handler) { - defer wg.Done() hh(c.WrappedChunk().Data()[swarm.SpanSize:]) }(*hh) } From 8abb1ad228014bd4b5acccfdc33bbefc287be787 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 31 Oct 2024 11:42:59 +0100 Subject: [PATCH 35/54] refactor: add closer function for gsoc sub --- pkg/node/node.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/node/node.go b/pkg/node/node.go index 2eea58d8b4a..c8c435d8a4a 100644 --- a/pkg/node/node.go +++ b/pkg/node/node.go @@ -104,6 +104,7 @@ type Bee struct { accountingCloser io.Closer pullSyncCloser io.Closer pssCloser io.Closer + gsocCloser io.Closer ethClientCloser func() transactionMonitorCloser io.Closer transactionCloser io.Closer @@ -901,6 +902,7 @@ func NewBee( pssService := pss.New(pssPrivateKey, logger) gsocService := gsoc.New(logger) b.pssCloser = pssService + b.gsocCloser = gsocService validStamp := postage.ValidStamp(batchStore) @@ -1255,11 +1257,15 @@ func (b *Bee) Shutdown() error { } var wg sync.WaitGroup - wg.Add(7) + wg.Add(8) go func() { defer wg.Done() tryClose(b.pssCloser, "pss") }() + go func() { + defer wg.Done() + tryClose(b.gsocCloser, "gsoc") + }() go func() { defer wg.Done() tryClose(b.pusherCloser, "pusher") From ec01cf90334f6b60dba7655bd3fe383beaafaa27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 31 Oct 2024 11:44:33 +0100 Subject: [PATCH 36/54] refactor: wrong parameter name ordering --- pkg/postage/stamper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/postage/stamper.go b/pkg/postage/stamper.go index 101d6f78673..77256ceecc0 100644 --- a/pkg/postage/stamper.go +++ b/pkg/postage/stamper.go @@ -21,7 +21,7 @@ var ( // Stamper can issue stamps from the given address of chunk. 
type Stamper interface { - Stamp(idAddr, addr swarm.Address) (*Stamp, error) + Stamp(addr, idAddr swarm.Address) (*Stamp, error) BatchId() []byte } From d7e49e7094ea55c701291b01dba6ef745f7ff64a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 31 Oct 2024 16:20:20 +0100 Subject: [PATCH 37/54] refactor: remove duplicated api route def --- pkg/api/router.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pkg/api/router.go b/pkg/api/router.go index 08e66d870ef..eeb10402f61 100644 --- a/pkg/api/router.go +++ b/pkg/api/router.go @@ -338,10 +338,6 @@ func (s *Service) mountAPI() { ), }) - handle("/pss/subscribe/{topic}", web.ChainHandlers( - web.FinalHandlerFunc(s.pssWsHandler), - )) - handle("/gsoc/subscribe/{address}", web.ChainHandlers( web.FinalHandlerFunc(s.gsocWsHandler), )) From 8e4cb2e8307c535d07cd3e0a0cdbd12f38235997 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 31 Oct 2024 16:43:47 +0100 Subject: [PATCH 38/54] docs: added comments --- pkg/postage/stamper.go | 1 + pkg/soc/utils.go | 3 +++ 2 files changed, 4 insertions(+) diff --git a/pkg/postage/stamper.go b/pkg/postage/stamper.go index 77256ceecc0..bd9ce86a390 100644 --- a/pkg/postage/stamper.go +++ b/pkg/postage/stamper.go @@ -21,6 +21,7 @@ var ( // Stamper can issue stamps from the given address of chunk. type Stamper interface { + // addr is the request address of the chunk and idAddr is the identity address of the chunk. Stamp(addr, idAddr swarm.Address) (*Stamp, error) BatchId() []byte } diff --git a/pkg/soc/utils.go b/pkg/soc/utils.go index f07c2f2102d..6ca3364c967 100644 --- a/pkg/soc/utils.go +++ b/pkg/soc/utils.go @@ -7,6 +7,9 @@ package soc import "github.com/ethersphere/bee/v2/pkg/swarm" // IdentityAddress returns the internally used address for the chunk +// since the single owner chunk address is not a unique identifier for the chunk, +// but hashing the soc address and the wrapped chunk address is. +// it is used in the reserve sampling and other places where a key is needed to represent a chunk. 
func IdentityAddress(chunk swarm.Chunk) (swarm.Address, error) { // check the chunk is single owner chunk or cac if sch, err := FromChunk(chunk); err == nil { From 160f258d7296e7a17e3416da08a3693be76626c6 Mon Sep 17 00:00:00 2001 From: Acha Bill <57879913+acha-bill@users.noreply.github.com> Date: Thu, 31 Oct 2024 11:45:12 -0400 Subject: [PATCH 39/54] test: add reserve case (#4886) --- pkg/storer/internal/reserve/reserve_test.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/pkg/storer/internal/reserve/reserve_test.go b/pkg/storer/internal/reserve/reserve_test.go index 7769af49389..6f36b80177a 100644 --- a/pkg/storer/internal/reserve/reserve_test.go +++ b/pkg/storer/internal/reserve/reserve_test.go @@ -399,7 +399,7 @@ func TestSameChunkAddress(t *testing.T) { t.Run("same address but index collision with different chunk", func(t *testing.T) { size1 := r.Size() batch := postagetesting.MustNewBatch() - ch1 := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(postagetesting.MustNewFields(batch.ID, 0, 0)) + ch1 := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(postagetesting.MustNewFields(batch.ID, 0, 1)) err = r.Put(ctx, ch1) if err != nil { t.Fatal(err) } @@ -414,7 +414,7 @@ func TestSameChunkAddress(t *testing.T) { signer := getSigner(t) s1 := soctesting.GenerateMockSocWithSigner(t, []byte("data"), signer) - ch2 := s1.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 1, 1)) + ch2 := s1.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 1, 2)) err = r.Put(ctx, ch2) if err != nil { t.Fatal(err) } @@ -432,8 +432,16 @@ func TestSameChunkAddress(t *testing.T) { checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: bin1, BinID: binBinIDs[bin1], StampHash: ch1StampHash}, false) checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: bin2, BinID: binBinIDs[bin2], StampHash: ch2StampHash}, false) + // attempt to replace existing (unrelated) chunk that has timestamp s2 := soctesting.GenerateMockSocWithSigner(t, []byte("update"), signer) - ch3 := s2.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 0, 2)) + ch3 := s2.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 0, 0)) + err = r.Put(ctx, ch3) + if !errors.Is(err, storage.ErrOverwriteNewerChunk) { + t.Fatal("expected error") + } + + s2 = soctesting.GenerateMockSocWithSigner(t, []byte("update"), signer) + ch3 = s2.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 0, 3)) err = r.Put(ctx, ch3) if err != nil { t.Fatal(err) From c9098fda8b532c78ff93b302801d3d1968b354fa Mon Sep 17 00:00:00 2001 From: nugaon Date: Thu, 31 Oct 2024 16:59:32 +0100 Subject: [PATCH 40/54] test: correcting fatal prompt message Co-authored-by: Acha Bill <57879913+acha-bill@users.noreply.github.com> --- pkg/storer/internal/reserve/reserve_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/storer/internal/reserve/reserve_test.go b/pkg/storer/internal/reserve/reserve_test.go index 6f36b80177a..15993fd7de8 100644 --- a/pkg/storer/internal/reserve/reserve_test.go +++ b/pkg/storer/internal/reserve/reserve_test.go @@ -465,7 +465,7 @@ func TestSameChunkAddress(t *testing.T) { // (ch1 + ch2) == 2 if size2-size1 != 2 { - t.Fatalf("expected reserve size to increase by 1, got %d", size2-size1) + t.Fatalf("expected reserve size to increase by 2, got %d", size2-size1) } }) From 4b27d650aa47ccc6574207728d199888378688e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 31 Oct 2024 17:02:36 +0100 Subject: [PATCH 41/54] refactor: remove
shouldDecrReserveSize --- pkg/storer/internal/reserve/reserve.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index 25aa8d119a6..e40c454be08 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -128,7 +128,7 @@ func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { r.multx.Lock(strconv.Itoa(int(bin))) defer r.multx.Unlock(strconv.Itoa(int(bin))) - var shouldIncReserveSize, shouldDecrReserveSize bool + var shouldIncReserveSize bool err = r.st.Run(ctx, func(s transaction.Store) error { @@ -290,9 +290,6 @@ func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { if shouldIncReserveSize { r.size.Add(1) } - if shouldDecrReserveSize { - r.size.Add(-1) - } return nil } From 691a43d51d2e2ea591543c6fc0620f534bdcb189 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 31 Oct 2024 17:34:57 +0100 Subject: [PATCH 42/54] docs: verbosing --- pkg/gsoc/gsoc.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/gsoc/gsoc.go b/pkg/gsoc/gsoc.go index e0d4484d9b7..e342de9f549 100644 --- a/pkg/gsoc/gsoc.go +++ b/pkg/gsoc/gsoc.go @@ -94,4 +94,5 @@ func (l *listener) Close() error { } // handler defines code to be executed upon reception of a GSOC sub message. +// it is used as a parameter definition. type handler func([]byte) From 7432ed3d5bacfd50d2ae4153597b323792807e8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Mon, 4 Nov 2024 11:12:20 +0100 Subject: [PATCH 43/54] fix: eviction locking --- pkg/storer/internal/reserve/reserve.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index e40c454be08..2a7e7d07236 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -104,9 +104,8 @@ func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { chunkType := storage.ChunkType(chunk) // batchID lock, Put vs Eviction - lockId := lockId(chunk.Stamp()) - r.multx.Lock(lockId) - defer r.multx.Unlock(lockId) + r.multx.Lock(string(chunk.Stamp().BatchID())) + defer r.multx.Unlock(string(chunk.Stamp().BatchID())) stampHash, err := chunk.Stamp().Hash() if err != nil { @@ -659,7 +658,3 @@ func (r *Reserve) IncBinID(store storage.IndexStore, bin uint8) (uint64, error) return item.BinID, store.Put(item) } - -func lockId(stamp swarm.Stamp) string { - return fmt.Sprintf("%x-%x", stamp.BatchID(), stamp.Index()) -} From 0ce6407eb482ae57d651bad1575e05013d423e45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Tue, 5 Nov 2024 12:11:45 +0100 Subject: [PATCH 44/54] test: identity address unit --- pkg/soc/utils_test.go | 79 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 pkg/soc/utils_test.go diff --git a/pkg/soc/utils_test.go b/pkg/soc/utils_test.go new file mode 100644 index 00000000000..a86e79083cb --- /dev/null +++ b/pkg/soc/utils_test.go @@ -0,0 +1,79 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+package soc_test + +import ( + "encoding/hex" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethersphere/bee/v2/pkg/cac" + "github.com/ethersphere/bee/v2/pkg/soc" + "github.com/ethersphere/bee/v2/pkg/swarm" +) + +// TestIdentityAddress tests the IdentityAddress function. +func TestIdentityAddress(t *testing.T) { + t.Run("single owner chunk", func(t *testing.T) { + // Create a single owner chunk (SOC) + owner := common.HexToAddress("8d3766440f0d7b949a5e32995d09619a7f86e632") + // signature of hash(id + chunk address of foo) + sig, err := hex.DecodeString("5acd384febc133b7b245e5ddc62d82d2cded9182d2716126cd8844509af65a053deb418208027f548e3e88343af6f84a8772fb3cebc0a1833a0ea7ec0c1348311b") + if err != nil { + t.Fatal(err) + } + id := make([]byte, swarm.HashSize) + copy(id, []byte("id")) + payload := []byte("foo") + ch, err := cac.New(payload) + if err != nil { + t.Fatal(err) + } + sch, err := soc.NewSigned(id, ch, owner.Bytes(), sig) + if err != nil { + t.Fatal(err) + } + schChunk, err := sch.Chunk() + if err != nil { + t.Fatal(err) + } + schAddress, err := sch.Address() + if err != nil { + t.Fatal(err) + } + + idAddr, err := soc.IdentityAddress(schChunk) + if err != nil { + t.Fatalf("IdentityAddress returned error: %v", err) + } + + if idAddr.IsZero() { + t.Fatalf("expected non-zero address, got zero address") + } + + if idAddr.Equal(schAddress) { + t.Fatalf("expected identity address to be different from SOC address") + } + }) + + t.Run("content addressed chunk", func(t *testing.T) { + // Create a content addressed chunk (CAC) + data := []byte("data") + cacChunk, err := cac.New(data) + if err != nil { + t.Fatalf("failed to create content addressed chunk: %v", err) + } + + // Call IdentityAddress with the CAC + addr, err := soc.IdentityAddress(cacChunk) + if err != nil { + t.Fatalf("IdentityAddress returned error: %v", err) + } + + // Verify the address matches the CAC address + if !addr.Equal(cacChunk.Address()) { + t.Fatalf("expected address %s, got %s", cacChunk.Address(), addr) + } + }) +} From d54dff5767e5d9b0a692099d73e208c6ff351eeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Tue, 5 Nov 2024 12:51:40 +0100 Subject: [PATCH 45/54] refactor: gsoc handler param soc as reference --- pkg/api/gsoc_test.go | 6 +++--- pkg/gsoc/gsoc.go | 4 ++-- pkg/gsoc/gsoc_test.go | 8 ++++---- pkg/pullsync/pullsync.go | 6 +++--- pkg/pullsync/pullsync_test.go | 2 +- pkg/pushsync/pushsync.go | 6 +++--- pkg/pushsync/pushsync_test.go | 20 ++++++++++---------- 7 files changed, 26 insertions(+), 26 deletions(-) diff --git a/pkg/api/gsoc_test.go b/pkg/api/gsoc_test.go index 5d0a70cc9c4..edef7a39842 100644 --- a/pkg/api/gsoc_test.go +++ b/pkg/api/gsoc_test.go @@ -45,7 +45,7 @@ func TestGsocWebsocketSingleHandler(t *testing.T) { socCh := soc.New(id, ch) ch, _ = socCh.Sign(signer) socCh, _ = soc.FromChunk(ch) - g.Handle(*socCh) + g.Handle(socCh) go expectMessage(t, cl, respC, payload) if err := <-respC; err != nil { @@ -87,7 +87,7 @@ func TestGsocWebsocketMultiHandler(t *testing.T) { t.Fatal(err) } - g.Handle(*socCh) + g.Handle(socCh) go expectMessage(t, cl, respC, payload) go expectMessage(t, cl2, respC, payload) @@ -125,7 +125,7 @@ func TestGsocPong(t *testing.T) { ch, _ = socCh.Sign(signer) socCh, _ = soc.FromChunk(ch) - g.Handle(*socCh) + g.Handle(socCh) go expectMessage(t, cl, respC, nil) if err := <-respC; err == nil || !strings.Contains(err.Error(), "i/o timeout") { diff --git a/pkg/gsoc/gsoc.go b/pkg/gsoc/gsoc.go index e342de9f549..4aecc27f5f6 100644 --- 
a/pkg/gsoc/gsoc.go +++ b/pkg/gsoc/gsoc.go @@ -14,7 +14,7 @@ import ( type Listener interface { Subscribe(address [32]byte, handler handler) (cleanup func()) - Handle(c soc.SOC) + Handle(c *soc.SOC) Close() error } @@ -56,7 +56,7 @@ func (l *listener) Subscribe(address [32]byte, handler handler) (cleanup func()) } // Handle is called by push/pull sync and passes the chunk its registered handler -func (l *listener) Handle(c soc.SOC) { +func (l *listener) Handle(c *soc.SOC) { addr, err := c.Address() if err != nil { return // no handler diff --git a/pkg/gsoc/gsoc_test.go b/pkg/gsoc/gsoc_test.go index 0bab1b39bfb..989225df068 100644 --- a/pkg/gsoc/gsoc_test.go +++ b/pkg/gsoc/gsoc_test.go @@ -66,7 +66,7 @@ func TestRegister(t *testing.T) { socCh2, _ = soc.FromChunk(ch2) // trigger soc upload on address1, check that only h1 is called - g.Handle(*socCh1) + g.Handle(socCh1) waitHandlerCallback(t, &msgChan, 1) @@ -76,7 +76,7 @@ func TestRegister(t *testing.T) { // register another handler on the first address cleanup := g.Subscribe([32]byte(address1.Bytes()), h3) - g.Handle(*socCh1) + g.Handle(socCh1) waitHandlerCallback(t, &msgChan, 2) @@ -86,7 +86,7 @@ func TestRegister(t *testing.T) { cleanup() // remove the last handler - g.Handle(*socCh1) + g.Handle(socCh1) waitHandlerCallback(t, &msgChan, 1) @@ -94,7 +94,7 @@ func TestRegister(t *testing.T) { ensureCalls(t, &h2Calls, 0) ensureCalls(t, &h3Calls, 1) - g.Handle(*socCh2) + g.Handle(socCh2) waitHandlerCallback(t, &msgChan, 1) diff --git a/pkg/pullsync/pullsync.go b/pkg/pullsync/pullsync.go index a917169e0d6..1f8d55d4495 100644 --- a/pkg/pullsync/pullsync.go +++ b/pkg/pullsync/pullsync.go @@ -71,7 +71,7 @@ type Syncer struct { store storer.Reserve quit chan struct{} unwrap func(swarm.Chunk) - gsocHandler func(soc.SOC) + gsocHandler func(*soc.SOC) validStamp postage.ValidStampFn intervalsSF singleflight.Group[string, *collectAddrsResult] syncInProgress atomic.Int32 @@ -88,7 +88,7 @@ func New( streamer p2p.Streamer, store storer.Reserve, unwrap func(swarm.Chunk), - gsocHandler func(soc.SOC), + gsocHandler func(*soc.SOC), validStamp postage.ValidStampFn, logger log.Logger, maxPage uint64, @@ -360,7 +360,7 @@ func (s *Syncer) Sync(ctx context.Context, peer swarm.Address, bin uint8, start if cac.Valid(chunk) { go s.unwrap(chunk) } else if chunk, err := soc.FromChunk(chunk); err == nil { - s.gsocHandler(*chunk) + s.gsocHandler(chunk) } else { s.logger.Debug("invalid cac/soc chunk", "error", swarm.ErrInvalidChunk, "peer_address", peer, "chunk", chunk) chunkErr = errors.Join(chunkErr, swarm.ErrInvalidChunk) diff --git a/pkg/pullsync/pullsync_test.go b/pkg/pullsync/pullsync_test.go index 68d9e04ecbc..fc80bae137f 100644 --- a/pkg/pullsync/pullsync_test.go +++ b/pkg/pullsync/pullsync_test.go @@ -354,7 +354,7 @@ func newPullSyncWithStamperValidator( storage := mock.NewReserve(o...) 
logger := log.Noop unwrap := func(swarm.Chunk) {} - socHandler := func(soc.SOC) {} + socHandler := func(*soc.SOC) {} ps := pullsync.New( s, storage, diff --git a/pkg/pushsync/pushsync.go b/pkg/pushsync/pushsync.go index 22348100dae..4c7983660cf 100644 --- a/pkg/pushsync/pushsync.go +++ b/pkg/pushsync/pushsync.go @@ -85,7 +85,7 @@ type PushSync struct { store Storer topologyDriver topology.Driver unwrap func(swarm.Chunk) - gsocHandler func(soc.SOC) + gsocHandler func(*soc.SOC) logger log.Logger accounting accounting.Interface pricer pricer.Interface @@ -115,7 +115,7 @@ func New( topology topology.Driver, fullNode bool, unwrap func(swarm.Chunk), - gsocHandler func(soc.SOC), + gsocHandler func(*soc.SOC), validStamp postage.ValidStampFn, logger log.Logger, accounting accounting.Interface, @@ -229,7 +229,7 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) if cac.Valid(chunk) { go ps.unwrap(chunk) } else if chunk, err := soc.FromChunk(chunk); err == nil { - ps.gsocHandler(*chunk) + ps.gsocHandler(chunk) } else { return swarm.ErrInvalidChunk } diff --git a/pkg/pushsync/pushsync_test.go b/pkg/pushsync/pushsync_test.go index 73296496753..8773c693de0 100644 --- a/pkg/pushsync/pushsync_test.go +++ b/pkg/pushsync/pushsync_test.go @@ -145,7 +145,7 @@ func TestSocListener(t *testing.T) { } sch2 = sch2.WithStamp(chunk2.Stamp()) expectedPayload := chunk1.Data() - gsocListener := func(soc soc.SOC) { + gsocListener := func(soc *soc.SOC) { if !bytes.Equal(soc.WrappedChunk().Data(), expectedPayload) { t.Fatalf("unexpected SOC payload on GSOC listener. got %s, want %s", soc.WrappedChunk().Data(), expectedPayload) } @@ -468,7 +468,7 @@ func TestPushChunkToClosestErrorAttemptRetry(t *testing.T) { }), ) - psPivot, pivotStorer := createPushSyncNodeWithAccounting(t, pivotNode, defaultPrices, recorder, nil, defaultSigner(chunk), pivotAccounting, log.Noop, func(soc.SOC) {}, mock.WithPeers(peer1, peer2, peer3, peer4)) + psPivot, pivotStorer := createPushSyncNodeWithAccounting(t, pivotNode, defaultPrices, recorder, nil, defaultSigner(chunk), pivotAccounting, log.Noop, func(*soc.SOC) {}, mock.WithPeers(peer1, peer2, peer3, peer4)) // Trigger the sending of chunk to the closest node receipt, err := psPivot.PushChunkToClosest(context.Background(), chunk) @@ -645,15 +645,15 @@ func TestPropagateErrMsg(t *testing.T) { captureLogger := log.NewLogger("test", log.WithSink(buf)) // Create the closest peer - psClosestPeer, _ := createPushSyncNodeWithAccounting(t, closestPeer, defaultPrices, nil, nil, faultySigner, accountingmock.NewAccounting(), log.Noop, func(soc.SOC) {}, mock.WithClosestPeerErr(topology.ErrWantSelf)) + psClosestPeer, _ := createPushSyncNodeWithAccounting(t, closestPeer, defaultPrices, nil, nil, faultySigner, accountingmock.NewAccounting(), log.Noop, func(*soc.SOC) {}, mock.WithClosestPeerErr(topology.ErrWantSelf)) // creating the pivot peer - psPivot, _ := createPushSyncNodeWithAccounting(t, pivotPeer, defaultPrices, nil, nil, defaultSigner(chunk), accountingmock.NewAccounting(), log.Noop, func(soc.SOC) {}, mock.WithPeers(closestPeer)) + psPivot, _ := createPushSyncNodeWithAccounting(t, pivotPeer, defaultPrices, nil, nil, defaultSigner(chunk), accountingmock.NewAccounting(), log.Noop, func(*soc.SOC) {}, mock.WithPeers(closestPeer)) combinedRecorder := streamtest.New(streamtest.WithProtocols(psPivot.Protocol(), psClosestPeer.Protocol()), streamtest.WithBaseAddr(triggerPeer)) // Creating the trigger peer - psTriggerPeer, _ := createPushSyncNodeWithAccounting(t, triggerPeer, 
defaultPrices, combinedRecorder, nil, defaultSigner(chunk), accountingmock.NewAccounting(), captureLogger, func(soc.SOC) {}, mock.WithPeers(pivotPeer)) + psTriggerPeer, _ := createPushSyncNodeWithAccounting(t, triggerPeer, defaultPrices, combinedRecorder, nil, defaultSigner(chunk), accountingmock.NewAccounting(), captureLogger, func(*soc.SOC) {}, mock.WithPeers(pivotPeer)) _, err := psTriggerPeer.PushChunkToClosest(context.Background(), chunk) if err == nil { @@ -829,7 +829,7 @@ func createPushSyncNode( ) (*pushsync.PushSync, *testStorer, accounting.Interface) { t.Helper() mockAccounting := accountingmock.NewAccounting() - ps, mstorer := createPushSyncNodeWithAccounting(t, addr, prices, recorder, unwrap, signer, mockAccounting, log.Noop, func(soc.SOC) {}, mockOpts...) + ps, mstorer := createPushSyncNodeWithAccounting(t, addr, prices, recorder, unwrap, signer, mockAccounting, log.Noop, func(*soc.SOC) {}, mockOpts...) return ps, mstorer, mockAccounting } @@ -838,7 +838,7 @@ func createGsocPushSyncNode( addr swarm.Address, prices pricerParameters, recorder *streamtest.Recorder, - gsocListener func(soc.SOC), + gsocListener func(*soc.SOC), signer crypto.Signer, mockOpts ...mock.Option, ) (*pushsync.PushSync, *testStorer, accounting.Interface) { @@ -878,7 +878,7 @@ func createPushSyncNodeWithRadius( radiusFunc := func() (uint8, error) { return radius, nil } - ps := pushsync.New(addr, 1, blockHash.Bytes(), recorderDisconnecter, storer, radiusFunc, mockTopology, true, unwrap, func(soc.SOC) {}, validStamp, log.Noop, accountingmock.NewAccounting(), mockPricer, signer, nil, -1) + ps := pushsync.New(addr, 1, blockHash.Bytes(), recorderDisconnecter, storer, radiusFunc, mockTopology, true, unwrap, func(*soc.SOC) {}, validStamp, log.Noop, accountingmock.NewAccounting(), mockPricer, signer, nil, -1) t.Cleanup(func() { ps.Close() }) return ps, storer @@ -893,7 +893,7 @@ func createPushSyncNodeWithAccounting( signer crypto.Signer, acct accounting.Interface, logger log.Logger, - gsocListener func(soc.SOC), + gsocListener func(*soc.SOC), mockOpts ...mock.Option, ) (*pushsync.PushSync, *testStorer) { t.Helper() @@ -910,7 +910,7 @@ func createPushSyncNodeWithAccounting( unwrap = func(swarm.Chunk) {} } if gsocListener == nil { - gsocListener = func(soc.SOC) {} + gsocListener = func(*soc.SOC) {} } validStamp := func(ch swarm.Chunk) (swarm.Chunk, error) { From c03ddeb0c3bd3cc74cae46752b359d32058c135f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Tue, 5 Nov 2024 12:56:50 +0100 Subject: [PATCH 46/54] docs: update put desc --- pkg/storer/internal/reserve/reserve.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go index 2a7e7d07236..cc48dc38197 100644 --- a/pkg/storer/internal/reserve/reserve.go +++ b/pkg/storer/internal/reserve/reserve.go @@ -93,11 +93,11 @@ func New( } // Reserve Put has to handle multiple possible scenarios. -// 1. Since the same chunk may belong to different postage batches, the reserve will support one chunk to many postage -// batches relationship. +// 1. Since the same chunk may belong to different postage stamp indices, the reserve will support one chunk to many postage +// stamp indices relationship. // 2. A new chunk that shares the same stamp index belonging to the same batch with an already stored chunk will overwrite // the existing chunk if the new chunk has a higher stamp timestamp (regardless of batch type). -// 3. 
A new chunk that has the same address belonging to the same batch with an already stored chunk will overwrite the existing chunk +// 3. A new chunk that has the same address belonging to the same stamp index with an already stored chunk will overwrite the existing chunk // if the new chunk has a higher stamp timestamp (regardless of batch type and chunk type, eg CAC & SOC). func (r *Reserve) Put(ctx context.Context, chunk swarm.Chunk) error { From 55e7af1211f18bfde3fa152fdc2fb2634cc7eb3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Tue, 5 Nov 2024 13:16:28 +0100 Subject: [PATCH 47/54] refactor: export handler --- pkg/gsoc/gsoc.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/gsoc/gsoc.go b/pkg/gsoc/gsoc.go index 4aecc27f5f6..91d5dcf925d 100644 --- a/pkg/gsoc/gsoc.go +++ b/pkg/gsoc/gsoc.go @@ -13,13 +13,13 @@ import ( ) type Listener interface { - Subscribe(address [32]byte, handler handler) (cleanup func()) + Subscribe(address [32]byte, handler Handler) (cleanup func()) Handle(c *soc.SOC) Close() error } type listener struct { - handlers map[[32]byte][]*handler + handlers map[[32]byte][]*Handler handlersMu sync.Mutex quit chan struct{} logger log.Logger @@ -29,13 +29,13 @@ type listener struct { func New(logger log.Logger) Listener { return &listener{ logger: logger, - handlers: make(map[[32]byte][]*handler), + handlers: make(map[[32]byte][]*Handler), quit: make(chan struct{}), } } // Subscribe allows the definition of a Handler func on a specific GSOC address. -func (l *listener) Subscribe(address [32]byte, handler handler) (cleanup func()) { +func (l *listener) Subscribe(address [32]byte, handler Handler) (cleanup func()) { l.handlersMu.Lock() defer l.handlersMu.Unlock() @@ -70,13 +70,13 @@ func (l *listener) Handle(c *soc.SOC) { "wrapped chunk address", c.WrappedChunk().Address()) for _, hh := range h { - go func(hh handler) { + go func(hh Handler) { hh(c.WrappedChunk().Data()[swarm.SpanSize:]) }(*hh) } } -func (p *listener) getHandlers(address [32]byte) []*handler { +func (p *listener) getHandlers(address [32]byte) []*Handler { p.handlersMu.Lock() defer p.handlersMu.Unlock() @@ -88,11 +88,11 @@ func (l *listener) Close() error { l.handlersMu.Lock() defer l.handlersMu.Unlock() - l.handlers = make(map[[32]byte][]*handler) //unset handlers on shutdown + l.handlers = make(map[[32]byte][]*Handler) //unset handlers on shutdown return nil } -// handler defines code to be executed upon reception of a GSOC sub message. +// Handler defines code to be executed upon reception of a GSOC sub message. // it is used as a parameter definition. 
-type handler func([]byte) +type Handler func([]byte) From 9e78194d644f8270e4a2b80bf7896c43557e00ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 7 Nov 2024 13:15:31 +0100 Subject: [PATCH 48/54] feat: remove pinning and uploadstore usage --- pkg/api/soc.go | 28 ++++------------------------ 1 file changed, 4 insertions(+), 24 deletions(-) diff --git a/pkg/api/soc.go b/pkg/api/soc.go index a6e0c37e187..7b65b279751 100644 --- a/pkg/api/soc.go +++ b/pkg/api/soc.go @@ -18,7 +18,6 @@ import ( "github.com/ethersphere/bee/v2/pkg/jsonhttp" "github.com/ethersphere/bee/v2/pkg/postage" "github.com/ethersphere/bee/v2/pkg/soc" - storage "github.com/ethersphere/bee/v2/pkg/storage" storer "github.com/ethersphere/bee/v2/pkg/storer" "github.com/ethersphere/bee/v2/pkg/swarm" "github.com/gorilla/mux" @@ -51,7 +50,6 @@ func (s *Service) socUploadHandler(w http.ResponseWriter, r *http.Request) { headers := struct { BatchID []byte `map:"Swarm-Postage-Batch-Id"` StampSig []byte `map:"Swarm-Postage-Stamp"` - Pin bool `map:"Swarm-Pin"` Act bool `map:"Swarm-Act"` HistoryAddress swarm.Address `map:"Swarm-Act-History-Address"` }{} @@ -66,28 +64,10 @@ func (s *Service) socUploadHandler(w http.ResponseWriter, r *http.Request) { return } - // if pinning header is set we do a deferred upload, else we do a direct upload var ( tag uint64 err error ) - if headers.Pin { - session, err := s.storer.NewSession() - if err != nil { - logger.Debug("get or create tag failed", "error", err) - logger.Error(nil, "get or create tag failed") - switch { - case errors.Is(err, storage.ErrNotFound): - jsonhttp.NotFound(w, "tag not found") - default: - jsonhttp.InternalServerError(w, "cannot get or create tag") - } - return - } - tag = session.TagID - } - - deferred := tag != 0 var putter storer.PutterSession if len(headers.StampSig) != 0 { @@ -103,15 +83,15 @@ func (s *Service) socUploadHandler(w http.ResponseWriter, r *http.Request) { putter, err = s.newStampedPutter(r.Context(), putterOptions{ BatchID: stamp.BatchID(), TagID: tag, - Pin: headers.Pin, - Deferred: deferred, + Pin: false, + Deferred: false, }, &stamp) } else { putter, err = s.newStamperPutter(r.Context(), putterOptions{ BatchID: headers.BatchID, TagID: tag, - Pin: headers.Pin, - Deferred: deferred, + Pin: false, + Deferred: false, }) } if err != nil { From a9ba53bdfad7ba5da16b2d4fe2d24ae27676502f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 7 Nov 2024 14:42:32 +0100 Subject: [PATCH 49/54] feat: always save newer payload of soc --- pkg/storer/internal/chunkstore/chunkstore.go | 21 +++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/pkg/storer/internal/chunkstore/chunkstore.go b/pkg/storer/internal/chunkstore/chunkstore.go index 67fee1e6d77..e1952507bf6 100644 --- a/pkg/storer/internal/chunkstore/chunkstore.go +++ b/pkg/storer/internal/chunkstore/chunkstore.go @@ -89,7 +89,26 @@ func Put(ctx context.Context, s storage.IndexStore, sh storage.Sharky, ch swarm. return fmt.Errorf("chunk store: failed to read: %w", err) } - rIdx.RefCnt++ + // SOC will be replaced in the chunk store if it is already stored with the newer payload. + // Pull sync should sync the new SOC payload with the new stamp. + // TODO: remove this condition when postage stamping is refactored for GSOC. 
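+	// For a single owner chunk the sharky slot holding the previously stored payload is
+	// released and the new payload is written in its place; the reference count is only
+	// incremented for other chunk types.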
+ chunkType := storage.ChunkType(ch) + if chunkType == swarm.ChunkTypeSingleOwner { + // replace old payload + err = sh.Release(ctx, rIdx.Location) + if err != nil { + return fmt.Errorf("chunkstore: failed to release sharky location: %w", err) + } + + loc, err := sh.Write(ctx, ch.Data()) + if err != nil { + return fmt.Errorf("chunk store: write to sharky failed: %w", err) + } + rIdx.Location = loc + rIdx.Timestamp = uint64(time.Now().Unix()) + } else { + rIdx.RefCnt++ + } return s.Put(rIdx) } From 489374989818e6f026df20ccb11cdfa8536a304c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 7 Nov 2024 16:16:30 +0100 Subject: [PATCH 50/54] test: always save newer payload of soc --- .../internal/chunkstore/chunkstore_test.go | 61 +++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/pkg/storer/internal/chunkstore/chunkstore_test.go b/pkg/storer/internal/chunkstore/chunkstore_test.go index 9e30c1af876..a57a6643dca 100644 --- a/pkg/storer/internal/chunkstore/chunkstore_test.go +++ b/pkg/storer/internal/chunkstore/chunkstore_test.go @@ -5,6 +5,7 @@ package chunkstore_test import ( + "bytes" "context" "errors" "fmt" @@ -13,7 +14,9 @@ import ( "os" "testing" + "github.com/ethersphere/bee/v2/pkg/crypto" "github.com/ethersphere/bee/v2/pkg/sharky" + "github.com/ethersphere/bee/v2/pkg/soc" "github.com/ethersphere/bee/v2/pkg/storer/internal/transaction" "github.com/ethersphere/bee/v2/pkg/storage" @@ -153,6 +156,64 @@ func TestChunkStore(t *testing.T) { } }) + // TODO: remove this when postage stamping is refactored for GSOC. + t.Run("put two SOCs with different payloads", func(t *testing.T) { + key, _ := crypto.GenerateSecp256k1Key() + signer := crypto.NewDefaultSigner(key) + + // chunk data to upload + chunk1 := chunktest.FixtureChunk("7000") + chunk2 := chunktest.FixtureChunk("0033") + id := make([]byte, swarm.HashSize) + s1 := soc.New(id, chunk1) + s2 := soc.New(id, chunk2) + sch1, err := s1.Sign(signer) + if err != nil { + t.Fatal(err) + } + sch1 = sch1.WithStamp(chunk1.Stamp()) + sch2, err := s2.Sign(signer) + if err != nil { + t.Fatal(err) + } + sch2 = sch2.WithStamp(chunk2.Stamp()) + + // Put the first SOC into the chunk store + err = st.Run(context.Background(), func(s transaction.Store) error { + return s.ChunkStore().Put(context.TODO(), sch1) + }) + if err != nil { + t.Fatalf("failed putting first single owner chunk: %v", err) + } + + // Put the second SOC into the chunk store + err = st.Run(context.Background(), func(s transaction.Store) error { + return s.ChunkStore().Put(context.TODO(), sch2) + }) + if err != nil { + t.Fatalf("failed putting second single owner chunk: %v", err) + } + + // Retrieve the chunk from the chunk store + var retrievedChunk swarm.Chunk + err = st.Run(context.Background(), func(s transaction.Store) error { + retrievedChunk, err = s.ChunkStore().Get(context.TODO(), sch1.Address()) + return err + }) + if err != nil { + t.Fatalf("failed retrieving chunk: %v", err) + } + schRetrieved, err := soc.FromChunk(retrievedChunk) + if err != nil { + t.Fatalf("failed converting chunk to SOC: %v", err) + } + + // Verify that the retrieved chunk contains the latest payload + if !bytes.Equal(chunk2.Data(), schRetrieved.WrappedChunk().Data()) { + t.Fatalf("expected payload %s, got %s", chunk2.Data(), schRetrieved.WrappedChunk().Data()) + } + }) + t.Run("get chunks", func(t *testing.T) { for _, ch := range testChunks { readCh, err := st.ChunkStore().Get(context.TODO(), ch.Address()) From b0d6912775153afcadb1f503ebd690fddd6b0998 Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 7 Nov 2024 16:35:34 +0100 Subject: [PATCH 51/54] test: move testcase away --- .../internal/chunkstore/chunkstore_test.go | 116 +++++++++--------- 1 file changed, 58 insertions(+), 58 deletions(-) diff --git a/pkg/storer/internal/chunkstore/chunkstore_test.go b/pkg/storer/internal/chunkstore/chunkstore_test.go index a57a6643dca..c86fbbb6b91 100644 --- a/pkg/storer/internal/chunkstore/chunkstore_test.go +++ b/pkg/storer/internal/chunkstore/chunkstore_test.go @@ -156,64 +156,6 @@ func TestChunkStore(t *testing.T) { } }) - // TODO: remove this when postage stamping is refactored for GSOC. - t.Run("put two SOCs with different payloads", func(t *testing.T) { - key, _ := crypto.GenerateSecp256k1Key() - signer := crypto.NewDefaultSigner(key) - - // chunk data to upload - chunk1 := chunktest.FixtureChunk("7000") - chunk2 := chunktest.FixtureChunk("0033") - id := make([]byte, swarm.HashSize) - s1 := soc.New(id, chunk1) - s2 := soc.New(id, chunk2) - sch1, err := s1.Sign(signer) - if err != nil { - t.Fatal(err) - } - sch1 = sch1.WithStamp(chunk1.Stamp()) - sch2, err := s2.Sign(signer) - if err != nil { - t.Fatal(err) - } - sch2 = sch2.WithStamp(chunk2.Stamp()) - - // Put the first SOC into the chunk store - err = st.Run(context.Background(), func(s transaction.Store) error { - return s.ChunkStore().Put(context.TODO(), sch1) - }) - if err != nil { - t.Fatalf("failed putting first single owner chunk: %v", err) - } - - // Put the second SOC into the chunk store - err = st.Run(context.Background(), func(s transaction.Store) error { - return s.ChunkStore().Put(context.TODO(), sch2) - }) - if err != nil { - t.Fatalf("failed putting second single owner chunk: %v", err) - } - - // Retrieve the chunk from the chunk store - var retrievedChunk swarm.Chunk - err = st.Run(context.Background(), func(s transaction.Store) error { - retrievedChunk, err = s.ChunkStore().Get(context.TODO(), sch1.Address()) - return err - }) - if err != nil { - t.Fatalf("failed retrieving chunk: %v", err) - } - schRetrieved, err := soc.FromChunk(retrievedChunk) - if err != nil { - t.Fatalf("failed converting chunk to SOC: %v", err) - } - - // Verify that the retrieved chunk contains the latest payload - if !bytes.Equal(chunk2.Data(), schRetrieved.WrappedChunk().Data()) { - t.Fatalf("expected payload %s, got %s", chunk2.Data(), schRetrieved.WrappedChunk().Data()) - } - }) - t.Run("get chunks", func(t *testing.T) { for _, ch := range testChunks { readCh, err := st.ChunkStore().Get(context.TODO(), ch.Address()) @@ -397,6 +339,64 @@ func TestChunkStore(t *testing.T) { } }) + // TODO: remove this when postage stamping is refactored for GSOC. 
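+	// Two single owner chunks signed with the same identifier and signer resolve to the same
+	// address; the store is expected to keep only the payload of the chunk stored last.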
+ t.Run("put two SOCs with different payloads", func(t *testing.T) { + key, _ := crypto.GenerateSecp256k1Key() + signer := crypto.NewDefaultSigner(key) + + // chunk data to upload + chunk1 := chunktest.FixtureChunk("7000") + chunk2 := chunktest.FixtureChunk("0033") + id := make([]byte, swarm.HashSize) + s1 := soc.New(id, chunk1) + s2 := soc.New(id, chunk2) + sch1, err := s1.Sign(signer) + if err != nil { + t.Fatal(err) + } + sch1 = sch1.WithStamp(chunk1.Stamp()) + sch2, err := s2.Sign(signer) + if err != nil { + t.Fatal(err) + } + sch2 = sch2.WithStamp(chunk2.Stamp()) + + // Put the first SOC into the chunk store + err = st.Run(context.Background(), func(s transaction.Store) error { + return s.ChunkStore().Put(context.TODO(), sch1) + }) + if err != nil { + t.Fatalf("failed putting first single owner chunk: %v", err) + } + + // Put the second SOC into the chunk store + err = st.Run(context.Background(), func(s transaction.Store) error { + return s.ChunkStore().Put(context.TODO(), sch2) + }) + if err != nil { + t.Fatalf("failed putting second single owner chunk: %v", err) + } + + // Retrieve the chunk from the chunk store + var retrievedChunk swarm.Chunk + err = st.Run(context.Background(), func(s transaction.Store) error { + retrievedChunk, err = s.ChunkStore().Get(context.TODO(), sch1.Address()) + return err + }) + if err != nil { + t.Fatalf("failed retrieving chunk: %v", err) + } + schRetrieved, err := soc.FromChunk(retrievedChunk) + if err != nil { + t.Fatalf("failed converting chunk to SOC: %v", err) + } + + // Verify that the retrieved chunk contains the latest payload + if !bytes.Equal(chunk2.Data(), schRetrieved.WrappedChunk().Data()) { + t.Fatalf("expected payload %s, got %s", chunk2.Data(), schRetrieved.WrappedChunk().Data()) + } + }) + t.Run("close store", func(t *testing.T) { err := st.Close() if err != nil { From 4d8661f20dd7efee8f26430a790c96ac66d4e1f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 7 Nov 2024 17:52:34 +0100 Subject: [PATCH 52/54] fix: replace gsoc payload --- pkg/storer/internal/chunkstore/chunkstore.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/storer/internal/chunkstore/chunkstore.go b/pkg/storer/internal/chunkstore/chunkstore.go index e1952507bf6..39729fd256d 100644 --- a/pkg/storer/internal/chunkstore/chunkstore.go +++ b/pkg/storer/internal/chunkstore/chunkstore.go @@ -93,7 +93,7 @@ func Put(ctx context.Context, s storage.IndexStore, sh storage.Sharky, ch swarm. // Pull sync should sync the new SOC payload with the new stamp. // TODO: remove this condition when postage stamping is refactored for GSOC. 
chunkType := storage.ChunkType(ch) - if chunkType == swarm.ChunkTypeSingleOwner { + if !errors.Is(err, storage.ErrNotFound) && chunkType == swarm.ChunkTypeSingleOwner { // replace old payload err = sh.Release(ctx, rIdx.Location) if err != nil { From 515b519afd8b9f5a0a3f9e03749805a08f5150cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 7 Nov 2024 17:54:40 +0100 Subject: [PATCH 53/54] revert: remove pinning and uploadstore usage --- pkg/api/soc.go | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/pkg/api/soc.go b/pkg/api/soc.go index 7b65b279751..d1595d5b811 100644 --- a/pkg/api/soc.go +++ b/pkg/api/soc.go @@ -18,6 +18,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/jsonhttp" "github.com/ethersphere/bee/v2/pkg/postage" "github.com/ethersphere/bee/v2/pkg/soc" + "github.com/ethersphere/bee/v2/pkg/storage" storer "github.com/ethersphere/bee/v2/pkg/storer" "github.com/ethersphere/bee/v2/pkg/swarm" "github.com/gorilla/mux" @@ -50,6 +51,7 @@ func (s *Service) socUploadHandler(w http.ResponseWriter, r *http.Request) { headers := struct { BatchID []byte `map:"Swarm-Postage-Batch-Id"` StampSig []byte `map:"Swarm-Postage-Stamp"` + Pin bool `map:"Swarm-Pin"` Act bool `map:"Swarm-Act"` HistoryAddress swarm.Address `map:"Swarm-Act-History-Address"` }{} @@ -64,10 +66,28 @@ func (s *Service) socUploadHandler(w http.ResponseWriter, r *http.Request) { return } + // if pinning header is set we do a deferred upload, else we do a direct upload var ( tag uint64 err error ) + if headers.Pin { + session, err := s.storer.NewSession() + if err != nil { + logger.Debug("get or create tag failed", "error", err) + logger.Error(nil, "get or create tag failed") + switch { + case errors.Is(err, storage.ErrNotFound): + jsonhttp.NotFound(w, "tag not found") + default: + jsonhttp.InternalServerError(w, "cannot get or create tag") + } + return + } + tag = session.TagID + } + + deferred := tag != 0 var putter storer.PutterSession if len(headers.StampSig) != 0 { @@ -83,15 +103,15 @@ func (s *Service) socUploadHandler(w http.ResponseWriter, r *http.Request) { putter, err = s.newStampedPutter(r.Context(), putterOptions{ BatchID: stamp.BatchID(), TagID: tag, - Pin: false, - Deferred: false, + Pin: headers.Pin, + Deferred: deferred, }, &stamp) } else { putter, err = s.newStamperPutter(r.Context(), putterOptions{ BatchID: headers.BatchID, TagID: tag, - Pin: false, - Deferred: false, + Pin: headers.Pin, + Deferred: deferred, }) } if err != nil { From 8c416afe1854467058e09d5e5133b76140868bb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 7 Nov 2024 20:10:02 +0100 Subject: [PATCH 54/54] fix: replace gsoc payload --- pkg/storer/internal/chunkstore/chunkstore.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/storer/internal/chunkstore/chunkstore.go b/pkg/storer/internal/chunkstore/chunkstore.go index 39729fd256d..f17eed79af3 100644 --- a/pkg/storer/internal/chunkstore/chunkstore.go +++ b/pkg/storer/internal/chunkstore/chunkstore.go @@ -71,8 +71,9 @@ func Has(_ context.Context, r storage.Reader, addr swarm.Address) (bool, error) func Put(ctx context.Context, s storage.IndexStore, sh storage.Sharky, ch swarm.Chunk) error { var ( - rIdx = &RetrievalIndexItem{Address: ch.Address()} - loc sharky.Location + rIdx = &RetrievalIndexItem{Address: ch.Address()} + loc sharky.Location + inserted bool ) err := s.Get(rIdx) switch { @@ -85,6 +86,7 @@ func Put(ctx context.Context, s storage.IndexStore, sh 
storage.Sharky, ch swarm. } rIdx.Location = loc rIdx.Timestamp = uint64(time.Now().Unix()) + inserted = true case err != nil: return fmt.Errorf("chunk store: failed to read: %w", err) } @@ -93,7 +95,7 @@ func Put(ctx context.Context, s storage.IndexStore, sh storage.Sharky, ch swarm. // Pull sync should sync the new SOC payload with the new stamp. // TODO: remove this condition when postage stamping is refactored for GSOC. chunkType := storage.ChunkType(ch) - if !errors.Is(err, storage.ErrNotFound) && chunkType == swarm.ChunkTypeSingleOwner { + if !inserted && chunkType == swarm.ChunkTypeSingleOwner { // replace old payload err = sh.Release(ctx, rIdx.Location) if err != nil {