From 930c52e027d1701c2ef2ef25aa8674ed5a7bdbce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tr=C3=B3n?= Date: Sun, 24 Sep 2023 08:42:01 +0200 Subject: [PATCH 01/10] feat(bmt): bmt changes for storage incentives phase 4 (#4343) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Viktor Levente Tóth --- pkg/bmt/bmt.go | 13 +++++++++++++ pkg/bmt/proof.go | 39 +++++++++++++++++++++++++++++++++------ pkg/bmt/proof_test.go | 30 ++++++++++++++++++------------ pkg/bmt/trhasher.go | 25 ------------------------- pkg/storer/sample.go | 9 ++++++++- pkg/swarm/hasher.go | 10 +++++----- pkg/swarm/hasher_test.go | 2 +- 7 files changed, 78 insertions(+), 50 deletions(-) delete mode 100644 pkg/bmt/trhasher.go diff --git a/pkg/bmt/bmt.go b/pkg/bmt/bmt.go index e13aa3ec1cb..38c0e8bff5f 100644 --- a/pkg/bmt/bmt.go +++ b/pkg/bmt/bmt.go @@ -40,6 +40,19 @@ type Hasher struct { span []byte // The span of the data subsumed under the chunk } +// facade +func NewHasher(hasherFact func() hash.Hash) *Hasher { + conf := NewConf(hasherFact, swarm.BmtBranches, 32) + + return &Hasher{ + Conf: conf, + result: make(chan []byte), + errc: make(chan error, 1), + span: make([]byte, SpanSize), + bmt: newTree(conf.segmentSize, conf.maxSize, conf.depth, conf.hasher), + } +} + // Capacity returns the maximum amount of bytes that will be processed by this hasher implementation. 
// since BMT assumes a balanced binary tree, capacity it is always a power of 2 func (h *Hasher) Capacity() int { diff --git a/pkg/bmt/proof.go b/pkg/bmt/proof.go index b9a958db9ab..fa39174c3c5 100644 --- a/pkg/bmt/proof.go +++ b/pkg/bmt/proof.go @@ -17,6 +17,17 @@ type Proof struct { Index int } +// Override base hash function of Hasher to fill buffer with zeros until chunk length +func (p Prover) Hash(b []byte) ([]byte, error) { + for i := p.size; i < p.maxSize; i += len(zerosection) { + _, err := p.Write(zerosection) + if err != nil { + return []byte{}, err + } + } + return p.Hasher.Hash(b) +} + // Proof returns the inclusion proof of the i-th data segment func (p Prover) Proof(i int) Proof { index := i @@ -36,26 +47,42 @@ func (p Prover) Proof(i int) Proof { secsize := 2 * p.segmentSize offset := i * secsize section := p.bmt.buffer[offset : offset+secsize] - return Proof{section, sisters, p.span, index} + left := section[:p.segmentSize] + right := section[p.segmentSize:] + var segment, firstSegmentSister []byte + if index%2 == 0 { + segment, firstSegmentSister = left, right + } else { + segment, firstSegmentSister = right, left + } + sisters = append([][]byte{firstSegmentSister}, sisters...) + return Proof{segment, sisters, p.span, index} } // Verify returns the bmt hash obtained from the proof which can then be checked against // the BMT hash of the chunk func (p Prover) Verify(i int, proof Proof) (root []byte, err error) { + var section []byte + if i%2 == 0 { + section = append(append(section, proof.ProveSegment...), proof.ProofSegments[0]...) + } else { + section = append(append(section, proof.ProofSegments[0]...), proof.ProveSegment...) 
+ } i = i / 2 n := p.bmt.leaves[i] + hasher := p.hasher() isLeft := n.isLeft - root, err = doHash(n.hasher, proof.ProveSegment) + root, err = doHash(hasher, section) if err != nil { return nil, err } n = n.parent - for _, sister := range proof.ProofSegments { + for _, sister := range proof.ProofSegments[1:] { if isLeft { - root, err = doHash(n.hasher, root, sister) + root, err = doHash(hasher, root, sister) } else { - root, err = doHash(n.hasher, sister, root) + root, err = doHash(hasher, sister, root) } if err != nil { return nil, err @@ -63,7 +90,7 @@ func (p Prover) Verify(i int, proof Proof) (root []byte, err error) { isLeft = n.isLeft n = n.parent } - return sha3hash(proof.Span, root) + return doHash(hasher, proof.Span, root) } func (n *node) getSister(isLeft bool) []byte { diff --git a/pkg/bmt/proof_test.go b/pkg/bmt/proof_test.go index 337b1bf3420..1b7f6d3b3dd 100644 --- a/pkg/bmt/proof_test.go +++ b/pkg/bmt/proof_test.go @@ -20,7 +20,8 @@ func TestProofCorrectness(t *testing.T) { t.Parallel() testData := []byte("hello world") - testData = append(testData, make([]byte, 4096-len(testData))...) 
+ testDataPadded := make([]byte, swarm.ChunkSize) + copy(testDataPadded, testData) verifySegments := func(t *testing.T, exp []string, found [][]byte) { t.Helper() @@ -57,8 +58,8 @@ func TestProofCorrectness(t *testing.T) { if err != nil { t.Fatal(err) } - - rh, err := hh.Hash(nil) + pr := bmt.Prover{hh} + rh, err := pr.Hash(nil) if err != nil { t.Fatal(err) } @@ -66,9 +67,10 @@ func TestProofCorrectness(t *testing.T) { t.Run("proof for left most", func(t *testing.T) { t.Parallel() - proof := bmt.Prover{hh}.Proof(0) + proof := pr.Proof(0) expSegmentStrings := []string{ + "0000000000000000000000000000000000000000000000000000000000000000", "ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", "b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", "21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", @@ -79,7 +81,7 @@ func TestProofCorrectness(t *testing.T) { verifySegments(t, expSegmentStrings, proof.ProofSegments) - if !bytes.Equal(proof.ProveSegment, testData[:2*hh.Size()]) { + if !bytes.Equal(proof.ProveSegment, testDataPadded[:hh.Size()]) { t.Fatal("section incorrect") } @@ -91,9 +93,10 @@ func TestProofCorrectness(t *testing.T) { t.Run("proof for right most", func(t *testing.T) { t.Parallel() - proof := bmt.Prover{hh}.Proof(127) + proof := pr.Proof(127) expSegmentStrings := []string{ + "0000000000000000000000000000000000000000000000000000000000000000", "ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", "b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", "21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", @@ -104,7 +107,7 @@ func TestProofCorrectness(t *testing.T) { verifySegments(t, expSegmentStrings, proof.ProofSegments) - if !bytes.Equal(proof.ProveSegment, testData[126*hh.Size():]) { + if !bytes.Equal(proof.ProveSegment, testDataPadded[127*hh.Size():]) { t.Fatal("section incorrect") } @@ -116,9 +119,10 @@ func TestProofCorrectness(t *testing.T) { t.Run("proof for middle", 
func(t *testing.T) { t.Parallel() - proof := bmt.Prover{hh}.Proof(64) + proof := pr.Proof(64) expSegmentStrings := []string{ + "0000000000000000000000000000000000000000000000000000000000000000", "ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", "b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", "21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", @@ -129,7 +133,7 @@ func TestProofCorrectness(t *testing.T) { verifySegments(t, expSegmentStrings, proof.ProofSegments) - if !bytes.Equal(proof.ProveSegment, testData[64*hh.Size():66*hh.Size()]) { + if !bytes.Equal(proof.ProveSegment, testDataPadded[64*hh.Size():65*hh.Size()]) { t.Fatal("section incorrect") } @@ -142,6 +146,7 @@ func TestProofCorrectness(t *testing.T) { t.Parallel() segmentStrings := []string{ + "0000000000000000000000000000000000000000000000000000000000000000", "ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", "b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", "21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", @@ -159,9 +164,9 @@ func TestProofCorrectness(t *testing.T) { segments = append(segments, decoded) } - segment := testData[64*hh.Size() : 66*hh.Size()] + segment := testDataPadded[64*hh.Size() : 65*hh.Size()] - rootHash, err := bmt.Prover{hh}.Verify(64, bmt.Proof{ + rootHash, err := pr.Verify(64, bmt.Proof{ ProveSegment: segment, ProofSegments: segments, Span: bmt.LengthToSpan(4096), @@ -200,6 +205,7 @@ func TestProof(t *testing.T) { } rh, err := hh.Hash(nil) + pr := bmt.Prover{hh} if err != nil { t.Fatal(err) } @@ -209,7 +215,7 @@ func TestProof(t *testing.T) { t.Run(fmt.Sprintf("segmentIndex %d", i), func(t *testing.T) { t.Parallel() - proof := bmt.Prover{hh}.Proof(i) + proof := pr.Proof(i) h := pool.Get() defer pool.Put(h) diff --git a/pkg/bmt/trhasher.go b/pkg/bmt/trhasher.go deleted file mode 100644 index 00df6664b85..00000000000 --- a/pkg/bmt/trhasher.go +++ /dev/null @@ -1,25 +0,0 @@ -// 
Copyright 2023 The Swarm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bmt - -import ( - "hash" - - "github.com/ethersphere/bee/pkg/swarm" -) - -func NewTrHasher(prefix []byte) *Hasher { - capacity := 32 - hasherFact := func() hash.Hash { return swarm.NewTrHasher(prefix) } - conf := NewConf(hasherFact, swarm.BmtBranches, capacity) - - return &Hasher{ - Conf: conf, - result: make(chan []byte), - errc: make(chan error, 1), - span: make([]byte, SpanSize), - bmt: newTree(conf.segmentSize, conf.maxSize, conf.depth, conf.hasher), - } -} diff --git a/pkg/storer/sample.go b/pkg/storer/sample.go index 690e06affc3..07c92885ac0 100644 --- a/pkg/storer/sample.go +++ b/pkg/storer/sample.go @@ -10,6 +10,7 @@ import ( "crypto/hmac" "encoding/binary" "fmt" + "hash" "math/big" "sort" "sync" @@ -43,7 +44,13 @@ type Sample struct { func RandSample(t *testing.T, anchor []byte) Sample { t.Helper() - hasher := bmt.NewTrHasher(anchor) + prefixHasherFactory := func() hash.Hash { + return swarm.NewPrefixHasher(anchor) + } + pool := bmt.NewPool(bmt.NewConf(prefixHasherFactory, swarm.BmtBranches, 8)) + + hasher := pool.Get() + defer pool.Put(hasher) items := make([]SampleItem, SampleSize) for i := 0; i < SampleSize; i++ { diff --git a/pkg/swarm/hasher.go b/pkg/swarm/hasher.go index 485b61ab398..b9823bb50a1 100644 --- a/pkg/swarm/hasher.go +++ b/pkg/swarm/hasher.go @@ -15,15 +15,15 @@ func NewHasher() hash.Hash { return sha3.NewLegacyKeccak256() } -type trHasher struct { +type PrefixHasher struct { hash.Hash prefix []byte } -// NewTrHasher returns new hasher which is Keccak-256 hasher +// NewPrefixHasher returns new hasher which is Keccak-256 hasher // with prefix value added as initial data. 
-func NewTrHasher(prefix []byte) hash.Hash { - h := &trHasher{ +func NewPrefixHasher(prefix []byte) hash.Hash { + h := &PrefixHasher{ Hash: NewHasher(), prefix: prefix, } @@ -32,7 +32,7 @@ func NewTrHasher(prefix []byte) hash.Hash { return h } -func (h *trHasher) Reset() { +func (h *PrefixHasher) Reset() { h.Hash.Reset() _, _ = h.Write(h.prefix) } diff --git a/pkg/swarm/hasher_test.go b/pkg/swarm/hasher_test.go index bfee0a78e98..3811e09605a 100644 --- a/pkg/swarm/hasher_test.go +++ b/pkg/swarm/hasher_test.go @@ -66,7 +66,7 @@ func TestNewTrHasher(t *testing.T) { // Run tests cases against TrHasher for _, tc := range tests { - h := swarm.NewTrHasher(tc.prefix) + h := swarm.NewPrefixHasher(tc.prefix) _, err := h.Write(tc.plaintext) if err != nil { From 0a9afe14142cb4a816fbfc31591cf3436c5b4336 Mon Sep 17 00:00:00 2001 From: zelig Date: Sun, 24 Sep 2023 22:44:08 +0200 Subject: [PATCH 02/10] feat(storageincentives): generate sample of SOCs for phase 4 test --- pkg/storageincentives/soc_mine_test.go | 201 +++++++++++++++++++++++++ 1 file changed, 201 insertions(+) create mode 100644 pkg/storageincentives/soc_mine_test.go diff --git a/pkg/storageincentives/soc_mine_test.go b/pkg/storageincentives/soc_mine_test.go new file mode 100644 index 00000000000..d5b93241b21 --- /dev/null +++ b/pkg/storageincentives/soc_mine_test.go @@ -0,0 +1,201 @@ +// Copyright 2023 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package storageincentives_test + +import ( + "context" + "encoding/binary" + "encoding/hex" + "fmt" + "hash" + "math/big" + "os" + "sync" + "testing" + + "github.com/ethersphere/bee/pkg/bmt" + "github.com/ethersphere/bee/pkg/cac" + "github.com/ethersphere/bee/pkg/crypto" + "github.com/ethersphere/bee/pkg/soc" + "github.com/ethersphere/bee/pkg/swarm" + "golang.org/x/sync/errgroup" +) + +// TestSocMine dumps a sample made out of SOCs to upload for storage incentives + +// dump chunks + +// go test -v ./pkg/storageincentives/ -run TestSocMine -count 1 > socs.txt + +// to generate uploads using the input + cat socs.txt | tail 19 | head 16 | perl -pne 's/([a-f0-9]+)\t([a-f0-9]+)\t([a-f0-9]+)\t([a-f0-9]+)/echo -n $4 | xxd -r -p | curl -X POST \"http:\/\/localhost:1633\/soc\/$1\/$2?sig=$3\" -H \"accept: application\/json, text\/plain, \/\" -H \"content-type: application\/octet-stream\" -H \"swarm-postage-batch-id: 14b26beca257e763609143c6b04c2c487f01a051798c535c2f542ce75a97c05f\" --data-binary \@-/' +func TestSocMine(t *testing.T) { + t.Parallel() + // the anchor used in neighbourhood selection and reserve salt for sampling + prefix, err := hex.DecodeString("3617319a054d772f909f7c479a2cebe5066e836a939412e32403c99029b92eff") + if err != nil { + t.Fatal(err) + } + // the transformed address hasher factory function + prefixhasher := func() hash.Hash { return swarm.NewPrefixHasher(prefix) } + trHasher := func() hash.Hash { return bmt.NewHasher(prefixhasher) } + // the bignum cast of the maximum sample value (upper bound on transformed addresses as a 256-bit article) + // this constant is for a minimum reserve size of 2 million chunks with sample size of 16 + // = 1.284401 * 10^71 = 1284401 + 66 0-s + mstring := "1284401" + for i := 0; i < 66; i++ { + mstring = mstring + "0" + } + n, ok := new(big.Int).SetString(mstring, 10) + if !ok { + t.Fatalf("SetString: error setting to '%s'", mstring) + } + // the filter function on the SOC address + // meant to make sure we pass check 
for proof of retrievability for + // a node of overlay 0x65xxx with a reserve depth of 1, i.e., + // SOC address must start with zero bit + filterSOCAddr := func(a swarm.Address) bool { + return a.Bytes()[0]&0x80 != 0x00 + } + // the filter function on the transformed address using the density estimation constant + filterTrAddr := func(a swarm.Address) (bool, error) { + m := new(big.Int).SetBytes(a.Bytes()) + return m.Cmp(n) < 0, nil + } + // setup the signer with a private key from a fixture + data, err := hex.DecodeString("634fb5a872396d9693e5c9f9d7233cfa93f395c093371017ff44aa9ae6564cdd") + if err != nil { + t.Fatal(err) + } + privKey, err := crypto.DecodeSecp256k1PrivateKey(data) + if err != nil { + t.Fatal(err) + } + signer := crypto.NewDefaultSigner(privKey) + + sampleSize := 16 + // for sanity check: given a filterSOCAddr requiring a 0 leading bit (chance of 1/2) + // we expect an overall rough 4 million chunks to be mined to create this sample + // for 8 workers that is half a million round on average per worker + err = makeChunks(t, signer, sampleSize, filterSOCAddr, filterTrAddr, trHasher) + if err != nil { + t.Fatal(err) + } +} + +func makeChunks(t *testing.T, signer crypto.Signer, sampleSize int, filterSOCAddr func(swarm.Address) bool, filterTrAddr func(swarm.Address) (bool, error), trHasher func() hash.Hash) error { + t.Helper() + + // set owner address from signer + ethAddress, err := signer.EthereumAddress() + if err != nil { + return err + } + ownerAddressBytes := ethAddress.Bytes() + + // use the same wrapped chunk for all mined SOCs + payload := []byte("foo") + ch, err := cac.New(payload) + if err != nil { + return err + } + + var done bool // to signal sampleSize number of chunks found + sampleC := make(chan *soc.SOC, 1) // channel to push results on + sample := make([]*soc.SOC, sampleSize) // to collect the sample + ctx, cancel := context.WithCancel(context.Background()) + eg, ectx := errgroup.WithContext(ctx) + // the main loop terminating after 
sampleSize SOCs have been generated + eg.Go(func() error { + defer cancel() + for i := 0; i < sampleSize; i++ { + select { + case sample[i] = <-sampleC: + case <-ectx.Done(): + return ectx.Err() + } + } + done = true + return nil + }) + + // loop to start mining workers + count := 8 // number of parallel workers + wg := sync.WaitGroup{} + for i := 0; i < count; i++ { + i := i + wg.Add(1) + eg.Go(func() (err error) { + offset := i * 4 + found := 0 + for seed := uint32(1); ; seed++ { + select { + case <-ectx.Done(): + defer wg.Done() + t.Logf("LOG quit worker: %d, rounds: %d, found: %d\n", i, seed, found) + return ectx.Err() + default: + } + id := make([]byte, 32) + binary.BigEndian.PutUint32(id[offset:], seed) + s := soc.New(id, ch) + addr, err := soc.CreateAddress(id, ownerAddressBytes) + if err != nil { + return err + } + // continue if mined SOC addr is not good + if !filterSOCAddr(addr) { + continue + } + hasher := trHasher() + data := s.WrappedChunk().Data() + hasher.(*bmt.Hasher).SetHeader(data[:8]) + _, err = hasher.Write(data[8:]) + if err != nil { + return err + } + trAddr := hasher.Sum(nil) + // hashing the transformed wrapped chunk address with the SOC address + // to arrive at a unique transformed SOC address despite identical payloads + trSocAddr, err := soc.CreateAddress(addr.Bytes(), trAddr) + if err != nil { + return err + } + ok, err := filterTrAddr(trSocAddr) + if err != nil { + return err + } + if ok { + select { + case sampleC <- s: + found++ + t.Logf("LOG worker: %d, rounds: %d, found: %d, id:%x\n", i, seed, found, id) + case <-ectx.Done(): + defer wg.Done() + t.Logf("LOG quit worker: %d, rounds: %d, found: %d\n", i, seed, found) + return ectx.Err() + } + } + } + }) + } + if err := eg.Wait(); !done && err != nil { + return err + } + wg.Wait() + for _, s := range sample { + + // signs the chunk + sch, err := s.Sign(signer) + if err != nil { + return err + } + data := sch.Data() + id, sig, payload := data[:32], data[32:97], data[97:] + 
fmt.Fprintf(os.Stdout, "%x\t%x\t%x\t%x\n", ownerAddressBytes, id, sig, payload) + + } + return nil +} From f89ad31f6a88457a7c7967bd8189de2295bb5934 Mon Sep 17 00:00:00 2001 From: nugaon <50576770+nugaon@users.noreply.github.com> Date: Mon, 25 Sep 2023 16:44:58 +0200 Subject: [PATCH 03/10] feat(ph4): agent and api (#4323) --- pkg/api/api_test.go | 3 +- pkg/api/rchash.go | 12 +- pkg/api/router.go | 12 +- pkg/storageincentives/agent.go | 47 +++- pkg/storageincentives/agent_test.go | 2 +- pkg/storageincentives/export_test.go | 5 +- pkg/storageincentives/proof.go | 207 ++++++++++++++++-- pkg/storageincentives/proof_test.go | 138 +++++++++++- .../redistribution/inclusionproof.go | 122 +++++++++++ .../redistribution/redistribution.go | 6 +- .../redistribution/redistribution_test.go | 191 +++++++++------- pkg/storageincentives/redistributionstate.go | 10 +- .../redistributionstate_test.go | 4 +- .../testdata/inclusion-proofs.json | 126 +++++++++++ pkg/storer/sample.go | 81 +++++-- 15 files changed, 815 insertions(+), 151 deletions(-) create mode 100644 pkg/storageincentives/redistribution/inclusionproof.go create mode 100644 pkg/storageincentives/testdata/inclusion-proofs.json diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go index 8655853474f..dd4eaabb25f 100644 --- a/pkg/api/api_test.go +++ b/pkg/api/api_test.go @@ -57,6 +57,7 @@ import ( "github.com/ethersphere/bee/pkg/storage/inmemstore" testingc "github.com/ethersphere/bee/pkg/storage/testing" "github.com/ethersphere/bee/pkg/storageincentives" + "github.com/ethersphere/bee/pkg/storageincentives/redistribution" "github.com/ethersphere/bee/pkg/storageincentives/staking" mock2 "github.com/ethersphere/bee/pkg/storageincentives/staking/mock" mockstorer "github.com/ethersphere/bee/pkg/storer/mock" @@ -775,7 +776,7 @@ func (m *mockContract) IsWinner(context.Context) (bool, error) { return false, nil } -func (m *mockContract) Claim(context.Context) (common.Hash, error) { +func (m *mockContract) Claim(context.Context, 
redistribution.ChunkInclusionProofs) (common.Hash, error) { m.mtx.Lock() defer m.mtx.Unlock() m.callsList = append(m.callsList, claimCall) diff --git a/pkg/api/rchash.go b/pkg/api/rchash.go index c37f642da2b..b38e63ef601 100644 --- a/pkg/api/rchash.go +++ b/pkg/api/rchash.go @@ -19,8 +19,9 @@ func (s *Service) rchash(w http.ResponseWriter, r *http.Request) { logger := s.logger.WithName("get_rchash").Build() paths := struct { - Depth *uint8 `map:"depth" validate:"required"` + Depth uint8 `map:"depth" validate:"min=0"` Anchor1 string `map:"anchor1" validate:"required"` + Anchor2 string `map:"anchor2" validate:"required"` }{} if response := s.mapStructure(mux.Vars(r), &paths); response != nil { response("invalid path params", logger, w) @@ -34,7 +35,14 @@ func (s *Service) rchash(w http.ResponseWriter, r *http.Request) { return } - resp, err := s.redistributionAgent.SampleWithProofs(r.Context(), anchor1, *paths.Depth) + anchor2, err := hex.DecodeString(paths.Anchor2) + if err != nil { + logger.Error(err, "invalid hex params") + jsonhttp.InternalServerError(w, "invalid hex params") + return + } + + resp, err := s.redistributionAgent.SampleWithProofs(r.Context(), anchor1, anchor2, paths.Depth) if err != nil { logger.Error(err, "failed making sample with proofs") jsonhttp.InternalServerError(w, "failed making sample with proofs") diff --git a/pkg/api/router.go b/pkg/api/router.go index b7f83a6623c..6408fd7b9af 100644 --- a/pkg/api/router.go +++ b/pkg/api/router.go @@ -339,12 +339,6 @@ func (s *Service) mountAPI() { web.FinalHandlerFunc(s.healthHandler), )) - handle("/rchash/{depth}/{anchor1}", web.ChainHandlers( - web.FinalHandler(jsonhttp.MethodHandler{ - "GET": http.HandlerFunc(s.rchash), - }), - )) - if s.Restricted { handle("/auth", jsonhttp.MethodHandler{ "POST": web.ChainHandlers( @@ -601,4 +595,10 @@ func (s *Service) mountBusinessDebug(restricted bool) { web.FinalHandlerFunc(s.statusGetPeersHandler), ), }) + + handle("/rchash/{depth}/{anchor1}/{anchor2}", 
web.ChainHandlers( + web.FinalHandler(jsonhttp.MethodHandler{ + "GET": http.HandlerFunc(s.rchash), + }), + )) } diff --git a/pkg/storageincentives/agent.go b/pkg/storageincentives/agent.go index 3a6ba36074a..c6e746a5c45 100644 --- a/pkg/storageincentives/agent.go +++ b/pkg/storageincentives/agent.go @@ -353,7 +353,23 @@ func (a *Agent) handleClaim(ctx context.Context, round uint64) error { a.logger.Info("could not set balance", "err", err) } - txHash, err := a.contract.Claim(ctx) + sampleData, exists := a.state.SampleData(round - 1) + if !exists { + return fmt.Errorf("sample not found") + } + + anchor2, err := a.contract.ReserveSalt(ctx) + if err != nil { + a.logger.Info("failed getting anchor after second reveal", "err", err) + } + + proofs, err := makeInclusionProofs(sampleData.ReserveSampleItems, sampleData.Anchor1, anchor2) + + if err != nil { + return fmt.Errorf("making inclusion proofs: %w", err) + } + + txHash, err := a.contract.Claim(ctx, proofs) if err != nil { a.metrics.ErrClaim.Inc() return fmt.Errorf("claiming win: %w", err) @@ -413,7 +429,6 @@ func (a *Agent) handleSample(ctx context.Context, round uint64) (bool, error) { return false, nil } - now := time.Now() sample, err := a.makeSample(ctx, storageRadius) if err != nil { return false, err @@ -421,7 +436,7 @@ func (a *Agent) handleSample(ctx context.Context, round uint64) (bool, error) { a.logger.Info("produced sample", "hash", sample.ReserveSampleHash, "radius", sample.StorageRadius, "round", round) - a.state.SetSampleData(round, sample, time.Since(now)) + a.state.SetSampleData(round, sample) return true, nil } @@ -442,7 +457,8 @@ func (a *Agent) makeSample(ctx context.Context, storageRadius uint8) (SampleData if err != nil { return SampleData{}, err } - a.metrics.SampleDuration.Set(time.Since(t).Seconds()) + dur := time.Since(t) + a.metrics.SampleDuration.Set(dur.Seconds()) sampleHash, err := sampleHash(rSample.Items) if err != nil { @@ -450,8 +466,10 @@ func (a *Agent) makeSample(ctx 
context.Context, storageRadius uint8) (SampleData } sample := SampleData{ - ReserveSampleHash: sampleHash, - StorageRadius: storageRadius, + Anchor1: salt, + ReserveSampleItems: rSample.Items, + ReserveSampleHash: sampleHash, + StorageRadius: storageRadius, } return sample, nil @@ -538,14 +556,16 @@ func (a *Agent) Status() (*Status, error) { } type SampleWithProofs struct { - Items []storer.SampleItem - Hash swarm.Address - Duration time.Duration + Hash swarm.Address `json:"hash"` + Proofs redistribution.ChunkInclusionProofs `json:"proofs"` + Duration time.Duration `json:"duration"` } +// Only called by rchash API func (a *Agent) SampleWithProofs( ctx context.Context, anchor1 []byte, + anchor2 []byte, storageRadius uint8, ) (SampleWithProofs, error) { sampleStartTime := time.Now() @@ -562,12 +582,17 @@ func (a *Agent) SampleWithProofs( hash, err := sampleHash(rSample.Items) if err != nil { - return SampleWithProofs{}, fmt.Errorf("sample hash: %w:", err) + return SampleWithProofs{}, fmt.Errorf("sample hash: %w", err) + } + + proofs, err := makeInclusionProofs(rSample.Items, anchor1, anchor2) + if err != nil { + return SampleWithProofs{}, fmt.Errorf("make proofs: %w", err) } return SampleWithProofs{ - Items: rSample.Items, Hash: hash, + Proofs: proofs, Duration: time.Since(sampleStartTime), }, nil } diff --git a/pkg/storageincentives/agent_test.go b/pkg/storageincentives/agent_test.go index 5d7ca5ffe90..58a06eaf6eb 100644 --- a/pkg/storageincentives/agent_test.go +++ b/pkg/storageincentives/agent_test.go @@ -276,7 +276,7 @@ func (m *mockContract) IsWinner(context.Context) (bool, error) { return false, nil } -func (m *mockContract) Claim(context.Context) (common.Hash, error) { +func (m *mockContract) Claim(context.Context, redistribution.ChunkInclusionProofs) (common.Hash, error) { m.mtx.Lock() defer m.mtx.Unlock() m.callsList = append(m.callsList, claimCall) diff --git a/pkg/storageincentives/export_test.go b/pkg/storageincentives/export_test.go index 
8c26fc1761c..f617501ba29 100644 --- a/pkg/storageincentives/export_test.go +++ b/pkg/storageincentives/export_test.go @@ -5,6 +5,7 @@ package storageincentives var ( - NewEvents = newEvents - SampleChunk = sampleChunk + NewEvents = newEvents + SampleChunk = sampleChunk + MakeInclusionProofs = makeInclusionProofs ) diff --git a/pkg/storageincentives/proof.go b/pkg/storageincentives/proof.go index 6415dd51af6..3500a3a07ba 100644 --- a/pkg/storageincentives/proof.go +++ b/pkg/storageincentives/proof.go @@ -5,12 +5,197 @@ package storageincentives import ( + "errors" + "fmt" + "hash" + "math/big" + + "github.com/ethersphere/bee/pkg/bmt" "github.com/ethersphere/bee/pkg/bmtpool" "github.com/ethersphere/bee/pkg/cac" + "github.com/ethersphere/bee/pkg/soc" + "github.com/ethersphere/bee/pkg/storageincentives/redistribution" storer "github.com/ethersphere/bee/pkg/storer" "github.com/ethersphere/bee/pkg/swarm" ) +var errMessage = errors.New("reserve commitment hasher: failure in proof creation") + +// returns the byte index of chunkdata where the spansize starts +func spanOffset(sampleItem storer.SampleItem) uint8 { + ch := swarm.NewChunk(sampleItem.ChunkAddress, sampleItem.ChunkData) + if soc.Valid(ch) { + return swarm.HashSize + swarm.SocSignatureSize + } + + return 0 +} + +// makeInclusionProofs creates transaction data for claim method. +// In the document this logic, result data, is also called Proof of entitlement (POE). 
+func makeInclusionProofs( + reserveSampleItems []storer.SampleItem, + anchor1 []byte, + anchor2 []byte, +) (redistribution.ChunkInclusionProofs, error) { + if len(reserveSampleItems) != storer.SampleSize { + return redistribution.ChunkInclusionProofs{}, fmt.Errorf("reserve sample items should have %d elements", storer.SampleSize) + } + if len(anchor1) == 0 { + return redistribution.ChunkInclusionProofs{}, errors.New("anchor1 is not set") + } + if len(anchor2) == 0 { + return redistribution.ChunkInclusionProofs{}, errors.New("anchor2 is not set") + } + + require3 := storer.SampleSize - 1 + require1 := new(big.Int).Mod(new(big.Int).SetBytes(anchor2), big.NewInt(int64(require3))).Uint64() + require2 := new(big.Int).Mod(new(big.Int).SetBytes(anchor2), big.NewInt(int64(require3-1))).Uint64() + if require2 >= require1 { + require2++ + } + + // TODO: refactor, make it global / anchor (cleanup?) + prefixHasherFactory := func() hash.Hash { + return swarm.NewPrefixHasher(anchor1) + } + prefixHasherPool := bmt.NewPool(bmt.NewConf(prefixHasherFactory, swarm.BmtBranches, 8)) + + // Sample chunk proofs + rccontent := bmt.Prover{Hasher: bmtpool.Get()} + rccontent.SetHeaderInt64(swarm.HashSize * storer.SampleSize * 2) + rsc, err := sampleChunk(reserveSampleItems) + if err != nil { + return redistribution.ChunkInclusionProofs{}, errMessage + } + rscData := rsc.Data() + _, err = rccontent.Write(rscData[swarm.SpanSize:]) + if err != nil { + return redistribution.ChunkInclusionProofs{}, errMessage + } + _, err = rccontent.Hash(nil) + if err != nil { + return redistribution.ChunkInclusionProofs{}, errMessage + } + proof1p1 := rccontent.Proof(int(require1) * 2) + proof2p1 := rccontent.Proof(int(require2) * 2) + proofLastp1 := rccontent.Proof(require3 * 2) + bmtpool.Put(rccontent.Hasher) + + // Witness1 proofs + segmentIndex := int(new(big.Int).Mod(new(big.Int).SetBytes(anchor2), big.NewInt(int64(128))).Uint64()) + // OG chunk proof + chunk1Content := bmt.Prover{Hasher: bmtpool.Get()} + 
chunk1Offset := spanOffset(reserveSampleItems[require1]) + chunk1Content.SetHeader(reserveSampleItems[require1].ChunkData[chunk1Offset : chunk1Offset+swarm.SpanSize]) + chunk1ContentPayload := reserveSampleItems[require1].ChunkData[chunk1Offset+swarm.SpanSize:] + _, err = chunk1Content.Write(chunk1ContentPayload) + if err != nil { + return redistribution.ChunkInclusionProofs{}, errMessage + } + _, err = chunk1Content.Hash(nil) + if err != nil { + return redistribution.ChunkInclusionProofs{}, errMessage + } + proof1p2 := chunk1Content.Proof(segmentIndex) + // TR chunk proof + chunk1TrContent := bmt.Prover{Hasher: prefixHasherPool.Get()} + chunk1TrContent.SetHeader(reserveSampleItems[require1].ChunkData[chunk1Offset : chunk1Offset+swarm.SpanSize]) + _, err = chunk1TrContent.Write(chunk1ContentPayload) + if err != nil { + return redistribution.ChunkInclusionProofs{}, errMessage + } + _, err = chunk1TrContent.Hash(nil) + if err != nil { + return redistribution.ChunkInclusionProofs{}, errMessage + } + proof1p3 := chunk1TrContent.Proof(segmentIndex) + // cleanup + bmtpool.Put(chunk1Content.Hasher) + prefixHasherPool.Put(chunk1TrContent.Hasher) + + // Witness2 proofs + // OG Chunk proof + chunk2Offset := spanOffset(reserveSampleItems[require2]) + chunk2Content := bmt.Prover{Hasher: bmtpool.Get()} + chunk2ContentPayload := reserveSampleItems[require2].ChunkData[chunk2Offset+swarm.SpanSize:] + chunk2Content.SetHeader(reserveSampleItems[require2].ChunkData[chunk2Offset : chunk2Offset+swarm.SpanSize]) + _, err = chunk2Content.Write(chunk2ContentPayload) + if err != nil { + return redistribution.ChunkInclusionProofs{}, errMessage + } + _, err = chunk2Content.Hash(nil) + if err != nil { + return redistribution.ChunkInclusionProofs{}, errMessage + } + proof2p2 := chunk2Content.Proof(segmentIndex) + // TR Chunk proof + chunk2TrContent := bmt.Prover{Hasher: prefixHasherPool.Get()} + chunk2TrContent.SetHeader(reserveSampleItems[require2].ChunkData[chunk2Offset : 
chunk2Offset+swarm.SpanSize]) + _, err = chunk2TrContent.Write(chunk2ContentPayload) + if err != nil { + return redistribution.ChunkInclusionProofs{}, errMessage + } + _, err = chunk2TrContent.Hash(nil) + if err != nil { + return redistribution.ChunkInclusionProofs{}, errMessage + } + proof2p3 := chunk2TrContent.Proof(segmentIndex) + // cleanup + bmtpool.Put(chunk2Content.Hasher) + prefixHasherPool.Put(chunk2TrContent.Hasher) + + // Witness3 proofs + // OG Chunk proof + chunkLastOffset := spanOffset(reserveSampleItems[require3]) + chunkLastContent := bmt.Prover{Hasher: bmtpool.Get()} + chunkLastContent.SetHeader(reserveSampleItems[require3].ChunkData[chunkLastOffset : chunkLastOffset+swarm.SpanSize]) + chunkLastContentPayload := reserveSampleItems[require3].ChunkData[chunkLastOffset+swarm.SpanSize:] + _, err = chunkLastContent.Write(chunkLastContentPayload) + if err != nil { + return redistribution.ChunkInclusionProofs{}, errMessage + } + _, err = chunkLastContent.Hash(nil) + if err != nil { + return redistribution.ChunkInclusionProofs{}, errMessage + } + proofLastp2 := chunkLastContent.Proof(segmentIndex) + // TR Chunk Proof + chunkLastTrContent := bmt.Prover{Hasher: prefixHasherPool.Get()} + chunkLastTrContent.SetHeader(reserveSampleItems[require3].ChunkData[chunkLastOffset : chunkLastOffset+swarm.SpanSize]) + _, err = chunkLastTrContent.Write(chunkLastContentPayload) + if err != nil { + return redistribution.ChunkInclusionProofs{}, errMessage + } + _, err = chunkLastTrContent.Hash(nil) + if err != nil { + return redistribution.ChunkInclusionProofs{}, errMessage + } + proofLastp3 := chunkLastTrContent.Proof(segmentIndex) + // cleanup + bmtpool.Put(chunkLastContent.Hasher) + prefixHasherPool.Put(chunkLastTrContent.Hasher) + + // map to output and add SOC related data if it is necessary + A, err := redistribution.NewChunkInclusionProof(proof1p1, proof1p2, proof1p3, reserveSampleItems[require1]) + if err != nil { + return redistribution.ChunkInclusionProofs{}, err + 
} + B, err := redistribution.NewChunkInclusionProof(proof2p1, proof2p2, proof2p3, reserveSampleItems[require2]) + if err != nil { + return redistribution.ChunkInclusionProofs{}, err + } + C, err := redistribution.NewChunkInclusionProof(proofLastp1, proofLastp2, proofLastp3, reserveSampleItems[require3]) + if err != nil { + return redistribution.ChunkInclusionProofs{}, err + } + return redistribution.ChunkInclusionProofs{ + A: A, + B: B, + C: C, + }, nil +} + func sampleChunk(items []storer.SampleItem) (swarm.Chunk, error) { contentSize := len(items) * 2 * swarm.HashSize @@ -27,23 +212,9 @@ func sampleChunk(items []storer.SampleItem) (swarm.Chunk, error) { } func sampleHash(items []storer.SampleItem) (swarm.Address, error) { - hasher := bmtpool.Get() - defer bmtpool.Put(hasher) - - for _, s := range items { - _, err := hasher.Write(s.TransformedAddress.Bytes()) - if err != nil { - return swarm.ZeroAddress, err - } + ch, err := sampleChunk(items) + if err != nil { + return swarm.ZeroAddress, err } - hash := hasher.Sum(nil) - - return swarm.NewAddress(hash), nil - - // PH4_Logic: - // ch, err := sampleChunk(items) - // if err != nil { - // return swarm.ZeroAddress, err - // } - // return ch.Address(), nil + return ch.Address(), nil } diff --git a/pkg/storageincentives/proof_test.go b/pkg/storageincentives/proof_test.go index 36350c2af2c..655582953e9 100644 --- a/pkg/storageincentives/proof_test.go +++ b/pkg/storageincentives/proof_test.go @@ -6,20 +6,152 @@ package storageincentives_test import ( "bytes" + "encoding/json" + "fmt" + "math/big" + "os" "testing" + "github.com/ethersphere/bee/pkg/cac" + "github.com/ethersphere/bee/pkg/crypto" + "github.com/ethersphere/bee/pkg/postage" + postagetesting "github.com/ethersphere/bee/pkg/postage/testing" + "github.com/ethersphere/bee/pkg/soc" "github.com/ethersphere/bee/pkg/storageincentives" + "github.com/ethersphere/bee/pkg/storageincentives/redistribution" storer "github.com/ethersphere/bee/pkg/storer" 
"github.com/ethersphere/bee/pkg/swarm" + "github.com/ethersphere/bee/pkg/util/testutil" + "github.com/google/go-cmp/cmp" ) +// Test asserts valid case for MakeInclusionProofs. +func TestMakeInclusionProofs(t *testing.T) { + t.Parallel() + + anchor := testutil.RandBytes(t, 1) + sample := storer.RandSample(t, anchor) + + _, err := storageincentives.MakeInclusionProofs(sample.Items, anchor, anchor) + if err != nil { + t.Fatal(err) + } +} + +// Test asserts that MakeInclusionProofs will generate the same +// output for given sample. +func TestMakeInclusionProofsRegression(t *testing.T) { + t.Parallel() + + const sampleSize = 16 + + keyRaw := `00000000000000000000000000000000` + privKey, err := crypto.DecodeSecp256k1PrivateKey([]byte(keyRaw)) + if err != nil { + t.Fatal(err) + } + signer := crypto.NewDefaultSigner(privKey) + + stampID, _ := crypto.LegacyKeccak256([]byte("The Inverted Jenny")) + index := []byte{0, 0, 0, 0, 0, 8, 3, 3} + timestamp := []byte{0, 0, 0, 0, 0, 3, 3, 8} + stamper := func(addr swarm.Address) *postage.Stamp { + sig := postagetesting.MustNewValidSignature(signer, addr, stampID, index, timestamp) + return postage.NewStamp(stampID, index, timestamp, sig) + } + + anchor1 := big.NewInt(100).Bytes() + anchor2 := big.NewInt(30).Bytes() // this anchor will pick chunks 3, 6, 15 + + // generate chunks that will be used as sample + sampleChunks := make([]swarm.Chunk, 0, sampleSize) + for i := 0; i < sampleSize; i++ { + ch, err := cac.New([]byte(fmt.Sprintf("Unstoppable data! 
Chunk #%d", i+1))) + if err != nil { + t.Fatal(err) + } + + if i%2 == 0 { + id, err := crypto.LegacyKeccak256([]byte(fmt.Sprintf("ID #%d", i+1))) + if err != nil { + t.Fatal(err) + } + + socCh, err := soc.New(id, ch).Sign(signer) + if err != nil { + t.Fatal(err) + } + + ch = socCh + } + + ch = ch.WithStamp(stamper(ch.Address())) + + sampleChunks = append(sampleChunks, ch) + } + + // make sample from chunks + sample, err := storer.MakeSampleUsingChunks(sampleChunks, anchor1) + if err != nil { + t.Fatal(err) + } + + // assert that sample chunk hash/address does not change + sch, err := storageincentives.SampleChunk(sample.Items) + if err != nil { + t.Fatal(err) + } + if want := swarm.MustParseHexAddress("193bbea3dd0656d813c2c1e27b821f141286bbe6ab0dbf8e26fc7dd491e8f921"); !sch.Address().Equal(want) { + t.Fatalf("expecting sample chunk address %v, got %v", want, sch.Address()) + } + + // assert that inclusion proofs values does not change + proofs, err := storageincentives.MakeInclusionProofs(sample.Items, anchor1, anchor2) + if err != nil { + t.Fatal(err) + } + + expectedProofs := redistribution.ChunkInclusionProofs{} + + data, _ := os.ReadFile("testdata/inclusion-proofs.json") + _ = json.Unmarshal(data, &expectedProofs) + + if diff := cmp.Diff(proofs, expectedProofs); diff != "" { + t.Fatalf("unexpected inclusion proofs (-want +have):\n%s", diff) + } +} + +// Test asserts cases when MakeInclusionProofs should return error. 
+func TestMakeInclusionProofsExpectedError(t *testing.T) { + t.Parallel() + + t.Run("invalid sample length", func(t *testing.T) { + anchor := testutil.RandBytes(t, 8) + sample := storer.RandSample(t, anchor) + + _, err := storageincentives.MakeInclusionProofs(sample.Items[:1], anchor, anchor) + if err == nil { + t.Fatal("expecting error") + } + }) + + t.Run("empty anchor", func(t *testing.T) { + sample := storer.RandSample(t, []byte{}) + + _, err := storageincentives.MakeInclusionProofs(sample.Items[:1], []byte{}, []byte{}) + if err == nil { + t.Fatal("expecting error") + } + }) +} + // Tests asserts that creating sample chunk is valid for all lengths [1-MaxSampleSize] func TestSampleChunk(t *testing.T) { t.Parallel() sample := storer.RandSample(t, nil) - for i := 1; i < len(sample.Items); i++ { + for i := 0; i < len(sample.Items); i++ { items := sample.Items[:i] chunk, err := storageincentives.SampleChunk(items) @@ -40,6 +172,10 @@ func TestSampleChunk(t *testing.T) { } pos += swarm.HashSize } + + if !chunk.Address().IsValidNonEmpty() { + t.Error("address shouldn't be empty") + } } } diff --git a/pkg/storageincentives/redistribution/inclusionproof.go b/pkg/storageincentives/redistribution/inclusionproof.go new file mode 100644 index 00000000000..2c07a645483 --- /dev/null +++ b/pkg/storageincentives/redistribution/inclusionproof.go @@ -0,0 +1,122 @@ +// Copyright 2023 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Used for inclusion proof utilities + +package redistribution + +import ( + "encoding/binary" + "encoding/hex" + + "github.com/ethersphere/bee/pkg/bmt" + "github.com/ethersphere/bee/pkg/soc" + "github.com/ethersphere/bee/pkg/storer" + "github.com/ethersphere/bee/pkg/swarm" +) + +type ChunkInclusionProofs struct { + A ChunkInclusionProof `json:"proof1"` + B ChunkInclusionProof `json:"proof2"` + C ChunkInclusionProof `json:"proofLast"` +} + +// ChunkInclusionProof structure must exactly match +// corresponding structure (of the same name) in Redistribution.sol smart contract. +// github.com/ethersphere/storage-incentives/blob/ph_f2/src/Redistribution.sol +// github.com/ethersphere/storage-incentives/blob/master/src/Redistribution.sol (when merged to master) +type ChunkInclusionProof struct { + ProofSegments []string `json:"proofSegments"` + ProveSegment string `json:"proveSegment"` + ProofSegments2 []string `json:"proofSegments2"` + ProveSegment2 string `json:"proveSegment2"` + ChunkSpan uint64 `json:"chunkSpan"` + ProofSegments3 []string `json:"proofSegments3"` + PostageProof PostageProof `json:"postageProof"` + SocProof []SOCProof `json:"socProof"` +} + +// SOCProof structure must exactly match +// corresponding structure (of the same name) in Redistribution.sol smart contract. +type PostageProof struct { + Signature string `json:"signature"` + PostageId string `json:"postageId"` + Index string `json:"index"` + TimeStamp string `json:"timeStamp"` +} + +// SOCProof structure must exactly match +// corresponding structure (of the same name) in Redistribution.sol smart contract. 
+type SOCProof struct { + Signer string `json:"signer"` + Signature string `json:"signature"` + Identifier string `json:"identifier"` + ChunkAddr string `json:"chunkAddr"` +} + +// Transforms arguments to ChunkInclusionProof object +func NewChunkInclusionProof(proofp1, proofp2 bmt.Proof, proofp3 bmt.Proof, sampleItem storer.SampleItem) (ChunkInclusionProof, error) { + proofp1Hex := newHexProofs(proofp1) + proofp2Hex := newHexProofs(proofp2) + proofp3Hex := newHexProofs(proofp3) + + socProof, err := makeSOCProof(sampleItem) + if err != nil { + return ChunkInclusionProof{}, err + } + + return ChunkInclusionProof{ + ProofSegments: proofp1Hex.ProofSegments, + ProveSegment: proofp1Hex.ProveSegment, + ProofSegments2: proofp2Hex.ProofSegments, + ProveSegment2: proofp2Hex.ProveSegment, + ChunkSpan: binary.LittleEndian.Uint64(proofp2.Span[:swarm.SpanSize]), // should be uint64 on the other size; copied from pkg/api/bytes.go + ProofSegments3: proofp3Hex.ProofSegments, + PostageProof: PostageProof{ + Signature: hex.EncodeToString(sampleItem.Stamp.Sig()), + PostageId: hex.EncodeToString(sampleItem.Stamp.BatchID()), + Index: hex.EncodeToString(sampleItem.Stamp.Index()), + TimeStamp: hex.EncodeToString(sampleItem.Stamp.Timestamp()), + }, + SocProof: socProof, + }, nil +} + +func makeSOCProof(sampleItem storer.SampleItem) ([]SOCProof, error) { + var emptySOCProof = make([]SOCProof, 0) + ch := swarm.NewChunk(sampleItem.ChunkAddress, sampleItem.ChunkData) + if !soc.Valid(ch) { + return emptySOCProof, nil + } + + socCh, err := soc.FromChunk(ch) + if err != nil { + return emptySOCProof, err + } + + return []SOCProof{{ + Signer: hex.EncodeToString(socCh.OwnerAddress()), + Signature: hex.EncodeToString(socCh.Signature()), + Identifier: hex.EncodeToString(socCh.ID()), + ChunkAddr: hex.EncodeToString(socCh.WrappedChunk().Address().Bytes()), + }}, nil +} + +type hexProof struct { + ProofSegments []string + ProveSegment string +} + +// Transforms proof object to its hexadecimal 
representation +func newHexProofs(proof bmt.Proof) hexProof { + proofSegments := make([]string, len(proof.ProofSegments)) + for i := 0; i < len(proof.ProofSegments); i++ { + proofSegments[i] = hex.EncodeToString(proof.ProofSegments[i]) + } + + return hexProof{ + ProveSegment: hex.EncodeToString(proof.ProveSegment), + ProofSegments: proofSegments, + } +} diff --git a/pkg/storageincentives/redistribution/redistribution.go b/pkg/storageincentives/redistribution/redistribution.go index d7187726b42..aecc47d1dec 100644 --- a/pkg/storageincentives/redistribution/redistribution.go +++ b/pkg/storageincentives/redistribution/redistribution.go @@ -23,7 +23,7 @@ type Contract interface { ReserveSalt(context.Context) ([]byte, error) IsPlaying(context.Context, uint8) (bool, error) IsWinner(context.Context) (bool, error) - Claim(context.Context) (common.Hash, error) + Claim(context.Context, ChunkInclusionProofs) (common.Hash, error) Commit(context.Context, []byte, *big.Int) (common.Hash, error) Reveal(context.Context, uint8, []byte, []byte) (common.Hash, error) } @@ -92,8 +92,8 @@ func (c *contract) IsWinner(ctx context.Context) (isWinner bool, err error) { } // Claim sends a transaction to blockchain if a win is claimed. 
-func (c *contract) Claim(ctx context.Context) (common.Hash, error) { - callData, err := c.incentivesContractABI.Pack("claim") +func (c *contract) Claim(ctx context.Context, proofs ChunkInclusionProofs) (common.Hash, error) { + callData, err := c.incentivesContractABI.Pack("claim", proofs.A, proofs.B, proofs.C) if err != nil { return common.Hash{}, err } diff --git a/pkg/storageincentives/redistribution/redistribution_test.go b/pkg/storageincentives/redistribution/redistribution_test.go index d47790d3d4b..321681f7f04 100644 --- a/pkg/storageincentives/redistribution/redistribution_test.go +++ b/pkg/storageincentives/redistribution/redistribution_test.go @@ -27,6 +27,35 @@ import ( var redistributionContractABI = abiutil.MustParseABI(chaincfg.Testnet.RedistributionABI) +// TODO uncomment when ABI is updated +// func randChunkInclusionProof(t *testing.T) redistribution.ChunkInclusionProof { +// t.Helper() + +// return redistribution.ChunkInclusionProof{ +// ProofSegments: []string{hex.EncodeToString(testutil.RandBytes(t, 32))}, +// ProveSegment: hex.EncodeToString(testutil.RandBytes(t, 32)), +// ProofSegments2: []string{hex.EncodeToString(testutil.RandBytes(t, 32))}, +// ProveSegment2: hex.EncodeToString(testutil.RandBytes(t, 32)), +// ProofSegments3: []string{hex.EncodeToString(testutil.RandBytes(t, 32))}, +// ChunkSpan: 1, +// Signature: string(testutil.RandBytes(t, 32)), +// ChunkAddr: hex.EncodeToString(testutil.RandBytes(t, 32)), +// PostageId: hex.EncodeToString(testutil.RandBytes(t, 32)), +// Index: hex.EncodeToString(testutil.RandBytes(t, 32)), +// TimeStamp: strconv.Itoa(time.Now().Nanosecond()), +// } +// } + +// func randChunkInclusionProofs(t *testing.T) redistribution.ChunkInclusionProofs { +// t.Helper() + +// return redistribution.ChunkInclusionProofs{ +// A: randChunkInclusionProof(t), +// B: randChunkInclusionProof(t), +// C: randChunkInclusionProof(t), +// } +// } + func TestRedistribution(t *testing.T) { t.Parallel() @@ -150,83 +179,91 @@ func 
TestRedistribution(t *testing.T) { } }) - t.Run("Claim", func(t *testing.T) { - t.Parallel() - - expectedCallData, err := redistributionContractABI.Pack("claim") - if err != nil { - t.Fatal(err) - } - contract := redistribution.New( - owner, - log.Noop, - transactionMock.New( - transactionMock.WithSendFunc(func(ctx context.Context, request *transaction.TxRequest, boost int) (txHash common.Hash, err error) { - if *request.To == redistributionContractAddress { - if !bytes.Equal(expectedCallData[:32], request.Data[:32]) { - return common.Hash{}, fmt.Errorf("got wrong call data. wanted %x, got %x", expectedCallData, request.Data) - } - return txHashDeposited, nil - } - return common.Hash{}, errors.New("sent to wrong contract") - }), - transactionMock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { - if txHash == txHashDeposited { - return &types.Receipt{ - Status: 1, - }, nil - } - return nil, errors.New("unknown tx hash") - }), - ), - redistributionContractAddress, - redistributionContractABI, - ) - - _, err = contract.Claim(ctx) - if err != nil { - t.Fatal(err) - } - }) - - t.Run("Claim with tx reverted", func(t *testing.T) { - t.Parallel() - - expectedCallData, err := redistributionContractABI.Pack("claim") - if err != nil { - t.Fatal(err) - } - contract := redistribution.New( - owner, - log.Noop, - transactionMock.New( - transactionMock.WithSendFunc(func(ctx context.Context, request *transaction.TxRequest, boost int) (txHash common.Hash, err error) { - if *request.To == redistributionContractAddress { - if !bytes.Equal(expectedCallData[:32], request.Data[:32]) { - return common.Hash{}, fmt.Errorf("got wrong call data. 
wanted %x, got %x", expectedCallData, request.Data) - } - return txHashDeposited, nil - } - return common.Hash{}, errors.New("sent to wrong contract") - }), - transactionMock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { - if txHash == txHashDeposited { - return &types.Receipt{ - Status: 0, - }, nil - } - return nil, errors.New("unknown tx hash") - }), - ), - redistributionContractAddress, - redistributionContractABI, - ) - - _, err = contract.Claim(ctx) - if !errors.Is(err, transaction.ErrTransactionReverted) { - t.Fatal(err) - } - }) + // t.Run("Claim", func(t *testing.T) { + // t.Parallel() + + // proofs := randChunkInclusionProofs(t) + // // TODO: use this when abi is updated + // // expectedCallData, err := redistributionContractABI.Pack("claim", proofs.A, proofs.B, proofs.C) + + // expectedCallData, err := redistributionContractABI.Pack("claim") + // if err != nil { + // t.Fatal(err) + // } + // contract := redistribution.New( + // owner, + // log.Noop, + // transactionMock.New( + // transactionMock.WithSendFunc(func(ctx context.Context, request *transaction.TxRequest, boost int) (txHash common.Hash, err error) { + // if *request.To == redistributionContractAddress { + // if !bytes.Equal(expectedCallData[:32], request.Data[:32]) { + // return common.Hash{}, fmt.Errorf("got wrong call data. 
wanted %x, got %x", expectedCallData, request.Data) + // } + // return txHashDeposited, nil + // } + // return common.Hash{}, errors.New("sent to wrong contract") + // }), + // transactionMock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { + // if txHash == txHashDeposited { + // return &types.Receipt{ + // Status: 1, + // }, nil + // } + // return nil, errors.New("unknown tx hash") + // }), + // ), + // redistributionContractAddress, + // redistributionContractABI, + // ) + + // _, err = contract.Claim(ctx, proofs) + // if err != nil { + // t.Fatal(err) + // } + // }) + + // NOTE: skip until storage-incentives-abi gets update + // t.Run("Claim with tx reverted", func(t *testing.T) { + // t.Parallel() + + // proofs := randChunkInclusionProofs(t) + // // TODO_PH4: use this when abi is updated + // // expectedCallData, err := redistributionContractABI.Pack("claim", proofs.A, proofs.B, proofs.C) + // expectedCallData, err := redistributionContractABI.Pack("claim") + // if err != nil { + // t.Fatal(err) + // } + // contract := redistribution.New( + // owner, + // log.Noop, + // transactionMock.New( + // transactionMock.WithSendFunc(func(ctx context.Context, request *transaction.TxRequest, boost int) (txHash common.Hash, err error) { + // if *request.To == redistributionContractAddress { + // if !bytes.Equal(expectedCallData[:32], request.Data[:32]) { + // return common.Hash{}, fmt.Errorf("got wrong call data. 
wanted %x, got %x", expectedCallData, request.Data) + // } + // return txHashDeposited, nil + // } + // return common.Hash{}, errors.New("sent to wrong contract") + // }), + // transactionMock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { + // if txHash == txHashDeposited { + // return &types.Receipt{ + // Status: 0, + // }, nil + // } + // return nil, errors.New("unknown tx hash") + // }), + // ), + // redistributionContractAddress, + // redistributionContractABI, + // ) + + // _, err = contract.Claim(ctx, proofs) + // if !errors.Is(err, transaction.ErrTransactionReverted) { + // t.Fatal(err) + // } + // }) t.Run("Commit", func(t *testing.T) { t.Parallel() diff --git a/pkg/storageincentives/redistributionstate.go b/pkg/storageincentives/redistributionstate.go index b61c7a5cbd2..20f61c19cc0 100644 --- a/pkg/storageincentives/redistributionstate.go +++ b/pkg/storageincentives/redistributionstate.go @@ -15,6 +15,7 @@ import ( "github.com/ethersphere/bee/pkg/log" "github.com/ethersphere/bee/pkg/settlement/swap/erc20" "github.com/ethersphere/bee/pkg/storage" + storer "github.com/ethersphere/bee/pkg/storer" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/transaction" ) @@ -63,8 +64,10 @@ type RoundData struct { } type SampleData struct { - ReserveSampleHash swarm.Address - StorageRadius uint8 + Anchor1 []byte + ReserveSampleItems []storer.SampleItem + ReserveSampleHash swarm.Address + StorageRadius uint8 } func NewStatus() *Status { @@ -241,14 +244,13 @@ func (r *RedistributionState) SampleData(round uint64) (SampleData, bool) { return *rd.SampleData, true } -func (r *RedistributionState) SetSampleData(round uint64, sd SampleData, dur time.Duration) { +func (r *RedistributionState) SetSampleData(round uint64, sd SampleData) { r.mtx.Lock() defer r.mtx.Unlock() rd := r.status.RoundData[round] rd.SampleData = &sd r.status.RoundData[round] = rd - r.status.SampleDuration = dur r.save() } diff 
--git a/pkg/storageincentives/redistributionstate_test.go b/pkg/storageincentives/redistributionstate_test.go index 9c7f750d64c..8fd8e3d6805 100644 --- a/pkg/storageincentives/redistributionstate_test.go +++ b/pkg/storageincentives/redistributionstate_test.go @@ -107,7 +107,7 @@ func TestStateRoundData(t *testing.T) { ReserveSampleHash: swarm.RandAddress(t), StorageRadius: 3, } - state.SetSampleData(1, savedSample, 0) + state.SetSampleData(1, savedSample) sample, exists := state.SampleData(1) if !exists { @@ -171,7 +171,7 @@ func TestPurgeRoundData(t *testing.T) { } commitKey := testutil.RandBytes(t, swarm.HashSize) - state.SetSampleData(round, savedSample, 0) + state.SetSampleData(round, savedSample) state.SetCommitKey(round, commitKey) state.SetHasRevealed(round) } diff --git a/pkg/storageincentives/testdata/inclusion-proofs.json b/pkg/storageincentives/testdata/inclusion-proofs.json new file mode 100644 index 00000000000..a5ae32e68b6 --- /dev/null +++ b/pkg/storageincentives/testdata/inclusion-proofs.json @@ -0,0 +1,126 @@ +{ + "proof1": { + "proofSegments": [ + "0875605dea48e812c9685ffba220a2b848bdbafdb95e02d087ba4a32925ea34f", + "f873df729270d5f4064286f3f018385a07cb4228734d8aca794299fee6e3e3e5", + "1fa8767fe303fe7487f5d58e4d72e5e170cf135f58a91b4fe19e4b19e5b67b5a", + "0f64ed713e25291e2c5a0561f584fa78c55a399e31919903d215dd622bcfd0ec", + "34dac0c73538614801c1ad16e272ef57f0b96a972073d15418f38daf9eb401c0", + "0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" + ], + "proveSegment": "7133885ac59dca7b97773acb740e978d41a4af45bd563067c8a3d863578488f1", + "proofSegments2": [ + "0000000000000000000000000000000000000000000000000000000000000000", + "ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", + "b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + 
"2047b070a295f8d517121d9ac9b3d5f9a944bac6cfab72dd5a7c625ab4558b0a", + "0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" + ], + "proveSegment2": "0000000000000000000000000000000000000000000000000000000000000000", + "chunkSpan": 26, + "proofSegments3": [ + "0000000000000000000000000000000000000000000000000000000000000000", + "a7f526447b68535121d36909a7585c9610d4fe6d4115540464c70499b0d7136d", + "066dd7ce6f4f1c97e78ff1c271916db25cb06128c92f8c8520807a0fa2ba93ff", + "df43c86b00db2156e769e8a8df1f08dc89ab5661c6fbaa9563f96fb9c051fc63", + "7327aecc9178bab420bb6fe482e07b65af69775b55666ec1ac8ab3da5bcec6dc", + "b68323ecaad1185a5e078f41c94c59d0b6dda5d57e109866e64d44acb8702846", + "478adfa93a7bb904d0aa86ff0d559f43aa915ee7865592e717b72a24452181cb" + ], + "postageProof": { + "signature": "a7c8d18a8279d3803169ebcf4e5a7dbdd4dffefa591eaad8d1ceaa636a793ad975e7f7b1087bcea4176525b0002edde0acbfda20dbd2dfbbe777cab38968fdc61b", + "postageId": "4c8efc14c8e3cee608174f995d7afe155897bf643a31226e4f1363bc97686aef", + "index": "0000000000080303", + "timeStamp": "0000000000030308" + }, + "socProof": [ + { + "signer": "827b44d53df2854057713b25cdd653eb70fe36c4", + "signature": "4e9576949338e4c23f4703bf81367256ab859b32934fef4db2ee46a76bf6be354e96ac628b8784b2de0bbeae5975469783192d6d1705485fcaadd8dedde6e2aa1b", + "identifier": "6223cfdd75a40440ccd32d0b11b24f08562ec63b1ea3b8cb1a59dfc3e3c33595", + "chunkAddr": "f32442586d93d8c002372ed41fa2ea1f281f38311c161d535c3665de5d9bfd92" + } + ] + }, + "proof2": { + "proofSegments": [ + "463aeb4ca5f000064c082e56eba387004265d2f47bf1226ef2d86cb163bcca3a", + "829af58b2a2f1c6c156baa196f03be4df510a96419f2dd54c456d3da30166312", + "dee4815ec42efa507b79cf4eb1f272e07be1b526cbd48137a287d9e5b2b2808a", + "0f64ed713e25291e2c5a0561f584fa78c55a399e31919903d215dd622bcfd0ec", + "34dac0c73538614801c1ad16e272ef57f0b96a972073d15418f38daf9eb401c0", + 
"0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" + ], + "proveSegment": "535e6df58a122a8f5e6c851c19b3e042f4cd1b5c5a8c499581c9f6d4e3509182", + "proofSegments2": [ + "0000000000000000000000000000000000000000000000000000000000000000", + "ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", + "b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "46f43b515833749217540ac60c79e0c6a54c73f3500850b5869b31d5c89d101f", + "0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" + ], + "proveSegment2": "0000000000000000000000000000000000000000000000000000000000000000", + "chunkSpan": 26, + "proofSegments3": [ + "0000000000000000000000000000000000000000000000000000000000000000", + "a7f526447b68535121d36909a7585c9610d4fe6d4115540464c70499b0d7136d", + "066dd7ce6f4f1c97e78ff1c271916db25cb06128c92f8c8520807a0fa2ba93ff", + "df43c86b00db2156e769e8a8df1f08dc89ab5661c6fbaa9563f96fb9c051fc63", + "4284c510d7d64c9e052c73bddadb1fca522fd26caf2ebf007faad50a9a0f09fa", + "b68323ecaad1185a5e078f41c94c59d0b6dda5d57e109866e64d44acb8702846", + "478adfa93a7bb904d0aa86ff0d559f43aa915ee7865592e717b72a24452181cb" + ], + "postageProof": { + "signature": "b0274fcda59e8aaffee803021971a764a017ce2c0f41c8ceb6eefdea807056f621a98feab5ebf33bb6065e49c050f413ec8840b008fc224d882ce5244ce3e0171c", + "postageId": "4c8efc14c8e3cee608174f995d7afe155897bf643a31226e4f1363bc97686aef", + "index": "0000000000080303", + "timeStamp": "0000000000030308" + }, + "socProof": [] + }, + "proofLast": { + "proofSegments": [ + "fee18543782df46a86f85456e62dc973a4c84369b6b1cd4f93e57fe247f9730e", + "23a0858ee2b8b4cb0ba66d3533f468d6b583a6b77df0cc78fc6df64dc735a917", + "b6bffa54dec44ad57349f9aef6cb65a1f8807f15447462ec519751220e5a5bc3", + 
"553aae9948fc13c33d8b353cf5694ecadc7c40c8316ce09cbd4d864dbb94f026", + "af7db874a9b5addf602b3e899194480a32afec6d6cd4ec0fadf9e065db739dd5", + "0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" + ], + "proveSegment": "5ba2c8b912fad4aeb4a11a960946d07b9f66bc40ac54d87224914d75f5aeea5f", + "proofSegments2": [ + "0000000000000000000000000000000000000000000000000000000000000000", + "ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", + "b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "7f575db255ef42dcaeb7658df9f33fe5a1aad5d41af51a72a381acea29d98a12", + "0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" + ], + "proveSegment2": "0000000000000000000000000000000000000000000000000000000000000000", + "chunkSpan": 27, + "proofSegments3": [ + "0000000000000000000000000000000000000000000000000000000000000000", + "a7f526447b68535121d36909a7585c9610d4fe6d4115540464c70499b0d7136d", + "066dd7ce6f4f1c97e78ff1c271916db25cb06128c92f8c8520807a0fa2ba93ff", + "df43c86b00db2156e769e8a8df1f08dc89ab5661c6fbaa9563f96fb9c051fc63", + "7683427ba0ef1fbebf97f2fc36859df88ead8123369fe38d7b767b7a7eda5294", + "b68323ecaad1185a5e078f41c94c59d0b6dda5d57e109866e64d44acb8702846", + "478adfa93a7bb904d0aa86ff0d559f43aa915ee7865592e717b72a24452181cb" + ], + "postageProof": { + "signature": "6747c58ce8613486c696f5bb7393c9c59094371969c3a52bfaf75192c605f4ad7c70c6e71fdd320e20d005e42e94ee32102c234eb465f4f5fd9db60fcad0356b1c", + "postageId": "4c8efc14c8e3cee608174f995d7afe155897bf643a31226e4f1363bc97686aef", + "index": "0000000000080303", + "timeStamp": "0000000000030308" + }, + "socProof": [] + } +} \ No newline at end of file diff --git a/pkg/storer/sample.go b/pkg/storer/sample.go index 07c92885ac0..71c7a180cf7 100644 --- 
a/pkg/storer/sample.go +++ b/pkg/storer/sample.go @@ -7,7 +7,6 @@ package storer import ( "bytes" "context" - "crypto/hmac" "encoding/binary" "fmt" "hash" @@ -18,6 +17,7 @@ import ( "time" "github.com/ethersphere/bee/pkg/bmt" + "github.com/ethersphere/bee/pkg/cac" "github.com/ethersphere/bee/pkg/postage" "github.com/ethersphere/bee/pkg/soc" chunk "github.com/ethersphere/bee/pkg/storage/testing" @@ -27,7 +27,7 @@ import ( "golang.org/x/sync/errgroup" ) -const SampleSize = 8 +const SampleSize = 16 type SampleItem struct { TransformedAddress swarm.Address @@ -41,24 +41,37 @@ type Sample struct { Items []SampleItem } +// RandSample returns Sample with random values. func RandSample(t *testing.T, anchor []byte) Sample { t.Helper() - prefixHasherFactory := func() hash.Hash { - return swarm.NewPrefixHasher(anchor) + chunks := make([]swarm.Chunk, SampleSize) + for i := 0; i < SampleSize; i++ { + ch := chunk.GenerateTestRandomChunk() + if i%3 == 0 { + ch = chunk.GenerateTestRandomSoChunk(t, ch) + } + chunks[i] = ch } - pool := bmt.NewPool(bmt.NewConf(prefixHasherFactory, swarm.BmtBranches, 8)) - hasher := pool.Get() - defer pool.Put(hasher) + sample, err := MakeSampleUsingChunks(chunks, anchor) + if err != nil { + t.Fatal(err) + } - items := make([]SampleItem, SampleSize) - for i := 0; i < SampleSize; i++ { - ch := chunk.GenerateTestRandomChunk() + return sample +} - tr, err := transformedAddress(hasher, ch, swarm.ChunkTypeContentAddressed) +// MakeSampleUsingChunks returns Sample constructed using supplied chunks. 
+func MakeSampleUsingChunks(chunks []swarm.Chunk, anchor []byte) (Sample, error) { + prefixHasherFactory := func() hash.Hash { + return swarm.NewPrefixHasher(anchor) + } + items := make([]SampleItem, len(chunks)) + for i, ch := range chunks { + tr, err := transformedAddress(bmt.NewHasher(prefixHasherFactory), ch, swarm.ChunkTypeContentAddressed) if err != nil { - t.Fatal(err) + return Sample{}, err } items[i] = SampleItem{ @@ -73,7 +86,7 @@ func RandSample(t *testing.T, anchor []byte) Sample { return items[i].TransformedAddress.Compare(items[j].TransformedAddress) == -1 }) - return Sample{Items: items} + return Sample{Items: items}, nil } func newStamp(s swarm.Stamp) *postage.Stamp { @@ -142,19 +155,25 @@ func (db *DB) ReserveSample( // Phase 2: Get the chunk data and calculate transformed hash sampleItemChan := make(chan SampleItem, 64) + prefixHasherFactory := func() hash.Hash { + return swarm.NewPrefixHasher(anchor) + } + const workers = 6 + for i := 0; i < workers; i++ { g.Go(func() error { wstat := SampleStats{} + hasher := bmt.NewHasher(prefixHasherFactory) defer func() { addStats(wstat) }() - hmacr := hmac.New(swarm.NewHasher, anchor) for chItem := range chunkC { // exclude chunks who's batches balance are below minimum if _, found := excludedBatchIDs[string(chItem.BatchID)]; found { wstat.BelowBalanceIgnored++ + continue } @@ -176,16 +195,12 @@ func (db *DB) ReserveSample( wstat.ChunkLoadDuration += time.Since(chunkLoadStart) - hmacrStart := time.Now() - - hmacr.Reset() - _, err = hmacr.Write(chunk.Data()) + taddrStart := time.Now() + taddr, err := transformedAddress(hasher, chunk, chItem.Type) if err != nil { return err } - taddr := swarm.NewAddress(hmacr.Sum(nil)) - - wstat.HmacrDuration += time.Since(hmacrStart) + wstat.TaddrDuration += time.Since(taddrStart) select { case sampleItemChan <- SampleItem{ @@ -229,6 +244,15 @@ func (db *DB) ReserveSample( } } + contains := func(addr swarm.Address) int { + for index, item := range sampleItems { + if 
item.ChunkAddress.Compare(addr) == 0 { + return index + } + } + return -1 + } + // Phase 3: Assemble the sample. Here we need to assemble only the first SampleSize // no of items from the results of the 2nd phase. // In this step stamps are loaded and validated only if chunk will be added to sample. @@ -266,6 +290,17 @@ func (db *DB) ReserveSample( stats.ValidStampDuration += time.Since(start) item.Stamp = stamp + + // check if sample contains transformed address + if index := contains(item.TransformedAddress); index != -1 { + // TODO change back to SOC + if cac.Valid(ch) { + continue + } + // replace the chunk at index + sampleItems[index] = item + continue + } insert(item) stats.SampleInserts++ } @@ -366,7 +401,7 @@ type SampleStats struct { NewIgnored int64 InvalidStamp int64 BelowBalanceIgnored int64 - HmacrDuration time.Duration + TaddrDuration time.Duration ValidStampDuration time.Duration BatchesBelowValueDuration time.Duration RogueChunk int64 @@ -383,7 +418,7 @@ func (s *SampleStats) add(other SampleStats) { s.NewIgnored += other.NewIgnored s.InvalidStamp += other.InvalidStamp s.BelowBalanceIgnored += other.BelowBalanceIgnored - s.HmacrDuration += other.HmacrDuration + s.TaddrDuration += other.TaddrDuration s.ValidStampDuration += other.ValidStampDuration s.BatchesBelowValueDuration += other.BatchesBelowValueDuration s.RogueChunk += other.RogueChunk From 654aaa0522bec199054bc5ae930db0243d3ce6b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Tue, 26 Sep 2023 16:23:29 +0200 Subject: [PATCH 04/10] fix: set sample duration into status --- pkg/storageincentives/agent.go | 8 ++++---- pkg/storageincentives/redistributionstate.go | 3 ++- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/pkg/storageincentives/agent.go b/pkg/storageincentives/agent.go index c6e746a5c45..0d151a66641 100644 --- a/pkg/storageincentives/agent.go +++ b/pkg/storageincentives/agent.go @@ -429,14 +429,17 @@ func (a *Agent) handleSample(ctx 
context.Context, round uint64) (bool, error) { return false, nil } + t := time.Now() sample, err := a.makeSample(ctx, storageRadius) if err != nil { return false, err } + dur := time.Since(t) + a.metrics.SampleDuration.Set(dur.Seconds()) a.logger.Info("produced sample", "hash", sample.ReserveSampleHash, "radius", sample.StorageRadius, "round", round) - a.state.SetSampleData(round, sample) + a.state.SetSampleData(round, sample, dur) return true, nil } @@ -452,13 +455,10 @@ func (a *Agent) makeSample(ctx context.Context, storageRadius uint8) (SampleData return SampleData{}, err } - t := time.Now() rSample, err := a.store.ReserveSample(ctx, salt, storageRadius, uint64(timeLimiter), a.minBatchBalance()) if err != nil { return SampleData{}, err } - dur := time.Since(t) - a.metrics.SampleDuration.Set(dur.Seconds()) sampleHash, err := sampleHash(rSample.Items) if err != nil { diff --git a/pkg/storageincentives/redistributionstate.go b/pkg/storageincentives/redistributionstate.go index 20f61c19cc0..b1f4b60dd03 100644 --- a/pkg/storageincentives/redistributionstate.go +++ b/pkg/storageincentives/redistributionstate.go @@ -244,13 +244,14 @@ func (r *RedistributionState) SampleData(round uint64) (SampleData, bool) { return *rd.SampleData, true } -func (r *RedistributionState) SetSampleData(round uint64, sd SampleData) { +func (r *RedistributionState) SetSampleData(round uint64, sd SampleData, dur time.Duration) { r.mtx.Lock() defer r.mtx.Unlock() rd := r.status.RoundData[round] rd.SampleData = &sd r.status.RoundData[round] = rd + r.status.SampleDuration = dur r.save() } From 1d0c46580fb013db3fd5488b943f6370b879dbac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Tue, 26 Sep 2023 16:29:10 +0200 Subject: [PATCH 05/10] fix: pass duration in tests as well --- pkg/storageincentives/redistributionstate_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/storageincentives/redistributionstate_test.go 
b/pkg/storageincentives/redistributionstate_test.go index 8fd8e3d6805..9c7f750d64c 100644 --- a/pkg/storageincentives/redistributionstate_test.go +++ b/pkg/storageincentives/redistributionstate_test.go @@ -107,7 +107,7 @@ func TestStateRoundData(t *testing.T) { ReserveSampleHash: swarm.RandAddress(t), StorageRadius: 3, } - state.SetSampleData(1, savedSample) + state.SetSampleData(1, savedSample, 0) sample, exists := state.SampleData(1) if !exists { @@ -171,7 +171,7 @@ func TestPurgeRoundData(t *testing.T) { } commitKey := testutil.RandBytes(t, swarm.HashSize) - state.SetSampleData(round, savedSample) + state.SetSampleData(round, savedSample, 0) state.SetCommitKey(round, commitKey) state.SetHasRevealed(round) } From a4671f595d7afaedd5b493cd292b0e37899a29de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Wed, 27 Sep 2023 17:30:18 +0200 Subject: [PATCH 06/10] chore: review --- pkg/api/rchash.go | 2 +- pkg/bmt/bmt.go | 2 +- pkg/bmt/proof.go | 16 ++++----- pkg/storageincentives/agent.go | 5 ++- pkg/storageincentives/proof.go | 34 +++++++++---------- .../redistribution/inclusionproof.go | 8 ++--- pkg/storer/sample.go | 2 +- 7 files changed, 31 insertions(+), 38 deletions(-) diff --git a/pkg/api/rchash.go b/pkg/api/rchash.go index b38e63ef601..0b5b1d8866e 100644 --- a/pkg/api/rchash.go +++ b/pkg/api/rchash.go @@ -19,7 +19,7 @@ func (s *Service) rchash(w http.ResponseWriter, r *http.Request) { logger := s.logger.WithName("get_rchash").Build() paths := struct { - Depth uint8 `map:"depth" validate:"min=0"` + Depth uint8 `map:"depth"` Anchor1 string `map:"anchor1" validate:"required"` Anchor2 string `map:"anchor2" validate:"required"` }{} diff --git a/pkg/bmt/bmt.go b/pkg/bmt/bmt.go index 38c0e8bff5f..f314e012776 100644 --- a/pkg/bmt/bmt.go +++ b/pkg/bmt/bmt.go @@ -40,7 +40,7 @@ type Hasher struct { span []byte // The span of the data subsumed under the chunk } -// facade +// NewHasher gives back an instance of a Hasher struct func 
NewHasher(hasherFact func() hash.Hash) *Hasher { conf := NewConf(hasherFact, swarm.BmtBranches, 32) diff --git a/pkg/bmt/proof.go b/pkg/bmt/proof.go index fa39174c3c5..51be412dc86 100644 --- a/pkg/bmt/proof.go +++ b/pkg/bmt/proof.go @@ -17,15 +17,15 @@ type Proof struct { Index int } -// Override base hash function of Hasher to fill buffer with zeros until chunk length +// Hash overrides base hash function of Hasher to fill buffer with zeros until chunk length func (p Prover) Hash(b []byte) ([]byte, error) { for i := p.size; i < p.maxSize; i += len(zerosection) { _, err := p.Write(zerosection) if err != nil { - return []byte{}, err + return nil, err } } - return p.Hasher.Hash(b) + return p.Hash(b) } // Proof returns the inclusion proof of the i-th data segment @@ -47,13 +47,9 @@ func (p Prover) Proof(i int) Proof { secsize := 2 * p.segmentSize offset := i * secsize section := p.bmt.buffer[offset : offset+secsize] - left := section[:p.segmentSize] - right := section[p.segmentSize:] - var segment, firstSegmentSister []byte - if index%2 == 0 { - segment, firstSegmentSister = left, right - } else { - segment, firstSegmentSister = right, left + segment, firstSegmentSister := section[:p.segmentSize], section[p.segmentSize:] + if index%2 != 0 { + segment, firstSegmentSister = firstSegmentSister, segment } sisters = append([][]byte{firstSegmentSister}, sisters...) 
return Proof{segment, sisters, p.span, index} diff --git a/pkg/storageincentives/agent.go b/pkg/storageincentives/agent.go index 0d151a66641..68c2b526ec7 100644 --- a/pkg/storageincentives/agent.go +++ b/pkg/storageincentives/agent.go @@ -364,7 +364,6 @@ func (a *Agent) handleClaim(ctx context.Context, round uint64) error { } proofs, err := makeInclusionProofs(sampleData.ReserveSampleItems, sampleData.Anchor1, anchor2) - if err != nil { return fmt.Errorf("making inclusion proofs: %w", err) } @@ -429,12 +428,12 @@ func (a *Agent) handleSample(ctx context.Context, round uint64) (bool, error) { return false, nil } - t := time.Now() + now := time.Now() sample, err := a.makeSample(ctx, storageRadius) if err != nil { return false, err } - dur := time.Since(t) + dur := time.Since(now) a.metrics.SampleDuration.Set(dur.Seconds()) a.logger.Info("produced sample", "hash", sample.ReserveSampleHash, "radius", sample.StorageRadius, "round", round) diff --git a/pkg/storageincentives/proof.go b/pkg/storageincentives/proof.go index 3500a3a07ba..e1c84710831 100644 --- a/pkg/storageincentives/proof.go +++ b/pkg/storageincentives/proof.go @@ -19,9 +19,9 @@ import ( "github.com/ethersphere/bee/pkg/swarm" ) -var errMessage = errors.New("reserve commitment hasher: failure in proof creation") +var errProofCreation = errors.New("reserve commitment hasher: failure in proof creation") -// returns the byte index of chunkdata where the spansize starts +// spanOffset returns the byte index of chunkdata where the spansize starts func spanOffset(sampleItem storer.SampleItem) uint8 { ch := swarm.NewChunk(sampleItem.ChunkAddress, sampleItem.ChunkData) if soc.Valid(ch) { @@ -66,16 +66,16 @@ func makeInclusionProofs( rccontent.SetHeaderInt64(swarm.HashSize * storer.SampleSize * 2) rsc, err := sampleChunk(reserveSampleItems) if err != nil { - return redistribution.ChunkInclusionProofs{}, errMessage + return redistribution.ChunkInclusionProofs{}, errProofCreation } rscData := rsc.Data() _, err = 
rccontent.Write(rscData[swarm.SpanSize:]) if err != nil { - return redistribution.ChunkInclusionProofs{}, errMessage + return redistribution.ChunkInclusionProofs{}, errProofCreation } _, err = rccontent.Hash(nil) if err != nil { - return redistribution.ChunkInclusionProofs{}, errMessage + return redistribution.ChunkInclusionProofs{}, errProofCreation } proof1p1 := rccontent.Proof(int(require1) * 2) proof2p1 := rccontent.Proof(int(require2) * 2) @@ -91,11 +91,11 @@ func makeInclusionProofs( chunk1ContentPayload := reserveSampleItems[require1].ChunkData[chunk1Offset+swarm.SpanSize:] _, err = chunk1Content.Write(chunk1ContentPayload) if err != nil { - return redistribution.ChunkInclusionProofs{}, errMessage + return redistribution.ChunkInclusionProofs{}, errProofCreation } _, err = chunk1Content.Hash(nil) if err != nil { - return redistribution.ChunkInclusionProofs{}, errMessage + return redistribution.ChunkInclusionProofs{}, errProofCreation } proof1p2 := chunk1Content.Proof(segmentIndex) // TR chunk proof @@ -103,11 +103,11 @@ func makeInclusionProofs( chunk1TrContent.SetHeader(reserveSampleItems[require1].ChunkData[chunk1Offset : chunk1Offset+swarm.SpanSize]) _, err = chunk1TrContent.Write(chunk1ContentPayload) if err != nil { - return redistribution.ChunkInclusionProofs{}, errMessage + return redistribution.ChunkInclusionProofs{}, errProofCreation } _, err = chunk1TrContent.Hash(nil) if err != nil { - return redistribution.ChunkInclusionProofs{}, errMessage + return redistribution.ChunkInclusionProofs{}, errProofCreation } proof1p3 := chunk1TrContent.Proof(segmentIndex) // cleanup @@ -122,11 +122,11 @@ func makeInclusionProofs( chunk2Content.SetHeader(reserveSampleItems[require2].ChunkData[chunk2Offset : chunk2Offset+swarm.SpanSize]) _, err = chunk2Content.Write(chunk2ContentPayload) if err != nil { - return redistribution.ChunkInclusionProofs{}, errMessage + return redistribution.ChunkInclusionProofs{}, errProofCreation } _, err = chunk2Content.Hash(nil) if err 
!= nil { - return redistribution.ChunkInclusionProofs{}, errMessage + return redistribution.ChunkInclusionProofs{}, errProofCreation } proof2p2 := chunk2Content.Proof(segmentIndex) // TR Chunk proof @@ -134,11 +134,11 @@ func makeInclusionProofs( chunk2TrContent.SetHeader(reserveSampleItems[require2].ChunkData[chunk2Offset : chunk2Offset+swarm.SpanSize]) _, err = chunk2TrContent.Write(chunk2ContentPayload) if err != nil { - return redistribution.ChunkInclusionProofs{}, errMessage + return redistribution.ChunkInclusionProofs{}, errProofCreation } _, err = chunk2TrContent.Hash(nil) if err != nil { - return redistribution.ChunkInclusionProofs{}, errMessage + return redistribution.ChunkInclusionProofs{}, errProofCreation } proof2p3 := chunk2TrContent.Proof(segmentIndex) // cleanup @@ -153,11 +153,11 @@ func makeInclusionProofs( chunkLastContentPayload := reserveSampleItems[require3].ChunkData[chunkLastOffset+swarm.SpanSize:] _, err = chunkLastContent.Write(chunkLastContentPayload) if err != nil { - return redistribution.ChunkInclusionProofs{}, errMessage + return redistribution.ChunkInclusionProofs{}, errProofCreation } _, err = chunkLastContent.Hash(nil) if err != nil { - return redistribution.ChunkInclusionProofs{}, errMessage + return redistribution.ChunkInclusionProofs{}, errProofCreation } proofLastp2 := chunkLastContent.Proof(segmentIndex) // TR Chunk Proof @@ -165,11 +165,11 @@ func makeInclusionProofs( chunkLastTrContent.SetHeader(reserveSampleItems[require3].ChunkData[chunkLastOffset : chunkLastOffset+swarm.SpanSize]) _, err = chunkLastTrContent.Write(chunkLastContentPayload) if err != nil { - return redistribution.ChunkInclusionProofs{}, errMessage + return redistribution.ChunkInclusionProofs{}, errProofCreation } _, err = chunkLastTrContent.Hash(nil) if err != nil { - return redistribution.ChunkInclusionProofs{}, errMessage + return redistribution.ChunkInclusionProofs{}, errProofCreation } proofLastp3 := chunkLastTrContent.Proof(segmentIndex) // cleanup diff 
--git a/pkg/storageincentives/redistribution/inclusionproof.go b/pkg/storageincentives/redistribution/inclusionproof.go index 2c07a645483..b38c82e56fe 100644 --- a/pkg/storageincentives/redistribution/inclusionproof.go +++ b/pkg/storageincentives/redistribution/inclusionproof.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Used for inclusion proof utilities - package redistribution import ( @@ -55,8 +53,8 @@ type SOCProof struct { ChunkAddr string `json:"chunkAddr"` } -// Transforms arguments to ChunkInclusionProof object -func NewChunkInclusionProof(proofp1, proofp2 bmt.Proof, proofp3 bmt.Proof, sampleItem storer.SampleItem) (ChunkInclusionProof, error) { +// NewChunkInclusionProof transforms arguments to ChunkInclusionProof object +func NewChunkInclusionProof(proofp1, proofp2, proofp3 bmt.Proof, sampleItem storer.SampleItem) (ChunkInclusionProof, error) { proofp1Hex := newHexProofs(proofp1) proofp2Hex := newHexProofs(proofp2) proofp3Hex := newHexProofs(proofp3) @@ -108,7 +106,7 @@ type hexProof struct { ProveSegment string } -// Transforms proof object to its hexadecimal representation +// newHexProofs transforms proof object to its hexadecimal representation func newHexProofs(proof bmt.Proof) hexProof { proofSegments := make([]string, len(proof.ProofSegments)) for i := 0; i < len(proof.ProofSegments); i++ { diff --git a/pkg/storer/sample.go b/pkg/storer/sample.go index 71c7a180cf7..f48efad246c 100644 --- a/pkg/storer/sample.go +++ b/pkg/storer/sample.go @@ -291,7 +291,7 @@ func (db *DB) ReserveSample( item.Stamp = stamp - // check if sample contains transformed address + // ensuring to pass the check order function of redistribution contract if index := contains(item.TransformedAddress); index != -1 { // TODO change back to SOC if cac.Valid(ch) { From 206c71d5bd3f47ac975e530e85a19fe7ca12be7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Wed, 27 Sep 2023 
17:38:15 +0200 Subject: [PATCH 07/10] refactor: anchor deserialization --- pkg/api/api.go | 5 +++++ pkg/api/rchash.go | 19 ++++--------------- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/pkg/api/api.go b/pkg/api/api.go index 3ecea7ad000..49331689a0f 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -10,6 +10,7 @@ import ( "context" "crypto/ecdsa" "encoding/base64" + "encoding/hex" "encoding/json" "errors" "fmt" @@ -286,6 +287,10 @@ func New( buf, err := base64.URLEncoding.DecodeString(v) return string(buf), err }, + "decHex": func(v string) (string, error) { + buf, err := hex.DecodeString(v) + return string(buf), err + }, } s.validate = validator.New() s.validate.RegisterTagNameFunc(func(fld reflect.StructField) string { diff --git a/pkg/api/rchash.go b/pkg/api/rchash.go index 0b5b1d8866e..702356d7331 100644 --- a/pkg/api/rchash.go +++ b/pkg/api/rchash.go @@ -4,7 +4,6 @@ package api import ( - "encoding/hex" "net/http" "github.com/ethersphere/bee/pkg/jsonhttp" @@ -20,27 +19,17 @@ func (s *Service) rchash(w http.ResponseWriter, r *http.Request) { paths := struct { Depth uint8 `map:"depth"` - Anchor1 string `map:"anchor1" validate:"required"` - Anchor2 string `map:"anchor2" validate:"required"` + Anchor1 string `map:"anchor1,decHex" validate:"required"` + Anchor2 string `map:"anchor2,decHex" validate:"required"` }{} if response := s.mapStructure(mux.Vars(r), &paths); response != nil { response("invalid path params", logger, w) return } - anchor1, err := hex.DecodeString(paths.Anchor1) - if err != nil { - logger.Error(err, "invalid hex params") - jsonhttp.InternalServerError(w, "invalid hex params") - return - } + anchor1 := []byte(paths.Anchor1) - anchor2, err := hex.DecodeString(paths.Anchor2) - if err != nil { - logger.Error(err, "invalid hex params") - jsonhttp.InternalServerError(w, "invalid hex params") - return - } + anchor2 := []byte(paths.Anchor2) resp, err := s.redistributionAgent.SampleWithProofs(r.Context(), anchor1, anchor2, 
paths.Depth) if err != nil { From 3d6c27ed0c66da6355764a3221470174e21f7a34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Wed, 27 Sep 2023 17:40:59 +0200 Subject: [PATCH 08/10] revert: use p.Hasher.Hash instead of p.Hash --- pkg/bmt/proof.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/bmt/proof.go b/pkg/bmt/proof.go index 51be412dc86..aee820ad9ba 100644 --- a/pkg/bmt/proof.go +++ b/pkg/bmt/proof.go @@ -25,7 +25,7 @@ func (p Prover) Hash(b []byte) ([]byte, error) { return nil, err } } - return p.Hash(b) + return p.Hasher.Hash(b) } // Proof returns the inclusion proof of the i-th data segment From c53032de6ca0420a95abefeee6609a6274bb0684 Mon Sep 17 00:00:00 2001 From: nugaon <50576770+nugaon@users.noreply.github.com> Date: Wed, 27 Sep 2023 18:47:49 +0200 Subject: [PATCH 09/10] fix: bump storage-incentives abi to v0.6.0-rc3 (#4348) Co-authored-by: istae <14264581+istae@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 +- pkg/api/api_test.go | 2 +- pkg/api/rchash.go | 106 +++++++- pkg/config/chain.go | 6 +- pkg/storageincentives/agent.go | 2 +- pkg/storageincentives/agent_test.go | 2 +- pkg/storageincentives/proof.go | 1 - pkg/storageincentives/proof_test.go | 13 +- .../redistribution/inclusionproof.go | 99 ++++---- .../redistribution/redistribution.go | 4 +- .../redistribution/redistribution_test.go | 231 +++++++++--------- .../testdata/inclusion-proofs.json | 170 ++++++------- pkg/storer/sample.go | 5 - 14 files changed, 363 insertions(+), 284 deletions(-) diff --git a/go.mod b/go.mod index 95539b3cbd6..9ca865cf074 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/coreos/go-semver v0.3.0 github.com/ethereum/go-ethereum v1.12.2 github.com/ethersphere/go-price-oracle-abi v0.1.0 - github.com/ethersphere/go-storage-incentives-abi v0.5.0 + github.com/ethersphere/go-storage-incentives-abi v0.6.0-rc3 github.com/ethersphere/go-sw3-abi v0.4.0 github.com/ethersphere/langos v1.0.0 
github.com/go-playground/validator/v10 v10.11.1 diff --git a/go.sum b/go.sum index 0668711533d..8b98c44d2ed 100644 --- a/go.sum +++ b/go.sum @@ -223,8 +223,8 @@ github.com/ethereum/go-ethereum v1.12.2 h1:eGHJ4ij7oyVqUQn48LBz3B7pvQ8sV0wGJiIE6 github.com/ethereum/go-ethereum v1.12.2/go.mod h1:1cRAEV+rp/xX0zraSCBnu9Py3HQ+geRMj3HdR+k0wfI= github.com/ethersphere/go-price-oracle-abi v0.1.0 h1:yg/hK8nETNvk+GEBASlbakMFv/CVp7HXiycrHw1pRV8= github.com/ethersphere/go-price-oracle-abi v0.1.0/go.mod h1:sI/Qj4/zJ23/b1enzwMMv0/hLTpPNVNacEwCWjo6yBk= -github.com/ethersphere/go-storage-incentives-abi v0.5.0 h1:dd01OZmPraCjOIiSX5FsCfFFwUR2b9PuTO/LDcYxS+s= -github.com/ethersphere/go-storage-incentives-abi v0.5.0/go.mod h1:SXvJVtM4sEsaSKD0jc1ClpDLw8ErPoROZDme4Wrc/Nc= +github.com/ethersphere/go-storage-incentives-abi v0.6.0-rc3 h1:tXux2FnhuU6DbrY+Z4nVQMGp63JkJPq7pKb5Xi2Sjxo= +github.com/ethersphere/go-storage-incentives-abi v0.6.0-rc3/go.mod h1:SXvJVtM4sEsaSKD0jc1ClpDLw8ErPoROZDme4Wrc/Nc= github.com/ethersphere/go-sw3-abi v0.4.0 h1:T3ANY+ktWrPAwe2U0tZi+DILpkHzto5ym/XwV/Bbz8g= github.com/ethersphere/go-sw3-abi v0.4.0/go.mod h1:BmpsvJ8idQZdYEtWnvxA8POYQ8Rl/NhyCdF0zLMOOJU= github.com/ethersphere/langos v1.0.0 h1:NBtNKzXTTRSue95uOlzPN4py7Aofs0xWPzyj4AI1Vcc= diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go index dd4eaabb25f..21e535f10ac 100644 --- a/pkg/api/api_test.go +++ b/pkg/api/api_test.go @@ -783,7 +783,7 @@ func (m *mockContract) Claim(context.Context, redistribution.ChunkInclusionProof return common.Hash{}, nil } -func (m *mockContract) Commit(context.Context, []byte, *big.Int) (common.Hash, error) { +func (m *mockContract) Commit(context.Context, []byte, uint32) (common.Hash, error) { m.mtx.Lock() defer m.mtx.Unlock() m.callsList = append(m.callsList, commitCall) diff --git a/pkg/api/rchash.go b/pkg/api/rchash.go index 702356d7331..ad7a25f9992 100644 --- a/pkg/api/rchash.go +++ b/pkg/api/rchash.go @@ -4,14 +4,106 @@ package api import ( + "encoding/hex" "net/http" + "strconv" 
+ "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethersphere/bee/pkg/jsonhttp" - "github.com/ethersphere/bee/pkg/storageincentives" + "github.com/ethersphere/bee/pkg/storageincentives/redistribution" + "github.com/ethersphere/bee/pkg/swarm" "github.com/gorilla/mux" ) -type RCHashResponse storageincentives.SampleWithProofs +type RCHashResponse struct { + Hash swarm.Address `json:"hash"` + Proofs ChunkInclusionProofs `json:"proofs"` + Duration time.Duration `json:"duration"` +} + +type ChunkInclusionProofs struct { + A ChunkInclusionProof `json:"proof1"` + B ChunkInclusionProof `json:"proof2"` + C ChunkInclusionProof `json:"proofLast"` +} + +// ChunkInclusionProof structure must exactly match +// corresponding structure (of the same name) in Redistribution.sol smart contract. +// github.com/ethersphere/storage-incentives/blob/ph_f2/src/Redistribution.sol +// github.com/ethersphere/storage-incentives/blob/master/src/Redistribution.sol (when merged to master) +type ChunkInclusionProof struct { + ProofSegments []string `json:"proofSegments"` + ProveSegment string `json:"proveSegment"` + ProofSegments2 []string `json:"proofSegments2"` + ProveSegment2 string `json:"proveSegment2"` + ChunkSpan uint64 `json:"chunkSpan"` + ProofSegments3 []string `json:"proofSegments3"` + PostageProof PostageProof `json:"postageProof"` + SocProof []SOCProof `json:"socProof"` +} + +// SOCProof structure must exactly match +// corresponding structure (of the same name) in Redistribution.sol smart contract. +type PostageProof struct { + Signature string `json:"signature"` + PostageId string `json:"postageId"` + Index string `json:"index"` + TimeStamp string `json:"timeStamp"` +} + +// SOCProof structure must exactly match +// corresponding structure (of the same name) in Redistribution.sol smart contract. 
+type SOCProof struct { + Signer string `json:"signer"` + Signature string `json:"signature"` + Identifier string `json:"identifier"` + ChunkAddr string `json:"chunkAddr"` +} + +func renderChunkInclusionProofs(proofs redistribution.ChunkInclusionProofs) ChunkInclusionProofs { + return ChunkInclusionProofs{ + A: renderChunkInclusionProof(proofs.A), + B: renderChunkInclusionProof(proofs.B), + C: renderChunkInclusionProof(proofs.C), + } +} + +func renderChunkInclusionProof(proof redistribution.ChunkInclusionProof) ChunkInclusionProof { + var socProof []SOCProof + if len(proof.SocProof) == 1 { + socProof = []SOCProof{{ + Signer: hex.EncodeToString(proof.SocProof[0].Signer.Bytes()), + Signature: hex.EncodeToString(proof.SocProof[0].Signature[:]), + Identifier: hex.EncodeToString(proof.SocProof[0].Identifier.Bytes()), + ChunkAddr: hex.EncodeToString(proof.SocProof[0].ChunkAddr.Bytes()), + }} + } + + return ChunkInclusionProof{ + ProveSegment: hex.EncodeToString(proof.ProveSegment.Bytes()), + ProofSegments: renderCommonHash(proof.ProofSegments), + ProveSegment2: hex.EncodeToString(proof.ProveSegment2.Bytes()), + ProofSegments2: renderCommonHash(proof.ProofSegments2), + ProofSegments3: renderCommonHash(proof.ProofSegments3), + ChunkSpan: proof.ChunkSpan, + PostageProof: PostageProof{ + Signature: hex.EncodeToString(proof.PostageProof.Signature[:]), + PostageId: hex.EncodeToString(proof.PostageProof.PostageId[:]), + Index: strconv.FormatUint(proof.PostageProof.Index, 16), + TimeStamp: strconv.FormatUint(proof.PostageProof.TimeStamp, 16), + }, + SocProof: socProof, + } +} + +func renderCommonHash(proofSegments []common.Hash) []string { + output := make([]string, len(proofSegments)) + for i, s := range proofSegments { + output[i] = hex.EncodeToString(s.Bytes()) + } + return output +} // This API is kept for testing the sampler. As a result, no documentation or tests are added here. 
func (s *Service) rchash(w http.ResponseWriter, r *http.Request) { @@ -31,12 +123,18 @@ func (s *Service) rchash(w http.ResponseWriter, r *http.Request) { anchor2 := []byte(paths.Anchor2) - resp, err := s.redistributionAgent.SampleWithProofs(r.Context(), anchor1, anchor2, paths.Depth) + swp, err := s.redistributionAgent.SampleWithProofs(r.Context(), anchor1, anchor2, paths.Depth) if err != nil { logger.Error(err, "failed making sample with proofs") jsonhttp.InternalServerError(w, "failed making sample with proofs") return } - jsonhttp.OK(w, RCHashResponse(resp)) + resp := RCHashResponse{ + Hash: swp.Hash, + Duration: swp.Duration, + Proofs: renderChunkInclusionProofs(swp.Proofs), + } + + jsonhttp.OK(w, resp) } diff --git a/pkg/config/chain.go b/pkg/config/chain.go index 2fd2e6f8fe7..4d7fd19e419 100644 --- a/pkg/config/chain.go +++ b/pkg/config/chain.go @@ -42,7 +42,7 @@ var ( SwarmTokenSymbol: "gBZZ", StakingAddress: common.HexToAddress(abi.TestnetStakingAddress), - PostageStampAddress: common.HexToAddress(abi.TestnetPostageStampStampAddress), + PostageStampAddress: common.HexToAddress(abi.TestnetPostageStampAddress), RedistributionAddress: common.HexToAddress(abi.TestnetRedistributionAddress), SwapPriceOracleAddress: common.HexToAddress("0x0c9de531dcb38b758fe8a2c163444a5e54ee0db2"), CurrentFactoryAddress: common.HexToAddress("0x73c412512E1cA0be3b89b77aB3466dA6A1B9d273"), @@ -51,7 +51,7 @@ var ( }, StakingABI: abi.TestnetStakingABI, - PostageStampABI: abi.TestnetPostageStampStampABI, + PostageStampABI: abi.TestnetPostageStampABI, RedistributionABI: abi.TestnetRedistributionABI, } @@ -84,7 +84,7 @@ func GetByChainID(chainID int64) (ChainConfig, bool) { NativeTokenSymbol: Testnet.NativeTokenSymbol, SwarmTokenSymbol: Testnet.SwarmTokenSymbol, StakingABI: abi.TestnetStakingABI, - PostageStampABI: abi.TestnetPostageStampStampABI, + PostageStampABI: abi.TestnetPostageStampABI, RedistributionABI: abi.TestnetRedistributionABI, }, false } diff --git 
a/pkg/storageincentives/agent.go b/pkg/storageincentives/agent.go index 68c2b526ec7..ee12c11fab7 100644 --- a/pkg/storageincentives/agent.go +++ b/pkg/storageincentives/agent.go @@ -510,7 +510,7 @@ func (a *Agent) commit(ctx context.Context, sample SampleData, round uint64) err return err } - txHash, err := a.contract.Commit(ctx, obfuscatedHash, big.NewInt(int64(round))) + txHash, err := a.contract.Commit(ctx, obfuscatedHash, uint32(round)) if err != nil { a.metrics.ErrCommit.Inc() return err diff --git a/pkg/storageincentives/agent_test.go b/pkg/storageincentives/agent_test.go index 58a06eaf6eb..70ec3193975 100644 --- a/pkg/storageincentives/agent_test.go +++ b/pkg/storageincentives/agent_test.go @@ -283,7 +283,7 @@ func (m *mockContract) Claim(context.Context, redistribution.ChunkInclusionProof return common.Hash{}, nil } -func (m *mockContract) Commit(context.Context, []byte, *big.Int) (common.Hash, error) { +func (m *mockContract) Commit(context.Context, []byte, uint32) (common.Hash, error) { m.mtx.Lock() defer m.mtx.Unlock() m.callsList = append(m.callsList, commitCall) diff --git a/pkg/storageincentives/proof.go b/pkg/storageincentives/proof.go index e1c84710831..ccc04cd935b 100644 --- a/pkg/storageincentives/proof.go +++ b/pkg/storageincentives/proof.go @@ -55,7 +55,6 @@ func makeInclusionProofs( require2++ } - // TODO: refactor, make it global / anchor (cleanup?) 
prefixHasherFactory := func() hash.Hash { return swarm.NewPrefixHasher(anchor1) } diff --git a/pkg/storageincentives/proof_test.go b/pkg/storageincentives/proof_test.go index 655582953e9..9ee3745e1af 100644 --- a/pkg/storageincentives/proof_test.go +++ b/pkg/storageincentives/proof_test.go @@ -6,10 +6,10 @@ package storageincentives_test import ( "bytes" + _ "embed" "encoding/json" "fmt" "math/big" - "os" "testing" "github.com/ethersphere/bee/pkg/cac" @@ -38,6 +38,9 @@ func TestMakeInclusionProofs(t *testing.T) { } } +//go:embed testdata/inclusion-proofs.json +var testData []byte + // Test asserts that MakeInclusionProofs will generate the same // output for given sample. func TestMakeInclusionProofsRegression(t *testing.T) { @@ -111,10 +114,12 @@ func TestMakeInclusionProofsRegression(t *testing.T) { t.Fatal(err) } - expectedProofs := redistribution.ChunkInclusionProofs{} + var expectedProofs redistribution.ChunkInclusionProofs - data, _ := os.ReadFile("testdata/inclusion-proofs.json") - _ = json.Unmarshal(data, &expectedProofs) + err = json.Unmarshal(testData, &expectedProofs) + if err != nil { + t.Fatal(err) + } if diff := cmp.Diff(proofs, expectedProofs); diff != "" { t.Fatalf("unexpected inclusion proofs (-want +have):\n%s", diff) diff --git a/pkg/storageincentives/redistribution/inclusionproof.go b/pkg/storageincentives/redistribution/inclusionproof.go index b38c82e56fe..d2e6946ed92 100644 --- a/pkg/storageincentives/redistribution/inclusionproof.go +++ b/pkg/storageincentives/redistribution/inclusionproof.go @@ -6,8 +6,8 @@ package redistribution import ( "encoding/binary" - "encoding/hex" + "github.com/ethereum/go-ethereum/common" "github.com/ethersphere/bee/pkg/bmt" "github.com/ethersphere/bee/pkg/soc" "github.com/ethersphere/bee/pkg/storer" @@ -25,96 +25,81 @@ type ChunkInclusionProofs struct { // github.com/ethersphere/storage-incentives/blob/ph_f2/src/Redistribution.sol // github.com/ethersphere/storage-incentives/blob/master/src/Redistribution.sol 
(when merged to master) type ChunkInclusionProof struct { - ProofSegments []string `json:"proofSegments"` - ProveSegment string `json:"proveSegment"` - ProofSegments2 []string `json:"proofSegments2"` - ProveSegment2 string `json:"proveSegment2"` - ChunkSpan uint64 `json:"chunkSpan"` - ProofSegments3 []string `json:"proofSegments3"` - PostageProof PostageProof `json:"postageProof"` - SocProof []SOCProof `json:"socProof"` + ProofSegments []common.Hash `json:"proofSegments"` + ProveSegment common.Hash `json:"proveSegment"` + ProofSegments2 []common.Hash `json:"proofSegments2"` + ProveSegment2 common.Hash `json:"proveSegment2"` + ChunkSpan uint64 `json:"chunkSpan"` + ProofSegments3 []common.Hash `json:"proofSegments3"` + PostageProof PostageProof `json:"postageProof"` + SocProof []SOCProof `json:"socProof"` } // SOCProof structure must exactly match // corresponding structure (of the same name) in Redistribution.sol smart contract. type PostageProof struct { - Signature string `json:"signature"` - PostageId string `json:"postageId"` - Index string `json:"index"` - TimeStamp string `json:"timeStamp"` + Signature []byte `json:"signature"` + PostageId common.Hash `json:"postageId"` + Index uint64 `json:"index"` + TimeStamp uint64 `json:"timeStamp"` } // SOCProof structure must exactly match // corresponding structure (of the same name) in Redistribution.sol smart contract. 
type SOCProof struct { - Signer string `json:"signer"` - Signature string `json:"signature"` - Identifier string `json:"identifier"` - ChunkAddr string `json:"chunkAddr"` + Signer common.Address `json:"signer"` + Signature []byte `json:"signature"` + Identifier common.Hash `json:"identifier"` + ChunkAddr common.Hash `json:"chunkAddr"` } -// NewChunkInclusionProof transforms arguments to ChunkInclusionProof object -func NewChunkInclusionProof(proofp1, proofp2, proofp3 bmt.Proof, sampleItem storer.SampleItem) (ChunkInclusionProof, error) { - proofp1Hex := newHexProofs(proofp1) - proofp2Hex := newHexProofs(proofp2) - proofp3Hex := newHexProofs(proofp3) - +// Transforms arguments to ChunkInclusionProof object +func NewChunkInclusionProof(proofp1, proofp2 bmt.Proof, proofp3 bmt.Proof, sampleItem storer.SampleItem) (ChunkInclusionProof, error) { socProof, err := makeSOCProof(sampleItem) if err != nil { return ChunkInclusionProof{}, err } return ChunkInclusionProof{ - ProofSegments: proofp1Hex.ProofSegments, - ProveSegment: proofp1Hex.ProveSegment, - ProofSegments2: proofp2Hex.ProofSegments, - ProveSegment2: proofp2Hex.ProveSegment, + ProofSegments: toCommonHash(proofp1.ProofSegments), + ProveSegment: common.BytesToHash(proofp1.ProveSegment), + ProofSegments2: toCommonHash(proofp2.ProofSegments), + ProveSegment2: common.BytesToHash(proofp2.ProveSegment), ChunkSpan: binary.LittleEndian.Uint64(proofp2.Span[:swarm.SpanSize]), // should be uint64 on the other size; copied from pkg/api/bytes.go - ProofSegments3: proofp3Hex.ProofSegments, + ProofSegments3: toCommonHash(proofp3.ProofSegments), PostageProof: PostageProof{ - Signature: hex.EncodeToString(sampleItem.Stamp.Sig()), - PostageId: hex.EncodeToString(sampleItem.Stamp.BatchID()), - Index: hex.EncodeToString(sampleItem.Stamp.Index()), - TimeStamp: hex.EncodeToString(sampleItem.Stamp.Timestamp()), + Signature: sampleItem.Stamp.Sig(), + PostageId: common.BytesToHash(sampleItem.Stamp.BatchID()), + Index: 
binary.BigEndian.Uint64(sampleItem.Stamp.Index()), + TimeStamp: binary.BigEndian.Uint64(sampleItem.Stamp.Timestamp()), }, SocProof: socProof, }, nil } +func toCommonHash(hashes [][]byte) []common.Hash { + output := make([]common.Hash, len(hashes)) + for i, s := range hashes { + output[i] = common.BytesToHash(s) + } + return output +} + func makeSOCProof(sampleItem storer.SampleItem) ([]SOCProof, error) { - var emptySOCProof = make([]SOCProof, 0) ch := swarm.NewChunk(sampleItem.ChunkAddress, sampleItem.ChunkData) if !soc.Valid(ch) { - return emptySOCProof, nil + return []SOCProof{}, nil } socCh, err := soc.FromChunk(ch) if err != nil { - return emptySOCProof, err + return []SOCProof{}, err } return []SOCProof{{ - Signer: hex.EncodeToString(socCh.OwnerAddress()), - Signature: hex.EncodeToString(socCh.Signature()), - Identifier: hex.EncodeToString(socCh.ID()), - ChunkAddr: hex.EncodeToString(socCh.WrappedChunk().Address().Bytes()), + Signer: common.BytesToAddress(socCh.OwnerAddress()), + Signature: socCh.Signature(), + Identifier: common.BytesToHash(socCh.ID()), + ChunkAddr: common.BytesToHash(socCh.WrappedChunk().Address().Bytes()), }}, nil } - -type hexProof struct { - ProofSegments []string - ProveSegment string -} - -// newHexProofs transforms proof object to its hexadecimal representation -func newHexProofs(proof bmt.Proof) hexProof { - proofSegments := make([]string, len(proof.ProofSegments)) - for i := 0; i < len(proof.ProofSegments); i++ { - proofSegments[i] = hex.EncodeToString(proof.ProofSegments[i]) - } - - return hexProof{ - ProveSegment: hex.EncodeToString(proof.ProveSegment), - ProofSegments: proofSegments, - } -} diff --git a/pkg/storageincentives/redistribution/redistribution.go b/pkg/storageincentives/redistribution/redistribution.go index aecc47d1dec..2c163f5a76c 100644 --- a/pkg/storageincentives/redistribution/redistribution.go +++ b/pkg/storageincentives/redistribution/redistribution.go @@ -24,7 +24,7 @@ type Contract interface { 
IsPlaying(context.Context, uint8) (bool, error) IsWinner(context.Context) (bool, error) Claim(context.Context, ChunkInclusionProofs) (common.Hash, error) - Commit(context.Context, []byte, *big.Int) (common.Hash, error) + Commit(context.Context, []byte, uint32) (common.Hash, error) Reveal(context.Context, uint8, []byte, []byte) (common.Hash, error) } @@ -115,7 +115,7 @@ func (c *contract) Claim(ctx context.Context, proofs ChunkInclusionProofs) (comm } // Commit submits the obfusHash hash by sending a transaction to the blockchain. -func (c *contract) Commit(ctx context.Context, obfusHash []byte, round *big.Int) (common.Hash, error) { +func (c *contract) Commit(ctx context.Context, obfusHash []byte, round uint32) (common.Hash, error) { callData, err := c.incentivesContractABI.Pack("commit", common.BytesToHash(obfusHash), common.BytesToHash(c.overlay.Bytes()), round) if err != nil { return common.Hash{}, err diff --git a/pkg/storageincentives/redistribution/redistribution_test.go b/pkg/storageincentives/redistribution/redistribution_test.go index 321681f7f04..943d3013ba4 100644 --- a/pkg/storageincentives/redistribution/redistribution_test.go +++ b/pkg/storageincentives/redistribution/redistribution_test.go @@ -7,6 +7,7 @@ package redistribution_test import ( "bytes" "context" + "encoding/binary" "errors" "fmt" "math/big" @@ -27,34 +28,35 @@ import ( var redistributionContractABI = abiutil.MustParseABI(chaincfg.Testnet.RedistributionABI) -// TODO uncomment when ABI is updated -// func randChunkInclusionProof(t *testing.T) redistribution.ChunkInclusionProof { -// t.Helper() - -// return redistribution.ChunkInclusionProof{ -// ProofSegments: []string{hex.EncodeToString(testutil.RandBytes(t, 32))}, -// ProveSegment: hex.EncodeToString(testutil.RandBytes(t, 32)), -// ProofSegments2: []string{hex.EncodeToString(testutil.RandBytes(t, 32))}, -// ProveSegment2: hex.EncodeToString(testutil.RandBytes(t, 32)), -// ProofSegments3: []string{hex.EncodeToString(testutil.RandBytes(t, 
32))}, -// ChunkSpan: 1, -// Signature: string(testutil.RandBytes(t, 32)), -// ChunkAddr: hex.EncodeToString(testutil.RandBytes(t, 32)), -// PostageId: hex.EncodeToString(testutil.RandBytes(t, 32)), -// Index: hex.EncodeToString(testutil.RandBytes(t, 32)), -// TimeStamp: strconv.Itoa(time.Now().Nanosecond()), -// } -// } - -// func randChunkInclusionProofs(t *testing.T) redistribution.ChunkInclusionProofs { -// t.Helper() - -// return redistribution.ChunkInclusionProofs{ -// A: randChunkInclusionProof(t), -// B: randChunkInclusionProof(t), -// C: randChunkInclusionProof(t), -// } -// } +func randChunkInclusionProof(t *testing.T) redistribution.ChunkInclusionProof { + t.Helper() + + return redistribution.ChunkInclusionProof{ + ProofSegments: []common.Hash{common.BytesToHash(testutil.RandBytes(t, 32))}, + ProveSegment: common.BytesToHash(testutil.RandBytes(t, 32)), + ProofSegments2: []common.Hash{common.BytesToHash(testutil.RandBytes(t, 32))}, + ProveSegment2: common.BytesToHash(testutil.RandBytes(t, 32)), + ProofSegments3: []common.Hash{common.BytesToHash(testutil.RandBytes(t, 32))}, + PostageProof: redistribution.PostageProof{ + Signature: testutil.RandBytes(t, 65), + PostageId: common.BytesToHash(testutil.RandBytes(t, 32)), + Index: binary.BigEndian.Uint64(testutil.RandBytes(t, 8)), + TimeStamp: binary.BigEndian.Uint64(testutil.RandBytes(t, 8)), + }, + ChunkSpan: 1, + SocProof: []redistribution.SOCProof{}, + } +} + +func randChunkInclusionProofs(t *testing.T) redistribution.ChunkInclusionProofs { + t.Helper() + + return redistribution.ChunkInclusionProofs{ + A: randChunkInclusionProof(t), + B: randChunkInclusionProof(t), + C: randChunkInclusionProof(t), + } +} func TestRedistribution(t *testing.T) { t.Parallel() @@ -179,98 +181,93 @@ func TestRedistribution(t *testing.T) { } }) - // t.Run("Claim", func(t *testing.T) { - // t.Parallel() - - // proofs := randChunkInclusionProofs(t) - // // TODO: use this when abi is updated - // // expectedCallData, err := 
redistributionContractABI.Pack("claim", proofs.A, proofs.B, proofs.C) - - // expectedCallData, err := redistributionContractABI.Pack("claim") - // if err != nil { - // t.Fatal(err) - // } - // contract := redistribution.New( - // owner, - // log.Noop, - // transactionMock.New( - // transactionMock.WithSendFunc(func(ctx context.Context, request *transaction.TxRequest, boost int) (txHash common.Hash, err error) { - // if *request.To == redistributionContractAddress { - // if !bytes.Equal(expectedCallData[:32], request.Data[:32]) { - // return common.Hash{}, fmt.Errorf("got wrong call data. wanted %x, got %x", expectedCallData, request.Data) - // } - // return txHashDeposited, nil - // } - // return common.Hash{}, errors.New("sent to wrong contract") - // }), - // transactionMock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { - // if txHash == txHashDeposited { - // return &types.Receipt{ - // Status: 1, - // }, nil - // } - // return nil, errors.New("unknown tx hash") - // }), - // ), - // redistributionContractAddress, - // redistributionContractABI, - // ) - - // _, err = contract.Claim(ctx, proofs) - // if err != nil { - // t.Fatal(err) - // } - // }) - - // NOTE: skip until storage-incentives-abi gets update - // t.Run("Claim with tx reverted", func(t *testing.T) { - // t.Parallel() - - // proofs := randChunkInclusionProofs(t) - // // TODO_PH4: use this when abi is updated - // // expectedCallData, err := redistributionContractABI.Pack("claim", proofs.A, proofs.B, proofs.C) - // expectedCallData, err := redistributionContractABI.Pack("claim") - // if err != nil { - // t.Fatal(err) - // } - // contract := redistribution.New( - // owner, - // log.Noop, - // transactionMock.New( - // transactionMock.WithSendFunc(func(ctx context.Context, request *transaction.TxRequest, boost int) (txHash common.Hash, err error) { - // if *request.To == redistributionContractAddress { - // if 
!bytes.Equal(expectedCallData[:32], request.Data[:32]) { - // return common.Hash{}, fmt.Errorf("got wrong call data. wanted %x, got %x", expectedCallData, request.Data) - // } - // return txHashDeposited, nil - // } - // return common.Hash{}, errors.New("sent to wrong contract") - // }), - // transactionMock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { - // if txHash == txHashDeposited { - // return &types.Receipt{ - // Status: 0, - // }, nil - // } - // return nil, errors.New("unknown tx hash") - // }), - // ), - // redistributionContractAddress, - // redistributionContractABI, - // ) - - // _, err = contract.Claim(ctx, proofs) - // if !errors.Is(err, transaction.ErrTransactionReverted) { - // t.Fatal(err) - // } - // }) + t.Run("Claim", func(t *testing.T) { + t.Parallel() + + proofs := randChunkInclusionProofs(t) + + expectedCallData, err := redistributionContractABI.Pack("claim", proofs.A, proofs.B, proofs.C) + if err != nil { + t.Fatal(err) + } + contract := redistribution.New( + owner, + log.Noop, + transactionMock.New( + transactionMock.WithSendFunc(func(ctx context.Context, request *transaction.TxRequest, boost int) (txHash common.Hash, err error) { + if *request.To == redistributionContractAddress { + if !bytes.Equal(expectedCallData[:32], request.Data[:32]) { + return common.Hash{}, fmt.Errorf("got wrong call data. 
wanted %x, got %x", expectedCallData, request.Data) + } + return txHashDeposited, nil + } + return common.Hash{}, errors.New("sent to wrong contract") + }), + transactionMock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { + if txHash == txHashDeposited { + return &types.Receipt{ + Status: 1, + }, nil + } + return nil, errors.New("unknown tx hash") + }), + ), + redistributionContractAddress, + redistributionContractABI, + ) + + _, err = contract.Claim(ctx, proofs) + if err != nil { + t.Fatal(err) + } + }) + + t.Run("Claim with tx reverted", func(t *testing.T) { + t.Parallel() + + proofs := randChunkInclusionProofs(t) + expectedCallData, err := redistributionContractABI.Pack("claim", proofs.A, proofs.B, proofs.C) + if err != nil { + t.Fatal(err) + } + contract := redistribution.New( + owner, + log.Noop, + transactionMock.New( + transactionMock.WithSendFunc(func(ctx context.Context, request *transaction.TxRequest, boost int) (txHash common.Hash, err error) { + if *request.To == redistributionContractAddress { + if !bytes.Equal(expectedCallData[:32], request.Data[:32]) { + return common.Hash{}, fmt.Errorf("got wrong call data. 
wanted %x, got %x", expectedCallData, request.Data) + } + return txHashDeposited, nil + } + return common.Hash{}, errors.New("sent to wrong contract") + }), + transactionMock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { + if txHash == txHashDeposited { + return &types.Receipt{ + Status: 0, + }, nil + } + return nil, errors.New("unknown tx hash") + }), + ), + redistributionContractAddress, + redistributionContractABI, + ) + + _, err = contract.Claim(ctx, proofs) + if !errors.Is(err, transaction.ErrTransactionReverted) { + t.Fatal(err) + } + }) t.Run("Commit", func(t *testing.T) { t.Parallel() var obfus [32]byte testobfus := common.Hex2Bytes("hash") copy(obfus[:], testobfus) - expectedCallData, err := redistributionContractABI.Pack("commit", obfus, common.BytesToHash(owner.Bytes()), big.NewInt(0)) + expectedCallData, err := redistributionContractABI.Pack("commit", obfus, common.BytesToHash(owner.Bytes()), uint32(0)) if err != nil { t.Fatal(err) } @@ -300,7 +297,7 @@ func TestRedistribution(t *testing.T) { redistributionContractABI, ) - _, err = contract.Commit(ctx, testobfus, big.NewInt(0)) + _, err = contract.Commit(ctx, testobfus, uint32(0)) if err != nil { t.Fatal(err) } @@ -405,7 +402,7 @@ func TestRedistribution(t *testing.T) { t.Run("invalid call data", func(t *testing.T) { t.Parallel() - expectedCallData, err := redistributionContractABI.Pack("commit", common.BytesToHash(common.Hex2Bytes("some hash")), common.BytesToHash(common.Hex2Bytes("some address")), big.NewInt(0)) + expectedCallData, err := redistributionContractABI.Pack("commit", common.BytesToHash(common.Hex2Bytes("some hash")), common.BytesToHash(common.Hex2Bytes("some address")), uint32(0)) if err != nil { t.Fatal(err) } @@ -427,7 +424,7 @@ func TestRedistribution(t *testing.T) { redistributionContractABI, ) - _, err = contract.Commit(ctx, common.Hex2Bytes("hash"), big.NewInt(0)) + _, err = contract.Commit(ctx, common.Hex2Bytes("hash"), 
uint32(0)) if err == nil { t.Fatal("expected error") } diff --git a/pkg/storageincentives/testdata/inclusion-proofs.json b/pkg/storageincentives/testdata/inclusion-proofs.json index a5ae32e68b6..fe79666096e 100644 --- a/pkg/storageincentives/testdata/inclusion-proofs.json +++ b/pkg/storageincentives/testdata/inclusion-proofs.json @@ -1,125 +1,125 @@ { "proof1": { "proofSegments": [ - "0875605dea48e812c9685ffba220a2b848bdbafdb95e02d087ba4a32925ea34f", - "f873df729270d5f4064286f3f018385a07cb4228734d8aca794299fee6e3e3e5", - "1fa8767fe303fe7487f5d58e4d72e5e170cf135f58a91b4fe19e4b19e5b67b5a", - "0f64ed713e25291e2c5a0561f584fa78c55a399e31919903d215dd622bcfd0ec", - "34dac0c73538614801c1ad16e272ef57f0b96a972073d15418f38daf9eb401c0", - "0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", - "887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" + "0x0875605dea48e812c9685ffba220a2b848bdbafdb95e02d087ba4a32925ea34f", + "0xf873df729270d5f4064286f3f018385a07cb4228734d8aca794299fee6e3e3e5", + "0x1fa8767fe303fe7487f5d58e4d72e5e170cf135f58a91b4fe19e4b19e5b67b5a", + "0x0f64ed713e25291e2c5a0561f584fa78c55a399e31919903d215dd622bcfd0ec", + "0x34dac0c73538614801c1ad16e272ef57f0b96a972073d15418f38daf9eb401c0", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" ], - "proveSegment": "7133885ac59dca7b97773acb740e978d41a4af45bd563067c8a3d863578488f1", + "proveSegment": "0x7133885ac59dca7b97773acb740e978d41a4af45bd563067c8a3d863578488f1", "proofSegments2": [ - "0000000000000000000000000000000000000000000000000000000000000000", - "ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", - "b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", - "21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", - "2047b070a295f8d517121d9ac9b3d5f9a944bac6cfab72dd5a7c625ab4558b0a", - "0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", - 
"887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", + "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0x2047b070a295f8d517121d9ac9b3d5f9a944bac6cfab72dd5a7c625ab4558b0a", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" ], - "proveSegment2": "0000000000000000000000000000000000000000000000000000000000000000", + "proveSegment2": "0x0000000000000000000000000000000000000000000000000000000000000000", "chunkSpan": 26, "proofSegments3": [ - "0000000000000000000000000000000000000000000000000000000000000000", - "a7f526447b68535121d36909a7585c9610d4fe6d4115540464c70499b0d7136d", - "066dd7ce6f4f1c97e78ff1c271916db25cb06128c92f8c8520807a0fa2ba93ff", - "df43c86b00db2156e769e8a8df1f08dc89ab5661c6fbaa9563f96fb9c051fc63", - "7327aecc9178bab420bb6fe482e07b65af69775b55666ec1ac8ab3da5bcec6dc", - "b68323ecaad1185a5e078f41c94c59d0b6dda5d57e109866e64d44acb8702846", - "478adfa93a7bb904d0aa86ff0d559f43aa915ee7865592e717b72a24452181cb" + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xa7f526447b68535121d36909a7585c9610d4fe6d4115540464c70499b0d7136d", + "0x066dd7ce6f4f1c97e78ff1c271916db25cb06128c92f8c8520807a0fa2ba93ff", + "0xdf43c86b00db2156e769e8a8df1f08dc89ab5661c6fbaa9563f96fb9c051fc63", + "0x7327aecc9178bab420bb6fe482e07b65af69775b55666ec1ac8ab3da5bcec6dc", + "0xb68323ecaad1185a5e078f41c94c59d0b6dda5d57e109866e64d44acb8702846", + "0x478adfa93a7bb904d0aa86ff0d559f43aa915ee7865592e717b72a24452181cb" ], "postageProof": { - "signature": "a7c8d18a8279d3803169ebcf4e5a7dbdd4dffefa591eaad8d1ceaa636a793ad975e7f7b1087bcea4176525b0002edde0acbfda20dbd2dfbbe777cab38968fdc61b", - "postageId": 
"4c8efc14c8e3cee608174f995d7afe155897bf643a31226e4f1363bc97686aef", - "index": "0000000000080303", - "timeStamp": "0000000000030308" + "signature": "p8jRioJ504AxaevPTlp9vdTf/vpZHqrY0c6qY2p5Otl15/exCHvOpBdlJbAALt3grL/aINvS37vnd8qziWj9xhs=", + "postageId": "0x4c8efc14c8e3cee608174f995d7afe155897bf643a31226e4f1363bc97686aef", + "index": 525059, + "timeStamp": 197384 }, "socProof": [ { - "signer": "827b44d53df2854057713b25cdd653eb70fe36c4", - "signature": "4e9576949338e4c23f4703bf81367256ab859b32934fef4db2ee46a76bf6be354e96ac628b8784b2de0bbeae5975469783192d6d1705485fcaadd8dedde6e2aa1b", - "identifier": "6223cfdd75a40440ccd32d0b11b24f08562ec63b1ea3b8cb1a59dfc3e3c33595", - "chunkAddr": "f32442586d93d8c002372ed41fa2ea1f281f38311c161d535c3665de5d9bfd92" + "signer": "0x827b44d53df2854057713b25cdd653eb70fe36c4", + "signature": "TpV2lJM45MI/RwO/gTZyVquFmzKTT+9Nsu5Gp2v2vjVOlqxii4eEst4Lvq5ZdUaXgxktbRcFSF/Krdje3ebiqhs=", + "identifier": "0x6223cfdd75a40440ccd32d0b11b24f08562ec63b1ea3b8cb1a59dfc3e3c33595", + "chunkAddr": "0xf32442586d93d8c002372ed41fa2ea1f281f38311c161d535c3665de5d9bfd92" } ] }, "proof2": { "proofSegments": [ - "463aeb4ca5f000064c082e56eba387004265d2f47bf1226ef2d86cb163bcca3a", - "829af58b2a2f1c6c156baa196f03be4df510a96419f2dd54c456d3da30166312", - "dee4815ec42efa507b79cf4eb1f272e07be1b526cbd48137a287d9e5b2b2808a", - "0f64ed713e25291e2c5a0561f584fa78c55a399e31919903d215dd622bcfd0ec", - "34dac0c73538614801c1ad16e272ef57f0b96a972073d15418f38daf9eb401c0", - "0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", - "887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" + "0x463aeb4ca5f000064c082e56eba387004265d2f47bf1226ef2d86cb163bcca3a", + "0x829af58b2a2f1c6c156baa196f03be4df510a96419f2dd54c456d3da30166312", + "0xdee4815ec42efa507b79cf4eb1f272e07be1b526cbd48137a287d9e5b2b2808a", + "0x0f64ed713e25291e2c5a0561f584fa78c55a399e31919903d215dd622bcfd0ec", + "0x34dac0c73538614801c1ad16e272ef57f0b96a972073d15418f38daf9eb401c0", + 
"0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" ], - "proveSegment": "535e6df58a122a8f5e6c851c19b3e042f4cd1b5c5a8c499581c9f6d4e3509182", + "proveSegment": "0x535e6df58a122a8f5e6c851c19b3e042f4cd1b5c5a8c499581c9f6d4e3509182", "proofSegments2": [ - "0000000000000000000000000000000000000000000000000000000000000000", - "ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", - "b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", - "21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", - "46f43b515833749217540ac60c79e0c6a54c73f3500850b5869b31d5c89d101f", - "0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", - "887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", + "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0x46f43b515833749217540ac60c79e0c6a54c73f3500850b5869b31d5c89d101f", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" ], - "proveSegment2": "0000000000000000000000000000000000000000000000000000000000000000", + "proveSegment2": "0x0000000000000000000000000000000000000000000000000000000000000000", "chunkSpan": 26, "proofSegments3": [ - "0000000000000000000000000000000000000000000000000000000000000000", - "a7f526447b68535121d36909a7585c9610d4fe6d4115540464c70499b0d7136d", - "066dd7ce6f4f1c97e78ff1c271916db25cb06128c92f8c8520807a0fa2ba93ff", - "df43c86b00db2156e769e8a8df1f08dc89ab5661c6fbaa9563f96fb9c051fc63", - "4284c510d7d64c9e052c73bddadb1fca522fd26caf2ebf007faad50a9a0f09fa", - "b68323ecaad1185a5e078f41c94c59d0b6dda5d57e109866e64d44acb8702846", - 
"478adfa93a7bb904d0aa86ff0d559f43aa915ee7865592e717b72a24452181cb" + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xa7f526447b68535121d36909a7585c9610d4fe6d4115540464c70499b0d7136d", + "0x066dd7ce6f4f1c97e78ff1c271916db25cb06128c92f8c8520807a0fa2ba93ff", + "0xdf43c86b00db2156e769e8a8df1f08dc89ab5661c6fbaa9563f96fb9c051fc63", + "0x4284c510d7d64c9e052c73bddadb1fca522fd26caf2ebf007faad50a9a0f09fa", + "0xb68323ecaad1185a5e078f41c94c59d0b6dda5d57e109866e64d44acb8702846", + "0x478adfa93a7bb904d0aa86ff0d559f43aa915ee7865592e717b72a24452181cb" ], "postageProof": { - "signature": "b0274fcda59e8aaffee803021971a764a017ce2c0f41c8ceb6eefdea807056f621a98feab5ebf33bb6065e49c050f413ec8840b008fc224d882ce5244ce3e0171c", - "postageId": "4c8efc14c8e3cee608174f995d7afe155897bf643a31226e4f1363bc97686aef", - "index": "0000000000080303", - "timeStamp": "0000000000030308" + "signature": "sCdPzaWeiq/+6AMCGXGnZKAXziwPQcjOtu796oBwVvYhqY/qtevzO7YGXknAUPQT7IhAsAj8Ik2ILOUkTOPgFxw=", + "postageId": "0x4c8efc14c8e3cee608174f995d7afe155897bf643a31226e4f1363bc97686aef", + "index": 525059, + "timeStamp": 197384 }, "socProof": [] }, "proofLast": { "proofSegments": [ - "fee18543782df46a86f85456e62dc973a4c84369b6b1cd4f93e57fe247f9730e", - "23a0858ee2b8b4cb0ba66d3533f468d6b583a6b77df0cc78fc6df64dc735a917", - "b6bffa54dec44ad57349f9aef6cb65a1f8807f15447462ec519751220e5a5bc3", - "553aae9948fc13c33d8b353cf5694ecadc7c40c8316ce09cbd4d864dbb94f026", - "af7db874a9b5addf602b3e899194480a32afec6d6cd4ec0fadf9e065db739dd5", - "0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", - "887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" + "0xfee18543782df46a86f85456e62dc973a4c84369b6b1cd4f93e57fe247f9730e", + "0x23a0858ee2b8b4cb0ba66d3533f468d6b583a6b77df0cc78fc6df64dc735a917", + "0xb6bffa54dec44ad57349f9aef6cb65a1f8807f15447462ec519751220e5a5bc3", + "0x553aae9948fc13c33d8b353cf5694ecadc7c40c8316ce09cbd4d864dbb94f026", + 
"0xaf7db874a9b5addf602b3e899194480a32afec6d6cd4ec0fadf9e065db739dd5", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" ], - "proveSegment": "5ba2c8b912fad4aeb4a11a960946d07b9f66bc40ac54d87224914d75f5aeea5f", + "proveSegment": "0x5ba2c8b912fad4aeb4a11a960946d07b9f66bc40ac54d87224914d75f5aeea5f", "proofSegments2": [ - "0000000000000000000000000000000000000000000000000000000000000000", - "ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", - "b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", - "21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", - "7f575db255ef42dcaeb7658df9f33fe5a1aad5d41af51a72a381acea29d98a12", - "0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", - "887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", + "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0x7f575db255ef42dcaeb7658df9f33fe5a1aad5d41af51a72a381acea29d98a12", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968" ], - "proveSegment2": "0000000000000000000000000000000000000000000000000000000000000000", + "proveSegment2": "0x0000000000000000000000000000000000000000000000000000000000000000", "chunkSpan": 27, "proofSegments3": [ - "0000000000000000000000000000000000000000000000000000000000000000", - "a7f526447b68535121d36909a7585c9610d4fe6d4115540464c70499b0d7136d", - "066dd7ce6f4f1c97e78ff1c271916db25cb06128c92f8c8520807a0fa2ba93ff", - "df43c86b00db2156e769e8a8df1f08dc89ab5661c6fbaa9563f96fb9c051fc63", - "7683427ba0ef1fbebf97f2fc36859df88ead8123369fe38d7b767b7a7eda5294", - 
"b68323ecaad1185a5e078f41c94c59d0b6dda5d57e109866e64d44acb8702846", - "478adfa93a7bb904d0aa86ff0d559f43aa915ee7865592e717b72a24452181cb" + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xa7f526447b68535121d36909a7585c9610d4fe6d4115540464c70499b0d7136d", + "0x066dd7ce6f4f1c97e78ff1c271916db25cb06128c92f8c8520807a0fa2ba93ff", + "0xdf43c86b00db2156e769e8a8df1f08dc89ab5661c6fbaa9563f96fb9c051fc63", + "0x7683427ba0ef1fbebf97f2fc36859df88ead8123369fe38d7b767b7a7eda5294", + "0xb68323ecaad1185a5e078f41c94c59d0b6dda5d57e109866e64d44acb8702846", + "0x478adfa93a7bb904d0aa86ff0d559f43aa915ee7865592e717b72a24452181cb" ], "postageProof": { - "signature": "6747c58ce8613486c696f5bb7393c9c59094371969c3a52bfaf75192c605f4ad7c70c6e71fdd320e20d005e42e94ee32102c234eb465f4f5fd9db60fcad0356b1c", - "postageId": "4c8efc14c8e3cee608174f995d7afe155897bf643a31226e4f1363bc97686aef", - "index": "0000000000080303", - "timeStamp": "0000000000030308" + "signature": "Z0fFjOhhNIbGlvW7c5PJxZCUNxlpw6Ur+vdRksYF9K18cMbnH90yDiDQBeQulO4yECwjTrRl9PX9nbYPytA1axw=", + "postageId": "0x4c8efc14c8e3cee608174f995d7afe155897bf643a31226e4f1363bc97686aef", + "index": 525059, + "timeStamp": 197384 }, "socProof": [] } diff --git a/pkg/storer/sample.go b/pkg/storer/sample.go index f48efad246c..7a37fb62e99 100644 --- a/pkg/storer/sample.go +++ b/pkg/storer/sample.go @@ -17,7 +17,6 @@ import ( "time" "github.com/ethersphere/bee/pkg/bmt" - "github.com/ethersphere/bee/pkg/cac" "github.com/ethersphere/bee/pkg/postage" "github.com/ethersphere/bee/pkg/soc" chunk "github.com/ethersphere/bee/pkg/storage/testing" @@ -293,10 +292,6 @@ func (db *DB) ReserveSample( // ensuring to pass the check order function of redistribution contract if index := contains(item.TransformedAddress); index != -1 { - // TODO change back to SOC - if cac.Valid(ch) { - continue - } // replace the chunk at index sampleItems[index] = item continue From 84efd850ef7bdc3319cbb17d9beb228a796d9112 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Viktor=20Levente=20T=C3=B3th?= Date: Thu, 28 Sep 2023 12:38:18 +0200 Subject: [PATCH 10/10] refactor: review --- pkg/bmt/proof.go | 2 +- pkg/storageincentives/agent.go | 2 +- pkg/storageincentives/redistribution/inclusionproof.go | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/bmt/proof.go b/pkg/bmt/proof.go index aee820ad9ba..cc59fd33766 100644 --- a/pkg/bmt/proof.go +++ b/pkg/bmt/proof.go @@ -20,7 +20,7 @@ type Proof struct { // Hash overrides base hash function of Hasher to fill buffer with zeros until chunk length func (p Prover) Hash(b []byte) ([]byte, error) { for i := p.size; i < p.maxSize; i += len(zerosection) { - _, err := p.Write(zerosection) + _, err := p.Hasher.Write(zerosection) if err != nil { return nil, err } diff --git a/pkg/storageincentives/agent.go b/pkg/storageincentives/agent.go index ee12c11fab7..b856625025e 100644 --- a/pkg/storageincentives/agent.go +++ b/pkg/storageincentives/agent.go @@ -560,7 +560,7 @@ type SampleWithProofs struct { Duration time.Duration `json:"duration"` } -// Only called by rchash API +// SampleWithProofs is called only by rchash API func (a *Agent) SampleWithProofs( ctx context.Context, anchor1 []byte, diff --git a/pkg/storageincentives/redistribution/inclusionproof.go b/pkg/storageincentives/redistribution/inclusionproof.go index d2e6946ed92..b786d1d3001 100644 --- a/pkg/storageincentives/redistribution/inclusionproof.go +++ b/pkg/storageincentives/redistribution/inclusionproof.go @@ -53,8 +53,8 @@ type SOCProof struct { ChunkAddr common.Hash `json:"chunkAddr"` } -// Transforms arguments to ChunkInclusionProof object -func NewChunkInclusionProof(proofp1, proofp2 bmt.Proof, proofp3 bmt.Proof, sampleItem storer.SampleItem) (ChunkInclusionProof, error) { +// NewChunkInclusionProof transforms arguments to ChunkInclusionProof object +func NewChunkInclusionProof(proofp1, proofp2, proofp3 bmt.Proof, sampleItem storer.SampleItem) (ChunkInclusionProof, error) { socProof, err := 
makeSOCProof(sampleItem) if err != nil { return ChunkInclusionProof{}, err