Skip to content

Commit

Permalink
chore: review
Browse files Browse the repository at this point in the history
  • Loading branch information
nugaon committed Sep 27, 2023
1 parent 1d0c465 commit a4671f5
Show file tree
Hide file tree
Showing 7 changed files with 31 additions and 38 deletions.
2 changes: 1 addition & 1 deletion pkg/api/rchash.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ func (s *Service) rchash(w http.ResponseWriter, r *http.Request) {
logger := s.logger.WithName("get_rchash").Build()

paths := struct {
Depth uint8 `map:"depth" validate:"min=0"`
Depth uint8 `map:"depth"`
Anchor1 string `map:"anchor1" validate:"required"`
Anchor2 string `map:"anchor2" validate:"required"`
}{}
Expand Down
2 changes: 1 addition & 1 deletion pkg/bmt/bmt.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ type Hasher struct {
span []byte // The span of the data subsumed under the chunk
}

// facade
// NewHasher gives back an instance of a Hasher struct
func NewHasher(hasherFact func() hash.Hash) *Hasher {
conf := NewConf(hasherFact, swarm.BmtBranches, 32)

Expand Down
16 changes: 6 additions & 10 deletions pkg/bmt/proof.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,15 +17,15 @@ type Proof struct {
Index int
}

// Hash overrides the base hash function of Hasher: it pads the data buffer
// with zero sections up to the maximum chunk length before hashing, so that
// proofs over short chunks are computed on a full-size buffer.
func (p Prover) Hash(b []byte) ([]byte, error) {
	// Fill the remainder of the buffer [size, maxSize) with zero sections.
	for i := p.size; i < p.maxSize; i += len(zerosection) {
		if _, err := p.Write(zerosection); err != nil {
			return nil, err
		}
	}
	// Delegate to the embedded Hasher's Hash. It must be qualified as
	// p.Hasher.Hash: calling p.Hash(b) here would invoke this override
	// again and recurse infinitely (stack overflow on first use).
	return p.Hasher.Hash(b)
}

// Proof returns the inclusion proof of the i-th data segment
Expand All @@ -47,13 +47,9 @@ func (p Prover) Proof(i int) Proof {
secsize := 2 * p.segmentSize
offset := i * secsize
section := p.bmt.buffer[offset : offset+secsize]
left := section[:p.segmentSize]
right := section[p.segmentSize:]
var segment, firstSegmentSister []byte
if index%2 == 0 {
segment, firstSegmentSister = left, right
} else {
segment, firstSegmentSister = right, left
segment, firstSegmentSister := section[:p.segmentSize], section[p.segmentSize:]
if index%2 != 0 {
segment, firstSegmentSister = firstSegmentSister, segment
}
sisters = append([][]byte{firstSegmentSister}, sisters...)
return Proof{segment, sisters, p.span, index}
Expand Down
5 changes: 2 additions & 3 deletions pkg/storageincentives/agent.go
Original file line number Diff line number Diff line change
Expand Up @@ -364,7 +364,6 @@ func (a *Agent) handleClaim(ctx context.Context, round uint64) error {
}

proofs, err := makeInclusionProofs(sampleData.ReserveSampleItems, sampleData.Anchor1, anchor2)

if err != nil {
return fmt.Errorf("making inclusion proofs: %w", err)
}
Expand Down Expand Up @@ -429,12 +428,12 @@ func (a *Agent) handleSample(ctx context.Context, round uint64) (bool, error) {
return false, nil
}

t := time.Now()
now := time.Now()
sample, err := a.makeSample(ctx, storageRadius)
if err != nil {
return false, err
}
dur := time.Since(t)
dur := time.Since(now)
a.metrics.SampleDuration.Set(dur.Seconds())

a.logger.Info("produced sample", "hash", sample.ReserveSampleHash, "radius", sample.StorageRadius, "round", round)
Expand Down
34 changes: 17 additions & 17 deletions pkg/storageincentives/proof.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,9 @@ import (
"github.com/ethersphere/bee/pkg/swarm"
)

var errMessage = errors.New("reserve commitment hasher: failure in proof creation")
var errProofCreation = errors.New("reserve commitment hasher: failure in proof creation")

// returns the byte index of chunkdata where the spansize starts
// spanOffset returns the byte index of chunkdata where the spansize starts
func spanOffset(sampleItem storer.SampleItem) uint8 {
ch := swarm.NewChunk(sampleItem.ChunkAddress, sampleItem.ChunkData)
if soc.Valid(ch) {
Expand Down Expand Up @@ -66,16 +66,16 @@ func makeInclusionProofs(
rccontent.SetHeaderInt64(swarm.HashSize * storer.SampleSize * 2)
rsc, err := sampleChunk(reserveSampleItems)
if err != nil {
return redistribution.ChunkInclusionProofs{}, errMessage
return redistribution.ChunkInclusionProofs{}, errProofCreation
}
rscData := rsc.Data()
_, err = rccontent.Write(rscData[swarm.SpanSize:])
if err != nil {
return redistribution.ChunkInclusionProofs{}, errMessage
return redistribution.ChunkInclusionProofs{}, errProofCreation
}
_, err = rccontent.Hash(nil)
if err != nil {
return redistribution.ChunkInclusionProofs{}, errMessage
return redistribution.ChunkInclusionProofs{}, errProofCreation
}
proof1p1 := rccontent.Proof(int(require1) * 2)
proof2p1 := rccontent.Proof(int(require2) * 2)
Expand All @@ -91,23 +91,23 @@ func makeInclusionProofs(
chunk1ContentPayload := reserveSampleItems[require1].ChunkData[chunk1Offset+swarm.SpanSize:]
_, err = chunk1Content.Write(chunk1ContentPayload)
if err != nil {
return redistribution.ChunkInclusionProofs{}, errMessage
return redistribution.ChunkInclusionProofs{}, errProofCreation
}
_, err = chunk1Content.Hash(nil)
if err != nil {
return redistribution.ChunkInclusionProofs{}, errMessage
return redistribution.ChunkInclusionProofs{}, errProofCreation
}
proof1p2 := chunk1Content.Proof(segmentIndex)
// TR chunk proof
chunk1TrContent := bmt.Prover{Hasher: prefixHasherPool.Get()}
chunk1TrContent.SetHeader(reserveSampleItems[require1].ChunkData[chunk1Offset : chunk1Offset+swarm.SpanSize])
_, err = chunk1TrContent.Write(chunk1ContentPayload)
if err != nil {
return redistribution.ChunkInclusionProofs{}, errMessage
return redistribution.ChunkInclusionProofs{}, errProofCreation
}
_, err = chunk1TrContent.Hash(nil)
if err != nil {
return redistribution.ChunkInclusionProofs{}, errMessage
return redistribution.ChunkInclusionProofs{}, errProofCreation
}
proof1p3 := chunk1TrContent.Proof(segmentIndex)
// cleanup
Expand All @@ -122,23 +122,23 @@ func makeInclusionProofs(
chunk2Content.SetHeader(reserveSampleItems[require2].ChunkData[chunk2Offset : chunk2Offset+swarm.SpanSize])
_, err = chunk2Content.Write(chunk2ContentPayload)
if err != nil {
return redistribution.ChunkInclusionProofs{}, errMessage
return redistribution.ChunkInclusionProofs{}, errProofCreation
}
_, err = chunk2Content.Hash(nil)
if err != nil {
return redistribution.ChunkInclusionProofs{}, errMessage
return redistribution.ChunkInclusionProofs{}, errProofCreation
}
proof2p2 := chunk2Content.Proof(segmentIndex)
// TR Chunk proof
chunk2TrContent := bmt.Prover{Hasher: prefixHasherPool.Get()}
chunk2TrContent.SetHeader(reserveSampleItems[require2].ChunkData[chunk2Offset : chunk2Offset+swarm.SpanSize])
_, err = chunk2TrContent.Write(chunk2ContentPayload)
if err != nil {
return redistribution.ChunkInclusionProofs{}, errMessage
return redistribution.ChunkInclusionProofs{}, errProofCreation
}
_, err = chunk2TrContent.Hash(nil)
if err != nil {
return redistribution.ChunkInclusionProofs{}, errMessage
return redistribution.ChunkInclusionProofs{}, errProofCreation
}
proof2p3 := chunk2TrContent.Proof(segmentIndex)
// cleanup
Expand All @@ -153,23 +153,23 @@ func makeInclusionProofs(
chunkLastContentPayload := reserveSampleItems[require3].ChunkData[chunkLastOffset+swarm.SpanSize:]
_, err = chunkLastContent.Write(chunkLastContentPayload)
if err != nil {
return redistribution.ChunkInclusionProofs{}, errMessage
return redistribution.ChunkInclusionProofs{}, errProofCreation
}
_, err = chunkLastContent.Hash(nil)
if err != nil {
return redistribution.ChunkInclusionProofs{}, errMessage
return redistribution.ChunkInclusionProofs{}, errProofCreation
}
proofLastp2 := chunkLastContent.Proof(segmentIndex)
// TR Chunk Proof
chunkLastTrContent := bmt.Prover{Hasher: prefixHasherPool.Get()}
chunkLastTrContent.SetHeader(reserveSampleItems[require3].ChunkData[chunkLastOffset : chunkLastOffset+swarm.SpanSize])
_, err = chunkLastTrContent.Write(chunkLastContentPayload)
if err != nil {
return redistribution.ChunkInclusionProofs{}, errMessage
return redistribution.ChunkInclusionProofs{}, errProofCreation
}
_, err = chunkLastTrContent.Hash(nil)
if err != nil {
return redistribution.ChunkInclusionProofs{}, errMessage
return redistribution.ChunkInclusionProofs{}, errProofCreation
}
proofLastp3 := chunkLastTrContent.Proof(segmentIndex)
// cleanup
Expand Down
8 changes: 3 additions & 5 deletions pkg/storageincentives/redistribution/inclusionproof.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Used for inclusion proof utilities

package redistribution

import (
Expand Down Expand Up @@ -55,8 +53,8 @@ type SOCProof struct {
ChunkAddr string `json:"chunkAddr"`
}

// Transforms arguments to ChunkInclusionProof object
func NewChunkInclusionProof(proofp1, proofp2 bmt.Proof, proofp3 bmt.Proof, sampleItem storer.SampleItem) (ChunkInclusionProof, error) {
// NewChunkInclusionProof transforms arguments to ChunkInclusionProof object
func NewChunkInclusionProof(proofp1, proofp2, proofp3 bmt.Proof, sampleItem storer.SampleItem) (ChunkInclusionProof, error) {
proofp1Hex := newHexProofs(proofp1)
proofp2Hex := newHexProofs(proofp2)
proofp3Hex := newHexProofs(proofp3)
Expand Down Expand Up @@ -108,7 +106,7 @@ type hexProof struct {
ProveSegment string
}

// Transforms proof object to its hexadecimal representation
// newHexProofs transforms proof object to its hexadecimal representation
func newHexProofs(proof bmt.Proof) hexProof {
proofSegments := make([]string, len(proof.ProofSegments))
for i := 0; i < len(proof.ProofSegments); i++ {
Expand Down
2 changes: 1 addition & 1 deletion pkg/storer/sample.go
Original file line number Diff line number Diff line change
Expand Up @@ -291,7 +291,7 @@ func (db *DB) ReserveSample(

item.Stamp = stamp

// check if sample contains transformed address
// ensure the order-check function of the redistribution contract passes
if index := contains(item.TransformedAddress); index != -1 {
// TODO change back to SOC
if cac.Valid(ch) {
Expand Down

0 comments on commit a4671f5

Please sign in to comment.