chore: rename batchHash to stampHash
acha-bill committed Jul 2, 2024
1 parent ba21eab commit 0ae70fb
Showing 19 changed files with 144 additions and 144 deletions.
62 changes: 31 additions & 31 deletions pkg/pullsync/pb/pullsync.pb.go

Some generated files are not rendered by default.
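The regenerated protobuf bindings account for most of this commit's churn; the visible effect is only the renamed field on the generated message struct, roughly as below (struct tags and generated marshalling methods omitted; this is a sketch, not the literal generated code):

```go
// Sketch of the regenerated pb.Chunk after the rename (illustrative only).
type Chunk struct {
	Address   []byte
	BatchID   []byte
	StampHash []byte // formerly BatchHash
}
```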

2 changes: 1 addition & 1 deletion pkg/pullsync/pb/pullsync.proto
@@ -23,7 +23,7 @@ message Get {
 message Chunk {
   bytes Address = 1;
   bytes BatchID = 2;
-  bytes BatchHash = 3;
+  bytes StampHash = 3;
 }

 message Offer {
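For context, the renamed field carries the hash of a chunk's postage stamp, obtained via Stamp().Hash() exactly as the tests further down do. Below is a minimal sketch of how the message could be populated on the sending side; chunkToWire is a hypothetical helper, not part of this commit:

```go
package pullsync

import (
	"github.com/ethersphere/bee/v2/pkg/pullsync/pb"
	"github.com/ethersphere/bee/v2/pkg/swarm"
)

// chunkToWire is a hypothetical helper showing where StampHash comes from:
// it is the hash of the chunk's postage stamp, not of the batch itself.
func chunkToWire(ch swarm.Chunk) (*pb.Chunk, error) {
	stampHash, err := ch.Stamp().Hash()
	if err != nil {
		return nil, err
	}
	return &pb.Chunk{
		Address:   ch.Address().Bytes(),
		BatchID:   ch.Stamp().BatchID(),
		StampHash: stampHash,
	}, nil
}
```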
10 changes: 5 additions & 5 deletions pkg/pullsync/pullsync.go
@@ -170,7 +170,7 @@ func (s *Syncer) Sync(ctx context.Context, peer swarm.Address, bin uint8, start

 		addr := offer.Chunks[i].Address
 		batchID := offer.Chunks[i].BatchID
-		batchHash := offer.Chunks[i].BatchHash
+		stampHash := offer.Chunks[i].StampHash
 		if len(addr) != swarm.HashSize {
 			return 0, 0, fmt.Errorf("inconsistent hash length")
 		}
@@ -183,7 +183,7 @@ func (s *Syncer) Sync(ctx context.Context, peer swarm.Address, bin uint8, start
 		}
 		s.metrics.Offered.Inc()
 		if s.store.IsWithinStorageRadius(a) {
-			have, err = s.store.ReserveHas(a, batchID, batchHash)
+			have, err = s.store.ReserveHas(a, batchID, stampHash)
 			if err != nil {
 				s.logger.Debug("storage has", "error", err)
 				return 0, 0, err
@@ -378,7 +378,7 @@ func (s *Syncer) makeOffer(ctx context.Context, rn pb.Get) (*pb.Offer, error) {
 	o.Topmost = top
 	o.Chunks = make([]*pb.Chunk, 0, len(addrs))
 	for _, v := range addrs {
-		o.Chunks = append(o.Chunks, &pb.Chunk{Address: v.Address.Bytes(), BatchID: v.BatchID, BatchHash: v.BatchHash})
+		o.Chunks = append(o.Chunks, &pb.Chunk{Address: v.Address.Bytes(), BatchID: v.BatchID, StampHash: v.StampHash})
 	}
 	return o, nil
 }
@@ -420,7 +420,7 @@ func (s *Syncer) collectAddrs(ctx context.Context, bin uint8, start uint64) ([]*
 			break LOOP // The stream has been closed.
 		}

-		chs = append(chs, &storer.BinC{Address: c.Address, BatchID: c.BatchID, BatchHash: c.BatchHash})
+		chs = append(chs, &storer.BinC{Address: c.Address, BatchID: c.BatchID, StampHash: c.StampHash})
 		if c.BinID > topmost {
 			topmost = c.BinID
 		}
@@ -466,7 +466,7 @@ func (s *Syncer) processWant(ctx context.Context, o *pb.Offer, w *pb.Want) ([]sw
 		if bv.Get(i) {
 			ch := o.Chunks[i]
 			addr := swarm.NewAddress(ch.Address)
-			c, err := s.store.ReserveGet(ctx, addr, ch.BatchID, ch.BatchHash)
+			c, err := s.store.ReserveGet(ctx, addr, ch.BatchID, ch.StampHash)
 			if err != nil {
 				s.logger.Debug("processing want: unable to find chunk", "chunk_address", addr, "batch_id", hex.EncodeToString(ch.BatchID))
 				chunks = append(chunks, swarm.NewChunk(swarm.ZeroAddress, nil))
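Taken together, the offer/want exchange now carries the stamp hash end to end: makeOffer attaches it to every offered chunk, and the receiving side forwards it to ReserveHas when deciding what it already holds. Below is a condensed sketch of that receive-side decision, assuming a narrowed store interface with only the two methods used in the diff above; it is not the actual Syncer code:

```go
package pullsync

import (
	"github.com/ethersphere/bee/v2/pkg/pullsync/pb"
	"github.com/ethersphere/bee/v2/pkg/swarm"
)

// reserveChecker is a hypothetical, narrowed view of the syncer's store,
// limited to the two calls exercised in the diff above.
type reserveChecker interface {
	IsWithinStorageRadius(addr swarm.Address) bool
	ReserveHas(addr swarm.Address, batchID, stampHash []byte) (bool, error)
}

// wantedIndexes sketches which offered chunks a node would still request:
// the stamp hash travels with each chunk and is part of the ReserveHas lookup.
func wantedIndexes(store reserveChecker, offer *pb.Offer) ([]int, error) {
	var want []int
	for i, c := range offer.Chunks {
		addr := swarm.NewAddress(c.Address)
		if !store.IsWithinStorageRadius(addr) {
			continue // outside the storage radius, never requested
		}
		have, err := store.ReserveHas(addr, c.BatchID, c.StampHash)
		if err != nil {
			return nil, err
		}
		if !have {
			want = append(want, i)
		}
	}
	return want, nil
}
```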
12 changes: 6 additions & 6 deletions pkg/pullsync/pullsync_test.go
@@ -46,12 +46,12 @@ func init() {
 	for i := 0; i < n; i++ {
 		chunks[i] = testingc.GenerateTestRandomChunk()
 		addrs[i] = chunks[i].Address()
-		batchHash, _ := chunks[i].Stamp().Hash()
+		stampHash, _ := chunks[i].Stamp().Hash()
 		results[i] = &storer.BinC{
 			Address:   addrs[i],
 			BatchID:   chunks[i].Stamp().BatchID(),
 			BinID:     uint64(i),
-			BatchHash: batchHash,
+			StampHash: stampHash,
 		}
 	}
 }
@@ -160,15 +160,15 @@ func TestIncoming_WantErrors(t *testing.T) {

 	tResults := make([]*storer.BinC, len(tChunks))
 	for i, c := range tChunks {
-		batchHash, err := c.Stamp().Hash()
+		stampHash, err := c.Stamp().Hash()
 		if err != nil {
 			t.Fatal(err)
 		}
 		tResults[i] = &storer.BinC{
 			Address:   c.Address(),
 			BatchID:   c.Stamp().BatchID(),
 			BinID:     uint64(i + 5), // start from a higher bin id
-			BatchHash: batchHash,
+			StampHash: stampHash,
 		}
 	}

@@ -312,11 +312,11 @@ func TestGetCursorsError(t *testing.T) {
 func haveChunks(t *testing.T, s *mock.ReserveStore, chunks ...swarm.Chunk) {
 	t.Helper()
 	for _, c := range chunks {
-		batchHash, err := c.Stamp().Hash()
+		stampHash, err := c.Stamp().Hash()
 		if err != nil {
 			t.Fatal(err)
 		}
-		have, err := s.ReserveHas(c.Address(), c.Stamp().BatchID(), batchHash)
+		have, err := s.ReserveHas(c.Address(), c.Stamp().BatchID(), stampHash)
 		if err != nil {
 			t.Fatal(err)
 		}
2 changes: 1 addition & 1 deletion pkg/storage/storagetest/storage.go
@@ -19,7 +19,7 @@ import (
 	"testing"

 	"github.com/ethersphere/bee/v2/pkg/encryption"
-	storage "github.com/ethersphere/bee/v2/pkg/storage"
+	"github.com/ethersphere/bee/v2/pkg/storage"
 	"github.com/ethersphere/bee/v2/pkg/storage/storageutil"
 	"github.com/ethersphere/bee/v2/pkg/swarm"
 	"github.com/google/go-cmp/cmp"
8 changes: 4 additions & 4 deletions pkg/storer/compact_test.go
@@ -92,11 +92,11 @@ func TestCompact(t *testing.T) {
 	}

 	for _, ch := range chunks {
-		batchHash, err := ch.Stamp().Hash()
+		stampHash, err := ch.Stamp().Hash()
 		if err != nil {
 			t.Fatal(err)
 		}
-		has, err := st.ReserveHas(ch.Address(), ch.Stamp().BatchID(), batchHash)
+		has, err := st.ReserveHas(ch.Address(), ch.Stamp().BatchID(), stampHash)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -178,11 +178,11 @@ func TestCompactNoEvictions(t *testing.T) {
 	}

 	for _, ch := range chunks {
-		batchHash, err := ch.Stamp().Hash()
+		stampHash, err := ch.Stamp().Hash()
 		if err != nil {
 			t.Fatal(err)
 		}
-		has, err := st.ReserveHas(ch.Address(), ch.Stamp().BatchID(), batchHash)
+		has, err := st.ReserveHas(ch.Address(), ch.Stamp().BatchID(), stampHash)
 		if err != nil {
 			t.Fatal(err)
 		}
20 changes: 10 additions & 10 deletions pkg/storer/internal/reserve/items.go
@@ -23,7 +23,7 @@ var (
 type BatchRadiusItem struct {
 	Bin       uint8
 	BatchID   []byte
-	BatchHash []byte
+	StampHash []byte
 	Address   swarm.Address
 	BinID     uint64
 }
@@ -32,9 +32,9 @@ func (b *BatchRadiusItem) Namespace() string {
 	return "batchRadius"
 }

-// batchID/bin/ChunkAddr/batchHash
+// batchID/bin/ChunkAddr/stampHash
 func (b *BatchRadiusItem) ID() string {
-	return string(b.BatchID) + string(b.Bin) + b.Address.ByteString() + string(b.BatchHash)
+	return string(b.BatchID) + string(b.Bin) + b.Address.ByteString() + string(b.StampHash)
 }

 func (b *BatchRadiusItem) String() string {
@@ -50,7 +50,7 @@ func (b *BatchRadiusItem) Clone() storage.Item {
 		BatchID:   copyBytes(b.BatchID),
 		Address:   b.Address.Clone(),
 		BinID:     b.BinID,
-		BatchHash: copyBytes(b.BatchHash),
+		StampHash: copyBytes(b.StampHash),
 	}
 }

@@ -75,7 +75,7 @@ func (b *BatchRadiusItem) Marshal() ([]byte, error) {
 	copy(buf[i:i+swarm.HashSize], b.Address.Bytes())
 	i += swarm.HashSize

-	copy(buf[i:i+swarm.HashSize], b.BatchHash)
+	copy(buf[i:i+swarm.HashSize], b.StampHash)
 	i += swarm.HashSize

 	binary.BigEndian.PutUint64(buf[i:i+8], b.BinID)
@@ -99,7 +99,7 @@ func (b *BatchRadiusItem) Unmarshal(buf []byte) error {
 	b.Address = swarm.NewAddress(buf[i : i+swarm.HashSize]).Clone()
 	i += swarm.HashSize

-	b.BatchHash = copyBytes(buf[i : i+swarm.HashSize])
+	b.StampHash = copyBytes(buf[i : i+swarm.HashSize])
 	i += swarm.HashSize

 	b.BinID = binary.BigEndian.Uint64(buf[i : i+8])
@@ -114,7 +114,7 @@ type ChunkBinItem struct {
 	BinID     uint64
 	Address   swarm.Address
 	BatchID   []byte
-	BatchHash []byte
+	StampHash []byte
 	ChunkType swarm.ChunkType
 }

@@ -146,7 +146,7 @@ func (c *ChunkBinItem) Clone() storage.Item {
 		BinID:     c.BinID,
 		Address:   c.Address.Clone(),
 		BatchID:   copyBytes(c.BatchID),
-		BatchHash: copyBytes(c.BatchHash),
+		StampHash: copyBytes(c.StampHash),
 		ChunkType: c.ChunkType,
 	}
 }
@@ -174,7 +174,7 @@ func (c *ChunkBinItem) Marshal() ([]byte, error) {
 	copy(buf[i:i+swarm.HashSize], c.BatchID)
 	i += swarm.HashSize

-	copy(buf[i:i+swarm.HashSize], c.BatchHash)
+	copy(buf[i:i+swarm.HashSize], c.StampHash)
 	i += swarm.HashSize

 	buf[i] = uint8(c.ChunkType)
@@ -201,7 +201,7 @@ func (c *ChunkBinItem) Unmarshal(buf []byte) error {
 	c.BatchID = copyBytes(buf[i : i+swarm.HashSize])
 	i += swarm.HashSize

-	c.BatchHash = copyBytes(buf[i : i+swarm.HashSize])
+	c.StampHash = copyBytes(buf[i : i+swarm.HashSize])
 	i += swarm.HashSize

 	c.ChunkType = swarm.ChunkType(buf[i])
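The storage items change only in the field name; the key and the marshalled layout keep the stamp hash in the same hash-sized slot (batchID / bin / chunk address / stampHash for BatchRadiusItem's ID). The sketch below shows the kind of round-trip check a test inside the (internal) reserve package could assert; it is illustrative only and not part of this commit:

```go
package reserve

import (
	"bytes"
	"errors"

	"github.com/ethersphere/bee/v2/pkg/swarm"
)

// batchRadiusItemRoundTrip is an illustrative check that Marshal/Unmarshal and
// ID() still agree after the rename; values are arbitrary hash-sized fillers.
func batchRadiusItemRoundTrip() error {
	orig := &BatchRadiusItem{
		Bin:       9,
		BatchID:   bytes.Repeat([]byte{0x01}, swarm.HashSize),
		StampHash: bytes.Repeat([]byte{0x02}, swarm.HashSize),
		Address:   swarm.NewAddress(bytes.Repeat([]byte{0x03}, swarm.HashSize)),
		BinID:     42,
	}
	buf, err := orig.Marshal()
	if err != nil {
		return err
	}
	got := new(BatchRadiusItem)
	if err := got.Unmarshal(buf); err != nil {
		return err
	}
	// ID() concatenates batchID, bin, chunk address and stampHash, so equal IDs
	// mean the renamed field survived the round trip unchanged.
	if got.ID() != orig.ID() {
		return errors.New("round trip changed the item identity")
	}
	return nil
}
```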
4 changes: 2 additions & 2 deletions pkg/storer/internal/reserve/items_test.go
@@ -29,7 +29,7 @@ func TestReserveItems(t *testing.T) {
 			Address:   swarm.NewAddress(storagetest.MaxAddressBytes[:]),
 			Bin:       9,
 			BinID:     100,
-			BatchHash: storagetest.MaxAddressBytes[:],
+			StampHash: storagetest.MaxAddressBytes[:],
 		},
 		Factory: func() storage.Item { return new(reserve.BatchRadiusItem) },
 	},
@@ -42,7 +42,7 @@
 			BatchID:   storagetest.MaxAddressBytes[:],
 			Bin:       9,
 			BinID:     100,
-			BatchHash: storagetest.MaxAddressBytes[:],
+			StampHash: storagetest.MaxAddressBytes[:],
 		},
 		Factory: func() storage.Item { return new(reserve.ChunkBinItem) },
 	},
The remaining changed files are not rendered above.
