diff --git a/openapi/SwarmCommon.yaml b/openapi/SwarmCommon.yaml
index aa95ce70401..dcd54c3853a 100644
--- a/openapi/SwarmCommon.yaml
+++ b/openapi/SwarmCommon.yaml
@@ -928,7 +928,7 @@ components:
         type: boolean
       lastSyncedBlock:
         type: integer
-      commitedDepth:
+      committedDepth:
        type: integer
 
   StatusResponse:
diff --git a/pkg/api/status.go b/pkg/api/status.go
index 5c6c315088a..804cd94aef5 100644
--- a/pkg/api/status.go
+++ b/pkg/api/status.go
@@ -30,7 +30,7 @@ type statusSnapshotResponse struct {
     BatchCommitment uint64 `json:"batchCommitment"`
     IsReachable     bool   `json:"isReachable"`
     LastSyncedBlock uint64 `json:"lastSyncedBlock"`
-    CommitedDepth   uint8  `json:"commitedDepth"`
+    CommittedDepth  uint8  `json:"committedDepth"`
 }
 
 type statusResponse struct {
@@ -95,7 +95,7 @@ func (s *Service) statusGetHandler(w http.ResponseWriter, _ *http.Request) {
         BatchCommitment: ss.BatchCommitment,
         IsReachable:     ss.IsReachable,
         LastSyncedBlock: ss.LastSyncedBlock,
-        CommitedDepth:   uint8(ss.CommitedDepth),
+        CommittedDepth:  uint8(ss.CommittedDepth),
     })
 }
 
@@ -143,7 +143,7 @@ func (s *Service) statusGetPeersHandler(w http.ResponseWriter, r *http.Request)
             snapshot.BatchCommitment = ss.BatchCommitment
             snapshot.IsReachable = ss.IsReachable
             snapshot.LastSyncedBlock = ss.LastSyncedBlock
-            snapshot.CommitedDepth = uint8(ss.CommitedDepth)
+            snapshot.CommittedDepth = uint8(ss.CommittedDepth)
         }
 
         mu.Lock()
diff --git a/pkg/api/status_test.go b/pkg/api/status_test.go
index 3d9a458b77a..0ef4dc6c95c 100644
--- a/pkg/api/status_test.go
+++ b/pkg/api/status_test.go
@@ -40,7 +40,7 @@ func TestGetStatus(t *testing.T) {
         BatchCommitment: 1,
         IsReachable:     true,
         LastSyncedBlock: 6092500,
-        CommitedDepth:   1,
+        CommittedDepth:  1,
     }
 
     ssMock := &statusSnapshotMock{
@@ -50,7 +50,7 @@ func TestGetStatus(t *testing.T) {
         storageRadius: ssr.StorageRadius,
         commitment:    ssr.BatchCommitment,
         chainState:    &postage.ChainState{Block: ssr.LastSyncedBlock},
-        commitedDepth: ssr.CommitedDepth,
+        committedDepth: ssr.CommittedDepth,
     }
 
     statusSvc := status.NewService(
@@ -124,7 +124,7 @@ type statusSnapshotMock struct {
     commitment    uint64
     chainState    *postage.ChainState
     neighborhoods []*storer.NeighborhoodStat
-    commitedDepth uint8
+    committedDepth uint8
 }
 
 func (m *statusSnapshotMock) SyncRate() float64 { return m.syncRate }
@@ -138,4 +138,4 @@ func (m *statusSnapshotMock) ReserveSizeWithinRadius() uint64 {
 func (m *statusSnapshotMock) NeighborhoodsStat(ctx context.Context) ([]*storer.NeighborhoodStat, error) {
     return m.neighborhoods, nil
 }
-func (m *statusSnapshotMock) CommitedDepth() uint8 { return m.commitedDepth }
+func (m *statusSnapshotMock) CommittedDepth() uint8 { return m.committedDepth }
diff --git a/pkg/salud/salud.go b/pkg/salud/salud.go
index 71d0f05b64e..d47abf3fd76 100644
--- a/pkg/salud/salud.go
+++ b/pkg/salud/salud.go
@@ -169,7 +169,7 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64,
         return
     }
 
-    networkRadius, nHoodRadius := s.commitedDepth(peers)
+    networkRadius, nHoodRadius := s.committedDepth(peers)
     avgDur := totaldur / float64(len(peers))
     pDur := percentileDur(peers, durPercentile)
     pConns := percentileConns(peers, connsPercentile)
@@ -195,8 +195,8 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64,
             continue
         }
 
-        if networkRadius > 0 && peer.status.CommitedDepth < uint32(networkRadius-2) {
-            s.logger.Debug("radius health failure", "radius", peer.status.CommitedDepth, "peer_address", peer.addr)
+        if networkRadius > 0 && peer.status.CommittedDepth < uint32(networkRadius-2) {
+            s.logger.Debug("radius health failure", "radius", peer.status.CommittedDepth, "peer_address", peer.addr)
         } else if peer.dur.Seconds() > pDur {
             s.logger.Debug("response duration below threshold", "duration", peer.dur, "peer_address", peer.addr)
         } else if peer.status.ConnectedPeers < pConns {
@@ -217,9 +217,9 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64,
     }
 
     selfHealth := true
-    if nHoodRadius == networkRadius && s.reserve.CommitedDepth() != networkRadius {
+    if nHoodRadius == networkRadius && s.reserve.CommittedDepth() != networkRadius {
         selfHealth = false
-        s.logger.Warning("node is unhealthy due to storage radius discrepancy", "self_radius", s.reserve.CommitedDepth(), "network_radius", networkRadius)
+        s.logger.Warning("node is unhealthy due to storage radius discrepancy", "self_radius", s.reserve.CommittedDepth(), "network_radius", networkRadius)
     }
 
     s.isSelfHealthy.Store(selfHealth)
@@ -288,17 +288,17 @@ func percentileConns(peers []peer, p float64) uint64 {
 }
 
 // committedDepth finds the most common committed depth.
-func (s *service) commitedDepth(peers []peer) (uint8, uint8) {
+func (s *service) committedDepth(peers []peer) (uint8, uint8) {
     var networkDepth [swarm.MaxBins]int
     var nHoodDepth [swarm.MaxBins]int
 
     for _, peer := range peers {
-        if peer.status.CommitedDepth < uint32(swarm.MaxBins) {
+        if peer.status.CommittedDepth < uint32(swarm.MaxBins) {
             if peer.neighbor {
-                nHoodDepth[peer.status.CommitedDepth]++
+                nHoodDepth[peer.status.CommittedDepth]++
             }
-            networkDepth[peer.status.CommitedDepth]++
+            networkDepth[peer.status.CommittedDepth]++
         }
     }
diff --git a/pkg/salud/salud_test.go b/pkg/salud/salud_test.go
index eb30b8efa79..e430bf1c868 100644
--- a/pkg/salud/salud_test.go
+++ b/pkg/salud/salud_test.go
@@ -31,28 +31,28 @@ func TestSalud(t *testing.T) {
     t.Parallel()
     peers := []peer{
         // fully healthy
-        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommitedDepth: 8}, 1, true},
-        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommitedDepth: 8}, 1, true},
-        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommitedDepth: 8}, 1, true},
-        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommitedDepth: 8}, 1, true},
-        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommitedDepth: 8}, 1, true},
-        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommitedDepth: 8}, 1, true},
+        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 1, true},
+        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 1, true},
+        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 1, true},
+        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 1, true},
+        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 1, true},
+        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 1, true},
         // healthy since radius >= most common radius - 2
-        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 7, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommitedDepth: 7}, 1, true},
+        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 7, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 7}, 1, true},
         // radius too low
-        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 5, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommitedDepth: 5}, 1, false},
+        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 5, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 5}, 1, false},
         // dur too long
-        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommitedDepth: 8}, 2, false},
-        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommitedDepth: 8}, 2, false},
+        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 2, false},
+        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 2, false},
         // connections not enough
-        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 90, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommitedDepth: 8}, 1, false},
+        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 90, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100, CommittedDepth: 8}, 1, false},
         // commitment wrong
-        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 35, ReserveSize: 100, CommitedDepth: 8}, 1, false},
+        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 35, ReserveSize: 100, CommittedDepth: 8}, 1, false},
     }
 
     statusM := &statusMock{make(map[string]peer)}
@@ -137,8 +137,8 @@ func TestSelfHealthyCapacityDoubling(t *testing.T) {
     t.Parallel()
     peers := []peer{
         // fully healthy
-        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", CommitedDepth: 8}, 0, true},
-        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", CommitedDepth: 8}, 0, true},
+        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", CommittedDepth: 8}, 0, true},
+        {swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", CommittedDepth: 8}, 0, true},
     }
 
     statusM := &statusMock{make(map[string]peer)}
diff --git a/pkg/status/internal/pb/status.pb.go b/pkg/status/internal/pb/status.pb.go
index afa4c34edba..d5cd8454235 100644
--- a/pkg/status/internal/pb/status.pb.go
+++ b/pkg/status/internal/pb/status.pb.go
@@ -74,7 +74,7 @@ type Snapshot struct {
     IsReachable             bool   `protobuf:"varint,8,opt,name=IsReachable,proto3" json:"IsReachable,omitempty"`
     ReserveSizeWithinRadius uint64 `protobuf:"varint,9,opt,name=ReserveSizeWithinRadius,proto3" json:"ReserveSizeWithinRadius,omitempty"`
     LastSyncedBlock         uint64 `protobuf:"varint,10,opt,name=LastSyncedBlock,proto3" json:"LastSyncedBlock,omitempty"`
-    CommitedDepth           uint32 `protobuf:"varint,11,opt,name=CommitedDepth,proto3" json:"CommitedDepth,omitempty"`
+    CommittedDepth          uint32 `protobuf:"varint,11,opt,name=CommittedDepth,proto3" json:"CommittedDepth,omitempty"`
 }
 
 func (m *Snapshot) Reset() { *m = Snapshot{} }
@@ -180,9 +180,9 @@ func (m *Snapshot) GetLastSyncedBlock() uint64 {
     return 0
 }
 
-func (m *Snapshot) GetCommitedDepth() uint32 {
+func (m *Snapshot) GetCommittedDepth() uint32 {
     if m != nil {
-        return m.CommitedDepth
+        return m.CommittedDepth
     }
     return 0
 }
@@ -195,28 +195,28 @@ func init() {
 
 func init() { proto.RegisterFile("status.proto", fileDescriptor_dfe4fce6682daf5b) }
 
 var fileDescriptor_dfe4fce6682daf5b = []byte{
-    // 331 bytes of a gzipped FileDescriptorProto
+    // 332 bytes of a gzipped FileDescriptorProto
     0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0xd1, 0x4f, 0x4b, 0xc3, 0x30,
     0x18, 0x06, 0xf0, 0x65, 0xff, 0x97, 0x6d, 0x2a, 0xb9, 0x98, 0x83, 0x94, 0x32, 0x44, 0x8a, 0x07,
     0x2f, 0x5e, 0x3c, 0x77, 0x82, 0x08, 0x2a, 0x23, 0x3d, 0x08, 0xde, 0xd2, 0xf6, 0x65, 0x29, 0x76,
-    0x49, 0x69, 0xde, 0x09, 0xf3, 0x53, 0xf8, 0x79, 0xfc, 0x04, 0x1e, 0x77, 0xf4, 0x28, 0xdb, 0x17,
-    0x91, 0xa5, 0x0a, 0xdb, 0xc4, 0xe3, 0xfb, 0x6b, 0x79, 0xf3, 0xe4, 0x09, 0x1d, 0x58, 0x94, 0x38,
-    0xb7, 0x17, 0x45, 0x69, 0xd0, 0xb0, 0x76, 0x35, 0x8d, 0x5a, 0xb4, 0x71, 0x03, 0x38, 0x7a, 0x6f,
-    0xd0, 0x6e, 0xa4, 0x65, 0x61, 0x95, 0x41, 0xe6, 0xd3, 0xbe, 0x00, 0x0b, 0xe5, 0x0b, 0x44, 0xd9,
-    0x2b, 0x70, 0xe2, 0x93, 0xa0, 0x29, 0xb6, 0x89, 0x8d, 0xe8, 0x60, 0x32, 0xcf, 0x73, 0xbb, 0xd0,
-    0x89, 0x90, 0x08, 0xbc, 0xee, 0x93, 0x80, 0x88, 0x1d, 0x63, 0xa7, 0x74, 0x18, 0xa1, 0x29, 0xe5,
-    0x14, 0x84, 0x4c, 0xb3, 0xb9, 0xe5, 0x0d, 0x9f, 0x04, 0x43, 0xb1, 0x8b, 0xec, 0x8c, 0x1e, 0x8c,
-    0x8d, 0xd6, 0x90, 0x20, 0xa4, 0x13, 0x80, 0xd2, 0xf2, 0xa6, 0x3b, 0x6e, 0x4f, 0xd9, 0x39, 0x3d,
+    0x49, 0x69, 0xde, 0x09, 0xf3, 0x53, 0xf8, 0x81, 0xfc, 0x00, 0x1e, 0x77, 0xf4, 0x28, 0xdb, 0x17,
+    0x91, 0x65, 0x0a, 0x5b, 0xc5, 0xe3, 0xfb, 0x6b, 0x79, 0xf3, 0xe4, 0x09, 0x1d, 0x58, 0x94, 0x38,
+    0xb7, 0x17, 0x45, 0x69, 0xd0, 0xb0, 0xf6, 0x76, 0x1a, 0xb5, 0x68, 0xe3, 0x06, 0x70, 0xf4, 0xde,
+    0xa0, 0xdd, 0x48, 0xcb, 0xc2, 0x2a, 0x83, 0xcc, 0xa7, 0x7d, 0x01, 0x16, 0xca, 0x17, 0x88, 0xb2,
+    0x57, 0xe0, 0xc4, 0x27, 0x41, 0x53, 0xec, 0x12, 0x1b, 0xd1, 0xc1, 0x64, 0x9e, 0xe7, 0x76, 0xa1,
+    0x13, 0x21, 0x11, 0x78, 0xdd, 0x27, 0x01, 0x11, 0x7b, 0xc6, 0x4e, 0xe9, 0x30, 0x42, 0x53, 0xca,
+    0x29, 0x08, 0x99, 0x66, 0x73, 0xcb, 0x1b, 0x3e, 0x09, 0x86, 0x62, 0x1f, 0xd9, 0x19, 0x3d, 0x18,
+    0x1b, 0xad, 0x21, 0x41, 0x48, 0x27, 0x00, 0xa5, 0xe5, 0x4d, 0x77, 0x5c, 0x45, 0xd9, 0x39, 0x3d,
     0x7a, 0x80, 0x6c, 0xaa, 0x62, 0x53, 0x2a, 0x63, 0x52, 0x17, 0xac, 0xe5, 0xfe, 0xfc, 0xe3, 0x8c,
     0xd3, 0x4e, 0x08, 0x70, 0x6f, 0x52, 0xe0, 0x6d, 0x9f, 0x04, 0x3d, 0xf1, 0x3b, 0xb2, 0x80, 0x1e,
-    0x86, 0x12, 0x13, 0x35, 0x36, 0xb3, 0x59, 0x86, 0x33, 0xd0, 0xc8, 0x3b, 0x6e, 0xc9, 0x3e, 0x6f,
-    0x3a, 0xb8, 0xb5, 0x02, 0x64, 0xa2, 0x64, 0x9c, 0x03, 0xef, 0xfa, 0x24, 0xe8, 0x8a, 0x6d, 0x62,
-    0x57, 0xf4, 0x78, 0xab, 0x92, 0xc7, 0x0c, 0x55, 0xa6, 0x7f, 0x6e, 0xda, 0x73, 0x3b, 0xff, 0xfb,
-    0xbc, 0x49, 0x71, 0x27, 0x2d, 0x46, 0x0b, 0x9d, 0x40, 0x1a, 0xe6, 0x26, 0x79, 0xe6, 0xb4, 0x4a,
-    0xb1, 0xc7, 0x9b, 0x0e, 0xab, 0x4c, 0x90, 0x5e, 0x43, 0x81, 0x8a, 0xf7, 0xab, 0x0e, 0x77, 0x30,
-    0x3c, 0xf9, 0x58, 0x79, 0x64, 0xb9, 0xf2, 0xc8, 0xd7, 0xca, 0x23, 0x6f, 0x6b, 0xaf, 0xb6, 0x5c,
-    0x7b, 0xb5, 0xcf, 0xb5, 0x57, 0x7b, 0xaa, 0x17, 0x71, 0xdc, 0x76, 0x0f, 0x7e, 0xf9, 0x1d, 0x00,
-    0x00, 0xff, 0xff, 0xed, 0xf5, 0xf8, 0xee, 0x00, 0x02, 0x00, 0x00,
+    0x86, 0x12, 0x13, 0x35, 0x36, 0xb3, 0x59, 0x86, 0x33, 0xd0, 0xc8, 0x3b, 0x6e, 0x49, 0x95, 0x37,
+    0x1d, 0xdc, 0x5a, 0x01, 0x32, 0x51, 0x32, 0xce, 0x81, 0x77, 0x7d, 0x12, 0x74, 0xc5, 0x2e, 0xb1,
+    0x2b, 0x7a, 0xbc, 0x53, 0xc9, 0x63, 0x86, 0x2a, 0xd3, 0x3f, 0x37, 0xed, 0xb9, 0x9d, 0xff, 0x7d,
+    0xde, 0xa4, 0xb8, 0x93, 0x16, 0xa3, 0x85, 0x4e, 0x20, 0x0d, 0x73, 0x93, 0x3c, 0x73, 0xba, 0x4d,
+    0x51, 0xe1, 0x6d, 0x3b, 0x9b, 0x4c, 0x08, 0xe9, 0x35, 0x14, 0xa8, 0x78, 0xdf, 0x95, 0x58, 0xd1,
+    0xf0, 0xe4, 0x63, 0xe5, 0x91, 0xe5, 0xca, 0x23, 0x5f, 0x2b, 0x8f, 0xbc, 0xad, 0xbd, 0xda, 0x72,
+    0xed, 0xd5, 0x3e, 0xd7, 0x5e, 0xed, 0xa9, 0x5e, 0xc4, 0x71, 0xdb, 0x3d, 0xf9, 0xe5, 0x77, 0x00,
+    0x00, 0x00, 0xff, 0xff, 0x97, 0x7e, 0x47, 0xd4, 0x02, 0x02, 0x00, 0x00,
 }
 
 func (m *Get) Marshal() (dAtA []byte, err error) {
@@ -262,8 +262,8 @@ func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     _ = i
     var l int
     _ = l
-    if m.CommitedDepth != 0 {
-        i = encodeVarintStatus(dAtA, i, uint64(m.CommitedDepth))
+    if m.CommittedDepth != 0 {
+        i = encodeVarintStatus(dAtA, i, uint64(m.CommittedDepth))
         i--
         dAtA[i] = 0x58
     }
@@ -385,8 +385,8 @@ func (m *Snapshot) Size() (n int) {
     if m.LastSyncedBlock != 0 {
         n += 1 + sovStatus(uint64(m.LastSyncedBlock))
     }
-    if m.CommitedDepth != 0 {
-        n += 1 + sovStatus(uint64(m.CommitedDepth))
+    if m.CommittedDepth != 0 {
+        n += 1 + sovStatus(uint64(m.CommittedDepth))
     }
     return n
 }
@@ -677,9 +677,9 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
             }
         case 11:
             if wireType != 0 {
-                return fmt.Errorf("proto: wrong wireType = %d for field CommitedDepth", wireType)
+                return fmt.Errorf("proto: wrong wireType = %d for field CommittedDepth", wireType)
             }
-            m.CommitedDepth = 0
+            m.CommittedDepth = 0
             for shift := uint(0); ; shift += 7 {
                 if shift >= 64 {
                     return ErrIntOverflowStatus
@@ -689,7 +689,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
                 }
                 b := dAtA[iNdEx]
                 iNdEx++
-                m.CommitedDepth |= uint32(b&0x7F) << shift
+                m.CommittedDepth |= uint32(b&0x7F) << shift
                 if b < 0x80 {
                     break
                 }
diff --git a/pkg/status/internal/pb/status.proto b/pkg/status/internal/pb/status.proto
index bfc25a447d3..7885139fd9c 100644
--- a/pkg/status/internal/pb/status.proto
+++ b/pkg/status/internal/pb/status.proto
@@ -25,5 +25,5 @@ message Snapshot {
     bool IsReachable = 8;
     uint64 ReserveSizeWithinRadius = 9;
     uint64 LastSyncedBlock = 10;
-    uint32 CommitedDepth = 11;
+    uint32 CommittedDepth = 11;
 }
diff --git a/pkg/status/status.go b/pkg/status/status.go
index 68bb8e1cd98..38cccc7bea1 100644
--- a/pkg/status/status.go
+++ b/pkg/status/status.go
@@ -39,7 +39,7 @@ type Reserve interface {
     ReserveSize() int
     ReserveSizeWithinRadius() uint64
     StorageRadius() uint8
-    CommitedDepth() uint8
+    CommittedDepth() uint8
 }
 
 type topologyDriver interface {
@@ -87,14 +87,14 @@ func (s *Service) LocalSnapshot() (*Snapshot, error) {
         reserveSizeWithinRadius uint64
         connectedPeers          uint64
         neighborhoodSize        uint64
-        commitedDepth           uint8
+        committedDepth          uint8
     )
 
     if s.reserve != nil {
         storageRadius = s.reserve.StorageRadius()
         reserveSize = uint64(s.reserve.ReserveSize())
         reserveSizeWithinRadius = s.reserve.ReserveSizeWithinRadius()
-        commitedDepth = s.reserve.CommitedDepth()
+        committedDepth = s.reserve.CommittedDepth()
     }
 
     if s.sync != nil {
@@ -131,7 +131,7 @@ func (s *Service) LocalSnapshot() (*Snapshot, error) {
         BatchCommitment: commitment,
         IsReachable:     s.topologyDriver.IsReachable(),
         LastSyncedBlock: s.chainState.GetChainState().Block,
-        CommitedDepth:   uint32(commitedDepth),
+        CommittedDepth:  uint32(committedDepth),
     }, nil
 }
diff --git a/pkg/status/status_test.go b/pkg/status/status_test.go
index f8d38cd367c..5c60e887959 100644
--- a/pkg/status/status_test.go
+++ b/pkg/status/status_test.go
@@ -33,7 +33,7 @@ func TestStatus(t *testing.T) {
         NeighborhoodSize: 1,
         IsReachable:      true,
         LastSyncedBlock:  6092500,
-        CommitedDepth:    1,
+        CommittedDepth:   1,
     }
 
     sssMock := &statusSnapshotMock{want}
@@ -204,4 +204,4 @@ func (m *statusSnapshotMock) GetChainState() *postage.ChainState {
 func (m *statusSnapshotMock) ReserveSizeWithinRadius() uint64 {
     return m.Snapshot.ReserveSizeWithinRadius
 }
-func (m *statusSnapshotMock) CommitedDepth() uint8 { return uint8(m.Snapshot.CommitedDepth) }
+func (m *statusSnapshotMock) CommittedDepth() uint8 { return uint8(m.Snapshot.CommittedDepth) }
diff --git a/pkg/storageincentives/agent.go b/pkg/storageincentives/agent.go
index 8a9d7d15db1..3be9ebb28ea 100644
--- a/pkg/storageincentives/agent.go
+++ b/pkg/storageincentives/agent.go
@@ -391,14 +391,14 @@ func (a *Agent) handleClaim(ctx context.Context, round uint64) error {
 
 func (a *Agent) handleSample(ctx context.Context, round uint64) (bool, error) {
     // minimum proximity between the anchor and the stored chunks
-    commitedDepth := a.store.CommitedDepth()
+    committedDepth := a.store.CommittedDepth()
 
     if a.state.IsFrozen() {
         a.logger.Info("skipping round because node is frozen")
         return false, nil
     }
 
-    isPlaying, err := a.contract.IsPlaying(ctx, commitedDepth)
+    isPlaying, err := a.contract.IsPlaying(ctx, committedDepth)
     if err != nil {
         a.metrics.ErrCheckIsPlaying.Inc()
         return false, err
@@ -431,21 +431,21 @@ func (a *Agent) handleSample(ctx context.Context, round uint64) (bool, error) {
     }
 
     now := time.Now()
-    sample, err := a.makeSample(ctx, commitedDepth)
+    sample, err := a.makeSample(ctx, committedDepth)
     if err != nil {
         return false, err
     }
     dur := time.Since(now)
     a.metrics.SampleDuration.Set(dur.Seconds())
 
-    a.logger.Info("produced sample", "hash", sample.ReserveSampleHash, "radius", commitedDepth, "round", round)
+    a.logger.Info("produced sample", "hash", sample.ReserveSampleHash, "radius", committedDepth, "round", round)
 
     a.state.SetSampleData(round, sample, dur)
 
     return true, nil
 }
 
-func (a *Agent) makeSample(ctx context.Context, commitedDepth uint8) (SampleData, error) {
+func (a *Agent) makeSample(ctx context.Context, committedDepth uint8) (SampleData, error) {
     salt, err := a.contract.ReserveSalt(ctx)
     if err != nil {
         return SampleData{}, err
@@ -456,7 +456,7 @@ func (a *Agent) makeSample(ctx context.Context, commitedDepth uint8) (SampleData
         return SampleData{}, err
     }
 
-    rSample, err := a.store.ReserveSample(ctx, salt, commitedDepth, uint64(timeLimiter), a.minBatchBalance())
+    rSample, err := a.store.ReserveSample(ctx, salt, committedDepth, uint64(timeLimiter), a.minBatchBalance())
     if err != nil {
         return SampleData{}, err
     }
@@ -470,7 +470,7 @@ func (a *Agent) makeSample(ctx context.Context, commitedDepth uint8) (SampleData
         Anchor1:            salt,
         ReserveSampleItems: rSample.Items,
         ReserveSampleHash:  sampleHash,
-        StorageRadius:      commitedDepth,
+        StorageRadius:      committedDepth,
     }
 
     return sample, nil
diff --git a/pkg/storer/mock/mockreserve.go b/pkg/storer/mock/mockreserve.go
index 5f72fa9cbf6..7c6a1642aba 100644
--- a/pkg/storer/mock/mockreserve.go
+++ b/pkg/storer/mock/mockreserve.go
@@ -146,7 +146,7 @@ func (s *ReserveStore) SetStorageRadius(r uint8) {
     s.radius = r
     s.mtx.Unlock()
 }
-func (s *ReserveStore) CommitedDepth() uint8 {
+func (s *ReserveStore) CommittedDepth() uint8 {
     s.mtx.Lock()
     defer s.mtx.Unlock()
     return s.radius + uint8(s.capacityDoubling)
diff --git a/pkg/storer/mock/mockstorer.go b/pkg/storer/mock/mockstorer.go
index 523dc68f829..6ab457ab759 100644
--- a/pkg/storer/mock/mockstorer.go
+++ b/pkg/storer/mock/mockstorer.go
@@ -220,7 +220,7 @@ func (m *mockStorer) ChunkStore() storage.ReadOnlyChunkStore {
 
 func (m *mockStorer) StorageRadius() uint8 { return 0 }
 
-func (m *mockStorer) CommitedDepth() uint8 { return 0 }
+func (m *mockStorer) CommittedDepth() uint8 { return 0 }
 
 func (m *mockStorer) IsWithinStorageRadius(_ swarm.Address) bool { return true }
diff --git a/pkg/storer/reserve.go b/pkg/storer/reserve.go
index 16642959628..3ae3e6df99d 100644
--- a/pkg/storer/reserve.go
+++ b/pkg/storer/reserve.go
@@ -416,7 +416,7 @@ func (db *DB) StorageRadius() uint8 {
     return db.reserve.Radius()
 }
 
-func (db *DB) CommitedDepth() uint8 {
+func (db *DB) CommittedDepth() uint8 {
     if db.reserve == nil {
         return 0
     }
@@ -514,20 +514,20 @@ type NeighborhoodStat struct {
 
 func (db *DB) NeighborhoodsStat(ctx context.Context) ([]*NeighborhoodStat, error) {
     radius := db.StorageRadius()
-    commitedDepth := db.CommitedDepth()
+    committedDepth := db.CommittedDepth()
 
     prefixes := neighborhoodPrefixes(db.baseAddr, int(radius), db.reserveOptions.capacityDoubling)
     neighs := make([]*NeighborhoodStat, len(prefixes))
     for i, n := range prefixes {
         neighs[i] = &NeighborhoodStat{
-            Neighborhood:            swarm.NewNeighborhood(n, commitedDepth),
+            Neighborhood:            swarm.NewNeighborhood(n, committedDepth),
             ReserveSizeWithinRadius: 0,
-            Proximity:               min(commitedDepth, swarm.Proximity(n.Bytes(), db.baseAddr.Bytes()))}
+            Proximity:               min(committedDepth, swarm.Proximity(n.Bytes(), db.baseAddr.Bytes()))}
     }
 
     err := db.reserve.IterateChunksItems(0, func(ch *reserve.ChunkBinItem) (bool, error) {
         for _, n := range neighs {
-            if swarm.Proximity(ch.Address.Bytes(), n.Neighborhood.Bytes()) >= commitedDepth {
+            if swarm.Proximity(ch.Address.Bytes(), n.Neighborhood.Bytes()) >= committedDepth {
                 n.ReserveSizeWithinRadius++
                 break
             }
diff --git a/pkg/storer/sample.go b/pkg/storer/sample.go
index 965e8e5b8ad..9ecd97423df 100644
--- a/pkg/storer/sample.go
+++ b/pkg/storer/sample.go
@@ -61,7 +61,7 @@ type Sample struct {
 func (db *DB) ReserveSample(
     ctx context.Context,
     anchor []byte,
-    commitedDepth uint8,
+    committedDepth uint8,
     consensusTime uint64,
     minBatchBalance *big.Int,
 ) (Sample, error) {
@@ -98,7 +98,7 @@ func (db *DB) ReserveSample(
     }()
 
     err := db.reserve.IterateChunksItems(db.StorageRadius(), func(ch *reserve.ChunkBinItem) (bool, error) {
-        if swarm.Proximity(ch.Address.Bytes(), anchor) < commitedDepth {
+        if swarm.Proximity(ch.Address.Bytes(), anchor) < committedDepth {
             return false, nil
         }
         select {
@@ -261,12 +261,12 @@ func (db *DB) ReserveSample(
     allStats.TotalDuration = time.Since(t)
 
     if err := g.Wait(); err != nil {
-        db.logger.Info("reserve sampler finished with error", "err", err, "duration", time.Since(t), "storage_radius", commitedDepth, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats))
+        db.logger.Info("reserve sampler finished with error", "err", err, "duration", time.Since(t), "storage_radius", committedDepth, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats))
 
         return Sample{}, fmt.Errorf("sampler: failed creating sample: %w", err)
     }
 
-    db.logger.Info("reserve sampler finished", "duration", time.Since(t), "storage_radius", commitedDepth, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats))
"storage_radius", commitedDepth, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats)) + db.logger.Info("reserve sampler finished", "duration", time.Since(t), "storage_radius", committedDepth, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats)) return Sample{Stats: *allStats, Items: sampleItems}, nil } diff --git a/pkg/storer/storer.go b/pkg/storer/storer.go index fccc036422e..2628807a24e 100644 --- a/pkg/storer/storer.go +++ b/pkg/storer/storer.go @@ -163,7 +163,7 @@ type ReserveStore interface { type RadiusChecker interface { IsWithinStorageRadius(addr swarm.Address) bool StorageRadius() uint8 - CommitedDepth() uint8 + CommittedDepth() uint8 } // LocalStore is a read-only ChunkStore. It can be used to check if chunk is known