Commit b2986df
chore: add metric

vgonkivs committed Jan 10, 2024
1 parent d0f3c1b commit b2986df

Showing 2 changed files with 28 additions and 4 deletions.
28 changes: 24 additions & 4 deletions sync/metrics.go
@@ -20,6 +20,7 @@ type metrics struct {
 	syncLoopStarted       metric.Int64Counter
 	trustedPeersOutOfSync metric.Int64Counter
 	laggingHeadersStart   metric.Int64Counter
+	readHeader            metric.Int64Counter

 	subjectiveHead atomic.Int64
 	blockTime      metric.Float64Histogram
@@ -62,6 +63,16 @@ func newMetrics(headersThreshold time.Duration) (*metrics, error) {
 		return nil, err
 	}

+	readHeader, err := meter.Int64Counter(
+		"hdr_sync_getter_read",
+		metric.WithDescription(
+			"sync getter used to get the header instead of receiving it through the subscription",
+		),
+	)
+	if err != nil {
+		return nil, err
+	}
+
 	subjectiveHead, err := meter.Int64ObservableGauge(
 		"hdr_sync_subjective_head",
 		metric.WithDescription("subjective head height"),
@@ -83,6 +94,7 @@ func newMetrics(headersThreshold time.Duration) (*metrics, error) {
 		syncLoopStarted:       syncLoopStarted,
 		trustedPeersOutOfSync: trustedPeersOutOfSync,
 		laggingHeadersStart:   laggingHeadersStart,
+		readHeader:            readHeader,
 		blockTime:             blockTime,
 		subjectiveHeadInst:    subjectiveHead,
 		headersThreshold:      headersThreshold,
@@ -127,23 +139,31 @@ func (m *metrics) syncingStarted(ctx context.Context) {
 	})
 }

+func (m *metrics) laggingNetworkHead(ctx context.Context) {
+	m.observe(ctx, func(ctx context.Context) {
+		m.laggingHeadersStart.Add(ctx, 1)
+	})
+}
+
 func (m *metrics) peersOutOufSync(ctx context.Context) {
 	m.observe(ctx, func(ctx context.Context) {
 		m.trustedPeersOutOfSync.Add(ctx, 1)
 	})
 }

+func (m *metrics) readHeaderGetter(ctx context.Context) {
+	m.observe(ctx, func(ctx context.Context) {
+		m.readHeader.Add(ctx, 1)
+	})
+}
+
 func (m *metrics) observeNewSubjectiveHead(ctx context.Context, height int64, timestamp time.Time) {
 	m.observe(ctx, func(ctx context.Context) {
 		m.subjectiveHead.Store(height)

 		if !m.prevHeader.IsZero() {
 			m.blockTime.Record(ctx, timestamp.Sub(m.prevHeader).Seconds())
 		}

-		if time.Since(m.headerReceived) > m.headersThreshold {
-			m.laggingHeadersStart.Add(ctx, 1)
-		}
 		m.prevHeader = timestamp
-		m.headerReceived = time.Now()
 	})
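Note on the pattern above: the new hdr_sync_getter_read counter follows the same OpenTelemetry recipe as the existing counters in this file: register an Int64Counter on a meter once, then bump it through the unexported observe wrapper. Below is a minimal, self-contained sketch of that recipe using the public go.opentelemetry.io/otel API. The observe helper is not part of this diff, so its nil-guard shape here is an assumption, and the meter name "header/sync" is illustrative.

	// Minimal sketch of the OpenTelemetry counter pattern this diff relies on.
	package main

	import (
		"context"

		"go.opentelemetry.io/otel"
		"go.opentelemetry.io/otel/metric"
	)

	type metrics struct {
		readHeader metric.Int64Counter
	}

	func newMetrics() (*metrics, error) {
		// Meter name is illustrative; the actual name is not shown in this diff.
		meter := otel.Meter("header/sync")
		readHeader, err := meter.Int64Counter(
			"hdr_sync_getter_read",
			metric.WithDescription(
				"sync getter used to get the header instead of receiving it through the subscription",
			),
		)
		if err != nil {
			return nil, err
		}
		return &metrics{readHeader: readHeader}, nil
	}

	// observe guards every metric update so that a nil receiver (metrics
	// disabled) is a no-op rather than a panic. Assumed shape, not from this diff.
	func (m *metrics) observe(ctx context.Context, fn func(context.Context)) {
		if m == nil {
			return
		}
		fn(ctx)
	}

	func (m *metrics) readHeaderGetter(ctx context.Context) {
		m.observe(ctx, func(ctx context.Context) {
			m.readHeader.Add(ctx, 1)
		})
	}

Routing every update through a wrapper like observe keeps the call sites in sync_head.go safe when metrics are disabled, since a nil *metrics receiver simply no-ops.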
4 changes: 4 additions & 0 deletions sync/sync_head.go
@@ -22,6 +22,7 @@ func (s *Syncer[H]) Head(ctx context.Context, _ ...header.HeadOption[H]) (H, err
 	if isRecent(sbjHead, s.Params.blockTime, s.Params.recencyThreshold) {
 		return sbjHead, nil
 	}
+	s.metrics.laggingNetworkHead(s.ctx)
 	// otherwise, request head from the network
 	//
 	// TODO(@Wondertan): Here is another potential networking optimization:
@@ -38,6 +39,7 @@ func (s *Syncer[H]) Head(ctx context.Context, _ ...header.HeadOption[H]) (H, err
 		return s.Head(ctx)
 	}
 	defer s.getter.Unlock()
+	s.metrics.readHeaderGetter(s.ctx)
 	netHead, err := s.getter.Head(ctx, header.WithTrustedHead[H](sbjHead))
 	if err != nil {
 		log.Warnw("failed to get recent head, returning current subjective", "sbjHead", sbjHead.Height(), "err", err)
@@ -84,6 +86,7 @@ func (s *Syncer[H]) subjectiveHead(ctx context.Context) (H, error) {
 		return s.subjectiveHead(ctx)
 	}
 	defer s.getter.Unlock()
+	s.metrics.readHeaderGetter(s.ctx)
 	trustHead, err := s.getter.Head(ctx)
 	if err != nil {
 		return trustHead, err
@@ -99,6 +102,7 @@ func (s *Syncer[H]) subjectiveHead(ctx context.Context) (H, error) {
 	case isExpired(trustHead, s.Params.TrustingPeriod):
 		log.Warnw("subjective initialization with an expired header", "height", trustHead.Height())
 	case !isRecent(trustHead, s.Params.blockTime, s.Params.recencyThreshold):
+		s.metrics.laggingNetworkHead(s.ctx)
 		log.Warnw("subjective initialization with an old header", "height", trustHead.Height())
 	}
 	log.Warn("trusted peer is out of sync")
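These counters only become visible if the embedding application installs a global meter provider; otherwise otel.Meter hands back a no-op implementation and the Add calls are silently dropped. Below is a hedged sketch of one common setup, wiring the OpenTelemetry Prometheus exporter so counters such as hdr_sync_getter_read appear on a /metrics endpoint. This wiring is not part of the commit, and the listen address is arbitrary.

	// Hypothetical wiring, not part of this commit: install a Prometheus-backed
	// meter provider so OpenTelemetry counters are exported for scraping.
	package main

	import (
		"log"
		"net/http"

		"github.com/prometheus/client_golang/prometheus/promhttp"
		"go.opentelemetry.io/otel"
		"go.opentelemetry.io/otel/exporters/prometheus"
		sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	)

	func main() {
		// The Prometheus exporter acts as a metric reader that collects
		// OpenTelemetry instruments into the default Prometheus registry.
		exporter, err := prometheus.New()
		if err != nil {
			log.Fatal(err)
		}
		provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter))
		otel.SetMeterProvider(provider)

		// Anything recorded through otel.Meter(...) is now scrapeable here.
		http.Handle("/metrics", promhttp.Handler())
		log.Fatal(http.ListenAndServe(":2112", nil))
	}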
