This repository has been archived by the owner on Apr 19, 2024. It is now read-only.

Commit

Tidy tracing code.
Baliedge committed Oct 10, 2023
1 parent f535067 commit 2d57822
Showing 2 changed files with 11 additions and 18 deletions.
27 changes: 9 additions & 18 deletions algorithms.go
@@ -48,8 +48,7 @@ func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp *
     if ok {
         if item.Value == nil {
             msgPart := "tokenBucket: Invalid cache item; Value is nil"
-            span := trace.SpanFromContext(ctx)
-            span.AddEvent(msgPart, trace.WithAttributes(
+            trace.SpanFromContext(ctx).AddEvent(msgPart, trace.WithAttributes(
                 attribute.String("hashKey", hashKey),
                 attribute.String("key", r.UniqueKey),
                 attribute.String("name", r.Name),
@@ -58,8 +57,7 @@ func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp *
             ok = false
         } else if item.Key != hashKey {
             msgPart := "tokenBucket: Invalid cache item; key mismatch"
-            span := trace.SpanFromContext(ctx)
-            span.AddEvent(msgPart, trace.WithAttributes(
+            trace.SpanFromContext(ctx).AddEvent(msgPart, trace.WithAttributes(
                 attribute.String("itemKey", item.Key),
                 attribute.String("hashKey", hashKey),
                 attribute.String("name", r.Name),
@@ -93,8 +91,7 @@ func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp *
     t, ok := item.Value.(*TokenBucketItem)
     if !ok {
         // Client switched algorithms; perhaps due to a migration?
-        span := trace.SpanFromContext(ctx)
-        span.AddEvent("Client switched algorithms; perhaps due to a migration?")
+        trace.SpanFromContext(ctx).AddEvent("Client switched algorithms; perhaps due to a migration?")

         c.Remove(hashKey)

@@ -163,8 +160,7 @@ func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp *

     // If we are already at the limit.
     if rl.Remaining == 0 && r.Hits > 0 {
-        span := trace.SpanFromContext(ctx)
-        span.AddEvent("Already over the limit")
+        trace.SpanFromContext(ctx).AddEvent("Already over the limit")
         metricOverLimitCounter.Add(1)
         rl.Status = Status_OVER_LIMIT
         t.Status = rl.Status
@@ -173,8 +169,7 @@ func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp *

     // If requested hits takes the remainder.
     if t.Remaining == r.Hits {
-        span := trace.SpanFromContext(ctx)
-        span.AddEvent("At the limit")
+        trace.SpanFromContext(ctx).AddEvent("At the limit")
         t.Remaining = 0
         rl.Remaining = 0
         return rl, nil
@@ -183,8 +178,7 @@ func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp *
     // If requested is more than available, then return over the limit
     // without updating the cache.
     if r.Hits > t.Remaining {
-        span := trace.SpanFromContext(ctx)
-        span.AddEvent("Over the limit")
+        trace.SpanFromContext(ctx).AddEvent("Over the limit")
         metricOverLimitCounter.Add(1)
         rl.Status = Status_OVER_LIMIT
         return rl, nil
@@ -235,8 +229,7 @@ func tokenBucketNewItem(ctx context.Context, s Store, c Cache, r *RateLimitReq)

     // Client could be requesting that we always return OVER_LIMIT.
     if r.Hits > r.Limit {
-        span := trace.SpanFromContext(ctx)
-        span.AddEvent("Over the limit")
+        trace.SpanFromContext(ctx).AddEvent("Over the limit")
         metricOverLimitCounter.Add(1)
         rl.Status = Status_OVER_LIMIT
         rl.Remaining = r.Limit
@@ -279,8 +272,7 @@ func leakyBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp *
     if ok {
         if item.Value == nil {
             msgPart := "leakyBucket: Invalid cache item; Value is nil"
-            span := trace.SpanFromContext(ctx)
-            span.AddEvent(msgPart, trace.WithAttributes(
+            trace.SpanFromContext(ctx).AddEvent(msgPart, trace.WithAttributes(
                 attribute.String("hashKey", hashKey),
                 attribute.String("key", r.UniqueKey),
                 attribute.String("name", r.Name),
@@ -289,8 +281,7 @@ func leakyBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp *
             ok = false
         } else if item.Key != hashKey {
             msgPart := "leakyBucket: Invalid cache item; key mismatch"
-            span := trace.SpanFromContext(ctx)
-            span.AddEvent(msgPart, trace.WithAttributes(
+            trace.SpanFromContext(ctx).AddEvent(msgPart, trace.WithAttributes(
                 attribute.String("itemKey", item.Key),
                 attribute.String("hashKey", hashKey),
                 attribute.String("name", r.Name),
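The pattern tidied in each of these hunks is the same: instead of binding the current span to a temporary variable before recording an event, the event is recorded in a single chained call on the span returned by trace.SpanFromContext. A minimal, self-contained sketch of that call shape, using the OpenTelemetry Go API that the diff already imports (the helper name and attribute values below are illustrative, not taken from the repository):

package main

import (
    "context"

    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/trace"
)

// recordKeyMismatch records an event on whatever span is active in ctx.
// If the context carries no span, SpanFromContext returns a no-op span,
// so the call is always safe. The message mirrors the "key mismatch"
// events above; the helper itself is not part of the gubernator code.
func recordKeyMismatch(ctx context.Context, itemKey, hashKey string) {
    trace.SpanFromContext(ctx).AddEvent("Invalid cache item; key mismatch",
        trace.WithAttributes(
            attribute.String("itemKey", itemKey),
            attribute.String("hashKey", hashKey),
        ))
}

func main() {
    // Works even without a configured tracer provider.
    recordKeyMismatch(context.Background(), "item-1", "hash-1")
}

Dropping the temporary span variable removes nine near-identical two-line blocks (the 18 deletions and 9 additions counted above) without changing behavior, since AddEvent acts on the same span either way.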
2 changes: 2 additions & 0 deletions gubernator.go
@@ -588,6 +588,8 @@ func (s *V1Instance) getLocalRateLimit(ctx context.Context, r *RateLimitReq) (*R
     ctx = tracing.StartNamedScope(ctx, "V1Instance.getLocalRateLimit", trace.WithAttributes(
         attribute.String("ratelimit.key", r.UniqueKey),
         attribute.String("ratelimit.name", r.Name),
+        attribute.Int64("ratelimit.limit", r.Limit),
+        attribute.Int64("ratelimit.hits", r.Hits),
     ))

     defer prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("V1Instance.getLocalRateLimit")).ObserveDuration()
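The gubernator.go change attaches the request's limit and hit count as int64 attributes when the getLocalRateLimit span is started. A rough sketch of the same idea using the plain OpenTelemetry tracer API (the repository goes through its tracing.StartNamedScope helper instead; the tracer name and the request struct here are assumptions for illustration):

package main

import (
    "context"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/trace"
)

// rateLimitReq stands in for the fields the diff references; the real
// RateLimitReq comes from gubernator's protobuf definitions.
type rateLimitReq struct {
    UniqueKey string
    Name      string
    Limit     int64
    Hits      int64
}

// startRateLimitSpan starts a span carrying the same four attributes the
// diff passes to tracing.StartNamedScope, including the newly added
// limit and hits values.
func startRateLimitSpan(ctx context.Context, r *rateLimitReq) (context.Context, trace.Span) {
    return otel.Tracer("example").Start(ctx, "V1Instance.getLocalRateLimit",
        trace.WithAttributes(
            attribute.String("ratelimit.key", r.UniqueKey),
            attribute.String("ratelimit.name", r.Name),
            attribute.Int64("ratelimit.limit", r.Limit),
            attribute.Int64("ratelimit.hits", r.Hits),
        ))
}

func main() {
    // With no SDK installed, the global tracer is a no-op, so this runs as-is.
    ctx, span := startRateLimitSpan(context.Background(), &rateLimitReq{
        UniqueKey: "account_123", Name: "requests_per_second", Limit: 100, Hits: 1,
    })
    defer span.End()
    _ = ctx
}

Setting the attributes at span start means the limit and hits are visible on the span even if the rate-limit check later fails or returns early.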
