Update README and improve rate limit control
vearutop committed Sep 15, 2020
1 parent f89aab3 commit 5f9db26
Showing 2 changed files with 95 additions and 60 deletions.
119 changes: 71 additions & 48 deletions README.md
@@ -12,9 +12,17 @@ Scale curl requests, cousin of `ab`, `siege`, [`hey`](https://github.com/rakyll/
## Install

```
-go get -u github.com/vearutop/plt
+go get -u github.com/vearutop/plt@latest
```

+Or
+
+```
+GO111MODULE=on go get github.com/vearutop/plt@latest
+```
+
+Or download binary from [releases](https://github.com/vearutop/plt/releases).

## Usage

```
@@ -49,64 +57,79 @@ If the server is wrapped with Envoy proxy, upstream latency distribution will be
## Example

```bash
-plt --live-ui --duration=1m --number=0 --rate-limit=60 curl -X GET "https://acme-dummy-service.staging-k8s.acme.io/" -H "accept: application/json"
+plt --live-ui --duration=1m --rate-limit=60 curl -X GET "https://acme-dummy-service.staging-k8s.acme.io/" -H "accept: application/json"
```

```
-Requests per second: 60.80
+Requests per second: 60.79
Total requests: 3650
Time spent: 1m0.041s
Request latency percentiles:
99%: 101.00ms
95%: 45.89ms
90%: 42.58ms
50%: 41.44ms
Request latency distribution in ms:
[ min max] cnt total% (3650 events)
-[ 32.17 32.17] 1 0.03%
-[ 32.19 32.26] 4 0.11%
-[ 32.30 32.62] 20 0.55%
-[ 32.64 33.52] 314 8.60% ........
-[ 33.52 34.84] 1808 49.53% .................................................
-[ 34.84 37.31] 445 12.19% ............
-[ 37.31 46.23] 304 8.33% ........
-[ 46.24 67.19] 151 4.14% ....
-[ 67.44 121.88] 240 6.58% ......
-[121.90 319.30] 363 9.95% .........
+[ 39.62 39.62] 1 0.03%
+[ 39.67 39.67] 1 0.03%
+[ 39.76 39.91] 12 0.33%
+[ 39.92 40.46] 217 5.95% .....
+[ 40.46 41.71] 2206 60.44% ............................................................
+[ 41.71 47.29] 1058 28.99% ............................
+[ 47.36 55.04] 74 2.03% ..
+[ 55.28 74.25] 18 0.49%
+[ 74.40 161.25] 62 1.70% .
+[187.05 187.05] 1 0.03%
Requests with latency more than 1s: 0
Envoy upstream latency percentiles:
99%: 12ms
95%: 5ms
90%: 3ms
50%: 2ms
Envoy upstream latency distribution in ms:
[ min max] cnt total% (3650 events)
[ 1.00 1.00] 474 12.99% ............
[ 2.00 2.00] 2157 59.10% ...........................................................
[ 3.00 4.00] 809 22.16% ......................
[ 5.00 6.00] 69 1.89% .
[ 7.00 10.00] 86 2.36% ..
[ 11.00 14.00] 35 0.96%
[ 15.00 23.00] 9 0.25%
[ 28.00 40.00] 5 0.14%
[ 49.00 81.00] 3 0.08%
[ 98.00 148.00] 3 0.08%
DNS latency distribution in ms:
[ min max] cnt total% (61 events)
[0.03 0.03] 1 1.64% .
[0.18 0.18] 1 1.64% .
[0.48 0.48] 1 1.64% .
[0.66 0.80] 3 4.92% ....
[0.87 1.10] 6 9.84% .........
[1.61 1.89] 10 16.39% ................
[1.98 2.50] 17 27.87% ...........................
[2.62 2.83] 8 13.11% .............
[2.97 3.43] 12 19.67% ...................
[3.89 3.95] 2 3.28% ...
TLS handshake latency distribution in ms:
-[ min max] cnt total% (61 events)
-[ 78.60 78.60] 1 1.64% .
-[ 78.63 78.63] 1 1.64% .
-[ 78.66 78.66] 1 1.64% .
-[ 91.66 91.66] 1 1.64% .
-[ 92.93 92.94] 2 3.28% ...
-[ 94.22 94.56] 3 4.92% ....
-[ 95.14 95.57] 2 3.28% ...
-[203.56 210.54] 48 78.69% ..............................................................................
-[241.46 241.46] 1 1.64% .
-[415.45 415.45] 1 1.64% .
+[ min max] cnt total% (50 events)
+[ 9.88 9.88] 1 2.00% ..
+[10.13 10.15] 3 6.00% ......
+[10.18 10.22] 4 8.00% ........
+[10.25 10.31] 5 10.00% ..........
+[10.32 10.36] 4 8.00% ........
+[10.37 10.41] 12 24.00% ........................
+[10.42 10.48] 7 14.00% ..............
+[10.49 10.56] 6 12.00% ............
+[10.59 10.71] 7 14.00% ..............
+[10.76 10.76] 1 2.00% ..
Connection latency distribution in ms:
-[ min max] cnt total% (61 events)
-[31.86 31.86] 1 1.64% .
-[31.89 31.89] 1 1.64% .
-[32.17 32.22] 4 6.56% ......
-[32.26 32.35] 5 8.20% ........
-[32.72 32.72] 1 1.64% .
-[33.19 33.50] 7 11.48% ...........
-[33.89 34.13] 7 11.48% ...........
-[34.94 35.88] 14 22.95% ......................
-[37.01 38.08] 19 31.15% ...............................
-[40.07 40.33] 2 3.28% ...
+[ min max] cnt total% (50 events)
+[36.36 36.36] 1 2.00% ..
+[36.38 36.39] 6 12.00% ............
+[36.39 36.41] 8 16.00% ................
+[36.42 36.43] 6 12.00% ............
+[36.44 36.48] 5 10.00% ..........
+[36.50 36.54] 2 4.00% ....
+[36.58 36.66] 2 4.00% ....
+[36.70 36.86] 12 24.00% ........................
+[36.86 37.09] 4 8.00% ........
+[37.11 37.48] 4 8.00% ........
Responses by status code
[200] 3650
```
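The Envoy upstream latency section above is populated from the `x-envoy-upstream-service-time` response header, which Envoy attaches to proxied responses with the upstream service time in milliseconds. A minimal sketch of reading it from a plain `net/http` response (the helper is illustrative, not plt's actual code):

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// envoyUpstreamLatency is an illustrative helper, not plt's code: it reads
// the x-envoy-upstream-service-time header that Envoy attaches to proxied
// responses, reporting upstream service time in milliseconds.
func envoyUpstreamLatency(resp *http.Response) (time.Duration, bool) {
	v := resp.Header.Get("X-Envoy-Upstream-Service-Time")
	if v == "" {
		return 0, false // Not behind Envoy, or header filtered out.
	}

	ms, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		return 0, false
	}

	return time.Duration(ms) * time.Millisecond, true
}

func main() {
	resp, err := http.Get("https://acme-dummy-service.staging-k8s.acme.io/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if d, ok := envoyUpstreamLatency(resp); ok {
		fmt.Println("Envoy upstream latency:", d)
	}
}
```

Comparing this header against the measured round-trip latency is what lets the report separate upstream service time from proxy and network overhead.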
36 changes: 24 additions & 12 deletions loadgen/run.go
@@ -38,6 +38,7 @@ type runner struct {
mu sync.Mutex
concurrencyLimit int64
rateLimit int64
+currentReqRate int64
rl *rate.Limiter
}

@@ -81,7 +82,7 @@ func Run(lf Flags, jobProducer JobProducer) {

if lf.RateLimit > 0 {
r.rateLimit = int64(lf.RateLimit)
-r.rl = rate.NewLimiter(rate.Limit(lf.RateLimit), int(r.concurrencyLimit))
+r.rl = rate.NewLimiter(rate.Limit(lf.RateLimit), 1)
}

r.exit = make(chan os.Signal, 1)
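The change worth noting in this hunk is the limiter's burst size: dropping it from the concurrency limit to 1 means the token bucket never holds more than one token, so requests are released evenly at the configured rate instead of in bursts of up to `concurrencyLimit` after an idle period. A standalone sketch of the difference using `golang.org/x/time/rate` (illustrative rates, not plt code):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Burst 1: at 5 req/s, Wait releases callers one at a time,
	// roughly every 200ms, even if many goroutines are blocked.
	smooth := rate.NewLimiter(rate.Limit(5), 1)

	// Burst 10: up to 10 callers pass immediately before the
	// 5 req/s refill rate starts pacing the rest.
	bursty := rate.NewLimiter(rate.Limit(5), 10)

	start := time.Now()
	for i := 0; i < 3; i++ {
		_ = smooth.Wait(context.Background())
		fmt.Printf("smooth #%d at %v\n", i, time.Since(start).Round(time.Millisecond))
	}

	start = time.Now()
	for i := 0; i < 3; i++ {
		_ = bursty.Wait(context.Background())
		fmt.Printf("bursty #%d at %v\n", i, time.Since(start).Round(time.Millisecond))
	}
}
```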
@@ -97,12 +98,23 @@ func Run(lf Flags, jobProducer JobProducer) {
return
}

-rl := rate.NewLimiter(rate.Limit(lim), int(atomic.LoadInt64(&r.concurrencyLimit)))
+rl := rate.NewLimiter(rate.Limit(lim), 1)

r.mu.Lock()
r.rl = rl
r.mu.Unlock()
}

+rateLimit := func() int64 {
+lim := atomic.LoadInt64(&r.rateLimit)
+
+if lim == 0 {
+lim = atomic.LoadInt64(&r.currentReqRate)
+}
+
+return lim
+}

uiEvents := ui.PollEvents()
for e := range uiEvents {
switch e.ID {
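The new `rateLimit` closure falls back to the live-measured rate (`currentReqRate`, published by the UI loop further down) when no explicit `--rate-limit` was given, so the arrow-key handlers have a meaningful baseline to step from. And since `refreshRateLimiter` swaps `r.rl` under `r.mu`, any worker consuming the limiter needs the same lock. A hypothetical worker-side counterpart, not code from this commit, assuming the surrounding package's imports plus `context`:

```go
// waitForSlot is a sketch: it snapshots the current limiter under the
// mutex (refreshRateLimiter may replace it at any moment) and blocks
// until the token bucket releases the next request.
func (r *runner) waitForSlot(ctx context.Context) error {
	r.mu.Lock()
	rl := r.rl
	r.mu.Unlock()

	if rl == nil { // No rate limit configured.
		return nil
	}

	return rl.Wait(ctx)
}
```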
@@ -118,7 +130,6 @@ func Run(lf Flags, jobProducer JobProducer) {
for i := int64(0); i < delta; i++ {
<-limiter
}
-refreshRateLimiter()
}
case "<Left>": // Decrease concurrency.
lim := atomic.LoadInt64(&r.concurrencyLimit)
@@ -132,18 +143,16 @@ func Run(lf Flags, jobProducer JobProducer) {
}
}

-refreshRateLimiter()

case "<Up>": // Increase rate limit.
-lim := atomic.LoadInt64(&r.rateLimit)
+lim := rateLimit()
delta := int64(0.05 * float64(lim))
-atomic.AddInt64(&r.rateLimit, delta)
+atomic.StoreInt64(&r.rateLimit, lim+delta)
refreshRateLimiter()

case "<Down>": // Decrease rate limit.
-lim := atomic.LoadInt64(&r.rateLimit)
+lim := rateLimit()
delta := int64(0.05 * float64(lim))
-atomic.AddInt64(&r.rateLimit, -delta)
+atomic.StoreInt64(&r.rateLimit, lim-delta)
refreshRateLimiter()
}
}
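The switch from `atomic.AddInt64` to `atomic.StoreInt64` follows from the fallback: with no explicit limit, `r.rateLimit` holds 0 while `lim` comes from the observed rate, so adding `±delta` to the stored 0 would set the limit to the tiny delta itself; storing `lim±delta` instead adopts the observed rate as the new explicit limit. A runnable illustration with assumed numbers:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// Illustrative values: no explicit --rate-limit set (stored limit is 0),
	// while the live UI currently observes ~60 req/s.
	var rateLimit int64 = 0 // corresponds to r.rateLimit
	lim := int64(60)        // what rateLimit() returns via the fallback

	delta := int64(0.05 * float64(lim)) // 5% step = 3

	// Old behaviour: AddInt64 would bump the stored 0 to 3 req/s,
	// throttling the run drastically. New behaviour: StoreInt64
	// adopts the observed rate plus the step.
	atomic.StoreInt64(&rateLimit, lim+delta)

	fmt.Println("new rate limit:", atomic.LoadInt64(&rateLimit)) // 63
}
```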
@@ -211,7 +220,7 @@ func Run(lf Flags, jobProducer JobProducer) {
fmt.Println()
fmt.Println("Requests per second:", fmt.Sprintf("%.2f", float64(r.roundTripHist.Count)/time.Since(r.start).Seconds()))
fmt.Println("Total requests:", r.roundTripHist.Count)
fmt.Println("Time spent:", time.Since(r.start))
fmt.Println("Time spent:", time.Since(r.start).Round(time.Millisecond))

fmt.Println()
fmt.Println("Request latency percentiles:")
@@ -239,7 +248,8 @@ func (r *runner) runLiveUI() {
latencyPlot.Data = [][]float64{0: {}, 1: {}}
latencyPlot.HorizontalScale = 2

-ticker := time.NewTicker(500 * time.Millisecond).C
+tickerDuration := 500 * time.Millisecond
+ticker := time.NewTicker(tickerDuration).C

prev := time.Now()
reqPrev := 0
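`prev` and `reqPrev` snapshot the last tick's timestamp and request count; the collapsed part of this loop evidently derives a per-tick request rate from their deltas before updating them (see the `reqRateTick` store in the next hunk). A standalone sketch of that bookkeeping pattern, with a simulated counter standing in for the runner's real one:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var total int64 // stand-in for the runner's request counter

	// Simulated workload bumping the counter in the background.
	go func() {
		for {
			atomic.AddInt64(&total, 1)
			time.Sleep(10 * time.Millisecond)
		}
	}()

	tickerDuration := 500 * time.Millisecond
	ticker := time.NewTicker(tickerDuration).C

	prev := time.Now()
	reqPrev := int64(0)

	for i := 0; i < 5; i++ {
		<-ticker

		reqNum := atomic.LoadInt64(&total)

		// Requests per second over the last tick only.
		reqRateTick := float64(reqNum-reqPrev) / time.Since(prev).Seconds()

		reqPrev = reqNum
		prev = time.Now()

		fmt.Printf("%.1f req/s\n", reqRateTick)
	}
}
```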
@@ -263,6 +273,8 @@
reqPrev = reqNum
prev = time.Now()

+atomic.StoreInt64(&r.currentReqRate, int64(reqRateTick))

latencyPercentiles := widgets.NewParagraph()
latencyPercentiles.Title = "Round trip latency, ms"
latencyPercentiles.Text = ""
@@ -325,7 +337,7 @@ func (r *runner) runLiveUI() {
reqRate,
reqRateTick,
r.roundTripHist.Count,
-elaDur.String(),
+elaDur.Round(tickerDuration).String(),
)

latencyPlot.Data[0] = append(latencyPlot.Data[0], r.roundTripRolling.Min)
