Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: avoid useless io in large-payload queries #500

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 5 additions & 2 deletions config/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -347,6 +347,9 @@ kill_query_user: <kill_query_user_config> | optional
heartbeat: <heartbeat_config> | optional

# RetryNumber - the number of times a query is retried when a host fails to respond.
# By default queries are not retried.
# Note: Retrying may impact performance if there are many large-payload requests (such as inserts),
# because it requires copying the request body for reuse.
retry_number: 0

```
Expand Down Expand Up @@ -381,11 +384,11 @@ max_execution_time: <duration> | optional | default = 120s
# By default there are no per-minute limits
requests_per_minute: <int> | optional | default = 0

# The burst of request packet size token bucket for user
# The burst of request packet body size token bucket for user
# By default there are no request packet body size limits
request_packet_size_tokens_burst: <byte_size> | optional | default = 0

# The request packet size tokens produced rate per second for user
# The request packet body size tokens produced rate per second for user
# By default there are no request packet body size limits
request_packet_size_tokens_rate: <byte_size> | optional | default = 0

Expand Down
23 changes: 10 additions & 13 deletions proxy.go
Original file line number Diff line number Diff line change
Expand Up @@ -205,21 +205,22 @@ func executeWithRetry(
startTime := time.Now()
var since float64

var body []byte
var err error
// Use readAndRestoreRequestBody to read the entire request body into a byte slice,
// and to restore req.Body so that it can be reused later in the code.
body, err := readAndRestoreRequestBody(req)
if err != nil {
since := time.Since(startTime).Seconds()
return since, err
if maxRetry > 0 {
body, err = readAndRestoreRequestBody(req)
if err != nil {
since := time.Since(startTime).Seconds()
return since, err
}
}

numRetry := 0
for {
rp(rw, req)

// Restore req.Body after it's consumed by 'rp' for potential reuse.
req.Body = io.NopCloser(bytes.NewBuffer(body))

err := ctx.Err()
if err != nil {
since = time.Since(startTime).Seconds()
Expand Down Expand Up @@ -269,6 +270,8 @@ func executeWithRetry(
since = time.Since(startTime).Seconds()
break
}
// Restore req.Body after it's consumed by 'rp' for potential reuse.
req.Body = io.NopCloser(bytes.NewBuffer(body))
numRetry++
}
return since, nil
Expand Down Expand Up @@ -915,11 +918,5 @@ func (rp *reverseProxy) getScope(req *http.Request) (*scope, int, error) {
}

s := newScope(req, u, c, cu, sessionId, sessionTimeout)

q, err := getFullQuery(req)
if err != nil {
return nil, http.StatusBadRequest, fmt.Errorf("%s: cannot read query: %w", s, err)
}
s.requestPacketSize = len(q)
return s, 0, nil
}
2 changes: 2 additions & 0 deletions scope.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,8 @@ func newScope(req *http.Request, u *user, c *cluster, cu *clusterUser, sessionId
"replica": h.ReplicaName(),
"cluster_node": h.Host(),
},

requestPacketSize: max(0, int(req.ContentLength)),
}
return s
}
Expand Down
Loading