MINOR: stream: maintain per-stream counters of the number of passes on code

process_stream() is a complex function, and a few times some loops were
either witnessed or suspected. Each time this happens it's extremely
difficult to figure out why, because it involves combinations of analysers,
filters, errors etc.

Let's at least maintain a set of 4 counters per stream that report the
number of times we've been through each of the 4 most important blocks
(stconn changes, request analysers, response analysers, and propagation
of changes down). These counters are stored in the stream and reported in
"show sess all", just like they will be reported in panic dumps.
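To make the pattern concrete outside of the patch itself, here is a minimal, self-contained C sketch; the demo_stream struct, demo_process() function and the loop below are hypothetical stand-ins, not HAProxy code. Each major block increments its own counter on entry and the counters are never reset, so a later dump shows how many times each block ran:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for a stream carrying per-block pass counters */
struct demo_stream {
	uint32_t passes_stconn;   /* passes on the stconn evaluation code */
	uint32_t passes_reqana;   /* passes on the req analysers block */
	uint32_t passes_resana;   /* passes on the res analysers block */
	uint32_t passes_propag;   /* passes on the shut/err propag code */
};

/* one call stands for one wakeup of the processing function: every
 * block bumps its counter on entry, whether or not it makes progress */
static void demo_process(struct demo_stream *s)
{
	s->passes_stconn++;   /* stconn state evaluation would run here */
	s->passes_reqana++;   /* request analysers would run here */
	s->passes_resana++;   /* response analysers would run here */
	s->passes_propag++;   /* shut/err propagation would run here */
}

int main(void)
{
	struct demo_stream s = {0};
	int i;

	for (i = 0; i < 3; i++)
		demo_process(&s);

	/* same output format as the line added to strm_dump_to_buffer() */
	printf("p_stc=%u p_req=%u p_res=%u p_prp=%u\n",
	       s.passes_stconn, s.passes_reqana, s.passes_resana,
	       s.passes_propag);
	return 0;
}

A stream stuck in a loop stands out immediately in such a dump: the counter of the re-entered block keeps growing while the others stall.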
wtarreau committed Oct 22, 2024
1 parent ce314cf commit 37d5c6f
Showing 2 changed files with 12 additions and 1 deletion.
4 changes: 4 additions & 0 deletions include/haproxy/stream-t.h
@@ -246,6 +246,10 @@ struct stream {
uint64_t lat_time; /* total latency time experienced */
uint64_t cpu_time; /* total CPU time consumed */
struct freq_ctr call_rate; /* stream task call rate without making progress */
+ uint32_t passes_stconn; /* number of passes on the stconn evaluation code */
+ uint32_t passes_reqana; /* number of passes on the req analysers block */
+ uint32_t passes_resana; /* number of passes on the res analysers block */
+ uint32_t passes_propag; /* number of passes on the shut/err propag code */

unsigned short max_retries; /* Maximum number of connection retried (=0 is backend is not set) */
short store_count;
9 changes: 8 additions & 1 deletion src/stream.c
@@ -421,6 +421,7 @@ struct stream *stream_new(struct session *sess, struct stconn *sc, struct buffer

s->lat_time = s->cpu_time = 0;
s->call_rate.curr_tick = s->call_rate.curr_ctr = s->call_rate.prev_ctr = 0;
+ s->passes_stconn = s->passes_reqana = s->passes_resana = s->passes_propag = 0;
s->pcli_next_pid = 0;
s->pcli_flags = 0;
s->unique_id = IST_NULL;
@@ -1840,6 +1841,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
* the client cannot have connect (hence retryable) errors. Also, the
* connection setup code must be able to deal with any type of abort.
*/
+ s->passes_stconn++;
srv = objt_server(s->target);
if (unlikely(scf->flags & SC_FL_ERROR)) {
if (sc_state_in(scf->state, SC_SB_EST|SC_SB_DIS)) {
@@ -1969,6 +1971,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
*/

resync_request:
+ s->passes_reqana++;
/* Analyse request */
if (((req->flags & ~rqf_last) & CF_MASK_ANALYSER) ||
((scf->flags ^ scf_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
@@ -2073,6 +2076,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
req_ana_back = req->analysers;

resync_response:
+ s->passes_resana++;
/* Analyse response */

if (((res->flags & ~rpf_last) & CF_MASK_ANALYSER) ||
@@ -2155,7 +2159,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
 * both buffers.
 */

-
+ s->passes_propag++;
/*
* Now we propagate unhandled errors to the stream. Normally
* we're just in a data phase here since it means we have not
@@ -3308,6 +3312,9 @@ void strm_dump_to_buffer(struct buffer *buf, const struct stream *strm, const ch
strm->conn_err_type, strm->srv_conn, strm->pend_pos,
LIST_INLIST(&strm->buffer_wait.list), strm->stream_epoch);

+ chunk_appendf(buf, "%s p_stc=%u p_req=%u p_res=%u p_prp=%u\n", pfx,
+       strm->passes_stconn, strm->passes_reqana, strm->passes_resana, strm->passes_propag);
+
chunk_appendf(buf,
"%s frontend=%s (id=%u mode=%s), listener=%s (id=%u)", pfx,
HA_ANON_STR(anon_key, strm_fe(strm)->id), strm_fe(strm)->uuid, proxy_mode_str(strm_fe(strm)->mode),
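With the patch applied, "show sess all" thus gains one extra line per stream, for example (values hypothetical): p_stc=5 p_req=5 p_res=4 p_prp=5. When a loop is suspected, the counter that keeps climbing while the others stand still points at the block being re-entered, which is exactly the information the commit message says was missing when such loops were witnessed.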
