fix: revise kafka batch sink (#3520)
Signed-off-by: Song Gao <[email protected]>
Yisaer authored Jan 17, 2025
1 parent d5ab84d commit a936944
Showing 5 changed files with 24 additions and 13 deletions.
2 changes: 1 addition & 1 deletion extensions/sinks/kafka/ext/kafka.go
@@ -328,7 +328,7 @@ func getDefaultKafkaConf() *kafkaConf {
         BatchSize:    5000,
         // send batch ASAP
         BatchTimeout: time.Microsecond,
-        BatchBytes:   1048576 * 10, // 10MB
+        BatchBytes:   10 * 1048576, // 10MB
     }
     return c
 }
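
Note: these batching fields correspond one-to-one to segmentio/kafka-go's Writer options, which this sink appears to build on. A minimal sketch of a writer constructed with the same defaults — the broker address and topic below are placeholders, not values from this commit:

package main

import (
	"time"

	kafkago "github.com/segmentio/kafka-go"
)

// newWriter builds a kafka-go Writer with batching defaults mirroring
// getDefaultKafkaConf. Addr and Topic are illustrative placeholders.
func newWriter() *kafkago.Writer {
	return &kafkago.Writer{
		Addr:         kafkago.TCP("localhost:9092"),
		Topic:        "demo",
		BatchSize:    5000,             // flush after 5000 messages...
		BatchTimeout: time.Microsecond, // ...or after 1µs, whichever comes first
		BatchBytes:   10 * 1048576,     // ...or once a batch reaches 10MB
	}
}

With a near-zero BatchTimeout, batches are sent almost as soon as messages arrive, so BatchSize and BatchBytes act as upper bounds rather than targets.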
1 change: 1 addition & 0 deletions internal/topo/node/batch_op.go
@@ -138,6 +138,7 @@ func (b *BatchOp) send() {
         Content: make([]xsql.Row, 0, b.batchSize),
     }
     b.currIndex = 0
+    b.statManager.SetBufferLength(int64(len(b.input) + b.currIndex))
 }
 
 func (b *BatchOp) runWithBatchSize(ctx api.StreamContext) {
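
Note: the added SetBufferLength call counts both rows still queued in the input channel and rows already collected into the not-yet-sent batch (currIndex), rather than the channel backlog alone. A toy illustration of the arithmetic, with hypothetical names rather than the actual eKuiper types:

package main

import "fmt"

// batchOp stands in for a batching node: rows wait either in the input
// channel or in the partially filled batch tracked by currIndex.
type batchOp struct {
	input     chan int
	currIndex int
}

// bufferLength mirrors the metric computed in this commit: channel
// backlog plus rows accumulated into the current batch.
func (b *batchOp) bufferLength() int64 {
	return int64(len(b.input) + b.currIndex)
}

func main() {
	b := &batchOp{input: make(chan int, 8), currIndex: 3}
	b.input <- 1
	b.input <- 2
	fmt.Println(b.bufferLength()) // 5: 2 queued + 3 already batched
}

(In send itself the call lands after currIndex is reset to 0, so at that point it reports just the channel backlog.)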
2 changes: 1 addition & 1 deletion internal/topo/node/metric/prometheus.go
@@ -81,7 +81,7 @@ func newPrometheusMetrics() *PrometheusMetrics {
     processLatencyHist := prometheus.NewHistogramVec(prometheus.HistogramOpts{
         Name:    prefix + "_" + ProcessLatencyUsHist,
         Help:    "Histograms of process latency in millisecond of " + prefix,
-        Buckets: prometheus.ExponentialBuckets(10, 2, 20), // 10us ~ 5s
+        Buckets: prometheus.ExponentialBuckets(10, 2, 25), // 10us ~ 160s
     }, labelNames)
     bufferLength := prometheus.NewGaugeVec(prometheus.GaugeOpts{
         Name: prefix + "_" + BufferLength,
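
Note: ExponentialBuckets(start, factor, count) returns count bucket bounds starting at start and doubling each step, so this change extends the histogram's top bucket from 10·2^19 ≈ 5.2s to 10·2^24 ≈ 167.8s of latency in microseconds (the "~160s" in the comment), letting slow sinks register instead of all landing in the overflow bucket. A quick check:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// 25 buckets: 10, 20, 40, ..., 10*2^24 = 167,772,160 (µs).
	buckets := prometheus.ExponentialBuckets(10, 2, 25)
	fmt.Println(buckets[0], buckets[len(buckets)-1]) // 10 1.6777216e+08
}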
30 changes: 20 additions & 10 deletions internal/topo/node/sink_node.go
@@ -287,10 +287,13 @@ func (m *SinkNode) Open(ctx api.StreamContext, result chan<- error) {
     if resendCh == nil { // no resend strategy
         for {
             select {
-            case data := <-m.input:
-                receiveQ(data)
             case data := <-dataOutCh:
                 normalQ(data)
+            default:
+            }
+            select {
+            case data := <-m.input:
+                receiveQ(data)
             case <-ctx.Done():
                 doneQ()
                 return nil
@@ -300,10 +303,13 @@ func (m *SinkNode) Open(ctx api.StreamContext, result chan<- error) {
     if sconf.ResendPriority == 0 {
         for {
             select {
-            case data := <-m.input:
-                receiveQ(data)
             case data := <-dataOutCh:
                 normalQ(data)
+            default:
+            }
+            select {
+            case data := <-m.input:
+                receiveQ(data)
             case data := <-rq.Out:
                 resendQ(data)
             case <-ctx.Done():
@@ -314,16 +320,17 @@ func (m *SinkNode) Open(ctx api.StreamContext, result chan<- error) {
     } else if sconf.ResendPriority < 0 { // normal queue has higher priority
         for {
             select {
-            case data := <-m.input:
-                receiveQ(data)
             case data := <-dataOutCh:
                 normalQ(data)
             default:
+            }
+            select {
+            case data := <-m.input:
+                receiveQ(data)
+            default:
                 select {
                 case data := <-m.input:
                     receiveQ(data)
-                case data := <-dataOutCh:
-                    normalQ(data)
                 case data := <-rq.Out:
                     resendQ(data)
                 case <-ctx.Done():
@@ -334,6 +341,11 @@ func (m *SinkNode) Open(ctx api.StreamContext, result chan<- error) {
         }
     } else {
         for {
+            select {
+            case data := <-dataOutCh:
+                normalQ(data)
+            default:
+            }
             select {
             case data := <-m.input:
                 receiveQ(data)
@@ -343,8 +355,6 @@ func (m *SinkNode) Open(ctx api.StreamContext, result chan<- error) {
                 select {
                 case data := <-m.input:
                     receiveQ(data)
-                case data := <-dataOutCh:
-                    normalQ(data)
                 case data := <-rq.Out:
                     resendQ(data)
                 case <-ctx.Done():
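
Note: all four branches get the same treatment: the dataOutCh case is hoisted out of the blocking select into a preceding non-blocking select (empty default), so pending sink output is drained once on every loop iteration before the node blocks on input, instead of competing in a single select where a ready channel is chosen at random. A minimal, self-contained sketch of the pattern with illustrative channel names:

package main

import "fmt"

// drainThenReceive first drains anything waiting on out without
// blocking, then blocks on the remaining channels, mirroring the
// two-step select introduced in this commit.
func drainThenReceive(in, out <-chan string, done <-chan struct{}) {
	for {
		// Step 1: opportunistic, non-blocking drain of out.
		select {
		case v := <-out:
			fmt.Println("out:", v)
		default:
		}
		// Step 2: block until input arrives or shutdown is signaled.
		select {
		case v := <-in:
			fmt.Println("in:", v)
		case <-done:
			return
		}
	}
}

func main() {
	in := make(chan string)
	out := make(chan string, 1)
	done := make(chan struct{})
	out <- "buffered"
	close(done)
	drainThenReceive(in, out, done) // prints "out: buffered", then returns
}

One caveat of the pattern: when both channels are ready, Go's select no longer tie-breaks randomly between them — the drain always wins exactly once per iteration.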
2 changes: 1 addition & 1 deletion internal/topo/node/source_node.go
@@ -90,7 +90,7 @@ func (m *SourceNode) Open(ctx api.StreamContext, errCh chan<- error) {
         }
     }
     bl := 102400
-    if c, ok := props["bufferLength"]; ok {
+    if c, ok := props["bufferlength"]; ok {
         if t, err := cast.ToInt(c, cast.STRICT); err != nil || t <= 0 {
             logger.Warnf("invalid type for bufferLength property, should be positive integer but found %t", c)
         } else {
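
Note: the fix is in the lookup key only. Judging by the change, source properties arrive with lower-cased keys, so the camel-cased "bufferLength" lookup never matched and the 102400 default always won. A sketch of the pitfall, with a hypothetical normalization step standing in for whatever lower-cases the keys upstream:

package main

import (
	"fmt"
	"strings"
)

// lowerKeys mimics an upstream step that normalizes property names,
// which would make a user's "bufferLength" arrive as "bufferlength".
func lowerKeys(props map[string]any) map[string]any {
	out := make(map[string]any, len(props))
	for k, v := range props {
		out[strings.ToLower(k)] = v
	}
	return out
}

func main() {
	props := lowerKeys(map[string]any{"bufferLength": 204800})

	bl := 102400 // default
	if c, ok := props["bufferLength"]; ok { // old key: never found
		bl = c.(int)
	}
	fmt.Println(bl) // 102400 — the user's setting is silently ignored

	if c, ok := props["bufferlength"]; ok { // fixed key
		bl = c.(int)
	}
	fmt.Println(bl) // 204800
}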
