Commit

[chore] Fix MergeSplit benchmarks and always initialize using new (#12250)

Signed-off-by: Bogdan Drutu <[email protected]>
bogdandrutu authored Feb 3, 2025
1 parent bcf27e1 commit 643a35f
Showing 3 changed files with 20 additions and 18 deletions.
12 changes: 6 additions & 6 deletions exporter/exporterhelper/logs_batch_test.go
@@ -157,9 +157,9 @@ func BenchmarkSplittingBasedOnItemCountManySmallLogs(b *testing.B) {
 	// All requests merge into a single batch.
 	cfg := exporterbatcher.MaxSizeConfig{MaxSizeItems: 10000}
 	for i := 0; i < b.N; i++ {
-		merged := []Request{&logsRequest{ld: testdata.GenerateLogs(10)}}
+		merged := []Request{newLogsRequest(testdata.GenerateLogs(10), nil)}
 		for j := 0; j < 1000; j++ {
-			lr2 := &logsRequest{ld: testdata.GenerateLogs(10)}
+			lr2 := newLogsRequest(testdata.GenerateLogs(10), nil)
 			res, _ := merged[len(merged)-1].MergeSplit(context.Background(), cfg, lr2)
 			merged = append(merged[0:len(merged)-1], res...)
 		}
@@ -171,9 +171,9 @@ func BenchmarkSplittingBasedOnItemCountManyLogsSlightlyAboveLimit(b *testing.B) {
 	// Every incoming request results in a split.
 	cfg := exporterbatcher.MaxSizeConfig{MaxSizeItems: 10000}
 	for i := 0; i < b.N; i++ {
-		merged := []Request{&logsRequest{ld: testdata.GenerateLogs(0)}}
+		merged := []Request{newLogsRequest(testdata.GenerateLogs(0), nil)}
 		for j := 0; j < 10; j++ {
-			lr2 := &logsRequest{ld: testdata.GenerateLogs(10001)}
+			lr2 := newLogsRequest(testdata.GenerateLogs(10001), nil)
 			res, _ := merged[len(merged)-1].MergeSplit(context.Background(), cfg, lr2)
 			merged = append(merged[0:len(merged)-1], res...)
 		}
@@ -185,8 +185,8 @@ func BenchmarkSplittingBasedOnItemCountHugeLogs(b *testing.B) {
 	// One request splits into many batches.
 	cfg := exporterbatcher.MaxSizeConfig{MaxSizeItems: 10000}
 	for i := 0; i < b.N; i++ {
-		merged := []Request{&logsRequest{ld: testdata.GenerateLogs(0)}}
-		lr2 := &logsRequest{ld: testdata.GenerateLogs(100000)}
+		merged := []Request{newLogsRequest(testdata.GenerateLogs(0), nil)}
+		lr2 := newLogsRequest(testdata.GenerateLogs(100000), nil)
 		res, _ := merged[len(merged)-1].MergeSplit(context.Background(), cfg, lr2)
 		merged = append(merged[0:len(merged)-1], res...)
 		assert.Len(b, merged, 10)
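
The change in each hunk is the same: build the request through the package constructor instead of a struct literal, so the benchmarks run whatever initialization the constructor performs. A minimal sketch of the shape newLogsRequest presumably has — the pusher parameter and any field beyond ld are assumptions; the real definition lives in exporterhelper and may set up additional internal state, which is exactly why the benchmarks now go through it:

// Sketch only — not the actual exporterhelper source.
package exporterhelper

import (
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/pdata/plog"
)

type logsRequest struct {
	ld     plog.Logs                // the payload, as seen in the old struct literals
	pusher consumer.ConsumeLogsFunc // assumed callback; the benchmarks pass nil for it
}

// newLogsRequest is assumed to be the canonical constructor, so tests and
// benchmarks that call it exercise the same initialization path as production
// code rather than a hand-rolled literal.
func newLogsRequest(ld plog.Logs, pusher consumer.ConsumeLogsFunc) *logsRequest {
	return &logsRequest{ld: ld, pusher: pusher}
}
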
12 changes: 6 additions & 6 deletions exporter/exporterhelper/metrics_batch_test.go
@@ -165,9 +165,9 @@ func BenchmarkSplittingBasedOnItemCountManySmallMetrics(b *testing.B) {
 	// All requests merge into a single batch.
 	cfg := exporterbatcher.MaxSizeConfig{MaxSizeItems: 20000}
 	for i := 0; i < b.N; i++ {
-		merged := []Request{&metricsRequest{md: testdata.GenerateMetrics(10)}}
+		merged := []Request{newMetricsRequest(testdata.GenerateMetrics(10), nil)}
 		for j := 0; j < 1000; j++ {
-			lr2 := &metricsRequest{md: testdata.GenerateMetrics(10)}
+			lr2 := newMetricsRequest(testdata.GenerateMetrics(10), nil)
 			res, _ := merged[len(merged)-1].MergeSplit(context.Background(), cfg, lr2)
 			merged = append(merged[0:len(merged)-1], res...)
 		}
@@ -179,9 +179,9 @@ func BenchmarkSplittingBasedOnItemCountManyMetricsSlightlyAboveLimit(b *testing.B) {
 	// Every incoming request results in a split.
 	cfg := exporterbatcher.MaxSizeConfig{MaxSizeItems: 20000}
 	for i := 0; i < b.N; i++ {
-		merged := []Request{&metricsRequest{md: testdata.GenerateMetrics(0)}}
+		merged := []Request{newMetricsRequest(testdata.GenerateMetrics(0), nil)}
 		for j := 0; j < 10; j++ {
-			lr2 := &metricsRequest{md: testdata.GenerateMetrics(10001)}
+			lr2 := newMetricsRequest(testdata.GenerateMetrics(10001), nil)
 			res, _ := merged[len(merged)-1].MergeSplit(context.Background(), cfg, lr2)
 			merged = append(merged[0:len(merged)-1], res...)
 		}
@@ -193,8 +193,8 @@ func BenchmarkSplittingBasedOnItemCountHugeMetrics(b *testing.B) {
 	// One request splits into many batches.
 	cfg := exporterbatcher.MaxSizeConfig{MaxSizeItems: 20000}
 	for i := 0; i < b.N; i++ {
-		merged := []Request{&metricsRequest{md: testdata.GenerateMetrics(0)}}
-		lr2 := &metricsRequest{md: testdata.GenerateMetrics(100000)}
+		merged := []Request{newMetricsRequest(testdata.GenerateMetrics(0), nil)}
+		lr2 := newMetricsRequest(testdata.GenerateMetrics(100000), nil)
 		res, _ := merged[len(merged)-1].MergeSplit(context.Background(), cfg, lr2)
 		merged = append(merged[0:len(merged)-1], res...)
 		assert.Len(b, merged, 10)
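
For intuition about what the "Huge" benchmarks assert, here is a toy, self-contained sketch of merge-then-split by item count — not the collector's actual MergeSplit, which operates on pdata payloads and counts log records, metric data points, or spans. With the 10000-item limit used by the logs and traces benchmarks (the metrics benchmarks use 20000, presumably because their item unit is data points), merging an empty seed request with a 100000-item request yields 10 batches, matching assert.Len(b, merged, 10):

package main

import "fmt"

// mergeSplit is a toy stand-in: it merges two item counts and re-splits the
// total into chunks of at most maxItems, which is the contract the benchmark
// loops rely on when they replace the last element of merged with the result.
func mergeSplit(last, incoming, maxItems int) []int {
	total := last + incoming
	var out []int
	for total > maxItems {
		out = append(out, maxItems)
		total -= maxItems
	}
	return append(out, total)
}

func main() {
	merged := []int{0} // the empty GenerateLogs(0)/GenerateTraces(0)-style seed request
	res := mergeSplit(merged[len(merged)-1], 100000, 10000)
	merged = append(merged[:len(merged)-1], res...)
	fmt.Println(len(merged)) // 10
}
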
14 changes: 8 additions & 6 deletions exporter/exporterhelper/traces_batch_test.go
@@ -163,10 +163,11 @@ func TestExtractTraces(t *testing.T) {
 func BenchmarkSplittingBasedOnItemCountManySmallTraces(b *testing.B) {
 	// All requests merge into a single batch.
 	cfg := exporterbatcher.MaxSizeConfig{MaxSizeItems: 10000}
+	b.ReportAllocs()
 	for i := 0; i < b.N; i++ {
-		merged := []Request{&tracesRequest{td: testdata.GenerateTraces(10)}}
+		merged := []Request{newTracesRequest(testdata.GenerateTraces(10), nil)}
 		for j := 0; j < 1000; j++ {
-			lr2 := &tracesRequest{td: testdata.GenerateTraces(10)}
+			lr2 := newTracesRequest(testdata.GenerateTraces(10), nil)
 			res, _ := merged[len(merged)-1].MergeSplit(context.Background(), cfg, lr2)
 			merged = append(merged[0:len(merged)-1], res...)
 		}
@@ -177,10 +178,11 @@ func BenchmarkSplittingBasedOnItemCountManySmallTraces(b *testing.B) {
 func BenchmarkSplittingBasedOnItemCountManyTracesSlightlyAboveLimit(b *testing.B) {
 	// Every incoming request results in a split.
 	cfg := exporterbatcher.MaxSizeConfig{MaxSizeItems: 10000}
+	b.ReportAllocs()
 	for i := 0; i < b.N; i++ {
-		merged := []Request{&tracesRequest{td: testdata.GenerateTraces(0)}}
+		merged := []Request{newTracesRequest(testdata.GenerateTraces(0), nil)}
 		for j := 0; j < 10; j++ {
-			lr2 := &tracesRequest{td: testdata.GenerateTraces(10001)}
+			lr2 := newTracesRequest(testdata.GenerateTraces(10001), nil)
 			res, _ := merged[len(merged)-1].MergeSplit(context.Background(), cfg, lr2)
 			merged = append(merged[0:len(merged)-1], res...)
 		}
@@ -192,8 +194,8 @@ func BenchmarkSplittingBasedOnItemCountHugeTraces(b *testing.B) {
 	// One request splits into many batches.
 	cfg := exporterbatcher.MaxSizeConfig{MaxSizeItems: 10000}
 	for i := 0; i < b.N; i++ {
-		merged := []Request{&tracesRequest{td: testdata.GenerateTraces(0)}}
-		lr2 := &tracesRequest{td: testdata.GenerateTraces(100000)}
+		merged := []Request{newTracesRequest(testdata.GenerateTraces(0), nil)}
+		lr2 := newTracesRequest(testdata.GenerateTraces(100000), nil)
 		res, _ := merged[len(merged)-1].MergeSplit(context.Background(), cfg, lr2)
 		merged = append(merged[0:len(merged)-1], res...)
 		assert.Len(b, merged, 10)
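
Beyond the constructor switch, two of the traces benchmarks also gain b.ReportAllocs(), which makes them print B/op and allocs/op even when the -benchmem flag is not passed. A minimal, generic sketch of the effect, using only the standard testing package (not collector code):

// In some *_test.go file.
package example

import "testing"

// BenchmarkWithAllocs reports B/op and allocs/op in its output regardless of
// the -benchmem flag, because of the b.ReportAllocs() call.
func BenchmarkWithAllocs(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		_ = make([]byte, 64) // some allocating work per iteration
	}
}
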
