From d0b3d3aec17a4b269191b96cc995b21d64e61ef1 Mon Sep 17 00:00:00 2001
From: Jason Cwik
Date: Wed, 16 Oct 2024 08:49:51 -0500
Subject: [PATCH] Fix hang at end of distributed warp with influxdb (#340)

Fixed issue where distributed warp hangs at end

When running distributed warp with influxdb, the app would always hang at
the end of each run. This is because the channel used to send operations to
Influx isn't used on the initiator process and thus never gets closed. The
app then hangs on the global wait group for the influx goroutine to
complete.
---
 cli/benchmark.go | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/cli/benchmark.go b/cli/benchmark.go
index 51cf323..46ef82f 100644
--- a/cli/benchmark.go
+++ b/cli/benchmark.go
@@ -102,7 +102,12 @@ func runBench(ctx *cli.Context, b bench.Benchmark) error {
 		b.GetCommon().ClientIdx = ab.clientIdx
 		return runClientBenchmark(ctx, b, ab)
 	}
+
 	if done, err := runServerBenchmark(ctx, b); done || err != nil {
+		// Close all extra output channels so the benchmark will terminate
+		for _, out := range b.GetCommon().ExtraOut {
+			close(out)
+		}
 		fatalIf(probe.NewError(err), "Error running remote benchmark")
 		return nil
 	}
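
Note (not part of the patch): a minimal, self-contained Go sketch of the
failure mode the commit message describes, where a consumer goroutine ranges
over a channel under a WaitGroup and the process hangs unless the channel is
closed. The identifiers here (ops, wg) are illustrative, not warp's actual
names; the real fix closes the ExtraOut channels as shown in the hunk above.

    package main

    import (
    	"fmt"
    	"sync"
    )

    func main() {
    	ops := make(chan string)
    	var wg sync.WaitGroup

    	wg.Add(1)
    	go func() {
    		defer wg.Done()
    		// The consumer only returns once the channel is closed.
    		for op := range ops {
    			fmt.Println("recording:", op)
    		}
    	}()

    	// The initiator never sends on this channel, so without an explicit
    	// close the range loop above never ends and wg.Wait() blocks forever.
    	close(ops)

    	wg.Wait()
    	fmt.Println("benchmark terminated cleanly")
    }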