Skip to content

Commit

Permalink
Add confidence metric to csv and json
Browse files Browse the repository at this point in the history
Adding the confidence metric to the csv and json output.

closes #108

Signed-off-by: Joe Talerico <[email protected]>
  • Loading branch information
Joe Talerico committed Nov 9, 2023
1 parent 017b0dd commit 59b606a
Show file tree
Hide file tree
Showing 3 changed files with 31 additions and 17 deletions.
15 changes: 7 additions & 8 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -225,12 +225,11 @@ k8s-netperf will report TCP Retransmissions and UDP Loss for both workload drive
Example output
```csv
Profile,Same node,Host Network,Service,Duration,Parallelism,# of Samples,Message Size,Avg Throughput,Throughput Metric,99%tile Observed Latency,Latency Metric
TCP_STREAM,false,false,false,10,1,3,1024,1131.150000,Mb/s,23,usec
TCP_STREAM,false,false,false,10,2,3,1024,1710.150000,Mb/s,34,usec
TCP_STREAM,false,false,false,10,1,3,8192,4437.520000,Mb/s,30,usec
UDP_STREAM,false,false,false,10,1,3,1024,1159.790000,Mb/s,14,usec
TCP_CRR,false,false,false,10,1,3,1024,5954.940000,OP/s,456,usec
TCP_CRR,false,false,true,10,1,3,1024,1455.470000,OP/s,248,usec
TCP_RR,false,false,false,10,1,3,1024,41330.000000,OP/s,85,usec
Driver,Profile,Same node,Host Network,Service,Duration,Parallelism,# of Samples,Message Size,Confidence metric - low,Confidence metric - high,Avg Throughput,Throughput Metric,99%tile Observed Latency,Latency Metric
netperf,TCP_STREAM,false,false,false,10,1,3,1024,861.9391413991156,885.2741919342178,873.606667,Mb/s,3.3333333333333335,usec
netperf,TCP_STREAM,false,false,false,10,1,3,8192,178.12442996547009,1310.3422367011967,744.233333,Mb/s,2394.6666666666665,usec
netperf,UDP_STREAM,false,false,false,10,1,3,1024,584.3478157889886,993.4588508776783,788.903333,Mb/s,23,usec
netperf,TCP_CRR,false,false,false,10,1,3,1024,1889.3183973002176,2558.074936033115,2223.696667,OP/s,4682.666666666667,usec
netperf,TCP_CRR,false,false,true,10,1,3,1024,1169.206855676418,2954.3464776569153,2061.776667,OP/s,4679.333333333333,usec
netperf,TCP_RR,false,false,false,10,1,3,1024,6582.5359452538705,12085.437388079461,9333.986667,OP/s,451.3333333333333,usec
```
29 changes: 22 additions & 7 deletions pkg/archive/archive.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ type Doc struct {
ClientPodCPU []metrics.PodCPU `json:"clientPods"`
ClientNodeLabels map[string]string `json:"clientNodeLabels"`
ServerNodeLabels map[string]string `json:"serverNodeLabels"`
Confidence []float64 `json:"confidence"`
}

// Connect returns a client connected to the desired cluster.
Expand Down Expand Up @@ -80,6 +81,11 @@ func BuildDocs(sr result.ScenarioResults, uuid string) ([]interface{}, error) {
if len(r.Driver) < 1 {
continue
}
var lo, hi float64
if r.Samples > 1 {
_, lo, hi = result.ConfidenceInterval(r.ThroughputSummary, 0.95)
}
c := []float64{lo, hi}
d := Doc{
UUID: uuid,
Timestamp: time,
Expand All @@ -101,6 +107,7 @@ func BuildDocs(sr result.ScenarioResults, uuid string) ([]interface{}, error) {
ServerNodeLabels: r.ServerNodeLabels,
ClientNodeLabels: r.ClientNodeLabels,
AcrossAZ: r.AcrossAZ,
Confidence: c,
}
UDPLossPercent, e := result.Average(r.LossSummary)
if e != nil {
Expand Down Expand Up @@ -147,11 +154,17 @@ func commonCsvHeaderFields() []string {
"Parallelism",
"# of Samples",
"Message Size",
"Confidence metric - low",
"Confidence metric - high",
}
}

// Common csv data fields.
func commonCsvDataFeilds(row result.Data) []string {
func commonCsvDataFields(row result.Data) []string {
var lo, hi float64
if row.Samples > 1 {
_, lo, hi = result.ConfidenceInterval(row.ThroughputSummary, 0.95)
}
return []string{
fmt.Sprint(row.Driver),
fmt.Sprint(row.Profile),
Expand All @@ -162,15 +175,17 @@ func commonCsvDataFeilds(row result.Data) []string {
strconv.Itoa(row.Parallelism),
strconv.Itoa(row.Samples),
strconv.Itoa(row.MessageSize),
strconv.FormatFloat(lo, 'f', -1, 64),
strconv.FormatFloat(hi, 'f', -1, 64),
}
}

// Writes all the mertic feilds to the archive.
// Writes all the metrics to the archive.
func writeArchive(cpuarchive, podarchive *csv.Writer, role string, row result.Data, podResults []metrics.PodCPU) error {
roleFieldData := []string{role}
for _, pod := range podResults {
if err := podarchive.Write(append(append(roleFieldData,
commonCsvDataFeilds(row)...),
commonCsvDataFields(row)...),
pod.Name,
fmt.Sprintf("%f", pod.Value),
)); err != nil {
Expand All @@ -183,7 +198,7 @@ func writeArchive(cpuarchive, podarchive *csv.Writer, role string, row result.Da
cpu = row.ServerMetrics
}
if err := cpuarchive.Write(append(append(roleFieldData,
commonCsvDataFeilds(row)...),
commonCsvDataFields(row)...),
fmt.Sprintf("%f", cpu.Idle),
fmt.Sprintf("%f", cpu.User),
fmt.Sprintf("%f", cpu.System),
Expand Down Expand Up @@ -265,7 +280,7 @@ func WriteSpecificCSV(r result.ScenarioResults) error {
if strings.Contains(row.Profile, "UDP_STREAM") {
loss, _ := result.Average(row.LossSummary)
header := []string{"UDP Percent Loss"}
data := append(header, commonCsvDataFeilds(row)...)
data := append(header, commonCsvDataFields(row)...)
iperfdata = append(data, fmt.Sprintf("%f", loss))
if err := archive.Write(iperfdata); err != nil {
return fmt.Errorf("failed to write result archive to file")
Expand All @@ -274,7 +289,7 @@ func WriteSpecificCSV(r result.ScenarioResults) error {
if strings.Contains(row.Profile, "TCP_STREAM") {
rt, _ := result.Average(row.RetransmitSummary)
header := []string{"TCP Retransmissions"}
data := append(header, commonCsvDataFeilds(row)...)
data := append(header, commonCsvDataFields(row)...)
iperfdata = append(data, fmt.Sprintf("%f", rt))
if err := archive.Write(iperfdata); err != nil {
return fmt.Errorf("failed to write result archive to file")
Expand Down Expand Up @@ -322,7 +337,7 @@ func WriteCSVResult(r result.ScenarioResults) error {
for _, row := range r.Results {
avg, _ := result.Average(row.ThroughputSummary)
lavg, _ := result.Average(row.LatencySummary)
data := append(commonCsvDataFeilds(row),
data := append(commonCsvDataFields(row),
fmt.Sprintf("%f", avg),
row.Metric,
fmt.Sprint(lavg),
Expand Down
4 changes: 2 additions & 2 deletions pkg/results/result.go
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ func Percentile(vals []float64, ptile float64) (float64, error) {
}

// ConfidenceInterval accepts an array of floats and a confidence level and calculates the confidence interval.
func confidenceInterval(vals []float64, ci float64) (float64, float64, float64) {
// ConfidenceInterval computes the confidence interval for vals at the given
// confidence level ci (e.g. 0.95 for a 95% interval) by delegating to
// math.MeanCI. Callers in this file discard the first return and keep the
// last two as the low/high bounds — presumably (mean, low, high); confirm
// against the MeanCI implementation.
func ConfidenceInterval(vals []float64, ci float64) (float64, float64, float64) {
	return math.MeanCI(vals, ci)
}

Expand Down Expand Up @@ -250,7 +250,7 @@ func renderResults(s ScenarioResults, testType string) {
avg, _ := Average(r.ThroughputSummary)
var lo, hi float64
if r.Samples > 1 {
_, lo, hi = confidenceInterval(r.ThroughputSummary, 0.95)
_, lo, hi = ConfidenceInterval(r.ThroughputSummary, 0.95)
}
table.Append([]string{fmt.Sprintf("📊 %s Results", caser.String(strings.ToLower(testType))), r.Driver, r.Profile, strconv.Itoa(r.Parallelism), strconv.FormatBool(r.HostNetwork), strconv.FormatBool(r.Service), strconv.Itoa(r.MessageSize), strconv.FormatBool(r.SameNode), strconv.Itoa(r.Duration), strconv.Itoa(r.Samples), fmt.Sprintf("%f (%s)", avg, r.Metric), fmt.Sprintf("%f-%f (%s)", lo, hi, r.Metric)})
}
Expand Down

0 comments on commit 59b606a

Please sign in to comment.