Add support for uperf driver #118

Merged (1 commit, Dec 5, 2023)
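In short: this PR wires uperf in as an optional third load driver next to netperf and iperf3. It adds a --uperf flag, a dedicated uperf Service (control port 30000, data port 30001), a uperf server container in the server pods, driver plumbing through executeWorkload backed by a new pkg/uperf package (Run, ParseResults, TestSupported), and a Driver column in the RR latency table; the container image installs uperf from EPEL.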
7 changes: 7 additions & 0 deletions README.md
@@ -62,6 +62,7 @@ Flags:
--debug Enable debug log
-h, --help help for k8s-netperf
--iperf Use iperf3 as load driver (along with netperf)
+ --uperf Use uperf as load driver (along with netperf)
--json Instead of human-readable output, return JSON to stdout
--local Run network performance tests with Server-Pods/Client-Pods on the same Node
--metrics Show all system metrics retrieved from prom
@@ -124,16 +125,22 @@ $ ./k8s-netperf --tcp-tolerance 1
+-------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+--------------------+
| 📊 Stream Results | netperf | TCP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 2661.006667 (Mb/s) |
| 📊 Stream Results | iperf3 | TCP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 2483.078229 (Mb/s) |
+ | 📊 Stream Results | uperf | TCP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 2581.705097 (Mb/s) |
| 📊 Stream Results | netperf | TCP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 2702.230000 (Mb/s) |
| 📊 Stream Results | iperf3 | TCP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 2523.434069 (Mb/s) |
+ | 📊 Stream Results | uperf | TCP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 2567.665412 (Mb/s) |
| 📊 Stream Results | netperf | TCP_STREAM | 1 | true | false | 8192 | false | 10 | 3 | 2697.276667 (Mb/s) |
| 📊 Stream Results | iperf3 | TCP_STREAM | 1 | true | false | 8192 | false | 10 | 3 | 2542.793728 (Mb/s) |
+ | 📊 Stream Results | uperf | TCP_STREAM | 1 | true | false | 8192 | false | 10 | 3 | 2571.881579 (Mb/s) |
| 📊 Stream Results | netperf | TCP_STREAM | 1 | false | false | 8192 | false | 10 | 3 | 2707.076667 (Mb/s) |
| 📊 Stream Results | iperf3 | TCP_STREAM | 1 | false | false | 8192 | false | 10 | 3 | 2604.067072 (Mb/s) |
+ | 📊 Stream Results | uperf | TCP_STREAM | 1 | false | false | 8192 | false | 10 | 3 | 2687.276667 (Mb/s) |
| 📊 Stream Results | netperf | UDP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 1143.926667 (Mb/s) |
| 📊 Stream Results | iperf3 | UDP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 1202.428288 (Mb/s) |
+ | 📊 Stream Results | uperf | UDP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 1242.059988 (Mb/s) |
| 📊 Stream Results | netperf | UDP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 1145.066667 (Mb/s) |
| 📊 Stream Results | iperf3 | UDP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 1239.580672 (Mb/s) |
+ | 📊 Stream Results | uperf | UDP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 1261.840000 (Mb/s) |
+-------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+--------------------+
+---------------+---------+----------+-------------+--------------+---------+--------------+-----------+----------+---------+---------------------+
| RESULT TYPE | DRIVER | SCENARIO | PARALLELISM | HOST NETWORK | SERVICE | MESSAGE SIZE | SAME NODE | DURATION | SAMPLES | AVG VALUE |
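A run that produces the uperf rows above alongside netperf and iperf3 would be invoked roughly like this (illustrative; all other options left at their defaults):

$ ./k8s-netperf --iperf --uperf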
45 changes: 40 additions & 5 deletions cmd/k8s-netperf/k8s-netperf.go
@@ -20,6 +20,7 @@ import (
"github.com/cloud-bulldozer/k8s-netperf/pkg/netperf"
result "github.com/cloud-bulldozer/k8s-netperf/pkg/results"
"github.com/cloud-bulldozer/k8s-netperf/pkg/sample"
+ uperf_driver "github.com/cloud-bulldozer/k8s-netperf/pkg/uperf"
"github.com/google/uuid"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -36,6 +37,7 @@ var (
nl bool
clean bool
iperf3 bool
+ uperf bool
acrossAZ bool
full bool
debug bool
@@ -158,24 +160,36 @@ var rootCmd = &cobra.Command{
if s.HostNetwork {
// No need to run hostNetwork through Service.
if !nc.Service {
- npr := executeWorkload(nc, s, true, false)
+ npr := executeWorkload(nc, s, true, false, false)
sr.Results = append(sr.Results, npr)
if iperf3 {
- ipr := executeWorkload(nc, s, true, true)
+ ipr := executeWorkload(nc, s, true, true, false)
if len(ipr.Profile) > 1 {
sr.Results = append(sr.Results, ipr)
}
}
+ if uperf {
+ upr := executeWorkload(nc, s, true, false, true)
+ if len(upr.Profile) > 1 {
+ sr.Results = append(sr.Results, upr)
+ }
+ }
}
}
- npr := executeWorkload(nc, s, false, false)
+ npr := executeWorkload(nc, s, false, false, false)
sr.Results = append(sr.Results, npr)
if iperf3 {
- ipr := executeWorkload(nc, s, false, true)
+ ipr := executeWorkload(nc, s, false, true, false)
if len(ipr.Profile) > 1 {
sr.Results = append(sr.Results, ipr)
}
}
+ if uperf {
+ upr := executeWorkload(nc, s, false, false, true)
+ if len(upr.Profile) > 1 {
+ sr.Results = append(sr.Results, upr)
+ }
+ }
}

var fTime time.Time
@@ -323,7 +337,7 @@ func cleanup(client *kubernetes.Clientset) {

}

- func executeWorkload(nc config.Config, s config.PerfScenarios, hostNet bool, iperf3 bool) result.Data {
+ func executeWorkload(nc config.Config, s config.PerfScenarios, hostNet bool, iperf3 bool, uperf bool) result.Data {
serverIP := ""
service := false
sameNode := true
@@ -332,6 +346,8 @@ func executeWorkload(nc config.Config, s config.PerfScenarios, hostNet bool, ipe
service = true
if iperf3 {
serverIP = s.IperfService.Spec.ClusterIP
+ } else if uperf {
+ serverIP = s.UperfService.Spec.ClusterIP
} else {
serverIP = s.NetperfService.Spec.ClusterIP
}
@@ -356,6 +372,12 @@ func executeWorkload(nc config.Config, s config.PerfScenarios, hostNet bool, ipe
return npr
}
}
+ if uperf {
+ // uperf doesn't support all test cases
+ if !uperf_driver.TestSupported(nc.Profile) {
+ return npr
+ }
+ }

npr.Config = nc
npr.Metric = nc.Metric
@@ -383,6 +405,18 @@ func executeWorkload(nc config.Config, s config.PerfScenarios, hostNet bool, ipe
log.Error(err)
os.Exit(1)
}
+ } else if uperf {
+ npr.Driver = "uperf"
+ r, err := uperf_driver.Run(s.ClientSet, s.RestConfig, nc, Client, serverIP)
+ if err != nil {
+ log.Error(err)
+ os.Exit(1)
+ }
+ nr, err = uperf_driver.ParseResults(&r)
+ if err != nil {
+ log.Error(err)
+ os.Exit(1)
+ }
} else {
npr.Driver = "netperf"
r, err := netperf.Run(s.ClientSet, s.RestConfig, nc, Client, serverIP)
@@ -435,6 +469,7 @@ func executeWorkload(nc config.Config, s config.PerfScenarios, hostNet bool, ipe
func main() {
rootCmd.Flags().StringVar(&cfgfile, "config", "netperf.yml", "K8s netperf Configuration File")
rootCmd.Flags().BoolVar(&iperf3, "iperf", false, "Use iperf3 as load driver (along with netperf)")
+ rootCmd.Flags().BoolVar(&uperf, "uperf", false, "Use uperf as load driver (along with netperf)")
rootCmd.Flags().BoolVar(&clean, "clean", true, "Clean-up resources created by k8s-netperf")
rootCmd.Flags().BoolVar(&json, "json", false, "Instead of human-readable output, return JSON to stdout")
rootCmd.Flags().BoolVar(&nl, "local", false, "Run network performance tests with Server-Pods/Client-Pods on the same Node")
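A note on the dispatch above: executeWorkload treats its two driver booleans as an ordered if / else-if chain, so iperf3 wins over uperf and netperf is the fallback. A tiny self-contained model of that precedence, using hypothetical names rather than code from this PR:

package main

import "fmt"

// pick mirrors the if / else-if / else chain in executeWorkload:
// iperf3 is checked first, then uperf, otherwise netperf.
func pick(iperf3, uperf bool) string {
	switch {
	case iperf3:
		return "iperf3"
	case uperf:
		return "uperf"
	default:
		return "netperf"
	}
}

func main() {
	fmt.Println(pick(false, true)) // uperf
	fmt.Println(pick(true, true))  // iperf3: iperf3 takes precedence
}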
3 changes: 3 additions & 0 deletions containers/Containerfile
@@ -3,7 +3,10 @@ ARG RHEL_VERSION
FROM registry.access.redhat.com/${RHEL_VERSION}:latest

COPY appstream.repo /etc/yum.repos.d/centos8-appstream.repo

COPY netperf.diff /tmp/netperf.diff
+ RUN dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && dnf clean all
+ RUN dnf install -y uperf && dnf clean all

RUN dnf install -y --nodocs make automake --enablerepo=centos9 --allowerasing && \
dnf install -y --nodocs gcc git bc lksctp-tools-devel texinfo --enablerepo=*
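uperf is not in the stock RHEL repos, hence the EPEL release RPM installed first. A quick sanity check of a built image (illustrative command; substitute the image tag you actually built):

podman run --rm quay.io/cloud-bulldozer/netperf:latest rpm -q uperf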
1 change: 1 addition & 0 deletions pkg/config/config.go
@@ -41,6 +41,7 @@ type PerfScenarios struct {
ServerHost apiv1.PodList
NetperfService *apiv1.Service
IperfService *apiv1.Service
+ UperfService *apiv1.Service
RestConfig rest.Config
ClientSet *kubernetes.Clientset
}
62 changes: 48 additions & 14 deletions pkg/k8s/kubernetes.go
@@ -16,14 +16,15 @@ import (
)

// DeploymentParams describes the deployment
+ // Server pod can run multiple containers; each command in Commands runs in its own container
type DeploymentParams struct {
HostNetwork bool
Name string
Namespace string
Replicas int32
Image string
Labels map[string]string
- Command []string
+ Commands [][]string
PodAffinity apiv1.PodAffinity
PodAntiAffinity apiv1.PodAntiAffinity
NodeAffinity apiv1.NodeAffinity
@@ -47,12 +48,18 @@ const NetperfServerCtlPort = 12865
// IperfServerCtlPort control port for the service
const IperfServerCtlPort = 22865

+ // UperfServerCtlPort control port for the service
+ const UperfServerCtlPort = 30000

// NetperfServerDataPort data port for the service
const NetperfServerDataPort = 42424

// IperfServerDataPort data port for the service
const IperfServerDataPort = 43433

+ // UperfServerDataPort data port for the service
+ const UperfServerDataPort = 30001

// Labels we will apply to k8s assets.
const serverRole = "server"
const clientRole = "client-local"
@@ -136,7 +143,7 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
Replicas: 1,
Image: "quay.io/cloud-bulldozer/netperf:latest",
Labels: map[string]string{"role": clientRole},
- Command: []string{"/bin/bash", "-c", "sleep 10000000"},
+ Commands: [][]string{{"/bin/bash", "-c", "sleep 10000000"}},
Port: NetperfServerCtlPort,
}
if z != "" && numNodes > 1 {
@@ -180,6 +187,19 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
return fmt.Errorf("😥 Unable to create iperf service")
}

+ // Create uperf service
+ uperfSVC := ServiceParams{
+ Name: "uperf-service",
+ Namespace: "netperf",
+ Labels: map[string]string{"role": serverRole},
+ CtlPort: UperfServerCtlPort,
+ DataPort: UperfServerDataPort,
+ }
+ s.UperfService, err = CreateService(uperfSVC, client)
+ if err != nil {
+ return fmt.Errorf("😥 Unable to create uperf service")
+ }

// Create netperf service
netperfSVC := ServiceParams{
Name: "netperf-service",
@@ -198,7 +218,7 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
Replicas: 1,
Image: "quay.io/cloud-bulldozer/netperf:latest",
Labels: map[string]string{"role": clientAcrossRole},
- Command: []string{"/bin/bash", "-c", "sleep 10000000"},
+ Commands: [][]string{{"/bin/bash", "-c", "sleep 10000000"}},
Port: NetperfServerCtlPort,
}
cdpAcross.PodAntiAffinity = apiv1.PodAntiAffinity{
@@ -212,7 +232,7 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
HostNetwork: true,
Image: "quay.io/cloud-bulldozer/netperf:latest",
Labels: map[string]string{"role": hostNetClientRole},
- Command: []string{"/bin/bash", "-c", "sleep 10000000"},
+ Commands: [][]string{{"/bin/bash", "-c", "sleep 10000000"}},
Port: NetperfServerCtlPort,
}
if z != "" {
@@ -247,14 +267,20 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
return err
}
}

+ // Use separate containers for servers
+ dpCommands := [][]string{{"/bin/bash", "-c", "netserver && sleep 10000000"},
+ {"/bin/bash", "-c", fmt.Sprintf("iperf3 -s -p %d && sleep 10000000", IperfServerCtlPort)},
+ {"/bin/bash", "-c", fmt.Sprintf("uperf -s -v -P %d && sleep 10000000", UperfServerCtlPort)}}

sdpHost := DeploymentParams{
Name: "server-host",
Namespace: "netperf",
Replicas: 1,
HostNetwork: true,
Image: "quay.io/cloud-bulldozer/netperf:latest",
Labels: map[string]string{"role": hostNetServerRole},
- Command: []string{"/bin/bash", "-c", fmt.Sprintf("netserver && iperf3 -s -p %d && sleep 10000000", IperfServerCtlPort)},
+ Commands: dpCommands,
Port: NetperfServerCtlPort,
}
// Start netperf server
@@ -264,7 +290,7 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
Replicas: 1,
Image: "quay.io/cloud-bulldozer/netperf:latest",
Labels: map[string]string{"role": serverRole},
- Command: []string{"/bin/bash", "-c", fmt.Sprintf("netserver && iperf3 -s -p %d && sleep 10000000", IperfServerCtlPort)},
+ Commands: dpCommands,
Port: NetperfServerCtlPort,
}
if s.NodeLocal {
@@ -451,6 +477,21 @@ func CreateDeployment(dp DeploymentParams, client *kubernetes.Clientset) (*appsv
}
log.Infof("🚀 Starting Deployment for: %s in namespace: %s", dp.Name, dp.Namespace)
dc := client.AppsV1().Deployments(dp.Namespace)

+ // Add containers to deployment
+ var cmdContainers []apiv1.Container
+ for i := 0; i < len(dp.Commands); i++ {
+ // each container should have a unique name
+ containerName := fmt.Sprintf("%s-%d", dp.Name, i)
+ cmdContainers = append(cmdContainers,
+ apiv1.Container{
+ Name: containerName,
+ Image: dp.Image,
+ Command: dp.Commands[i],
+ ImagePullPolicy: apiv1.PullAlways,
+ })
+ }

deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: dp.Name,
Expand All @@ -467,14 +508,7 @@ func CreateDeployment(dp DeploymentParams, client *kubernetes.Clientset) (*appsv
Spec: apiv1.PodSpec{
ServiceAccountName: sa,
HostNetwork: dp.HostNetwork,
- Containers: []apiv1.Container{
- {
- Name: dp.Name,
- Image: dp.Image,
- Command: dp.Command,
- ImagePullPolicy: apiv1.PullAlways,
- },
- },
+ Containers: cmdContainers,
Affinity: &apiv1.Affinity{
NodeAffinity: &dp.NodeAffinity,
PodAffinity: &dp.PodAffinity,
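The net effect of the Commands change: a server pod now runs one container per command, and CreateDeployment derives a unique name per container. A self-contained sketch of that naming rule (stand-in struct; the real DeploymentParams carries many more fields):

package main

import "fmt"

// deploymentParams is a stand-in with only the fields this sketch needs.
type deploymentParams struct {
	Name     string
	Commands [][]string
}

// containerNames mirrors the loop in CreateDeployment: one container per
// command, each named "<deployment>-<index>" so names stay unique.
func containerNames(dp deploymentParams) []string {
	names := make([]string, 0, len(dp.Commands))
	for i := range dp.Commands {
		names = append(names, fmt.Sprintf("%s-%d", dp.Name, i))
	}
	return names
}

func main() {
	dp := deploymentParams{
		Name: "server-host",
		Commands: [][]string{
			{"/bin/bash", "-c", "netserver && sleep 10000000"},
			{"/bin/bash", "-c", "iperf3 -s -p 22865 && sleep 10000000"},
			{"/bin/bash", "-c", "uperf -s -v -P 30000 && sleep 10000000"},
		},
	}
	fmt.Println(containerNames(dp))
	// Output: [server-host-0 server-host-1 server-host-2]
}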
4 changes: 2 additions & 2 deletions pkg/results/result.go
@@ -281,11 +281,11 @@ func ShowRRResult(s ScenarioResults) {
func ShowLatencyResult(s ScenarioResults) {
if checkResults(s, "RR") {
logging.Debug("Rendering RR P99 Latency results")
- table := initTable([]string{"Result Type", "Scenario", "Parallelism", "Host Network", "Service", "Message Size", "Same node", "Duration", "Samples", "Avg 99%tile value"})
+ table := initTable([]string{"Result Type", "Driver", "Scenario", "Parallelism", "Host Network", "Service", "Message Size", "Same node", "Duration", "Samples", "Avg 99%tile value"})
for _, r := range s.Results {
if strings.Contains(r.Profile, "RR") {
p99, _ := Average(r.LatencySummary)
- table.Append([]string{"RR Latency Results", r.Profile, strconv.Itoa(r.Parallelism), strconv.FormatBool(r.HostNetwork), strconv.FormatBool(r.Service), strconv.Itoa(r.MessageSize), strconv.FormatBool(r.SameNode), strconv.Itoa(r.Duration), strconv.Itoa(r.Samples), fmt.Sprintf("%f (%s)", p99, "usec")})
+ table.Append([]string{"RR Latency Results", r.Driver, r.Profile, strconv.Itoa(r.Parallelism), strconv.FormatBool(r.HostNetwork), strconv.FormatBool(r.Service), strconv.Itoa(r.MessageSize), strconv.FormatBool(r.SameNode), strconv.Itoa(r.Duration), strconv.Itoa(r.Samples), fmt.Sprintf("%f (%s)", p99, "usec")})
}
}
table.Render()
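This brings the RR latency table in line with the stream table header shown in the README diff above (RESULT TYPE, DRIVER, SCENARIO, ...), so each latency row can be attributed to the driver that produced it.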