From 11888db742c042af607fe084493d30cc2833742c Mon Sep 17 00:00:00 2001
From: Joe Talerico aka rook
Date: Thu, 9 Jan 2025 15:10:54 -0500
Subject: [PATCH 1/2] Label hint

If the user wants to provide hints to help schedule the client and server
on specific nodes, they can add the `netperf=true` label to the nodes they
want tested.

Signed-off-by: Joe Talerico aka rook
---
 README.md             | 10 ++++++++++
 pkg/k8s/kubernetes.go | 31 +++++++++++++++++++++++++++++++
 2 files changed, 41 insertions(+)

diff --git a/README.md b/README.md
index 6fa1d79c..c7bf5dd3 100644
--- a/README.md
+++ b/README.md
@@ -35,6 +35,16 @@ $ cd k8s-netperf
 $ make container-build
 ```
 
+## Label nodes
+k8s-netperf will make the best decision it can to schedule the client and server in your cluster. However,
+you can provide hints to ensure the client and server will always land on specific nodes.
+
+To do this, apply a label to the nodes where you want the client and server to run:
+
+```shell
+$ oc label nodes node-name netperf=true
+```
+
 ## Running with Pods
 Ensure your `kubeconfig` is properly set to the cluster you would like to run `k8s-netperf` against.
 
diff --git a/pkg/k8s/kubernetes.go b/pkg/k8s/kubernetes.go
index 36e953a1..26493180 100644
--- a/pkg/k8s/kubernetes.go
+++ b/pkg/k8s/kubernetes.go
@@ -342,6 +342,21 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 				RequiredDuringSchedulingIgnoredDuringExecution: workerNodeSelectorExpression,
 			}
 		}
+	} else {
+		affinity := corev1.NodeAffinity{
+			PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{
+				{
+					Weight: 100,
+					Preference: corev1.NodeSelectorTerm{
+						MatchExpressions: []corev1.NodeSelectorRequirement{
+							{Key: "netperf", Operator: corev1.NodeSelectorOpIn, Values: []string{"true"}},
+						},
+					},
+				},
+			},
+		}
+		cdpAcross.NodeAffinity = affinity
+		cdpHostAcross.NodeAffinity = affinity
 	}
 
 	if ncount > 1 {
@@ -427,6 +442,21 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 		}
 		sdp.NodeAffinity = affinity
 		sdpHost.NodeAffinity = affinity
+	} else {
+		affinity := corev1.NodeAffinity{
+			PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{
+				{
+					Weight: 100,
+					Preference: corev1.NodeSelectorTerm{
+						MatchExpressions: []corev1.NodeSelectorRequirement{
+							{Key: "netperf", Operator: corev1.NodeSelectorOpIn, Values: []string{"true"}},
+						},
+					},
+				},
+			},
+		}
+		sdp.NodeAffinity = affinity
+		sdpHost.NodeAffinity = affinity
 	}
 	if ncount > 1 {
 		antiAffinity := corev1.PodAntiAffinity{
@@ -583,6 +613,7 @@ func zoneNodeSelectorExpression(zone string) []corev1.PreferredSchedulingTerm {
 			Preference: corev1.NodeSelectorTerm{
 				MatchExpressions: []corev1.NodeSelectorRequirement{
 					{Key: "topology.kubernetes.io/zone", Operator: corev1.NodeSelectorOpIn, Values: []string{zone}},
+					{Key: "netperf", Operator: corev1.NodeSelectorOpIn, Values: []string{"true"}},
 				},
 			},
 		},
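As a quick usage sketch for the hint this first patch introduces (the node names below are placeholders, not part of the patch): label one or more worker nodes and confirm the label is in place before running k8s-netperf.

```shell
# Hypothetical node names; every labeled node becomes a preferred (weight 100)
# candidate for both the client and the server pods.
$ oc label nodes worker-0 worker-1 netperf=true
$ oc get nodes -l netperf=true
```

Because the patch wires the label into PreferredDuringSchedulingIgnoredDuringExecution terms, it acts as a hint rather than a hard constraint: unlabeled nodes remain eligible if no labeled node can host the pods.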
From 75fcc0e5cf8795bfb4394d7b911be1ce8a55e51a Mon Sep 17 00:00:00 2001
From: Joe Talerico aka rook
Date: Fri, 10 Jan 2025 15:24:53 -0500
Subject: [PATCH 2/2] Modifying label

Allow the user to pick the server and client hosts individually.

Signed-off-by: Joe Talerico aka rook
---
 README.md             |  5 +++--
 pkg/k8s/kubernetes.go | 18 +++++++++---------
 2 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/README.md b/README.md
index c7bf5dd3..763ee0a1 100644
--- a/README.md
+++ b/README.md
@@ -37,12 +37,13 @@ $ make container-build
 
 ## Label nodes
 k8s-netperf will make the best decision it can to schedule the client and server in your cluster. However,
-you can provide hints to ensure the client and server will always land on specific nodes.
+you can provide hints to ensure the client and server land on specific nodes.
 
 To do this, apply a label to the nodes where you want the client and server to run:
 
 ```shell
-$ oc label nodes node-name netperf=true
+$ oc label nodes node-name netperf=client
+$ oc label nodes node-name netperf=server
 ```
 
 ## Running with Pods
diff --git a/pkg/k8s/kubernetes.go b/pkg/k8s/kubernetes.go
index 26493180..f77f7e53 100644
--- a/pkg/k8s/kubernetes.go
+++ b/pkg/k8s/kubernetes.go
@@ -248,7 +248,7 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 	}
 	if z != "" && numNodes > 1 {
 		cdp.NodeAffinity = corev1.NodeAffinity{
-			PreferredDuringSchedulingIgnoredDuringExecution: zoneNodeSelectorExpression(z),
+			PreferredDuringSchedulingIgnoredDuringExecution: zoneNodeSelectorExpression(z, "client"),
 		}
 	}
 
@@ -334,7 +334,7 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 	if z != "" {
 		if numNodes > 1 {
 			cdpAcross.NodeAffinity = corev1.NodeAffinity{
-				PreferredDuringSchedulingIgnoredDuringExecution: zoneNodeSelectorExpression(z),
+				PreferredDuringSchedulingIgnoredDuringExecution: zoneNodeSelectorExpression(z, "client"),
 				RequiredDuringSchedulingIgnoredDuringExecution: workerNodeSelectorExpression,
 			}
 		} else {
@@ -349,7 +349,7 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 					Weight: 100,
 					Preference: corev1.NodeSelectorTerm{
 						MatchExpressions: []corev1.NodeSelectorRequirement{
-							{Key: "netperf", Operator: corev1.NodeSelectorOpIn, Values: []string{"true"}},
+							{Key: "netperf", Operator: corev1.NodeSelectorOpIn, Values: []string{"client"}},
 						},
 					},
 				},
@@ -362,7 +362,7 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 	if ncount > 1 {
 		if s.HostNetwork {
 			cdpHostAcross.NodeAffinity = corev1.NodeAffinity{
-				PreferredDuringSchedulingIgnoredDuringExecution: zoneNodeSelectorExpression(z),
+				PreferredDuringSchedulingIgnoredDuringExecution: zoneNodeSelectorExpression(z, "client"),
 				RequiredDuringSchedulingIgnoredDuringExecution: workerNodeSelectorExpression,
 			}
 			cdpHostAcross.PodAntiAffinity = corev1.PodAntiAffinity{
@@ -427,9 +427,9 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 	if z != "" {
 		var affinity corev1.NodeAffinity
 		if numNodes > 1 {
-			nodeZone := zoneNodeSelectorExpression(z)
+			nodeZone := zoneNodeSelectorExpression(z, "server")
 			if s.AcrossAZ {
-				nodeZone = zoneNodeSelectorExpression(acrossZone)
+				nodeZone = zoneNodeSelectorExpression(acrossZone, "server")
 			}
 			affinity = corev1.NodeAffinity{
 				PreferredDuringSchedulingIgnoredDuringExecution: nodeZone,
@@ -449,7 +449,7 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 					Weight: 100,
 					Preference: corev1.NodeSelectorTerm{
 						MatchExpressions: []corev1.NodeSelectorRequirement{
-							{Key: "netperf", Operator: corev1.NodeSelectorOpIn, Values: []string{"true"}},
+							{Key: "netperf", Operator: corev1.NodeSelectorOpIn, Values: []string{"server"}},
 						},
 					},
 				},
@@ -606,14 +606,14 @@ func launchClientVM(perf *config.PerfScenarios, name string, podAff *corev1.PodA
 	return nil
 }
 
-func zoneNodeSelectorExpression(zone string) []corev1.PreferredSchedulingTerm {
+func zoneNodeSelectorExpression(zone string, role string) []corev1.PreferredSchedulingTerm {
 	return []corev1.PreferredSchedulingTerm{
 		{
 			Weight: 100,
 			Preference: corev1.NodeSelectorTerm{
 				MatchExpressions: []corev1.NodeSelectorRequirement{
 					{Key: "topology.kubernetes.io/zone", Operator: corev1.NodeSelectorOpIn, Values: []string{zone}},
-					{Key: "netperf", Operator: corev1.NodeSelectorOpIn, Values: []string{"true"}},
+					{Key: "netperf", Operator: corev1.NodeSelectorOpIn, Values: []string{role}},
 				},
 			},
 		},
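A matching usage sketch for the per-role labels in this second patch (again, the node names are placeholders): since a label key can hold only one value per node, label one node for the client and a different node for the server, then verify which node carries which role.

```shell
# Hypothetical node names; use two distinct nodes so the client and server
# are steered to different hosts.
$ oc label node worker-0 netperf=client
$ oc label node worker-1 netperf=server
$ oc get nodes -L netperf
```

As in the first patch, these are preferred scheduling terms, so the labels steer placement without making the pods unschedulable when no node carries them.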