From 87e9e98877f0c17b0e866cf3f85ec5b51d2ecc0c Mon Sep 17 00:00:00 2001
From: Raul Sevilla
Date: Tue, 17 Sep 2024 12:37:03 +0200
Subject: [PATCH] Use NodeSystemInfo rather than node labels

Collect node metadata from the node's status (NodeSystemInfo) instead of
fetching and storing node labels: metrics.NodeInfo now embeds
corev1.NodeSystemInfo, the label-based helpers and result fields are
removed, and pod/node lookups are consolidated into the label-selector
based GetPods/GetPodNodeInfo helpers.

Signed-off-by: Raul Sevilla
---
 cmd/k8s-netperf/k8s-netperf.go |   3 -
 pkg/k8s/kubernetes.go          | 114 ++++++++------------------------
 pkg/metrics/system.go          |   8 ++-
 pkg/results/result.go          |   2 -
 4 files changed, 31 insertions(+), 96 deletions(-)

diff --git a/cmd/k8s-netperf/k8s-netperf.go b/cmd/k8s-netperf/k8s-netperf.go
index e093ed5c..5acd87fc 100644
--- a/cmd/k8s-netperf/k8s-netperf.go
+++ b/cmd/k8s-netperf/k8s-netperf.go
@@ -465,9 +465,6 @@ func executeWorkload(nc config.Config,
 	npr.EndTime = time.Now()
 	npr.ClientNodeInfo = s.ClientNodeInfo
 	npr.ServerNodeInfo = s.ServerNodeInfo
-	npr.ServerNodeLabels, _ = k8s.GetNodeLabels(s.ClientSet, s.ServerNodeInfo.Hostname)
-	npr.ClientNodeLabels, _ = k8s.GetNodeLabels(s.ClientSet, s.ClientNodeInfo.Hostname)
-
 	return npr
 }
 
diff --git a/pkg/k8s/kubernetes.go b/pkg/k8s/kubernetes.go
index 8a629613..dc36a023 100644
--- a/pkg/k8s/kubernetes.go
+++ b/pkg/k8s/kubernetes.go
@@ -12,6 +12,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	v1 "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/kubernetes"
@@ -218,7 +219,8 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 				return err
 			}
 		}
-		s.ClientNodeInfo, _ = GetPodNodeInfo(client, cdp)
+		s.ClientNodeInfo, err = GetPodNodeInfo(client, labels.Set(cdp.Labels).String())
+		return err
 	}
 
 	// Create iperf service
@@ -431,9 +433,12 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 	if err != nil {
 		return err
 	}
-	s.ServerNodeInfo, _ = GetPodNodeInfo(client, sdp)
+	s.ServerNodeInfo, err = GetPodNodeInfo(client, labels.Set(sdp.Labels).String())
+	if err != nil {
+		return err
+	}
 	if !s.NodeLocal {
-		s.ClientNodeInfo, _ = GetPodNodeInfo(client, cdpAcross)
+		s.ClientNodeInfo, err = GetPodNodeInfo(client, labels.Set(cdpAcross.Labels).String())
 	}
 	if err != nil {
 		return err
@@ -459,17 +464,17 @@ func launchServerVM(perf *config.PerfScenarios, name string, podAff *corev1.PodA
 		return err
 	}
 	if strings.Contains(name, "host") {
-		perf.ServerHost, err = GetNakedPods(perf.ClientSet, fmt.Sprintf("app=%s", serverRole))
+		perf.ServerHost, err = GetPods(perf.ClientSet, fmt.Sprintf("app=%s", serverRole))
 		if err != nil {
 			return err
 		}
 	} else {
-		perf.Server, err = GetNakedPods(perf.ClientSet, fmt.Sprintf("app=%s", serverRole))
+		perf.Server, err = GetPods(perf.ClientSet, fmt.Sprintf("app=%s", serverRole))
 		if err != nil {
 			return err
 		}
 	}
-	perf.ServerNodeInfo, _ = GetNakedPodNodeInfo(perf.ClientSet, fmt.Sprintf("app=%s", serverRole))
+	perf.ServerNodeInfo, _ = GetPodNodeInfo(perf.ClientSet, fmt.Sprintf("app=%s", serverRole))
 	return nil
 }
 
@@ -485,17 +490,17 @@ func launchClientVM(perf *config.PerfScenarios, name string, podAff *corev1.PodA
 		return err
 	}
 	if strings.Contains(name, "host") {
-		perf.ClientHost, err = GetNakedPods(perf.ClientSet, fmt.Sprintf("app=%s", name))
+		perf.ClientHost, err = GetPods(perf.ClientSet, fmt.Sprintf("app=%s", name))
 		if err != nil {
 			return err
 		}
 	} else {
-		perf.ClientAcross, err = GetNakedPods(perf.ClientSet, fmt.Sprintf("app=%s", name))
+		perf.ClientAcross, err = GetPods(perf.ClientSet, fmt.Sprintf("app=%s", name))
 		if err != nil {
 			return err
 		}
 	}
-	perf.ClientNodeInfo, _ = GetNakedPodNodeInfo(perf.ClientSet, fmt.Sprintf("app=%s", name))
+	perf.ClientNodeInfo, _ = GetPodNodeInfo(perf.ClientSet, fmt.Sprintf("app=%s", name))
 	return nil
 }
 
@@ -525,7 +530,7 @@ func deployDeployment(client *kubernetes.Clientset, dp DeploymentParams) (corev1
 		return pods, err
 	}
 	// Retrieve pods which match the server/client role labels
-	pods, err = GetPods(client, dp)
+	pods, err = GetPods(client, labels.Set(dp.Labels).String())
 	if err != nil {
 		return pods, err
 	}
@@ -660,46 +665,8 @@ func CreateDeployment(dp DeploymentParams, client *kubernetes.Clientset) (*appsv
 	return dc.Create(context.TODO(), deployment, metav1.CreateOptions{})
 }
 
-// GetNodeLabels Return Labels for a specific node
-func GetNodeLabels(c *kubernetes.Clientset, node string) (map[string]string, error) {
-	log.Debugf("Looking for Node labels for node - %s", node)
-	nodeInfo, err := c.CoreV1().Nodes().Get(context.TODO(), node, metav1.GetOptions{})
-	if err != nil {
-		return nil, err
-	}
-	return nodeInfo.GetLabels(), nil
-}
-
 // GetPodNodeInfo collects the node information for a specific pod
-func GetPodNodeInfo(c *kubernetes.Clientset, dp DeploymentParams) (metrics.NodeInfo, error) {
-	var info metrics.NodeInfo
-	d, err := c.AppsV1().Deployments(dp.Namespace).Get(context.TODO(), dp.Name, metav1.GetOptions{})
-	if err != nil {
-		return info, fmt.Errorf("❌ Failure to capture deployment: %v", err)
-	}
-	selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
-	if err != nil {
-		return info, fmt.Errorf("❌ Failure to capture deployment label: %v", err)
-	}
-	pods, err := c.CoreV1().Pods(dp.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String(), FieldSelector: "status.phase=Running"})
-	if err != nil {
-		return info, fmt.Errorf("❌ Failure to capture pods: %v", err)
-	}
-	for pod := range pods.Items {
-		p := pods.Items[pod]
-		if pods.Items[pod].DeletionTimestamp != nil {
-			continue
-		} else {
-			info.IP = p.Status.HostIP
-			info.Hostname = p.Spec.NodeName
-		}
-	}
-	log.Debugf("%s Running on %s with IP %s", d.Name, info.Hostname, info.IP)
-	return info, nil
-}
-
-// GetNakedPodNodeInfo collects the node information for a specific pod
-func GetNakedPodNodeInfo(c *kubernetes.Clientset, label string) (metrics.NodeInfo, error) {
+func GetPodNodeInfo(c *kubernetes.Clientset, label string) (metrics.NodeInfo, error) {
 	var info metrics.NodeInfo
 	listOpt := metav1.ListOptions{
 		LabelSelector: label,
@@ -716,58 +683,29 @@ func GetNakedPodNodeInf
 		} else {
 			info.IP = p.Status.HostIP
 			info.Hostname = p.Spec.NodeName
+			node, err := c.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{})
+			if err != nil {
+				return info, err
+			}
+			info.NodeSystemInfo = node.Status.NodeInfo
 		}
 	}
 	log.Debugf("Machine with lablel %s is Running on %s with IP %s", label, info.Hostname, info.IP)
 	return info, nil
 }
 
-// GetPods searches for a specific set of pods from DeploymentParms
-// It returns a PodList if the deployment is found.
-// NOTE : Since we can update the replicas to be > 1, is why I return a PodList.
-func GetPods(c *kubernetes.Clientset, dp DeploymentParams) (corev1.PodList, error) {
-	d, err := c.AppsV1().Deployments(dp.Namespace).Get(context.TODO(), dp.Name, metav1.GetOptions{})
-	npl := corev1.PodList{}
-	if err != nil {
-		return npl, fmt.Errorf("❌ Failure to capture deployment: %v", err)
-	}
-	selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
-	if err != nil {
-		return npl, fmt.Errorf("❌ Failure to capture deployment label: %v", err)
-	}
-	pods, err := c.CoreV1().Pods(dp.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String(), FieldSelector: "status.phase=Running"})
-	if err != nil {
-		return npl, fmt.Errorf("❌ Failure to capture pods: %v", err)
-	}
-	for pod := range pods.Items {
-		if pods.Items[pod].DeletionTimestamp != nil {
-			continue
-		} else {
-			npl.Items = append(npl.Items, pods.Items[pod])
-		}
-	}
-	return npl, nil
-}
-
-// GetNakedPods when we deploy pods without a higher-level controller like deployment
-func GetNakedPods(c *kubernetes.Clientset, label string) (corev1.PodList, error) {
-	npl := corev1.PodList{}
+// GetPods returns the running pods matching the given label selector
+func GetPods(c *kubernetes.Clientset, label string) (corev1.PodList, error) {
 	listOpt := metav1.ListOptions{
 		LabelSelector: label,
+		FieldSelector: "status.phase=Running",
 	}
 	log.Infof("Looking for pods with label %s", fmt.Sprint(label))
 	pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), listOpt)
 	if err != nil {
-		return npl, fmt.Errorf("❌ Failure to capture pods: %v", err)
-	}
-	for pod := range pods.Items {
-		if pods.Items[pod].DeletionTimestamp != nil {
-			continue
-		} else {
-			npl.Items = append(npl.Items, pods.Items[pod])
-		}
+		return *pods, fmt.Errorf("❌ Failure to capture pods: %v", err)
 	}
-	return npl, nil
+	return *pods, nil
 }
diff --git a/pkg/metrics/system.go b/pkg/metrics/system.go
index 9f542184..6e7b300f 100644
--- a/pkg/metrics/system.go
+++ b/pkg/metrics/system.go
@@ -11,14 +11,16 @@ import (
 	"github.com/cloud-bulldozer/go-commons/prometheus"
 	"github.com/cloud-bulldozer/k8s-netperf/pkg/logging"
 	"github.com/prometheus/common/model"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/clientcmd"
 )
 
 // NodeInfo stores the node metadata like IP and Hostname
 type NodeInfo struct {
-	IP       string
-	Hostname string
-	NodeName string
+	IP       string `json:"ip"`
+	Hostname string `json:"hostname"`
+	NodeName string `json:"nodeName"`
+	corev1.NodeSystemInfo
 }
 
 // NodeCPU stores CPU information for a specific Node
diff --git a/pkg/results/result.go b/pkg/results/result.go
index a3d64766..6b701ef1 100644
--- a/pkg/results/result.go
+++ b/pkg/results/result.go
@@ -44,8 +44,6 @@ type Data struct {
 	ServerMetrics    metrics.NodeCPU
 	ClientPodCPU     metrics.PodValues
 	ServerPodCPU     metrics.PodValues
-	ClientNodeLabels map[string]string
-	ServerNodeLabels map[string]string
 }
 
 // ScenarioResults each scenario could have multiple results
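
Note (not part of the patch): a minimal, self-contained sketch of what the embedded NodeSystemInfo gives consumers of metrics.NodeInfo. The struct mirrors pkg/metrics/system.go after this change; the main() wrapper and the literal values are hypothetical stand-ins for what GetPodNodeInfo populates from pod.Status.HostIP, pod.Spec.NodeName, and node.Status.NodeInfo.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// NodeInfo mirrors pkg/metrics.NodeInfo after this patch: plain node
// metadata plus the NodeSystemInfo copied from the node's status.
type NodeInfo struct {
	IP       string `json:"ip"`
	Hostname string `json:"hostname"`
	NodeName string `json:"nodeName"`
	corev1.NodeSystemInfo
}

func main() {
	// Hypothetical values; in k8s-netperf these are filled by GetPodNodeInfo.
	info := NodeInfo{
		IP:       "10.0.0.12",
		Hostname: "worker-0",
		NodeSystemInfo: corev1.NodeSystemInfo{
			KernelVersion:  "5.14.0-284.el9.x86_64",
			KubeletVersion: "v1.28.3",
		},
	}
	// Fields of the embedded struct are promoted, so callers and the JSON
	// encoder see them directly alongside IP/Hostname/NodeName.
	fmt.Println(info.Hostname, info.KernelVersion, info.KubeletVersion)
}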