Emitting 0 for cpu/memory over-limit metrics when limit/resource not set
mitali-salvi committed Sep 19, 2023
1 parent e8cf38e commit c67565c
Showing 2 changed files with 217 additions and 0 deletions.
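In short: when no CPU or memory limit can be resolved for a pod (or, with enhanced metrics enabled, for a container), the corresponding *_utilization_over_*_limit field is now emitted with an explicit value of 0 instead of being omitted. A minimal sketch of the pod-level pattern, using a plain map and a hypothetical addField helper rather than the receiver's actual CIMetric interface:

package main

import "fmt"

// addField stands in for CIMetric.AddField in this sketch.
func addField(fields map[string]interface{}, name string, value interface{}) {
	fields[name] = value
}

// decoratePodCPUOverLimit mirrors the new pod-level branch: divide by the
// limit when one was resolved, otherwise emit an explicit zero.
func decoratePodCPUOverLimit(fields map[string]interface{}, podCPUTotal, podCPULimit float64, ok bool) {
	if ok && podCPULimit != 0 {
		addField(fields, "pod_cpu_utilization_over_pod_limit", podCPUTotal/podCPULimit*100)
	} else {
		addField(fields, "pod_cpu_utilization_over_pod_limit", float64(0))
	}
}

func main() {
	fields := map[string]interface{}{}
	decoratePodCPUOverLimit(fields, 1, 0, false) // no limit could be resolved
	fmt.Println(fields)                          // map[pod_cpu_utilization_over_pod_limit:0]
}

The diff below applies this fallback to both decorateCPU and decorateMem; the container-level variant additionally gates the field, including the zero fallback, on p.includeEnhancedMetrics.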
8 changes: 8 additions & 0 deletions receiver/awscontainerinsightreceiver/internal/stores/podstore.go
@@ -397,6 +397,8 @@ func (p *PodStore) decorateCPU(metric CIMetric, pod *corev1.Pod) {
 			if ok && podCPULimit != 0 {
 				metric.AddField(ci.MetricName(ci.TypePod, ci.CPULimit), podCPULimit)
 				metric.AddField(ci.MetricName(ci.TypePod, ci.CPUUtilizationOverPodLimit), podCPUTotal.(float64)/float64(podCPULimit)*100)
+			} else {
+				metric.AddField(ci.MetricName(ci.TypePod, ci.CPUUtilizationOverPodLimit), float64(0))
 			}
 		}
 	} else if metric.GetTag(ci.MetricType) == ci.TypeContainer {
@@ -411,6 +413,8 @@ func (p *PodStore) decorateCPU(metric CIMetric, pod *corev1.Pod) {
 					if p.includeEnhancedMetrics {
 						metric.AddField(ci.MetricName(ci.TypeContainer, ci.CPUUtilizationOverContainerLimit), containerCPUTotal.(float64)/float64(containerCPULimit)*100)
 					}
+				} else if !ok && p.includeEnhancedMetrics {
+					metric.AddField(ci.MetricName(ci.TypeContainer, ci.CPUUtilizationOverContainerLimit), float64(0))
 				}
 				if containerCPUReq, ok := getRequestForContainer(cpuKey, containerSpec); ok {
 					metric.AddField(ci.MetricName(ci.TypeContainer, ci.CPURequest), containerCPUReq)
@@ -445,6 +449,8 @@ func (p *PodStore) decorateMem(metric CIMetric, pod *corev1.Pod) {
 			if ok && podMemLimit != 0 {
 				metric.AddField(ci.MetricName(ci.TypePod, ci.MemLimit), podMemLimit)
 				metric.AddField(ci.MetricName(ci.TypePod, ci.MemUtilizationOverPodLimit), float64(podMemWorkingset.(uint64))/float64(podMemLimit)*100)
+			} else {
+				metric.AddField(ci.MetricName(ci.TypePod, ci.MemUtilizationOverPodLimit), float64(0))
 			}
 		}
 	} else if metric.GetTag(ci.MetricType) == ci.TypeContainer {
@@ -460,6 +466,8 @@ func (p *PodStore) decorateMem(metric CIMetric, pod *corev1.Pod) {
 					if p.includeEnhancedMetrics {
 						metric.AddField(ci.MetricName(ci.TypeContainer, ci.MemUtilizationOverContainerLimit), float64(containerMemWorkingset.(uint64))/float64(containerMemLimit)*100)
 					}
+				} else if !ok && p.includeEnhancedMetrics {
+					metric.AddField(ci.MetricName(ci.TypeContainer, ci.MemUtilizationOverContainerLimit), float64(0))
 				}
 				if containerMemReq, ok := getRequestForContainer(memoryKey, containerSpec); ok {
 					metric.AddField(ci.MetricName(ci.TypeContainer, ci.MemRequest), containerMemReq)
209 changes: 209 additions & 0 deletions receiver/awscontainerinsightreceiver/internal/stores/podstore_test.go
@@ -198,6 +198,162 @@ func getBaseTestPodInfo() *corev1.Pod {
 	return &pods.Items[0]
 }
 
+func getDefaultTestPodInfoWithoutLimitsSetInfo() *corev1.Pod {
+	podJSON := `
+{
+  "kind": "PodList",
+  "apiVersion": "v1",
+  "metadata": {
+  },
+  "items": [
+    {
+      "metadata": {
+        "name": "cpu-limit",
+        "namespace": "default",
+        "ownerReferences": [
+          {
+            "apiVersion": "apps/v1",
+            "blockOwnerDeletion": true,
+            "controller": true,
+            "kind": "DaemonSet",
+            "name": "DaemonSetTest",
+            "uid": "36779a62-4aca-11e9-977b-0672b6c6fc94"
+          }
+        ],
+        "selfLink": "/api/v1/namespaces/default/pods/cpu-limit",
+        "uid": "764d01e1-2a2f-11e9-95ea-0a695d7ce286",
+        "resourceVersion": "5671573",
+        "creationTimestamp": "2019-02-06T16:51:34Z",
+        "labels": {
+          "app": "hello_test"
+        },
+        "annotations": {
+          "kubernetes.io/config.seen": "2019-02-19T00:06:56.109155665Z",
+          "kubernetes.io/config.source": "api"
+        }
+      },
+      "spec": {
+        "volumes": [
+          {
+            "name": "default-token-tlgw7",
+            "secret": {
+              "secretName": "default-token-tlgw7",
+              "defaultMode": 420
+            }
+          }
+        ],
+        "containers": [
+          {
+            "name": "ubuntu",
+            "image": "ubuntu",
+            "command": [
+              "/bin/bash"
+            ],
+            "args": [
+              "-c",
+              "sleep 300000000"
+            ],
+            "volumeMounts": [
+              {
+                "name": "default-token-tlgw7",
+                "readOnly": true,
+                "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
+              }
+            ],
+            "terminationMessagePath": "/dev/termination-log",
+            "terminationMessagePolicy": "File",
+            "imagePullPolicy": "Always"
+          }
+        ],
+        "restartPolicy": "Always",
+        "terminationGracePeriodSeconds": 30,
+        "dnsPolicy": "ClusterFirst",
+        "serviceAccountName": "default",
+        "serviceAccount": "default",
+        "nodeName": "ip-192-168-67-127.us-west-2.compute.internal",
+        "securityContext": {
+        },
+        "schedulerName": "default-scheduler",
+        "tolerations": [
+          {
+            "key": "node.kubernetes.io/not-ready",
+            "operator": "Exists",
+            "effect": "NoExecute",
+            "tolerationSeconds": 300
+          },
+          {
+            "key": "node.kubernetes.io/unreachable",
+            "operator": "Exists",
+            "effect": "NoExecute",
+            "tolerationSeconds": 300
+          }
+        ],
+        "priority": 0
+      },
+      "status": {
+        "phase": "Running",
+        "conditions": [
+          {
+            "type": "Initialized",
+            "status": "True",
+            "lastProbeTime": null,
+            "lastTransitionTime": "2019-02-06T16:51:34Z"
+          },
+          {
+            "type": "Ready",
+            "status": "True",
+            "lastProbeTime": null,
+            "lastTransitionTime": "2019-02-06T16:51:43Z"
+          },
+          {
+            "type": "ContainersReady",
+            "status": "True",
+            "lastProbeTime": null,
+            "lastTransitionTime": null
+          },
+          {
+            "type": "PodScheduled",
+            "status": "True",
+            "lastProbeTime": null,
+            "lastTransitionTime": "2019-02-06T16:51:34Z"
+          }
+        ],
+        "hostIP": "192.168.67.127",
+        "podIP": "192.168.76.93",
+        "startTime": "2019-02-06T16:51:34Z",
+        "containerStatuses": [
+          {
+            "name": "ubuntu",
+            "state": {
+              "running": {
+                "startedAt": "2019-02-06T16:51:42Z"
+              }
+            },
+            "lastState": {
+            },
+            "ready": true,
+            "restartCount": 0,
+            "image": "ubuntu:latest",
+            "imageID": "docker-pullable://ubuntu@sha256:7a47ccc3bbe8a451b500d2b53104868b46d60ee8f5b35a24b41a86077c650210",
+            "containerID": "docker://637631e2634ea92c0c1aa5d24734cfe794f09c57933026592c12acafbaf6972c"
+          }
+        ],
+        "qosClass": "Guaranteed"
+      }
+    }
+  ]
+}`
+	pods := corev1.PodList{}
+	err := json.Unmarshal([]byte(podJSON), &pods)
+	if err != nil {
+		panic(fmt.Sprintf("unmarshal pod err %v", err))
+	}
+	return &pods.Items[0]
+}
+
 func getPodStore() *PodStore {
 	nodeInfo := newNodeInfo("testNode1", &mockNodeInfoProvider{}, zap.NewNop())
 	nodeInfo.setCPUCapacity(4000)
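The new fixture above deliberately omits the resources block from its single container spec, so no CPU or memory limit (or request) can be resolved for the container or, by extension, the pod — which is what drives decorateCPU and decorateMem into the new zero-emitting branches in the tests below. As a quick illustration with the corev1 types (hasCPULimit is a hypothetical helper for this note, not part of the package):

// import corev1 "k8s.io/api/core/v1"

// hasCPULimit reports whether a container spec declares a CPU limit.
// For the "ubuntu" container returned by getDefaultTestPodInfoWithoutLimitsSetInfo,
// Resources is the zero value, so this returns false.
func hasCPULimit(c corev1.Container) bool {
	_, ok := c.Resources.Limits[corev1.ResourceCPU]
	return ok
}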
@@ -252,6 +408,59 @@ func TestPodStore_decorateCpu(t *testing.T) {
 	assert.Equal(t, float64(10), metric.GetField("container_cpu_utilization_over_container_limit").(float64))
 }
 
+func TestPodStore_decorateCpu_WithNoLimitsAndResources(t *testing.T) {
+	podStore := getPodStore()
+
+	pod := getDefaultTestPodInfoWithoutLimitsSetInfo()
+
+	// test pod metrics
+	tags := map[string]string{ci.MetricType: ci.TypePod}
+	fields := map[string]interface{}{ci.MetricName(ci.TypePod, ci.CPUTotal): float64(1)}
+
+	metric := generateMetric(fields, tags)
+	podStore.decorateCPU(metric, pod)
+
+	assert.Equal(t, float64(0), metric.GetField("pod_cpu_utilization_over_pod_limit").(float64))
+
+	// test container metrics
+	tags = map[string]string{ci.MetricType: ci.TypeContainer, ci.ContainerNamekey: "ubuntu"}
+	fields = map[string]interface{}{ci.MetricName(ci.TypeContainer, ci.CPUTotal): float64(1)}
+	metric = generateMetric(fields, tags)
+	podStore.decorateCPU(metric, pod)
+
+	assert.False(t, metric.HasField("container_cpu_utilization_over_container_limit"))
+
+	podStore.includeEnhancedMetrics = true
+	podStore.decorateCPU(metric, pod)
+
+	assert.Equal(t, float64(0), metric.GetField("container_cpu_utilization_over_container_limit").(float64))
+}
+
+func TestPodStore_decorateMem_WithNoLimitsAndResources(t *testing.T) {
+	podStore := getPodStore()
+	pod := getDefaultTestPodInfoWithoutLimitsSetInfo()
+
+	tags := map[string]string{ci.MetricType: ci.TypePod}
+	fields := map[string]interface{}{ci.MetricName(ci.TypePod, ci.MemWorkingset): uint64(10 * 1024 * 1024)}
+
+	metric := generateMetric(fields, tags)
+	podStore.decorateMem(metric, pod)
+
+	assert.Equal(t, float64(0), metric.GetField("pod_memory_utilization_over_pod_limit").(float64))
+
+	tags = map[string]string{ci.MetricType: ci.TypeContainer, ci.ContainerNamekey: "ubuntu"}
+	fields = map[string]interface{}{ci.MetricName(ci.TypeContainer, ci.MemWorkingset): uint64(10 * 1024 * 1024)}
+
+	metric = generateMetric(fields, tags)
+	podStore.decorateMem(metric, pod)
+	assert.False(t, metric.HasField("container_memory_utilization_over_container_limit"))
+
+	podStore.includeEnhancedMetrics = true
+	podStore.decorateMem(metric, pod)
+
+	assert.Equal(t, float64(0), metric.GetField("container_memory_utilization_over_container_limit").(float64))
+}
+
 func TestPodStore_decorateMem(t *testing.T) {
 	podStore := getPodStore()
 	pod := getBaseTestPodInfo()
