From 6abf8684ee5c68c5b3e428c98c7d484fc87b61a8 Mon Sep 17 00:00:00 2001
From: Chad Patel
Date: Mon, 9 Oct 2023 16:34:29 -0500
Subject: [PATCH] move container_status_ metrics to pod_container_ so we can
 pick up short-lived container states

---
 .../awscontainerinsightreceiver/README.md    | 104 +++++++++---------
 .../internal/stores/podstore.go              |  74 +++++++++----
 .../internal/stores/podstore_test.go         |  98 ++++++++++++----
 3 files changed, 170 insertions(+), 106 deletions(-)

diff --git a/receiver/awscontainerinsightreceiver/README.md b/receiver/awscontainerinsightreceiver/README.md
index 81502bb1954a..e3e4fd8bde34 100644
--- a/receiver/awscontainerinsightreceiver/README.md
+++ b/receiver/awscontainerinsightreceiver/README.md
@@ -687,52 +687,58 @@ kubectl apply -f config.yaml

 ### Pod
 
-| Metric                                | Unit         |
-|---------------------------------------|--------------|
-| pod_cpu_limit                         | Millicore    |
-| pod_cpu_request                       | Millicore    |
-| pod_cpu_reserved_capacity             | Percent      |
-| pod_cpu_usage_system                  | Millicore    |
-| pod_cpu_usage_total                   | Millicore    |
-| pod_cpu_usage_user                    | Millicore    |
-| pod_cpu_utilization                   | Percent      |
-| pod_cpu_utilization_over_pod_limit    | Percent      |
-| pod_memory_cache                      | Bytes        |
-| pod_memory_failcnt                    | Count        |
-| pod_memory_hierarchical_pgfault       | Count/Second |
-| pod_memory_hierarchical_pgmajfault    | Count/Second |
-| pod_memory_limit                      | Bytes        |
-| pod_memory_mapped_file                | Bytes        |
-| pod_memory_max_usage                  | Bytes        |
-| pod_memory_pgfault                    | Count/Second |
-| pod_memory_pgmajfault                 | Count/Second |
-| pod_memory_request                    | Bytes        |
-| pod_memory_reserved_capacity          | Percent      |
-| pod_memory_rss                        | Bytes        |
-| pod_memory_swap                       | Bytes        |
-| pod_memory_usage                      | Bytes        |
-| pod_memory_utilization                | Percent      |
-| pod_memory_utilization_over_pod_limit | Percent      |
-| pod_memory_working_set                | Bytes        |
-| pod_network_rx_bytes                  | Bytes/Second |
-| pod_network_rx_dropped                | Count/Second |
-| pod_network_rx_errors                 | Count/Second |
-| pod_network_rx_packets                | Count/Second |
-| pod_network_total_bytes               | Bytes/Second |
-| pod_network_tx_bytes                  | Bytes/Second |
-| pod_network_tx_dropped                | Count/Second |
-| pod_network_tx_errors                 | Count/Second |
-| pod_network_tx_packets                | Count/Second |
-| pod_number_of_container_restarts      | Count        |
-| pod_number_of_containers              | Count        |
-| pod_number_of_running_containers      | Count        |
-| pod_status_ready                      | Count        |
-| pod_status_scheduled                  | Count        |
-| pod_status_unknown                    | Count        |
-| pod_status_failed                     | Count        |
-| pod_status_pending                    | Count        |
-| pod_status_running                    | Count        |
-| pod_status_succeeded                  | Count        |
+| Metric                                                            | Unit         |
+|-------------------------------------------------------------------|--------------|
+| pod_cpu_limit                                                     | Millicore    |
+| pod_cpu_request                                                   | Millicore    |
+| pod_cpu_reserved_capacity                                         | Percent      |
+| pod_cpu_usage_system                                              | Millicore    |
+| pod_cpu_usage_total                                               | Millicore    |
+| pod_cpu_usage_user                                                | Millicore    |
+| pod_cpu_utilization                                               | Percent      |
+| pod_cpu_utilization_over_pod_limit                                | Percent      |
+| pod_memory_cache                                                  | Bytes        |
+| pod_memory_failcnt                                                | Count        |
+| pod_memory_hierarchical_pgfault                                   | Count/Second |
+| pod_memory_hierarchical_pgmajfault                                | Count/Second |
+| pod_memory_limit                                                  | Bytes        |
+| pod_memory_mapped_file                                            | Bytes        |
+| pod_memory_max_usage                                              | Bytes        |
+| pod_memory_pgfault                                                | Count/Second |
+| pod_memory_pgmajfault                                             | Count/Second |
+| pod_memory_request                                                | Bytes        |
+| pod_memory_reserved_capacity                                      | Percent      |
+| pod_memory_rss                                                    | Bytes        |
+| pod_memory_swap                                                   | Bytes        |
+| pod_memory_usage                                                  | Bytes        |
+| pod_memory_utilization                                            | Percent      |
+| pod_memory_utilization_over_pod_limit                             | Percent      |
+| pod_memory_working_set                                            | Bytes        |
+| pod_network_rx_bytes                                              | Bytes/Second |
+| pod_network_rx_dropped                                            | Count/Second |
+| pod_network_rx_errors                                             | Count/Second |
+| pod_network_rx_packets                                            | Count/Second |
+| pod_network_total_bytes                                           | Bytes/Second |
+| pod_network_tx_bytes                                              | Bytes/Second |
+| pod_network_tx_dropped                                            | Count/Second |
+| pod_network_tx_errors                                             | Count/Second |
+| pod_network_tx_packets                                            | Count/Second |
+| pod_number_of_container_restarts                                  | Count        |
+| pod_number_of_containers                                          | Count        |
+| pod_number_of_running_containers                                  | Count        |
+| pod_status_ready                                                  | Count        |
+| pod_status_scheduled                                              | Count        |
+| pod_status_unknown                                                | Count        |
+| pod_status_failed                                                 | Count        |
+| pod_status_pending                                                | Count        |
+| pod_status_running                                                | Count        |
+| pod_status_succeeded                                              | Count        |
+| pod_container_status_waiting_reason_crash_loop_back_off           | Count        |
+| pod_container_status_waiting_reason_image_pull_error              | Count        |
+| pod_container_status_waiting_reason_start_error                   | Count        |
+| pod_container_status_waiting_reason_create_container_error        | Count        |
+| pod_container_status_waiting_reason_create_container_config_error | Count        |
+| pod_container_status_terminated_reason_oom_killed                 | Count        |
 
 | Resource Attribute   |
 |----------------------|
@@ -819,12 +825,6 @@ kubectl apply -f config.yaml
 | container_status_running                                       | Count        |
 | container_status_terminated                                    | Count        |
 | container_status_waiting                                       | Count        |
-| container_status_waiting_reason_crash_loop_back_off            | Count        |
-| container_status_waiting_reason_image_pull_error               | Count        |
-| container_status_waiting_reason_start_error                    | Count        |
-| container_status_waiting_reason_create_container_error         | Count        |
-| container_status_waiting_reason_create_container_config_error  | Count        |
-| container_status_terminated_reason_oom_killed                  | Count        |

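Editor's note: the README change above is a renaming at the emission site. Judging by the before/after tables, `ci.MetricName` prepends a per-type prefix to a metric name, so publishing the old status counters under the pod type with a `container_` infix yields exactly the `pod_container_status_*` names now documented in the Pod table. Below is a minimal standalone sketch of that naming scheme; the `metricName` helper and its prefix table are illustrative stand-ins, not the receiver's actual `ci` package API.

```go
package main

import "fmt"

// metricName is a stand-in for ci.MetricName, which is assumed to prepend a
// per-type prefix ("pod_", "container_", ...) to the given metric name.
func metricName(metricType, name string) string {
	prefixes := map[string]string{
		"Pod":       "pod_",
		"Container": "container_",
	}
	return prefixes[metricType] + name
}

func main() {
	const status = "status_waiting_reason_crash_loop_back_off"

	// Old, container-scoped metric name:
	fmt.Println(metricName("Container", status))
	// container_status_waiting_reason_crash_loop_back_off

	// New, pod-scoped metric name with the "container_" infix:
	fmt.Println(metricName("Pod", "container_"+status))
	// pod_container_status_waiting_reason_crash_loop_back_off
}
```

Both spellings share the same status suffix; the patch only moves the counters from container scope to pod scope, as the Go changes below implement.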
diff --git a/receiver/awscontainerinsightreceiver/internal/stores/podstore.go b/receiver/awscontainerinsightreceiver/internal/stores/podstore.go
index 3babc6a37b45..6bd5902938dd 100644
--- a/receiver/awscontainerinsightreceiver/internal/stores/podstore.go
+++ b/receiver/awscontainerinsightreceiver/internal/stores/podstore.go
@@ -497,12 +497,14 @@ func (p *PodStore) addStatus(metric CIMetric, pod *corev1.Pod) {
 	if p.includeEnhancedMetrics {
 		p.addPodStatusMetrics(metric, pod)
 		p.addPodConditionMetrics(metric, pod)
+		p.addPodContainerStatusMetrics(metric, pod)
 	}
 	var curContainerRestarts int
 	for _, containerStatus := range pod.Status.ContainerStatuses {
 		curContainerRestarts += int(containerStatus.RestartCount)
 	}
+
 	podKey := createPodKeyFromMetric(metric)
 	if podKey != "" {
 		content, ok := p.getPrevMeasurement(ci.TypePod, podKey)
@@ -520,28 +522,16 @@ func (p *PodStore) addStatus(metric CIMetric, pod *corev1.Pod) {
 	if containerName := metric.GetTag(ci.ContainerNamekey); containerName != "" {
 		for _, containerStatus := range pod.Status.ContainerStatuses {
 			if containerStatus.Name == containerName {
-				possibleStatuses := map[string]int{
-					ci.StatusRunning:    0,
-					ci.StatusWaiting:    0,
-					ci.StatusTerminated: 0,
-				}
 				switch {
 				case containerStatus.State.Running != nil:
 					metric.AddTag(ci.ContainerStatus, "Running")
-					possibleStatuses[ci.StatusRunning] = 1
 				case containerStatus.State.Waiting != nil:
 					metric.AddTag(ci.ContainerStatus, "Waiting")
-					possibleStatuses[ci.StatusWaiting] = 1
-					reason := containerStatus.State.Waiting.Reason
-					if reason != "" {
-						metric.AddTag(ci.ContainerStatusReason, reason)
-						if val, ok := ci.WaitingReasonLookup[reason]; ok {
-							possibleStatuses[val] = 1
-						}
+					if containerStatus.State.Waiting.Reason != "" {
+						metric.AddTag(ci.ContainerStatusReason, containerStatus.State.Waiting.Reason)
 					}
 				case containerStatus.State.Terminated != nil:
 					metric.AddTag(ci.ContainerStatus, "Terminated")
-					possibleStatuses[ci.StatusTerminated] = 1
 					if containerStatus.State.Terminated.Reason != "" {
 						metric.AddTag(ci.ContainerStatusReason, containerStatus.State.Terminated.Reason)
 					}
@@ -549,9 +539,6 @@ func (p *PodStore) addStatus(metric CIMetric, pod *corev1.Pod) {
 				if containerStatus.LastTerminationState.Terminated != nil && containerStatus.LastTerminationState.Terminated.Reason != "" {
 					metric.AddTag(ci.ContainerLastTerminationReason, containerStatus.LastTerminationState.Terminated.Reason)
-					if strings.Contains(containerStatus.LastTerminationState.Terminated.Reason, "OOMKilled") {
-						possibleStatuses[ci.StatusTerminatedReasonOOMKilled] = 1
-					}
 				}
 				containerKey := createContainerKeyFromMetric(metric)
 				if containerKey != "" {
@@ -566,13 +553,6 @@ func (p *PodStore) addStatus(metric CIMetric, pod *corev1.Pod) {
 					}
 					p.setPrevMeasurement(ci.TypeContainer, containerKey, prevContainerMeasurement{restarts: int(containerStatus.RestartCount)})
 				}
-
-				// add container containerStatus metrics
-				if p.includeEnhancedMetrics {
-					for name, val := range possibleStatuses {
-						metric.AddField(ci.MetricName(ci.TypeContainer, name), val)
-					}
-				}
 			}
 		}
 	}
@@ -613,6 +593,52 @@
 	}
 }
 
+func (p *PodStore) addPodContainerStatusMetrics(metric CIMetric, pod *corev1.Pod) {
+	possibleStatuses := map[string]int{
+		ci.StatusRunning:    0,
+		ci.StatusWaiting:    0,
+		ci.StatusTerminated: 0,
+	}
+	for _, containerStatus := range pod.Status.ContainerStatuses {
+		switch {
+		case containerStatus.State.Running != nil:
+			possibleStatuses[ci.StatusRunning]++
+		case containerStatus.State.Waiting != nil:
+			possibleStatuses[ci.StatusWaiting]++
+			reason := containerStatus.State.Waiting.Reason
+			if reason != "" {
+				if val, ok := ci.WaitingReasonLookup[reason]; ok {
+					if _, foundStatus := possibleStatuses[val]; foundStatus {
+						possibleStatuses[val]++
+					} else {
+						possibleStatuses[val] = 1
+					}
+				}
+			}
+		case containerStatus.State.Terminated != nil:
+			possibleStatuses[ci.StatusTerminated]++
+			if containerStatus.State.Terminated.Reason != "" {
+				metric.AddTag(ci.ContainerStatusReason, containerStatus.State.Terminated.Reason)
+			}
+		}
+
+		if containerStatus.LastTerminationState.Terminated != nil && containerStatus.LastTerminationState.Terminated.Reason != "" {
+			if strings.Contains(containerStatus.LastTerminationState.Terminated.Reason, "OOMKilled") {
+				if _, foundStatus := possibleStatuses[ci.StatusTerminatedReasonOOMKilled]; foundStatus {
+					possibleStatuses[ci.StatusTerminatedReasonOOMKilled]++
+				} else {
+					possibleStatuses[ci.StatusTerminatedReasonOOMKilled] = 1
+				}
+			}
+		}
+	}
+
+	for name, val := range possibleStatuses {
+		// desired prefix: pod_container_
+		metric.AddField(ci.MetricName(ci.TypePod, "container_"+name), val)
+	}
+}
+
 // It could be used to get limit/request(depend on the passed-in fn) per pod
 // return the sum of ResourceSetting and a bool which indicate whether all container set Resource
 func getResourceSettingForPod(pod *corev1.Pod, bound uint64, resource corev1.ResourceName, fn func(resource corev1.ResourceName, spec corev1.Container) (uint64, bool)) (uint64, bool) {
diff --git a/receiver/awscontainerinsightreceiver/internal/stores/podstore_test.go b/receiver/awscontainerinsightreceiver/internal/stores/podstore_test.go
index 6a5c98b32455..1a825b8ab0fd 100644
--- a/receiver/awscontainerinsightreceiver/internal/stores/podstore_test.go
+++ b/receiver/awscontainerinsightreceiver/internal/stores/podstore_test.go
@@ -629,9 +629,13 @@ func TestPodStore_addStatus_adds_all_pod_conditions_as_metrics_when_unexpected(t
 	assert.Equal(t, 1, decoratedResultMetric.GetField(PodScheduledMetricName))
 	assert.Equal(t, 0, decoratedResultMetric.GetField(PodUnknownMetricName))
 }
-
 func TestPodStore_addStatus_enhanced_metrics(t *testing.T) {
 	pod := getBaseTestPodInfo()
+	// add another container
+	containerCopy := pod.Status.ContainerStatuses[0]
+	containerCopy.Name = "ubuntu2"
+	pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, containerCopy)
+
 	tags := map[string]string{ci.MetricType: ci.TypePod, ci.K8sNamespace: "default", ci.K8sPodNameKey: "cpu-limit"}
 	fields := map[string]interface{}{ci.MetricName(ci.TypePod, ci.CPUTotal): float64(1)}
 	podStore := getPodStore()
@@ -644,21 +648,40 @@ func TestPodStore_addStatus_enhanced_metrics(t *testing.T) {
 	val := metric.GetField(ci.MetricName(ci.TypePod, ci.ContainerRestartCount))
 	assert.Nil(t, val)
 
+	// set up container defaults
 	tags = map[string]string{ci.MetricType: ci.TypeContainer, ci.K8sNamespace: "default", ci.K8sPodNameKey: "cpu-limit", ci.ContainerNamekey: "ubuntu"}
 	metric = generateMetric(fields, tags)
-
 	podStore.addStatus(metric, pod)
 	assert.Equal(t, "Running", metric.GetTag(ci.ContainerStatus))
 	val = metric.GetField(ci.ContainerRestartCount)
 	assert.Nil(t, val)
-	val = metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusRunning))
+	// set up the other container
+	tags = map[string]string{ci.MetricType: ci.TypeContainer, ci.K8sNamespace: "default", ci.K8sPodNameKey: "cpu-limit", ci.ContainerNamekey: "ubuntu2"}
+	metric = generateMetric(fields, tags)
+	podStore.addStatus(metric, pod)
+	assert.Equal(t, "Running", metric.GetTag(ci.ContainerStatus))
+	val = metric.GetField(ci.ContainerRestartCount)
+	assert.Nil(t, val)
+
+	tags = map[string]string{ci.MetricType: ci.TypePod, ci.K8sNamespace: "default", ci.K8sPodNameKey: "cpu-limit"}
+	metric = generateMetric(fields, tags)
+
+	podStore.addStatus(metric, pod)
+	assert.Equal(t, "Running", metric.GetTag(ci.PodStatus))
+	val = metric.GetField(ci.ContainerRestartCount)
+	assert.Nil(t, val)
+	val = metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusRunning))
 	assert.NotNil(t, val)
-	assert.Equal(t, 1, val)
+	assert.Equal(t, 2, val)
 
 	pod.Status.ContainerStatuses[0].State.Running = nil
 	pod.Status.ContainerStatuses[0].State.Terminated = &corev1.ContainerStateTerminated{}
 	pod.Status.ContainerStatuses[0].LastTerminationState.Terminated = &corev1.ContainerStateTerminated{Reason: "OOMKilled"}
 	pod.Status.ContainerStatuses[0].RestartCount = 1
+	pod.Status.ContainerStatuses[1].State.Running = nil
+	pod.Status.ContainerStatuses[1].State.Terminated = &corev1.ContainerStateTerminated{}
+	pod.Status.ContainerStatuses[1].LastTerminationState.Terminated = &corev1.ContainerStateTerminated{Reason: "OOMKilled"}
+	pod.Status.ContainerStatuses[1].RestartCount = 1
 	pod.Status.Phase = "Succeeded"
 
 	tags = map[string]string{ci.MetricType: ci.TypePod, ci.K8sNamespace: "default", ci.K8sPodNameKey: "cpu-limit"}
@@ -666,69 +689,84 @@ func TestPodStore_addStatus_enhanced_metrics(t *testing.T) {
 	podStore.addStatus(metric, pod)
 	assert.Equal(t, "Succeeded", metric.GetTag(ci.PodStatus))
-	assert.Equal(t, int(1), metric.GetField(ci.MetricName(ci.TypePod, ci.ContainerRestartCount)).(int))
+	assert.Equal(t, 2, metric.GetField(ci.MetricName(ci.TypePod, ci.ContainerRestartCount)))
 
+	// update the container metrics
+	// set up container defaults
 	tags = map[string]string{ci.MetricType: ci.TypeContainer, ci.K8sNamespace: "default", ci.K8sPodNameKey: "cpu-limit", ci.ContainerNamekey: "ubuntu"}
 	metric = generateMetric(fields, tags)
+	podStore.addStatus(metric, pod)
+	assert.Equal(t, 1, metric.GetField(ci.ContainerRestartCount))
+
+	// test the other container
+	tags = map[string]string{ci.MetricType: ci.TypeContainer, ci.K8sNamespace: "default", ci.K8sPodNameKey: "cpu-limit", ci.ContainerNamekey: "ubuntu2"}
+	metric = generateMetric(fields, tags)
+	podStore.addStatus(metric, pod)
+	assert.Equal(t, 1, metric.GetField(ci.ContainerRestartCount))
+
+	tags = map[string]string{ci.MetricType: ci.TypePod, ci.K8sNamespace: "default", ci.K8sPodNameKey: "cpu-limit"}
+	metric = generateMetric(fields, tags)
 
 	podStore.addStatus(metric, pod)
-	assert.Equal(t, "Terminated", metric.GetTag(ci.ContainerStatus))
-	assert.Equal(t, "OOMKilled", metric.GetTag(ci.ContainerLastTerminationReason))
-	assert.Equal(t, int(1), metric.GetField(ci.ContainerRestartCount).(int))
-	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusTerminated)))
-	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusTerminatedReasonOOMKilled)))
+	assert.Equal(t, 2, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusTerminated)))
+	assert.Equal(t, 2, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusTerminatedReasonOOMKilled)))
 
 	pod.Status.ContainerStatuses[0].LastTerminationState.Terminated = nil
 	pod.Status.ContainerStatuses[0].State.Waiting = &corev1.ContainerStateWaiting{Reason: "CrashLoopBackOff"}
+	pod.Status.ContainerStatuses[1].LastTerminationState.Terminated = nil
+	pod.Status.ContainerStatuses[1].State.Waiting = &corev1.ContainerStateWaiting{Reason: "CrashLoopBackOff"}
 
-	tags = map[string]string{ci.MetricType: ci.TypeContainer, ci.K8sNamespace: "default", ci.K8sPodNameKey: "cpu-limit", ci.ContainerNamekey: "ubuntu"}
+	tags = map[string]string{ci.MetricType: ci.TypePod, ci.K8sNamespace: "default", ci.K8sPodNameKey: "cpu-limit"}
 	metric = generateMetric(fields, tags)
 
 	podStore.addStatus(metric, pod)
-	assert.Equal(t, "Waiting", metric.GetTag(ci.ContainerStatus))
-	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusWaiting)))
-	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusWaitingReasonCrashLoopBackOff)))
+	//assert.Equal(t, "Waiting", metric.GetTag(ci.ContainerStatus))
+	assert.Equal(t, 2, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusWaiting)))
+	assert.Equal(t, 2, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusWaitingReasonCrashLoopBackOff)))
 	// sparse metrics
-	assert.Nil(t, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusWaitingReasonImagePullError)))
-	assert.Nil(t, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusTerminatedReasonOOMKilled)))
-	assert.Nil(t, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusWaitingReasonStartError)))
-	assert.Nil(t, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusWaitingReasonCreateContainerError)))
-	assert.Nil(t, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusWaitingReasonCreateContainerConfigError)))
+	assert.Nil(t, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusWaitingReasonImagePullError)))
+	assert.Nil(t, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusTerminatedReasonOOMKilled)))
+	assert.Nil(t, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusWaitingReasonStartError)))
+	assert.Nil(t, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusWaitingReasonCreateContainerError)))
+	assert.Nil(t, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusWaitingReasonCreateContainerConfigError)))
 
 	pod.Status.ContainerStatuses[0].State.Waiting = &corev1.ContainerStateWaiting{Reason: "ImagePullBackOff"}
+	pod.Status.ContainerStatuses[1].State.Waiting = &corev1.ContainerStateWaiting{Reason: "StartError"}
 
-	tags = map[string]string{ci.MetricType: ci.TypeContainer, ci.K8sNamespace: "default", ci.K8sPodNameKey: "cpu-limit", ci.ContainerNamekey: "ubuntu"}
+	tags = map[string]string{ci.MetricType: ci.TypePod, ci.K8sNamespace: "default", ci.K8sPodNameKey: "cpu-limit"}
 	metric = generateMetric(fields, tags)
 
 	podStore.addStatus(metric, pod)
-	assert.Equal(t, "Waiting", metric.GetTag(ci.ContainerStatus))
-	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusWaiting)))
-	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusWaitingReasonImagePullError)))
+	assert.Equal(t, "Succeeded", metric.GetTag(ci.PodStatus))
+	assert.Equal(t, 2, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusWaiting)))
+	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusWaitingReasonImagePullError)))
+	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusWaitingReasonStartError)))
 
 	pod.Status.ContainerStatuses[0].State.Waiting = &corev1.ContainerStateWaiting{Reason: "ErrImagePull"}
 	metric = generateMetric(fields, tags)
 	podStore.addStatus(metric, pod)
-	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusWaitingReasonImagePullError)))
+	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusWaitingReasonImagePullError)))
 
 	pod.Status.ContainerStatuses[0].State.Waiting = &corev1.ContainerStateWaiting{Reason: "InvalidImageName"}
 	metric = generateMetric(fields, tags)
 	podStore.addStatus(metric, pod)
-	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusWaitingReasonImagePullError)))
+	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusWaitingReasonImagePullError)))
 
 	pod.Status.ContainerStatuses[0].State.Waiting = &corev1.ContainerStateWaiting{Reason: "CreateContainerError"}
 	metric = generateMetric(fields, tags)
 	podStore.addStatus(metric, pod)
-	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusWaitingReasonCreateContainerError)))
+	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusWaitingReasonCreateContainerError)))
 
 	pod.Status.ContainerStatuses[0].State.Waiting = &corev1.ContainerStateWaiting{Reason: "CreateContainerConfigError"}
 	metric = generateMetric(fields, tags)
 	podStore.addStatus(metric, pod)
-	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusWaitingReasonCreateContainerConfigError)))
+	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusWaitingReasonCreateContainerConfigError)))
 
 	pod.Status.ContainerStatuses[0].State.Waiting = &corev1.ContainerStateWaiting{Reason: "StartError"}
+	pod.Status.ContainerStatuses[1].State.Waiting = nil
 	metric = generateMetric(fields, tags)
 	podStore.addStatus(metric, pod)
-	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypeContainer, ci.StatusWaitingReasonStartError)))
+	assert.Equal(t, 1, metric.GetField(ci.MetricName(ci.TypePod, "container_"+ci.StatusWaitingReasonStartError)))
 
 	// test delta of restartCount
 	pod.Status.ContainerStatuses[0].RestartCount = 3
@@ -736,13 +774,13 @@ func TestPodStore_addStatus_enhanced_metrics(t *testing.T) {
 	metric = generateMetric(fields, tags)
 
 	podStore.addStatus(metric, pod)
-	assert.Equal(t, int(2), metric.GetField(ci.MetricName(ci.TypePod, ci.ContainerRestartCount)).(int))
+	assert.Equal(t, 2, metric.GetField(ci.MetricName(ci.TypePod, ci.ContainerRestartCount)))
 
 	tags = map[string]string{ci.MetricType: ci.TypeContainer, ci.K8sNamespace: "default", ci.K8sPodNameKey: "cpu-limit", ci.ContainerNamekey: "ubuntu"}
 	metric = generateMetric(fields, tags)
 
 	podStore.addStatus(metric, pod)
-	assert.Equal(t, int(2), metric.GetField(ci.ContainerRestartCount).(int))
+	assert.Equal(t, 2, metric.GetField(ci.ContainerRestartCount))
 }
 
 func TestPodStore_addStatus_without_enhanced_metrics(t *testing.T) {