diff --git a/ocp-metadata/ocp-metadata.go b/ocp-metadata/ocp-metadata.go
index 0e6edc2..49dd8d3 100644
--- a/ocp-metadata/ocp-metadata.go
+++ b/ocp-metadata/ocp-metadata.go
@@ -129,7 +129,7 @@ func (meta *Metadata) GetCurrentPodCount() (int, error) {
 	if err != nil {
 		return podCount, err
 	}
-	podList, err := meta.clientSet.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{FieldSelector: "status.phase=Running"})
+	podList, err := meta.clientSet.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{FieldSelector: "status.phase=" + running})
 	if err != nil {
 		return podCount, err
 	}
@@ -144,13 +144,35 @@ func (meta *Metadata) GetCurrentPodCount() (int, error) {
 	return podCount, nil
 }
 
+// GetCurrentVMICount returns the number of KubeVirt VirtualMachineInstances
+// currently in the Running phase, across all namespaces.
+func (meta *Metadata) GetCurrentVMICount() (int, error) {
+	var vmiCount int
+	vmis, err := meta.dynamicClient.Resource(vmiGVR).Namespace(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		return vmiCount, err
+	}
+	for _, vmi := range vmis.Items {
+		// Check err before found: NestedString reports a non-nil error (with
+		// found == false) when the field exists but is not a string.
+		status, found, err := unstructured.NestedString(vmi.UnstructuredContent(), "status", "phase")
+		if err != nil {
+			return vmiCount, err
+		}
+		if !found {
+			return vmiCount, fmt.Errorf("phase field not found in kubevirt.io/v1/namespaces/%s/virtualmachineinstances/%s status", vmi.GetNamespace(), vmi.GetName())
+		}
+		if status == running {
+			vmiCount++
+		}
+	}
+	return vmiCount, nil
+}
+
 // GetDefaultIngressDomain returns default ingress domain of the default ingress controller
 func (meta *Metadata) GetDefaultIngressDomain() (string, error) {
-	ingressController, err := meta.dynamicClient.Resource(schema.GroupVersionResource{
-		Group:    "operator.openshift.io",
-		Version:  "v1",
-		Resource: "ingresscontrollers",
-	}).Namespace("openshift-ingress-operator").Get(context.TODO(), "default", metav1.GetOptions{})
+	ingressController, err := meta.dynamicClient.Resource(ingressControllerGVR).
+		Namespace("openshift-ingress-operator").Get(context.TODO(), "default", metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -163,11 +185,7 @@ func (meta *Metadata) GetDefaultIngressDomain() (string, error) {
 
 // getPrometheusURL Returns a valid prometheus endpoint from the openshift-monitoring/prometheus-k8s route
 func getPrometheusURL(dynamicClient dynamic.Interface) (string, error) {
-	route, err := dynamicClient.Resource(schema.GroupVersionResource{
-		Group:    routeGroup,
-		Version:  routeVersion,
-		Resource: routeResource,
-	}).Namespace(monitoringNs).Get(context.TODO(), "prometheus-k8s", metav1.GetOptions{})
+	route, err := dynamicClient.Resource(routeGVR).Namespace(monitoringNs).Get(context.TODO(), "prometheus-k8s", metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
diff --git a/ocp-metadata/types.go b/ocp-metadata/types.go
index e8cbc76..79321f9 100644
--- a/ocp-metadata/types.go
+++ b/ocp-metadata/types.go
@@ -14,19 +14,42 @@
 package ocpmetadata
 
-import "time"
+import (
+	"time"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
 
 // OCP specific constants
 const (
-	routeGroup    = "route.openshift.io"
-	routeVersion  = "v1"
-	routeResource = "routes"
+	running            = "Running"
 	completedUpdate    = "Completed"
 	workerNodeSelector = "node-role.kubernetes.io/worker=,node-role.kubernetes.io/infra!=,node-role.kubernetes.io/workload!="
 	monitoringNs       = "openshift-monitoring"
 	tokenExpiration    = 10 * time.Hour
 )
 
+// routeGVR identifies OpenShift Route objects.
+var routeGVR = schema.GroupVersionResource{
+	Group:    "route.openshift.io",
+	Version:  "v1",
+	Resource: "routes",
+}
+
+// vmiGVR identifies KubeVirt VirtualMachineInstance objects.
+var vmiGVR = schema.GroupVersionResource{
+	Group:    "kubevirt.io",
+	Version:  "v1",
+	Resource: "virtualmachineinstances",
+}
+
+// ingressControllerGVR identifies OpenShift IngressController operator objects.
+var ingressControllerGVR = schema.GroupVersionResource{
+	Group:    "operator.openshift.io",
+	Version:  "v1",
+	Resource: "ingresscontrollers",
+}
+
 // infraObj
 // TODO at the moment can be used to decode some AWS platform specific information from the infrastructure object
 // like region and resourceTags (which is actually used to detect if this is a ROSA cluster)